archetypesBoundary <- function(data,numArch,verbose,numRep){
ldata <- data
  #Run the archetypes algorithm repeatedly, from 1 to numArch archetypes:
sequen <- seq(length = numArch)
lass <- stepArchetypesRawData(data = ldata, numArch = sequen,
numRep = numRep, verbose = verbose)
return(lass)
}
| /scratch/gouwar.j/cran-all/cranData/Anthropometry/R/archetypesBoundary.R |
archetypoids <- function(numArchoid,data,huge=200,step,init,ArchObj,nearest="cand_ns",sequ,aux){
if(!step){
N = dim(data)[1]
if(sequ){
ai <- archetypes::bestModel(ArchObj[[numArchoid]])
}else{
ai <- archetypes::bestModel(ArchObj[[numArchoid-aux]])
}
if(is.null(archetypes::parameters(ai))){
stop("No archetypes computed")
}else{
ras <- rbind(archetypes::parameters(ai),data)
dras <- dist(ras, method = "euclidean", diag = F, upper = T, p = 2)
mdras <- as.matrix(dras)
diag(mdras) = 1e+11
}
if(nearest == "cand_ns"){
ini_arch <- sapply(seq(length = numArchoid), nearestToArchetypes, numArchoid, mdras)
if( all(ini_arch > numArchoid) == FALSE){
k=1
neig <- knn(data, archetypes::parameters(ai), 1:N, k=k)
indices1 <- attr(neig, "nn.index")
ini_arch <- indices1[,k]
while(any(duplicated(ini_arch))){
k=k+1
neig <- knn(data, archetypes::parameters(ai), 1:N, k=k)
indicesk <- attr(neig, "nn.index")
dupl <- anyDuplicated(indices1[,1])
ini_arch <- c(indices1[-dupl,1],indicesk[dupl,k])
}
}
}else if(nearest == "cand_alpha"){
ini_arch <- apply(coef(ai, "alphas"), 2, which.max)
}else if(nearest == "cand_beta"){
ini_arch <- c()
for (j in 1:numArchoid){
ini_arch[j] <- which.max(ai$betas[j,])
}
}else{
stop("The nearest vector must be cand_ns, cand_alpha or cand_beta")
}
}else{
ini_arch <- init
}
n <- ncol(t(data))
x_gvv <- rbind(t(data), rep(huge, n))
zs <- x_gvv[,ini_arch]
zs <- as.matrix(zs)
alphas <- matrix(0, nrow = numArchoid, ncol = n)
for (j in 1 : n){
alphas[, j] = coef(nnls(zs, x_gvv[,j]))
}
resid <- zs %*% alphas - x_gvv
rss_ini <- max(svd(resid)$d) / n
res_def <- swap(ini_arch, rss_ini, huge, numArchoid, x_gvv, n)
return(res_def)
} | /scratch/gouwar.j/cran-all/cranData/Anthropometry/R/archetypoids.R |
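#Illustrative sketch (not part of the Anthropometry package): how the constant "huge" row
#used in archetypoids() above pushes nnls towards convex weights. Appending a row equal to
#huge both to the candidate archetypoids (zs) and to every data point makes any coefficient
#vector whose sum is far from 1 pay a large penalty, so the non-negative least-squares fit
#approximately satisfies sum(alphas[, j]) == 1. The toy data, the chosen candidate columns
#and huge = 200 are assumptions used only for illustration.
if(requireNamespace("nnls", quietly = TRUE)){
  set.seed(1)
  toy <- matrix(rnorm(20), nrow = 10, ncol = 2)   #10 individuals, 2 variables
  huge <- 200
  x_gvv <- rbind(t(toy), rep(huge, nrow(toy)))    #2 variables plus penalty row, 10 columns
  zs <- x_gvv[, c(1, 5, 9)]                       #three candidate archetypoids
  alphas <- apply(x_gvv, 2, function(xj) coef(nnls::nnls(zs, xj)))
  print(colSums(alphas))                          #each column sums to (almost exactly) 1
}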
array3Dlandm <- function(numLandm,numIndiv,matLandm){
dg <- array(0,dim = c(numLandm,3,numIndiv))
for(k in 1:numIndiv){
for(l in 1:3){
dg[,l,k] <- as.matrix(matLandm[k,][seq(l,dim(matLandm)[2]+(l-1),by=3)], ncol = 1, byrow = TRUE)
}
}
return(dg)
} | /scratch/gouwar.j/cran-all/cranData/Anthropometry/R/array3Dlandm.R |
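#Illustrative sketch (not part of the Anthropometry package): array3Dlandm assumes that each
#row of matLandm stores the landmarks interleaved as x1, y1, z1, x2, y2, z2, ..., so that
#seq(l, ncol(matLandm) + (l - 1), by = 3) extracts every x (l = 1), y (l = 2) or z (l = 3)
#coordinate. The toy matrix below (2 individuals, 2 landmarks) is an assumption used only to
#show the resulting numLandm x 3 x numIndiv array; it requires the function defined above to
#be sourced (or the package loaded).
toyLandm <- rbind(c(1, 2, 3, 4, 5, 6),      #individual 1: landmark 1 = (1,2,3), landmark 2 = (4,5,6)
                  c(7, 8, 9, 10, 11, 12))   #individual 2: landmark 1 = (7,8,9), landmark 2 = (10,11,12)
array3Dlandm(numLandm = 2, numIndiv = 2, matLandm = toyLandm)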
bustSizesStandard <- function(bustCirc_4, bustCirc_6){
bustCirc <- c(bustCirc_4, bustCirc_6)
nsizes <- length(bustCirc)
return(list(bustCirc=bustCirc, nsizes=nsizes))
} | /scratch/gouwar.j/cran-all/cranData/Anthropometry/R/bustSizesStandard.R |
cdfDissWomenPrototypes <- function(min_med,min_med_UNE,main,xlab,ylab,leg,cexLeg,...){
plot(sort(min_med), (1:length(min_med))/length(min_med), type = "s", main = main, xlab = xlab, ylab = ylab,
xlim = c(0,1), ylim = c(0,1), yaxt = "n",...)
axis(2,at = seq(0,1,0.1),labels = seq(0,1,0.1))
lines(sort(min_med_UNE), (1:length(min_med_UNE))/length(min_med_UNE), type="s", lty = "dashed",...)
legend("bottomright", legend = leg, lty = c("solid", "dashed"), cex = cexLeg)
}
| /scratch/gouwar.j/cran-all/cranData/Anthropometry/R/cdfDissWomenPrototypes.R |
checkBranchLocalIMO <- function(tree,data,i,maxsplit,asw.tol,local.const,
orness,type,ah,verbose,...){
if (is.vector(data)){
proposal <- list(reject = TRUE, tree = -1)
} else {
if (ncol(data) <= 2){
proposal <- list(reject = TRUE,tree = -1)
} else {
      if(sum(tree$clustering == i) <= 2){ #First stopping criterion.
proposal <- list(tree = tree,reject = TRUE)
} else {
which.x <- (tree$clustering == i)
xi <- data[which.x,]
if (nrow(xi) <= maxsplit){
maxsplit2 <- max(nrow(xi) - 1, 2)
DIST <- ext.dist(xi, maxsplit2, orness, ah, verbose)
out <- INCAnumclu(DIST, K = maxsplit2, method = "pam", L = NULL, noise = NULL)
maxsplit <- maxsplit2
}else{
DIST <- ext.dist(xi, maxsplit, orness, ah, verbose)
out <- INCAnumclu(DIST, K = maxsplit, method = "pam", L = NULL, noise = NULL)
}
        if(max(out$INCAindex[2:maxsplit]) <= 0.2){ #Second stopping criterion.
proposal <- list(tree = tree,reject = TRUE)
}else{
xi.ps <- getBestPamsamIMO(xi, maxsplit, orness = orness, type,
ah, verbose, ...)
if (is.null(local.const)){
n.sub.clust <- xi.ps$num.of.clusters
asw.vec <- rep(NA,n.sub.clust)
for (j in 1:n.sub.clust){
if (sum(xi.ps$clustering==j) <= 2){
asw.vec[j] <- 0
} else {
xij <- xi[xi.ps$clustering==j,]
asw.vec[j] <- getBestPamsamIMO(xij, maxsplit, orness = orness, type,
ah, verbose, ...)$asw
}
}
            if (xi.ps$asw > mean(asw.vec) - asw.tol){ #Third stopping criterion.
tree <- update.tree.local(object = tree, xi.ps, which.x, i)
proposal <- list(tree = tree, reject = FALSE)
} else {
proposal <- list(tree = tree, reject = TRUE)
}
} else {
if (xi.ps$asw > local.const){
tree <- update.tree.local(object = tree, xi.ps, which.x, i)
proposal <- list(tree = tree, reject = FALSE)
} else {
proposal <- list(tree = tree, reject = TRUE)
}
}
}
}
}
}
proposal
}
| /scratch/gouwar.j/cran-all/cranData/Anthropometry/R/checkBranchLocalIMO.R |
checkBranchLocalMO <- function(tree,data,i,maxsplit,asw.tol,local.const,orness,
type, ah, verbose, ...){
if (is.vector(data)){
proposal <- list(reject = TRUE,tree = -1)
}else{
if(ncol(data) <= 2){
proposal <- list(reject = TRUE,tree = -1)
}else{
      if(sum(tree$clustering == i) <= 2){ #First stopping criterion.
proposal <- list(tree=tree,reject=TRUE)
}else{
which.x <- (tree$clustering == i)
xi <- data[which.x,]
xi.ps <- getBestPamsamMO(xi,maxsplit,orness,type,ah,verbose,...)
if(is.null(local.const)){
n.sub.clust <- xi.ps$num.of.clusters
asw.vec <- rep(NA, n.sub.clust)
for(j in 1:n.sub.clust){
if(sum(xi.ps$clustering == j) <= 2){
asw.vec[j] <- 0
}else{
xij <- xi[xi.ps$clustering == j,]
asw.vec[j] <- getBestPamsamMO(xij,maxsplit,orness,type,ah,verbose,...)$asw
}
}
          if(xi.ps$asw > mean(asw.vec) - asw.tol){ #Second stopping criterion.
tree <- update.tree.local(object = tree, xi.ps, which.x, i)
proposal <- list(tree = tree,reject = FALSE)
}else{
proposal <- list(tree = tree,reject = TRUE)
}
}else{
if(xi.ps$asw > local.const){
tree <- update.tree.local(object = tree, xi.ps, which.x, i)
proposal <- list(tree = tree, reject = FALSE)
}else{
proposal <- list(tree=tree,reject=TRUE)
}
}
}
}
}
proposal
}
| /scratch/gouwar.j/cran-all/cranData/Anthropometry/R/checkBranchLocalMO.R |
computSizesHipamAnthropom <- function(dataHip, bust, bustMeasur, nsizes, maxsplit, orness, type, ah, verbose = FALSE){
res_hipam <- list() ; class(res_hipam) <- "hipamAnthropom"
for(i in 1 : nsizes){
data = dataHip[(bust >= bustMeasur[i]) &
(bust < bustMeasur[i + 1]), ]
dataMat <- as.matrix(data)
    res_hipam[[i]] <- hipamAnthropom(dataMat, maxsplit = maxsplit, orness = orness,
                                     type = type, ah = ah, verbose = verbose)
}
return(res_hipam)
} | /scratch/gouwar.j/cran-all/cranData/Anthropometry/R/computSizesHipamAnthropom.R |
computSizesTrimowa <- function(dataTrim, bust, bustMeasur, nsizes, w, numClust, alpha, niter, algSteps, ah, verbose = FALSE){
res_trimowa <- list() ; class(res_trimowa) <- "trimowa"
for (i in 1 : nsizes){
data = dataTrim[(bust >= bustMeasur[i]) &
(bust < bustMeasur[i + 1]), ]
   res_trimowa[[i]] <- trimowa(data, w, numClust, alpha,
                               niter, algSteps, ah, verbose = verbose)
}
return(res_trimowa)
}
| /scratch/gouwar.j/cran-all/cranData/Anthropometry/R/computSizesTrimowa.R |
figures8landm <- function(figure,data){
if(figure == "cube"){
cube <- data
cube_arr <- array(as.matrix(cube), dim = c(dim(cube)[1], 3, 1))
x <- cube_arr[,,1] ; type = "p" ; color = 2
xt <- array(0, c(dim(x), 1))
xt[, , 1] <- x
x <- xt
open3d()
bg3d(color = "white")
k <- dim(x)[1]
sz <- centroid.size(x[, , 1])/sqrt(k)/30
joinline = c(4,6,5,2,4,3,8,6,8,7,5,7,1,3,1,2)
plotshapes3d(x, type = type, color = color, size = sz, joinline = joinline)
axes3d(color = "black")
title3d(xlab = "x", ylab = "y", zlab = "z", color = "black")
text3d(x = x[,,1][,1][1] + 1, y = x[,,1][,2][1] + 0.5, z = x[,,1][,3][1] + 0.5, "1", adj = c(1,1), cex = 1, col = "black")
text3d(x = x[,,1][,1][2] + 0, y = x[,,1][,2][2] + 0.5, z = x[,,1][,3][2] + 0.5, "2", adj = c(1,1), cex = 1, col = "black")
text3d(x = x[,,1][,1][3] + 1.2, y = x[,,1][,2][3] + 0.5, z = x[,,1][,3][3] + 0.5, "3", adj = c(1,1), cex = 1, col = "black")
text3d(x = x[,,1][,1][4] + 1, y = x[,,1][,2][4] + 0.5, z = x[,,1][,3][4] + 0.5, "4", adj = c(1,1), cex = 1, col = "black")
text3d(x = x[,,1][,1][5] + 0.8, y = x[,,1][,2][5] + 0.5, z = x[,,1][,3][5] + 0.5, "5", adj = c(1,1), cex = 1, col = "black")
text3d(x = x[,,1][,1][6] + 1, y = x[,,1][,2][6] + 0.5, z = x[,,1][,3][6] + 0.5, "6", adj = c(1,1), cex = 1, col = "black")
text3d(x = x[,,1][,1][7] + 1, y = x[,,1][,2][7] + 0.5, z = x[,,1][,3][7] + 0.5, "7", adj = c(1,1), cex = 1, col = "black")
text3d(x = x[,,1][,1][8] + 0, y = x[,,1][,2][8] + 0.5, z = x[,,1][,3][8] + 0.5, "8", adj = c(1,1), cex = 1, col = "black")
}else if(figure == "paral"){
paral <- data
paral_arr <- array(as.matrix(paral), dim = c(dim(paral)[1], 3, 1))
x <- paral_arr[,,1] ; type = "p" ; color = 2
xt <- array(0, c(dim(x), 1))
xt[, , 1] <- x
x <- xt
open3d()
bg3d(color = "white")
k <- dim(x)[1]
sz <- centroid.size(x[, , 1])/sqrt(k)/30
joinline = c(8,6,5,7,8,3,4,2,5,2,1,7,1,3,4,6)
plotshapes3d(x, type = type, color = color, size = sz, joinline = joinline)
axes3d(color = "black")
title3d(xlab = "x", ylab = "y", zlab = "z", color = "black")
text3d(x = x[,,1][,1][1] + 2, y = x[,,1][,2][1] + 1, z = x[,,1][,3][1] + 1, "1", adj = c(1,1), cex = 1, col = "black")
text3d(x = x[,,1][,1][2] + 1, y = x[,,1][,2][2] + 1, z = x[,,1][,3][2] + 1, "2", adj = c(1,1), cex = 1, col = "black")
text3d(x = x[,,1][,1][3] + 2, y = x[,,1][,2][3] + 1, z = x[,,1][,3][3] + 1, "3", adj = c(1,1), cex = 1, col = "black")
text3d(x = x[,,1][,1][4] + 2, y = x[,,1][,2][4] + 1, z = x[,,1][,3][4] + 1, "4", adj = c(1,1), cex = 1, col = "black")
text3d(x = x[,,1][,1][5] + 1, y = x[,,1][,2][5] + 1, z = x[,,1][,3][5] + 1, "5", adj = c(1,1), cex = 1, col = "black")
text3d(x = x[,,1][,1][6] + 2, y = x[,,1][,2][6] + 1, z = x[,,1][,3][6] + 1, "6", adj = c(1,1), cex = 1, col = "black")
text3d(x = x[,,1][,1][7] + 2, y = x[,,1][,2][7] + 1, z = x[,,1][,3][7] + 1, "7", adj = c(1,1), cex = 1, col = "black")
text3d(x = x[,,1][,1][8] + 2, y = x[,,1][,2][8] + 1, z = x[,,1][,3][8] + 1, "8", adj = c(1,1), cex = 1, col = "black")
}else{
stop("Sorry,that figure does not belong to the package")
}
} | /scratch/gouwar.j/cran-all/cranData/Anthropometry/R/figures8landm.R |
getBestPamsamIMO <- function(data,maxsplit,orness=0.7,type,ah,verbose,...){
if (nrow(data) <= maxsplit){
maxsplit <- max(nrow(data) - 1, 2)
}
if(maxsplit > 2){
DIST <- ext.dist(data, maxsplit, orness, ah, verbose)
out <- INCAnumclu(DIST, K = maxsplit, method = "pam", L = NULL, noise = NULL)
if(max(out$INCAindex[2:maxsplit]) <= 0.2){
k <- 3
}else{
   #According to Irigoien et al. (2008), "We propose to choose k as the value of k prior to the first biggest
   #slope decrease".
   #out$INCAindex[2:maxsplit] are the out$INCAindex values without the NA value located at the first position.
   #The diff function calculates the differences between consecutive elements, so we know the decrease in the
   #INCA index for each k (a standalone toy example of this rule is given after this function).
restas <- diff(out$INCAindex[2:maxsplit], 1)
   if(sum(restas >= 0) == length(restas)){ #if all subtractions are non-negative, there is no decrease.
#In this radical case, we fix k = 3.
k <- 3
}else{
#Greatest subtraction:
min_resta <- min(restas)
    if(length(which(restas == min_resta)) > 1){ #several subtractions may take the same value.
     #In that case, R would return several values of k and display many warnings. Therefore, we choose
     #the first of those subtractions (the others take the same value, so they are not smaller):
k <- which(restas == min_resta)[1] + 1 #In order to identify correctly the value of k prior to the first
#biggest slope decrease, we have to add 1.
}else{
k <- which(restas == min_resta) + 1
}
}
}
x.ps <- pamsam(data, k = k, type = type, DIST = DIST, ah = ah, verbose = verbose, ...)
}else{
DIST <- ext.dist(data, maxsplit, orness, ah, verbose)
x.ps <- pamsam(data, k = 2, type = type, DIST = DIST, ah = ah, verbose = verbose,...)
}
#Object checking:
check <- c()
for(i in 1:length(x.ps)){
check[i] <- exists(names(x.ps)[i], where = x.ps)
}
if("FALSE" %in% check){
stop("Any of the objects doesn't exist!. Revise the function.")
}else{
return(x.ps)
}
}
| /scratch/gouwar.j/cran-all/cranData/Anthropometry/R/getBestPamsamIMO.R |
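#Illustrative sketch (not part of the Anthropometry package): the rule used above to choose k,
#i.e. "the value of k prior to the first biggest slope decrease" of the INCA index
#(Irigoien et al., 2008). The toy INCA values are an assumption; they mimic out$INCAindex for
#k = 1, ..., 5, with the leading NA dropped before diff().
incaToy <- c(NA, 0.55, 0.52, 0.30, 0.28)
restas <- diff(incaToy[2:5], 1)            #consecutive changes: -0.03, -0.22, -0.02
k <- which(restas == min(restas))[1] + 1   #largest drop is from k = 3 to k = 4, so k = 3
k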
getBestPamsamMO <- function(data,maxsplit,orness = 0.7,type,ah,verbose, ...){
if(nrow(data) <= maxsplit){
maxsplit <- max(nrow(data) - 1, 2)
}
if(maxsplit > 2){
#Create the pamsams with 2 to maxsplit splits:
max.asw <- -1
for (k in 2 : maxsplit){
xtemp.ps <- pamsam(data, k = k, type = type, DIST = NULL, maxsplit = maxsplit,
orness = orness, ah, verbose, ...)
if (xtemp.ps$asw > max.asw){
max.asw <- xtemp.ps$asw
x.ps <- xtemp.ps
}
}
}else{
x.ps <- pamsam(data, k = 2, type = type, DIST = NULL, maxsplit = maxsplit,
orness = orness, ah, verbose, ...)
}
#Object checking:
check <- c()
for(i in 1:length(x.ps)){
check[i] <- exists(names(x.ps)[i], where = x.ps)
}
if("FALSE" %in% check){
stop("Any of the objects doesn't exist!. Revise the function.")
}else{
return(x.ps)
}
}
| /scratch/gouwar.j/cran-all/cranData/Anthropometry/R/getBestPamsamMO.R |
getDistMatrix <- function(data,np,nv,w,bl,bh,al,ah,verbose){
if( (!is.numeric(np)) || (np!=as.integer(np)) )
{
stop("getDistMatrix Error: np is not an integer.\n");
#break;
}
if( (!is.numeric(nv)) || (nv!=as.integer(nv)) )
{
stop("getDistMatrix Error: nv is not an integer.\n");
#break;
}
if(length(data)!=(np*nv))
{
stop("getDistMatrix Error: data is not a vector of",np*nv,"components.\n");
#break;
}
if((length(bh)!=nv) || (length(bl)!=nv) || (length(ah)!=nv) || (length(al)!=nv))
{
stop("getDistMatrix Error: any of the vectors bh,bl,ah or al has not the correct length (it must be ",nv,")\n");
#break;
}
if(sum(bl<0)!=nv)
{
stop("getDistMatrix Error: any of the elements of bl is not strictly negative.\n");
#break;
}
if(sum(bh>0)!=nv)
{
stop("getDistMatrix Error: any of the elements of bh is not strictly positive.\n");
#break;
}
if(sum(al>0)!=nv)
{
stop("getDistMatrix Error: any of the elements of al is not strictly positive (remember: the sign changes inside).\n");
#break;
}
if(sum(ah>0)!=nv)
{
stop("getDistMatrix Error: any of the elements of ah is not strictly positive.\n");
#break;
}
if(verbose)
{
t1=Sys.time();
}
errorfill=0;
errorfill=.Call("FillAllDistOwa",data,w,nv,np,al,ah,bl,bh,verbose);
if(errorfill==1)
{
stop("Error: the dissimilarity symmetric matrix was reserved but not released.\n");
#break;
}
if(errorfill==2)
{
stop("Error: it was not possible to reserve memory not even for the pointers to the rows of the dissimilarity matrix.\n");
#break;
}
if(errorfill==3)
{
stop("Error: it was not possible to reserve memory for all or part of the dissimilarity matrix.\n");
#break;
}
if(verbose)
{
t2=Sys.time();
cat("Time spent:",difftime(t2,t1,units="secs"),"seconds.\n");
}
if(verbose)
{
cat("Reserving space for d...\n");
}
d=rep(0,np*np);
dim(d)<-c(np,np);
if(verbose)
{
cat("Done\n");
cat("Returning the distance matrix...\n");
cat("Row:\n0 ");
t1=Sys.time();
}
for( row in (1:np) )
{
retval=.Call("GetRowAndFree",row-1);
if(is.null(retval))
{
stop("Error calling GetRowAndFree: trying to access a non-existent matrix row.\n");
#break;
}
d[row,row:np]=retval;
if((verbose) && (row==100*as.integer(row/100)))
{
cat(row," ");
}
}
if(verbose)
{
t2=Sys.time();
cat("\nTime spent::",difftime(t2,t1,units="secs"),"seconds.\n");
}
.Call("DeleteDistOwa");
if(verbose)
{
cat("Making the matrix symmetrical ...\n");
}
for( row in (2:np) )
{
d[row,1:(row-1)]=d[1:(row-1),row];
}
if(verbose)
{
cat("Done.\n");
}
d
} | /scratch/gouwar.j/cran-all/cranData/Anthropometry/R/getDistMatrix.R |
hipamAnthropom <- function(data,asw.tol=0,maxsplit=5,local.const=NULL,orness=0.7,type,
ah=c(23,28,20,25,25),verbose,...){
#Initialize the tree:
tree <- initialize.tree(data, maxsplit, orness, type, ah, verbose, ...)
#Local hipam:
tree <- hipam.local(tree, data, asw.tol, maxsplit, local.const, orness, type, ah, verbose, ...)
tree$cases <- tree$medoids
tree$medoids <- NULL
tree
}
| /scratch/gouwar.j/cran-all/cranData/Anthropometry/R/hipamAnthropom.R |
nearestToArchetypes <- function(indivs,numArch,mdras){
as.numeric(which.min(mdras[indivs,]) - (numArch))
}
| /scratch/gouwar.j/cran-all/cranData/Anthropometry/R/nearestToArchetypes.R |
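#Illustrative sketch (not part of the Anthropometry package): in the distance matrix mdras
#built from rbind(archetypes, data) (see archetypoids() above), rows and columns 1..numArch
#correspond to the archetypes, so subtracting numArch converts the column index of the
#nearest observation back to a row number of the original data. The toy coordinates
#(2 "archetypes" followed by 3 individuals) are an assumption used only for illustration.
toyRas <- rbind(matrix(c(0, 0, 10, 10), 2, 2, byrow = TRUE),       #2 archetypes
                matrix(c(1, 1, 9, 9, 5, 5), 3, 2, byrow = TRUE))   #3 individuals
toyM <- as.matrix(dist(toyRas))
diag(toyM) <- 1e+11
sapply(seq_len(2), nearestToArchetypes, numArch = 2, mdras = toyM) #individuals 1 and 2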
optraShapes <- function(array3D,n,c,numClust,ic1,ic2,nc,an1,an2,ncp,d,itran,live,indx){
#If cluster L is updated in the last quick-transfer stage, it belongs to the live set throughout this stage.
#Otherwise, at each step, it is not in the live set if it has not been updated in the last M optimal transfer
#steps:
for(l in 1 : numClust){
if ( itran[l] == 1){
live[l] = n + 1
}
}
for(i in 1 : n){
indx = indx + 1
l1 = ic1[i]
l2 = ic2[i]
ll = l2
#If point I is the only member of cluster L1, no transfer:
if(1 < nc[l1]){
#If L1 has not yet been updated in this stage, no need to re-compute D(I):
      if( ncp[l1] != 0 ){
de = (riemdist(array3D[,,i], c[,,l1]))^2
d[i] = de * an1[l1]
}
#Find the cluster with minimum R2:
da = (riemdist(array3D[,,i], c[,,l2]))^2
r2 = da * an2[l2]
for(l in 1 : numClust){
#If LIVE(L1) <= I, then L1 is not in the live set. If this is true, we only need to consider clusters that
#are in the live set for possible transfer of point I (Step 4b). Otherwise, we need to consider all possible
#clusters (Step 4a):
if( ( i < live[l1] || i < live[l2] ) && l != l1 && l != ll ){
rr = r2 / an2[l]
dc = (riemdist(array3D[,,i], c[,,l]))^2
          if(dc < rr){
            r2 = dc * an2[l]
            l2 = l
          }
        }
}
#If no transfer is necessary, L2 is the new IC2(I):
if(d[i] <= r2){
ic2[i] = l2
#Update cluster centers, LIVE, NCP, AN1 and AN2 for clusters L1 and L2, and update IC1(I) and IC2(I):
}else{
indx = 0
live[l1] = n + i
live[l2] = n + i
ncp[l1] = i
ncp[l2] = i
al1 = nc[l1]
alw = al1 - 1
al2 = nc[l2]
alt = al2 + 1
nc[l1] = nc[l1] - 1
nc[l2] = nc[l2] + 1
an2[l1] = alw / al1
if(1 < alw){
an1[l1] = alw / ( alw - 1 )
}else{
an1[l1] = Inf
}
an1[l2] = alt / al2
an2[l2] = alt / ( alt + 1 )
ic1[i] = l2
ic2[i] = l1
x1 <- array3D[, , ic1 == l1]
if (length(dim(x1)) != 3) {
return(cat("Please ensure that array3D has 3 dimensions."))
}else{
c[,,l1] = shapes::procGPA(x1, distances = TRUE, pcaoutput = TRUE)$mshape
}
x2 <- array3D[, , ic1 == l2]
if (length(dim(x2)) != 3) {
return(cat("Please ensure that array3D has 3 dimensions."))
}else{
c[,,l2] = shapes::procGPA(x2, distances = TRUE, pcaoutput = TRUE)$mshape
}
#c[,,l1] = procGPA(array3D[, , ic1 == l1], distances = TRUE, pcaoutput = TRUE)$mshape
#c[,,l2] = procGPA(array3D[, , ic1 == l2], distances = TRUE, pcaoutput = TRUE)$mshape
}
}
if( indx == n ){ #indx is the number of steps since a transfer took place.
return(list(c, ic1, ic2, nc, an1, an2, ncp, d, itran, live, indx))
}
}
#ITRAN(L) = 0 before entering QTRAN. Also, LIVE(L) has to be decreased by M before re-entering OPTRA:
for (l in 1 : numClust){
itran[l] = 0
live[l] = live[l] - n
}
return(list(c, ic1, ic2, nc, an1, an2, ncp, d, itran, live, indx))
} | /scratch/gouwar.j/cran-all/cranData/Anthropometry/R/optraShapes.R |
overlapBiclustersByRows <- function(Bic,resBicluster) {
x <- rep(0, nrow(resBicluster@RowxNumber))
x <- x + Bic * resBicluster@RowxNumber[, Bic]
x
}
| /scratch/gouwar.j/cran-all/cranData/Anthropometry/R/overlapBiclustersByRows.R |
percentilsArchetypoid <- function(column,indiv,data,digits){
aux1 <- data[,colnames(data)[column]]
aux2 <- as.matrix(aux1)
Fn <- ecdf(aux2)
aux3 <- Fn(data[indiv,colnames(data)[column]])
round(aux3 * 100, digits = digits)
}
| /scratch/gouwar.j/cran-all/cranData/Anthropometry/R/percentilsArchetypoid.R |
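#Illustrative sketch (not part of the Anthropometry package): percentilsArchetypoid returns
#the empirical percentile (via ecdf) of one individual for one column of the data. The toy
#data frame is an assumption; individual 3 holds the third of five ordered heights, so its
#percentile is 3/5 * 100 = 60.
toyDF <- data.frame(height = c(150, 160, 170, 180, 190),
                    weight = c(50, 60, 70, 80, 90))
percentilsArchetypoid(column = 1, indiv = 3, data = toyDF, digits = 0)   #60
sapply(1:2, percentilsArchetypoid, indiv = 5, data = toyDF, digits = 0)  #100 100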
plotPrototypes <- function(data,prototypes,nsizes,bustVariable,variable,col,xlim,ylim,main,EN){
 #Bust intervals defined by the European standard sizing system:
Bust_4 <- seq(76, 104, 4) ; Bust_6 <- seq(110, 128, 6) ; BustVec <- c(Bust_4, Bust_6)
if(variable == "chest"){
plot(data[, bustVariable], data[, variable], pch = "*", col = "thistle1", xlab = bustVariable, ylab = variable,
main = main, xlim = xlim, ylim = ylim, xaxt = "n", yaxt = "n")
axis(1, at = seq(xlim[1], xlim[2], 10), labels = seq(xlim[1], xlim[2], 10))
axis(2, at = seq(ylim[1], ylim[2], 10), labels = seq(ylim[1], ylim[2], 10))
for(i in 1 : nsizes){
#To locate correctly the rows of the prototypes in the whole database, the prototypes labels must be
#converted into a character.
points(data[as.character(prototypes[[i]]), bustVariable], data[as.character(prototypes[[i]]), variable], pch = i,
col = col[i])
}
if(EN){
    #The European standard sizing system does not fix standard chest measurements. To overcome this
    #limitation, we round the values obtained by means of a linear regression
    #(see Ibanez et al. (2012)) to the nearest integer:
Chest_4 <- seq(79, 107, 4) ; Chest_6 <- seq(112, 130, 6) ; Chest <- c(Chest_4, Chest_6)
for(i in 1:length(Bust_4)){
symbols(Bust_4[i], Chest_4[i], rectangles = matrix(c(4,4), 1, 2), add = TRUE, inches = FALSE)
points(Bust_4[i], Chest_4[i], pch = ".", cex = 2.3)
}
for(i in 1:length(Bust_6)){
symbols(Bust_6[i] - 1, Chest_6[i] - 1, rectangles = matrix(c(6,6),1,2), add = TRUE, inches = FALSE)
points(Bust_6[i] - 1, Chest_6[i] - 0.5, pch = ".", cex = 2.3)
}
}
}
if(variable == "hip"){
plot(data[, bustVariable], data[, variable], pch = "*", col = "thistle1", xlab = bustVariable, ylab = variable,
main = main, xlim = xlim, ylim = ylim, xaxt = "n", yaxt = "n")
axis(1, at = seq(xlim[1], xlim[2], 10), labels = seq(xlim[1], xlim[2], 10))
axis(2, at = seq(ylim[1], ylim[2], 10), labels = seq(ylim[1], ylim[2], 10))
for(i in 1 : nsizes){
points(data[as.character(prototypes[[i]]), bustVariable], data[as.character(prototypes[[i]]), variable], pch = i,
col = col[i])
}
if(EN){
    #Hip intervals defined by the European standard sizing system.
Hip_4 <- seq(84,112,4) ; Hip_5 <- seq(117,132,5) ; Hip = c(Hip_4,Hip_5)
for(i in 1:length(Bust_4)){
symbols(Bust_4[i], Hip_4[i], rectangles = matrix(c(4,4), 1, 2), add = TRUE, inches = FALSE)
points(Bust_4[i], Hip_4[i], pch = ".", cex = 2.3)
}
for(i in 1:length(Bust_6)){
symbols(Bust_6[i] - 1, Hip_5[i] - 0.5, rectangles = matrix(c(6,5),1,2), add = TRUE, inches = FALSE)
points(Bust_6[i] - 1, Hip_5[i] - 0.5, pch = ".", cex = 2.3)
}
}
}
if(variable == "necktoground"){
plot(data[, bustVariable], data[, variable], pch = "*", col = "thistle1", xlab = bustVariable, ylab = variable,
main = main, xlim = xlim, ylim = ylim, xaxt = "n", yaxt = "n")
axis(1, at = seq(xlim[1], xlim[2], 10), labels = seq(xlim[1], xlim[2], 10))
axis(2, at = seq(ylim[1], ylim[2], 10), labels = seq(ylim[1], ylim[2], 10))
for(i in 1 : nsizes){
points(data[as.character(prototypes[[i]]), bustVariable], data[as.character(prototypes[[i]]), variable], pch = i,
col = col[i])
}
if(EN){
    #As neck-to-ground measures for the standard sizing system we take the values 132, 136 and 140 cm,
    #because those are the most frequent measurements:
vec <- seq(130, 142, 4)
Bust_ng_4 <- seq(74, 102, 4) ; Bust_ng_6 <- seq(107, 131, 6) ; Bust_ng <- c(Bust_ng_4, Bust_ng_6)
for(i in 1:length(Bust_ng)){
segments(Bust_ng[i], vec[1], Bust_ng[i], vec[length(vec)])
}
for(i in 1:length(vec)){
segments(Bust_ng[1], vec[i], Bust_ng[length(Bust_ng)], vec[i])
}
}
}
if(variable == "waist"){
plot(data[, bustVariable], data[, variable], pch = "*", col = "thistle1", xlab = bustVariable, ylab = variable,
main = main, xlim = xlim, ylim = ylim, xaxt = "n", yaxt = "n")
axis(1, at = seq(xlim[1], xlim[2], 10), labels = seq(xlim[1], xlim[2], 10))
axis(2, at = seq(ylim[1], ylim[2], 10), labels = seq(ylim[1], ylim[2], 10))
for(i in 1 : nsizes){
points(data[as.character(prototypes[[i]]), bustVariable], data[as.character(prototypes[[i]]), variable], pch = i,
col = col[i])
}
if(EN){
    #Waist intervals defined by the European standard sizing system:
Waist_4 <- seq(60,88,4) ; Waist_6 <- seq(94,112,6) ; Waist = c(Waist_4,Waist_6)
for(i in 1:length(Bust_4)){
symbols(Bust_4[i], Waist_4[i], rectangles = matrix(c(4,4), 1, 2), add = TRUE, inches = FALSE)
points(Bust_4[i], Waist_4[i], pch = ".", cex = 2.3)
}
for(i in 1:length(Bust_6)){
symbols(Bust_6[i] - 1, Waist_6[i] - 1, rectangles = matrix(c(6,6),1,2), add = TRUE, inches = FALSE)
points(Bust_6[i] - 1, Waist_6[i] - 0.5, pch = ".", cex = 2.3)
}
}
}
if((variable != "chest") & (variable != "hip") & (variable != "necktoground") & (variable != "waist")){
stop("This variable doesn't belong to the database")
}
}
| /scratch/gouwar.j/cran-all/cranData/Anthropometry/R/plotPrototypes.R |
plotTreeHipamAnthropom <- function(x,main,...){
tree.obj <- x
n.levels <-tree.obj$n.levels
dmat <- tree.obj$development
max.x.clust <- max(apply(dmat, 2, function(x) sum(!is.na(unique(x)))))
box.tuning <- 0.35
if(ncol(dmat) > 1){
box.size <- max(box.tuning / n.levels, box.tuning / max.x.clust)
}else{
box.size <- 0.2
}
plot(0, 0, xlim = c(0,1), ylim = c(0,1), xlab = "", ylab = "", axes = FALSE, type = "n",...)
centre.list <- NULL
#Draw boxes and cluster numbers; calculate box centres (needed for drawing the arrows):
for (i in 1:n.levels){
which.clust <- unique(dmat[,n.levels + 1 - i])
which.clust <- which.clust[!is.na(which.clust)]
n.clust <- length(which.clust)
for (j in 1:n.clust){
centre <- c((j - .5) / (n.clust),(i-.5)/(n.levels + 1))
centre.list <- rbind(centre.list, c(which.clust[j], centre))
make.circle.discovery(centre, box.size)
text(centre[1], centre[2], labels = which.clust[j], cex = 2.5 / max(n.clust,n.levels)^.5, pos = 1,
offset = -0.45)
}
}
centre.list <- centre.list[order(centre.list[,1]),]
#Draw root box:
root.centre <- c(0.5,(n.levels + .5) / (n.levels + 1))
make.circle.discovery(root.centre, box.size)
text(root.centre[1], root.centre[2], labels = c("R"), cex = 3 / max(n.clust, n.levels)^.5, pos = 1,
offset = -0.45)
#Draw the arrows:
if(ncol(dmat) > 1){
for (i in 1:nrow(dmat)){
for (j in 2:ncol(dmat)){
if (!is.na(dmat[i,j])){
make.arrow.circle(centre.list[dmat[i,j-1],2:3],centre.list[dmat[i,j],2:3],box.size)
}
}
}
}
#Draw root arrows:
root.shoots <- unique(dmat[,1])
for (i in root.shoots){
make.arrow.circle(root.centre,centre.list[i,2:3],box.size)
}
title(main=main)
} | /scratch/gouwar.j/cran-all/cranData/Anthropometry/R/plotTreeHipamAnthropom.R |
plotTrimmOutl <- function(data,trimmOutl,nsizes,bustVariable,variable,col,xlim,ylim,main){
if(variable == "chest"){
plot(data[, bustVariable], data[, variable], pch = "*", col = "thistle1", xlab = bustVariable, ylab = variable,
main = main, xlim = xlim, ylim = ylim, xaxt = "n", yaxt = "n")
axis(1, at = seq(xlim[1], xlim[2], 10), labels = seq(xlim[1], xlim[2], 10))
axis(2, at = seq(ylim[1], ylim[2], 10), labels = seq(ylim[1], ylim[2], 10))
for(i in 1 : nsizes){
points(data[as.character(trimmOutl[[i]]), bustVariable], data[as.character(trimmOutl[[i]]), variable], pch = i,
col = col[i])
}
}
if(variable == "hip"){
plot(data[, bustVariable], data[, variable], pch = "*", col = "thistle1", xlab = bustVariable, ylab = variable,
main = main, xlim = xlim, ylim = ylim, xaxt = "n", yaxt = "n")
axis(1, at = seq(xlim[1], xlim[2], 10), labels = seq(xlim[1], xlim[2], 10))
axis(2, at = seq(ylim[1], ylim[2], 10), labels = seq(ylim[1], ylim[2], 10))
for(i in 1 : nsizes){
points(data[as.character(trimmOutl[[i]]), bustVariable], data[as.character(trimmOutl[[i]]), variable], pch = i,
col = col[i])
}
}
if(variable == "necktoground"){
plot(data[, bustVariable], data[, variable], pch = "*", col = "thistle1", xlab = bustVariable, ylab = variable,
main = main, xlim = xlim, ylim = ylim, xaxt = "n", yaxt = "n")
axis(1, at = seq(xlim[1], xlim[2], 10), labels = seq(xlim[1], xlim[2], 10))
axis(2, at = seq(ylim[1], ylim[2], 10), labels = seq(ylim[1], ylim[2], 10))
for(i in 1 : nsizes){
points(data[as.character(trimmOutl[[i]]), bustVariable], data[as.character(trimmOutl[[i]]), variable], pch = i,
col = col[i])
}
}
if(variable == "waist"){
plot(data[, bustVariable], data[, variable], pch = "*", col = "thistle1", xlab = bustVariable, ylab = variable,
main = main, xlim = xlim, ylim = ylim, xaxt = "n", yaxt = "n")
axis(1, at = seq(xlim[1], xlim[2], 10), labels = seq(xlim[1], xlim[2], 10))
axis(2, at = seq(ylim[1], ylim[2], 10), labels = seq(ylim[1], ylim[2], 10))
for(i in 1 : nsizes){
points(data[as.character(trimmOutl[[i]]), bustVariable], data[as.character(trimmOutl[[i]]), variable], pch = i,
col = col[i])
}
}
if((variable != "chest") & (variable != "hip") & (variable != "necktoground") & (variable != "waist")){
stop("This variable doesn't belong to the database")
}
}
| /scratch/gouwar.j/cran-all/cranData/Anthropometry/R/plotTrimmOutl.R |
preprocessing <- function(data,stand,percAccomm,mahal=TRUE){
if(stand == TRUE){
data1 <- scale(data,center=sapply(data,mean),scale=sapply(data,sd))
}else{
data1 <- data
}
if(percAccomm != 1){
if(mahal == TRUE){
Sx <- cov(data1)
D2 <- mahalanobis(data1,colMeans(data1), Sx)
indivYes <- which(D2 <= qchisq(percAccomm, df=dim(data1)[2]))
indivNo <- which(D2 > qchisq(percAccomm, df=dim(data1)[2]))
perc <- (length(indivYes) / dim(data1)[1]) * 100
data1 <- data1[indivYes,]
}else{
if(ncol(data) <= 3){
appr <- FALSE
}else{
appr <- TRUE
}
dt = c()
for(i in 1:nrow(data1)){
dt[i] <- depth.halfspace(data1[i,], data1, exact = appr)
}
num <- sum(dt == min(dt))
indivYes <- which(dt != min(dt))
indivNo <- which(dt == min(dt))
perc <- (length(indivYes) / dim(data1)[1]) * 100
data1 <- data1[indivYes,]
}
}else{
data1 <- data1
}
if(percAccomm != 1){
print(paste("The percentage of accommodation is exactly ", round(perc,2), "%",sep=""))
}
if(percAccomm != 1){
return(list(data=data1,indivYes=indivYes,indivNo=indivNo))
}
else{
return(list(data=data1))
}
} | /scratch/gouwar.j/cran-all/cranData/Anthropometry/R/preprocessing.R |
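#Illustrative sketch (not part of the Anthropometry package): with mahal = TRUE,
#preprocessing() keeps the individuals whose squared Mahalanobis distance to the centroid is
#below qchisq(percAccomm, df = number of variables), so roughly a percAccomm fraction of the
#sample is accommodated. The toy data (standardisation omitted) and percAccomm = 0.95 are
#assumptions used only to show that rule.
set.seed(1)
toy <- data.frame(a = rnorm(100), b = rnorm(100))
D2 <- mahalanobis(as.matrix(toy), colMeans(toy), cov(toy))
mean(D2 <= qchisq(0.95, df = ncol(toy)))   #fraction of accommodated individuals, close to 0.95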
projShapes <- function(clust, array3D, asig, prototypes){
out_proc <- c()
x <- array3D[, , asig == clust]
if (length(dim(x)) != 3) {
return(cat("Please ensure that array3D has 3 dimensions."))
}else{
out_proc <- shapes::procGPA(x, distances = TRUE, pcaoutput = TRUE)
shapes::plotshapes(out_proc$rotated)
points(prototypes[, , clust], col = 2)
}
} | /scratch/gouwar.j/cran-all/cranData/Anthropometry/R/projShapes.R |
qtranShapes <- function(array3D,n,c,ic1,ic2,nc,an1,an2,ncp,d,itran,indx){
ncp <- ncp
#In the optimal transfer stage, NCP(L) indicates the step at which cluster L is last updated.
#In the quick transfer stage, NCP(L) is equal to the step at which cluster L is last updated plus M:
icoun = 0
istep = 0
for (i in 1 : n){
icoun = icoun + 1
istep = istep + 1
l1 = ic1[i]
l2 = ic2[i]
#If point I is the only member of cluster L1, no transfer:
if (1 < nc[l1]){
#If NCP(L1) < ISTEP, no need to re-compute distance from point I to cluster L1.
#Note that if cluster L1 is last updated exactly M steps ago, we still need to
#compute the distance from point I to cluster L1:
if(istep <= ncp[l1]){
da = (riemdist(array3D[,,i], c[,,l1]))^2
d[i] = da * an1[l1]
}
#If NCP(L1) <= ISTEP and NCP(L2) <= ISTEP, there will be no transfer of point I at this step:
if ( istep < ncp[l1] | istep < ncp[l2] ){
r2 = d[i] / an2[l2]
dd = (riemdist(array3D[,,i], c[,,l2]))^2
#Update cluster centers, NCP, NC, ITRAN, AN1 and AN2 for clusters L1 and L2. Also update IC1(I) and
#IC2(I). Note that if any updating occurs in this stage, INDX is set back to 0:
if( dd < r2 ){
icoun = 0
indx = 0
itran[l1] = 1
itran[l2] = 1
ncp[l1] = istep + n
ncp[l2] = istep + n
al1 = nc[l1]
alw = al1 - 1
al2 = nc[l2]
alt = al2 + 1
nc[l1] = nc[l1] - 1
nc[l2] = nc[l2] + 1
an2[l1] = alw / al1
if(1 < alw){
an1[l1] = alw / ( alw - 1 )
}else{
an1[l1] = Inf
}
an1[l2] = alt / al2
an2[l2] = alt / ( alt + 1 )
ic1[i] = l2
ic2[i] = l1
x1 <- array3D[, , ic1 == l1]
if (length(dim(x1)) != 3) {
return(cat("Please ensure that array3D has 3 dimensions."))
}else{
c[,,l1] = shapes::procGPA(x1, distances = TRUE, pcaoutput = TRUE)$mshape
}
x2 <- array3D[, , ic1 == l2]
if (length(dim(x2)) != 3) {
return(cat("Please ensure that array3D has 3 dimensions."))
}else{
c[,,l2] = shapes::procGPA(x2, distances = TRUE, pcaoutput = TRUE)$mshape
}
}else{
}
}
}
#If no re-allocation took place in the last n steps, return:
if( icoun == n ){
return(list(c, ic1,ic2, nc, an1, an2, ncp, d, itran, indx, icoun))
}
}
return(list(c, ic1,ic2, nc, an1, an2, ncp, d, itran, indx, icoun))
} | /scratch/gouwar.j/cran-all/cranData/Anthropometry/R/qtranShapes.R |
screeArchetypal <- function(numArch,rss_lass_def,rss_step_ns,rss_step_alpha,rss_step_beta,ylim,main,
xlab,ylab,col=c("red","blue","green3"),axis2,seq,leg){
archsPlot <- seq(length = numArch)
plot(archsPlot, rss_lass_def, xaxt="n", yaxt="n", ylim=ylim, main=main, xlab=xlab, ylab=ylab, type="b")
points(archsPlot, rss_step_ns, type="b", col=col[1])
points(archsPlot, rss_step_alpha, type="b", col=col[2], lty=2)
points(archsPlot, rss_step_beta, type="b", col=col[3], lty=2)
axis(1, at=archsPlot, labels=archsPlot)
if(axis2){
axis(2,at=seq,labels=seq)
}
if(leg){
legend("topright",c("Archetypes",
expression(paste("Archetypoids from ", cand[ns])),
expression(paste("Archetypoids from ", cand[alpha])),
expression(paste("Archetypoids from ", cand[beta]))),
lty = c(1,1,2,6),
col=c("black",col[1],col[2],col[3]),
text.col=c("black",col[1],col[2],col[3]))
}
} | /scratch/gouwar.j/cran-all/cranData/Anthropometry/R/screeArchetypal.R |
shapes3dShapes <- function (x,loop=0,type="p",color=2,joinline=c(1:1),axes3=FALSE,rglopen=TRUE,main=main){
if (is.matrix(x)) {
xt <- array(0, c(dim(x), 1))
xt[, , 1] <- x
x <- xt
}
if (is.array(x) == FALSE) {
stop("Data not in right format : require an array \n")
}
if (is.array(x) == TRUE) {
if (rglopen) {
open3d()
bg3d(color = "white")
}
if (dim(x)[2] == 2) {
x <- as.3d(x)
}
if (loop == 0) {
k <- dim(x)[1]
sz <- centroid.size(x[, , 1])/sqrt(k)/30
plotshapes3d(x, type = type, color = color, size = sz,
joinline = joinline)
if (axes3) {
axes3d(color = "black", cex = 0.1, nticks = 4)
title3d(main = main, xlab = "x", ylab = "y", zlab = "z", color = "black")
}
}
if (loop > 0) {
for (i in 1:loop) {
plotshapestime3d(x, type = type)
}
}
}
}
| /scratch/gouwar.j/cran-all/cranData/Anthropometry/R/shapes3dShapes.R |
skeletonsArchetypal <- function(measuArch,main){
#This R code allows us to reproduce the skeleton plots visualizing the seven archetypes of Figure 5 of the
#paper Epifanio et al. (2013): "Archetypal analysis: Contributions for estimating boundary cases in
#multivariate accommodation problem".
#List with the measurements of each archetype:
a <- measuArch
#Popliteal height sitting:
x1 <- c(40,40)
y1 <- c(0,a[3]);
par(pty = "s")
plot(x1,y1,type="l",lwd=5, xlim = c(0,60), ylim = c(0,60), xaxt = "n", yaxt = "n", xlab = "", ylab = "",
main = main)
axis(1, at = seq(0,60,10), labels = seq(0,60,10))
axis(2, at = seq(0,60,10), labels = seq(0,60,10))
#Buttock knee length:
x2 <- c(40, 40-a[2])
y2 <- c(a[3], a[3])
lines(x2,y2,lwd=5)
#Shoulder height sitting:
x3 <- c(40-a[2], 40-a[2])
y3 <- c(a[3], a[3]+ a[6])
lines(x3,y3,lwd=5)
#Eye height sitting:
x4 <- c(40-a[2], 40-a[2])
y4 <- c(a[3], a[3]+ a[5])
lines(x4,y4,lwd=5)
#Sitting height:
x5 <- c(40-a[2], 40-a[2])
y5 <- c(a[3], a[3]+ a[4])
lines(x5,y5,lwd=5)
#Thumb tip reach:
x6=c(40-a[2], 40-a[2]+a[1])
y6=c(a[3]+a[6], a[3]+a[6])
lines(x6,y6,lwd=5)
 #For the eyes position:
x7 <- c(40-a[2]-2, 40-a[2]+2)
y7 <- c(a[3]+a[5], a[3]+a[5])
lines(x7,y7,lwd=5)
}
| /scratch/gouwar.j/cran-all/cranData/Anthropometry/R/skeletonsArchetypal.R |
stepArchetypesRawData <- function(data,numArch,numRep=3,verbose=TRUE){
mycall <- match.call()
as <- list()
for (i in 1:length(numArch)) {
as[[i]] <- list()
class(as[[i]]) <- "repArchetypes"
for (j in seq_len(numRep)) {
if (verbose)
cat("\n*** numArch=", numArch[i], ", rep=", j, ":\n", sep = "")
as[[i]][[j]] <- archetypes(data, k = numArch[i],
family = archetypesFamily("original",scalefn = no.scalefn,
rescalefn = no.rescalefn))
}
}
return(structure(as, class='stepArchetypes',call=mycall))
}
| /scratch/gouwar.j/cran-all/cranData/Anthropometry/R/stepArchetypesRawData.R |
stepArchetypoids <- function(numArchoid,nearest="cand_ns",data,ArchObj){
N = dim(data)[1]
ai <- archetypes::bestModel(ArchObj[[numArchoid]])
if(is.null(archetypes::parameters(ai))){
stop("No archetypes computed")
}else{
ras <- rbind(archetypes::parameters(ai),data)
dras <- dist(ras, method = "euclidean", diag = FALSE, upper = TRUE, p = 2)
mdras <- as.matrix(dras)
diag(mdras) = 1e+11
}
if(nearest == "cand_ns"){
ini_arch <- sapply(seq(length=numArchoid),nearestToArchetypes,numArchoid,mdras)
if( all(ini_arch > numArchoid) == FALSE){
k=1
neig <- knn(data, archetypes::parameters(ai), 1:N, k=k)
indices1 <- attr(neig, "nn.index")
ini_arch <- indices1[,k]
while(any(duplicated(ini_arch))){
k=k+1
neig <- knn(data, archetypes::parameters(ai), 1:N, k=k)
indicesk <- attr(neig, "nn.index")
dupl <- anyDuplicated(indices1[,1])
ini_arch <- c(indices1[-dupl,1],indicesk[dupl,k])
}
}
}else if(nearest == "cand_alpha"){
ini_arch <- apply(coef(ai, "alphas"), 2, which.max)
}else if(nearest == "cand_beta"){
ini_arch <- c()
for (j in 1:numArchoid){
ini_arch[j] <- which.max(ai$betas[j,])
}
}else{
stop("The nearest vector must be cand_ns, cand_alpha or cand_beta")
}
res <- archetypoids(numArchoid,data,huge=200,step=TRUE,init=ini_arch)
cat("Done!")
return(list(cases = res[[1]], rss = res[[2]], archet_ini = ini_arch, alphas = res[[4]]))
}
| /scratch/gouwar.j/cran-all/cranData/Anthropometry/R/stepArchetypoids.R |
trimmOutl <- function(resMethod, nsizes){
UseMethod("trimmOutl")
}
trimmOutl.default <- function(resMethod, nsizes){
discarded <- resMethod$discarded
class(discarded) <- "trimmOutl"
return(discarded)
}
trimmOutl.trimowa <- function(resMethod, nsizes){
if (nsizes == 1){
discarded <- c()
discarded <- resMethod$discarded
}else{
discarded <- list()
for (i in 1 : nsizes){
discarded[[i]] <- resMethod[[i]]$discarded
}
}
class(discarded) <- "trimmOutl"
return(discarded)
}
trimmOutl.hipamAnthropom <- function(resMethod, nsizes){
discarded <- list()
for (i in 1 : nsizes){
aux <- table(resMethod[[i]]$clustering)
aux <- as.numeric(aux)
auxNoBig <- which(aux == 1 | aux == 2)
discarded[[i]] <- rownames(unique(resMethod[[i]]$cases))[auxNoBig]
}
class(discarded) <- "trimmOutl"
return(discarded)
}
| /scratch/gouwar.j/cran-all/cranData/Anthropometry/R/trimmOutl.R |
trimmedLloydShapes <- function(array3D,n,alpha,numClust,algSteps=10,niter=10,stopCr=0.0001,verbose){
 no.trim <- floor(n*(1-alpha)) #Number of elements left after trimming.
vect_dist <- c() #Ancillary vector for the trimmed procedure.
time_iter <- list() #List to save the real time in which each iteration ends.
comp_time <- c() #List to save the computational time of each iteration.
list_asig_step <- list() #List to save the clustering obtained in each Nstep.
list_asig <- list() #List to save the optimal clustering obtained among all the Nstep of each iteration.
vect_all_rate <- c() #List to save the optimal allocation rate of each iteration.
initials <- list() #List to save the random initial values used by this Lloyd algorithm. Thus,
#the Hartigan algorithm can be executed with these same values.
trimms <- list() #List to save the trimmed women of each iteration.
 trimms_iter <- c()       #Vector to find the iteration where the optimum has been reached and therefore, to
                          #identify the trimmed women of that iteration.
 betterNstep <- c()       #Vector to find the Nstep of the iteration where the optimum has been reached and
                          #therefore, to identify the trimmed women of that iteration.
ll <- 1 : numClust
dist <- matrix(0, n, numClust)
if(verbose){
print(Sys.time())
}
time_ini <- Sys.time()
#Initialize the objective function by a large enough value:
vopt <- 1e+08
#Random restarts:
for(iter in 1 : niter){
trimms[[iter]] <- list()
obj <- list() #List to save the objective function (without dividing between n) of each Nstep.
meanshapes <- 0 ; asig <- 0
mean_sh <- list()
if(verbose){
cat("New iteration:")
print(iter)
cat("Optimal value with which this iteration starts:")
print(vopt)
}
#Randomly choose the numClust initial centers:
initials[[iter]] <- sample(1:n, numClust, replace = FALSE)
if(verbose){
cat("Initial values of this iteration:")
print(initials[[iter]])
}
meanshapes <- array3D[, , initials[[iter]]]
for(step in 1 : algSteps){
for(h in 1 : numClust){
dist[,h] = apply(array3D[,,1:n], 3, riemdist, y = meanshapes[,,h])
}
asig = max.col(-dist)
#For the trimmed procedure:
for(filas in 1 : n){
vect_dist[filas] <- dist[filas,asig[filas]]
}
qq <- (1:n)[vect_dist <= sort(vect_dist)[no.trim]]
distmod <- dist[qq,]
    asigqq <- asig[qq] #assignment vector of the elements left after trimming.
if(verbose){
cat("Trimmed woman:")
print(setdiff(1:dim(array3D)[3],qq))
}
trimms[[iter]][[step]] <- setdiff(1:dim(array3D)[3],qq)
for(h in 1 : numClust){
     if(sum(asigqq == h) == 1){
meanshapes[,,h] = array3D[, , asigqq == h]
mean_sh[[step]] <- meanshapes
}else{
meanshapes[,,h] = procGPA(array3D[, , asigqq == h], distances = TRUE, pcaoutput = TRUE)$mshape
mean_sh[[step]] <- meanshapes
}
}
obj[[step]] <- c(0)
for (l in 1:no.trim){
obj[[step]] <- obj[[step]] + dist[l,asigqq[l]]^2
}
obj[[step]] <- obj[[step]] / no.trim
list_asig_step[[step]] <- asigqq
if(verbose){
paste(cat("Clustering of the Nstep", step, ":\n"))
print(table(list_asig_step[[step]]))
}
if(verbose){
if(iter <= 10){
paste(cat("Objective function of the Nstep", step))
print(obj[[step]])
}
}
if(step > 1){
aux <- obj[[step]]
aux1 <- obj[[step-1]]
if( ((aux1 - aux) / aux1) < stopCr ){
break
}
}
}#The algSteps loop ends here.
  #Calculation of the objective function (the total within-cluster sum of squares):
obj1 <- 0
for(l in 1:no.trim){
obj1 <- obj1 + dist[l,asigqq[l]]^2
}
obj1 <- obj1/no.trim
#Change the optimal value and the optimal centers (copt) if a reduction in the objective function happens:
if( obj1 > min(unlist(obj)) ){
if( min(unlist(obj)) < vopt ){
vopt <- min(unlist(obj))
if(verbose){
#Improvements in the objective functions are printed:
cat("optimal")
print(vopt)
}
optim_obj <- which.min(unlist(obj))
copt <- mean_sh[[optim_obj]] #optimal centers.
asig_opt <- list_asig_step[[optim_obj]]
if(verbose){
cat("Optimal iteration:")
print(iter)
}
trimms_iter[iter] <- iter
betterNstep <- which.min(unlist(obj))
}
}else if(obj1 < vopt){
vopt <- obj1
if(verbose){
#Improvements in the objective functions are printed:
cat("optimal")
print(vopt)
}
optim_obj <- which.min(unlist(obj))
copt <- mean_sh[[optim_obj]] #optimal centers.
asig_opt <- list_asig_step[[optim_obj]]
if(verbose){
cat("Optimal iteration:")
print(iter)
}
trimms_iter[iter] <- iter
betterNstep <- which.min(unlist(obj))
}
time_iter[[iter]] <- Sys.time()
if(iter == 1){
comp_time[1] <- difftime(time_iter[[iter]], time_ini, units = "mins")
if(verbose){
cat("Computational time of this iteration: \n")
print(time_iter[[iter]] - time_ini)
}
}else{
comp_time[iter] <- difftime(time_iter[[iter]], time_iter[[iter - 1]], units = "mins")
if(verbose){
cat("Computational time of this iteration: \n")
print(time_iter[[iter]] - time_iter[[iter - 1]])
}
}
if(verbose){
cat("Optimal clustering of this iteration: \n")
}
optim_obj <- which.min(unlist(obj))
list_asig[[iter]] <- list_asig_step[[optim_obj]]
if(verbose){
print(table(list_asig[[iter]]))
}
}#The niter loop ends here.
opt_iter <- trimms_iter[length(trimms_iter)]
trimmed <- trimms[[opt_iter]][[betterNstep]]
return(list(asig=asig_opt,cases=copt,vopt=vopt,trimmWomen=trimms,trimmsIter=trimms_iter,
betterNstep=betterNstep,initials=initials,discarded=trimmed))
}
| /scratch/gouwar.j/cran-all/cranData/Anthropometry/R/trimmedLloydShapes.R |
trimmedoid <- function(D,numClust,alpha,niter,algSteps=7,verbose){
n <- dim(D)[1]
no.trim <- floor(n*(1-alpha))
ll <- (1:numClust)
ind <- (1:n)
dist <- ind
#Initialize the objective function by a large enough value:
vopt <- 100000000
 #Random restarts:
for(iter in 1:niter){
if(verbose){
cat("new iteration")
print(iter)
}
#Randomly choose the numClust initial centers:
cini <- sample(1:n,size=numClust,replace=FALSE)
#C-steps: step 2:
for(t in 1:algSteps){
disti=c()
ind=c()
#Distances of each data point to its closest medoid:
for(h in 1:n){
for(k in 1:numClust){
ll[k] <- D[h,cini[k]]
}
disti[h] <- min(ll)
ind[h] <- which.min(ll)
}
#Modified data (Dmod) with the non-trimmed points and a vector indqq equal to the clusters allocations:
qq <- (1:n)[disti<=sort(disti)[no.trim]]
Dmod <- D[qq,qq]
indqq=ind[qq]
if(length(unique(indqq))<numClust) {t=algSteps}
else{
     #Calculation of the new k centers:
for(k in 1:numClust){
ni <- sum(indqq==k)
if(ni>1){
#cini[k,]<-apply(xmod[xmod[,p+1]==k,1:p],2,mean)
#convert Dmod in dist:
Dmodkv=as.dist(Dmod[indqq==k,indqq==k])
rpam=pam(Dmodkv,k=1) #here initial medoid, obtained by build.
medk=rpam$id.med
}
if(ni==1){
medk=1
}
if (ni==0){
medk=1
}
aux=qq[indqq==k]
cini[k]=aux[medk]
}
obj <- 0
for(l in 1:no.trim){
for (k in 1:numClust){
ll[k] <- D[l,cini[k]]
}
obj <- obj+ min(ll)
}
if(iter<10){
if(verbose){
print(obj/no.trim)
}
}
rm(obj)
}
}
 #Calculation of the trimmed k-variance:
obj <- 0
for(l in 1:no.trim){
for(k in 1:numClust){
ll[k] <- D[l,cini[k]]
}
obj <- obj+ min(ll)
}
obj <- obj/no.trim
#Change the optimal value and the optimal centers (copt) if a reduction in the objective function happens:
if (obj < vopt){
vopt <- obj
#Improvements in the objective functions are printed:
if(verbose){
cat("optimal")
print(vopt)
}
copt <- cini
}
}
 #Obtain the final cluster allocations (this is necessary, because a final cluster assignment
 #movement is possible):
asig <- rep(0,n)
#Distances of each data point to its closest medoid:
for(h in 1:n){
for(k in 1:numClust){
ll[k] <- D[h,cini[k]]
}
disti[h] <- min(ll)
ind[h] <- which.min(ll)
}
 #A vector indqq with the cluster allocations:
qq <- (1:n)[disti<=sort(disti)[no.trim]]
indqq=ind[qq]
#Assign every observation to each cluster and 0 for the trimmed observations:
asig[qq]=indqq
#Between clusters sum of distances:
b=0
for(k in 1:(numClust-1)){
for(j in (k+1):numClust){
b=b+D[cini[k],cini[j]]
}
}
##ch goodness index:
ch=b*(no.trim-numClust)/(vopt*no.trim*(numClust-1))
rt=list(vopt=vopt,copt=cini,asig=asig,ch=ch,Dmod=Dmod,qq=qq)
return(rt)
} | /scratch/gouwar.j/cran-all/cranData/Anthropometry/R/trimmedoid.R |

trimowa <- function(data,w,numClust,alpha,niter,algSteps,ah=c(23, 28, 20, 25, 25),verbose){
asig <- c() ; copt <- c() ; s <- c(0) ; n <- c() ; no.trim <- c()
res_qq <- c() ; difs <- c() ; bh <- c() ; bl <- c() ; trim <- c()
n1 <- nrow(data)
n2 <- floor(n1 * (1 - alpha))
s = s + (n1 - n2) #number of trimmed individuals.
 #Constants that appear in "An Optimisation Approach to Apparel Sizing" by McCulloch et al.:
bh <- (apply(as.matrix(log(data)), 2, range)[2,] - apply(as.matrix(log(data)), 2, range)[1,]) / ((numClust-1) * 8)
bl <- -3 * bh
ah <- ah
al <- ah / 3
#Data processing:
num.persons <- dim(data)[1]
num.variables <- dim(data)[2]
datam <- as.matrix(data)
datat <- aperm(datam, c(2,1))
dim(datat) <- c(1, num.persons * num.variables)
rm(datam)
#Computing the dissimilarity matrix:
D <- getDistMatrix(datat, num.persons, num.variables, w, bl, bh, al, ah, verbose)
rm(datat)
n <- dim(D)[1] #number of individuals in each bust class.
 no.trim <- floor(n * (1 - alpha)) #number of individuals left after trimming.
#"Garbage collector" to release as much memory as possible:
gc(verbose = FALSE)
#Trimmed K-medoids:
results <- trimmedoid(D, numClust, alpha, niter, algSteps, verbose)
#asig gives the cluster to which each individual belongs and copt are the centroids of the clusters:
asig <- results$asig
#Obtain the current medoids obtained for each bust class, regarding the whole database:
copt <- as.numeric(rownames(data)[results$copt])
 #qq is the vector that contains only the individuals kept after trimming.
 #With the following commands, we identify the trimmed individuals within the whole database:
res_qq <- results$qq
difs <- setdiff(1:n,res_qq)
trim <- as.numeric(rownames(data[difs, ]))
return(list(cases=copt,numTrim=s,numClass=n,noTrim=no.trim,C1=bh,C2=bl,C3=ah,C4=al,asig=asig,discarded=trim))
}
| /scratch/gouwar.j/cran-all/cranData/Anthropometry/R/trimowa.R |
weightsMixtureUB <- function(orness,numVar){
if(orness == .5){
w = rep(1/numVar,numVar)
}else{
lambda = 0.5
prob0 = 3/2 - 2 * orness
w = lambda * dbinom(0:(numVar-1), size = numVar-1, prob = prob0) +
(1 - lambda) * rep(1/numVar,numVar)
}
w
}
| /scratch/gouwar.j/cran-all/cranData/Anthropometry/R/weightsMixtureUB.R |
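#Illustrative sketch (not part of the Anthropometry package): weightsMixtureUB mixes a
#Binomial(numVar - 1, 3/2 - 2 * orness) probability vector with uniform weights (lambda = 0.5),
#so orness = 0.5 returns plain uniform weights and the weights always sum to 1. numVar = 5 and
#orness = 0.7 are assumptions used only for illustration.
weightsMixtureUB(orness = 0.5, numVar = 5)   #0.2 0.2 0.2 0.2 0.2
w <- weightsMixtureUB(orness = 0.7, numVar = 5)
sum(w)                                       #1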
xyplotPCArchetypes <- function(x, y, data.col = 1, data.pch = 19, data.bg = NULL, atypes.col = 2, atypes.pch = 19,
ahull.show = FALSE, ahull.col = atypes.col,chull = NULL, chull.col = gray(0.7),
chull.pch = 19, adata.show = FALSE, adata.col = 3, adata.pch = 13, link.col = data.col,
link.lty = 1, ...){
zs <- x
data <- y
plot(data, col = data.col, pch = data.pch, bg = data.bg, ...)
points(zs, col = atypes.col, pch = atypes.pch, ...) #change respect to xyplot.
if(!is.null(chull)){
points(data[chull, ], col = chull.col, pch = chull.pch, ...)
lines(data[c(chull, chull[1]), ], col = chull.col, ...)
}
if(ahull.show)
lines(ahull(zs), col = ahull.col)
if(adata.show){
adata <- fitted(zs)
link.col <- rep(link.col, length = nrow(adata))
link.lty <- rep(link.lty, length = nrow(adata))
points(adata, col = adata.col, pch = adata.pch, ...)
for (i in seq_len(nrow(data)))
lines(rbind(data[i, ], adata[i, ]), col = link.col[i], lty = link.lty[i], ...)
}
invisible(NULL)
}
| /scratch/gouwar.j/cran-all/cranData/Anthropometry/R/xyplotPCArchetypes.R |
## ----paquete,eval=FALSE-------------------------------------------------------
# library("Anthropometry")
## ----trimowa1,eval=FALSE,tidy=FALSE-------------------------------------------
# dataTrimowa <- sampleSpanishSurvey
# numVar <- dim(dataTrimowa)[2]
# bust <- dataTrimowa$bust
# bustSizes <- bustSizesStandard(seq(74, 102, 4), seq(107, 131, 6))
## ----trimowa2,eval=FALSE,tidy=FALSE-------------------------------------------
# orness <- 0.7
# weightsTrimowa <- weightsMixtureUB(orness, numVar)
## ----trimowa3,eval=FALSE,tidy=FALSE-------------------------------------------
# numClust <- 3 ; alpha <- 0.01 ; niter <- 10 ; algSteps <- 7
# ah <- c(23, 28, 20, 25, 25)
#
# #suppressWarnings(RNGversion("3.5.0"))
# #set.seed(2014)
# numSizes <- bustSizes$nsizes - 1
# res_trimowa <- computSizesTrimowa(dataTrimowa, bust, bustSizes$bustCirc,
# numSizes, weightsTrimowa, numClust,
# alpha, niter, algSteps, ah, FALSE)
## ----trimowa4,eval=FALSE,tidy=FALSE-------------------------------------------
# prototypes <- anthrCases(res_trimowa, numSizes)
## ----trimowa5,eval=FALSE,tidy=FALSE-------------------------------------------
# bustVariable <- "bust"
# xlim <- c(72, 132)
# color <- c("black", "red", "green", "blue", "cyan", "brown", "gray",
# "deeppink3", "orange", "springgreen4", "khaki3", "steelblue1")
# variable <- "necktoground"
# ylim <- c(116, 156)
# title <- "Prototypes \n bust vs neck to ground"
# plotPrototypes(dataTrimowa, prototypes, numSizes, bustVariable,
# variable, color, xlim, ylim, title, FALSE)
# plotPrototypes(dataTrimowa, prototypes, numSizes, bustVariable,
# variable, color, xlim, ylim, title, TRUE)
## ----TDDclust,eval=FALSE,tidy=FALSE-------------------------------------------
# dataTDDcl <- sampleSpanishSurvey[1 : 25, c(2, 3, 5)]
# dataTDDcl_aux <- sampleSpanishSurvey[1 : 25, c(2, 3, 5)]
## ----TDDclust2,eval=FALSE,tidy=FALSE------------------------------------------
# numClust <- 3 ; alpha <- 0.01 ; lambda <- 0.5 ; niter <- 5
# Th <- 0 ; T0 <- 0 ; simAnn <- 0.9
#
# #suppressWarnings(RNGversion("3.5.0"))
# #set.seed(2014)
# res_TDDcl <- TDDclust(dataTDDcl, numClust, lambda, Th, niter, T0, simAnn,
# alpha, dataTDDcl_aux, verbose = FALSE)
## ----TDDclust3,eval=FALSE,tidy=FALSE------------------------------------------
# table(res_TDDcl$NN[1,])
# #1 2 3
# #5 10 9
# res_TDDcl$Cost
# #[1] 0.3717631
# res_TDDcl$klBest
# #[1] 3
## ----TDDclust4,eval=FALSE,tidy=FALSE------------------------------------------
# prototypes <- anthrCases(res_TDDcl)
# trimmed <- trimmOutl(res_TDDcl)
## ----hipam,eval=FALSE,tidy=FALSE----------------------------------------------
# dataHipam <- sampleSpanishSurvey
# bust <- dataHipam$bust
# bustSizes <- bustSizesStandard(seq(74, 102, 4), seq(107, 131, 6))
## ----hipam2,eval=FALSE,tidy=FALSE---------------------------------------------
# type <- "IMO"
# maxsplit <- 5 ; orness <- 0.7
# ah <- c(23, 28, 20, 25, 25)
#
# #suppressWarnings(RNGversion("3.5.0"))
# #set.seed(2013)
# numSizes <- bustSizes$nsizes - 1
# res_hipam <- computSizesHipamAnthropom(dataHipam, bust, bustSizes$bustCirc,
# numSizes, maxsplit, orness, type,
# ah, FALSE)
## ----hipam3,eval=FALSE,tidy=FALSE---------------------------------------------
# fitmodels <- anthrCases(res_hipam, numSizes)
# outliers <- trimmOutl(res_hipam, numSizes)
## ----hipam4,eval=FALSE,tidy=FALSE---------------------------------------------
# bustVariable <- "bust"
# xlim <- c(72, 132)
# color <- c("black", "red", "green", "blue", "cyan", "brown", "gray",
# "deeppink3", "orange", "springgreen4", "khaki3", "steelblue1")
# variable <- "hip"
# ylim <- c(83, 153)
# title <- "Fit models HIPAM_IMO \n bust vs hip"
# title_outl <- "Outlier women HIPAM_IMO \n bust vs hip"
# plotPrototypes(dataHipam, fitmodels, numSizes, bustVariable,
# variable, color, xlim, ylim, title, FALSE)
# plotTrimmOutl(dataHipam, outliers, numSizes, bustVariable,
# variable, color, xlim, ylim, title_outl)
## ----ssa,eval=FALSE,tidy=FALSE------------------------------------------------
# landmarksNoNa <- na.exclude(landmarksSampleSpaSurv)
# numLandmarks <- (dim(landmarksNoNa)[2]) / 3
# landmarksNoNa_First50 <- landmarksNoNa[1 : 50, ]
# numIndiv <- dim(landmarksNoNa_First50)[1]
## ----ssa1,eval=FALSE,tidy=FALSE-----------------------------------------------
# array3D <- array3Dlandm(numLandmarks, numIndiv, landmarksNoNa_First50)
## ----ssa2,eval=FALSE,tidy=FALSE-----------------------------------------------
# numClust <- 3 ; alpha <- 0.01 ; algSteps <- 5
# niter <- 5 ; stopCr <- 0.0001
## ----ssa22,eval=FALSE,tidy=FALSE----------------------------------------------
# #suppressWarnings(RNGversion("3.5.0"))
# #set.seed(2013)
# res_kmProc <- trimmedLloydShapes(array3D, numIndiv, alpha, numClust,
# algSteps, niter, stopCr,
# verbose = FALSE)
## ----ssa3,eval=FALSE,tidy=FALSE-----------------------------------------------
# clust_kmProc <- res_kmProc$asig
# table(clust_kmProc)
# #1 2 3
# #19 18 12
## ----ssa4,eval=FALSE,tidy=FALSE-----------------------------------------------
# prototypes <- anthrCases(res_kmProc)
# trimmed <- trimmOutl(res_kmProc)
## ----ssa5,eval=FALSE,tidy=FALSE-----------------------------------------------
# data_First50 <- sampleSpanishSurvey[1 : 50, ]
# data_First50_notrimm <- data_First50[-trimmed, ]
# boxplot(data_First50_notrimm$necktoground ~ as.factor(clust_kmProc),
# main = "Neck to ground")
## ----ssa6,eval=FALSE,tidy=FALSE-----------------------------------------------
# projShapes(1, array3D, clust_kmProc, prototypes)
# legend("topleft", c("Registrated data", "Mean shape"),
# pch = 1, col = 1:2, text.col = 1:2)
# title("Procrustes registrated data for cluster 1 \n
# with its mean shape superimposed", sub = "Plane xy")
## ----AA,eval=FALSE,tidy=FALSE-------------------------------------------------
# USAFSurvey_First50 <- USAFSurvey[1 : 50, ]
# variabl_sel <- c(48, 40, 39, 33, 34, 36)
# USAFSurvey_First50_inch <- USAFSurvey_First50[,variabl_sel] / (10 * 2.54)
# USAFSurvey_preproc <- preprocessing(data = USAFSurvey_First50_inch,
# stand = TRUE, percAccomm = 0.95,
# mahal= TRUE)
## ----AA3,eval=FALSE,tidy=FALSE------------------------------------------------
# #suppressWarnings(RNGversion("3.5.0"))
# #set.seed(2010)
# numArch <- 10 ; numRep <- 20
# oldw <- getOption("warn")
# options(warn = -1)
# lass <- stepArchetypesRawData(data = USAFSurvey_preproc$data,
# numArch=1:numArch, numRep = numRep,
# verbose = FALSE)
# options(warn = oldw)
# screeplot(lass)
## ----AA4,eval=FALSE,tidy=FALSE------------------------------------------------
# numArchoid <- 3
# res_archoids_ns <- archetypoids(numArchoid, USAFSurvey_preproc$data,
# huge = 200, step = FALSE, ArchObj = lass,
# nearest = "cand_ns" , sequ = TRUE)
# res_archoids_alpha <- archetypoids(numArchoid, USAFSurvey_preproc$data,
# huge = 200, step = FALSE, ArchObj = lass,
# nearest = "cand_alpha", sequ = TRUE)
# res_archoids_beta <- archetypoids(numArchoid, USAFSurvey_preproc$data,
# huge = 200, step = FALSE, ArchObj = lass,
# nearest = "cand_beta", sequ = TRUE)
#
# boundaries_ns <- anthrCases(res_archoids_ns)
# boundaries_alpha <- anthrCases(res_archoids_alpha)
# boundaries_beta <- anthrCases(res_archoids_beta)
## ----AA5,eval=FALSE,tidy=FALSE------------------------------------------------
# df <- USAFSurvey_preproc$data
# matPer <- t(sapply(1:dim(df)[2], percentilsArchetypoid, boundaries_ns, df, 0))
## ----AA6,eval=FALSE,tidy=FALSE------------------------------------------------
# barplot(matPer, beside = TRUE, main = paste(numArchoid,
# " archetypoids", sep = ""),
# ylim = c(0, 100), ylab = "Percentile",
# xlab = "Each bar is related to each anthropometric
# variable selected")
| /scratch/gouwar.j/cran-all/cranData/Anthropometry/inst/doc/Anthropometry.R |
# source: 20210924_AntibodyTiters_1.txt
emptyABT <- function(fileName = "empty.xlsx", pmax = 7, returnDF = FALSE,
Attrib = c("Sex", "Age", "VeryLow"),
attribFactors = list(c("F", "M"), c(18, 80), c(TRUE, FALSE))){
# pmax should be less than 18
# require(openxlsx)
if(length(attribFactors) != length(Attrib)){
stop("length(attribFactors) and length(Attrib) must be the same.")
}
minimum.colnames <- c("ID", "pre_vaccination_yyyymmdd", "pre_vaccination_score",
"1st_shot_yyyymmdd", "post_1st_shot_yyyymmdd", "post_1st_shot_score",
"2nd_shot_yyyymmdd", "point3_yyyymmdd", "point3_score")
additional.colnames <- ""
for(p in 4:pmax){
additional.colnames[(p-4)*2+1] <- paste("point", p, "_yyyymmdd", sep = "")
additional.colnames[(p-4)*2+2] <- paste("point", p, "_score", sep = "")
}
C0 <- 1000
Thalf <- 90
total.colnames <- c(minimum.colnames, additional.colnames)
emptyDF <- data.frame(matrix(nrow = 1, ncol = length(total.colnames)),
stringsAsFactors = FALSE)
emptyDF[, c(1, 2, 4, 5, 7, 8)] <- as.character(emptyDF[, c(1, 2, 4, 5, 7, 8)])
emptyDF[, c(3, 6, 9)] <- as.numeric(emptyDF[, c(3, 6, 9)])
for(p in 4:pmax){
emptyDF[, 9+(p-4)*2+1] <- as.character(emptyDF[, 9+(p-4)*2+1])
emptyDF[, 9+(p-4)*2+2] <- as.numeric(emptyDF[, 9+(p-4)*2+2])
}
colnames(emptyDF) <- total.colnames
emptyDF[["ID"]][1] <- "#patient X"
emptyDF[["pre_vaccination_yyyymmdd"]][1] <- "20200901"
emptyDF[["pre_vaccination_score"]][1] <- 2
emptyDF[["1st_shot_yyyymmdd"]][1] <- "20201001"
emptyDF[["post_1st_shot_yyyymmdd"]][1] <- "20201015"
emptyDF[["post_1st_shot_score"]][1] <- 50
emptyDF[["2nd_shot_yyyymmdd"]][1] <- "20201101"
emptyDF[["point3_yyyymmdd"]][1] <- "20201115"
emptyDF[["point3_score"]][1] <- C0
getCt <- function(t, C0 = 1000, Thalf = 90){
Ct <- C0 / 2^(t/Thalf)
return(Ct)
}
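	# Exponential-decay helper: e.g. getCt(t = 90, C0 = 1000, Thalf = 90) gives 500,
	# i.e. the score halves once over one half-life.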
# days <- integer(length = pmax - 3)
# scores <- integer(length = pmax - 3)
T0 <- as.Date(as.character(emptyDF[["point3_yyyymmdd"]][1]), "%Y%m%d")
C0 <- emptyDF[["point3_score"]][1]
for(p in 4:pmax){
DAY <- T0 + 60 * ((p-4)+1)
Period <- as.integer(DAY - T0)
# days[p-3] <- gsub(pattern = "-", replacement = "", as.character(DAY))
# scores[p-3] <- getCt(t = Period, C0 = C0, Thalf = Thalf)
emptyDF[, 9+(p-4)*2+1][1] <- gsub(pattern = "-", replacement = "", as.character(DAY))
emptyDF[, 9+(p-4)*2+2][1] <- getCt(t = Period, C0 = C0, Thalf = Thalf)
}
for(c in 1:length(Attrib)){
colName <- Attrib[c]
if(colName != "Age"){
emptyDF[[colName]] <- sample(attribFactors[[c]], size = 1)
}
if(colName == "Age"){
emptyDF[[colName]] <- sample(attribFactors[[c]][1]:attribFactors[[c]][2], size = 1)
}
}
if(returnDF == FALSE) write.xlsx(emptyDF, file = fileName, overwrite = TRUE)
if(returnDF == TRUE) return(emptyDF)
}
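
## Illustrative usage sketch (a minimal example; the file name "template.xlsx"
## is a hypothetical placeholder, not fixed by the package).
if (FALSE) {
	library(AntibodyTiters)
	# write a one-row template workbook with 7 time points and default attributes
	emptyABT(fileName = "template.xlsx", pmax = 7)
	# or inspect the same template as a data.frame without writing a file
	templateDF <- emptyABT(pmax = 7, returnDF = TRUE)
	str(templateDF)
}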
| /scratch/gouwar.j/cran-all/cranData/AntibodyTiters/R/emptyABT.R |
groupAttribABT <- function(objName = "inData", sourceAttrib = "Age",
newAttribName = "AgeGroup", groupNames = c("young", "middle", "elderly"),
groupLimmits = list(c(0, 39), c(40, 64), c(65, 200))){
x <- get(objName)
DATA <- x$DATA
inAttrib <- x$Attrib
if(length(which(names(inAttrib) == sourceAttrib)) == 0){
stop(paste("sourceAttrib (", sourceAttrib, ") must be included in the Attrib of ", objName, ".\n", sep = ""))
}
if(is.numeric(inAttrib[[sourceAttrib]]) == FALSE & is.integer(inAttrib[[sourceAttrib]]) == FALSE){
stop(paste("sourceAttrib (", sourceAttrib, ") in the Attrib of ", objName,
" must be an integer or numeric vector", ".\n", sep = ""))
}
if(length(groupNames) != length(groupLimmits)){
stop(paste("Length of groupNames must be the same as that of groupLimmits.\n"))
}
names(groupLimmits) <- groupNames
DATA[[newAttribName]] <- as.character(NA)
for(g in 1:length(groupLimmits)){
groupName <- groupNames[g]
lowerLimmit <- groupLimmits[[groupName]][1]
upperLimmit <- groupLimmits[[groupName]][2]
trueIndex <- DATA[[sourceAttrib]] >= lowerLimmit & DATA[[sourceAttrib]] <= upperLimmit
DATA[[newAttribName]][trueIndex] <- groupName
}
inData <- DATA
additional.colnames <- colnames(inData)[10:ncol(inData)]
point.index <- grep(pattern = "point", additional.colnames)
yyyymmdd.index <- grep(pattern = "_yyyymmdd", additional.colnames)
score.index <- grep(pattern = "_score", additional.colnames)
additional.DF <- data.frame(name = additional.colnames)
additional.DF$noPoint <- FALSE
additional.DF$noPoint[grep(pattern = "point", additional.colnames, invert = TRUE)] <- TRUE
additional.DF$noM1 <- FALSE
additional.DF$noM1[grep(pattern = "M1_", additional.colnames, invert = TRUE)] <- TRUE
additional.DF$noM2 <- FALSE
additional.DF$noM2[grep(pattern = "M2_", additional.colnames, invert = TRUE)] <- TRUE
additional.DF$noM3 <- FALSE
additional.DF$noM3[grep(pattern = "M3_", additional.colnames, invert = TRUE)] <- TRUE
additional.DF$noM4 <- FALSE
additional.DF$noM4[grep(pattern = "M4_", additional.colnames, invert = TRUE)] <- TRUE
additional.DF$noM6 <- FALSE
additional.DF$noM6[grep(pattern = "M6_", additional.colnames, invert = TRUE)] <- TRUE
additional.DF$attrib <- FALSE
additional.DF$attrib[rowSums(additional.DF[,2:7]) == 6] <- TRUE
attrib.index <- which(additional.DF$attrib == TRUE)
attribColnames <- additional.colnames[attrib.index]
Attrib <- list()
for(a in 1:length(attribColnames)){
if(is.numeric(inData[[attribColnames[a]]]) == FALSE & is.integer(inData[[attribColnames[a]]]) == FALSE){
colNames <- as.character(unique(inData[[attribColnames[a]]]))
for(c in 1:length(colNames)){
Attrib[[attribColnames[a]]][colNames[c]] <-
length(which(inData[[attribColnames[a]]] == unique(inData[[attribColnames[a]]])[c]))
}
}
if(is.numeric(inData[[attribColnames[a]]]) == TRUE | is.integer(inData[[attribColnames[a]]]) == TRUE){
# Attrib[[attribColnames[a]]] <- c(min(inData[[attribColnames[a]]]), max(inData[[attribColnames[a]]]))
Attrib[[attribColnames[a]]] <- inData[[attribColnames[a]]]
}
}
pmax <- x$pmax
longestFromSecond <- x$longestFromSecond
shortestFromSecond <- x$shortestFromSecond
inData <- list(DATA = inData, pmax = pmax,
longestFromSecond = longestFromSecond,
shortestFromSecond = shortestFromSecond,
Attrib = Attrib)
class(inData) <- "ABT"
return(inData)
}
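
## Illustrative usage sketch (hypothetical object and group names; assumes an
## ABT object "inData" with a numeric "Age" attribute, e.g. created by readABT()).
if (FALSE) {
	inDataGrouped <- groupAttribABT(objName = "inData", sourceAttrib = "Age",
	                                newAttribName = "AgeGroup",
	                                groupNames = c("young", "middle", "elderly"),
	                                groupLimmits = list(c(0, 39), c(40, 64), c(65, 200)))
	inDataGrouped   # the print method summarises the new "AgeGroup" attribute
}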
| /scratch/gouwar.j/cran-all/cranData/AntibodyTiters/R/groupAttribABT.R |
halfLifeABT <- function(x, output = "list", OutFileName = "Thalf.xlsx"){
temp <- x$DATA
pmax <- x$pmax
IDs <- temp$ID
shortestFromSecond <- x$shortestFromSecond
DateColNames <- character(length = pmax - 2)
ScoreColNames <- character(length = pmax - 2)
DateColIndexs <- integer(length = pmax - 2)
ScoreColIndexs <- integer(length = pmax - 2)
for(p in 3:pmax){
DateColNames[p-2] <- paste("point", p, "_from2ndShot", sep = "")
ScoreColNames[p-2] <- paste("point", p, "_score", sep = "")
DateColIndexs[p-2] <- grep(pattern = DateColNames[p-2], colnames(temp))
ScoreColIndexs[p-2] <- grep(pattern = ScoreColNames[p-2], colnames(temp))
}
getThalf <- function(C0, Ct, t){
Thalf <- t/log(C0/Ct, base = 2)
return(Thalf)
}
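	# Half-life from two titers: e.g. getThalf(C0 = 1000, Ct = 500, t = 90)
	# gives 90, because the titer halves exactly once over the 90-day interval.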
outList <- list()
for(i in 1:nrow(temp)){
ID <- IDs[i]
ID.scores <- temp[i, ScoreColIndexs]
ID.dates <- temp[i, DateColIndexs]
NA.index <- is.na(ID.scores) == FALSE & is.na(ID.dates) == FALSE
ID.scores <- ID.scores[NA.index]
ID.dates <- ID.dates[NA.index]
if(length(ID.scores) >= 2 & length(ID.scores) == length(ID.dates)){
ID.DF <- data.frame(ID = ID,
START = as.integer(ID.dates[1:(length(ID.dates)-1)]),
END = as.integer(ID.dates[2:length(ID.dates)]),
SPAN = as.integer(NA), MID = as.integer(NA),
C0 = as.numeric(ID.scores[1:(length(ID.scores)-1)]),
Ct = as.numeric(ID.scores[2:length(ID.scores)]),
Thalf = as.numeric(NA))
ID.DF$SPAN <- ID.DF$END - ID.DF$START
ID.DF$MID <- ID.DF$START + floor(ID.DF$SPAN/2)
ID.DF$Thalf <- unlist(Map(f = getThalf, C0 = ID.DF$C0, Ct = ID.DF$Ct, t = ID.DF$SPAN), use.names = FALSE)
outList[[ID]] <- ID.DF
}
if(length(ID.scores) < 2 | length(ID.scores) != length(ID.dates)){
if(length(ID.scores) != length(ID.dates)){
stop("check the validity of scores and dates for ", ID, "\n")
}
outList[[ID]] <- NA
}
}
if(output == "list") return(outList)
if(output == "data.frame" | output == "xlsx"){
outDF <- outList[[1]]
if(length(outList) > 1){
for(l in 2:length(outList)){
if(inherits(outList[[l]], "data.frame")){
outDF <- rbind(outDF, outList[[l]])
}
}
}
if(output == "data.frame") return(outDF)
if(output == "xlsx") write.xlsx(outDF, file = OutFileName)
}
}
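
## Illustrative usage sketch ("inData" stands for any ABT object; the output
## file name below is a hypothetical placeholder).
if (FALSE) {
	thalfPerPatient <- halfLifeABT(inData, output = "list")       # one data.frame per patient
	thalfTable      <- halfLifeABT(inData, output = "data.frame") # all intervals in one table
	halfLifeABT(inData, output = "xlsx", OutFileName = "Thalf.xlsx")
}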
| /scratch/gouwar.j/cran-all/cranData/AntibodyTiters/R/halfLifeABT.R |
# source: 20210922_AntibodyTiters_1.txt
idABT <- function(objName = "inData"){
if(inherits(get(objName), "ABT") != TRUE) stop("The class must be ABT\n")
return(get(objName)$DATA$ID)
}
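
## Illustrative usage sketch: list the patient IDs stored in an ABT object
## that is bound to the (hypothetical) name "inData".
if (FALSE) {
	idABT(objName = "inData")
}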
| /scratch/gouwar.j/cran-all/cranData/AntibodyTiters/R/idABT.R |
plotAllABT <- function(objName = "inData", prefix = "",
dayStart = as.integer(NA), dayEnd = as.integer(NA), type = "weeks", rainbow = FALSE, ylab = "Titer (AU/ml)",
savePDF = FALSE, alphaFactor = 10, lwd = 2, lineAttrib = "", addPoints = FALSE,
orderOfCategories = "", lowessSmooth = FALSE, geometricMean = FALSE, lineForPre = FALSE, lineFor1st = FALSE,
logY = TRUE, PDFwidth = 8, PDFheight = 5, main = NULL, omitPreVac = FALSE, lineColDark = FALSE){
# orderOfCategories = c("young", "middle", "elderly")
if(is.null(main) == TRUE) main <- objName
if(type != "days" & type != "weeks"){
cat("discrete mode\n")
plotAllABT_discrete(objName = objName, prefix = prefix, dayStart = dayStart, dayEnd = dayEnd,
type = type, rainbow = rainbow, ylab = ylab, savePDF = savePDF, alphaFactor = alphaFactor,
lwd = lwd, lineAttrib = lineAttrib, addPoints = addPoints,
orderOfCategories = orderOfCategories, lowessSmooth = lowessSmooth,
geometricMean = geometricMean, lineForPre = lineForPre, lineFor1st = lineFor1st, logY = logY,
PDFwidth = PDFwidth, PDFheight = PDFheight, main = main, omitPreVac = omitPreVac,
lineColDark = lineColDark)
}
if(type == "days" | type == "weeks"){
if(inherits(get(objName), "ABT") != TRUE) stop("The class must be ABT\n")
if(is.na(dayStart) == FALSE & is.na(dayEnd) == FALSE & dayStart >= dayEnd){
stop("dayStart must be smaller than dayEnd")
}
if(prefix == ""){
fileName <- paste(objName, "_", type, "_All", ".pdf", sep = "")
}
if(prefix != ""){
fileName <- paste(prefix, "_", objName, "_", type, "_All",
".pdf", sep = "")
}
if(prefix == "" & lineAttrib != ""){
fileName <- paste(objName, "_", lineAttrib, "_", type, "_All", ".pdf", sep = "")
}
if(prefix != "" & lineAttrib != ""){
fileName <- paste(prefix, "_", objName, "_", lineAttrib, "_", type, "_All",
".pdf", sep = "")
}
temp <- get(objName)$DATA
pmax <- get(objName)$pmax
Attrib <- get(objName)$Attrib
if(lineAttrib != "" & orderOfCategories[1] != ""){
attributeCategories <- names(Attrib[[lineAttrib]])
if(length(attributeCategories) != length(orderOfCategories)){
stop(paste("Contents of orderOfCategories do not match those in the provided object ", objName, ".\n", sep = ""))
}
attributeCategories <- attributeCategories[order(attributeCategories)]
for(a in 1:length(attributeCategories)){
if(attributeCategories[a] != orderOfCategories[order(orderOfCategories)][a]){
stop(paste("Contents of orderOfCategories do not match those in the provided object ", objName, ".\n", sep = ""))
}
}
}
if(is.na(dayStart) == TRUE) shortestFromSecond <- get(objName)$shortestFromSecond
if(is.na(dayEnd) == TRUE) longestFromSecond <- get(objName)$longestFromSecond
if(is.na(dayStart) == FALSE) shortestFromSecond <- dayStart
if(is.na(dayEnd) == FALSE) longestFromSecond <- dayEnd
if(type == "days"){
xlab <- "Days after second shot"
}
if(type == "weeks"){
xlab <- "Weeks after second shot"
shortestFromSecond <- shortestFromSecond*1/7
longestFromSecond <- longestFromSecond*1/7
}
if(type != "weeks" & type != "days") {
stop("type must be weeks, days, M1, M2, M3, M4 or M6.")
}
xRange <- longestFromSecond - shortestFromSecond
posPre <- shortestFromSecond - xRange*2/8
pos1st <- shortestFromSecond - xRange*1/8
# xFromSecond <- c(posPre, pos1st, from.secondShot[-(1:2)])
if(omitPreVac == FALSE) xlim <- c(posPre, longestFromSecond)
if(omitPreVac == TRUE) xlim <- c(pos1st, longestFromSecond)
#ylim <- c(0, max(temp[, grep(pattern = "score", colnames(temp))], na.rm = TRUE))
temp2 <- temp
patients.pop <- nrow(temp2)
# if(1 <= patients.pop && patients.pop <= 30) alpha <- 0.5
# if(30 < patients.pop && patients.pop <= 70) alpha <- 0.3
# if(70 < patients.pop) alpha <- 0.1
alpha <- round(1/patients.pop*alphaFactor, digits = 2)
if(alpha > 1) alpha <- 1
if(alphaFactor <= 0 | alphaFactor > 100){
stop("alphaFactor should be in a range from 1 to 100")
}
if(savePDF == TRUE) pdf(file = fileName, width = PDFwidth, height = PDFheight)
if(lineAttrib != "" & inherits(temp2[[lineAttrib]], "numeric") != TRUE){
categories <- names(Attrib[[lineAttrib]])
categories <- categories[order(categories)]
if(orderOfCategories[1] != "") categories <- orderOfCategories
COLs <- hcl.colors(n = length(categories), palette = "Dynamic", rev = TRUE, alpha = 0.8)
COLsDark <- rgb(t(col2rgb(COLs)/1.2), maxColorValue=255)
pointCOLs <- hcl.colors(n = length(categories), palette = "Dynamic", rev = TRUE, alpha = 0.3)
lineCOLs <- hcl.colors(n = length(categories), palette = "Dynamic", rev = TRUE, alpha = alpha)
}
total.scores <- temp2[, grep(pattern = "_score", x = colnames(temp2))]
if(logY == FALSE){
ymin <- min(total.scores, na.rm = TRUE)
ymax <- max(total.scores, na.rm = TRUE)
ymax <- ymax * 1.1
ylim <- c(ymin, ymax)
plot(x = 1:2, y = numeric(length = 2), xlim = xlim, ylim = ylim, main = main,
xaxt = "n", yaxt = "n", type = "n", xlab = xlab, ylab = ylab)
FROM <- floor(ymin/10000)
# BY <- floor((ymax - ymin)/5/100)*10^floor(log(ymax, base = 10))
TO <- ceiling(ymax/10^(floor(log(ymax, base = 10))))*10^(floor(log(ymax, base = 10)))
# BY <- (TO - FROM)/5
BY <- ceiling((TO - FROM)/8/(10^floor(log((TO - FROM)/8, base = 10)))) * 10^floor(log((TO - FROM)/8, base = 10))
axis(2, at = seq(from = FROM, to = TO, by = BY), lwd = lwd)
}
if(logY == TRUE){
if(length(total.scores[total.scores == 0]) > 0){
temp.ymin <- min(total.scores[total.scores != 0 & is.na(total.scores) == FALSE])
addToY <- 10^(floor(log(temp.ymin*1000, base = 10))-3)
for(n in 1:ncol(total.scores)){
total.scores[total.scores[,n] == 0 & is.na(total.scores[,n]) == FALSE, n] <-
total.scores[total.scores[,n] == 0 & is.na(total.scores[,n]) == FALSE, n] + addToY
}
cat(addToY, " was added to the time points of score=0.\n", sep = "")
}
ymin <- min(total.scores[total.scores != 0 & is.na(total.scores) == FALSE])*0.9
ymax <- max(total.scores, na.rm = TRUE)
ymax <- ymax * 11 # 5
ylim <- c(ymin, ymax)
plot(x = 1:2, y = c(1, 1), xlim = xlim, ylim = ylim, main = main,
xaxt = "n", yaxt = "n", type = "n", xlab = xlab, ylab = ylab, log = "y")
FROM <- floor(log(ymin, base = 10))
TO <- floor(log(ymax, base = 10))+1
AT <- integer(length = (TO - FROM + 1) * 10 - (TO - FROM))
for(i in 1:length(FROM:TO)){
p <- seq(FROM, TO, 1)[i]
if(p == FROM){
START <- ((i-1)*10+1)
END <- ((i-1)*10+10)
AT[START:END] <- seq(from = (10^p)/10, to = (10^(p+1))/10, by = (10^p)/10)
}
if(p != FROM){
START <- END
END <- END + 9
AT[START:END] <- seq(from = (10^p)/10, to = (10^(p+1))/10, by = (10^p)/10)
}
}
axis(2, at = AT, labels = FALSE, lwd = lwd/2)
AT <- integer(length = (TO - FROM + 1))
for(i in 1:length(FROM:TO)){
p <- seq(FROM, TO, 1)[i]
AT[i] <- (10^(p+1))/10
}
axis(2, at = AT, labels = formatC(AT, format = "g"), lwd = lwd)
if(length(AT) < 5){
AT <- integer(length = (TO - FROM + 1))
for(i in 1:length(FROM:TO)){
p <- seq(FROM, TO, 1)[i]
AT[i] <- (10^(p+1))/10*5
}
axis(2, at = AT, labels = formatC(AT, format = "g"), lwd = lwd)
}
}
if(omitPreVac == FALSE) axis(1, at = c(posPre, pos1st), labels = c("pre", "1st"), lwd = lwd)
if(omitPreVac == TRUE) axis(1, at = c(pos1st), labels = c("1st"), lwd = lwd)
if(type == "weeks") axis(1, at = seq(floor(shortestFromSecond/10)*10, longestFromSecond, 10), lwd = lwd)
if(type == "days") axis(1, at = seq(floor(shortestFromSecond/50)*50, longestFromSecond, 50), lwd = lwd)
# axis(2, lwd = lwd)
box(lwd = lwd)
patients.max <- nrow(temp2)+1
for(p in 1:nrow(temp2)){
patientID <- temp2$ID[p]
temp2.scores <- temp2[p, grep(pattern = "score", colnames(temp2))]
temp2.dates <- temp2[p, grep(pattern = "yyyymmdd", colnames(temp2))]
secondShot <- temp2.dates[["2nd_shot_yyyymmdd"]][1]
temp2.dates <- temp2.dates[, colnames(temp2.dates) != "1st_shot_yyyymmdd"]
temp2.dates <- temp2.dates[, colnames(temp2.dates) != "2nd_shot_yyyymmdd"]
from.secondShot <- as.integer(temp2.dates) - as.integer(secondShot)
if(type == "weeks"){
from.secondShot <- from.secondShot*1/7
}
temp2.scores <- as.integer(temp2.scores)
temp2.scores <- c(temp2.scores[1:2], temp2.scores[-(1:2)][is.na(temp2.scores[-(1:2)]) == FALSE])
from.secondShot <- c(from.secondShot[1:2], from.secondShot[-(1:2)][is.na(from.secondShot[-(1:2)]) == FALSE])
temp2.scores <- c(temp2.scores[1:2], temp2.scores[-(1:2)][from.secondShot[-(1:2)] >= shortestFromSecond])
from.secondShot <- c(from.secondShot[1:2], from.secondShot[-(1:2)][from.secondShot[-(1:2)] >= shortestFromSecond])
# if(length(temp2.scores) == 3 | length(from.secondShot) == 3){
if(length(temp2.scores) == 2 | length(from.secondShot) == 2){
cat(paste("There is no enough data for ", patientID, "\n", sep = ""))
warning(paste("No plot was drawn for ", patientID, "\n", sep = ""))
}
# if(length(temp2.scores) > 3 & length(from.secondShot) > 3){
if(length(temp2.scores) > 2 & length(from.secondShot) > 2){
xFromSecond <- c(posPre, pos1st, from.secondShot[-(1:2)])
if(logY == TRUE){
if(length(temp2.scores[temp2.scores == 0 & is.na(temp2.scores) == FALSE]) > 0){
# temp.ymin <- min(temp2.scores[temp2.scores != 0 & is.na(temp2.scores) == FALSE])
# addToY <- 10^(floor(log(temp.ymin*1000, base = 10))-3)
temp2.scores[temp2.scores == 0 & is.na(temp2.scores) == FALSE] <-
temp2.scores[temp2.scores == 0 & is.na(temp2.scores) == FALSE] + addToY
# cat(addToY, " was added to the time points of score=0.\n", sep = "")
}
}
if(rainbow == TRUE){
if(omitPreVac == FALSE){
lines(x = xFromSecond[1:3], y = temp2.scores[1:3], lty = 1, lwd = lwd,
col = rainbow(1, start = p/patients.max, alpha = alpha))
}
if(omitPreVac == TRUE){
lines(x = xFromSecond[2:3], y = temp2.scores[2:3], lty = 1, lwd = lwd,
col = rainbow(1, start = p/patients.max, alpha = alpha))
}
lines(x = xFromSecond[-(1:2)], y = temp2.scores[-(1:2)], lwd = lwd,
col = rainbow(1, start = p/patients.max, alpha = alpha))
}
if(rainbow == FALSE){
if(lineAttrib == "" | inherits(temp2[[lineAttrib]], "numeric") == TRUE){
if(omitPreVac == FALSE){
lines(x = xFromSecond[1:3], y = temp2.scores[1:3], lty = 1, lwd = lwd, col = gray(0, alpha = alpha))
}
if(omitPreVac == TRUE){
lines(x = xFromSecond[2:3], y = temp2.scores[2:3], lty = 1, lwd = lwd, col = gray(0, alpha = alpha))
}
lines(x = xFromSecond[-(1:2)], y = temp2.scores[-(1:2)], lwd = lwd, col = gray(0, alpha = alpha))
}
if(lineAttrib != "" & inherits(temp2[[lineAttrib]], "numeric") != TRUE){
c <- which(temp2[[lineAttrib]][p] == categories)
if(omitPreVac == FALSE){
lines(x = xFromSecond[1:3], y = temp2.scores[1:3], lty = 1, lwd = lwd, col = lineCOLs[c])
}
if(omitPreVac == TRUE){
lines(x = xFromSecond[2:3], y = temp2.scores[2:3], lty = 1, lwd = lwd, col = lineCOLs[c])
}
lines(x = xFromSecond[-(1:2)], y = temp2.scores[-(1:2)], lwd = lwd, col = lineCOLs[c])
}
}
}
}
if(lineAttrib != ""){
if(length(grep(pattern = lineAttrib, x = names(Attrib))) == 0){
stop("lineAttrib must be found in the Attrib of the given ABT object")
}
if(inherits(temp2[[lineAttrib]], "numeric") == TRUE){
stop("lineAttrib cannot be applied for numeric values")
}
if(inherits(temp2[[lineAttrib]], "numeric") != TRUE){
for(c in 1:length(categories)){
if(is.na(dayStart) == TRUE) shortestFromSecond <- get(objName)$shortestFromSecond
if(is.na(dayEnd) == TRUE) longestFromSecond <- get(objName)$longestFromSecond
if(is.na(dayStart) == FALSE) shortestFromSecond <- dayStart
if(is.na(dayEnd) == FALSE) longestFromSecond <- dayEnd
temp3 <- subset(temp2, temp2[[lineAttrib]] == categories[c])
temp3.scores <- temp3[, grep(pattern = "score", colnames(temp3))]
temp3.scores <- temp3.scores[, -c(1:2)]
temp3.dates <- temp3[, grep(pattern = "_from2ndShot", colnames(temp3))]
temp3.dates <- temp3.dates[, grep(pattern = "point", colnames(temp3.dates))]
# temp3.dates <- temp3.dates[, -grep(pattern = "point3", colnames(temp3.dates))]
RAW <- list()
# RAW[shortestFromSecond:longestFromSecond] <- as.numeric(NA)
RAW[(shortestFromSecond:longestFromSecond)+1] <- as.numeric(NA)
for(dcol in 1:ncol(temp3.dates)){
for(drow in 1:nrow(temp3.dates)){
# x.index <- temp3.dates[drow, dcol] # min=0
x.index <- temp3.dates[drow, dcol] +1
# if(is.na(x.index) == FALSE & x.index == 9) cat("drow:", drow, ", dcol:", dcol, "\n")
if(is.na(dayStart) == FALSE & is.na(dayEnd) == FALSE){
if(is.na(x.index) == FALSE & x.index >= dayStart & x.index <= dayEnd){
score <- temp3.scores[drow, dcol]
RAW[[x.index]] <- c(RAW[[x.index]], score)
}
}
if(is.na(dayStart) == FALSE & is.na(dayEnd) == TRUE){
if(is.na(x.index) == FALSE & x.index >= dayStart){
score <- temp3.scores[drow, dcol]
RAW[[x.index]] <- c(RAW[[x.index]], score)
}
}
if(is.na(dayStart) == TRUE & is.na(dayEnd) == FALSE){
if(is.na(x.index) == FALSE & x.index <= dayEnd){
score <- temp3.scores[drow, dcol]
RAW[[x.index]] <- c(RAW[[x.index]], score)
}
}
if(is.na(dayStart) == TRUE & is.na(dayEnd) == TRUE){
if(is.na(x.index) == FALSE){
score <- temp3.scores[drow, dcol]
RAW[[x.index]] <- c(RAW[[x.index]], score)
}
}
}
}
RAW2 <- data.frame(x = shortestFromSecond:longestFromSecond, score = NA)
for(i in shortestFromSecond:longestFromSecond){
# scores <- RAW[[i]][is.na(RAW[[i]]) == FALSE]
scores <- RAW[[i+1]][is.na(RAW[[i+1]]) == FALSE]
if(length(scores) > 0) RAW2$score[RAW2$x == i] <- mean(scores)
}
RAW3 <- subset(RAW2, is.na(RAW2$score) == FALSE & RAW2$score != 0)
if(logY == FALSE){
RAW3$l <- lowess(RAW3)$y
}
if(logY == TRUE){
RAW3$scoreLog <- log(RAW3$score, base = 10)
RAW3$lLog <- lowess(x = RAW3$x, y = RAW3$scoreLog)$y
RAW3$l <- 10^RAW3$lLog
}
if(type == "weeks"){
RAW3$x <- RAW3$x*1/7
shortestFromSecond <- shortestFromSecond*1/7
longestFromSecond <- longestFromSecond*1/7
}
if(addPoints == TRUE){
if(omitPreVac == FALSE){
points(x = RAW3$x, y = RAW3$score, pch = 20, cex = 0.8, col = pointCOLs[c])
}
if(omitPreVac == TRUE){
points(x = RAW3$x[2:nrow(RAW3)], y = RAW3$score[2:nrow(RAW3)], pch = 20, cex = 0.8, col = pointCOLs[c])
}
}
if(lowessSmooth == TRUE){
if(lineColDark == FALSE){
if(omitPreVac == FALSE){
lines(x = RAW3$x, y = RAW3$l, col = COLs[c], lwd = lwd*2)
}
if(omitPreVac == TRUE){
lines(x = RAW3$x[2:nrow(RAW3)], y = RAW3$l[2:nrow(RAW3)], col = COLs[c], lwd = lwd*2)
}
}
if(lineColDark == TRUE){
if(omitPreVac == FALSE){
lines(x = RAW3$x, y = RAW3$l, col = COLsDark[c], lwd = lwd*2)
}
if(omitPreVac == TRUE){
lines(x = RAW3$x[2:nrow(RAW3)], y = RAW3$l[2:nrow(RAW3)], col = COLsDark[c], lwd = lwd*2)
}
}
}
legendX <- longestFromSecond - (longestFromSecond - shortestFromSecond)/3 # 3 or 4
if(logY == FALSE) legendY <- ylim[2] - (ylim[2] - ylim[1])/10*c
if(logY == TRUE) legendY <- 10^(log(ylim[2], base = 10) -
((log(ylim[2], base = 10) - log(ylim[1], base = 10))/15*(c))) # c+1 or c?
if(lineColDark == FALSE){
legend(x = legendX, y = legendY, col = COLs[c], lwd = lwd*2,
legend = categories[c], bty = "n")
}
if(lineColDark == TRUE){
legend(x = legendX, y = legendY, col = COLsDark[c], lwd = lwd*2,
legend = categories[c], bty = "n")
}
if(c == 1){
legendX <- longestFromSecond - (longestFromSecond - shortestFromSecond)/3.5 # 4.5
# legendY <- ylim[2] - (ylim[2] - ylim[1])/10*c * 1.4
if(logY == FALSE) legendY <- ylim[2] - (ylim[2] - ylim[1])/10*c * 1.4
if(logY == TRUE) legendY <- 10^(log(ylim[2], base = 10) -
((log(ylim[2], base = 10) - log(ylim[1], base = 10))/20*(c+1))) # c+1
text(x = legendX, y = legendY, pos = 3, labels = lineAttrib)
}
}
}
}
if(savePDF == TRUE) dev.off()
}
}
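
## Illustrative usage sketch (hypothetical names: "inData" is an ABT object in
## the calling environment, and "AgeGroup" is a categorical attribute such as
## one added with groupAttribABT()).
if (FALSE) {
	# continuous x axis in weeks, one semi-transparent line per patient
	plotAllABT(objName = "inData", type = "weeks")
	# colour the lines by a categorical attribute and add lowess-smoothed means
	plotAllABT(objName = "inData", type = "weeks", lineAttrib = "AgeGroup",
	           addPoints = TRUE, lowessSmooth = TRUE)
	# discrete types ("M1", "M2", "M3", "M4", "M6") are delegated to plotAllABT_discrete()
	plotAllABT(objName = "inData", type = "M1", savePDF = TRUE)
}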
| /scratch/gouwar.j/cran-all/cranData/AntibodyTiters/R/plotAllABT.R |
plotAllABT_discrete <- function(objName = "inData", prefix = "",
dayStart = as.integer(NA), dayEnd = as.integer(NA), type = "M1", rainbow = FALSE, ylab = "Titer (AU)",
savePDF = FALSE, alphaFactor = 10, lwd = 2, lineAttrib = "", addPoints = FALSE,
orderOfCategories = "", lowessSmooth = FALSE, geometricMean = FALSE, lineForPre = FALSE, lineFor1st = FALSE,
logY = TRUE, PDFwidth = 8, PDFheight = 5, main = NULL, omitPreVac = FALSE, lineColDark = FALSE){
# orderOfCategories = c("young", "middle", "elderly")
if(is.null(main) == TRUE) main <- objName
if(inherits(get(objName), "ABT") != TRUE) stop("The class must be ABT\n")
if(is.na(dayStart) == FALSE & is.na(dayEnd) == FALSE & dayStart >= dayEnd){
stop("dayStart must be smaller than dayEnd")
}
if(prefix == ""){
fileName <- paste(objName, "_", type, "_All", ".pdf", sep = "")
}
if(prefix != ""){
fileName <- paste(prefix, "_", objName, "_", type, "_All",
".pdf", sep = "")
}
if(prefix == "" & lineAttrib != ""){
fileName <- paste(objName, "_", lineAttrib, "_", type, "_All", ".pdf", sep = "")
}
if(prefix != "" & lineAttrib != ""){
fileName <- paste(prefix, "_", objName, "_", lineAttrib, "_", type, "_All",
".pdf", sep = "")
}
temp <- get(objName)$DATA
pmax <- get(objName)$pmax
Attrib <- get(objName)$Attrib
if(lineAttrib != "" & orderOfCategories[1] != ""){
attributeCategories <- names(Attrib[[lineAttrib]])
if(length(attributeCategories) != length(orderOfCategories)){
stop(paste("Contents of orderOfCategories do not match those in the provided object ", objName, ".\n", sep = ""))
}
attributeCategories <- attributeCategories[order(attributeCategories)]
for(a in 1:length(attributeCategories)){
if(attributeCategories[a] != orderOfCategories[order(orderOfCategories)][a]){
stop(paste("Contents of orderOfCategories do not match those in the provided object ", objName, ".\n", sep = ""))
}
}
}
if(is.na(dayStart) == TRUE) shortestFromSecond <- get(objName)$shortestFromSecond
if(is.na(dayEnd) == TRUE) longestFromSecond <- get(objName)$longestFromSecond
if(is.na(dayStart) == FALSE) shortestFromSecond <- dayStart
if(is.na(dayEnd) == FALSE) longestFromSecond <- dayEnd
xlab <- "Months after second shot"
colNames <- character(length = 0)
x.labels <- character(length = 0)
post2ndDays <- as.integer(temp[["point3_yyyymmdd"]] - temp[["2nd_shot_yyyymmdd"]])
c <- 1
if(type == "M1"){
firstMonth <- min(ceiling(shortestFromSecond/30), ceiling(post2ndDays/30), na.rm = TRUE)
if(firstMonth == 0) firstMonth <- 1
lastMonth <- ceiling(longestFromSecond/30)
for(m in firstMonth:lastMonth){
colNames[c] <-paste("M1_", m, "_from2ndShot", sep = "")
x.labels[c] <- m
c <- c + 1
}
}
if(type == "M2"){
firstM2group <- min(ceiling(shortestFromSecond/60), ceiling(post2ndDays/60), na.rm = TRUE)
if(firstM2group == 0) firstM2group <- 1
lastM2group <- ceiling(longestFromSecond/60)
for(m in firstM2group: lastM2group){
M2start <- (m-1)*2 + 1
M2end <- (m-1)*2 + 2
colNames[c] <- paste("M2_", M2start, "-", M2end, "_from2ndShot", sep = "")
x.labels[c] <- paste(M2start, "-", M2end, sep = "")
c <- c + 1
}
}
if(type == "M3"){
firstM3group <- min(ceiling(shortestFromSecond/90), ceiling(post2ndDays/90), na.rm = TRUE)
if(firstM3group == 0) firstM3group <- 1
lastM3group <- ceiling(longestFromSecond/90)
for(m in firstM3group: lastM3group){
M3start <- (m-1)*3 + 1
M3end <- (m-1)*3 + 3
colNames[c] <- paste("M3_", M3start, "-", M3end, "_from2ndShot", sep = "")
x.labels[c] <- paste(M3start, "-", M3end, sep = "")
c <- c + 1
}
}
if(type == "M4"){
firstM4group <- min(ceiling(shortestFromSecond/120), ceiling(post2ndDays/120), na.rm = TRUE)
if(firstM4group == 0) firstM4group <- 1
lastM4group <- ceiling(longestFromSecond/120)
for(m in firstM4group: lastM4group){
M4start <- (m-1)*4 + 1
M4end <- (m-1)*4 + 4
colNames[c] <- paste("M4_", M4start, "-", M4end, "_from2ndShot", sep = "")
x.labels[c] <- paste(M4start, "-", M4end, sep = "")
c <- c + 1
}
}
if(type == "M6"){
firstM6group <- min(ceiling(shortestFromSecond/180), ceiling(post2ndDays/180), na.rm = TRUE)
if(firstM6group == 0) firstM6group <- 1
lastM6group <- ceiling(longestFromSecond/180)
for(m in firstM6group: lastM6group){
M6start <- (m-1)*6 + 1
M6end <- (m-1)*6 + 6
colNames[c] <- paste("M6_", M6start, "-", M6end, "_from2ndShot", sep = "")
x.labels[c] <- paste(M6start, "-", M6end, sep = "")
c <- c + 1
}
}
if(type != "M1" & type != "M2" & type != "M3" & type != "M4" & type != "M6") {
stop("type must be weeks, days, M1, M2, M3, M4 or M6.")
}
# x <- c(1:3, 4:(3+length(colNames)))
x <- c(1:2, 3:(2+length(colNames)))
# if(omitPreVac == FALSE) x <- c(1:2, 3:(2+length(colNames)))
# if(omitPreVac == TRUE) x <- c(-1, 2, 3:(2+length(colNames)))
# ylim <- c(0, max(temp[, grep(pattern = "score", colnames(temp))], na.rm = TRUE))
temp2 <- temp
patients.pop <- nrow(temp2)
# if(1 <= patients.pop && patients.pop <= 30) alpha <- 0.5
# if(30 < patients.pop && patients.pop <= 70) alpha <- 0.3
# if(70 < patients.pop) alpha <- 0.1
alpha <- round(1/patients.pop*alphaFactor, digits = 2)
if(alpha > 1) alpha <- 1
if(alphaFactor <= 0 | alphaFactor > 100){
stop("alphaFactor should be in a range from 1 to 100")
}
if(savePDF == TRUE) pdf(file = fileName, width = PDFwidth, height = PDFheight)
if(lineAttrib != "" & inherits(temp2[[lineAttrib]], "numeric") != TRUE){
categories <- names(Attrib[[lineAttrib]])
categories <- categories[order(categories)]
if(orderOfCategories[1] != "") categories <- orderOfCategories
COLs <- hcl.colors(n = length(categories), palette = "Dynamic", rev = TRUE, alpha = 0.8)
COLsDark <- rgb(t(col2rgb(COLs)/1.2), maxColorValue=255)
pointCOLs <- hcl.colors(n = length(categories), palette = "Dynamic", rev = TRUE, alpha = 0.3)
lineCOLs <- hcl.colors(n = length(categories), palette = "Dynamic", rev = TRUE, alpha = alpha)
}
total.scores <- temp2[, grep(pattern = "_score", x = colnames(temp2))]
if(logY == FALSE){
ymin <- min(total.scores, na.rm = TRUE)
ymax <- max(total.scores, na.rm = TRUE)
ymax <- ymax * 1.1
ylim <- c(ymin, ymax)
if(omitPreVac == FALSE){
plot(x = x, y = numeric(length = length(x)), ylim = ylim, main = main,
xaxt = "n", yaxt = "n", type = "n", xlab = xlab, ylab = ylab)
}
if(omitPreVac == TRUE){
plot(x = x[2:length(x)], y = numeric(length = length(x)-1), ylim = ylim, main = main,
xaxt = "n", yaxt = "n", type = "n", xlab = xlab, ylab = ylab)
}
FROM <- floor(ymin/10000)
# BY <- floor((ymax - ymin)/5/100)*10^floor(log(ymax, base = 10))
TO <- ceiling(ymax/10^(floor(log(ymax, base = 10))))*10^(floor(log(ymax, base = 10)))
# BY <- (TO - FROM)/5
BY <- ceiling((TO - FROM)/8/(10^floor(log((TO - FROM)/8, base = 10)))) * 10^floor(log((TO - FROM)/8, base = 10))
axis(2, at = seq(from = FROM, to = TO, by = BY), lwd = lwd)
}
if(logY == TRUE){
if(length(total.scores[total.scores == 0]) > 0){
temp.ymin <- min(total.scores[total.scores != 0 & is.na(total.scores) == FALSE])
addToY <- 10^(floor(log(temp.ymin*1000, base = 10))-3)
for(n in 1:ncol(total.scores)){
total.scores[total.scores[,n] == 0 & is.na(total.scores[,n]) == FALSE, n] <-
total.scores[total.scores[,n] == 0 & is.na(total.scores[,n]) == FALSE, n] + addToY
}
cat(addToY, " was added to the time points of score=0.\n", sep = "")
}
ymin <- min(total.scores[total.scores != 0 & is.na(total.scores) == FALSE])*0.9
ymax <- max(total.scores, na.rm = TRUE)
ymax <- ymax * 11 # 5
ylim <- c(ymin, ymax)
xtemp <- numeric(length = length(x))
xtemp[1:length(x)] <- 1
if(omitPreVac == FALSE){
plot(x = x, y = xtemp, ylim = ylim, main = main,
xaxt = "n", yaxt = "n", type = "n", xlab = xlab, ylab = ylab, log = "y")
}
if(omitPreVac == TRUE){
plot(x = x[2:length(x)], y = xtemp[2:length(x)], ylim = ylim, main = main,
xaxt = "n", yaxt = "n", type = "n", xlab = xlab, ylab = ylab, log = "y")
}
FROM <- floor(log(ymin, base = 10))
TO <- floor(log(ymax, base = 10))+1
AT <- integer(length = (TO - FROM + 1) * 10 - (TO - FROM))
for(i in 1:length(FROM:TO)){
p <- seq(FROM, TO, 1)[i]
if(p == FROM){
START <- ((i-1)*10+1)
END <- ((i-1)*10+10)
AT[START:END] <- seq(from = (10^p)/10, to = (10^(p+1))/10, by = (10^p)/10)
}
if(p != FROM){
START <- END
END <- END + 9
AT[START:END] <- seq(from = (10^p)/10, to = (10^(p+1))/10, by = (10^p)/10)
}
}
axis(2, at = AT, labels = FALSE, lwd = lwd/2)
AT <- integer(length = (TO - FROM + 1))
for(i in 1:length(FROM:TO)){
p <- seq(FROM, TO, 1)[i]
AT[i] <- (10^(p+1))/10
}
axis(2, at = AT, labels = formatC(AT, format = "g"), lwd = lwd)
if(length(AT) < 5){
AT <- integer(length = (TO - FROM + 1))
for(i in 1:length(FROM:TO)){
p <- seq(FROM, TO, 1)[i]
AT[i] <- (10^(p+1))/10*5
}
axis(2, at = AT, labels = formatC(AT, format = "g"), lwd = lwd)
}
}
if(omitPreVac == FALSE){
axis(1, at = 1:2, labels = c("pre", "1st"), lwd = lwd)
axis(1, at = 3:length(x), labels = x.labels, lwd = lwd)
}
if(omitPreVac == TRUE){
axis(1, at = c(-1, 2), labels = c("pre", "1st"), lwd = lwd)
axis(1, at = 3:length(x), labels = x.labels, lwd = lwd)
}
box(lwd = lwd)
patients.max <- nrow(temp2)+1
for(p in 1:nrow(temp2)){
patientID <- temp2$ID[p]
# temp2.scores <- as.numeric(temp2[p, grep(pattern = "score", colnames(temp2))][1:3])
temp2.scores <- as.numeric(temp2[p, grep(pattern = "score", colnames(temp2))][1:2])
for(c in 1:length(colNames)){
colName <- colNames[c]
# temp2.scores[3 + c] <- temp2[[colName]][p]
temp2.scores[2 + c] <- temp2[[colName]][p]
}
if(logY == TRUE){
if(length(temp2.scores[temp2.scores == 0 & is.na(temp2.scores) == FALSE]) > 0){
# temp.ymin <- min(temp2.scores[temp2.scores != 0 & is.na(temp2.scores) == FALSE])
# addToY <- 10^(floor(log(temp.ymin*1000, base = 10))-3)
temp2.scores[temp2.scores == 0 & is.na(temp2.scores) == FALSE] <-
temp2.scores[temp2.scores == 0 & is.na(temp2.scores) == FALSE] + addToY
# cat(addToY, " was added to the time points of score=0.\n", sep = "")
}
}
x.patient <- x[is.na(temp2.scores) == FALSE]
temp2.scores.patient <- temp2.scores[is.na(temp2.scores) == FALSE]
# if(omitPreVac == TRUE){
# x.patient <- x.patient[x.patient != 1]
# temp2.scores.patient <- temp2.scores.patient[x.patient != 1]
# }
if(rainbow == TRUE){
lines(x = x.patient,
y = temp2.scores.patient, lty = 1, lwd = lwd,
col = rainbow(1, start = p/patients.max, alpha = alpha))
}
if(rainbow == FALSE){
if(lineAttrib == "" | inherits(temp2[[lineAttrib]], "numeric") == TRUE){
lines(x = x.patient,
y = temp2.scores.patient, lty = 1, lwd = lwd, col = gray(0, alpha = alpha))
}
if(lineAttrib != "" & inherits(temp2[[lineAttrib]], "numeric") != TRUE){
c <- which(temp2[[lineAttrib]][p] == categories)
lines(x = x.patient,
y = temp2.scores.patient, lty = 1, lwd = lwd, col = lineCOLs[c])
}
}
}
if(lineAttrib != ""){
# require(DescTools)
if(length(grep(pattern = lineAttrib, x = names(Attrib))) == 0){
stop("lineAttrib must be found in the Attrib of the given ABT object")
}
if(inherits(temp2[[lineAttrib]], "numeric") == TRUE){
stop("lineAttrib cannot be applied for numeric values")
}
if(inherits(temp2[[lineAttrib]], "numeric") != TRUE){
for(c in 1:length(categories)){
temp3 <- subset(temp2, temp2[[lineAttrib]] == categories[c])
if(type == "M1") temp3.scores <- temp3[, grep(pattern = "M1_", colnames(temp3))]
if(type == "M2") temp3.scores <- temp3[, grep(pattern = "M2_", colnames(temp3))]
if(type == "M4") temp3.scores <- temp3[, grep(pattern = "M4_", colnames(temp3))]
if(type == "M3") temp3.scores <- temp3[, grep(pattern = "M3_", colnames(temp3))]
if(type == "M6") temp3.scores <- temp3[, grep(pattern = "M6_", colnames(temp3))]
RAW <- list()
if(type == "M1"){
firstMonth <- as.integer(strsplit(colNames[1], split = "_")[[1]][2])
lastMonth <- as.integer(strsplit(colNames[length(colNames)], split = "_")[[1]][2])
}
if(type != "M1"){
tempM <- strsplit(colNames[1], split = "_")[[1]][2]
firstMonth <- as.integer(strsplit(tempM, split = "-")[[1]][1])
tempM <- strsplit(colNames[length(colNames)], split = "_")[[1]][2]
lastMonth <- as.integer(strsplit(tempM, split = "-")[[1]][1])
}
RAW[["Pre"]] <- temp3$pre_vaccination_score
RAW[["Post1st"]] <- temp3$post_1st_shot_score
RAW[(firstMonth:lastMonth)+2] <- as.numeric(NA)
names(RAW)[(firstMonth:lastMonth)+2] <- firstMonth:lastMonth
for(m in 1:length(colNames)){
colName <- colNames[m]
if(type == "M1"){
Month <- as.character(strsplit(colName, split = "_")[[1]][2])
}
if(type != "M1"){
tempM <- strsplit(colName, split = "_")[[1]][2]
Month <- as.character(strsplit(tempM, split = "-")[[1]][1])
}
RAW[[Month]] <- temp3.scores[[colName]]
}
RAW <- RAW[is.na(RAW) == FALSE]
if(omitPreVac == TRUE){
RAW[["Pre"]] <- NA
}
if(addPoints == TRUE){
for(mx in 1:length(RAW)){
for(y in 1:length(RAW[[mx]])){
xShiftFactor <- 0.05 * length(x) /12
# shifted.x <- mx + 3 - ((length(categories) * xShiftFactor)/2) + (c-1) * xShiftFactor
shifted.x <- mx - ((length(categories) * xShiftFactor)/2) + (c-1) * xShiftFactor
points(x = shifted.x, y = RAW[[mx]][y], pch = 20, cex = 0.8, col = pointCOLs[c])
}
}
}
if(lowessSmooth == TRUE){
RAW2 <- data.frame(x = names(RAW))
RAW2$score <- NA
for(m in 1:length(RAW)){
rawValues <- RAW[[m]][is.na(RAW[[m]]) == FALSE & RAW[[m]] >= 0]
if(length(rawValues) > 0){
RAW2$score[m] <- mean(rawValues)
}
}
RAW3 <- subset(RAW2, is.na(RAW2$score) == FALSE)
# RAW3 <- RAW2
# RAW3$l <- lowess(RAW3)$y
if(logY == FALSE){
RAW3$l <- lowess(RAW3)$y
}
if(logY == TRUE){
RAW3$scoreLog <- log(RAW3$score, base = 10)
RAW3$lLog <- as.numeric(NA)
RAW3$lLog[-(1:2)] <- lowess(x = RAW3$x[-(1:2)], y = RAW3$scoreLog[-(1:2)])$y
RAW3$l <- 10^RAW3$lLog
}
# lowess.x <- x
lowess.x <- x[is.na(RAW2$score) == FALSE]
names(lowess.x) <- RAW3$x
if(lineForPre == FALSE){
RAW3 <- RAW3[RAW3$x != "Pre",]
lowess.x <- lowess.x[names(lowess.x) != "Pre"]
}
if(lineFor1st == FALSE){
RAW3 <- RAW3[RAW3$x != "Post1st",]
lowess.x <- lowess.x[names(lowess.x) != "Post1st"]
}
lowess.index <- is.na(RAW3$l) == FALSE
if(lineColDark == FALSE){
lines(x = lowess.x[lowess.index], y = RAW3$l[lowess.index], col = COLs[c], lwd = lwd*2)
}
if(lineColDark == TRUE){
lines(x = lowess.x[lowess.index], y = RAW3$l[lowess.index], col = COLsDark[c], lwd = lwd*2)
}
} # if(lowessSmooth == TRUE)
if(geometricMean == TRUE){
months <- seq(firstMonth, lastMonth, as.integer(strsplit(type, "M")[[1]][2]))
GMDF <- data.frame(x = c("Pre", "Post1st", as.character(months)), mean = as.numeric(NA),
lwr.ci = as.numeric(NA), upr.ci = as.numeric(NA))
tooLow <- 1
for(m in 1:length(RAW)){
rawValues <- RAW[[m]][is.na(RAW[[m]]) == FALSE & RAW[[m]] > 0]
# if(length(rawValues) > 1){
if(length(rawValues) > 2){
GmeanResult <- Gmean(rawValues, conf.level = 0.95, na.rm = TRUE)
GM <- GmeanResult[1]
if(is.na(GM) == FALSE) GMDF$mean[m] <- GM
LOWCI <- GmeanResult[2]
if(is.na(LOWCI) == FALSE) GMDF$lwr.ci[m] <- LOWCI
UPRCI <- GmeanResult[3]
if(is.na(UPRCI) == FALSE) GMDF$upr.ci[m] <- UPRCI
}
if(length(rawValues) <= 2){
if(m <= 2) DataPoint <- GMDF$x[m]
if(m > 2) DataPoint <- paste("M", GMDF$x[m], sep = "")
if(tooLow > 1) cat(paste(", ", DataPoint, sep = ""))
if(tooLow == 1){
cat(paste("Number of samples is too low (<3) for geometric mean calculation",
" of the category \"", categories[c], "\"\n", sep = ""))
cat(paste(" at ", DataPoint, sep = ""))
tooLow <- tooLow + 1
}
}
}
if(tooLow > 1) cat("\n")
GMs <- numeric(length = length(x))
GMs[1:length(GMs)] <- NA
LOWCIs <- numeric(length = length(x))
LOWCIs[1:length(LOWCIs)] <- NA
UPRCIs <- numeric(length = length(x))
UPRCIs[1:length(UPRCIs)] <- NA
for(m in 1:length(months)){
GMs[m] <- GMDF$mean[m]
LOWCIs[m] <- GMDF$lwr.ci[m]
UPRCIs[m] <- GMDF$upr.ci[m]
}
GMDF$x.axis <- x
GMDF <- GMDF[is.na(GMDF$mean) == FALSE, ]
if(lineForPre == FALSE) GMDF <- GMDF[GMDF$x != "Pre",]
if(lineFor1st == FALSE) GMDF <- GMDF[GMDF$x != "Post1st",]
for(l in 1:nrow(GMDF)){
xShiftFactor <- 0.05 * length(x) /12
shifted.x <- GMDF$x.axis[l] - ((length(categories) * xShiftFactor)/2) + (c-1) * xShiftFactor
if(lineColDark == FALSE){
lines(x = c(shifted.x, shifted.x),
y = c(GMDF$lwr.ci[l], GMDF$mean[l]), col = COLs[c], lwd = lwd)
lines(x = c(shifted.x-0.1, shifted.x +0.1),
y = c(GMDF$lwr.ci[l], GMDF$lwr.ci[l]), col = COLs[c], lwd = lwd)
lines(x = c(shifted.x, shifted.x),
y = c(GMDF$upr.ci[l], GMDF$mean[l]), col = COLs[c], lwd = lwd)
lines(x = c(shifted.x-0.1, shifted.x +0.1),
y = c(GMDF$upr.ci[l], GMDF$upr.ci[l]), col = COLs[c], lwd = lwd)
}
if(lineColDark == TRUE){
lines(x = c(shifted.x, shifted.x),
y = c(GMDF$lwr.ci[l], GMDF$mean[l]), col = COLsDark[c], lwd = lwd)
lines(x = c(shifted.x-0.1, shifted.x +0.1),
y = c(GMDF$lwr.ci[l], GMDF$lwr.ci[l]), col = COLsDark[c], lwd = lwd)
lines(x = c(shifted.x, shifted.x),
y = c(GMDF$upr.ci[l], GMDF$mean[l]), col = COLsDark[c], lwd = lwd)
lines(x = c(shifted.x-0.1, shifted.x +0.1),
y = c(GMDF$upr.ci[l], GMDF$upr.ci[l]), col = COLsDark[c], lwd = lwd)
}
}
shiftedX <- GMDF$x.axis - ((length(categories) * xShiftFactor)/2) + (c-1) * xShiftFactor
if(lineColDark == FALSE){
lines(x = shiftedX, y = GMDF$mean, col = COLs[c], lwd = lwd*2)
}
if(lineColDark == TRUE){
lines(x = shiftedX, y = GMDF$mean, col = COLsDark[c], lwd = lwd*2)
}
} # if(geometricMean == TRUE)
legendX <- 3+length(colNames) - (3+length(colNames))/3
# legendY <- ylim[2] - (ylim[2] - ylim[1])/10*c
if(logY == FALSE) legendY <- ylim[2] - (ylim[2] - ylim[1])/10*c
if(logY == TRUE) legendY <- 10^(log(ylim[2], base = 10) -
((log(ylim[2], base = 10) - log(ylim[1], base = 10))/15*(c))) # c+1 or c?
if(lineColDark == FALSE){
legend(x = legendX, y = legendY, col = COLs[c], lwd = lwd*2,
legend = categories[c], bty = "n")
}
if(lineColDark == TRUE){
legend(x = legendX, y = legendY, col = COLsDark[c], lwd = lwd*2,
legend = categories[c], bty = "n")
}
if(c == 1){
legendX <- 2+length(colNames) - (2+length(colNames))/4
# legendY <- ylim[2] - (ylim[2] - ylim[1])/10*c * 1.4
if(logY == FALSE) legendY <- ylim[2] - (ylim[2] - ylim[1])/10*c * 1.4
if(logY == TRUE) legendY <- 10^(log(ylim[2], base = 10) -
((log(ylim[2], base = 10) - log(ylim[1], base = 10))/20*(c+1))) # c+1
text(x = legendX, y = legendY, pos = 3, labels = lineAttrib)
}
}
}
}
if(savePDF == TRUE) dev.off()
}
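
## Illustrative usage sketch (hypothetical names as above). plotAllABT()
## dispatches here for the discrete types "M1", "M2", "M3", "M4" and "M6",
## but the function can also be called directly. geometricMean = TRUE relies
## on Gmean(), which the commented require() above suggests comes from DescTools.
if (FALSE) {
	plotAllABT_discrete(objName = "inData", type = "M2", lineAttrib = "AgeGroup",
	                    addPoints = TRUE, geometricMean = TRUE)
}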
| /scratch/gouwar.j/cran-all/cranData/AntibodyTiters/R/plotAllABT_discrete.R |
plotEachABT <- function(patientID = "patient A", objName = "inData", prefix = "",
dayStart = as.integer(NA), dayEnd = as.integer(NA), type = "weeks",
ylab = "Titer (AU/ml)", savePDF = FALSE, addPoints = FALSE, lwd = 2,
logY = TRUE, PDFwidth = 8, PDFheight = 5, main = NULL){
if(inherits(get(objName), "ABT") != TRUE) stop("The class must be ABT\n")
if(is.na(dayStart) == FALSE & is.na(dayEnd) == FALSE & dayStart >= dayEnd){
stop("dayStart must be smaller than dayEnd")
}
if(prefix == ""){
fileName <- paste(objName, "_",
sub(pattern = " ", replacement = "_", patientID),
".pdf", sep = "")
}
if(prefix != ""){
fileName <- paste(prefix, "_", objName, "_",
sub(pattern = " ", replacement = "_", patientID),
".pdf", sep = "")
}
if(is.null(main) == TRUE){
main <- paste(patientID, "@", objName, sep = "")
}
temp <- get(objName)$DATA
pmax <- get(objName)$pmax
if(is.na(dayStart) == TRUE) shortestFromSecond <- get(objName)$shortestFromSecond
if(is.na(dayEnd) == TRUE) longestFromSecond <- get(objName)$longestFromSecond
if(is.na(dayStart) == FALSE) shortestFromSecond <- dayStart
if(is.na(dayEnd) == FALSE) longestFromSecond <- dayEnd
if(type == "days"){
xlab <- "Days after second shot"
}
if(type == "weeks"){
xlab <- "Weeks after second shot"
shortestFromSecond <- shortestFromSecond*1/7
longestFromSecond <- longestFromSecond*1/7
}
if(type != "weeks" & type != "days") {
stop("type must be weeks or days.")
}
temp2 <- subset(temp, temp$ID == patientID)
if(nrow(temp2) == 0){
stop("There is no ", patientID, " in ", objName, "\n", sep = "")
}
temp2.scores <- temp2[1, grep(pattern = "score", colnames(temp2))]
temp2.dates <- temp2[1, grep(pattern = "yyyymmdd", colnames(temp2))]
secondShot <- temp2.dates[["2nd_shot_yyyymmdd"]][1]
temp2.dates <- temp2.dates[, colnames(temp2.dates) != "1st_shot_yyyymmdd"]
temp2.dates <- temp2.dates[, colnames(temp2.dates) != "2nd_shot_yyyymmdd"]
from.secondShot <- as.integer(temp2.dates) - as.integer(secondShot)
if(type == "weeks"){
from.secondShot <- from.secondShot*1/7
}
temp2.scores <- as.integer(temp2.scores)
temp2.scores <- c(temp2.scores[1:2], temp2.scores[-(1:2)][is.na(temp2.scores[-(1:2)]) == FALSE])
from.secondShot <- c(from.secondShot[1:2], from.secondShot[-(1:2)][is.na(from.secondShot[-(1:2)]) == FALSE])
temp2.scores <- c(temp2.scores[1:2], temp2.scores[-(1:2)][from.secondShot[-(1:2)] >= shortestFromSecond])
from.secondShot <- c(from.secondShot[1:2], from.secondShot[-(1:2)][from.secondShot[-(1:2)] >= shortestFromSecond])
# if(length(temp2.scores) == 3 | length(from.secondShot) == 3){
if(length(temp2.scores) == 2 | length(from.secondShot) == 2){
warning(paste("There is no enough data for ", patientID, "\n", sep = ""))
return(paste("No plot was drawn for ", patientID, sep = ""))
}
# xFromSecond <- c(shortestFromSecond*1/3, shortestFromSecond*2/3,
# from.secondShot[-(1:2)])
xRange <- longestFromSecond - shortestFromSecond
posPre <- shortestFromSecond - xRange*2/8
pos1st <- shortestFromSecond - xRange*1/8
xFromSecond <- c(posPre, pos1st, from.secondShot[-(1:2)])
xlim <- c(posPre, longestFromSecond)
if(savePDF == TRUE) pdf(file = fileName, width = PDFwidth, height = PDFheight)
total.scores <- temp[, grep(pattern = "_score", x = colnames(temp))]
if(logY == FALSE){
ymin <- min(total.scores, na.rm = TRUE)
ymax <- max(total.scores, na.rm = TRUE)
ymax <- ymax * 1.1
ylim <- c(ymin, ymax)
plot(x = 1:2, y = numeric(length = 2), xlim = xlim, ylim = ylim, main = main,
xaxt = "n", yaxt = "n", type = "n", xlab = xlab, ylab = ylab)
FROM <- floor(ymin/10000)
# BY <- floor((ymax - ymin)/5/100)*10^floor(log(ymax, base = 10))
TO <- ceiling(ymax/10^(floor(log(ymax, base = 10))))*10^(floor(log(ymax, base = 10)))
# BY <- (TO - FROM)/5
BY <- ceiling((TO - FROM)/8/(10^floor(log((TO - FROM)/8, base = 10)))) * 10^floor(log((TO - FROM)/8, base = 10))
axis(2, at = seq(from = FROM, to = TO, by = BY), lwd = lwd)
}
if(logY == TRUE){
if(length(total.scores[total.scores == 0]) > 0){
temp.ymin <- min(total.scores[total.scores != 0 & is.na(total.scores) == FALSE])
addToY <- 10^(floor(log(temp.ymin*1000, base = 10))-3)
for(n in 1:ncol(total.scores)){
total.scores[total.scores[,n] == 0 & is.na(total.scores[,n]) == FALSE, n] <-
total.scores[total.scores[,n] == 0 & is.na(total.scores[,n]) == FALSE, n] + addToY
}
# cat(addToY, " was added to the time points of score=0.\n", sep = "")
}
ymin <- min(total.scores[total.scores != 0 & is.na(total.scores) == FALSE])*0.9
ymax <- max(total.scores, na.rm = TRUE)
ymax <- ymax * 11 # 5
ylim <- c(ymin, ymax)
plot(x = 1:2, y = c(1, 1), xlim = xlim, ylim = ylim, main = main,
xaxt = "n", yaxt = "n", type = "n", xlab = xlab, ylab = ylab, log = "y")
FROM <- floor(log(ymin, base = 10))
TO <- floor(log(ymax, base = 10))+1
AT <- integer(length = (TO - FROM + 1) * 10 - (TO - FROM))
for(i in 1:length(FROM:TO)){
p <- seq(FROM, TO, 1)[i]
if(p == FROM){
START <- ((i-1)*10+1)
END <- ((i-1)*10+10)
AT[START:END] <- seq(from = (10^p)/10, to = (10^(p+1))/10, by = (10^p)/10)
}
if(p != FROM){
START <- END
END <- END + 9
AT[START:END] <- seq(from = (10^p)/10, to = (10^(p+1))/10, by = (10^p)/10)
}
}
axis(2, at = AT, labels = FALSE, lwd = lwd/2)
AT <- integer(length = (TO - FROM + 1))
for(i in 1:length(FROM:TO)){
p <- seq(FROM, TO, 1)[i]
AT[i] <- (10^(p+1))/10
}
axis(2, at = AT, labels = formatC(AT, format = "g"), lwd = lwd)
if(length(AT) < 5){
AT <- integer(length = (TO - FROM + 1))
for(i in 1:length(FROM:TO)){
p <- seq(FROM, TO, 1)[i]
AT[i] <- (10^(p+1))/10*5
}
axis(2, at = AT, labels = formatC(AT, format = "g"), lwd = lwd)
}
}
axis(1, at = xFromSecond[1:2], labels = c("pre", "1st"), lwd = lwd)
if(type == "weeks") axis(1, at = seq(floor(shortestFromSecond/10)*10, longestFromSecond, 10), lwd = lwd)
if(type == "days") axis(1, at = seq(floor(shortestFromSecond/50)*50, longestFromSecond, 50), lwd = lwd)
# axis(2, lwd = lwd)
box(lwd = lwd)
if(logY == TRUE){
if(length(temp2.scores[temp2.scores == 0 & is.na(temp2.scores) == FALSE]) > 0){
# temp.ymin <- min(temp2.scores[temp2.scores != 0 & is.na(temp2.scores) == FALSE])
# addToY <- 10^(floor(log(temp.ymin*1000, base = 10))-3)
temp2.scores[temp2.scores == 0 & is.na(temp2.scores) == FALSE] <-
temp2.scores[temp2.scores == 0 & is.na(temp2.scores) == FALSE] + addToY
cat(addToY, " was added to the time points of score=0.\n", sep = "")
}
}
if(addPoints == TRUE){
points(x = xFromSecond[1:2], y = temp2.scores[1:2], pch = 20)
}
lines(x = xFromSecond[1:3], y = temp2.scores[1:3], lwd = lwd)
lines(x = xFromSecond[-(1:2)], y = temp2.scores[-(1:2)], lwd = lwd)
if(addPoints == TRUE){
points(x = xFromSecond[-(1:2)], y = temp2.scores[-(1:2)], pch = 1)
}
if(savePDF == TRUE) dev.off()
}
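
## Illustrative usage sketch ("patient A" must match an ID present in the ABT
## object bound to the hypothetical name "inData").
if (FALSE) {
	plotEachABT(patientID = "patient A", objName = "inData",
	            type = "weeks", addPoints = TRUE)
	plotEachABT(patientID = "patient A", objName = "inData",
	            type = "days", savePDF = TRUE, prefix = "check")
}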
| /scratch/gouwar.j/cran-all/cranData/AntibodyTiters/R/plotEachABT.R |
print.ABT <- function(x, ...){
patients <- x$DATA$ID
pmax <- x$pmax
longestFromSecond <- x$longestFromSecond
shortestFromSecond <- x$shortestFromSecond
cat(" An ABT class object\n")
cat(paste(" Number of patients: ", length(patients), "\n", sep = ""))
if(length(patients) > 3){
cat(paste(" ", paste(patients[1:3], collapse = ", "), ", ...\n", sep = ""))
}
if(length(patients) <= 3){
cat(paste(" ", paste(patients[1:length(patients)], collapse = ", "), "\n", sep = ""))
}
cat(paste(" pmax: ", pmax, "\n", sep = ""))
cat(paste(" longestFromSecond: ", longestFromSecond, " (", longestFromSecond/30, " months)", "\n", sep = ""))
cat(paste(" shortestFromSecond: ", shortestFromSecond, " (", shortestFromSecond/30, " months)", "\n", sep = ""))
cat(paste(" Attrib:\n", sep = ""))
for(a in 1:length(x$Attrib)){
if(names(x$Attrib)[a] != "Age"){
cat(" ",paste(names(x$Attrib)[a], ":\n", sep = ""))
for(i in 1:length(x$Attrib[[a]])){
if(i == 1) cat(" ", paste(names(x$Attrib[[a]][i]), "=",
as.character(x$Attrib[[a]][i]), sep = ""))
if(i != 1) cat(", ", paste(names(x$Attrib[[a]][i]), "=",
as.character(x$Attrib[[a]][i]), sep = ""))
if(i == length(x$Attrib[[a]])) cat("\n")
}
}
if(names(x$Attrib)[a] == "Age"){
cat(" ",paste(names(x$Attrib)[a], ":\n", sep = ""))
temp <- summary(x$Attrib[[a]])
cat(paste(" ", "Min: ", temp[1], "\n", sep = ""))
cat(paste(" ", "1st Q: ", temp[2], "\n", sep = ""))
cat(paste(" ", "Median: ", temp[3], "\n", sep = ""))
cat(paste(" ", "Mean: ", temp[4], "\n", sep = ""))
cat(paste(" ", "3rd Q: ", temp[5], "\n", sep = ""))
cat(paste(" ", "Max: ", temp[6], "\n", sep = ""))
}
}
}
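
## Illustrative usage sketch: print() dispatches to print.ABT() for objects of
## class ABT, so typing the (hypothetical) object name at the prompt is enough.
if (FALSE) {
	inData   # equivalent to print(inData)
}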
| /scratch/gouwar.j/cran-all/cranData/AntibodyTiters/R/print.ABT.R |
# source: 20210922_AntibodyTiters_1.txt
readABT <- function(fileName = "xxx.xlsx", attribNumeric = "Age"){
# require(openxlsx)
cat("Loading the xlsx file ", fileName, "...\n", sep = "")
inData <- read.xlsx(xlsxFile = fileName)
if(length(grep(pattern = "#", substr(inData$ID, start = 1, stop = 1))) > 0){
inData <- inData[-grep(pattern = "#", substr(inData$ID, start = 1, stop = 1)),]
}
cat("Checking colnames...\n", sep = "")
minimum.colnames <- c("ID", "pre_vaccination_yyyymmdd", "pre_vaccination_score",
"1st_shot_yyyymmdd", "post_1st_shot_yyyymmdd", "post_1st_shot_score",
"2nd_shot_yyyymmdd", "point3_yyyymmdd", "point3_score")
for(i in 1:9){
if(colnames(inData)[i] != minimum.colnames[i]){
stop("Invalid colnames\n")
}
}
# 20211122
score.col.index <- grep(pattern = "_score", colnames(inData))
for(s in 1:length(score.col.index)){
class(inData[[score.col.index[s]]]) <- "numeric"
}
additional.colnames <- colnames(inData)[10:ncol(inData)]
point.index <- grep(pattern = "point", additional.colnames)
yyyymmdd.index <- grep(pattern = "_yyyymmdd", additional.colnames)
score.index <- grep(pattern = "_score", additional.colnames)
attrib.index <- grep(pattern = "point", additional.colnames, invert = TRUE)
if(length(yyyymmdd.index) != length(score.index)){
stop("Invalid colnames\n")
}
if(length(attrib.index) > 0){
attribColnames <- additional.colnames[attrib.index]
for(a in 1:length(attribColnames)){
for(n in 1:length(attribNumeric)){
if(attribColnames[a] == attribNumeric[n]){
class(inData[[attribColnames[a]]]) <- "numeric"
}
}
}
}
Attrib <- list()
if(length(attrib.index) > 0){
attribColnames <- additional.colnames[attrib.index]
for(a in 1:length(attribColnames)){
if(is.numeric(inData[[attribColnames[a]]]) == FALSE & is.integer(inData[[attribColnames[a]]]) == FALSE){
colNames <- as.character(unique(inData[[attribColnames[a]]]))
for(c in 1:length(colNames)){
if(is.na(colNames[c]) == FALSE){
Attrib[[attribColnames[a]]][colNames[c]] <-
length(which(inData[[attribColnames[a]]] == unique(inData[[attribColnames[a]]])[c]))
}
if(is.na(colNames[c]) == TRUE){
Attrib[[attribColnames[a]]][colNames[c]] <-
length(which(is.na(inData[[attribColnames[a]]]) == TRUE))
}
}
}
if(is.numeric(inData[[attribColnames[a]]]) == TRUE | is.integer(inData[[attribColnames[a]]]) == TRUE){
# Attrib[[attribColnames[a]]] <- c(min(inData[[attribColnames[a]]]), max(inData[[attribColnames[a]]]))
Attrib[[attribColnames[a]]] <- inData[[attribColnames[a]]]
}
}
}
p <- 4
for(a in seq(1, length(point.index), 2)){
curr.colname <- additional.colnames[a]
next.colname <- additional.colnames[a+1]
POINT <- unlist(strsplit(curr.colname, split = "_"))[1]
if(p != as.integer(unlist(strsplit(POINT, split = "point"))[2])){
stop("Invalid colnames\n")
}
POINT <- unlist(strsplit(next.colname, split = "_"))[1]
if(p != as.integer(unlist(strsplit(POINT, split = "point"))[2])){
stop("Invalid colnames\n")
}
if(next.colname != paste(POINT, "_", "score", sep = "")){
stop("Invalid colnames\n")
}
if(curr.colname != paste(POINT, "_", "yyyymmdd", sep = "")){
stop("Invalid colnames\n")
}
p <- p + 1
}
cat("Checking ID uniqueness...\n", sep = "")
if(length(unique(inData$ID)) != length(inData$ID)){
stop("Identical IDs were found.\n", sep = "")
}
yyyymmddCols <- colnames(inData)[grep(pattern = "_yyyymmdd", colnames(inData))]
# 20211213
key <- FALSE
for(i in 1:length(yyyymmddCols)){
# 20211213
dayCheck <- inData[[yyyymmddCols[i]]]
noNAindex <- is.na(dayCheck) == FALSE
for(d in 1:length(dayCheck)){
if(is.na(dayCheck[d]) == FALSE){
if(nchar(dayCheck[d]) != 8){
cat("Date must be written in yyyymmdd: ID=", inData$ID[d], " value=", dayCheck[d],
" colname=", yyyymmddCols[i], "\n", sep = "")
key <- TRUE
}
}
}
inData[[yyyymmddCols[i]]] <- as.Date(as.character(inData[[yyyymmddCols[i]]]), "%Y%m%d")
}
# 20211213
if(key == TRUE) stop("Reading failed. Check the dates")
pmax <- p - 1
temp <- inData[, grep(pattern = "yyyymmdd", colnames(inData))]
longestFromSecond <- 0
shortestFromSecond <- 600
for(i in 1:nrow(temp)){
secondShot <- as.integer(temp[["2nd_shot_yyyymmdd"]][i])
for(p in 3:pmax){
colName <- paste("point", p, "_yyyymmdd", sep = "")
if(is.na(as.integer(temp[[colName]][i])) == FALSE){
longestFromSecond <- max(longestFromSecond,
as.integer(temp[[colName]][i]) - secondShot)
shortestFromSecond <- min(shortestFromSecond,
as.integer(temp[[colName]][i]) - secondShot)
}
}
}
# Addition of point*_from2ndShot columns
for(p in 3:pmax){
colName <- paste("point", p, "_from2ndShot", sep = "")
inData[[colName]] <- as.integer(NA)
}
for(i in 1:nrow(inData)){
secondShot <- as.integer(inData[["2nd_shot_yyyymmdd"]][i])
for(p in 3:pmax){
colName <- paste("point", p, "_yyyymmdd", sep = "")
Day <- as.integer(inData[[colName]][i]) - secondShot
colName <- paste("point", p, "_from2ndShot", sep = "")
inData[[colName]][i] <- Day
}
}
post2ndDays <- as.integer(inData[["point3_yyyymmdd"]] - inData[["2nd_shot_yyyymmdd"]])
# Addition of M1_*_from2ndShot columns
firstMonth <- min(ceiling(shortestFromSecond/30), ceiling(post2ndDays/30), na.rm = TRUE)
lastMonth <- ceiling(longestFromSecond/30)
for(m in firstMonth:lastMonth){
colName <- paste("M1_", m, "_from2ndShot", sep = "")
inData[[colName]] <- as.integer(NA)
}
for(i in 1:nrow(inData)){
secondShot <- as.integer(inData[["2nd_shot_yyyymmdd"]][i])
for(p in 3:pmax){
colName <- paste("point", p, "_yyyymmdd", sep = "")
Day <- as.integer(inData[[colName]][i]) - secondShot
colName <- paste("point", p, "_score", sep = "")
Score <- inData[[colName]][i]
Month <- ceiling(Day/30)
if(is.na(Day) == FALSE & is.na(Score) == FALSE & is.na(Month) == FALSE){
colName <- paste("M1_", Month, "_from2ndShot", sep = "")
inData[[colName]][i] <- Score
}
}
}
# Addition of M2_*-*_from2ndShot columns
firstM2group <- min(ceiling(shortestFromSecond/60), ceiling(post2ndDays/60), na.rm = TRUE)
lastM2group <- ceiling(longestFromSecond/60)
for(m in firstM2group: lastM2group){
M2start <- (m-1)*2 + 1
M2end <- (m-1)*2 + 2
colName <- paste("M2_", M2start, "-", M2end, "_from2ndShot", sep = "")
inData[[colName]] <- as.integer(NA)
}
for(i in 1:nrow(inData)){
secondShot <- as.integer(inData[["2nd_shot_yyyymmdd"]][i])
for(p in 3:pmax){
colName <- paste("point", p, "_yyyymmdd", sep = "")
Day <- as.integer(inData[[colName]][i]) - secondShot
colName <- paste("point", p, "_score", sep = "")
Score <- inData[[colName]][i]
m <- ceiling(Day/60)
if(is.na(Day) == FALSE & is.na(Score) == FALSE & is.na(m) == FALSE){
M2start <- (m-1)*2 + 1
M2end <- (m-1)*2 + 2
colName <- paste("M2_", M2start, "-", M2end, "_from2ndShot", sep = "")
inData[[colName]][i] <- Score
}
}
}
# Addition of M3_*-*_from2ndShot columns
firstM3group <- min(ceiling(shortestFromSecond/90), ceiling(post2ndDays/90), na.rm = TRUE)
lastM3group <- ceiling(longestFromSecond/90)
for(m in firstM3group: lastM3group){
M3start <- (m-1)*3 + 1
M3end <- (m-1)*3 + 3
colName <- paste("M3_", M3start, "-", M3end, "_from2ndShot", sep = "")
inData[[colName]] <- as.integer(NA)
}
for(i in 1:nrow(inData)){
secondShot <- as.integer(inData[["2nd_shot_yyyymmdd"]][i])
for(p in 3:pmax){
colName <- paste("point", p, "_yyyymmdd", sep = "")
Day <- as.integer(inData[[colName]][i]) - secondShot
colName <- paste("point", p, "_score", sep = "")
Score <- inData[[colName]][i]
m <- ceiling(Day/90)
if(is.na(Day) == FALSE & is.na(Score) == FALSE & is.na(m) == FALSE){
M3start <- (m-1)*3 + 1
M3end <- (m-1)*3 + 3
colName <- paste("M3_", M3start, "-", M3end, "_from2ndShot", sep = "")
inData[[colName]][i] <- Score
}
}
}
# Addition of M4_*-*_from2ndShot columns
firstM4group <- min(ceiling(shortestFromSecond/120), ceiling(post2ndDays/120), na.rm = TRUE)
lastM4group <- ceiling(longestFromSecond/120)
for(m in firstM4group: lastM4group){
M4start <- (m-1)*4 + 1
M4end <- (m-1)*4 + 4
colName <- paste("M4_", M4start, "-", M4end, "_from2ndShot", sep = "")
inData[[colName]] <- as.integer(NA)
}
for(i in 1:nrow(inData)){
secondShot <- as.integer(inData[["2nd_shot_yyyymmdd"]][i])
for(p in 3:pmax){
colName <- paste("point", p, "_yyyymmdd", sep = "")
Day <- as.integer(inData[[colName]][i]) - secondShot
colName <- paste("point", p, "_score", sep = "")
Score <- inData[[colName]][i]
m <- ceiling(Day/120)
if(is.na(Day) == FALSE & is.na(Score) == FALSE & is.na(m) == FALSE){
M4start <- (m-1)*4 + 1
M4end <- (m-1)*4 + 4
colName <- paste("M4_", M4start, "-", M4end, "_from2ndShot", sep = "")
inData[[colName]][i] <- Score
}
}
}
# Addition of M6_*-*_from2ndShot columns
firstM6group <- min(ceiling(shortestFromSecond/180), ceiling(post2ndDays/180), na.rm = TRUE)
lastM6group <- ceiling(longestFromSecond/180)
for(m in firstM6group: lastM6group){
M6start <- (m-1)*6 + 1
M6end <- (m-1)*6 + 6
colName <- paste("M6_", M6start, "-", M6end, "_from2ndShot", sep = "")
inData[[colName]] <- as.integer(NA)
}
for(i in 1:nrow(inData)){
secondShot <- as.integer(inData[["2nd_shot_yyyymmdd"]][i])
for(p in 3:pmax){
colName <- paste("point", p, "_yyyymmdd", sep = "")
Day <- as.integer(inData[[colName]][i]) - secondShot
colName <- paste("point", p, "_score", sep = "")
Score <- inData[[colName]][i]
m <- ceiling(Day/180)
if(is.na(Day) == FALSE & is.na(Score) == FALSE & is.na(m) == FALSE){
M6start <- (m-1)*6 + 1
M6end <- (m-1)*6 + 6
colName <- paste("M6_", M6start, "-", M6end, "_from2ndShot", sep = "")
inData[[colName]][i] <- Score
}
}
}
inData <- list(DATA = inData, pmax = pmax,
longestFromSecond = longestFromSecond,
shortestFromSecond = shortestFromSecond,
Attrib = Attrib)
class(inData) <- "ABT"
# assign(objName, inData, envir = .GlobalEnv)
# cat("An ABT object \"", objName, "\" has been successfully assigned in .GlobalEnv.\n", sep = "")
cat("This object contains a data.frame named \"DATA\", numeric vectors \"pmax\",
\"longestFromSecond\" and \"shortestFromSecond\", and a list named \"Attrib\".\n", sep = "")
cat("patients: ", nrow(inData$DATA), "\n", sep = "")
cat("pmax is ", pmax, "\n", sep = "")
cat("longestFromSecond is ", longestFromSecond, " (", longestFromSecond/30, " months)", "\n", sep = "")
cat("shortestFromSecond is ", shortestFromSecond, " (", shortestFromSecond/30, " months)", "\n", sep = "")
if(length(attrib.index) == 0){
cat("There is no contents in Attrib.\n")
}
if(length(attrib.index) > 0){
for(a in 1:length(inData$Attrib)){
if(inherits(inData$DATA[[names(inData$Attrib[a])]], "numeric") != TRUE){
cat(paste(names(inData$Attrib)[a], ":\n", sep = ""))
for(i in 1:length(inData$Attrib[[a]])){
cat(" ", paste(names(inData$Attrib[[a]][i]), ": ",
as.character(inData$Attrib[[a]][i]), "\n", sep = ""))
}
}
if(inherits(inData$DATA[[names(inData$Attrib[a])]], "numeric") == TRUE){
cat(paste(names(inData$Attrib)[a], ":\n", sep = ""))
temp <- summary(inData$Attrib[[a]])
cat(paste(" ", "Min: ", temp[1], "\n"))
cat(paste(" ", "1st Q: ", temp[2], "\n"))
cat(paste(" ", "Median: ", temp[3], "\n"))
cat(paste(" ", "Mean: ", temp[4], "\n"))
cat(paste(" ", "3rd Q: ", temp[5], "\n"))
cat(paste(" ", "Max: ", temp[6], "\n"))
}
}
}
return(inData)
}
| /scratch/gouwar.j/cran-all/cranData/AntibodyTiters/R/readABT.R |
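# subset.ABT(): S3 subset method for "ABT" objects. It filters the underlying
# DATA data.frame with the usual subset() semantics, rebuilds the per-attribute
# summaries stored in Attrib for the remaining rows, and returns a new "ABT"
# object keeping the original pmax, longestFromSecond and shortestFromSecond.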
subset.ABT <- function(x, ...){
DATA <- x$DATA
inData <- subset(DATA, ...)
additional.colnames <- colnames(inData)[10:ncol(inData)]
point.index <- grep(pattern = "point", additional.colnames)
yyyymmdd.index <- grep(pattern = "_yyyymmdd", additional.colnames)
score.index <- grep(pattern = "_score", additional.colnames)
additional.DF <- data.frame(name = additional.colnames)
additional.DF$noPoint <- FALSE
additional.DF$noPoint[grep(pattern = "point", additional.colnames, invert = TRUE)] <- TRUE
additional.DF$noM1 <- FALSE
additional.DF$noM1[grep(pattern = "M1_", additional.colnames, invert = TRUE)] <- TRUE
additional.DF$noM2 <- FALSE
additional.DF$noM2[grep(pattern = "M2_", additional.colnames, invert = TRUE)] <- TRUE
additional.DF$noM3 <- FALSE
additional.DF$noM3[grep(pattern = "M3_", additional.colnames, invert = TRUE)] <- TRUE
additional.DF$noM4 <- FALSE
additional.DF$noM4[grep(pattern = "M4_", additional.colnames, invert = TRUE)] <- TRUE
additional.DF$noM6 <- FALSE
additional.DF$noM6[grep(pattern = "M6_", additional.colnames, invert = TRUE)] <- TRUE
additional.DF$attrib <- FALSE
additional.DF$attrib[rowSums(additional.DF[,2:7]) == 6] <- TRUE
attrib.index <- which(additional.DF$attrib == TRUE)
attribColnames <- additional.colnames[attrib.index]
Attrib <- list()
for(a in 1:length(attribColnames)){
if(is.numeric(inData[[attribColnames[a]]]) == FALSE & is.integer(inData[[attribColnames[a]]]) == FALSE){
colNames <- as.character(unique(inData[[attribColnames[a]]]))
for(c in 1:length(colNames)){
if(is.na(colNames[c]) == FALSE){
Attrib[[attribColnames[a]]][colNames[c]] <-
length(which(inData[[attribColnames[a]]] == unique(inData[[attribColnames[a]]])[c]))
}
if(is.na(colNames[c]) == TRUE){
Attrib[[attribColnames[a]]][colNames[c]] <-
length(which(is.na(inData[[attribColnames[a]]]) == TRUE))
}
}
}
if(is.numeric(inData[[attribColnames[a]]]) == TRUE | is.integer(inData[[attribColnames[a]]]) == TRUE){
# Attrib[[attribColnames[a]]] <- c(min(inData[[attribColnames[a]]]), max(inData[[attribColnames[a]]]))
Attrib[[attribColnames[a]]] <- inData[[attribColnames[a]]]
}
}
pmax <- x$pmax
longestFromSecond <- x$longestFromSecond
shortestFromSecond <- x$shortestFromSecond
inData <- list(DATA = inData, pmax = pmax,
longestFromSecond = longestFromSecond,
shortestFromSecond = shortestFromSecond,
Attrib = Attrib)
class(inData) <- "ABT"
return(inData)
}
| /scratch/gouwar.j/cran-all/cranData/AntibodyTiters/R/subset.ABT.R |
# source: 20210924_AntibodyTiters_1.txt
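# toyABT(): writes a toy antibody-titer workbook (xlsx) for testing readABT().
# Starting from the template returned by emptyABT(), it fills in random sampling
# dates, log-normally distributed scores that decay from the 3rd time point on
# with a half-life of about 90 days, optional "outsider" patients that are
# already high before vaccination, optional missing values, and the requested
# attribute columns (e.g. Sex, Age, VeryLow).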
toyABT <- function(fileName = "toy.xlsx", pmax = 7, patients = 20, outsiderPercent = 2, NaPercent = 10,
Attrib = c("Sex", "Age", "VeryLow"),
attribFactors = list(c("F", "M"), c(18, 80), c(TRUE, FALSE))){
# pmax should be less than 18
# require(openxlsx)
if(length(attribFactors) != length(Attrib)){
stop("length(attribFactors) and length(Attrib) must be the same.")
}
toyDF <- emptyABT(pmax = pmax, returnDF = TRUE,
Attrib = Attrib, attribFactors = attribFactors)
toyDF[2:(patients+1),] <- NA
patientIDs <- paste("patient ", 1:patients, sep = "")
toyDF[["ID"]][2:(patients+1)] <- patientIDs
# days
colIndexs <- c(2, 4, 5, 7, 8, seq(10, (10+(pmax-4)*2), 2))
for(c in 1:5){
colIndex <- colIndexs[c]
DAY <- as.integer(as.Date(toyDF[[colIndex]][1], format = "%Y%m%d"))
for(p in 1:patients){
temp <- as.character(as.Date(DAY + sample(0:20, size = 1),
origin = "1970-01-01"))
temp <- paste(strsplit(temp, split = "-")[[1]], collapse = "")
toyDF[[colIndex]][(p+1)] <- temp
}
}
for(c in 6:length(colIndexs)){
colIndex <- colIndexs[c]
DAY <- as.integer(as.Date(toyDF[[colIndex]][1], format = "%Y%m%d"))
for(p in 1:patients){
temp <- as.character(as.Date(DAY + sample(0:60, size = 1),
origin = "1970-01-01"))
temp <- paste(strsplit(temp, split = "-")[[1]], collapse = "")
toyDF[[colIndex]][(p+1)] <- temp
}
}
for(p in 1:patients){
temp <- as.integer(toyDF[p+1, colIndexs])
temp <- as.character(temp[order(temp, decreasing = FALSE)])
toyDF[p+1, colIndexs] <- temp
}
# scores
colIndexs <- c(3, 6, 9, seq(11, (11+(pmax-4)*2), 2))
smax <- max(toyDF[1, colIndexs], na.rm = TRUE)
for(c in 1:3){
colIndex <- colIndexs[c]
score <- toyDF[[colIndex]][1]
# scores <- sample((score - score*(1/2)):(score + score*(1/2)), size = patients, replace = TRUE)
# scores <- exp(rnorm(n = patients, mean = log(score), sd = 0.2))
scores <- exp(rnorm(n = patients, mean = log(score), sd = 0.4))
toyDF[[colIndex]][2:(patients+1)] <- scores
}
if(outsiderPercent > 0){
# Already high before vaccination
outsiderIndex <- sample(1:patients, size = ceiling(patients * 0.02 * outsiderPercent))
for(o in 1:length(outsiderIndex)){
patientScores <- as.numeric(toyDF[1+outsiderIndex[o], colIndexs])
# patientScores[1] <- sample((smax*0.3):(smax*0.8), size = 1)
patientScores[1] <- exp(rnorm(n = 1, mean = log(smax), sd = 0.05))
patientScores[3] <- patientScores[2] + patientScores[2] + patientScores[1]
patientScores[2] <- patientScores[2] + patientScores[1]
# patientScores[3] <- patientScores[2] + patientScores[1] * sample(1:3, size = 1) * 0.1
toyDF[1+outsiderIndex[o], colIndexs] <- patientScores
}
}
getCt <- function(t, C0 = 1000, Thalf = 90){
Ct <- C0 / 2^(t/Thalf)
return(Ct)
}
Thalf <- 90
for(p in 1:patients){
T0 <- as.Date(as.character(toyDF[[colIndexs[3]-1]][p+1]), "%Y%m%d")
C0 <- toyDF[[colIndexs[3]]][p+1]
for(c in 4:length(colIndexs)){
DAY <- as.Date(as.character(toyDF[[colIndexs[c]-1]][p+1]), "%Y%m%d")
Period <- as.integer(DAY - T0)
tempCt <- getCt(t = Period, C0 = C0, Thalf = Thalf)
toyDF[[colIndexs[c]]][p+1] <- exp(rnorm(n = 1, mean = log(tempCt), sd = 0.05))
}
}
## Attrib
for(c in 1:length(Attrib)){
colName <- Attrib[c]
if(colName != "Age" | colName != "VeryLow"){
toyDF[[colName]][2:(patients+1)] <- sample(attribFactors[[c]],
size = patients, replace = TRUE)
}
if(colName == "Age"){
toyDF[[colName]][2:(patients+1)] <- sample(attribFactors[[c]][1]:attribFactors[[c]][2],
size = patients, replace = TRUE)
}
if(colName == "VeryLow"){
toyDF[[colName]][2:(patients+1)] <- FALSE
VLindex <- sample(2:(patients+1), size = ceiling(patients/20), replace = TRUE)
        VLindex <- VLindex[!(VLindex %in% (outsiderIndex + 1))] # exclude rows already used as outsiders (row = patient index + 1)
toyDF[[colName]][VLindex] <- TRUE
for(v in 1:length(VLindex)){
patientScores <- as.numeric(toyDF[VLindex[v], colIndexs])
# patientScores[1:2] <- sample(1:3, size = 1)
# patientScores[3:length(patientScores)] <- sample(1:3,
# size = length(patientScores) -2, replace = TRUE)
toyDF[VLindex[v], colIndexs] <- exp(log(patientScores)*0.6)
}
}
}
# NA
if(NaPercent > 0){
colIndexsC <- c(2, 5, 8, seq(10, (10+(pmax-4)*2), 2))
colIndexsS <- c(3, 6, 9, seq(11, (11+(pmax-4)*2), 2))
NaIndex <- sample(1:patients, size = ceiling(patients * 0.02 * NaPercent))
for(n in 1:length(NaIndex)){
x <- sample(1:length(colIndexsC), size = 1)
toyDF[1+NaIndex[n], colIndexsC[x]] <- NA
toyDF[1+NaIndex[n], colIndexsS[x]] <- NA
}
}
# Age
colIndexsC <- c(2, 5, 8, seq(10, (10+(pmax-4)*2), 2))
colIndexsS <- c(3, 6, 9, seq(11, (11+(pmax-4)*2), 2))
OldIndex <- which(toyDF$Age >= 35)
for(o in 1:length(OldIndex)){
# toyDF[OldIndex[o], colIndexsS] <- toyDF[OldIndex[o], colIndexsS] * 0.8
tempScore <- toyDF[OldIndex[o], colIndexsS]
toyDF[OldIndex[o], colIndexsS]<- exp(log(tempScore)*0.98)
}
OldIndex <- which(toyDF$Age >= 55)
for(o in 1:length(OldIndex)){
tempScore <- toyDF[OldIndex[o], colIndexsS]
toyDF[OldIndex[o], colIndexsS]<- exp(log(tempScore)*0.98)
}
OldIndex <- which(toyDF$Age >= 65)
for(o in 1:length(OldIndex)){
tempScore <- toyDF[OldIndex[o], colIndexsS]
toyDF[OldIndex[o], colIndexsS]<- exp(log(tempScore)*0.98)
}
write.xlsx(toyDF, file = fileName, overwrite = TRUE)
}
| /scratch/gouwar.j/cran-all/cranData/AntibodyTiters/R/toyABT.R |
#' Anxiety & Confinement
#'
#' Data from the anxiety and confinement study from Alvarado et al. (2022) <doi: 10.3390/bs12100398>.
#'
#' @keywords internal
"_PACKAGE"
## usethis namespace: start
#' @importFrom data.table :=
#' @importFrom data.table .BY
#' @importFrom data.table .EACHI
#' @importFrom data.table .GRP
#' @importFrom data.table .I
#' @importFrom data.table .N
#' @importFrom data.table .NGRP
#' @importFrom data.table .SD
#' @importFrom data.table data.table
#' @importFrom lifecycle deprecated
## usethis namespace: end
NULL
| /scratch/gouwar.j/cran-all/cranData/AnxietySleep/R/AnxietySleep-package.R |
#' Data from the anxiety and confinement study.
#'
#' @description
#' `r lifecycle::badge('stable')`
#'
#' This dataset contains the variables used in the anxiety and confinement study carried out
#' by Alvarado-Aravena et al. 2022.
#'
#'
#' @format A data frame with 617 rows and 7 variables:
#'
#' \itemize{
#' \item{\code{id}: Factor. An identification code for each subject.}
#' \item{\code{sex}: Factor w/ 2 levels "Female", "Male". Sex of participants.}
#'   \item{\code{zone}: Factor w/ 2 levels "CZ", "PZ". Zone in which the subject was at the time of answering the questionnaire, either CZ (Confinement Zone) or PZ (Partial confinement Zone).}
#' \item{\code{beck_global}: Integer. Global score of Beck Anxiety Inventory.}
#' \item{\code{pits_global}: Integer. Global score of Pittsburgh Sleep Quality Index.}
#' \item{\code{age}: Integer. Age of the subjects in years.}
#'   \item{\code{cat_age}: Factor w/ 4 levels "18-25", "26-40", "41-50", ">50". Age of the subjects in years, grouped into categories.}
#' }
#'
#' @examples
#' # Mean age grouped by sex and zone using `data.table` syntax
#' anxiety[, # No filtering (i)
#' list(mean_age = mean(age)), # Action to do (j)
#' list(sex, zone)] # Grouping vars (by)
#'
#' # Mean PSQI score grouped by sex and zone, for those with
#' # an age greater than 18 AND a Beck score greater than 10.
#' anxiety[age > 18 & beck_global > 10,
#' list(mean_psqi = mean(pits_global)),
#' list(sex, zone)]
#'
#' @source
#' - Alvarado-Aravena, C., Arriaza, K., Castillo-Aguilar, M., Flores, K.,
#' Dagnino-Subiabre, A., Estrada-Goic, C., & Núñez-Espinosa, C. (2022). Effect
#' of Confinement on Anxiety Symptoms and Sleep Quality during the COVID-19
#' Pandemic. Behavioral Sciences, 12(10), 398.
"anxiety"
| /scratch/gouwar.j/cran-all/cranData/AnxietySleep/R/dataset.R |
library(MASS)
library(lpSolve)
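# block.design(): converts a (treatments x blocks) incidence matrix N into the
# design layout, one row per block listing the treatments it contains (with
# multiplicity when a treatment occurs more than once in the block).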
block.design=function(N)
{
design=NULL
v=nrow(N)
b=ncol(N)
kvec=t(N)%*%matrix(1,v,1)
k=max(kvec)
for (i in 1:b)
{
trts=which(N[,i]>0)
design=rbind(design,rep(trts,N[trts,i]))
}
return(design)
}
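# g(): reference value of the weighted A-optimality criterion for a design with
# v test treatments, b blocks of size k, control pattern (x, z), weight alpha
# and correlation rho; it is minimized over (x, z) in getts() and used as the
# numerator of the A-efficiency computed in btibts().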
g=function(v,b,k,x,z,alpha,rho=0)
{
A=k*(v-1)*(b*(k-x)-z)-(1-rho)*(v*(b*(k-x)-z)-b*k*k-b*x*x-2*x*z-z+2*k*(b*x+z))
B=b*(1-rho)*(k*(b*x+z)-(b*x*x+2*x*z+z))+rho*(b*x+z)*(b*k-b*x-z)
value=v*k*(((1-alpha+alpha*v)*(v-1)^2)/A+(1-alpha)*b/B)
return(value)
}
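# getts(): searches all feasible pairs (x, z) with 0 <= x < floor(k/2) and
# 0 <= z <= b (excluding x = z = 0) for the minimizer(s) of g(); ties are kept,
# so the result is either a single pair or a matrix with one (x, z) per row.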
getts=function(v,b,k,alpha,rho)
{
kby2=floor(k/2)
ming=999999999
ts=NULL
for (x in 0:(kby2-1))
{
for(z in 0:b)
{
if (!(x==0 & z==0))
{
temp=g(v,b,k,x,z,alpha,rho)
if (temp<ming)
{
ming=temp
ts=c(x,z)
} else {
if (temp==ming) ts=rbind(ts,c(x,z))
}
}
}
}
return(ts)
}
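# getrow(): generates one additional treatment row of the incidence matrix by
# solving a 0/1 linear program (lpSolve::lp). The constraints enforce the row
# replication r, the block sizes k, the required concurrences with the control
# (lambda0) and with previously fixed test treatments (lambda), and keep the new
# row different from the rows stored in the taboo matrix T. If the LP is
# infeasible, N1 is returned unchanged.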
getrow=function(v,b,k,r,r0,lambda,lambda0,N1,T,rownum,relaxed)
{
kvec_obt=colSums(N1)
w=matrix(0,1,b)
for (j in 1:b)
{
if (kvec_obt[j]==0) w[,j]=1
else w[,j]=1/kvec_obt[j]
}
obj=w
constr1=matrix(1,1,b)
constr2=matrix(0,b,b)
for (j in 1:b) constr2[j,j]=1
constr3=N1
constr4=T
if (relaxed>0) constr=rbind(constr1,constr2,constr4) else constr=rbind(constr1,constr2,constr3,constr4)
dir1=rep("=", times=(1))
dir2=rep("<=",times=(b))
dim(dir2)=c(b,1)
dir3=rep("=",times=(nrow(N1)))
dim(dir3)=c(nrow(N1),1)
dir4=rep("<",times=(nrow(constr4)))
dim(dir4)=c(nrow(constr4),1)
if (relaxed>0) dir=rbind(dir1,dir2,dir4) else dir=rbind(dir1,dir2,dir3,dir4)
rhs1=r
rhs2=k-kvec_obt
dim(rhs2)=c(b,1)
rhs3=matrix(0,nrow(N1),1)
for (j in 1:nrow(N1))
{
if (sum(N1[j,])>0)
{
if (j==1) rhs3[j,]=lambda0 else rhs3[j,]=lambda
} else rhs3[j,]=0
}
rhs4=matrix(r-0.5,nrow(constr4),1)
if (relaxed>0) rhs=rbind(rhs1,rhs2,rhs4) else rhs=rbind(rhs1,rhs2,rhs3,rhs4)
sol=lp (direction = "max", obj, constr, dir, rhs,transpose.constraints = TRUE, all.bin=TRUE, use.rw=TRUE)
if (sol[[28]]==0)
{
row=sol[[12]]
dim(row)=c(1,b)
if (rownum>nrow(N1)) N1=rbind(N1,row) else N1[rownum,]=row
}
return(N1)
}
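# alternate.sol(): backtracking step used when getrow() fails. Up to four of
# the already constructed treatment rows are tentatively emptied (and added to
# the taboo list) and regenerated; on success it returns the indices of the
# freed rows together with the repaired incidence matrix, otherwise 0.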
alternate.sol=function(v,b,k,r,r0,lambda,lambda0,N1,T,relaxed)
{
row_detected=0
result=0
k0=1
while (k0<=min(4,nrow(N1)) & row_detected==0)
{
row_indices=combn(2:nrow(N1),k0)
nr=ncol(row_indices)
j=1
while(j<=nr & row_detected==0)
{
rows=row_indices[,j]
T_temp=rbind(T,N1[rows,])
N1_temp=N1
N1_temp[rows,]=matrix(0,1,b)
cnt=0
for (m in 1:k0)
{
rownum=rows[m]
N1_temp=getrow(v,b,k,r,r0,lambda,lambda0,N1_temp,T_temp,rownum,relaxed)
if (sum(N1_temp[rownum,])>0) cnt=cnt+1
}
if (cnt==k0)
{
row_detected=1
result=list(rows,N1_temp)
}
j=j+1
}
k0=k0+1
}
return(result)
}
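# btibts(): tries (up to ntrial random restarts) to construct a balanced
# treatment incomplete block (BTIB) design for v test treatments and one
# control in b blocks of size k with control pattern (t, s). The control row is
# fixed first and the remaining rows are built with getrow()/alternate.sol();
# on success the design, its incidence and concurrence matrices and the
# A-efficiency relative to g() are returned, otherwise an explanatory message.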
btibts=function(v,b,k,t,s,alpha,rho=0,ntrial)
{
r0=s+b*t
r=(b*k-r0)/v
lambda0=(s*(t+1)*(k-t-1)+(b-s)*t*(k-t))/v
lambda=(r*(k-1)-lambda0)/(v-1)
q2=s*(k-t-1)/v
q3=(q2*(k-t-2)+(r-q2)*(k-t-1))/(v-1)
if (r-floor(r)==0 & q2-floor(q2)==0 & q3-floor(q3)==0 & lambda-floor(lambda)==0 & lambda0-floor(lambda0)==0)
{
trial=0
success=0
while(trial<ntrial & success==0)
{
trial=trial+1
N1=matrix(0,1,b)
cols=sample(b,b-s)
N1[1,cols]=t
if (s>0)
{
remblocks=setdiff(1:b,cols)
N1[1,remblocks]=t+1
}
T=matrix(0,1,b)
i=2
relaxed=0
breaker=0
while (i<=(v+1) & breaker==0)
{
N1=getrow(v,b,k,r,r0,lambda,lambda0,N1,T,i,relaxed)
if (nrow(N1)<i)
{
if(nrow(N1)>=2)
{
temp=alternate.sol(v,b,k,r,r0,lambda,lambda0,N1,T,relaxed)
rows=temp[[1]]
if (all(rows>0))
{
T=rbind(T,N1[rows,])
N1=temp[[2]]
} else breaker=1
if (nrow(T)>5*v) breaker=1
} else breaker=1
}
i=nrow(N1)+1
}
if (nrow(N1)==(v+1))
{
success=1
NNP=N1%*%t(N1)
rvec=rowSums(N1)
R=diag(rvec,nrow=(v+1))
diag(NNP)=rvec
design=block.design(N1)
C=R-NNP/k
P=matrix(0,v*(v+1)/2,v+1)
temp=0
for(ii in 1:v)
{
for(jj in (ii+1):(v+1))
{
temp=temp+1
P[temp,ii]=1
P[temp,jj]=-1
}
}
Pc=P[1:v,]
Pt=P[(v+1):(v*(v+1)/2),]
if(length(Pt)==(v+1)) dim(Pt)=c(1,(v+1))
Cinv=ginv(C)
den=(1-alpha)*sum(diag(Pc%*%Cinv%*%t(Pc)))+alpha*sum(diag(Pt%*%Cinv%*%t(Pt)))
nume=g(v,b,k,t,s,alpha,rho)
Aeff=nume/den
if (s>0) type="S" else type="R"
parameter=c(v,b,k,t,s,alpha, rho,round(Aeff,3),type)
names(parameter)=c("v","b","k","t","s","alpha","rho","A-eff","type")
txtparm=paste(as.character(v),as.character( b), as.character( k),as.character( t),as.character( s), as.character( alpha), as.character( rho))
result=list(parameters=noquote(parameter),design=design,N=N1,NNP=NNP)
} else {
result="Design not found"
parameter=c(v,b,k,t,s, alpha, rho)
}
}
} else {
result="BTIB design does not exist for these parameters"
parameter=c(v,b,k,t,s,alpha, rho)
}
return(result)
}
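# wtaoptbtib(): entry point for weighted-A-optimal BTIB designs. It first
# checks a feasibility condition on (v, b, k, alpha, rho), obtains the candidate
# (t, s) values from getts() and then calls btibts() for each of them.
# Illustrative call (parameter values chosen only as an example):
#   wtaoptbtib(v = 4, b = 6, k = 3, alpha = 0.5, rho = 0, ntrial = 5)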
wtaoptbtib=function(v,b,k,alpha,rho=0,ntrial=5)
{
if(k%%2!=0)
{
if (alpha/(1-alpha) <=((2*v*k-2*v-k+1)^2-(k-1)^2*(v-1)^2+(rho*rho*(2*v-k-1)^2+2*rho*(2*v-k-1)*(2*v*k-2*v-k+1)))/v*((k-1)*(v-1))^2) c1=1 else c1=0
} else {
if (alpha/(1-alpha) <=(2*v*k-2*v-k)^2-k^2*(v-1)^2+(rho*rho*(2*v-k)^2+2*rho*(2*v-k)*(2*v*k-2*v-k))/v*(k*(v-1))^2) c1=1 else c1=0
}
if (c1==1)
{
ts=getts(v,b,k,alpha,rho)
dim(ts)=c(length(ts)/2,2)
if(nrow(ts)>1)
{
output=vector("list",nrow(ts))
for (i in 1:nrow(ts))
{
t=ts[i,1]
s=ts[i,2]
output[[i]]=btibts(v,b,k,t,s,alpha,rho,ntrial)
}
} else {
t=ts[1,1]
s=ts[1,2]
output=btibts(v,b,k,t,s,alpha,rho,ntrial)
}
} else {
output="Certain conditions are not satisfied"
}
return(output)
}
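# getr0(): scans the candidate numbers of control replications r0, keeps the
# value r0star minimizing the bound H1 and also returns M, the smallest value
# of min(H1, H2) over all candidates, which aoptgdtd() uses as the reference
# value when computing the A-efficiency.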
getr0=function(v,b,k)
{
minH1=99999999
M=9999999
for (r0 in 1:(b*k-v))
{
r=(b*k-r0)/v
if (r-floor(r)==0)
{
rr0=floor(r)
alphar0=floor(r0/b)
Rr0=(r0-b*alphar0)*(alphar0+1)^2+(b-r0+b*alphar0)*alphar0^2
m1r0=(k*r0-Rr0)/(v*k)
Ar0=(b*k-r0)*(k-1)/k
B1r0=((b*k-r0-v*rr0)*((rr0+1)*(k-1))^2+(v-b*k+r0+v*rr0)*(rr0*(k-1))^2)/k^2
Abarr0=b*k*(k-1)-r0*(2*k-1)+Rr0
lambdar0=floor(Abarr0/(v*(v-1)))
B2r0=((Abarr0-v*(v-1)*lambdar0)*(lambdar0+1)^2+(v*(v-1)-Abarr0+v*(v-1)*lambdar0)*lambdar0^2)/k^2
Br0=B1r0+B2r0
temp=Br0-m1r0^2-((Ar0-m1r0)^2)/(v-1)
if (abs(temp)<1e-8) temp=0
Pr0=sqrt(temp)
m2r0=(Ar0-m1r0-sqrt((v-1)/(v-2))*Pr0)/(v-1)
m3r0=(Ar0-m1r0+sqrt((v-1)*(v-2))*Pr0)/(v-1)
temp2=Br0-(Ar0^2)/v
if (abs(temp2)<1e-8) temp2=0
Phatr0=sqrt(temp2)
m12r0=(Ar0-sqrt(v/(v-1))*Phatr0)/(v-1)
m13r0=(Ar0+sqrt(v*(v-1))*Phatr0)/(v-1)
if (m1r0 <= m2r0) H1r0=1/m1r0+(v-2)/m2r0+1/m3r0 else H1r0=(v-1)/m12r0+1/m13r0
if (H1r0<minH1)
{
minH1=H1r0
r0star=r0
}
m1hatr0=min((r0*k-Rr0)/(v*k),(Ar0-2/k)/v)
m4r0=(Ar0-2/k-m1r0)/(v-1)
H2r0=1/m1hatr0+(v-1)/m4r0
Mr0=min(H1r0,H2r0)
if (Mr0<M) M=Mr0
}
}
out=list(r0star=r0star,M=M)
return(out)
}
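# aoptgdtd(): attempts to construct an A-optimal group divisible treatment
# design for m groups of n test treatments plus one control in b blocks of
# size k, using getr0() for the control replication and getrow2()/
# alternate.sol2() for the remaining rows.
# Illustrative call (parameter values chosen only as an example):
#   aoptgdtd(m = 2, n = 3, b = 6, k = 4)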
aoptgdtd=function(m,n,b,k,ntrial=5)
{
v=m*n
out=getr0(v,b,k)
r0=out$r0star
r=(b*k-r0)/v
rr0=floor(r)
alphar0=floor(r0/b)
Rr0=(r0-b*alphar0)*(alphar0+1)^2+(b-r0+b*alphar0)*alphar0^2
lambda0=(r0*k-Rr0)/v
lambda1=(r*(k-1)-lambda0-(m-1)*n)/(v-1)
if (r-floor(r)==0 & lambda1-floor(lambda1)==0 & lambda0-floor(lambda0)==0)
{
trial=0
success=0
while(trial<ntrial & success==0)
{
trial=trial+1
N1=matrix(0,1,b)
if (r0>b)
{
n2=(r0+b*(alphar0+1))/(2*alphar0+1)
if (n2>=0 & n2<=b) s=b-n2 else s=0
t=alphar0
cols=sample(b,b-s)
N1[1,cols]=t
if (s>0)
{
remblocks=setdiff(1:b,cols)
N1[1,remblocks]=t+1
}
} else {
cols=sample(b,r0)
N1[1,cols]=1
}
T=matrix(0,1,b)
i=2
relaxed=0
breaker=0
while (i<=(v+1) & breaker==0)
{
N1=getrow2(m,n,b,k,r,r0,lambda1,lambda0,N1,T,i,relaxed)
if (nrow(N1)<i)
{
temp=alternate.sol2(m,n,b,k,r,r0,lambda1,lambda0,N1,T,relaxed)
rows=temp[[1]]
if (all(rows>0))
{
T=rbind(T,N1[rows,])
N1=temp[[2]]
} else breaker=1
if (nrow(T)>5*v) breaker=1
}
i=nrow(N1)+1
}
if (nrow(N1)==(v+1))
{
success=1
if (is.matrix(N1))
{
NNP=N1%*%t(N1)
rvec=rowSums(N1)
R=diag(rvec,nrow=(v+1))
diag(NNP)=rvec
design=block.design(N1)
C=R-NNP/k
C11=C[-1,-1]
e=eigen(C11)
ev=e$values
sumev=sum(1/ev)
Aeff=out$M/sumev
parameters=c(m,n,b,k,r,r0,lambda1,lambda0)
names(parameters)=c("m","n","b","k","r","r0","lambda1","lambda0")
result=list(parameters=parameters,design=design,N=N1,NNP=NNP,Aeff=Aeff)
}
}
}
if (success==0)
{
design="Design not found"
result=list(m=m,n=n,b=b,k=k,r=r,r0=r0,lambda1=lambda1,lambda0=lambda0,design=design)
parameter=c(m,n,b,k,r,r0,lambda1,lambda0)
}
} else {
result="A-optimal Design does not exist for these parameters"
parameter=c(m,n,b,k,r,r0,lambda1,lambda0)
}
return(result)
}
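# check.group(): returns "same" when test treatments x and y (numbered within m
# groups of n consecutive treatments) belong to the same group and "different"
# otherwise.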
check.group=function(m,n,x,y)
{
i1=ceiling(x/n)
i2=ceiling(y/n)
if (i1!=i2) group="different" else group="same"
return(group)
}
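# getrow2() and alternate.sol2(): group divisible analogues of getrow() and
# alternate.sol(); the main difference is that the required concurrence with a
# previously fixed test treatment is lambda1 when the two treatments are in the
# same group and lambda1 + 1 otherwise (see check.group()).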
getrow2=function(m,n,b,k,r,r0,lambda1,lambda0,N1,T,rownum,relaxed)
{
kvec_obt=colSums(N1)
w=matrix(0,1,b)
for (j in 1:b)
{
if (kvec_obt[j]==0) w[,j]=1
else w[,j]=1/kvec_obt[j]
}
obj=w
constr1=matrix(1,1,b)
constr2=matrix(0,b,b)
for (j in 1:b) constr2[j,j]=1
constr3=N1
constr4=T
if (relaxed>0) constr=rbind(constr1,constr2,constr4) else constr=rbind(constr1,constr2,constr3,constr4)
dir1=rep("=", times=(1))
dir2=rep("<=",times=(b))
dim(dir2)=c(b,1)
dir3=rep("=",times=(nrow(N1)))
dim(dir3)=c(nrow(N1),1)
dir4=rep("<",times=(nrow(constr4)))
dim(dir4)=c(nrow(constr4),1)
if (relaxed>0) dir=rbind(dir1,dir2,dir4) else dir=rbind(dir1,dir2,dir3,dir4)
rhs1=r
rhs2=k-kvec_obt
dim(rhs2)=c(b,1)
rhs3=matrix(0,nrow(N1),1)
for (j in 1:nrow(N1))
{
if (sum(N1[j,])>0)
{
if (j==1) rhs3[j,]=lambda0 else {
if (check.group(m,n,(j-1),(rownum-1))=="same") rhs3[j,]=lambda1 else rhs3[j,]=lambda1+1
}
} else rhs3[j,]=0
}
rhs4=matrix(r-0.5,nrow(constr4),1)
if (relaxed>0) rhs=rbind(rhs1,rhs2,rhs4) else rhs=rbind(rhs1,rhs2,rhs3,rhs4)
sol=lp (direction = "max", obj, constr, dir, rhs,transpose.constraints = TRUE, all.bin=TRUE, use.rw=TRUE)
if (sol[[28]]==0)
{
row=sol[[12]]
dim(row)=c(1,b)
if (rownum>nrow(N1)) N1=rbind(N1,row) else N1[rownum,]=row
}
return(N1)
}
alternate.sol2=function(m,n,b,k,r,r0,lambda1,lambda0,N1,T,relaxed)
{
row_detected=0
result=0
k0=1
while (k0<=min(4,nrow(N1)-1) & row_detected==0)
{
row_indices=combn(nrow(N1),k0)
nr=ncol(row_indices)
j=1
while(j<=nr & row_detected==0)
{
rows=row_indices[,j]
T_temp=rbind(T,N1[rows,])
N1_temp=N1
N1_temp[rows,]=matrix(0,1,b)
cnt=0
for (m in 1:k0)
{
rownum=rows[m]
N1_temp=getrow2(m,n,b,k,r,r0,lambda1,lambda0,N1_temp,T_temp,rownum,relaxed)
if (sum(N1_temp[rownum,])>0) cnt=cnt+1
}
if (cnt==k0)
{
row_detected=1
result=list(rows,N1_temp)
}
j=j+1
}
k0=k0+1
}
return(result)
}
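# gbbpb(), getwq(), bbpbwq(), getrow3() and alternate.sol3(): criterion value,
# (w, q) search, construction routine and row generators for BBPB designs
# involving two disjoint sets of treatments of sizes v1 and v2 in b blocks of
# size k; the structure mirrors the BTIB functions above.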
gbbpb=function(v1,v2,b,k,x,z)
{
C=b*x+z
A=(k*C-v2*(b*x*x+2*x*z+z))/(v1*k)
B=(b*k*v1*(k-1)-v2*(v1*(k-1)+k)*C+v2*v2*(b*x*x+2*x*z+z))/(v1*k)
a=v2*(v1-1)^2
d=v1*(v2-1)
value=1/A+a/B+d/C
return(value)
}
getwq=function(v1,v2,b,k)
{
kbyv2=floor(k/v2)
ming=999999999
wq=NULL
for (x in 0:(kbyv2-1))
{
for(z in 0:b)
{
if (!(x==0 & z==0))
{
temp=gbbpb(v1,v2,b,k,x,z)
if (temp<ming)
{
ming=temp
wq=c(x,z)
} else if (temp==ming)
{
wq=rbind(wq,c(x,z))
}
}
}
}
return(wq)
}
bbpbwq=function(v1,v2,b,k,w,q,ntrial)
{
r0=q+b*w
r=(b*k-v2*r0)/v1
lambda2=r0
lambda12=(q*(w+1)*(k-v2*(w+1))+(b-q)*w*(k-v2*w))/v1
lambda1=(r*(k-1)-v2*lambda12)/(v1-1)
if (r-floor(r)==0 & r0-floor(r0)==0 & lambda1-floor(lambda1)==0 & lambda12-floor(lambda12)==0)
{
trial=0
success=0
while(trial<ntrial & success==0)
{
trial=trial+1
N1=matrix(0,v2,b)
if (b>q)
{
cols=sample(b,b-q)
N1[1:v2,cols]=w
if (q>0)
{
remblocks=setdiff(1:b,cols)
N1[1:v2,remblocks]=w+1
}
} else N1[1:v2,1:b]=w+1
T=matrix(0,1,b)
i=v2+1
relaxed=0
breaker=0
while (i<=(v1+v2) & breaker==0)
{
N1=getrow3(v1,v2,b,k,r,r0,lambda1,lambda2,lambda12,N1,T,i,relaxed)
if (nrow(N1)<i)
{
temp=alternate.sol3(v1,v2,b,k,r,r0,lambda1,lambda2,lambda12,N1,T,relaxed)
rows=temp[[1]]
if (all(rows>0))
{
T=rbind(T,N1[rows,])
N1=temp[[2]]
} else breaker=1
if (nrow(T)>5*(v1+v2)) breaker=1
}
Sys.sleep(0.1)
i=nrow(N1)+1
}
if (nrow(N1)==(v1+v2))
{
success=1
NNP=N1%*%t(N1)
rvec=rowSums(N1)
R=diag(rvec,nrow=(v1+v2))
diag(NNP)=rvec
adtt=r-NNP[v2+1,v2+1]/k
adttdash=-lambda1/k
dss=r0-NNP[1,1]/k
dssdash=-lambda2/k
bts=-lambda12/k
f1bar=adtt
f2bar=adttdash
f3bar=dssdash
f4bar=dss
f5bar=dssdash
design=block.design(N1)
Aeff=gbbpb(v1,v2,b,k,w,q)/(v2*(v1-1)/(f1bar-f2bar)+v1*(v2-1)/(f4bar-f5bar)+v2/(f1bar+(v1-1)*f2bar))
if (q>0) type="S" else type="R"
parameters=c(v1,v2,b,k,w,q,type)
names(parameters)=c("v1","v2","b","k","w","q","type")
result=list(parameters=parameters,design=design,N=N1,NNP=NNP,Aeff=Aeff,type=type)
} else {
result="Design not found"
parameter=c(v1,v2,b,k,w,q)
}
}
} else {
result="BBPB design does not exist for these parameters"
parameter=c(v1,v2,b,k,w,q)
}
return(result)
}
getrow3=function(v1,v2,b,k,r,r0,lambda1,lambda2,lambda12,N1,T,rownum,relaxed)
{
kvec_obt=colSums(N1)
w=matrix(0,1,b)
for (j in 1:b)
{
if (kvec_obt[j]==0) w[,j]=1
else w[,j]=1/kvec_obt[j]
}
obj=w
constr1=matrix(1,1,b)
constr2=matrix(0,b,b)
for (j in 1:b) constr2[j,j]=1
constr3=N1
constr4=T
if (relaxed>0) constr=rbind(constr1,constr2,constr4) else constr=rbind(constr1,constr2,constr3,constr4)
dir1=rep("=", times=(1))
dir2=rep("<=",times=(b))
dim(dir2)=c(b,1)
dir3=rep("=",times=(nrow(N1)))
dim(dir3)=c(nrow(N1),1)
dir4=rep("<",times=(nrow(constr4)))
dim(dir4)=c(nrow(constr4),1)
if (relaxed>0) dir=rbind(dir1,dir2,dir4) else dir=rbind(dir1,dir2,dir3,dir4)
rhs1=r
rhs2=k-kvec_obt
dim(rhs2)=c(b,1)
rhs3=matrix(0,nrow(N1),1)
for (j in 1:nrow(N1))
{
if (sum(N1[j,])>0)
{
if (j<=v2) rhs3[j,]=lambda12 else rhs3[j,]=lambda1
} else rhs3[j,]=0
}
rhs4=matrix(r-0.5,nrow(constr4),1)
if (relaxed>0) rhs=rbind(rhs1,rhs2,rhs4) else rhs=rbind(rhs1,rhs2,rhs3,rhs4)
sol=lp (direction = "max", obj, constr, dir, rhs,transpose.constraints = TRUE, all.bin=TRUE, use.rw=TRUE)
if (sol[[28]]==0)
{
row=sol[[12]]
dim(row)=c(1,b)
if (rownum>nrow(N1)) N1=rbind(N1,row) else N1[rownum,]=row
}
return(N1)
}
alternate.sol3=function(v1,v2,b,k,r,r0,lambda1,lambda2,lambda12,N1,T,relaxed)
{
row_detected=0
result=0
k0=1
while (k0<=min(4,nrow(N1)-v2) & row_detected==0)
{
row_indices=combn((v2+1):nrow(N1),k0)
nr=ncol(row_indices)
j=1
while(j<=nr & row_detected==0)
{
rows=row_indices[,j]
T_temp=rbind(T,N1[rows,])
N1_temp=N1
N1_temp[rows,]=matrix(0,1,b)
cnt=0
for (m in 1:k0)
{
rownum=rows[m]
N1_temp=getrow3(v1,v2,b,k,r,r0,lambda1,lambda2,lambda12,N1_temp,T_temp,rownum,relaxed)
if (sum(N1_temp[rownum,])>0) cnt=cnt+1
}
if (cnt==k0)
{
row_detected=1
result=list(rows,N1_temp)
}
j=j+1
}
k0=k0+1
}
return(result)
}
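# aoptbbpb(): entry point for BBPB designs; it retrieves the optimal (w, q)
# from getwq() and constructs the design with bbpbwq().
# Illustrative call (parameter values chosen only as an example):
#   aoptbbpb(v1 = 4, v2 = 2, b = 6, k = 3)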
aoptbbpb=function(v1,v2,b,k,ntrial=5)
{
wq=getwq(v1,v2,b,k)
dim(wq)=c(length(wq)/2,2)
for (i in 1:nrow(wq))
{
w=wq[i,1]
q=wq[i,2]
result=bbpbwq(v1,v2,b,k,w,q,ntrial)
return(result)
}
}
| /scratch/gouwar.j/cran-all/cranData/Aoptbdtvc/R/Aoptbdtvc.R |
#' The 'AovBay' package.
#'
#' @description Package developed for the visualization and presentation of one-way analysis of variance models, with a classical, non-parametric and Bayesian approach.
#'
#' @docType package
#' @name AovBay-package
#' @aliases AovBay
#' @useDynLib AovBay, .registration = TRUE
#' @import methods
#' @import Rcpp
#' @importFrom rstan sampling
#'
#' @references
#' Stan Development Team (2020). RStan: the R interface to Stan. R package version 2.19.3. https://mc-stan.org
#'
NULL
| /scratch/gouwar.j/cran-all/cranData/AovBay/R/AovBay-package.R |
#' @importFrom stats TukeyHSD aggregate bartlett.test density kruskal.test lm na.omit pairwise.wilcox.test qnorm qqnorm quantile sd shapiro.test
#' @importFrom utils read.csv2 str
#' @importFrom graphics hist
#' @importFrom DT renderDataTable
#' @importFrom DT datatable
#' @importFrom DT formatSignif
#' @importFrom shinydashboard sidebarMenu
#' @importFrom shinydashboard menuItem
#' @importFrom shinydashboard menuSubItem
#' @importFrom shinydashboard tabItem
#' @importFrom shinydashboard tabItems
#' @importFrom shinydashboard dashboardBody
#' @importFrom car durbinWatsonTest
#' @importFrom reshape melt
#' @importFrom shiny column radioButtons textOutput checkboxInput fileInput fluidRow htmlOutput icon numericInput reactive renderPrint renderTable renderText renderUI runApp selectInput shinyApp sliderInput stopApp tableOutput tabPanel uiOutput withMathJax verbatimTextOutput
#' @import shinycssloaders shinydashboardPlus tibble BayesFactor broom dplyr highcharter moments nortest rstan rstantools stringr waiter
#' @importFrom car some
#' @import htmltools
#' @importFrom purrr map
globalVariables(c("aov","fluidRow","column","a","img","dashboardPage","tagList","spin_three_bounce","textOutput","h3","Trat","upr","Trat","upr","lwr","hist","Names","Mean","se_mean","n_eff","names_from_WB","Iteration","mu","sig2","value","HTML","h2","radioButtons","checkboxInput","fileInput","variable"))
#' Interactive panel ANOVA classic, non parametric and bayesian
#'
#' Interactive panel to visualize and develop one-way analysis of variance models, from the classical, non-parametric and Bayesian approach.
#' @param dataset Data set
#' @return A shiny panel with the classical, non-parametric and Bayesian analyses of variance, based on the specification of the dependent and independent variables of the data set provided in \code{dataset}; it also provides a decision diagram that suggests which method is appropriate, based on the assumptions of the models.
#' @examples
#' \dontrun{
#' data(PollutionData)
#' aovbayes(PollutionData)
#' }
#' @export
aovbayes <- function(dataset=FALSE) {
# require(shiny)
# require(highcharter)
# require(shinydashboard)
# require(shinydashboardPlus)
# require(BayesFactor)
# require(dplyr)
# require(waiter)
# require(broom)
# require(nortest)
# require(moments)
# require(car)
# require(DT)
# require(shinycssloaders)
# require(rstan)
# require(rstantools)
# require(reshape)
# require(purrr)
# require(stringr)
# left_footer <- fluidRow(
# column(
# width = 6,
# align = "left",
# a(
# href = "http://www.fcnm.espol.edu.ec/",
# target = "_blank",
# img(src = "https://github.com/JavierRojasC/JavierRCam/blob/master/fcnm.png?raw=true", height = "30px"),
# class = "dropdown",
# title = "Facultad de Ciencias Naturales y Matematicas")
# )
# )
runApp(list(
ui = dashboardPage(
preloader = list(html = tagList(spin_three_bounce(), h3("Please wait a moment ...")), color = "#1E3A4E"),
title = 'One-way analysis of variance' ,
dashboardHeader(title = "One-way analysis of variance",
titleWidth = 450),
dashboardSidebar(
sidebarMenu(
menuItem("Database", tabName = "BD", startExpanded = TRUE,icon = icon("database")),
menuItem("Assumptions", tabName = "Assumptions", startExpanded = TRUE,icon = icon("tasks")),
menuItem("Classic ANOVA", tabName = "ANOVAcl", startExpanded = TRUE,icon = icon("adn")),
menuItem("Kruskal Wallis", tabName = "KW", startExpanded = TRUE,icon = icon("kickstarter-k")),
menuItem("Bayesian ANOVA", tabName = "ANOVAby", startExpanded = TRUE,icon = icon("bold"))
)),
dashboardBody( tags$head(tags$style(HTML('
/* logo */
.skin-blue .main-header .logo {
background-color: #DADADA;
color: #2B1F57
}
/* logo when hovered */
.skin-blue .main-header .logo:hover {
background-color: #A1A1A1;
}
/* navbar (rest of the header) */
.skin-blue .main-header .navbar {
background-color: #6B94BF;
}
/* main sidebar */
.skin-blue .main-sidebar {
background-color: #546A90;
}
/* active selected tab in the sidebarmenu */
.skin-blue .main-sidebar .sidebar .sidebar-menu .active a{
background-color: #A8A8A8;
}
/* other links in the sidebarmenu */
.skin-blue .main-sidebar .sidebar .sidebar-menu a{
background-color: #8B8989;
color: #151515;
style:"font-family:verdana";
}
/* other links in the sidebarmenu when hovered */
                                .skin-blue .main-sidebar .sidebar .sidebar-menu a:hover{
                                background-color: #6F6F6F;
                                }
/* toggle button when hovered */
.skin-blue .main-header .navbar .sidebar-toggle:hover{
background-color: #DDDDDD;
}
/* body */
.skin-blue .main-body .content-wrapper, .right-side {
background-color: #F3F3F3;
}
.box.box-solid.box-primary>.box-header{
background: rgb(0, 129, 201);
color: #57A184;
font-size: 18px;
                                font-weight: bold;
}
.box.box-solid.box-primary{
font-family: OpenSans;
font-size: 16px;
text-align: left;
color: #AA3B3B;
}
'))),
tags$head(tags$link(rel = "shortcut icon", href = "favicon.ico")),
tabItems(
tabItem(tabName= "BD",
box(width=12,title="Upload base in csv",
fluidRow(
column(12,fileInput("file1", " ",
accept = c(
"text/csv",
"comma-separated-values,text/plain",
".csv")
),
checkboxInput("header", "Press if the first row contains the column names", TRUE),
radioButtons(inputId="separador",label="Separador",
choices = c(Comma=',', Semicolon=";", Tab="\t", Space=''),
selected = ','))
),uiOutput('var')),
fluidRow(width=12,
box(title="Viewer",
width=12,
DT::dataTableOutput("DTable")))
),
tabItem(tabName = "Assumptions",
sliderInput(inputId = 'alpha',
label='Enter Alpha (Type I Error)',
value=0.05,
min=0,
max=1),
box(title = 'Normality of the residuals',collapsible = TRUE,
width = 12,
column(6,
withSpinner(highchartOutput('normality', height = "350px"), type = 7, color='#C7D5EB')
),
column(6,
h2(textOutput('pruebaNorm')),
tableOutput('normalityTest'),
h3(textOutput('normalityConclu')),
h2(htmlOutput('CumpleNorm')))),
box(title = 'Homoscedasticity of the residuals',collapsible = TRUE,
width = 12,
column(6,
withSpinner(highchartOutput('homoscedasticity', height = "350px"), type = 7, color='#C7D5EB')
),
column(6,
                               h2("Homoscedasticity by Bartlett's test"),
tableOutput('homoscedasticityBart'),
h3(textOutput('homoscedasticityConclu')),
h2(htmlOutput('CumpleHomoc'))
)),
box(title = 'Independence of residuals',collapsible = TRUE,
width = 12,
column(12,
h2('Independence by Durbin Watson Test'),
tableOutput('independenceDurbin'),
h3(textOutput('independenceConclu')),
h2(htmlOutput('Cumpleindependence')))),
box(title = 'Symmetry of the residuals',
width = 12,collapsible = TRUE,
column(6,
withSpinner(highchartOutput('symmetry', height = "350px"), type = 7, color='#C7D5EB')
),
column(6,
h2('Symmetry - Asymmetry coefficient'),
tableOutput('symmetryCoef'),
h3(textOutput('symmetryConclu')),
h2(htmlOutput('CumpleSimet')))),
box(width = 12,collapsible = TRUE,
withSpinner(highchartOutput('diagram', height = "650px"), type = 7, color='#C7D5EB'),
h2('Technique available'),
withSpinner(highchartOutput('technique'), type = 7, color='#C7D5EB'))),
tabItem(tabName = "ANOVAcl",
sliderInput(inputId = 'alpha2',
label='Enter Alpha (Type I Error)',
value=0.05,
min=0,
max=1),
box(width=12,
title = "Classic ANOVA Table",collapsible = TRUE,
column(width=12,align="center",
tableOutput('Aov')),
h2("Conclution"),
h3(textOutput('conclutionAov'))),
column(12,withSpinner(highchartOutput('Box', height = "450px"), type = 7, color='#C7D5EB')),
box(title = "Post-Hoc",collapsible = TRUE,
width=12,
column(6,
h3('TukeyHSD'),
tableOutput('AovPostHoc')),
column(6,
withSpinner(highchartOutput('AovPostHocGraph', height = "450px"), type = 7, color='#C7D5EB')))
),
tabItem(tabName = "KW",
sliderInput(inputId = 'alphakw',
label='Enter Alpha (Type I Error)',
value=0.05,
min=0,
max=1),
box(title = "Kruskal Wallis Table",collapsible = TRUE,
tableOutput('kw'),
h2("Conclution"),
h3(textOutput('conclusionKW'))),
box(title = 'Post Hoc: Pairwise comparisons using Wilcoxon rank sum exact test ',collapsible = TRUE,
selectInput('padjust', 'Adjustment methods',
c("holm", "hochberg", "hommel", "bonferroni", "BH", "BY",
"fdr", "none")),
h3('p-values adjusted'),
DT::dataTableOutput('KWpost')
)
),
tabItem(tabName = "ANOVAby",
box(title = "Bayesian ANOVA Table",collapsible = TRUE,
tableOutput('AovBY'),
h2("Conclution"),
h3(textOutput('conclutionaovby'))),
box(title = 'Control center',collapsible = TRUE,
sliderInput(inputId = 'prior',
label='Enter prior probability',
value=0.5,
min=0,
max=1),
numericInput(inputId = 'numberiterations',
label='Enter the number of iterations',
value=1000,
min=500,
max=3000),
sliderInput(inputId = 'chainsnumber',
label='Enter number of chains:',
value=1,
min=1,
max=4)),
box(title = "Posterior", width=12,collapsible = TRUE,
column(12, align="center",DT::dataTableOutput('AovBYpost'))),
box(title = "MCMC",collapsible = TRUE,
width = 12,
column(6,
selectInput("mcmcCHAIN","Seleccione MCMC",
c("Mean and Variance",
"Treatments")),
withSpinner(highchartOutput('AovBYposmcmc', height = "450px"), type = 7, color='#C7D5EB')),
column(6,withSpinner(highchartOutput('AovBYposcurves', height = "450px"), type = 7, color='#C7D5EB'))
)
)
))),
dashboardFooter(
left = NULL,
right = NULL),
server = function(input, output) {
data <- reactive({
if (dataset == FALSE){
inFile <- input$file1
if (is.null(inFile))
return(NULL)
data=read.csv2(inFile$datapath, sep=input$separador,header = input$header)
data
} else {
data = dataset}
})
output$DTable <- DT::renderDataTable({
Data <- data()
datatable(Data, extensions = 'FixedColumns',
options = list(
dom = 't',
scrollX = TRUE,
fixedColumns = TRUE
))
})
output$var <- renderUI({
if(is.null(data())){return()}
else list (
selectInput("y", "Dependent variable", choices = names(data())),
selectInput("x", "Independent variable", choices = names(data()))
)
})
output$Aov <- renderTable({
Data <- data()
Data <- na.omit(Data)
Dep <- input$y
Ind <- input$x
Factor <- as.factor(as.matrix(Data[,Ind]))
Depend <- as.numeric(as.matrix(Data[,Dep]))
SA <- summary(aov(Depend~Factor))
S <- as.data.frame(SA[[1]])
S <- signif(S,4)
S[is.na(S)] <- ' '
S <- data.frame(c(Ind,'Residuals'),S)
colnames(S) <- c('','df','SS','MS','F','p-value')
S
})
output$conclutionAov <- renderText({
Data <- data()
Data <- na.omit(Data)
Dep <- input$y
Ind <- input$x
SA <- summary(aov(as.numeric(as.matrix(Data[,Dep]))~as.factor(as.matrix(Data[,Ind]))))
if (SA[[1]][['Pr(>F)']][1] < input$alpha2){
        response <- paste0('There are significant differences between the groups of ', Ind)
} else if (SA[[1]][['Pr(>F)']][1] > input$alpha2){
        response <- paste0('There are no significant differences between the groups of ', Ind)}
response
})
output$normality <- renderHighchart({
Data <- data()
Data <- na.omit(Data)
Dep <- input$y
Ind <- input$x
SA <- (aov(as.numeric(as.matrix(Data[,Dep]))~as.factor(as.matrix(Data[,Ind]))))
Graph <- qqnorm(SA$residuals, pch = 1, frame = FALSE)
DataLine <- data.frame(xd=Graph[[1]],yd=Graph['y'])
colnames(DataLine) <- c('xd','yd')
LIN <- augment(lm(yd~xd, data=DataLine))
yRES=SA$residuals
distribution = qnorm
probs = c(0.25, 0.75)
qtype = 7
y1 <- quantile(yRES, probs, names = FALSE, type = qtype, na.rm = TRUE)
x1 <- distribution(probs)
slope <- diff(y1)/diff(x1)
int <- y1[1L] - slope * x1[1L]
Int=int
Slp=slope
x=Graph[[1]]
Recta <- Int+Slp*x
lineQQ <- data.frame(x2=Graph[[1]], y2=Recta)
highchart() %>%
hc_add_series(lineQQ, "line", hcaes(x = 'x2', y = 'y2'), name='QQ line', color='#A9DEDE',
marker= list(symbol='url(graphic.png)'))%>%
hc_add_series(LIN, "scatter", hcaes(x='xd', y='yd'), name='Points', color='#2B275A') %>%
hc_yAxis(
title = list(text = "Standardized Residuals"),
max=max(lineQQ$y2),
min=min(lineQQ$y2))%>%
hc_xAxis(
title = list(text = "Theoretical Quantiles"))%>%
hc_title(text='QQ plot')
})
output$AovPostHoc <- renderTable({
Data <- data()
Data <- na.omit(Data)
Dep <- input$y
Ind <- input$x
Factor <- as.factor(as.matrix(Data[,Ind]))
Depend <- as.numeric(as.matrix(Data[,Dep]))
SA <- (aov(Depend~Factor))
intervals = TukeyHSD(SA)
S <- as.data.frame(intervals[[1]])
S <- signif(S,4)
S <- cbind(rownames(S),S)
names(S)[1] <- ' '
S
})
output$AovPostHocGraph <- renderHighchart({
Data <- data()
Data <- na.omit(Data)
Dep <- input$y
Ind <- input$x
Factor <- as.factor(as.matrix(Data[,Ind]))
Depend <- as.numeric(as.matrix(Data[,Dep]))
SA <- (aov(Depend~Factor))
intervals = TukeyHSD(SA)
S <- as.data.frame(intervals[[1]])
S <- signif(S,4)
S <- cbind(rownames(S),S)
names(S)[1] <- 'Trat'
hchart(pointWidth=0,type = 'columnrange',S,name='Interval',
hcaes(x=Trat,high=upr, low=lwr), color='#224361')%>%
      hc_add_series(S, type='scatter', hcaes(x=Trat, y=diff), name='Differences', color='#289B9C',
                    tooltip = list(pointFormat = "<br> Difference = {point.y}"))%>%
hc_xAxis(title=list(text=('Treatment combinations')))%>%
      hc_yAxis(title=list(text=('Differences')),
plotLines = list(list(
value = 0,
color = '#DAE0EA',
width = 3,
zIndex = 4,
label = list(text = "",
style = list( color = '#DAE0EA', fontWeight = 'bold' )))))
})
output$normalityTest <- renderTable({
Data <- data()
Data <- na.omit(Data)
Dep <- input$y
Ind <- input$x
SA <- (aov(as.numeric(as.matrix(Data[,Dep]))~as.factor(as.matrix(Data[,Ind]))))
if (length(SA$residuals)>30){
Test <- lillie.test(SA$residuals)
Tabla <- data.frame(Statistic=signif(Test$statistic,4),
ValP=signif(Test$p.value,4))
colnames(Tabla) <- c('KS Statistic','p-value')
Tabla
} else {
Test <- shapiro.test(SA$residuals)
Tabla <- data.frame(Statistic=Test$statistic,
ValP=Test$p.value)
colnames(Tabla) <- c('Shapiro-Wilk statistic','p-value')
Tabla
}
})
output$normalityConclu <- renderText({
Data <- data()
Data <- na.omit(Data)
Dep <- input$y
Ind <- input$x
SA <- (aov(as.numeric(as.matrix(Data[,Dep]))~as.factor(as.matrix(Data[,Ind]))))
if (length(SA$residuals)>30){
Test <- lillie.test(SA$residuals)
if (Test$p.value >= input$alpha){
response=paste0('According to the Kolmogorov-Smirnov test, the residuals are normal')
} else {
response=paste0('According to the Kolmogorov-Smirnov test, the residuals are not normal')
}
response
} else {
Test <- shapiro.test(SA$residuals)
if (Test$p.value >= input$alpha){
response=paste0('According to the Shapiro-Wilk test, the residuals are normal')
} else {
response=paste0('According to the Shapiro-Wilk test, the residuals are not normal')
}
response
}
})
output$pruebaNorm <- renderText({
Data <- data()
Data <- na.omit(Data)
Dep <- input$y
Ind <- input$x
SA <- (aov(as.numeric(as.matrix(Data[,Dep]))~as.factor(as.matrix(Data[,Ind]))))
if (length(SA$residuals)>30){
response=paste0('Normality by Kolmogorov-Smirnov test')
response
} else {
response=paste0('Normality by Shapiro-Wilk test')
response
}
})
output$CumpleNorm <- renderText({
Data <- data()
Data <- na.omit(Data)
Dep <- input$y
Ind <- input$x
SA <- (aov(as.numeric(as.matrix(Data[,Dep]))~as.factor(as.matrix(Data[,Ind]))))
if (length(SA$residuals)>30){
Test <- lillie.test(SA$residuals)
if(Test$p.value >= input$alpha ){
return(paste("Assumption of Normality: ","<span style=\"color:green;\"> Is met. </span>"))
}else{
return(paste("Assumption of Normality: ","<span style=\"color:red;\"> Is not met.</span>"))
}} else {
Test <- shapiro.test(SA$residuals)
if(Test$p.value >= input$alpha ){
return(paste("Assumption of Normality: ","<span style=\"color:green;\"> Is met.</span>"))
}else{
return(paste("Assumption of Normality: ","<span style=\"color:red;\"> Is not met.</span>"))
}
}
})
#_________________________________________________________________
output$homoscedasticity <- renderHighchart({
Data <- data()
Data <- na.omit(Data)
Dep <- input$y
Ind <- input$x
Factor <- as.factor(as.matrix(Data[,Ind]))
Depend <- as.numeric(as.matrix(Data[,Dep]))
dataBY <- data.frame(Ind2=Factor, Dep2=Depend)
colnames(dataBY) <- c('Ind2','Dep2')
SA <- (aov(Dep2 ~ Ind2, data=dataBY))
xs=SA$fitted.values
ys=SA$residuals
lineAR <- data.frame(x2=xs, y2=ys)
highchart() %>%
hc_yAxis(
title = list(text = "Residuals"),
plotLines = list(list(
value = 0,
color = '#A9DEDE',
width = 3,
zIndex = 4,
label = list(text = "",
style = list( color = '#1D4B5E', fontWeight = 'bold' )))),
max=max(lineAR$y2),
min=min(lineAR$y2))%>%
hc_add_series(lineAR, "scatter", hcaes(x = 'x2', y = 'y2'), name='Residual vs Adjusted', color='#2B275A'
)%>%
hc_xAxis(
title = list(text = "Adjusted values"))%>%
hc_title(text='Residual vs Adjusted')
})
output$homoscedasticityBart <- renderTable({
Data <- data()
Data <- na.omit(Data)
Dep <- input$y
Ind <- input$x
Factor <- as.factor(as.matrix(Data[,Ind]))
Depend <- as.numeric(as.matrix(Data[,Dep]))
dataBY <- data.frame(Ind2=Factor, Dep2=Depend)
colnames(dataBY) <- c('Ind2','Dep2')
Bart <- bartlett.test(Dep2 ~ Ind2, data=dataBY)
Tabla <- data.frame(Statistic=signif(Bart$statistic,4),
ValP=signif(Bart$p.value,4))
      colnames(Tabla) <- c("Bartlett's K-squared statistic", "p-value")
Tabla
})
output$homoscedasticityConclu <- renderText({
Data <- data()
Data <- na.omit(Data)
Dep <- input$y
Ind <- input$x
prio <- input$prior
Factor <- as.factor(as.matrix(Data[,Ind]))
Depend <- as.numeric(as.matrix(Data[,Dep]))
dataBY <- data.frame(Ind2=Factor, Dep2=Depend)
colnames(dataBY) <- c('Ind2','Dep2')
Bart <- bartlett.test(Dep2 ~ Ind2, data=dataBY)
if (Bart$p.value >= input$alpha){
response=paste0('According to the Bartlett test, the samples show equal variances')
} else {
response=paste0('According to the Bartlett test, the samples show unequal variances')
}
response
})
output$CumpleHomoc <- renderText({
Data <- data()
Data <- na.omit(Data)
Dep <- input$y
Ind <- input$x
prio <- input$prior
Factor <- as.factor(as.matrix(Data[,Ind]))
Depend <- as.numeric(as.matrix(Data[,Dep]))
dataBY <- data.frame(Ind2=Factor, Dep2=Depend)
colnames(dataBY) <- c('Ind2','Dep2')
Bart <- bartlett.test(Dep2 ~ Ind2, data=dataBY)
if(Bart$p.value >= input$alpha ){
return(paste("Homoscedasticity assumption: ","<span style=\"color:green;\"> Is met.</span>"))
}else{
return(paste("Homoscedasticity assumption: ","<span style=\"color:red;\"> Is not met.</span>"))
}
})
#________________________________________________________________
output$independenceDurbin <- renderTable({
Data <- data()
Data <- na.omit(Data)
Dep <- input$y
Ind <- input$x
Factor <- as.factor(as.matrix(Data[,Ind]))
Depend <- as.numeric(as.matrix(Data[,Dep]))
dataBY <- data.frame(Ind2=Factor, Dep2=Depend)
colnames(dataBY) <- c('Ind2','Dep2')
Aov <- aov(Dep2 ~ Ind2, data=dataBY)
DW <- durbinWatsonTest(Aov)
Tabla <- data.frame(Autocor=DW[1],
Dw=signif(as.numeric(DW[2]),4),
ValP=signif(as.numeric(DW[3]),4))
colnames(Tabla) <- c('Autocorrelation','D-W Statistic',
'p-value')
Tabla
})
output$independenceConclu <- renderText({
Data <- data()
Data <- na.omit(Data)
Dep <- input$y
Ind <- input$x
prio <- input$prior
Factor <- as.factor(as.matrix(Data[,Ind]))
Depend <- as.numeric(as.matrix(Data[,Dep]))
dataBY <- data.frame(Ind2=Factor, Dep2=Depend)
colnames(dataBY) <- c('Ind2','Dep2')
Aov <- aov(Dep2 ~ Ind2, data=dataBY)
DW <- durbinWatsonTest(Aov)
if (DW[3] >= input$alpha){
response=paste0('According to the Durbin Watson test, there is no presence of autocorrelation in the residuals.')
} else {
response=paste0('According to the Durbin Watson test, there is the presence of autocorrelation in the residuals.')
}
response
})
output$Cumpleindependence <- renderText({
Data <- data()
Data <- na.omit(Data)
Dep <- input$y
Ind <- input$x
prio <- input$prior
Factor <- as.factor(as.matrix(Data[,Ind]))
Depend <- as.numeric(as.matrix(Data[,Dep]))
dataBY <- data.frame(Ind2=Factor, Dep2=Depend)
colnames(dataBY) <- c('Ind2','Dep2')
Aov <- aov(Dep2 ~ Ind2, data=dataBY)
DW <- durbinWatsonTest(Aov)
if (DW[3] >= input$alpha){
return(paste("Independence assumption: ","<span style=\"color:green;\"> Is met.</span>"))
}else{
return(paste("Independence assumption: ","<span style=\"color:red;\"> Is not met.</span>"))
}
})
#_________________________________________________________
output$symmetry <- renderHighchart({
Data <- data()
Data <- na.omit(Data)
Dep <- input$y
Ind <- input$x
# Dep <- names(Data)[2]
# Ind <- names(Data)[1]
Factor <- as.factor(as.matrix(Data[,Ind]))
Depend <- as.numeric(as.matrix(Data[,Dep]))
dataBY <- data.frame(Ind2=Factor, Dep2=Depend)
colnames(dataBY) <- c('Ind2','Dep2')
SA <- (aov(Dep2 ~ Ind2, data=dataBY))
skewness(SA$residuals)
histRes <- hist(SA$residuals, plot=FALSE)
hchart(histRes, name='', color='#84DED4')%>%
hc_title(text='Histogram of the residuals')
})
output$symmetryCoef <- renderTable({
Data <- data()
Data <- na.omit(Data)
Dep <- input$y
Ind <- input$x
Factor <- as.factor(as.matrix(Data[,Ind]))
Depend <- as.numeric(as.matrix(Data[,Dep]))
dataBY <- data.frame(Ind2=Factor, Dep2=Depend)
colnames(dataBY) <- c('Ind2','Dep2')
SA <- (aov(Dep2 ~ Ind2, data=dataBY))
Tabla <- data.frame(Statistic=skewness(SA$residuals))
colnames(Tabla) <- c('Asymmetry coefficient')
Tabla
})
output$symmetryConclu <- renderText({
Data <- data()
Data <- na.omit(Data)
Dep <- input$y
Ind <- input$x
Factor <- as.factor(as.matrix(Data[,Ind]))
Depend <- as.numeric(as.matrix(Data[,Dep]))
dataBY <- data.frame(Ind2=Factor, Dep2=Depend)
colnames(dataBY) <- c('Ind2','Dep2')
SA <- (aov(Dep2 ~ Ind2, data=dataBY))
if (round(skewness(SA$residuals),2) > 0){
response=paste0('According to the skewness coefficient, the distribution of the residuals has a positive skewness (Right bias)')
} else if (round(skewness(SA$residuals),2) < 0){
response=paste0('According to the skewness coefficient, the distribution of the residuals has a negative skewness (Left bias)')
} else {
response=paste0('According to the coefficient of skewness, the distribution of the residuals is symmetric.')
}
response
})
output$CumpleSimet <- renderText({
Data <- data()
Data <- na.omit(Data)
Dep <- input$y
Ind <- input$x
Factor <- as.factor(as.matrix(Data[,Ind]))
Depend <- as.numeric(as.matrix(Data[,Dep]))
dataBY <- data.frame(Ind2=Factor, Dep2=Depend)
colnames(dataBY) <- c('Ind2','Dep2')
SA <- (aov(Dep2 ~ Ind2, data=dataBY))
if(skewness(SA$residuals) == 0 ){
return(paste("Symmetry Assumption: ","<span style=\"color:green;\"> Is met.</span>"))
}else{
return(paste("Symmetry Assumption: ","<span style=\"color:red;\"> Is not met.</span>"))
}
})
#__________________________________________________
output$Box <- renderHighchart({
Data <- data()
Data <- na.omit(Data)
Dep <- input$y
Ind <- input$x
Means <- aggregate(as.matrix(Data[,Dep]) ~ as.factor(as.matrix(Data[,Ind])), data = Data, mean)
colnames(Means) <- c('Names', 'Mean')
hcboxplot(x=as.numeric(as.matrix(Data[,Dep])), var=as.factor(as.matrix(Data[,Ind])), name = "Boxplot", color = "#0E1142", outliers = FALSE,
showInLegend=TRUE)%>%
hc_yAxis(title = list(text = Dep))%>%
hc_xAxis(title = list(text = "Levels"))%>%
hc_chart(type = "column")%>%
hc_plotOptions(showInLegend=TRUE,dataLabels=TRUE)%>%
hc_add_series(Means, type='bubble', hcaes(x =Names,y=Mean),maxSize = "7%",
tooltip=list(pointFormat='<br> {point.y} ',headerFormat='<b> Mean'), name='Means',
showInLegend=TRUE)
})
output$AovBY <- renderTable({
Data <- data()
Data <- na.omit(Data)
Dep <- input$y
Ind <- input$x
prio <- input$prior
Factor <- as.factor(as.matrix(Data[,Ind]))
Depend <- as.numeric(as.matrix(Data[,Dep]))
dataBY <- data.frame(Ind2=Factor, Dep2=Depend)
colnames(dataBY) <- c('Treatment','Dep2')
#str(dataBY)
Anovabyy <- (anovaBF(Dep2 ~ Treatment, data=dataBY, whichRandom = "ID",
rscaleFixed = prio,iterations = input$numberiterations))
S <- data.frame(Priori=(prio), BF=Anovabyy[1][1])
TabBY <- S[,1:3]
TabBY$BF.bf <- round(TabBY$BF.bf,3)
TabBY$BF.error <- signif(TabBY$BF.error,3 )
colnames(TabBY) <- c('Priori','BF10','Error')
TabBY <- rbind(TabBY, c(1-prio,1,''))
rownames(TabBY) <- c('Alternative Model', 'Null Model')
TabBY <- cbind(rownames(TabBY),TabBY)
names(TabBY)[1] <- ''
TabBY
})
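    # stan_summary(): helper that condenses rstan::monitor() output for the
    # requested parameters into a tibble with mean, MCSE, sd, the requested
    # quantiles, effective sample size and Rhat; the optional X/W/B/is_cor
    # arguments only relabel design-matrix or correlation parameters.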
stan_summary = function(
from_stan
, par
, probs = c(.5,.025,.975)
, X = NULL
, W = NULL
, B = NULL
, is_cor = F
){
m = monitor(from_stan,probs=probs,print=F)
all_pars = dimnames(m)[[1]]
all_pars_no_squares = str_replace(dimnames(m)[[1]],'\\[.*\\]','')
select_pars = all_pars[all_pars_no_squares%in%par]
requested_pars = par
m %>%
tibble::as_tibble(m) %>%
dplyr::mutate(
par = str_replace(dimnames(m)[[1]],'\\[.*\\]','')
) %>%
dplyr::filter(
par%in%requested_pars
) %>%
dplyr::select(
par
, mean
, se_mean
, sd
, contains('%')
, n_eff
, Rhat
) ->
m
if(!is_cor){
if(!is.null(X)){
m$par = dimnames(X)[[2]]
}
if(!is.null(W)){
m$par = names_from_WB(W,B)
}
}else{
temp = select_pars
temp = gsub(']','',temp)
temp = unlist(strsplit(temp,'[',fixed=T))
temp = temp[(1:length(temp))%%2==0]
temp = unlist(strsplit(temp,',',fixed=T))
v1 = temp[(1:length(temp))%%2==1]
v2 = temp[(1:length(temp))%%2==0]
keep = v2>v1
v1 = v1[keep]
v2 = v2[keep]
if(!is.null(X)){
v1 = dimnames(X)[[2]][as.numeric(v1)]
v2 = dimnames(X)[[2]][as.numeric(v2)]
}
if(!is.null(W)){
temp = names_from_WB(W,B)
v1 = temp[as.numeric(v1)]
v2 = temp[as.numeric(v2)]
}
m = m[keep,]
m$par = paste(v1,v2,sep='~')
}
return(m)
}
output$AovBYpost <- DT::renderDataTable({
Data <- data()
Data <- na.omit(Data)
Dep <- input$y
Ind <- input$x
prio <- input$prior
Factor <- as.factor(as.matrix(Data[,Ind]))
Depend <- as.numeric(as.matrix(Data[,Dep]))
dataBY <- data.frame(Ind2=Factor, Dep2=Depend)
colnames(dataBY) <- c('Tratamiento','Dep2')
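      # Data passed to the pre-compiled Stan model (stanmodels$onewaymodel):
      # number of observations, number of treatment levels, the response, the
      # numeric treatment index and a lambda hyperparameter derived from the
      # standard deviation of the response.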
lambda <- -log(0.01)/(3*sd(dataBY$Dep2))
data2 <- list(N=length(dataBY$Dep2),
J=length(unique(dataBY$Tratamiento)),
response=dataBY$Dep2,
predictor=as.numeric(dataBY$Tratamiento),
lambda=lambda)
sm <- rstan::sampling(stanmodels$onewaymodel,
data=data2, chains=input$chainsnumber,
seed = 12345,iter=input$numberiterations,
open_progress =FALSE)
tab <- stan_summary(sm, par=c("mu","sigmaalpha","sigmaepsilon","a"),
probs =c(.5,.025,.975))
tab$par <- c('Mu','Sigma Alpha','Sigma Epsilon',unique(as.character(dataBY$Tratamiento)))
DT::datatable(tab, extensions = 'FixedColumns',
options = list(
dom = 't',
scrollX = TRUE,
fixedColumns = TRUE,
pageLength = length(tab$par)
))%>% formatSignif(c("mean", "se_mean", "sd","50%","2.5%","97.5%", "Rhat"), 3)
})
output$AovBYposmcmc <- renderHighchart({
Data <- data()
Data <- na.omit(Data)
Dep <- input$y
Ind <- input$x
prio <- input$prior
Factor <- as.factor(as.matrix(Data[,Ind]))
Depend <- as.numeric(as.matrix(Data[,Dep]))
dataBY <- data.frame(Ind2=Factor, Dep2=Depend)
colnames(dataBY) <- c('Treatment','Dep2')
#str(dataBY)
Anovabyy <- (anovaBF(Dep2 ~ Treatment, data=dataBY, whichRandom = "ID",
rscaleFixed = prio,iterations = input$numberiterations))
post <- (posterior(Anovabyy,iterations = input$numberiterations))
MCMC <- data.frame(Iteration=1:input$numberiterations,post[,])
if (input$mcmcCHAIN=="Mean and Variance"){
highchart()%>%
hc_yAxis_multiples( list(top = "0%", height = "50%", title = list(text = "Mean"),opposite=FALSE),
list(top = "50%", height = "50%", title = list(text = "Sigma2") ,opposite=TRUE))%>%
hc_add_series(MCMC, type='line', hcaes(x=Iteration,y=mu),yAxis=0, name='Mean',color='#24509C')%>%
hc_add_series(MCMC, type='line', hcaes(x=Iteration,y=sig2),yAxis=1, name='Sigma2',color='#31999C')
} else {
MCMCCom <- MCMC[,-c(2,ncol(MCMC),ncol(MCMC)-1)]
rownames(MCMCCom) <- MCMC[,1]
MCMCCom2 <- as.matrix(MCMCCom)
MCMCMer <- melt(MCMCCom, id.vars="Iteration")
highchart()%>%
hc_add_series(MCMCMer, type='line', hcaes(x=Iteration, y=value, group=variable))%>%
hc_title(text='MCMC chains')%>%
hc_exporting(enabled = TRUE,
filename = paste0('Markov chains'))
}
})
output$AovBYposcurves <- renderHighchart({
Data <- data()
Data <- na.omit(Data)
Dep <- input$y
Ind <- input$x
prio <- input$prior
Factor <- as.factor(as.matrix(Data[,Ind]))
Depend <- as.numeric(as.matrix(Data[,Dep]))
dataBY <- data.frame(Ind2=Factor, Dep2=Depend)
colnames(dataBY) <- c('Treatment','Dep2')
#str(dataBY)
Anovabyy <- (anovaBF(Dep2 ~ Treatment, data=dataBY, whichRandom = "ID",
rscaleFixed = prio,iterations = input$numberiterations))
post <- (posterior(Anovabyy,iterations = input$numberiterations))
MCMC <- data.frame(Iteration=1:input$numberiterations,post[,])
MCMCCom <- MCMC[,-c(2,ncol(MCMC),ncol(MCMC)-1)]
MCMCMer <- melt(MCMCCom, id.vars="Iteration")
ds <- map(levels(MCMCMer$variable), function(x){
MCMCMer <- density(MCMCMer$value[MCMCMer$variable == x])[1:2]
MCMCMer <- list_parse2(as.data.frame(MCMCMer))
list(data = MCMCMer, name = x)
})
highchart() %>%
hc_add_series_list(ds)%>%
hc_yAxis(title=list(text='Density'))%>%
hc_exporting(enabled = TRUE,
filename = paste0('Density curves - Posterior marginal distributions.'))
})
output$conclutionaovby <- renderText({
Data <- data()
Data <- na.omit(Data)
Dep <- input$y
Ind <- input$x
prio <- input$prior
Factor <- as.factor(as.matrix(Data[,Ind]))
Depend <- as.numeric(as.matrix(Data[,Dep]))
dataBY <- data.frame(Ind2=Factor, Dep2=Depend)
colnames(dataBY) <- c('Ind2','Dep2')
#str(dataBY)
Anovabyy <- (anovaBF(Dep2 ~ Ind2, data=dataBY, whichRandom = "all",
rscaleFixed = prio))
S <- data.frame(Priori=prio, BF=Anovabyy[1])
FB <- S[,2]
if (FB <= 3 & FB > 1 ){
response <- paste0('Weak evidence in favor of rejection of the null hypothesis')
} else if (FB <= 10 & FB > 3 ) {
response <- paste0('Moderate evidence in favor of rejection of the null hypothesis')
} else if (FB <= 30 & FB > 10 ){
response <- paste0('Strong evidence in favor of the rejection of the null hypothesis')
}else if (FB > 30 ){
response <- paste0('Decisive evidence in favor of the rejection of the null hypothesis')
}else if (FB < 1 & FB > 1/3 ){
response <- paste0('Weak evidence in favor of the null hypothesis')
}else if (FB <= 1/3 & FB > 1/10 ){
response <- paste0('Moderate evidence in favor of the null hypothesis')
}else if (FB <= 1/10 & FB > 1/30 ){
response <- paste0('Strong evidence in favor of the null hypothesis')
}else if (FB <= 1/30){
response <- paste0('Decisive evidence in favor of the null hypothesis')
}else if (FB == 1){
response <- paste0('There is no evidence')}
response
})
output$diagram <- renderHighchart({
Data <- data()
Data <- na.omit(Data)
Dep <- input$y
Ind <- input$x
alph <- input$alpha
Factor <- as.factor(as.matrix(Data[,Ind]))
Depend <- as.numeric(as.matrix(Data[,Dep]))
dataBY <- data.frame(Ind2=Factor, Dep2=Depend)
colnames(dataBY) <- c('Ind2','Dep2')
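# the assumptions are tested below; nodes of the decision diagram are coloured green (#77DA85) when that branch applies and grey (#D5D5D5) otherwise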
SA <- (aov(Dep2 ~ Ind2, data=dataBY))
Test <- lillie.test(SA$residuals)
if(Test$p.value >= alph ){
col_normality= "#77DA85"
col_normality_yes= "#77DA85"
col_normality_no= "#D5D5D5"
}else{
col_normality= "#D5D5D5"
col_normality_yes= "#D5D5D5"
col_normality_no= "#77DA85"
}
Bart <- bartlett.test(Dep2 ~ Ind2, data=dataBY)
if(Bart$p.value >= alph ){
col_homoscedasticity= "#77DA85"
col_homoscedasticity_yes= "#77DA85"
col_homoscedasticity_no= "#D5D5D5"
}else{
col_homoscedasticity= "#D5D5D5"
col_homoscedasticity_yes= "#D5D5D5"
col_homoscedasticity_no= "#77DA85"
}
if(skewness(SA$residuals) == 0 ){
col_symmetry= "#77DA85"
col_symmetry_yes= "#77DA85"
col_symmetry_no= "#D5D5D5"
}else{
col_symmetry= "#D5D5D5"
col_symmetry_yes= "#D5D5D5"
col_symmetry_no= "#77DA85"
}
if(durbinWatsonTest(SA)[3] >= alph){
col_independence= "#77DA85"
col_independence_yes= "#77DA85"
col_independence_no= "#D5D5D5"
}else{
col_independence= "#D5D5D5"
col_independence_yes= "#D5D5D5"
col_independence_no= "#77DA85"
}
if (col_symmetry_yes == "#77DA85"){
col_kw="#77DA85"
} else {col_kw="#D5D5D5" }
if (col_homoscedasticity_yes == "#77DA85"){
col_independence="#77DA85"
} else {col_independence="#D5D5D5" }
# if (col_independence_no == "#77DA85" | col_independence_yes == "#77DA85"){
# col_independence="#77DA85"
# } else {col_independence="#D5D5D5" }
if (col_normality_yes== "#77DA85" & col_homoscedasticity_yes== "#77DA85" ){
col_anova="#77DA85"
}else {col_anova="#D5D5D5" }
if (col_symmetry_yes=="#77DA85" | col_symmetry_no=="#77DA85"){
col_symmetry= "#77DA85"
}
if (col_homoscedasticity_yes=="#77DA85" | col_homoscedasticity_no=="#77DA85"){
col_homoscedasticity= "#77DA85"
}
highchart() %>%
hc_chart(type = 'organization', inverted = TRUE) %>%
hc_add_series(name='Diagram of techniques according to compliance with assumptions',
data = list(
list(from = 'Comparison of means by group', to = 'Does it comply with the normality assumption?'),
list(from = 'Does it comply with the normality assumption?', to = 'Yes, it fulfills normality'),
list(from = 'Yes, it fulfills normality', to = 'Does it meet the homoscedasticity assumption?'),
list(from = 'Does it comply with the normality assumption?', to = 'It does not meet normality'),
list(from = 'Does it meet the homoscedasticity assumption?', to = 'Yes, it fulfills homoscedasticity'),
list(from = 'Yes, it fulfills homoscedasticity', to = 'Does it comply with the independence assumption?'),
list(from = 'Does it comply with the independence assumption?', to = 'Yes, it fulfills independence'),
list(from = 'Does it comply with the independence assumption?', to = 'It does not meet independence'),
list(from = 'Does it meet the homoscedasticity assumption?', to = 'It does not meet homoscedasticity'),
list(from = 'Does it meet the symmetry assumption?', to = 'Yes, it fulfills symmetry'),
list(from = 'Does it meet the symmetry assumption?', to = 'It does not meet symmetry'),
list(from = 'It does not meet homoscedasticity', to = 'Does it meet the symmetry assumption?')
),
nodes= list(
list(id = 'Comparison of means by group', color="#77D0DA"),
list(id = 'Does it comply with the normality assumption?', color=col_normality),
list(id = 'Yes, it fulfills normality', color=col_normality_yes),
list(id = 'It does not meet normality', color=col_normality_no),
list(id = 'Does it meet the homoscedasticity assumption?', color=col_homoscedasticity),
list(id = 'Yes, it fulfills homoscedasticity', color=col_homoscedasticity_yes),
list(id = 'It does not meet homoscedasticity', color=col_homoscedasticity_no),
list(id = 'Does it meet the symmetry assumption?', color=col_symmetry),
list(id = 'Yes, it fulfills symmetry', color=col_symmetry_yes),
list(id = 'It does not meet symmetry', color=col_symmetry_no),
list(id = 'Does it comply with the independence assumption?', color=col_independence),
list(id = 'Yes, it fulfills independence', color=col_independence_yes),
list(id = 'It does not meet independence', color=col_independence_no)
))
})
output$technique <- renderHighchart({
Data <- data()
Data <- na.omit(Data)
Dep <- input$y
Ind <- input$x
Factor <- as.factor(as.matrix(Data[,Ind]))
dataBY <- data.frame(Ind2=Factor, Dep2=Data[,Dep])
colnames(dataBY) <- c('Ind2','Dep2')
SA <- (aov(Dep2 ~ Ind2, data=dataBY))
Test <- lillie.test(SA$residuals)
Bart <- bartlett.test(Dep2 ~ Ind2, data=dataBY)
if(Test$p.value >= input$alpha & Bart$p.value >= input$alpha){
col_anova="#77DA85"
} else {col_anova="#DC7676"}
if(skewness(SA$residuals) == 0){
col_kw="#77DA85"
} else {col_kw="#DC7676"}
highchart() %>%
hc_chart(type = 'organization', inverted=TRUE) %>%
hc_add_series(name='Diagram of techniques according to compliance with assumptions',
data = list(
list(from = 'Kruskal Wallis', to = 'Kruskal Wallis'),
list(from = 'Classic ANOVA', to = 'Classic ANOVA'),
list(from = 'Bayesian ANOVA', to = 'Bayesian ANOVA')
),
nodes= list(
list(id = 'Classic ANOVA', color=col_anova),
list(id = 'Kruskal Wallis', color=col_kw),
list(id = 'Bayesian ANOVA', color='#77DA85')
))
})
output$kw <- renderTable({
Data <- data()
Data <- na.omit(Data)
Dep <- input$y
Ind <- input$x
Factor <- as.factor(as.matrix(Data[,Ind]))
Depend <- as.numeric(as.matrix(Data[,Dep]))
SA <-kruskal.test(Depend~Factor, data = Data)
S <- data.frame(SA$statistic,SA$parameter,signif(SA$p.value,4))
colnames(S) <- c('Kruskal-Wallis chi-squared','Df','p-value')
S
})
output$conclusionKW <- renderText({
Data <- data()
Data <- na.omit(Data)
Dep <- input$y
Ind <- input$x
Factor <- as.factor(as.matrix(Data[,Ind]))
Depend <- as.numeric(as.matrix(Data[,Dep]))
SA <-kruskal.test(Depend~Factor, data = Data)
if (SA$p.value < input$alphakw){
response <- paste0('There are significant differences between the groups of ',Ind)
} else if (SA$p.value >= input$alphakw){
response <- paste0('There are no significant differences between the groups of ',Ind)}
response
})
output$KWpost <- DT::renderDataTable({
Data <- data()
Data <- na.omit(Data)
Dep <- input$y
Ind <- input$x
Factor <- as.factor(as.matrix(Data[,Ind]))
Depend <- as.numeric(as.matrix(Data[,Dep]))
Pares <- pairwise.wilcox.test(x = Depend, g = Factor, p.adjust.method = input$padjust )
Pv <- Pares$p.value
Pv[is.na(Pv)] <- ' - '
#Pv <- cbind(rownames(Pv),Pv)
DT::datatable(Pv, extensions = 'FixedColumns',
options = list(
dom = 't',
scrollX = TRUE,
fixedColumns = TRUE,
pageLength = nrow(Pv)
))
})
}))
}
| /scratch/gouwar.j/cran-all/cranData/AovBay/R/aovbayes.R |
#' @title Pollutions Data Set
#'
#' @description A data set on the removal of a pharmaceutical product classified as an emerging pollutant in aqueous medium using the vetiver species (Chrysopogon zizanioides).
#'
#' @format A data frame:
#' \describe{
#' \item{CONC.ppm}{Concentration of the pollutant in parts per million.}
#' \item{RemocionPorc}{Removal percentage.}
#' }
#' @source <http://revistabionatura.com/2021.06.01.7.html>
"PollutionData"
| /scratch/gouwar.j/cran-all/cranData/AovBay/R/data.R |
# Generated by rstantools. Do not edit by hand.
# names of stan models
stanmodels <- c("onewaymodel")
# load each stan module
Rcpp::loadModule("stan_fit4onewaymodel_mod", what = TRUE)
# instantiate each stanmodel object
stanmodels <- sapply(stanmodels, function(model_name) {
# create C++ code for stan model
stan_file <- if(dir.exists("stan")) "stan" else file.path("inst", "stan")
stan_file <- file.path(stan_file, paste0(model_name, ".stan"))
stanfit <- rstan::stanc_builder(stan_file,
allow_undefined = TRUE,
obfuscate_model_name = FALSE)
stanfit$model_cpp <- list(model_cppname = stanfit$model_name,
model_cppcode = stanfit$cppcode)
# create stanmodel object
methods::new(Class = "stanmodel",
model_name = stanfit$model_name,
model_code = stanfit$model_code,
model_cpp = stanfit$model_cpp,
mk_cppmodule = function(x) get(paste0("model_", model_name)))
})
| /scratch/gouwar.j/cran-all/cranData/AovBay/R/stanmodels.R |
#' read.apache.log
#'
#' Reads the Apache Log Common or Combined Format and returns a data frame with the log data.
#'
#' The function receives a full path to the log file and processes the default Apache log in common or combined format.
#' LogFormat "\%h \%l \%u \%t \\"\%r\\" \%>s \%b \\"\%\{Referer\}i\\" \\"\%\{User-Agent\}i\\"" combined
#' LogFormat "\%h \%l \%u \%t \\"\%r\\" \%>s \%b\\" common
#'
#' @param file string. Full path to the log file.
#' @param format string. Values "common" or "combined" to set the input log format. The default value is "combined".
#' @param url_includes regex. If passed, only the urls that match the regular expression will be returned.
#' @param url_excludes regex. If passed, only the urls that do not match the regular expression will be returned.
#' @param columns list. List of column names that will be included in the data frame output. All columns is the default value. c("ip", "datetime", "url", "httpcode", "size" , "referer", "useragent")
#' @param num_cores number. Number of cores for parallel execution, if not passed 1 core is assumed. Used only to convert datetime from string to datetime type.
#' @param fields_have_quotes boolean. If passed as TRUE, searches for and removes the quotes inside all text fields.
#' @return a data frame with the apache log file information.
#' @author Diogo Silveira Mendonca
#' @seealso \url{http://httpd.apache.org/docs/1.3/logs.html}
#' @examples
#' path_combined = system.file("examples", "access_log_combined.txt", package = "ApacheLogProcessor")
#' path_common = system.file("examples", "access_log_common.txt", package = "ApacheLogProcessor")
#'
#' #Read a log file with combined format and return it in a data frame
#' df1 = read.apache.access.log(path_combined)
#'
#' #Read a log file with common format and return it in a data frame
#' df2 = read.apache.access.log(path_common, format="common")
#'
#' #Read only the lines whose url matches the pattern passed
#' df3 = read.apache.access.log(path_combined, url_includes="infinance")
#'
#' #Read only the lines whose url matches the include pattern but does not match the exclude pattern
#' df4 = read.apache.access.log(path_combined,
#' url_includes="infinance", url_excludes="infinanceclient")
#'
#' #Return only the ip, url and datetime columns
#' df5 = read.apache.access.log(path_combined, columns=c("ip", "url", "datetime"))
#'
#' #Process using 2 cores in parallel for speed up.
#' df6 = read.apache.access.log(path_combined, num_cores=2)
#'
#'
#' @import foreach
#' @import parallel
#' @import doParallel
#' @importFrom utils read.csv
#' @export
read.apache.access.log <- function(file, format = "combined", url_includes = "", url_excludes = "",
columns = c("ip", "datetime", "url", "httpcode", "size" ,
"referer", "useragent"), num_cores = 1,
fields_have_quotes = TRUE){
#=== REMOVE QUOTES INSIDE QUOTES IN URL FIELD ===================================
if(fields_have_quotes == TRUE){
text <- readLines(file)
text <- gsub("\\\\\"", "'", text)
tConnection <- textConnection(text)
}else{
tConnection <- file
}
#=== LOAD THE APACHE ACCESS LOG FILE AS CSV =====================================
logDf = read.csv(tConnection, header = FALSE, sep = " ", quote = "\"",
dec = ".", fill = FALSE, stringsAsFactors = FALSE)
if (fields_have_quotes == TRUE){
close(tConnection)
}
#=== SET UP THE COLUMNS =========================================================
#remove the columns that will not be used
logDf$V2 <- NULL;
logDf$V3 <- NULL;
#set the column names
if(format == "common"){
cl <- c("ip", "datetime", "timezone", "url", "httpcode", "size")
colnames(logDf) <- cl
columns <- columns[columns %in% cl]
}else{
colnames(logDf) <- c("ip", "datetime", "timezone", "url", "httpcode", "size" , "referer", "useragent")
}
#include only the columns required
c_include = c()
for (col in colnames(logDf)){
if (col %in% columns){
c_include <- c(c_include, col)
if(col == "datetime"){
c_include <- c(c_include, "timezone")
}
}
}
logDf <- logDf[,c_include]
#=== APPLY RULES FROM LINES ====================================================
#filter the lines to be included
line_numbers <- grep(url_includes, t(logDf["url"]))
#filter the lines to be excluded
if(url_excludes != ""){
line_numbers <-
line_numbers[ !line_numbers %in%
grep(url_excludes, t(logDf["url"]))]
}
#Get only the necessary lines
logDf <- logDf[line_numbers,]
#=== CLEAR THE DATETIME AND TIMEZONE COLUMNS ====================================
if ("datetime" %in% c_include){
#Create a vector of dates
dates = seq( as.POSIXlt(Sys.Date()), by=1, len=nrow(logDf))
lct <- Sys.getlocale("LC_TIME")
Sys.setlocale("LC_TIME", "C")
#CREATE CLUSTERS FOR PARALLEL EXECUTION
cl <- makeCluster(num_cores)
registerDoParallel(cl)
#parse the dates from the data frame to the dates vector
i <- 0
dates <- foreach (i = 1:nrow(logDf), .combine=rbind) %dopar%{
Sys.setlocale("LC_TIME", "C")
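#after pasting, the raw field looks like "[10/Oct/2000:13:55:36 -0700]", matching "[%d/%b/%Y:%H:%M:%S %z]"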
datetimeWithTimezone = paste(logDf$datetime[i], logDf$timezone[i])
dates[i] <- strptime(datetimeWithTimezone, format="[%d/%b/%Y:%H:%M:%S %z]")
return(dates[i])
}
Sys.setlocale("LC_TIME", lct)
dates <- as.POSIXct(dates, origin="1970-01-01")
#Shutdown the cluster
stopCluster(cl)
#Create a new data frame with converted dates
datesFrame <- data.frame(dates)
colnames(datesFrame) <- c("datetime")
#Remove old date and timezone columns
logDf$datetime <- NULL;
logDf$timezone <- NULL;
#Inserts the converted dates in logDf data frame
logDf <- cbind(logDf, datesFrame)
}
#=== CONVERT THE SIZE COLUMN FROM TEXT TO NUMERIC ===========================
if("size" %in% c_include){
sizes <- logDf$size
sizes <- as.numeric(sizes)
logDf$size <- NULL
sizesFrame <- data.frame(sizes)
colnames(sizesFrame) <- c("size")
logDf <- cbind(logDf, sizesFrame)
}
#=== RETURN THE DATA FRAME ==================================================
logDf
}
#' Reads multiple files of apache web server.
#'
#' The files can be gzipped or not. If the files are gzipped, they are extracted one at a time, processed, and afterwards only the extracted copy is deleted.
#'
#'
#' @param path path where the files are located
#' @param prefix the prefix that identify the logs files
#' @param verbose if prints messages during the processing
#' @param ... parameter to be passed to read.apache.access.log function
#'
#' @return a data frame with the apache log files information.
#' @author Diogo Silveira Mendonca
#'
#' @examples
#' path <- system.file("examples", package="ApacheLogProcessor")
#' path <- paste(path, "/", sep="")
#'
#' #read multiple gziped logs with the prefix m_access_log_combined_
#' dfLog <- read.multiple.apache.access.log(path, "m_access_log_combined_")
#'
#' @export
read.multiple.apache.access.log <- function(path, prefix, verbose = TRUE, ...){
#create the dataframe variable
df <- NULL
#list the log files in the path
prefix <- paste("^", prefix, sep="")
fVector <- list.files(path, pattern = prefix)
if(verbose) print("Starting the log processing. This may take a long time...")
#for each log file
for (inputFile in fVector) {
if(verbose) print(paste("Processing file ", inputFile))
#check if the file is gziped
gziped = FALSE
gzipedFile <- NULL
if(grepl("\\.gz$", inputFile)){
gziped = TRUE
#store the name of the gzipped file
gzipedFile <- inputFile
#change the input file name to the unzipped file name
inputFile <- sub(".gz", "", inputFile)
#unzip the file
if(verbose) print(paste("Unziping ", gzipedFile))
write(readLines(zz <- gzfile(paste(path, gzipedFile, sep=""))),
file = paste(tempdir(), inputFile, sep="\\"))
close(zz)
unlink(zz)
}
#build the full file path
if(gziped == TRUE){
f <- paste(tempdir(), inputFile, sep = "\\")
}else{
f <- paste(path, inputFile, sep = "")
}
#read the log
if(verbose) print(paste("Reading file ", inputFile))
dfTemp <- read.apache.access.log(file = f, ...)
#if the first file read
if(is.null(df)){
#just assign
df <- dfTemp
}else{
#else concat with the previous dataframe
df <- rbind(df, dfTemp)
}
#delete the unzipped file
if(gziped){
file.remove(paste(tempdir(), inputFile, sep="\\"))
if(verbose) print(paste("Removed ", inputFile))
}
}
#sort the data frame by timestamp
df <- df[order(df$datetime, decreasing=FALSE), ]
#rbind casts the datetime column to integer; cast it back to date time
dates <- as.POSIXct(df$datetime, origin="1970-01-01")
#Create a new data frame with converted dates
datesFrame <- data.frame(dates)
colnames(datesFrame) <- c("datetime")
#Removes the old column
df$datetime <- NULL
#Inserts the converted dates in logDf data frame
df <- cbind(df, datesFrame)
#Clear the memory
remove(dates)
remove(datesFrame)
#return the dataframe
df
}
#' Clear a list of URLs according to the given parameters.
#'
#' @param urls list of URLs
#' @param remove_http_method boolean. If the http method will be removed from the urls.
#' @param remove_http_version boolean. If the http version will be removed from the urls.
#' @param remove_params_inside_url boolean. If the parameters inside the URL, commonly used in REST web services, will be removed from the urls.
#' @param remove_query_string boolean. If the query string will be removed from the urls.
#'
#' @return a vector with the urls cleaned
#' @author Diogo Silveira Mendonca
#' @export
#'
#' @examples
#'
#' #Load the path to the log file
#' path_combined = system.file("examples", "access_log_combined.txt", package = "ApacheLogProcessor")
#'
#' #Read a log file with combined format and return it in a data frame
#' df1 = read.apache.access.log(path_combined)
#'
#' #Clear the urls
#' urls <- clear.urls(df1$url)
#'
#' #Clear the urls but do not remove query strings
#' urlsWithQS <- clear.urls(df1$url, remove_query_string = FALSE)
#'
#' #Load a log which the urls have parameters inside
#' path2 = system.file("examples",
#' "access_log_with_params_inside_url.txt", package = "ApacheLogProcessor")
#'
#' #Read a log file with combined format and return it in a data frame
#' df2 = read.apache.access.log(path2, format = "common")
#'
#' #Clear the urls with parameters inside
#' urls2 <- clear.urls(df2$url)
#'
clear.urls <- function(urls, remove_http_method = TRUE,
remove_http_version = TRUE,
remove_params_inside_url = TRUE,
remove_query_string = TRUE){
#instantiate a new vector for the urls cleaned
urlsClean <- vector(length = length(urls))
for(i in 1:length(urls)){
urlsClean[i] <- urls[i]
#removes the query string
if (remove_query_string){
urlsClean[i] <- sub("\\?.* ", " ", urlsClean[i])
}
#removes the http version
if(remove_http_version){
urlsClean[i] <- sub(" HTTP/.*", "", urlsClean[i])
}
#removes the url method
if (remove_http_method){
urlsClean[i] <- sub("[A-Z]* ", "", urlsClean[i])
}
#removes the parameters inside urls
if (remove_params_inside_url){
#Common Parameter Patterns
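#e.g. "/users/123" becomes "/users" and "/users/123/edit" becomes "/users/edit" (hypothetical urls for illustration)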
urlsClean[i] <- gsub("/[0-9]+$", "", urlsClean[i])
urlsClean[i] <- gsub("/[0-9]+\\?", "\\?", urlsClean[i])
urlsClean[i] <- gsub("/[0-9]+/", "/", urlsClean[i])
#OsTicket Parameter Patterns
urlsClean[i] <- gsub("\\.[0-9]+$", "", urlsClean[i])
urlsClean[i] <- gsub("\\..{12}$", "", urlsClean[i])
}
}
urlsClean
}
#' Extracts the query string parameters and values from the urls of an access log data frame.
#'
#' The function supports multivalued parameters, but does not support parameters inside urls yet.
#' @param dfLog a dataframe with the access log. Can be loaded with read.apache.access.log or read.multiple.apache.access.log.
#'
#' @return a structure of data frames with query strings parameters for each url of the log
#' @author Diogo Silveira Mendonca
#' @importFrom utils URLdecode
#' @export
#'
#' @examples
#' #Load a log which the urls have query strings
#' path = system.file("examples", "access_log_with_query_string.log", package = "ApacheLogProcessor")
#'
#' #Read a log file with combined format and return it in a data frame
#' df = read.apache.access.log(path, format = "common")
#'
#' #Clear the urls with parameters inside
#' params <- get.url.params(df)
#'
get.url.params <- function(dfLog){
#extract the url column
urls <- dfLog$url
#instantiate a new list for the urls parameter
urlList <- list()
#for each url access
for(i in 1:length(urls)){
#clear urls so that we work only with the data needed
urlClean <- clear.urls(urls[i])
urlParams <- clear.urls(urls[i], remove_query_string = FALSE)
#instantiate the data frame for parameters
if(is.null(urlList[[urlClean]])){
dfParams <- data.frame(stringsAsFactors=FALSE)
}else{
dfParams <- urlList[[urlClean]]
}
#get the url parameters
getParams <- unlist(strsplit(urlParams, "?", fixed = TRUE))[2]
#check if the url has GET parameters
if(!is.na(getParams)){
#create a vector to store parameters name when they are discovered
newParams <- vector()
#get the parameter splited as a vector
parameter <- unlist(strsplit(getParams, "&", fixed = TRUE))
#new list to store the parameters for the urls
paramsList <- list()
#store the data frame row index
paramsList["dfRowIndex"] <- i
#store the data frame row name
paramsList["dfRowName"] <- rownames(dfLog[i, ])
multiValuedParams <- vector()
#for each parameter
for(j in 1:length(parameter)){
#split the key and value
keyValue <- unlist(strsplit(parameter[j], "=", fixed = TRUE))
key <- keyValue[1]
#avoid malformed params
if(is.na(key)) next
#decode the url value
value <- URLdecode(keyValue[2])
#store the key-value pair
#first check if is known that the parameter has multiple values
if(key %in% multiValuedParams){
#stores in the list
paramsList[[key]][nrow(paramsList[[key]]) + 1, "values"] <- value
}else{
#we do not know if it is multi-valued
#check if it is not multi-valued
if(!(key %in% names(paramsList))){
#store the single value
paramsList[key] <- value
}else{
#if it is multi-valued (second value found for the same parameter)
#create a data frame to store the values
tempFrame <- data.frame(stringsAsFactors = FALSE)
#the old value is the first
tempFrame[1, "values"] <- paramsList[key]
#the new value is the second
tempFrame[2, "values"] <- value
#stores the list
paramsList[[key]] <- tempFrame
#now we know that the parameter is multi-valued
multiValuedParams[[length(multiValuedParams) + 1]] <- key
}
}
#check if the key already exists as column in data frame
if(!(key %in% colnames(dfParams))){
#create a column
column <- vector(length = nrow(dfParams))
#add the column in data frame
dfParams <- cbind(dfParams, column, stringsAsFactors=FALSE)
#set their name
colnames(dfParams)[length(colnames(dfParams))] <- key
#store the key name for new parameters
newParams[[length(newParams)+1]] <- key
}
}
#check the columns that exists in the data frame but not in parameter
absentParamNames <- colnames(dfParams)[!(colnames(dfParams) %in% names(paramsList))]
#include the absent parameters in the line
for(name in absentParamNames){
paramsList[name] <- NA
}
#include the row in the data frame
dfParams <- rbind(dfParams, paramsList, make.row.names=FALSE, stringsAsFactors=FALSE)
#replace FALSE value by NA in the new columns
if (length(newParams) > 0){
for(p in 1:length(newParams)){
for(k in 1:(nrow(dfParams) -1)){
dfParams[k, newParams[p]] <- NA
}
}
}
}#end if the URL has parameters
#store the data frame in URL index
urlList[[urlClean]] <- dfParams
}#end for each URL
#return the list of URLs with its respective parameters data frame
urlList
}
#' Reads the Apache error log file and loads it into a data frame.
#'
#' @param file path to the error log file
#' @param columns which columns should be loaded. Default value is all columns. c("datetime", "logLevel", "pid", "ip_port", "msg")
#'
#' @return a data frame with the error log data
#' @author Diogo Silveira Mendonca
#' @import stringr
#' @export
#'
#' @examples
#'
#' #Loads the path of the error log
#' path <- system.file("examples", "error_log.log", package = "ApacheLogProcessor")
#'
#' #Loads the error log to a data frame
#' dfELog <- read.apache.error.log(path)
#'
read.apache.error.log <- function(file, columns = c("datetime", "logLevel", "pid", "ip_port", "msg")){
#store the client locale and change it
lct <- Sys.getlocale("LC_TIME")
Sys.setlocale("LC_TIME", "C")
#open the file for reading
con <- file(file, open = "r")
#create the return data frame
df <- data.frame(stringsAsFactors = FALSE)
#create a counter variable
i <- 0
#for each line in file
while (length(oneLine <- readLines(con, n = 1, warn = FALSE)) > 0) {
#increment the counter variable
i <- i + 1
#list the regexp format for each column
regChunks <- list()
regChunks["datetime"] <- "\\[(.*?)\\]"
regChunks["logLevel"] <- "\\[:(.*?)\\]"
regChunks["pid"] <- "\\[pid (.*?)\\]"
regChunks["ip_port"] <- "\\[client (.*?)\\]"
regChunks["msg"] <- "(.*)"
#build one regular expression with the columns passed
strRegexp <- NULL
for(column in columns){
if (is.null(strRegexp)){
strRegexp <- regChunks[column]
}else{
strRegexp <- paste(strRegexp, regChunks[column], sep=" ")
}
}
#match the regexp
dfChunks <- as.data.frame(str_match(oneLine, strRegexp), stringsAsFactors = FALSE)
#check if the line is well formed
if(ncol(dfChunks) == (length(columns)+1)){
#assign column names
names(dfChunks) <- c("line", columns)
#create a new entry
entry <- list()
#for each field
for(column in columns){
#get its value
value <- dfChunks[1, column]
#if the column is datetime clean and process datetime
if(column == "datetime"){
value <- dfChunks$datetime[1]
#strip milliseconds
value <- sub("\\.[0-9]+", "", value)
#parse date and time
value <- strptime(value, format="%a %b %e %H:%M:%S %Y")
#convert the datetime to include in dataframe
value <- as.POSIXct(value, origin="1970-01-01")
}
#store the entry
entry[column] <- value
}
#insert a new row in the data frame
df <- rbind(df, entry, stringsAsFactors = FALSE)
}else{
#skip and warn if it is a malformed line
warning(gettextf("Line %d at %s skiped. Diferent number of fields (%d) and columns (%d).", i, file, ncol(dfChunks) -1, length(columns)))
}
}#end of one line
#close the log file
close(con)
#convert datetime back to a readable format
df$datetime <- as.POSIXlt(df$datetime, origin="1970-01-01")
#restore the client locale
Sys.setlocale("LC_TIME", lct)
#return the data frame
df
}
#' Reads multiple apache error log files and loads them to a data frame.
#'
#' @param path path to the folder that contains the error log files
#' @param prefix prefix for all error log files that will be loaded
#' @param verbose if the function prints messages during the logs processing
#' @param ... parameters to be passed to read.apache.error.log function
#'
#' @return a data frame with the error log data
#' @export
#'
#' @examples
#'
#' path <- system.file("examples", package="ApacheLogProcessor")
#' path <- paste(path, "/", sep="")
#'
#' #read multiple gziped logs with the prefix m_access_log_combined_
#' dfELog <- read.multiple.apache.error.log(path, "m_error_log_")
#'
read.multiple.apache.error.log <- function(path, prefix, verbose = TRUE, ...){
#create the dataframe variable
df <- NULL
#list the log files in the path
prefix <- paste("^", prefix, sep="")
fVector <- list.files(path, pattern = prefix)
if(verbose) print("Starting the log processing. This may take a long time...")
#for each log file
for (inputFile in fVector) {
if(verbose) print(paste("Processing file ", inputFile))
#check if the file is gziped
gziped = FALSE
gzipedFile <- NULL
if(grepl("\\.gz$", inputFile)){
gziped = TRUE
#store the name of the gzipped file
gzipedFile <- inputFile
#change the input file name to the unzipped file name
inputFile <- sub(".gz", "", inputFile)
#unzip the file
if(verbose) print(paste("Unziping ", gzipedFile))
write(readLines(zz <- gzfile(paste(path, gzipedFile, sep=""))),
file = paste(tempdir(), inputFile, sep="\\"))
close(zz)
unlink(zz)
}
#build the full file path
if(gziped == TRUE){
f <- paste(tempdir(), inputFile, sep = "\\")
}else{
f <- paste(path, inputFile, sep = "")
}
#read the log
if(verbose) print(paste("Reading file ", inputFile))
dfTemp <- read.apache.error.log(file = f, ...)
#if the first file read
if(is.null(df)){
#just assign
df <- dfTemp
}else{
#else concat with the previous dataframe
df <- rbind(df, dfTemp)
}
#delete the unzipped file
if(gziped){
file.remove(paste(tempdir(), inputFile, sep="\\"))
if(verbose) print(paste("Removed ", inputFile))
}
}
#sort the data frame by timestamp
df <- df[order(df$datetime, decreasing=FALSE), ]
#rbind casts the datetime column to integer; cast it back to date time
dates <- as.POSIXct(df$datetime, origin="1970-01-01")
#Create a new data frame with converted dates
datesFrame <- data.frame(dates)
colnames(datesFrame) <- c("datetime")
#Removes the old column
df$datetime <- NULL
#Inserts the converted dates in logDf data frame
df <- cbind(df, datesFrame)
#Clear the memory
remove(dates)
remove(datesFrame)
#return the dataframe
df
}
#' Parses PHP messages and stores their parts in a data frame that contains level, message, file, line number and referer.
#'
#' @param dfErrorLog Error log loaded with the read.apache.error.log or read.multiple.apache.error.log functions.
#'
#' @return a data frame with PHP error message split in parts.
#' @export
#'
#' @examples
#'
#' #Loads the path of the error log
#' path <- system.file("examples", "error_log.log", package = "ApacheLogProcessor")
#'
#' #Loads the error log to a data frame
#' dfELog <- read.apache.error.log(path)
#'
#' dfPHPMsgs <- parse.php.msgs(dfELog)
#'
#'
parse.php.msgs <- function(dfErrorLog){
#create the return data frame
df <- data.frame(stringsAsFactors = FALSE)
#for each line in error log
for(i in 1:nrow(dfErrorLog)){
#extract the message
msg <- dfErrorLog$msg[i]
#check if it is a PHP message, same as startsWith
if (grepl("^PHP", msg)){
#create a entry
entry <- list()
#store the data frame row index
entry["dfRowIndex"] <- i
#store the data frame row name
entry["dfRowName"] <- rownames(dfErrorLog[i, ])
#match the regexp
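#a matching message looks like: "PHP Notice:  Undefined variable: x in /var/www/index.php on line 10, referer: http://example.com/" (the referer part is optional; example values are hypothetical)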
dfChunks <- as.data.frame(
str_match(msg, "PHP (.*?):(.*?) in (.*?) on line ([0-9]+)(, referer: (.*))?"),
stringsAsFactors = FALSE)
#give names to the columns
names(dfChunks) <- c("fullMsg", "level", "phpMsg", "file", "lineNo", "fullReferer", "referer")
#copy only the data of interest
entry["level"] <- dfChunks[1, "level"]
entry["phpMsg"] <- dfChunks[1, "phpMsg"]
entry["file"] <- dfChunks[1, "file"]
entry["lineNo"] <- as.numeric(dfChunks[1, "lineNo"])
entry["referer"] <- dfChunks[1, "referer"]
#bind a new row
df <- rbind(df, entry, stringsAsFactors = FALSE)
}else{
#skip and warn if it is not a PHP message
warning(gettextf("Line %d skiped. Not a PHP message.", i))
}
}
#return the data frame
df
}
#' Apache log combined file example.
#'
#' A set of 12 log lines in Apache Log Combined Format
#'
#' @format LogFormat "\%h \%l \%u \%t \\"\%r\\" \%>s \%b \\"\%\{Referer\}i\\" \\"\%\{User-Agent\}i\\"" combined
#' @source \url{http://www.infinance.com.br/}
#' @name access_log_combined
NULL
#' Apache log common file example.
#'
#' A set of 12 log lines in Apache Log Common Format
#'
#' @format LogFormat "\%h \%l \%u \%t \\"\%r\\" \%>s \%b\\" common
#' @source \url{http://www.infinance.com.br/}
#' @name access_log_common
NULL | /scratch/gouwar.j/cran-all/cranData/ApacheLogProcessor/R/ApacheLogProcessor.R |
#get upperRank from OTU name. The list is necessary when getting a rank higher than genus.
#this function is vectorized for the lowerRank.
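#e.g. get.upperRank("Homo_sapiens") returns "Homo"; with OTUrankData supplied, the rank is looked up from OTUrankData[[2]] instead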
get.upperRank<-function(data,OTUrankData=NULL)
{
#when getting genus from species name
if(is.null(OTUrankData))
{
match<-regexpr("_",data)
result<-substr(data,1,match-1)
}
else
#when getting upper rank from lower rank
{
match<-match(data,OTUrankData[[1]])
result<-OTUrankData[[2]][match]
}
return(result)
}
deleteAnomaly<-function(tree,score,OTUrankData=NULL,drop=FALSE)
{
droppingIndex<-list()
score<-score[order(as.numeric(score[,2]),decreasing=TRUE),]
topOTUScore<-score[1,2][[1]]
#when the top OTU score is 0, return
if(topOTUScore==0)
{
return("No anomaly OTU in the tree.")
}
#find index whose clade score is identical to the top clade score
topScoreOTU<-score[topOTUScore==score[,2],1]
if(length(topScoreOTU)==1)
{
#check if the topScoreOTU is monotypic
topRank<-get.upperRank(topScoreOTU,OTUrankData)
if(is.null(OTUrankData))
{
OTUrankData<-vector("list",2)
OTUrankData[[2]]<-get.upperRank(tree$tip)
}
rankOTU<-extractOTUbyRankName_C(topRank,tree$tip,OTUrankData[[2]])
#when the dropping OTU rank has one or more than two OTUs, drop the OTU with the highest score
if(length(rankOTU)==1||length(rankOTU)>2)
{
return(list(score[1,1][[1]],drop.tip(tree,score[1,1][[1]])))
}
#when the dropping OTU rank has two OTUs, compare their intruder scores and drop the one with the higher score
else
{
int1<-score[rankOTU[1]==score[,1],4]
int2<-score[rankOTU[2]==score[,1],4]
if(int1>int2)
{
if(drop)
{
return(list(rankOTU[1],drop.tip(tree,rankOTU[1])))
}
else
{
return(list(rankOTU[1],tree))
}
}
else if(int2>int1)
{
if(drop)
{
return(list(rankOTU[2],drop.tip(tree,rankOTU[2])))
}
else
{
return(list(rankOTU[2],tree))
}
}
else
{
if(drop)
{
return(list(rankOTU,drop.tip(tree,rankOTU)))
}
else
{
return(list(rankOTU,tree))
}
}
}
}
#when there are multiple OTUs with the highest score
else
{
#find OTUs whose clade number has the smallest number of OTUs
topScoreCladeNumber<-score[topOTUScore==score[,2],6]
minCladeNumber<-Inf
minCladeIndex<-0
for(i in 1:length(topScoreCladeNumber))
{
#count the same clade number OTUs
temp<-sum(topScoreCladeNumber[i]==topScoreCladeNumber)
if(minCladeNumber>temp)
{
minCladeIndex<-i
minCladeNumber<-temp
}
else if(minCladeNumber==temp)
{
minCladeIndex<-c(minCladeIndex,i)
}
}
#cladeNumber with the smallest number of OTUs
candidateCladeNumber<-topScoreCladeNumber[minCladeIndex]
#choose ones with the smallest clade number
candidateCladeNumber<-candidateCladeNumber[candidateCladeNumber==min(candidateCladeNumber)]
candidateCladeNumber<-candidateCladeNumber[1]
deletingOTU<-score[score[,6]==candidateCladeNumber,1]
if(drop)
{
return(list(deletingOTU,drop.tip(tree,deletingOTU)))
}
else
{
return(list(deletingOTU,tree))
}
}
}
autoDeletion<-function(tree,OTUrankData=NULL,show_progress=TRUE,num_threads=1)
{
if(length(tree$tip)<=3)
{
return("The tree includes only three or less OTUs.")
}
if(show_progress)
{
calcTime<-proc.time()
}
totalScore<-list()
droppedOTUs<-character()
dropIndex<-integer()
allRankNames<-getAllRankNames(tree,OTUrankData)
if(!is.null(OTUrankData))
{
rankList<-OTUrankData[[2]]
}
else
{
rankList<-get.upperRank(tree$tip)
}
allCentroids<-getAllCentroids(tree,OTUrankData,show_progress,num_threads)
counter<-1
progress = 0
firstPositiveScoreOTU = -1
currentPositiveScoreOTU = 0
while(TRUE)
{
if(length(tree$tip)<=3)
{
break
}
score<-calc.Score(tree,OTUrankData,allRankNames,allCentroids,dropIndex,show_progress=show_progress,num_threads=num_threads)
#check the score reached 0
if(score[1,2][[1]]==0)
{
break
}
if(firstPositiveScoreOTU==-1)
{
firstPositiveScoreOTU<-sum(score[,2]>0)
}
currentPositiveScoreOTU<-sum(score[,2]>0)
if(show_progress)
{
print(paste0("auto-deletion loop",counter))
if(firstPositiveScoreOTU == -1)
{
print(paste0("progress: 0%"))
}
else if(firstPositiveScoreOTU == 0)
{
print(paste0("progress: 100%"))
}
else
{
print(paste0("progress: " ,100*(firstPositiveScoreOTU-currentPositiveScoreOTU)/firstPositiveScoreOTU,"%"))
}
counter<-counter+1
}
totalScore<-append(totalScore,list(score))
temp<-deleteAnomaly(tree,score,OTUrankData)
droppedOTUs<-append(droppedOTUs,as.vector(temp[[1]]))
#index of dropped OTU
index<-match(temp[[1]],tree$tip)
dropIndex<-c(dropIndex,index)
lastDropRank<-unique(get.upperRank(tree$tip[index[1]],OTUrankData))
lastDropRankIndex<-match(lastDropRank,allRankNames)
#renew centroids
if(show_progress)
{
print("renewing a centroid")
centroidTime<-proc.time()
}
allCentroids[[lastDropRankIndex]]<-getRankCentroid_C(lastDropRank,dropIndex,tree$tip,tree[[1]][,1],tree[[1]][,2],rankList,show_progress,num_threads)
if(show_progress)
{
print(proc.time()-centroidTime)
}
}
tree<-drop.tip(tree,dropIndex)
if(show_progress)
{
print("total time")
print(proc.time()-calcTime)
}
return(list(resultantTree=tree,droppedOTU=droppedOTUs,scoreTransition=totalScore))
}
getAllRankNames<-function(tree,OTUrankData=NULL)
{
dataRank<-get.upperRank(tree$tip,OTUrankData)
return(unique(dataRank))
}
getAllCentroids<-function(tree,OTUrankData=NULL,show_progress=FALSE,num_threads=1)
{
allRankNames<-getAllRankNames(tree,OTUrankData)
allCentroid<-vector("list",length(allRankNames))
if(show_progress)
{
print("calculating all centroids")
calcTime<-proc.time()
}
if(is.null(OTUrankData))
{
OTUrankData<-list()
OTUrankData[[2]]<-get.upperRank(tree$tip)
}
allCentroid<-getAllCentroids_C(tree$tip,allRankNames,tree[[1]][,1],tree[[1]][,2],OTUrankData[[2]],show_progress,num_threads)
if(show_progress)
{
print(proc.time()-calcTime)
}
return(allCentroid)
}
#obtain upper node of a given node/tip
findUpperNode<-function(tree,node)
{
treeMatrix<-tree[[1]]
return(treeMatrix[treeMatrix[,2]==node,1])
}
#return TRUE if all the given nodes belong to the same rank
is.monophyleticByRank<-function(tree,nodeIndex,OTUrankData)
{
nodes<-tree$tip[nodeIndex]
rank<-OTUrankData[[2]][nodeIndex]
return(all(rank[1]==rank))
}
calc.Score<-function(tree,OTUrankData=NULL,allRankNames=NULL,allCentroids=NULL,dropIndex=NULL,sort=TRUE,show_progress=TRUE,num_threads=1)
{
OTUList<-tree$tip
intScore<-numeric(length(OTUList))
outScore<-numeric(length(OTUList))
OTUScore<-numeric(length(OTUList))
#numeric group which has the identical score and is monophyletic
cladeNumber<-numeric(length(OTUList))
#total score for the monophyletic group
cladeScore<-numeric(length(OTUList))
#cladeScore/number of OTUs in the clade
perCladeScore<-numeric(length(OTUList))
if(is.null(allCentroids))
{
allCentroids<-getAllCentroids(tree,OTUrankData,show_progress,num_threads)
}
if(is.null(dropIndex))
{
dropIndex<-integer(0)
}
if(is.null(allRankNames))
{
allRankNames<-getAllRankNames(tree,OTUrankData)
}
if(is.null(OTUrankData))
{
OTUrankData<-list()
OTUrankData[[1]]<-tree$tip
OTUrankData[[2]]<-get.upperRank(tree$tip)
}
isScored<-logical(length(OTUList))
scoreCounter<-1
if(show_progress)
{
#for new line
print("calculating score")
calcTime<-proc.time()
pb<-txtProgressBar(style=3)
}
range<-1:length(OTUList)
range<-setdiff(range,dropIndex)
for(i in range)
{
if(show_progress)
{
setTxtProgressBar(pb,i/length(range))
}
if(isScored[i])
{
next
}
isScored[i]<-TRUE
cladeIndex<-i
nextCladeIndex<-i
checkingNode<-i
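#expand the clade towards the root while all tips under the current node share the same rank; the whole clade is then scored as one unit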
while(is.monophyleticByRank(tree,nextCladeIndex,OTUrankData))
{
cladeIndex<-nextCladeIndex
checkingNode<-findUpperNode(tree,checkingNode)
#when the checking node is the root node
if(length(checkingNode)==0)
{
break
}
nextCladeIndex<-findSubTips_C(tree$tip,tree[[1]][,1],tree[[1]][,2],checkingNode)
}
intscore<-calcIntScore_C(tree$tip,tree[[1]][,1],tree[[1]][,2],OTUList[i],allCentroids,allRankNames,OTUrankData[[2]])
outscore<-calcOutScore_C(tree$tip,tree[[1]][,1],tree[[1]][,2],OTUList[i],allCentroids,allRankNames,OTUrankData[[2]],dropIndex=dropIndex)
intScore[cladeIndex]<-intscore
outScore[cladeIndex]<-outscore
OTUScore[cladeIndex]<-intscore+outscore
perCladeScore[cladeIndex]<-(intscore+outscore)/length(cladeIndex)
isScored[cladeIndex]<-TRUE
cladeNumber[cladeIndex]<-scoreCounter
scoreCounter<-scoreCounter+1
}
if(show_progress)
{
close(pb)
print(proc.time()-calcTime)
}
score<-array(c(OTUList,perCladeScore,OTUScore,intScore,outScore,cladeNumber),dim=c(length(OTUList),6))
score<-score[order(as.numeric(score[,6])),]
colnames(score)<-c("OTU","perCladeOTUScore","sum","intruder","outlier","#clade")
if(sort)
{
return(score[order(as.numeric(score[,2]),decreasing=TRUE),])
}
else
{
return(score)
}
}
| /scratch/gouwar.j/cran-all/cranData/Apoderoides/R/Apoderoides.R |
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
getTipNo_C <- function(treeTip, OTU) {
.Call(`_Apoderoides_getTipNo_C`, treeTip, OTU)
}
extractOTUbyRankName_C <- function(rankName, treeTip, rankList) {
.Call(`_Apoderoides_extractOTUbyRankName_C`, rankName, treeTip, rankList)
}
getRankCentroid_C <- function(rankName, dropIndex, treeTip, treeMatCol0, treeMatCol1, rankList, show_progress = 0L, num_threads = 1L) {
.Call(`_Apoderoides_getRankCentroid_C`, rankName, dropIndex, treeTip, treeMatCol0, treeMatCol1, rankList, show_progress, num_threads)
}
getAllCentroids_C <- function(treeTip, allRankNames, treeMatCol0, treeMatCol1, rankList, show_progress = 0L, num_threads = 1L) {
.Call(`_Apoderoides_getAllCentroids_C`, treeTip, allRankNames, treeMatCol0, treeMatCol1, rankList, show_progress, num_threads)
}
calcIntScore_C <- function(treeTip, treeMatCol0, treeMatCol1, OTU, allCentroids, allRankNames, rankList) {
.Call(`_Apoderoides_calcIntScore_C`, treeTip, treeMatCol0, treeMatCol1, OTU, allCentroids, allRankNames, rankList)
}
findSubTips_C <- function(treeTip, treeMatCol0, treeMatCol1, node) {
.Call(`_Apoderoides_findSubTips_C`, treeTip, treeMatCol0, treeMatCol1, node)
}
calcOutScore_C <- function(treeTip, treeMatCol0, treeMatCol1, OTU, allCentroids, allRankNames, rankList, dropIndex) {
.Call(`_Apoderoides_calcOutScore_C`, treeTip, treeMatCol0, treeMatCol1, OTU, allCentroids, allRankNames, rankList, dropIndex)
}
| /scratch/gouwar.j/cran-all/cranData/Apoderoides/R/RcppExports.R |
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----eval=FALSE---------------------------------------------------------------
# install.packages("Apoderoides")
## ----results="hide", warning=FALSE, message=FALSE-----------------------------
library(Apoderoides)
## ----eval=FALSE---------------------------------------------------------------
# data(testTree)
## ----eval=FALSE---------------------------------------------------------------
# testTree <- read.tree(file="/directory/yourTree.tre")
## ----eval=FALSE---------------------------------------------------------------
# #for the test tree
# calc.Score(testTree)
# #for the user imported tree
# calc.Score(tree)
## -----------------------------------------------------------------------------
calc.Score(testTree,show_progress=FALSE)[1:10,]
## ----eval=FALSE---------------------------------------------------------------
# data("testRankList")
## -----------------------------------------------------------------------------
data("testRankList")
testRankList[[1]][1:10]
testRankList[[2]][1:10]
## ----eval=FALSE---------------------------------------------------------------
# calc.Score(testTree,testRankList)
## ----eval=FALSE---------------------------------------------------------------
# #for genus level
# autoDeletion(testTree)
# #for family level
# autoDeletion(testTree,testRankList)
| /scratch/gouwar.j/cran-all/cranData/Apoderoides/inst/doc/vingnette.R |
---
title: "Apoderoides Tutorial"
author: "[Satoshi Aoki](https://sites.google.com/view/s-aoki)"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Apoderoides Tutorial}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
## 1 Aim of this package, Apoderoides
Apoderoides is an R package for finding and deleting erroneous taxa from a phylogenetic tree by calculating a score for each taxon. The score shows how erroneous a taxon is and helps prioritize which taxon should be deleted first. Apoderoides especially focuses on erroneous taxa caused by taxonomic mistakes or misidentification.
## 2 Installation
You can install Apoderoides with the following command on the R console. This command requires an Internet connection.
```{r, eval=FALSE}
install.packages("Apoderoides")
```
## 3 How to use
First, please load the package by the following command:
``` {r, results="hide", warning=FALSE, message=FALSE}
library(Apoderoides)
```
### 3.1 Import a phylogenetic tree
We need a phylogenetic tree for the analysis. The next command imports a test tree included in this package.
```{r, eval=FALSE}
data(testTree)
```
Otherwise, please load your own phylogenetic tree by a command like this:
```{r, eval=FALSE}
testTree <- read.tree(file="/directory/yourTree.tre")
```
### 3.2 Score calculation
#### 3.2.1. Score at genus level
Let's calculate the score of taxa in the loaded tree at genus level. Taxa with higher scores are more harmful to monophyly of genera, and such taxa are considered erroneous. The score is calculated by the next command:
```{r, eval=FALSE}
#for the test tree
calc.Score(testTree)
#for the user imported tree
calc.Score(tree)
```
Here, let's have a look at the top 10 scores of the test tree.
```{r}
calc.Score(testTree,show_progress=FALSE)[1:10,]
```
The columns of the calculation results show the following information:
- "OTU": The names of the tree tips.
- "perCladeOTUscore": The final score of the tree tip calculated by "sum" divided by the number of taxa with the same "#clade".
- "sum": The sum of "intruder" and "outlier".
- "intruder": The intruder score of the tree tip.
- "outlier": The outlier score of the tree tip.
- "#clade": Identifier of clades of the same rank (Here, genus). Different clades have different #clade.
In short, the intruder score shows how many clades of other ranks the tree tip intrudes into, and the outlier score shows how far the tree tip is from the main clade of its own rank.
The result shows that "Araucaria_cunninghamii" is by far the top candidate to delete from the tree due to its high score.
Please note that this function assumes that all the names of tree tips are scientific names connected by underscores, like "Homo_sapiens". If the tree tips are named otherwise, please see the next chapter.
#### 3.2.2. Score at the other rank
When you want to calculate the score for a rank other than genus, or when the tree tips are not scientific names, you need a list of the ranks to which the tree tips belong. Let's see the rank list of the test tree, loaded by the following command:
```{r, eval=FALSE}
data("testRankList")
```
The contents of the rank list look like this:
```{r}
data("testRankList")
testRankList[[1]][1:10]
testRankList[[2]][1:10]
```
The rank list is a list of size 2. The first element is a character vector equivalent to the tree tip labels (obtained by e.g. `testTree$tip`). The second element is a character vector of the rank names corresponding to the first element of the rank list. In this test data, the rank list indicates the family of the test tree tips. When the tree tips are not scientific names and you want to calculate the score at genus level, you can do so by putting the genus names in the second element of the rank list.
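As an illustration, a rank list can also be built by hand. The following chunk is only a sketch: the tip labels and rank names are hypothetical, and the only requirement is that the second element gives the rank of each tip in the same order as the first element.
```{r, eval=FALSE}
#hypothetical tip labels that are not scientific names
tipLabels <- c("sample1", "sample2", "sample3", "sample4")
#the rank (e.g. genus) of each tip, in the same order as tipLabels
tipRanks <- c("Homo", "Homo", "Pan", "Pan")
myRankList <- list(tipLabels, tipRanks)
calc.Score(tree, myRankList) #where "tree" is your own tree with these tip labels
```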
Using this rank list, the score of test tree for family can be calculated by the following command:
```{r, eval=FALSE}
calc.Score(testTree,testRankList)
```
The output can be interpreted just like the score for genus. The only difference is the score is based on monophyly of genus or family.
### 3.3 Auto deletion of erroneous tree tips
The score tells us which tree tip(s) are most erroneous in the tree. Therefore, repeating the score calculation and deleting the top-score tip(s) until all tips have a score of 0 can provide a tree without erroneous tips while deleting only a small number of tips. The following commands conduct such an auto-deletion of erroneous taxa for the test tree:
```{r, eval=FALSE}
#for genus level
autoDeletion(testTree)
#for family level
autoDeletion(testTree,testRankList)
```
The output of `autoDeletion()` is a list of size 3. The first element is the tree without erroneous tips. The second element is a character vector of deleted tree tips. The last element is a list of scores repeatedly calculated until all the erroneous tips are deleted.
### 3.4 The other utilities to help analysis
The functions `calc.Score()` and `autoDeletion()` have arguments of `show_progress` and `num_threads`. `show_progress` is a boolean (TRUE or FALSE) and TRUE by default. When it is TRUE, the progress of calculation is reported on the R console. When it is FALSE, it provides no reports but the calculation will be slightly faster.
`num_threads` is a positive integer and 1 by default. You can specify the number of threads for faster calculation with this argument. However, this option only takes effect when OpenMP is available, and the default compiler on macOS does not support OpenMP. Single-thread calculation is still available on macOS, but if you want to use multiple threads on macOS, you need to get OpenMP. One way to install OpenMP is to use the following commands in the terminal:
```{bash, eval=FALSE}
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
brew install libomp
```
The function `calc.Score()` also has an argument of `sort`.
`sort` is a boolean and TRUE by default. When it is FALSE, the resultant score is no longer sorted in descending order but is kept in the original order of the tree tips.
The function `get.upperRank()` returns the genus names of given scientific names, assuming that genus and epithet are connected by an underscore. This may be useful for looking up upper ranks when building a rank list.
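For example, a minimal sketch assuming the default underscore-separated naming:
```{r, eval=FALSE}
get.upperRank(c("Homo_sapiens", "Pan_troglodytes"))
#returns "Homo" "Pan"
```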
| /scratch/gouwar.j/cran-all/cranData/Apoderoides/inst/doc/vingnette.Rmd |
---
title: "Apoderoides Tutorial"
author: "[Satoshi Aoki](https://sites.google.com/view/s-aoki)"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Apoderoides Tutorial}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
## 1 Aim of this package, Apoderoides
Apoderoides is an R package for finding and deleting erroneous taxa from a phylogenetic tree by calculating a score for each taxon. The score shows how erroneous a taxon is and helps prioritize which taxon should be deleted first. Apoderoides especially focuses on erroneous taxa caused by taxonomic mistakes or misidentification.
## 2 Installation
You can install Apoderoides with the following command on the R console. This command requires an Internet connection.
```{r, eval=FALSE}
install.packages("Apoderoides")
```
## 3 How to use
First, please load the package by the following command:
``` {r, results="hide", warning=FALSE, message=FALSE}
library(Apoderoides)
```
### 3.1 Import a phylogenetic tree
We need a phylogenetic tree for the analysis. The next command imports a test tree included in this package.
```{r, eval=FALSE}
data(testTree)
```
Otherwise, please load your own phylogenetic tree by a command like this:
```{r, eval=FALSE}
testTree <- read.tree(file="/directory/yourTree.tre")
```
### 3.2 Score calculation
#### 3.2.1. Score at genus level
Let's calculate the score of taxa in the loaded tree at genus level. Taxa with higher scores are more harmful to monophyly of genera, and such taxa are considered erroneous. The score is calculated by the next command:
```{r, eval=FALSE}
#for the test tree
calc.Score(testTree)
#for the user imported tree
calc.Score(tree)
```
Here, let's have a look at the top 10 scores of the test tree.
```{r}
calc.Score(testTree,show_progress=FALSE)[1:10,]
```
The columns of the calculation results show the following information:
- "OTU": The names of the tree tips.
- "perCladeOTUscore": The final score of the tree tip calculated by "sum" divided by the number of taxa with the same "#clade".
- "sum": The sum of "intruder" and "outlier".
- "intruder": The intruder score of the tree tip.
- "outlier": The outlier score of the tree tip.
- "#clade": Identifier of clades of the same rank (Here, genus). Different clades have different #clade.
In short, the intruder score shows how many clades of other ranks the tree tip intrudes into, and the outlier score shows how far the tree tip is from the main clade of its own rank.
The result shows that "Araucaria_cunninghamii" is by far the top candidate to delete from the tree due to its high score.
Please note that this function assumes that all the names of tree tips are scientific names connected by underscores, like "Homo_sapiens". If the tree tips are named otherwise, please see the next chapter.
#### 3.2.2. Score at the other rank
When you want to calculate the score for a rank other than genus, or when the tree tips are not scientific names, you need a list of the ranks to which the tree tips belong. Let's see the rank list of the test tree, loaded by the following command:
```{r, eval=FALSE}
data("testRankList")
```
The contents of the rank list look like this:
```{r}
data("testRankList")
testRankList[[1]][1:10]
testRankList[[2]][1:10]
```
The rank list is a list of size 2. The first element is a character vector equivalent to the tree tip labels (obtained by e.g. `testTree$tip`). The second element is a character vector of the rank names corresponding to the first element of the rank list. In this test data, the rank list indicates the family of the test tree tips. When the tree tips are not scientific names and you want to calculate the score at genus level, you can do so by putting the genus names in the second element of the rank list.
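As an illustration, a rank list can also be built by hand. The following chunk is only a sketch: the tip labels and rank names are hypothetical, and the only requirement is that the second element gives the rank of each tip in the same order as the first element.
```{r, eval=FALSE}
#hypothetical tip labels that are not scientific names
tipLabels <- c("sample1", "sample2", "sample3", "sample4")
#the rank (e.g. genus) of each tip, in the same order as tipLabels
tipRanks <- c("Homo", "Homo", "Pan", "Pan")
myRankList <- list(tipLabels, tipRanks)
calc.Score(tree, myRankList) #where "tree" is your own tree with these tip labels
```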
Using this rank list, the score of test tree for family can be calculated by the following command:
```{r, eval=FALSE}
calc.Score(testTree,testRankList)
```
The output can be interpreted just like the score for genus. The only difference is the score is based on monophyly of genus or family.
### 3.3 Auto deletion of erroneous tree tips
The score tells us which tree tip(s) are most erroneous in the tree. Therefore, repeating the score calculation and deleting the top-score tip(s) until all tips have a score of 0 can provide a tree without erroneous tips while deleting only a small number of tips. The following commands conduct such an auto-deletion of erroneous taxa for the test tree:
```{r, eval=FALSE}
#for genus level
autoDeletion(testTree)
#for family level
autoDeletion(testTree,testRankList)
```
The output of `autoDeletion()` is a list of size 3. The first element is the tree without erroneous tips. The second element is a character vector of deleted tree tips. The last element is a list of scores repeatedly calculated until all the erroneous tips are deleted.
### 3.4 The other utilities to help analysis
The functions `calc.Score()` and `autoDeletion()` have arguments of `show_progress` and `num_threads`. `show_progress` is a boolean (TRUE or FALSE) and TRUE by default. When it is TRUE, the progress of calculation is reported on the R console. When it is FALSE, it provides no reports but the calculation will be slightly faster.
`num_threads` is a positive integer and 1 by default. You can specify the number of threads for faster calculation with this argument. However, this option only takes effect when OpenMP is available, and the default compiler on macOS does not support OpenMP. Single-thread calculation is still available on macOS, but if you want to use multiple threads on macOS, you need to get OpenMP. One way to install OpenMP is to use the following commands in the terminal:
```{bash, eval=FALSE}
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
brew install libomp
```
The function `calc.Score()` also has an argument of `sort`.
`sort` is a boolean and TRUE by default. When it is FALSE, the resultant score is no longer sorted in descending order but is kept in the original order of the tree tips.
The function `get.upperRank()` returns the genus names of given scientific names, assuming that genus and epithet are connected by an underscore. This may be useful for looking up upper ranks when building a rank list.
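For example, a minimal sketch assuming the default underscore-separated naming:
```{r, eval=FALSE}
get.upperRank(c("Homo_sapiens", "Pan_troglodytes"))
#returns "Homo" "Pan"
```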
| /scratch/gouwar.j/cran-all/cranData/Apoderoides/vignettes/vingnette.Rmd |
#' @title Apollonius diagram and Apollonius graph
#' @description Computation of the Apollonius diagram and the Apollonius graph
#' of some weighted 2D points. The Apollonius graph is the dual of the
#' Apollonius diagram. It is also called the additively weighted Voronoï
#' diagram.
#'
#' @param sites the 2D points, a numeric matrix with two columns (one point
#' per row)
#' @param radii the weights, a numeric vector of length equal to the number of
#' points (i.e. the number of rows of \code{sites})
#' @param tmax a positive number passed to \code{\link[gyro]{gyroray}},
#' controlling the length of the infinite edges (i.e. the hyperbolic rays)
#' of the Apollonius graph
#' @param nsegs a positive integer, the desired number of points of each
#' finite edge of the Apollonius graph
#' @param nrays a positive integer, the desired number of points of each
#' infinite edge of the Apollonius graph
#'
#' @return A list with two fields \code{diagram} and \code{graph}. The
#' \code{diagram} field is a list providing the sites and the faces of the
#' Apollonius diagram. The \code{graph} field is a list providing the sites
#' and the edges of the Apollonius graph.
#' @export
#'
#' @details
#' See the \href{https://doc.cgal.org/latest/Apollonius_graph_2/index.html#Chapter_2D_Apollonius_Graphs}{CGAL documentation}.
#'
#'
#' @importFrom gyro gyromidpoint gyrosegment gyroray gyroABt
#' @importFrom abind abind
#' @importFrom stats uniroot
#'
#' @examples
#' library(Apollonius)
#' sites <- rbind(
#' c(0, 0),
#' c(4, 1),
#' c(2, 4),
#' c(7, 4),
#' c(8, 0),
#' c(5, -2),
#' c(-4, 4),
#' c(-2, -1),
#' c(11, 4),
#' c(11, 0)
#' )
#' radii <- c(1, 1.5, 1.25, 2, 1.75, 0.5, 0.4, 0.6, 0.7, 0.3)
#' apo <- Apollonius(sites, radii)
#' opar <- par(mar = c(4, 4, 1, 1))
#' plotApolloniusGraph(apo, xlab = "x", ylab = "y")
#' par(opar)
#'
#' # Example of a non-valid graph ####
#' library(Apollonius)
#' sites <- rbind(
#' c(-1, -1),
#' c(-1, 1),
#' c(1, 1),
#' c(1, -1),
#' c(0, 0)
#' )
#' angle_ <- seq(0, 2*pi, length.out = 13L)[-1L]
#' circle <- cbind(2 * cos(angle_), 2 * sin(angle_))
#' sites <- rbind(sites, circle)
#' radii <- c(rep(2, 5), rep(1, 12))
#' \dontrun{apo <- Apollonius(sites, radii)}
Apollonius <- function(
sites, radii, tmax = 30, nsegs = 100L, nrays = 300L
) {
stopifnot(
is.numeric(sites), is.matrix(sites), ncol(sites) == 2L, nrow(sites) >= 3L
)
storage.mode(sites) <- "double"
if(anyNA(sites)) {
stop("Missing values are not allowed.")
}
if(anyDuplicated(sites)) {
stop("Found duplicated sites.")
}
stopifnot(is.numeric(radii), length(radii) == nrow(sites), all(radii != 0))
storage.mode(radii) <- "double"
if(anyNA(radii)) {
stop("Found missing value(s) in `radii`.")
}
stopifnot(isNumber(tmax), tmax > 1)
#
stuff <- ApolloniusCpp(sites, radii)
neighbors <- stuff[["neighbors"]]
if(nrow(neighbors) == 0L) {
stop("The Apollonius diagram is empty.")
}
#
Neighs <- vector("list", nrow(neighbors))
for(i in 1L:nrow(neighbors)) {
Neighs_i <- 1L:3L
neighs_i <- neighbors[i, ]
remove <- which(is.na(neighs_i))
for(j in seq_len(i-1L)) {
if(j %in% neighs_i && i %in% neighbors[j, ]) {
remove <- c(remove, which(neighs_i == j))
}
}
if(length(remove) > 0L) {
Neighs_i <- Neighs_i[-remove]
}
Neighs[[i]] <- Neighs_i
}
#
commonVertices <-
abind(stuff[["cvertex1"]], stuff[["cvertex2"]], along = 3L)
#
duals <- stuff[["duals"]]
vertices <- stuff[["vertices"]]
#
nedges <- sum(lengths(Neighs))
edges <- vector("list", nedges)
type <- character(nedges) # store the type segment or ray
h <- 1L
for(i in 1L:nrow(duals)) {
P1 <- duals[i, ] # it's a point or a line
infinite <- !is.na(P1[3L]) # it's a line
Neighs_i <- Neighs[[i]]
vert_i <- vertices[[i]]
for(k in seq_along(Neighs_i)) {
vs <- commonVertices[i, Neighs_i[k], ]
A <- vert_i[vs[1L], 1L:2L]
rA <- vert_i[vs[1L], 3L]
B <- vert_i[vs[2L], 1L:2L]
rB <- vert_i[vs[2L], 3L]
ctr <- (A + B)/2
P2 <- duals[neighbors[i, Neighs_i[k]], c(1L, 2L)]
if(infinite) {
# we take a point P on the hyperbolic ray, different from P2
type[h] <- "rays"
AB <- sqrt(c(crossprod(B-A)))
u <- (B-A) / AB
P <- A + (rA + (AB - (rA + rB))/2) * u
} else {
# we take a point P on the hyperbolic segment, different from P2
type[h] <- "segments"
P <- P1[c(1L, 2L)]
}
if(rA != rB) {
# we solve an equation to find the gyrocurvature s
f <- function(log_s) {
d <- ctr + gyromidpoint(P-ctr, P2-ctr, exp(log_s))
sqrt(c(crossprod(d-A))) - sqrt(c(crossprod(d-B))) - (rA - rB)
}
uroot <- uniroot(f, lower = -5, upper = 2, extendInt = "yes")
s <- exp(uroot[["root"]])
}
if(infinite) {
# P1 = (a, b, c) stores the parameters of the line ax+by+c=0
a <- P1[1L]
b <- P1[2L]
c <- P1[3L]
cP <- a*P[1L] + b*P[2L] + c
cP2 <- a*P2[1L] + b*P2[2L] + c
if(cP < 0) {
reverse <- cP2 > cP
} else {
reverse <- cP2 < cP
}
PP2 <- sqrt(c(crossprod(P - P2)))
tmaxi <- tmax / min(1, PP2)
nrays2 <- floor(nrays / min(1, PP2))
if(rA == rB) {
edges[[h]] <- ray(P2, P, OtoA = !reverse, tmax = tmaxi, n = 2L)
} else {
edges[[h]] <-
t(ctr + t(gyroray(
P2-ctr, P-ctr, s = s, OtoA = !reverse, tmax = tmaxi, n = nrays2
)))
}
} else {
if(rA == rB) {
edges[[h]] <- segment(P, P2, n = 2L)
} else {
edges[[h]] <- t(ctr + t(gyrosegment(
P-ctr, P2-ctr, s = s, n = nsegs
)))
}
}
h <- h + 1L
}
}
#
wsites <- cbind(sites, radii)
colnames(wsites) <- c("x", "y", "weight")
dsites <- duals[is.na(duals[, 3L]), ]
list(
"diagram" = list("sites" = wsites, "faces" = stuff[["faces"]]),
"graph" = list("sites" = dsites, "edges" = split(edges, type))
)
}
| /scratch/gouwar.j/cran-all/cranData/Apollonius/R/Apollonius.R |
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
ApolloniusCpp <- function(sites, radii) {
.Call(`_Apollonius_ApolloniusCpp`, sites, radii)
}
| /scratch/gouwar.j/cran-all/cranData/Apollonius/R/RcppExports.R |
#' @useDynLib Apollonius, .registration=TRUE
#' @importFrom Rcpp evalCpp
NULL
| /scratch/gouwar.j/cran-all/cranData/Apollonius/R/aaa.R |
#' @importFrom Polychrome createPalette
#' @noRd
distinctColors <- function(n, argsList) {
f <- function(...) {
createPalette(n, ...)
}
do.call(f, argsList)
}
#' @importFrom colorsGen randomColor
rcolors <- function(n, argsList) {
f <- function(...) {
randomColor(n, ...)
}
do.call(f, argsList)
}
| /scratch/gouwar.j/cran-all/cranData/Apollonius/R/colors.R |
isBoolean <- function(x) {
is.atomic(x) && is.logical(x) && length(x) == 1L && !is.na(x)
}
isString <- function(x) {
is.atomic(x) && is.character(x) && length(x) == 1L && !is.na(x)
}
isNumber <- function(x) {
is.numeric(x) && length(x) == 1L && !is.na(x)
}
isPositiveNumber <- function(x) {
isNumber(x) && x > 0
}
segment <- function(A, B, n) {
t_ <- seq(0, 1, length.out = n)
t(vapply(t_, function(t) {A + t*(B-A)}, numeric(2L)))
}
ray <- function(O, A, n, tmax, OtoA) {
stopifnot(isBoolean(OtoA))
if(OtoA) {
t_ <- seq(0, tmax, length.out = n)
} else {
t_ <- seq(-tmax, 0, length.out = n)
}
t(vapply(t_, function(t) {O + t*(A-O)}, numeric(2L)))
}
| /scratch/gouwar.j/cran-all/cranData/Apollonius/R/internal.R |
#' @title Plot Apollonius graph
#' @description Plot an Apollonius graph.
#'
#' @param apo an output of \code{\link{Apollonius}}
#' @param limits either \code{NULL} or a vector of length two passed to the
#' arguments \code{xlim} and \code{ylim} of \code{\link[graphics]{plot}};
#' if \code{NULL}, automatic limits are calculated
#' @param circles Boolean, whether to plot the original sites as circles with
#' the given radii
#' @param fill Boolean, whether to fill the circles if \code{circles=TRUE}
#' or to plot only their border
#' @param centers when \code{circles=TRUE} and \code{fill=FALSE}, whether to
#' plot the centers of the circles
#' @param colors a character string controlling the colors of the sites;
#' \code{"random"} to get multiple colors with
#' \code{\link[colorsGen]{randomColor}}, \code{"distinct"} to get multiple
#' colors with \code{\link[Polychrome]{createPalette}}, or a color name or
#' a hexadecimal color code
#' @param distinctArgs if \code{colors = "distinct"}, a list of arguments
#' passed to \code{\link[Polychrome]{createPalette}}
#' @param randomArgs if \code{colors = "random"}, a list of arguments passed
#' to \code{\link[colorsGen]{randomColor}}
#' @param ... arguments passed to \code{\link[graphics]{plot}}, such as
#' \code{xlab} and \code{ylab}
#'
#' @return No returned value, called for plotting.
#' @export
#'
#' @importFrom grDevices extendrange
#' @importFrom graphics plot points lines
#' @importFrom plotrix draw.circle
#'
#' @examples
#' library(Apollonius)
#' sites <- rbind(
#' c(0, 0),
#' c(4, 1),
#' c(2, 4),
#' c(7, 4),
#' c(8, 0),
#' c(5, -2),
#' c(-4, 4),
#' c(-2, -1),
#' c(11, 4),
#' c(11, 0)
#' )
#' radii <- c(1, 1.5, 1.25, 2, 1.75, 0.5, 0.4, 0.6, 0.7, 0.3)
#' apo <- Apollonius(sites, radii)
#' opar <- par(mar = c(3, 3, 1, 1))
#' plotApolloniusGraph(
#' apo, fill = FALSE, colors = "random", xlab = NA, ylab = NA
#' )
#' par(opar)
plotApolloniusGraph <- function(
apo, limits = NULL, circles = TRUE, fill = TRUE, centers = TRUE,
colors = "distinct",
distinctArgs = list(seedcolors = c("#ff0000", "#00ff00", "#0000ff")),
randomArgs = list(hue = "random", luminosity = "dark"), ...
) {
stopifnot(isBoolean(circles))
stopifnot(isBoolean(fill))
stopifnot(isBoolean(centers))
stopifnot(isString(colors))
sites <- apo[["diagram"]][["sites"]]
nsites <- nrow(sites)
radii <- sites[, "weight"]
dsites <- apo[["graph"]][["sites"]]
edges <- apo[["graph"]][["edges"]]
hsegments <- edges[["segments"]]
hrays <- edges[["rays"]]
#
if(colors == "distinct") {
clrs <- distinctColors(nsites, distinctArgs)
} else if(colors == "random") {
clrs <- rcolors(nsites, randomArgs)
} else {
clrs <- rep(colors, nsites)
}
#
if(is.null(limits)) {
x <- extendrange(sites[, "x"])
y <- extendrange(sites[, "y"])
limits <- c(min(x[1L], y[1L]), max(x[2L], y[2L]))
}
#
plot(NULL, xlim = limits, ylim = limits, asp = 1, ...)
if(circles) {
borders <- if(fill) NA else clrs
cols <- if(fill) clrs else NA
for(i in 1L:nsites) {
draw.circle(
sites[i, "x"], sites[i, "y"], radius = radii[i],
border = borders[i], col = cols[i], lwd = 2
)
}
if(!fill && centers) {
for(i in 1L:nsites) {
points(
sites[i, "x"], sites[i, "y"], pch = 19L, col = clrs[i]
)
}
}
} else {
for(i in 1L:nsites) {
points(
sites[i, "x"], sites[i, "y"], pch = 19L, col = clrs[i]
)
}
}
points(dsites, pch = 19)
for(i in seq_along(hsegments)) {
lines(hsegments[[i]], col="black", lwd = 2)
}
for(i in seq_along(hrays)) {
lines(hrays[[i]], col="black", lwd = 2)
}
invisible()
}
| /scratch/gouwar.j/cran-all/cranData/Apollonius/R/plotApollonius.R |
bookTheme <- function(set = TRUE){
theme <- list(
plot.polygon = list(alpha = 1, col = "aliceblue", border = "black", lty = 1, lwd = 1),
background = list(col = "transparent"),
bar.fill = list(col = "#cce6ff"),
box.rectangle = list(col = "black"),
box.umbrella = list(col = "black"),
dot.line = list(col = "#e8e8e8"),
dot.symbol = list(col = "black"),
plot.line = list(col = "black", lwd = 1, lty = 1),
plot.symbol = list(col = "black", pch = 16),
regions = list(col =
c("#FEF8FA", "#FDF6F9", "#FBF5F9", "#FAF3F8",
"#F8F2F7", "#F7F0F7", "#F5EEF6", "#F4EDF5",
"#F2EBF5", "#F1EAF4", "#EFE8F3", "#EDE7F2",
"#ECE5F1", "#EAE4F1", "#E8E2F0", "#E6E1EF",
"#E4DFEE", "#E2DEED", "#E0DCEC", "#DEDAEB",
"#DCD9EA", "#D9D7E9", "#D7D6E8", "#D4D4E7",
"#D1D2E6", "#CED1E5", "#CCCFE4", "#C8CEE3",
"#C5CCE2", "#C2CAE1", "#BFC9E0", "#BBC7DF",
"#B8C5DF", "#B4C4DE", "#B1C2DD", "#ADC0DC",
"#A9BFDB", "#A6BDDA", "#A2BBD9", "#9EB9D9",
"#9BB8D8", "#97B6D7", "#93B4D6", "#8FB2D5",
"#8BB0D4", "#87AFD3", "#83ADD2", "#7FABD1",
"#7AA9D0", "#76A7CF", "#71A5CE", "#6CA3CC",
"#68A1CB", "#63A0CA", "#5D9EC9", "#589CC8",
"#539AC6", "#4E98C5", "#4996C4", "#4493C3",
"#3F91C1", "#3A8FC0", "#358DBF", "#308BBE",
"#2C89BD", "#2887BC", "#2385BB", "#1F83BA",
"#1C80B9", "#187EB7", "#157CB6", "#127AB5",
"#0F78B3", "#0D76B2", "#0A73B0", "#0971AE",
"#076FAC", "#066DAA", "#056AA7", "#0568A5")
),
strip.shingle = list(col = c(
"#ff7f00", "#00ff00", "#00ffff",
"#ff00ff", "#ff0000", "#ffff00", "#0080ff")
),
strip.background = list(col = c(
"#ffe5cc", "#ccffcc", "#ccffff",
"#ffccff", "#ffcccc", "#ffffcc", "#cce6ff")
),
reference.line = list(col = "#e8e8e8"),
superpose.line = list(
col = c(
"#053061", "#B2182B", "#F46D43", "#5E4FA2", "#66C2A5", "black",
"#053061", "#B2182B", "#F46D43", "#5E4FA2", "#66C2A5", "black",
"#053061", "#B2182B", "#F46D43", "#5E4FA2", "#66C2A5", "black",
"#053061", "#B2182B", "#F46D43", "#5E4FA2", "#66C2A5", "black",
"#053061", "#B2182B", "#F46D43", "#5E4FA2", "#66C2A5", "black",
"#053061", "#B2182B", "#F46D43", "#5E4FA2", "#66C2A5", "black"
),
lty = rep(1:6, each = 6)),
superpose.symbol = list(
pch = c(
1, 4, 6, 0, 5, 17,
4, 6, 0, 5, 17, 1,
6, 0, 5, 17, 1, 4,
0, 5, 17, 1, 4, 6,
5, 17, 1, 4, 6, 0 ,
17, 1, 4, 6, 0, 5),
cex = rep(0.7, 6 * 6),
col = c(
"#053061", "#B2182B", "#F46D43", "#5E4FA2", "#66C2A5", "black",
"#053061", "#B2182B", "#F46D43", "#5E4FA2", "#66C2A5", "black",
"#053061", "#B2182B", "#F46D43", "#5E4FA2", "#66C2A5", "black",
"#053061", "#B2182B", "#F46D43", "#5E4FA2", "#66C2A5", "black",
"#053061", "#B2182B", "#F46D43", "#5E4FA2", "#66C2A5", "black",
"#053061", "#B2182B", "#F46D43", "#5E4FA2", "#66C2A5", "black"
)
)
)
if(set) trellis.par.set(theme)
invisible(theme)
}
| /scratch/gouwar.j/cran-all/cranData/AppliedPredictiveModeling/R/bookTheme.R |
easyBoundaryFunc <- function(n,
                             intercept = 0,
                             interaction = 2) {
  ## Simulate two correlated predictors from a bivariate normal distribution
  sigma <- matrix(c(2, 1.3, 1.3, 2), 2, 2)
  tmpData <- data.frame(mvrnorm(n = n, c(0, 0), sigma))
  xSeq <- seq(-4, 4, length = 40)
  plotGrid <- expand.grid(x = xSeq, y = xSeq)
  ## The true class probability comes from a logistic model with an
  ## interaction term between the two predictors
  zFoo <- function(x, y)
    intercept - 4 * x + 4 * y + interaction * x * y
  z2p <- function(x)
    1 / (1 + exp(-x))
  tmpData$prob <- z2p(zFoo(tmpData$X1, tmpData$X2))
  ## Randomly assign classes according to the class probabilities
  tmpData$class <-
    factor(ifelse(runif(length(tmpData$prob)) <= tmpData$prob, "Class1", "Class2"))
  tmpData
}
| /scratch/gouwar.j/cran-all/cranData/AppliedPredictiveModeling/R/easyBoundaryFunc.R |
getPackages <- function(chapter, ...) {
if(is.numeric(chapter))
chapter <- paste(chapter)
pkg <- list()
pkg[["2"]] <- c("earth", "caret", "lattice")
pkg[["3"]] <- c("e1071", "caret", "corrplot")
pkg[["4"]] <- c("kernlab", "caret")
pkg[["6"]] <- c("lattice", "corrplot", "pls", "elasticnet")
pkg[["7"]] <- c("caret", "earth", "kernlab","lattice", "nnet")
pkg[["8"]] <- c("caret", "Cubist", "gbm", "lattice", "party", "partykit",
"randomForest", "rpart", "RWeka")
pkg[["10"]] <- c("caret", "Cubist", "earth", "elasticnet", "gbm", "ipred",
"lattice", "nnet", "party","pls", "randomForests", "rpart",
"RWeka")
pkg[["11"]] <- c("caret", "MASS", "randomForest", "pROC", "klaR")
pkg[["12"]] <- c("caret", "glmnet", "lattice",
"MASS", "pamr", "pls", "pROC", "sparseLDA")
pkg[["13"]] <- c("caret", "kernlab", "klaR", "lattice", "latticeExtra",
"MASS", "mda", "nnet", "pROC")
pkg[["14"]] <- c("C50", "caret", "gbm", "lattice", "partykit", "pROC",
"randomForest", "reshape2",
"rpart", "RWeka")
pkg[["16"]] <- c("caret", "C50", "earth", "DMwR", "DWD", " kernlab", "mda",
"pROC", "randomForest", "rpart")
pkg[["17"]] <- c("C50", "caret", "earth", "Hmisc", "ipred", "tabplot",
"kernlab", "lattice", "MASS", "mda", "nnet", "pls",
"randomForest", "rpart", "sparseLDA")
pkg[["18"]] <- c("caret", "CORElearn", "corrplot", "pROC", "minerva")
pkg[["19"]] <- c("caret", "MASS", "corrplot", "RColorBrewer", "randomForest",
"kernlab", "klaR")
plist <-
paste(paste("'", names(pkg), "'", sep = ""), collapse = ", ")
if (!any(chapter %in% names(pkg)))
stop(paste("'chapter' must be: ",
paste(plist, collapse = ", ")))
pkg <- unlist(pkg[chapter])
pkg <- pkg[!is.na(pkg)]
pkg <- pkg[pkg != ""]
pkg <- pkg[order(tolower(pkg))]
install.packages(pkg, ...)
}
| /scratch/gouwar.j/cran-all/cranData/AppliedPredictiveModeling/R/getPackages.R |
upperp <- function(...) {
args <- list(...)
circ1 <- ellipse(diag(rep(1, 2)), t = .1)
panel.xyplot(
circ1[, 1],
circ1[, 2],
type = "l",
lty = trellis.par.get("reference.line")$lty,
col = trellis.par.get("reference.line")$col,
lwd = trellis.par.get("reference.line")$lwd
)
circ2 <- ellipse(diag(rep(1, 2)), t = .2)
panel.xyplot(
circ2[, 1],
circ2[, 2],
type = "l",
lty = trellis.par.get("reference.line")$lty,
col = trellis.par.get("reference.line")$col,
lwd = trellis.par.get("reference.line")$lwd
)
circ3 <- ellipse(diag(rep(1, 2)), t = .3)
panel.xyplot(
circ3[, 1],
circ3[, 2],
type = "l",
lty = trellis.par.get("reference.line")$lty,
col = trellis.par.get("reference.line")$col,
lwd = trellis.par.get("reference.line")$lwd
)
panel.xyplot(args$x,
args$y,
groups = args$groups,
subscripts = args$subscripts)
}
lowerp <- function(...) {
}
| /scratch/gouwar.j/cran-all/cranData/AppliedPredictiveModeling/R/panels.R |
permuteRelief <-
  function(x, y, nperm = 100, ...) {
    ## Compute the observed predictor importance scores with the original outcome
    dat <- x
    dat$y <- y
    obs <- attrEval(y ~ ., data = dat, ...)
    ## Re-score the predictors nperm times against a randomly permuted outcome
    permuted <- matrix(NA, ncol = length(obs), nrow = nperm)
    colnames(permuted) <- names(obs)
    for (i in 1:nperm) {
      dat$y <- sample(y)
      permuted[i,] <- attrEval(y ~ ., data = dat, ...)
    }
    ## Standardize the observed scores using the permutation distribution
    means <- colMeans(permuted)
    sds <- apply(permuted, 2, sd)
    ## Reshape the permuted scores into a long format for plotting
    permuted <- melt(permuted)
    names(permuted)[2] <- "Predictor"
    permuted$X1 <- NULL
    list(
      standardized = (obs - means) / sds,
      permutations = permuted,
      observed = obs,
      options = list(...)
    )
  }
| /scratch/gouwar.j/cran-all/cranData/AppliedPredictiveModeling/R/permuteRelief.R |
quadBoundaryFunc <- function(n) {
  ## Simulate two correlated predictors from a bivariate normal distribution
  sigma <- matrix(c(1, .7, .7, 2), 2, 2)
  tmpData <- data.frame(mvrnorm(n = n, c(1, 0), sigma))
  xSeq <- seq(-4, 4, length = 40)
  plotGrid <- expand.grid(x = xSeq, y = xSeq)
  ## The true class probability comes from a logistic model that is
  ## quadratic in the predictors
  zFoo <- function(x, y)
    - 1 - 2 * x - 0 * y - .2 * x ^ 2 + 2 * y ^ 2
  z2p <- function(x)
    1 / (1 + exp(-x))
  tmpData$prob <- z2p(zFoo(tmpData$X1, tmpData$X2))
  ## Randomly assign classes according to the class probabilities
  tmpData$class <-
    factor(ifelse(runif(length(tmpData$prob)) <= tmpData$prob, "Class1", "Class2"))
  tmpData
}
| /scratch/gouwar.j/cran-all/cranData/AppliedPredictiveModeling/R/quadBoundaryFunc.R |
scriptLocation <-
function()
system.file("chapters", package = "AppliedPredictiveModeling")
| /scratch/gouwar.j/cran-all/cranData/AppliedPredictiveModeling/R/scriptLocation.R |
transparentTheme <-
function(set = TRUE, pchSize = 1, trans = .2) {
theme <- list(
plot.polygon = list(alpha = 1, col = "aliceblue", border = "black", lty = 1, lwd = 1),
background = list(col = "transparent"),
bar.fill = list(col = "#cce6ff"),
box.rectangle = list(col = "black"),
box.umbrella = list(col = "black"),
dot.line = list(col = "#e8e8e8"),
dot.symbol = list(col = "black"),
plot.line = list(col = "black"),
plot.symbol = list(col = "black"),
regions = list(col =
c("#FEF8FA", "#FDF6F9", "#FBF5F9", "#FAF3F8",
"#F8F2F7", "#F7F0F7", "#F5EEF6", "#F4EDF5",
"#F2EBF5", "#F1EAF4", "#EFE8F3", "#EDE7F2",
"#ECE5F1", "#EAE4F1", "#E8E2F0", "#E6E1EF",
"#E4DFEE", "#E2DEED", "#E0DCEC", "#DEDAEB",
"#DCD9EA", "#D9D7E9", "#D7D6E8", "#D4D4E7",
"#D1D2E6", "#CED1E5", "#CCCFE4", "#C8CEE3",
"#C5CCE2", "#C2CAE1", "#BFC9E0", "#BBC7DF",
"#B8C5DF", "#B4C4DE", "#B1C2DD", "#ADC0DC",
"#A9BFDB", "#A6BDDA", "#A2BBD9", "#9EB9D9",
"#9BB8D8", "#97B6D7", "#93B4D6", "#8FB2D5",
"#8BB0D4", "#87AFD3", "#83ADD2", "#7FABD1",
"#7AA9D0", "#76A7CF", "#71A5CE", "#6CA3CC",
"#68A1CB", "#63A0CA", "#5D9EC9", "#589CC8",
"#539AC6", "#4E98C5", "#4996C4", "#4493C3",
"#3F91C1", "#3A8FC0", "#358DBF", "#308BBE",
"#2C89BD", "#2887BC", "#2385BB", "#1F83BA",
"#1C80B9", "#187EB7", "#157CB6", "#127AB5",
"#0F78B3", "#0D76B2", "#0A73B0", "#0971AE",
"#076FAC", "#066DAA", "#056AA7", "#0568A5")
),
strip.shingle = list(col = c(
"#ff7f00", "#00ff00", "#00ffff",
"#ff00ff", "#ff0000", "#ffff00", "#0080ff")),
strip.background = list(col = c(
"#ffe5cc", "#ccffcc", "#ccffff",
"#ffccff", "#ffcccc", "#ffffcc", "#cce6ff")),
reference.line = list(col = "#e8e8e8"),
superpose.line = list(
col = c(
rgb(1, 0, 0, trans), rgb(0, 0, 1, trans),
rgb(0.3984375, 0.7578125, 0.6445312, max(.6, trans)),
rgb(0, 0, 0, trans)),
lty = rep(1:2, 6)),
superpose.symbol = list(
pch = c(16, 15, 17, 18, 16),
cex = rep(pchSize, 5),
col = c(
rgb(1, 0, 0, trans), rgb(0, 0, 1, trans),
rgb(0.3984375, 0.7578125, 0.6445312, max(.6, trans)),
rgb(0, 0, 0, trans)))
)
if(set) trellis.par.set(theme, warn = FALSE)
invisible(theme)
}
| /scratch/gouwar.j/cran-all/cranData/AppliedPredictiveModeling/R/transparentTheme.R |
################################################################################
### R code from Applied Predictive Modeling (2013) by Kuhn and Johnson.
### Copyright 2013 Kuhn and Johnson
### Web Page: http://www.appliedpredictivemodeling.com
### Contact: Max Kuhn ([email protected])
###
### Chapter 2: A Short Tour of the Predictive Modeling Process
###
### Required packages: AppliedPredictiveModeling, earth, caret, lattice
###
### Data used: The FuelEconomy data in the AppliedPredictiveModeling package
###
### Notes:
### 1) This code is provided without warranty.
###
### 2) This code should help the user reproduce the results in the
### text. There will be differences between this code and what is in
### the computing section. For example, the computing sections show
### how the source functions work (e.g. randomForest() or plsr()),
### which were not directly used when creating the book. Also, there may be
### syntax differences that occur over time as packages evolve. These files
### will reflect those changes.
###
### 3) In some cases, the calculations in the book were run in
### parallel. The sub-processes may reset the random number seed.
### Your results may slightly vary.
###
################################################################################
################################################################################
### Section 2.1 Case Study: Predicting Fuel Economy
library(AppliedPredictiveModeling)
data(FuelEconomy)
## Format data for plotting against engine displacement
## Sort by engine displacement
cars2010 <- cars2010[order(cars2010$EngDispl),]
cars2011 <- cars2011[order(cars2011$EngDispl),]
## Combine data into one data frame
cars2010a <- cars2010
cars2010a$Year <- "2010 Model Year"
cars2011a <- cars2011
cars2011a$Year <- "2011 Model Year"
plotData <- rbind(cars2010a, cars2011a)
library(lattice)
xyplot(FE ~ EngDispl|Year, plotData,
xlab = "Engine Displacement",
ylab = "Fuel Efficiency (MPG)",
between = list(x = 1.2))
## Fit a single linear model and conduct 10-fold CV to estimate the error
library(caret)
set.seed(1)
lm1Fit <- train(FE ~ EngDispl,
data = cars2010,
method = "lm",
trControl = trainControl(method= "cv"))
lm1Fit
## Fit a quadratic model too
## Create squared terms
cars2010$ED2 <- cars2010$EngDispl^2
cars2011$ED2 <- cars2011$EngDispl^2
set.seed(1)
lm2Fit <- train(FE ~ EngDispl + ED2,
data = cars2010,
method = "lm",
trControl = trainControl(method= "cv"))
lm2Fit
## Finally a MARS model (via the earth package)
library(earth)
set.seed(1)
marsFit <- train(FE ~ EngDispl,
data = cars2010,
method = "earth",
tuneLength = 15,
trControl = trainControl(method= "cv"))
marsFit
plot(marsFit)
## Predict the test set data
cars2011$lm1 <- predict(lm1Fit, cars2011)
cars2011$lm2 <- predict(lm2Fit, cars2011)
cars2011$mars <- predict(marsFit, cars2011)
## Get test set performance values via caret's postResample function
postResample(pred = cars2011$lm1, obs = cars2011$FE)
postResample(pred = cars2011$lm2, obs = cars2011$FE)
postResample(pred = cars2011$mars, obs = cars2011$FE)
################################################################################
### Session Information
sessionInfo()
q("no")
| /scratch/gouwar.j/cran-all/cranData/AppliedPredictiveModeling/inst/chapters/02_A_Short_Tour.R |
################################################################################
### R code from Applied Predictive Modeling (2013) by Kuhn and Johnson.
### Copyright 2013 Kuhn and Johnson
### Web Page: http://www.appliedpredictivemodeling.com
### Contact: Max Kuhn ([email protected])
###
### Chapter 3: Data Pre-Processing
###
### Required packages: AppliedPredictiveModeling, e1071, caret, corrplot
###
### Data used: The (unprocessed) cell segmentation data from the
### AppliedPredictiveModeling package.
###
### Notes:
### 1) This code is provided without warranty.
###
### 2) This code should help the user reproduce the results in the
### text. There will be differences between this code and what is in
### the computing section. For example, the computing sections show
### how the source functions work (e.g. randomForest() or plsr()),
### which were not directly used when creating the book. Also, there may be
### syntax differences that occur over time as packages evolve. These files
### will reflect those changes.
###
### 3) In some cases, the calculations in the book were run in
### parallel. The sub-processes may reset the random number seed.
### Your results may slightly vary.
###
################################################################################
################################################################################
### Section 3.1 Case Study: Cell Segmentation in High-Content Screening
library(AppliedPredictiveModeling)
data(segmentationOriginal)
## Retain the original training set
segTrain <- subset(segmentationOriginal, Case == "Train")
## Remove the first three columns (identifier columns)
segTrainX <- segTrain[, -(1:3)]
segTrainClass <- segTrain$Class
################################################################################
### Section 3.2 Data Transformations for Individual Predictors
## The column VarIntenCh3 measures the standard deviation of the intensity
## of the pixels in the actin filaments
max(segTrainX$VarIntenCh3)/min(segTrainX$VarIntenCh3)
library(e1071)
skewness(segTrainX$VarIntenCh3)
library(caret)
## Use caret's preProcess function to transform for skewness
segPP <- preProcess(segTrainX, method = "BoxCox")
## Apply the transformations
segTrainTrans <- predict(segPP, segTrainX)
## Results for a single predictor
segPP$bc$VarIntenCh3
histogram(~segTrainX$VarIntenCh3,
xlab = "Natural Units",
type = "count")
histogram(~log(segTrainX$VarIntenCh3),
xlab = "Log Units",
ylab = " ",
type = "count")
segPP$bc$PerimCh1
histogram(~segTrainX$PerimCh1,
xlab = "Natural Units",
type = "count")
histogram(~segTrainTrans$PerimCh1,
xlab = "Transformed Data",
ylab = " ",
type = "count")
################################################################################
### Section 3.3 Data Transformations for Multiple Predictors
## R's prcomp is used to conduct PCA
pr <- prcomp(~ AvgIntenCh1 + EntropyIntenCh1,
data = segTrainTrans,
scale. = TRUE)
transparentTheme(pchSize = .7, trans = .3)
xyplot(AvgIntenCh1 ~ EntropyIntenCh1,
data = segTrainTrans,
groups = segTrain$Class,
xlab = "Channel 1 Fiber Width",
ylab = "Intensity Entropy Channel 1",
auto.key = list(columns = 2),
type = c("p", "g"),
main = "Original Data",
aspect = 1)
xyplot(PC2 ~ PC1,
data = as.data.frame(pr$x),
groups = segTrain$Class,
xlab = "Principal Component #1",
ylab = "Principal Component #2",
main = "Transformed",
xlim = extendrange(pr$x),
ylim = extendrange(pr$x),
type = c("p", "g"),
aspect = 1)
## Apply PCA to the entire set of predictors.
## There are a few predictors with only a single value, so we remove these first
## (since PCA uses variances, which would be zero)
isZV <- apply(segTrainX, 2, function(x) length(unique(x)) == 1)
segTrainX <- segTrainX[, !isZV]
segPP <- preProcess(segTrainX, c("BoxCox", "center", "scale"))
segTrainTrans <- predict(segPP, segTrainX)
segPCA <- prcomp(segTrainTrans, center = TRUE, scale. = TRUE)
## Plot a scatterplot matrix of the first three components
transparentTheme(pchSize = .8, trans = .3)
panelRange <- extendrange(segPCA$x[, 1:3])
splom(as.data.frame(segPCA$x[, 1:3]),
groups = segTrainClass,
type = c("p", "g"),
as.table = TRUE,
auto.key = list(columns = 2),
prepanel.limits = function(x) panelRange)
## Format the rotation values for plotting
segRot <- as.data.frame(segPCA$rotation[, 1:3])
## Derive the channel variable
vars <- rownames(segPCA$rotation)
channel <- rep(NA, length(vars))
channel[grepl("Ch1$", vars)] <- "Channel 1"
channel[grepl("Ch2$", vars)] <- "Channel 2"
channel[grepl("Ch3$", vars)] <- "Channel 3"
channel[grepl("Ch4$", vars)] <- "Channel 4"
segRot$Channel <- channel
segRot <- segRot[complete.cases(segRot),]
segRot$Channel <- factor(as.character(segRot$Channel))
## Plot a scatterplot matrix of the first three rotation variables
transparentTheme(pchSize = .8, trans = .7)
panelRange <- extendrange(segRot[, 1:3])
library(ellipse)
upperp <- function(...)
{
args <- list(...)
circ1 <- ellipse(diag(rep(1, 2)), t = .1)
panel.xyplot(circ1[,1], circ1[,2],
type = "l",
lty = trellis.par.get("reference.line")$lty,
col = trellis.par.get("reference.line")$col,
lwd = trellis.par.get("reference.line")$lwd)
circ2 <- ellipse(diag(rep(1, 2)), t = .2)
panel.xyplot(circ2[,1], circ2[,2],
type = "l",
lty = trellis.par.get("reference.line")$lty,
col = trellis.par.get("reference.line")$col,
lwd = trellis.par.get("reference.line")$lwd)
circ3 <- ellipse(diag(rep(1, 2)), t = .3)
panel.xyplot(circ3[,1], circ3[,2],
type = "l",
lty = trellis.par.get("reference.line")$lty,
col = trellis.par.get("reference.line")$col,
lwd = trellis.par.get("reference.line")$lwd)
panel.xyplot(args$x, args$y, groups = args$groups, subscripts = args$subscripts)
}
splom(~segRot[, 1:3],
groups = segRot$Channel,
lower.panel = function(...){}, upper.panel = upperp,
prepanel.limits = function(x) panelRange,
auto.key = list(columns = 2))
################################################################################
### Section 3.5 Removing Variables
## To filter on correlations, we first get the correlation matrix for the
## predictor set
segCorr <- cor(segTrainTrans)
library(corrplot)
corrplot(segCorr, order = "hclust", tl.cex = .35)
## caret's findCorrelation function is used to identify columns to remove.
highCorr <- findCorrelation(segCorr, .75)
################################################################################
### Section 3.8 Computing (Creating Dummy Variables)
data(cars)
type <- c("convertible", "coupe", "hatchback", "sedan", "wagon")
cars$Type <- factor(apply(cars[, 14:18], 1, function(x) type[which(x == 1)]))
carSubset <- cars[sample(1:nrow(cars), 20), c(1, 2, 19)]
head(carSubset)
levels(carSubset$Type)
simpleMod <- dummyVars(~Mileage + Type,
data = carSubset,
## Remove the variable name from the
## column name
levelsOnly = TRUE)
simpleMod
withInteraction <- dummyVars(~Mileage + Type + Mileage:Type,
data = carSubset,
levelsOnly = TRUE)
withInteraction
predict(withInteraction, head(carSubset))
################################################################################
### Session Information
sessionInfo()
q("no")
| /scratch/gouwar.j/cran-all/cranData/AppliedPredictiveModeling/inst/chapters/03_Data_Pre_Processing.R |
################################################################################
### R code from Applied Predictive Modeling (2013) by Kuhn and Johnson.
### Copyright 2013 Kuhn and Johnson
### Web Page: http://www.appliedpredictivemodeling.com
### Contact: Max Kuhn ([email protected])
###
### Chapter 4: Over-Fitting and Model Tuning
###
### Required packages: caret, doMC (optional), kernlab
###
### Data used:
###
### Notes:
### 1) This code is provided without warranty.
###
### 2) This code should help the user reproduce the results in the
### text. There will be differences between this code and what is in
### the computing section. For example, the computing sections show
### how the source functions work (e.g. randomForest() or plsr()),
### which were not directly used when creating the book. Also, there may be
### syntax differences that occur over time as packages evolve. These files
### will reflect those changes.
###
### 3) In some cases, the calculations in the book were run in
### parallel. The sub-processes may reset the random number seed.
### Your results may slightly vary.
###
################################################################################
################################################################################
### Section 4.6 Choosing Final Tuning Parameters
library(caret)
data(GermanCredit)
## First, remove near-zero variance predictors then get rid of a few predictors
## that duplicate values. For example, there are two possible values for the
## housing variable: "Rent", "Own" and "ForFree". So that we don't have linear
## dependencies, we get rid of one of the levels (e.g. "ForFree")
GermanCredit <- GermanCredit[, -nearZeroVar(GermanCredit)]
GermanCredit$CheckingAccountStatus.lt.0 <- NULL
GermanCredit$SavingsAccountBonds.lt.100 <- NULL
GermanCredit$EmploymentDuration.lt.1 <- NULL
GermanCredit$EmploymentDuration.Unemployed <- NULL
GermanCredit$Personal.Male.Married.Widowed <- NULL
GermanCredit$Property.Unknown <- NULL
GermanCredit$Housing.ForFree <- NULL
## Split the data into training (80%) and test sets (20%)
set.seed(100)
inTrain <- createDataPartition(GermanCredit$Class, p = .8)[[1]]
GermanCreditTrain <- GermanCredit[ inTrain, ]
GermanCreditTest <- GermanCredit[-inTrain, ]
## The model fitting code shown in the computing section is fairly
## simplistic. For the text we estimate the tuning parameter grid
## up-front and pass it in explicitly. This generally is not needed,
## but was used here so that we could trim the cost values to a
## presentable range and to re-use later with different resampling
## methods.
library(kernlab)
set.seed(231)
sigDist <- sigest(Class ~ ., data = GermanCreditTrain, frac = 1)
svmTuneGrid <- data.frame(sigma = as.vector(sigDist)[1], C = 2^(-2:7))
### Optional: parallel processing can be used via the 'do' packages,
### such as doMC, doMPI etc. We used doMC (not on Windows) to speed
### up the computations.
### WARNING: Be aware of how much memory is needed to parallel
### process. It can very quickly overwhelm the available hardware. We
### estimate the memory usage (VSIZE = total memory size) to be
### 2566M/core.
library(doMC)
registerDoMC(4)
set.seed(1056)
svmFit <- train(Class ~ .,
data = GermanCreditTrain,
method = "svmRadial",
preProc = c("center", "scale"),
tuneGrid = svmTuneGrid,
trControl = trainControl(method = "repeatedcv",
repeats = 5,
classProbs = TRUE))
## classProbs = TRUE was added since the text was written
## Print the results
svmFit
## A line plot of the average performance. The 'scales' argument is actually an
## argument to xyplot that converts the x-axis to log-2 units.
plot(svmFit, scales = list(x = list(log = 2)))
## Test set predictions
predictedClasses <- predict(svmFit, GermanCreditTest)
str(predictedClasses)
## Use the "type" option to get class probabilities
predictedProbs <- predict(svmFit, newdata = GermanCreditTest, type = "prob")
head(predictedProbs)
## Fit the same model using different resampling methods. The main syntax change
## is the control object.
set.seed(1056)
svmFit10CV <- train(Class ~ .,
data = GermanCreditTrain,
method = "svmRadial",
preProc = c("center", "scale"),
tuneGrid = svmTuneGrid,
trControl = trainControl(method = "cv", number = 10))
svmFit10CV
set.seed(1056)
svmFitLOO <- train(Class ~ .,
data = GermanCreditTrain,
method = "svmRadial",
preProc = c("center", "scale"),
tuneGrid = svmTuneGrid,
trControl = trainControl(method = "LOOCV"))
svmFitLOO
set.seed(1056)
svmFitLGO <- train(Class ~ .,
data = GermanCreditTrain,
method = "svmRadial",
preProc = c("center", "scale"),
tuneGrid = svmTuneGrid,
trControl = trainControl(method = "LGOCV",
number = 50,
p = .8))
svmFitLGO
set.seed(1056)
svmFitBoot <- train(Class ~ .,
data = GermanCreditTrain,
method = "svmRadial",
preProc = c("center", "scale"),
tuneGrid = svmTuneGrid,
trControl = trainControl(method = "boot", number = 50))
svmFitBoot
set.seed(1056)
svmFitBoot632 <- train(Class ~ .,
data = GermanCreditTrain,
method = "svmRadial",
preProc = c("center", "scale"),
tuneGrid = svmTuneGrid,
trControl = trainControl(method = "boot632",
number = 50))
svmFitBoot632
################################################################################
### Section 4.8 Choosing Between Models
set.seed(1056)
glmProfile <- train(Class ~ .,
data = GermanCreditTrain,
method = "glm",
trControl = trainControl(method = "repeatedcv",
repeats = 5))
glmProfile
resamp <- resamples(list(SVM = svmFit, Logistic = glmProfile))
summary(resamp)
## These results are slightly different from those shown in the text.
## There are some differences in the train() function since the
## original results were produced. This is due to a difference in
## predictions from the ksvm() function when class probs are requested
## and when they are not. See, for example,
## https://stat.ethz.ch/pipermail/r-help/2013-November/363188.html
modelDifferences <- diff(resamp)
summary(modelDifferences)
## The actual paired t-test:
modelDifferences$statistics$Accuracy
################################################################################
### Session Information
sessionInfo()
q("no")
| /scratch/gouwar.j/cran-all/cranData/AppliedPredictiveModeling/inst/chapters/04_Over_Fitting.R |
################################################################################
### R code from Applied Predictive Modeling (2013) by Kuhn and Johnson.
### Copyright 2013 Kuhn and Johnson
### Web Page: http://www.appliedpredictivemodeling.com
### Contact: Max Kuhn ([email protected])
###
### Chapter 6: Linear Regression and Its Cousins
###
### Required packages: AppliedPredictiveModeling, lattice, corrplot, pls,
### elasticnet,
###
### Data used: The solubility from the AppliedPredictiveModeling package
###
### Notes:
### 1) This code is provided without warranty.
###
### 2) This code should help the user reproduce the results in the
### text. There will be differences between this code and what is in
### the computing section. For example, the computing sections show
### how the source functions work (e.g. randomForest() or plsr()),
### which were not directly used when creating the book. Also, there may be
### syntax differences that occur over time as packages evolve. These files
### will reflect those changes.
###
### 3) In some cases, the calculations in the book were run in
### parallel. The sub-processes may reset the random number seed.
### Your results may slightly vary.
###
################################################################################
################################################################################
### Section 6.1 Case Study: Quantitative Structure- Activity
### Relationship Modeling
library(AppliedPredictiveModeling)
data(solubility)
library(lattice)
### Some initial plots of the data
xyplot(solTrainY ~ solTrainX$MolWeight, type = c("p", "g"),
ylab = "Solubility (log)",
main = "(a)",
xlab = "Molecular Weight")
xyplot(solTrainY ~ solTrainX$NumRotBonds, type = c("p", "g"),
ylab = "Solubility (log)",
xlab = "Number of Rotatable Bonds")
bwplot(solTrainY ~ ifelse(solTrainX[,100] == 1,
"structure present",
"structure absent"),
ylab = "Solubility (log)",
main = "(b)",
horizontal = FALSE)
### Find the columns that are not fingerprints (i.e. the continuous
### predictors). grep will return a list of integers corresponding to
### column names that contain the pattern "FP".
notFingerprints <- grep("FP", names(solTrainXtrans))
library(caret)
featurePlot(solTrainXtrans[, -notFingerprints],
solTrainY,
between = list(x = 1, y = 1),
type = c("g", "p", "smooth"),
labels = rep("", 2))
library(corrplot)
### We used the full namespace to call this function because the pls
### package (also used in this chapter) has a function with the same
### name.
corrplot::corrplot(cor(solTrainXtrans[, -notFingerprints]),
order = "hclust",
tl.cex = .8)
################################################################################
### Section 6.2 Linear Regression
### Create a control function that will be used across models. We
### create the fold assignments explicitly instead of relying on the
### random number seed being set to identical values.
set.seed(100)
indx <- createFolds(solTrainY, returnTrain = TRUE)
ctrl <- trainControl(method = "cv", index = indx)
### Linear regression model with all of the predictors. This will
### produce some warnings that a 'rank-deficient fit may be
### misleading'. This is related to the predictors being so highly
### correlated that some of the math has broken down.
set.seed(100)
lmTune0 <- train(x = solTrainXtrans, y = solTrainY,
method = "lm",
trControl = ctrl)
lmTune0
### And another using a set of predictors reduced by unsupervised
### filtering. We apply a filter to reduce extreme between-predictor
### correlations. Note the lack of warnings.
tooHigh <- findCorrelation(cor(solTrainXtrans), .9)
trainXfiltered <- solTrainXtrans[, -tooHigh]
testXfiltered <- solTestXtrans[, -tooHigh]
set.seed(100)
lmTune <- train(x = trainXfiltered, y = solTrainY,
method = "lm",
trControl = ctrl)
lmTune
### Save the test set results in a data frame
testResults <- data.frame(obs = solTestY,
Linear_Regression = predict(lmTune, testXfiltered))
################################################################################
### Section 6.3 Partial Least Squares
## Run PLS and PCR on solubility data and compare results
set.seed(100)
plsTune <- train(x = solTrainXtrans, y = solTrainY,
method = "pls",
tuneGrid = expand.grid(ncomp = 1:20),
trControl = ctrl)
plsTune
testResults$PLS <- predict(plsTune, solTestXtrans)
set.seed(100)
pcrTune <- train(x = solTrainXtrans, y = solTrainY,
method = "pcr",
tuneGrid = expand.grid(ncomp = 1:35),
trControl = ctrl)
pcrTune
plsResamples <- plsTune$results
plsResamples$Model <- "PLS"
pcrResamples <- pcrTune$results
pcrResamples$Model <- "PCR"
plsPlotData <- rbind(plsResamples, pcrResamples)
xyplot(RMSE ~ ncomp,
data = plsPlotData,
#aspect = 1,
xlab = "# Components",
ylab = "RMSE (Cross-Validation)",
auto.key = list(columns = 2),
groups = Model,
type = c("o", "g"))
plsImp <- varImp(plsTune, scale = FALSE)
plot(plsImp, top = 25, scales = list(y = list(cex = .95)))
################################################################################
### Section 6.4 Penalized Models
## The text used the elasticnet package to obtain a ridge regression model.
## There is now a simple ridge regression method.
ridgeGrid <- expand.grid(lambda = seq(0, .1, length = 15))
set.seed(100)
ridgeTune <- train(x = solTrainXtrans, y = solTrainY,
method = "ridge",
tuneGrid = ridgeGrid,
trControl = ctrl,
preProc = c("center", "scale"))
ridgeTune
print(update(plot(ridgeTune), xlab = "Penalty"))
enetGrid <- expand.grid(lambda = c(0, 0.01, .1),
fraction = seq(.05, 1, length = 20))
set.seed(100)
enetTune <- train(x = solTrainXtrans, y = solTrainY,
method = "enet",
tuneGrid = enetGrid,
trControl = ctrl,
preProc = c("center", "scale"))
enetTune
plot(enetTune)
testResults$Enet <- predict(enetTune, solTestXtrans)
################################################################################
### Session Information
sessionInfo()
q("no")
| /scratch/gouwar.j/cran-all/cranData/AppliedPredictiveModeling/inst/chapters/06_Linear_Regression.R |
################################################################################
### R code from Applied Predictive Modeling (2013) by Kuhn and Johnson.
### Copyright 2013 Kuhn and Johnson
### Web Page: http://www.appliedpredictivemodeling.com
### Contact: Max Kuhn ([email protected])
###
### Chapter 7: Non-Linear Regression Models
###
### Required packages: AppliedPredictiveModeling, caret, doMC (optional), earth,
### kernlab, lattice, nnet
###
### Data used: The solubility from the AppliedPredictiveModeling package
###
### Notes:
### 1) This code is provided without warranty.
###
### 2) This code should help the user reproduce the results in the
### text. There will be differences between this code and what is is
### the computing section. For example, the computing sections show
### how the source functions work (e.g. randomForest() or plsr()),
### which were not directly used when creating the book. Also, there may be
### syntax differences that occur over time as packages evolve. These files
### will reflect those changes.
###
### 3) In some cases, the calculations in the book were run in
### parallel. The sub-processes may reset the random number seed.
### Your results may slightly vary.
###
################################################################################
################################################################################
### Load the data
library(AppliedPredictiveModeling)
data(solubility)
### Create a control function that will be used across models. We
### create the fold assignments explicitly instead of relying on the
### random number seed being set to identical values.
library(caret)
set.seed(100)
indx <- createFolds(solTrainY, returnTrain = TRUE)
ctrl <- trainControl(method = "cv", index = indx)
################################################################################
### Section 7.1 Neural Networks
### Optional: parallel processing can be used via the 'do' packages,
### such as doMC, doMPI etc. We used doMC (not on Windows) to speed
### up the computations.
### WARNING: Be aware of how much memory is needed to parallel
### process. It can very quickly overwhelm the available hardware. We
### estimate the memory usage (VSIZE = total memory size) to be
### 2677M/core.
library(doMC)
registerDoMC(10)
library(caret)
nnetGrid <- expand.grid(decay = c(0, 0.01, .1),
size = c(1, 3, 5, 7, 9, 11, 13),
bag = FALSE)
set.seed(100)
nnetTune <- train(x = solTrainXtrans, y = solTrainY,
method = "avNNet",
tuneGrid = nnetGrid,
trControl = ctrl,
preProc = c("center", "scale"),
linout = TRUE,
trace = FALSE,
MaxNWts = 13 * (ncol(solTrainXtrans) + 1) + 13 + 1,
maxit = 1000,
allowParallel = FALSE)
nnetTune
plot(nnetTune)
testResults <- data.frame(obs = solTestY,
NNet = predict(nnetTune, solTestXtrans))
################################################################################
### Section 7.2 Multivariate Adaptive Regression Splines
set.seed(100)
marsTune <- train(x = solTrainXtrans, y = solTrainY,
method = "earth",
tuneGrid = expand.grid(degree = 1, nprune = 2:38),
trControl = ctrl)
marsTune
plot(marsTune)
testResults$MARS <- predict(marsTune, solTestXtrans)
marsImp <- varImp(marsTune, scale = FALSE)
plot(marsImp, top = 25)
################################################################################
### Section 7.3 Support Vector Machines
## In a recent update to caret, the method to estimate the
## sigma parameter was slightly changed. These results will
## slightly differ from the text for that reason.
set.seed(100)
svmRTune <- train(x = solTrainXtrans, y = solTrainY,
method = "svmRadial",
preProc = c("center", "scale"),
tuneLength = 14,
trControl = ctrl)
svmRTune
plot(svmRTune, scales = list(x = list(log = 2)))
svmGrid <- expand.grid(degree = 1:2,
scale = c(0.01, 0.005, 0.001),
C = 2^(-2:5))
set.seed(100)
svmPTune <- train(x = solTrainXtrans, y = solTrainY,
method = "svmPoly",
preProc = c("center", "scale"),
tuneGrid = svmGrid,
trControl = ctrl)
svmPTune
plot(svmPTune,
scales = list(x = list(log = 2),
between = list(x = .5, y = 1)))
testResults$SVMr <- predict(svmRTune, solTestXtrans)
testResults$SVMp <- predict(svmPTune, solTestXtrans)
################################################################################
### Section 7.4 K-Nearest Neighbors
### First we remove near-zero variance predictors
knnDescr <- solTrainXtrans[, -nearZeroVar(solTrainXtrans)]
set.seed(100)
knnTune <- train(x = knnDescr, y = solTrainY,
method = "knn",
preProc = c("center", "scale"),
tuneGrid = data.frame(k = 1:20),
trControl = ctrl)
knnTune
plot(knnTune)
testResults$Knn <- predict(knnTune, solTestXtrans[, names(knnDescr)])
################################################################################
### Session Information
sessionInfo()
q("no")
| /scratch/gouwar.j/cran-all/cranData/AppliedPredictiveModeling/inst/chapters/07_Non-Linear_Reg.R |
################################################################################
### R code from Applied Predictive Modeling (2013) by Kuhn and Johnson.
### Copyright 2013 Kuhn and Johnson
### Web Page: http://www.appliedpredictivemodeling.com
### Contact: Max Kuhn ([email protected])
###
### Chapter 8: Regression Trees and Rule-Based Models
###
### Required packages: AppliedPredictiveModeling, caret, Cubist, doMC (optional),
### gbm, lattice, party, partykit, randomForest, rpart, RWeka
###
### Data used: The solubility from the AppliedPredictiveModeling package
###
### Notes:
### 1) This code is provided without warranty.
###
### 2) This code should help the user reproduce the results in the
### text. There will be differences between this code and what is in
### the computing section. For example, the computing sections show
### how the source functions work (e.g. randomForest() or plsr()),
### which were not directly used when creating the book. Also, there may be
### syntax differences that occur over time as packages evolve. These files
### will reflect those changes.
###
### 3) In some cases, the calculations in the book were run in
### parallel. The sub-processes may reset the random number seed.
### Your results may slightly vary.
###
################################################################################
################################################################################
### Load the data
library(AppliedPredictiveModeling)
data(solubility)
### Create a control function that will be used across models. We
### create the fold assignments explicitly instead of relying on the
### random number seed being set to identical values.
library(caret)
set.seed(100)
indx <- createFolds(solTrainY, returnTrain = TRUE)
ctrl <- trainControl(method = "cv", index = indx)
################################################################################
### Section 8.1 Basic Regression Trees
library(rpart)
### Fit two CART models to show the initial splitting process. rpart
### only uses formulas, so we put the predictors and outcome into
### a common data frame first.
trainData <- solTrainXtrans
trainData$y <- solTrainY
rpStump <- rpart(y ~ ., data = trainData,
control = rpart.control(maxdepth = 1))
rpSmall <- rpart(y ~ ., data = trainData,
control = rpart.control(maxdepth = 2))
### Tune the model
library(caret)
set.seed(100)
cartTune <- train(x = solTrainXtrans, y = solTrainY,
method = "rpart",
tuneLength = 25,
trControl = ctrl)
cartTune
## cartTune$finalModel
### Plot the tuning results
plot(cartTune, scales = list(x = list(log = 10)))
### Use the partykit package to make some nice plots. First, convert
### the rpart objects to party objects.
# library(partykit)
#
# cartTree <- as.party(cartTune$finalModel)
# plot(cartTree)
### Get the variable importance. 'competes' is an argument that
### controls whether splits not used in the tree should be included
### in the importance calculations.
cartImp <- varImp(cartTune, scale = FALSE, competes = FALSE)
cartImp
### Save the test set results in a data frame
testResults <- data.frame(obs = solTestY,
CART = predict(cartTune, solTestXtrans))
### Tune the conditional inference tree
cGrid <- data.frame(mincriterion = sort(c(.95, seq(.75, .99, length = 2))))
set.seed(100)
ctreeTune <- train(x = solTrainXtrans, y = solTrainY,
method = "ctree",
tuneGrid = cGrid,
trControl = ctrl)
ctreeTune
plot(ctreeTune)
##ctreeTune$finalModel
plot(ctreeTune$finalModel)
testResults$cTree <- predict(ctreeTune, solTestXtrans)
################################################################################
### Section 8.2 Regression Model Trees and 8.3 Rule-Based Models
### Tune the model tree. Using method = "M5" actually tunes over the
### tree- and rule-based versions of the model. M = 10 is also passed
### in to make sure that there are larger terminal nodes for the
### regression models.
library(RWeka)

set.seed(100)
m5Tune <- train(x = solTrainXtrans, y = solTrainY,
method = "M5",
trControl = ctrl,
control = Weka_control(M = 10))
m5Tune
plot(m5Tune)
## m5Tune$finalModel
## plot(m5Tune$finalModel)
### Show the rule-based model too
ruleFit <- M5Rules(y~., data = trainData, control = Weka_control(M = 10))
ruleFit
################################################################################
### Section 8.4 Bagged Trees
### Optional: parallel processing can be used via the 'do' packages,
### such as doMC, doMPI etc. We used doMC (not on Windows) to speed
### up the computations.
### WARNING: Be aware of how much memory is needed to parallel
### process. It can very quickly overwhelm the available hardware. The
### estimate of the median memory usage (VSIZE = total memory size)
### was 9706M for a core, but could range up to 9706M. This becomes
### severe when parallelizing randomForest() and (especially) calls
### to cforest().
### WARNING 2: The RWeka package does not work well with some forms of
### parallel processing, such as multicore (i.e. doMC).
library(doMC)
registerDoMC(5)
set.seed(100)
treebagTune <- train(x = solTrainXtrans, y = solTrainY,
method = "treebag",
nbagg = 50,
trControl = ctrl)
treebagTune
################################################################################
### Section 8.5 Random Forests
mtryGrid <- data.frame(mtry = floor(seq(10, ncol(solTrainXtrans), length = 10)))
### Tune the model using cross-validation
set.seed(100)
rfTune <- train(x = solTrainXtrans, y = solTrainY,
method = "rf",
tuneGrid = mtryGrid,
ntree = 1000,
importance = TRUE,
trControl = ctrl)
rfTune
plot(rfTune)
rfImp <- varImp(rfTune, scale = FALSE)
rfImp
### Tune the model using the OOB estimates
ctrlOOB <- trainControl(method = "oob")
set.seed(100)
rfTuneOOB <- train(x = solTrainXtrans, y = solTrainY,
method = "rf",
tuneGrid = mtryGrid,
ntree = 1000,
importance = TRUE,
trControl = ctrlOOB)
rfTuneOOB
plot(rfTuneOOB)
### Tune the conditional inference forests
set.seed(100)
condrfTune <- train(x = solTrainXtrans, y = solTrainY,
method = "cforest",
tuneGrid = mtryGrid,
controls = cforest_unbiased(ntree = 1000),
trControl = ctrl)
condrfTune
plot(condrfTune)
set.seed(100)
condrfTuneOOB <- train(x = solTrainXtrans, y = solTrainY,
method = "cforest",
tuneGrid = mtryGrid,
controls = cforest_unbiased(ntree = 1000),
trControl = trainControl(method = "oob"))
condrfTuneOOB
plot(condrfTuneOOB)
################################################################################
### Section 8.6 Boosting
gbmGrid <- expand.grid(interaction.depth = seq(1, 7, by = 2),
n.trees = seq(100, 1000, by = 50),
shrinkage = c(0.01, 0.1))
set.seed(100)
gbmTune <- train(x = solTrainXtrans, y = solTrainY,
method = "gbm",
tuneGrid = gbmGrid,
trControl = ctrl,
verbose = FALSE)
gbmTune
plot(gbmTune, auto.key = list(columns = 4, lines = TRUE))
gbmImp <- varImp(gbmTune, scale = FALSE)
gbmImp
################################################################################
### Section 8.7 Cubist
cbGrid <- expand.grid(committees = c(1:10, 20, 50, 75, 100),
neighbors = c(0, 1, 5, 9))
set.seed(100)
cubistTune <- train(solTrainXtrans, solTrainY,
"cubist",
tuneGrid = cbGrid,
trControl = ctrl)
cubistTune
plot(cubistTune, auto.key = list(columns = 4, lines = TRUE))
cbImp <- varImp(cubistTune, scale = FALSE)
cbImp
################################################################################
### Session Information
sessionInfo()
q("no")
| /scratch/gouwar.j/cran-all/cranData/AppliedPredictiveModeling/inst/chapters/08_Regression_Trees.R |
################################################################################
### R code from Applied Predictive Modeling (2013) by Kuhn and Johnson.
### Copyright 2013 Kuhn and Johnson
### Web Page: http://www.appliedpredictivemodeling.com
### Contact: Max Kuhn ([email protected])
###
### Chapter 10: Case Study: Compressive Strength of Concrete Mixtures
###
### Required packages: AppliedPredictiveModeling, caret, Cubist, doMC (optional),
### earth, elasticnet, gbm, ipred, lattice, nnet, party, pls,
### randomForest, rpart, RWeka
###
### Data used: The concrete from the AppliedPredictiveModeling package
###
### Notes:
### 1) This code is provided without warranty.
###
### 2) This code should help the user reproduce the results in the
### text. There will be differences between this code and what is in
### the computing section. For example, the computing sections show
### how the source functions work (e.g. randomForest() or plsr()),
### which were not directly used when creating the book. Also, there may be
### syntax differences that occur over time as packages evolve. These files
### will reflect those changes.
###
### 3) In some cases, the calculations in the book were run in
### parallel. The sub-processes may reset the random number seed.
### Your results may slightly vary.
###
################################################################################
################################################################################
### Load the data and plot the data
library(AppliedPredictiveModeling)
data(concrete)
library(caret)
library(plyr)
featurePlot(concrete[, -9], concrete$CompressiveStrength,
between = list(x = 1, y = 1),
type = c("g", "p", "smooth"))
################################################################################
### Section 10.1 Model Building Strategy
### There are replicated mixtures, so take the average per mixture
averaged <- ddply(mixtures,
.(Cement, BlastFurnaceSlag, FlyAsh, Water,
Superplasticizer, CoarseAggregate,
FineAggregate, Age),
function(x) c(CompressiveStrength =
mean(x$CompressiveStrength)))
### Split the data and create a control object for train()
set.seed(975)
inTrain <- createDataPartition(averaged$CompressiveStrength, p = 3/4)[[1]]
training <- averaged[ inTrain,]
testing <- averaged[-inTrain,]
ctrl <- trainControl(method = "repeatedcv", repeats = 5, number = 10)
### Create a model formula that can be used repeatedly
modForm <- paste("CompressiveStrength ~ (.)^2 + I(Cement^2) + I(BlastFurnaceSlag^2) +",
"I(FlyAsh^2) + I(Water^2) + I(Superplasticizer^2) +",
"I(CoarseAggregate^2) + I(FineAggregate^2) + I(Age^2)")
modForm <- as.formula(modForm)
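### A quick check (not in the original text): the expanded formula should
### contain 8 main effects, 28 two-way interactions and 8 quadratic terms
### (44 terms in all).
length(attr(terms(modForm, data = training), "term.labels"))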
### Fit the various models
### Optional: parallel processing can be used via the 'do' packages,
### such as doMC, doMPI etc. We used doMC (not on Windows) to speed
### up the computations.
### WARNING: Be aware of how much memory is needed to parallel
### process. It can very quickly overwhelm the available hardware. The
### estimate of the median memory usage (VSIZE = total memory size)
### was 2800M per core, although the M5 calculations require about
### 3700M without parallel processing.
### WARNING 2: The RWeka package does not work well with some forms of
### parallel processing, such as multicore (i.e. doMC).
library(doMC)
registerDoMC(14)
set.seed(669)
lmFit <- train(modForm, data = training,
method = "lm",
trControl = ctrl)
set.seed(669)
plsFit <- train(modForm, data = training,
method = "pls",
preProc = c("center", "scale"),
tuneLength = 15,
trControl = ctrl)
lassoGrid <- expand.grid(lambda = c(0, .001, .01, .1),
fraction = seq(0.05, 1, length = 20))
set.seed(669)
lassoFit <- train(modForm, data = training,
method = "enet",
preProc = c("center", "scale"),
tuneGrid = lassoGrid,
trControl = ctrl)
set.seed(669)
earthFit <- train(CompressiveStrength ~ ., data = training,
method = "earth",
tuneGrid = expand.grid(degree = 1,
nprune = 2:25),
trControl = ctrl)
set.seed(669)
svmRFit <- train(CompressiveStrength ~ ., data = training,
method = "svmRadial",
tuneLength = 15,
preProc = c("center", "scale"),
trControl = ctrl)
nnetGrid <- expand.grid(decay = c(0.001, .01, .1),
size = seq(1, 27, by = 2),
bag = FALSE)
set.seed(669)
nnetFit <- train(CompressiveStrength ~ .,
data = training,
method = "avNNet",
tuneGrid = nnetGrid,
preProc = c("center", "scale"),
linout = TRUE,
trace = FALSE,
maxit = 1000,
allowParallel = FALSE,
trControl = ctrl)
set.seed(669)
rpartFit <- train(CompressiveStrength ~ .,
data = training,
method = "rpart",
tuneLength = 30,
trControl = ctrl)
set.seed(669)
treebagFit <- train(CompressiveStrength ~ .,
data = training,
method = "treebag",
trControl = ctrl)
set.seed(669)
ctreeFit <- train(CompressiveStrength ~ .,
data = training,
method = "ctree",
tuneLength = 10,
trControl = ctrl)
set.seed(669)
rfFit <- train(CompressiveStrength ~ .,
data = training,
method = "rf",
tuneLength = 10,
               ntree = 1000,
importance = TRUE,
trControl = ctrl)
gbmGrid <- expand.grid(interaction.depth = seq(1, 7, by = 2),
n.trees = seq(100, 1000, by = 50),
shrinkage = c(0.01, 0.1))
set.seed(669)
gbmFit <- train(CompressiveStrength ~ .,
data = training,
method = "gbm",
tuneGrid = gbmGrid,
verbose = FALSE,
trControl = ctrl)
cbGrid <- expand.grid(committees = c(1, 5, 10, 50, 75, 100),
neighbors = c(0, 1, 3, 5, 7, 9))
set.seed(669)
cbFit <- train(CompressiveStrength ~ .,
data = training,
method = "cubist",
tuneGrid = cbGrid,
trControl = ctrl)
### Turn off the parallel processing to use RWeka.
registerDoSEQ()
set.seed(669)
mtFit <- train(CompressiveStrength ~ .,
data = training,
method = "M5",
trControl = ctrl)
################################################################################
### Section 10.2 Model Performance
### Collect the resampling statistics across all the models
rs <- resamples(list("Linear Reg" = lmFit,
                     "PLS" = plsFit,
"Elastic Net" = lassoFit,
MARS = earthFit,
SVM = svmRFit,
"Neural Networks" = nnetFit,
CART = rpartFit,
"Cond Inf Tree" = ctreeFit,
"Bagged Tree" = treebagFit,
"Boosted Tree" = gbmFit,
"Random Forest" = rfFit,
Cubist = cbFit))
#parallelPlot(rs)
#parallelPlot(rs, metric = "Rsquared")
### Get the test set results across several models
nnetPred <- predict(nnetFit, testing)
gbmPred <- predict(gbmFit, testing)
cbPred <- predict(cbFit, testing)
testResults <- rbind(postResample(nnetPred, testing$CompressiveStrength),
postResample(gbmPred, testing$CompressiveStrength),
postResample(cbPred, testing$CompressiveStrength))
testResults <- as.data.frame(testResults)
testResults$Model <- c("Neural Networks", "Boosted Tree", "Cubist")
testResults <- testResults[order(testResults$RMSE),]
################################################################################
### Section 10.3 Optimizing Compressive Strength
library(proxy)
### Create a function to maximize compressive strength* while keeping
### the predictor values as mixtures. Water (in x[7]) is used as the
### 'slack variable'.
### * We are actually minimizing the negative compressive strength
modelPrediction <- function(x, mod, limit = 2500)
{
if(x[1] < 0 | x[1] > 1) return(10^38)
if(x[2] < 0 | x[2] > 1) return(10^38)
if(x[3] < 0 | x[3] > 1) return(10^38)
if(x[4] < 0 | x[4] > 1) return(10^38)
if(x[5] < 0 | x[5] > 1) return(10^38)
if(x[6] < 0 | x[6] > 1) return(10^38)
x <- c(x, 1 - sum(x))
if(x[7] < 0.05) return(10^38)
tmp <- as.data.frame(t(x))
names(tmp) <- c('Cement','BlastFurnaceSlag','FlyAsh',
'Superplasticizer','CoarseAggregate',
'FineAggregate', 'Water')
tmp$Age <- 28
-predict(mod, tmp)
}
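### Illustrative call (the mixture values below are made up, not from the
### text): once a model such as cbFit exists, the objective can be checked for
### a single candidate. The six inputs are the proportions of Cement,
### BlastFurnaceSlag, FlyAsh, Superplasticizer, CoarseAggregate and
### FineAggregate; Water is the slack variable computed inside the function.
exampleMix <- c(0.20, 0.10, 0.05, 0.01, 0.30, 0.25)
modelPrediction(exampleMix, cbFit) ## negative predicted strength (or 10^38 if infeasible)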
### Get mixtures at 28 days
subTrain <- subset(training, Age == 28)
### Center and scale the data to use dissimilarity sampling
pp1 <- preProcess(subTrain[, -(8:9)], c("center", "scale"))
scaledTrain <- predict(pp1, subTrain[, 1:7])
### Randomly select a few mixtures as a starting pool
set.seed(91)
startMixture <- sample(1:nrow(subTrain), 1)
starters <- scaledTrain[startMixture, 1:7]
pool <- scaledTrain
index <- maxDissim(starters, pool, 14)
startPoints <- c(startMixture, index)
starters <- subTrain[startPoints,1:7]
startingValues <- starters[, -4]
### For each starting mixture, optimize the Cubist model using
### a simplex search routine
cbResults <- startingValues
cbResults$Water <- NA
cbResults$Prediction <- NA
for(i in 1:nrow(cbResults))
{
results <- optim(unlist(cbResults[i,1:6]),
modelPrediction,
method = "Nelder-Mead",
control=list(maxit=5000),
mod = cbFit)
cbResults$Prediction[i] <- -results$value
cbResults[i,1:6] <- results$par
}
cbResults$Water <- 1 - apply(cbResults[,1:6], 1, sum)
cbResults <- subset(cbResults, Prediction > 0 & Water > .02)
cbResults <- cbResults[order(-cbResults$Prediction),][1:3,]
cbResults$Model <- "Cubist"
### Do the same for the neural network model
nnetResults <- startingValues
nnetResults$Water <- NA
nnetResults$Prediction <- NA
for(i in 1:nrow(nnetResults))
{
  results <- optim(unlist(nnetResults[i, 1:6]),
modelPrediction,
method = "Nelder-Mead",
control=list(maxit=5000),
mod = nnetFit)
nnetResults$Prediction[i] <- -results$value
nnetResults[i,1:6] <- results$par
}
nnetResults$Water <- 1 - apply(nnetResults[,1:6], 1, sum)
nnetResults <- subset(nnetResults, Prediction > 0 & Water > .02)
nnetResults <- nnetResults[order(-nnetResults$Prediction),][1:3,]
nnetResults$Model <- "NNet"
### Convert the predicted mixtures to PCA space and plot
pp2 <- preProcess(subTrain[, 1:7], "pca")
pca1 <- predict(pp2, subTrain[, 1:7])
pca1$Data <- "Training Set"
pca1$Data[startPoints] <- "Starting Values"
pca3 <- predict(pp2, cbResults[, names(subTrain[, 1:7])])
pca3$Data <- "Cubist"
pca4 <- predict(pp2, nnetResults[, names(subTrain[, 1:7])])
pca4$Data <- "Neural Network"
pcaData <- rbind(pca1, pca3, pca4)
pcaData$Data <- factor(pcaData$Data,
levels = c("Training Set","Starting Values",
"Cubist","Neural Network"))
lim <- extendrange(pcaData[, 1:2])
xyplot(PC2 ~ PC1,
data = pcaData,
groups = Data,
auto.key = list(columns = 2),
xlim = lim,
ylim = lim,
type = c("g", "p"))
################################################################################
### Session Information
sessionInfo()
q("no")
| /scratch/gouwar.j/cran-all/cranData/AppliedPredictiveModeling/inst/chapters/10_Case_Study_Concrete.R |
################################################################################
### R code from Applied Predictive Modeling (2013) by Kuhn and Johnson.
### Copyright 2013 Kuhn and Johnson
### Web Page: http://www.appliedpredictivemodeling.com
### Contact: Max Kuhn ([email protected])
###
### Chapter 11: Measuring Performance in Classification Models
###
### Required packages: AppliedPredictiveModeling, caret, MASS, randomForest,
### pROC, klaR
###
### Data used: Simulated two-class data (quadBoundaryFunc) and the German
###            credit data from the caret package
###
### Notes:
### 1) This code is provided without warranty.
###
### 2) This code should help the user reproduce the results in the
### text. There will be differences between this code and what is in
### the computing section. For example, the computing sections show
### how the source functions work (e.g. randomForest() or plsr()),
### which were not directly used when creating the book. Also, there may be
### syntax differences that occur over time as packages evolve. These files
### will reflect those changes.
###
### 3) In some cases, the calculations in the book were run in
### parallel. The sub-processes may reset the random number seed.
### Your results may slightly vary.
###
################################################################################
################################################################################
### Section 11.1 Class Predictions
library(AppliedPredictiveModeling)
### Simulate some two class data with two predictors
set.seed(975)
training <- quadBoundaryFunc(500)
testing <- quadBoundaryFunc(1000)
testing$class2 <- ifelse(testing$class == "Class1", 1, 0)
testing$ID <- 1:nrow(testing)
### Fit models
library(MASS)
qdaFit <- qda(class ~ X1 + X2, data = training)
library(randomForest)
rfFit <- randomForest(class ~ X1 + X2, data = training, ntree = 2000)
### Predict the test set
testing$qda <- predict(qdaFit, testing)$posterior[,1]
testing$rf <- predict(rfFit, testing, type = "prob")[,1]
### Generate the calibration analysis
library(caret)
calData1 <- calibration(class ~ qda + rf, data = testing, cuts = 10)
### Plot the curve
xyplot(calData1, auto.key = list(columns = 2))
### To calibrate the data, treat the probabilities as inputs into the
### model
trainProbs <- training
trainProbs$qda <- predict(qdaFit)$posterior[,1]
### These models take the probabilities as inputs and, based on the
### true class, re-calibrate them.
library(klaR)
nbCal <- NaiveBayes(class ~ qda, data = trainProbs, usekernel = TRUE)
### We use relevel() here because glm() models the probability of the
### second factor level.
lrCal <- glm(relevel(class, "Class2") ~ qda, data = trainProbs, family = binomial)
### Now re-predict the test set using the modified class probability
### estimates
testing$qda2 <- predict(nbCal, testing[, "qda", drop = FALSE])$posterior[,1]
testing$qda3 <- predict(lrCal, testing[, "qda", drop = FALSE], type = "response")
### Manipulate the data a bit for pretty plotting
simulatedProbs <- testing[, c("class", "rf", "qda3")]
names(simulatedProbs) <- c("TrueClass", "RandomForestProb", "QDACalibrated")
simulatedProbs$RandomForestClass <- predict(rfFit, testing)
calData2 <- calibration(class ~ qda + qda2 + qda3, data = testing)
calData2$data$calibModelVar <- as.character(calData2$data$calibModelVar)
calData2$data$calibModelVar <- ifelse(calData2$data$calibModelVar == "qda",
"QDA",
calData2$data$calibModelVar)
calData2$data$calibModelVar <- ifelse(calData2$data$calibModelVar == "qda2",
"Bayesian Calibration",
calData2$data$calibModelVar)
calData2$data$calibModelVar <- ifelse(calData2$data$calibModelVar == "qda3",
"Sigmoidal Calibration",
calData2$data$calibModelVar)
calData2$data$calibModelVar <- factor(calData2$data$calibModelVar,
levels = c("QDA",
"Bayesian Calibration",
"Sigmoidal Calibration"))
xyplot(calData2, auto.key = list(columns = 1))
### Recreate the model used in the over-fitting chapter
library(caret)
data(GermanCredit)
## First, remove near-zero variance predictors then get rid of a few predictors
## that duplicate values. For example, there are three possible values for the
## housing variable: "Rent", "Own" and "ForFree". So that we don't have linear
## dependencies, we get rid of one of the levels (e.g. "ForFree")
GermanCredit <- GermanCredit[, -nearZeroVar(GermanCredit)]
GermanCredit$CheckingAccountStatus.lt.0 <- NULL
GermanCredit$SavingsAccountBonds.lt.100 <- NULL
GermanCredit$EmploymentDuration.lt.1 <- NULL
GermanCredit$EmploymentDuration.Unemployed <- NULL
GermanCredit$Personal.Male.Married.Widowed <- NULL
GermanCredit$Property.Unknown <- NULL
GermanCredit$Housing.ForFree <- NULL
## Split the data into training (80%) and test sets (20%)
set.seed(100)
inTrain <- createDataPartition(GermanCredit$Class, p = .8)[[1]]
GermanCreditTrain <- GermanCredit[ inTrain, ]
GermanCreditTest <- GermanCredit[-inTrain, ]
set.seed(1056)
logisticReg <- train(Class ~ .,
data = GermanCreditTrain,
method = "glm",
trControl = trainControl(method = "repeatedcv",
repeats = 5))
logisticReg
### Predict the test set
creditResults <- data.frame(obs = GermanCreditTest$Class)
creditResults$prob <- predict(logisticReg, GermanCreditTest, type = "prob")[, "Bad"]
creditResults$pred <- predict(logisticReg, GermanCreditTest)
creditResults$Label <- ifelse(creditResults$obs == "Bad",
"True Outcome: Bad Credit",
"True Outcome: Good Credit")
### Plot the probability of bad credit
histogram(~prob|Label,
data = creditResults,
layout = c(2, 1),
nint = 20,
xlab = "Probability of Bad Credit",
type = "count")
### Calculate and plot the calibration curve
creditCalib <- calibration(obs ~ prob, data = creditResults)
xyplot(creditCalib)
### Create the confusion matrix from the test set.
confusionMatrix(data = creditResults$pred,
reference = creditResults$obs)
### ROC curves:
### Like glm(), roc() treats the last level of the factor as the event
### of interest so we use relevel() to change the observed class data
library(pROC)
creditROC <- roc(relevel(creditResults$obs, "Good"), creditResults$prob)
coords(creditROC, "all")[,1:3]
auc(creditROC)
ci.auc(creditROC)
### Note the x-axis is reversed
plot(creditROC)
### Old-school:
plot(creditROC, legacy.axes = TRUE)
### Lift charts
creditLift <- lift(obs ~ prob, data = creditResults)
xyplot(creditLift)
################################################################################
### Session Information
sessionInfo()
q("no")
| /scratch/gouwar.j/cran-all/cranData/AppliedPredictiveModeling/inst/chapters/11_Class_Performance.R |
################################################################################
### R code from Applied Predictive Modeling (2013) by Kuhn and Johnson.
### Copyright 2013 Kuhn and Johnson
### Web Page: http://www.appliedpredictivemodeling.com
### Contact: Max Kuhn ([email protected])
###
### Chapter 12 Discriminant Analysis and Other Linear Classification Models
###
### Required packages: AppliedPredictiveModeling, caret, doMC (optional),
### glmnet, lattice, MASS, pamr, pls, pROC, sparseLDA
###
### Data used: The grant application data. See the file 'CreateGrantData.R'
###
### Notes:
### 1) This code is provided without warranty.
###
### 2) This code should help the user reproduce the results in the
### text. There will be differences between this code and what is in
### the computing section. For example, the computing sections show
### how the source functions work (e.g. randomForest() or plsr()),
### which were not directly used when creating the book. Also, there may be
### syntax differences that occur over time as packages evolve. These files
### will reflect those changes.
###
### 3) In some cases, the calculations in the book were run in
### parallel. The sub-processes may reset the random number seed.
### Your results may slightly vary.
###
################################################################################
################################################################################
### Section 12.1 Case Study: Predicting Successful Grant Applications
load("grantData.RData")
library(caret)
library(doMC)
registerDoMC(12)
library(plyr)
library(reshape2)
## Look at two different ways to split and resample the data. A support vector
## machine is used to illustrate the differences. The full set of predictors
## is used.
pre2008Data <- training[pre2008,]
year2008Data <- rbind(training[-pre2008,], testing)
set.seed(552)
test2008 <- createDataPartition(year2008Data$Class, p = .25)[[1]]
allData <- rbind(pre2008Data, year2008Data[-test2008,])
holdout2008 <- year2008Data[test2008,]
## Use a common tuning grid for both approaches.
svmrGrid <- expand.grid(sigma = c(.00007, .00009, .0001, .0002),
C = 2^(-3:8))
## Evaluate the model using overall 10-fold cross-validation
ctrl0 <- trainControl(method = "cv",
summaryFunction = twoClassSummary,
classProbs = TRUE)
set.seed(477)
svmFit0 <- train(pre2008Data[,fullSet], pre2008Data$Class,
method = "svmRadial",
tuneGrid = svmrGrid,
preProc = c("center", "scale"),
metric = "ROC",
trControl = ctrl0)
svmFit0
### Now fit the single 2008 test set
ctrl00 <- trainControl(method = "LGOCV",
summaryFunction = twoClassSummary,
classProbs = TRUE,
index = list(TestSet = 1:nrow(pre2008Data)))
set.seed(476)
svmFit00 <- train(allData[,fullSet], allData$Class,
method = "svmRadial",
tuneGrid = svmrGrid,
preProc = c("center", "scale"),
metric = "ROC",
trControl = ctrl00)
svmFit00
## Combine the two sets of results and plot
grid0 <- subset(svmFit0$results, sigma == svmFit0$bestTune$sigma)
grid0$Model <- "10-Fold Cross-Validation"
grid00 <- subset(svmFit00$results, sigma == svmFit00$bestTune$sigma)
grid00$Model <- "Single 2008 Test Set"
plotData <- rbind(grid00, grid0)
plotData <- plotData[!is.na(plotData$ROC),]
xyplot(ROC ~ C, data = plotData,
groups = Model,
type = c("g", "o"),
scales = list(x = list(log = 2)),
auto.key = list(columns = 1))
################################################################################
### Section 12.2 Logistic Regression
modelFit <- glm(Class ~ Day, data = training[pre2008,], family = binomial)
dataGrid <- data.frame(Day = seq(0, 365, length = 500))
dataGrid$Linear <- 1 - predict(modelFit, dataGrid, type = "response")
linear2008 <- auc(roc(response = training[-pre2008, "Class"],
predictor = 1 - predict(modelFit,
training[-pre2008,],
type = "response"),
levels = rev(levels(training[-pre2008, "Class"]))))
modelFit2 <- glm(Class ~ Day + I(Day^2),
data = training[pre2008,],
family = binomial)
dataGrid$Quadratic <- 1 - predict(modelFit2, dataGrid, type = "response")
quad2008 <- auc(roc(response = training[-pre2008, "Class"],
predictor = 1 - predict(modelFit2,
training[-pre2008,],
type = "response"),
levels = rev(levels(training[-pre2008, "Class"]))))
dataGrid <- melt(dataGrid, id.vars = "Day")
byDay <- training[pre2008, c("Day", "Class")]
byDay$Binned <- cut(byDay$Day, seq(0, 360, by = 5))
observedProps <- ddply(byDay, .(Binned),
function(x) c(n = nrow(x), mean = mean(x$Class == "successful")))
observedProps$midpoint <- seq(2.5, 357.5, by = 5)
xyplot(value ~ Day|variable, data = dataGrid,
ylab = "Probability of A Successful Grant",
ylim = extendrange(0:1),
between = list(x = 1),
panel = function(...)
{
panel.xyplot(x = observedProps$midpoint, observedProps$mean,
pch = 16., col = rgb(.2, .2, .2, .5))
panel.xyplot(..., type = "l", col = "black", lwd = 2)
})
## For the reduced set of factors, fit the logistic regression model (linear and
## quadratic) and evaluate on the 2008 hold-out data.
training$Day2 <- training$Day^2
testing$Day2 <- testing$Day^2
fullSet <- c(fullSet, "Day2")
reducedSet <- c(reducedSet, "Day2")
## This control object will be used across multiple models so that the
## data splitting is consistent
ctrl <- trainControl(method = "LGOCV",
summaryFunction = twoClassSummary,
classProbs = TRUE,
index = list(TrainSet = pre2008),
savePredictions = TRUE)
set.seed(476)
lrFit <- train(x = training[,reducedSet],
y = training$Class,
method = "glm",
metric = "ROC",
trControl = ctrl)
lrFit
set.seed(476)
lrFit2 <- train(x = training[,fullSet],
y = training$Class,
method = "glm",
metric = "ROC",
trControl = ctrl)
lrFit2
lrFit$pred <- merge(lrFit$pred, lrFit$bestTune)
## Get the confusion matrices for the hold-out set
lrCM <- confusionMatrix(lrFit, norm = "none")
lrCM
lrCM2 <- confusionMatrix(lrFit2, norm = "none")
lrCM2
## Get the area under the ROC curve for the hold-out set
lrRoc <- roc(response = lrFit$pred$obs,
predictor = lrFit$pred$successful,
levels = rev(levels(lrFit$pred$obs)))
lrRoc2 <- roc(response = lrFit2$pred$obs,
predictor = lrFit2$pred$successful,
levels = rev(levels(lrFit2$pred$obs)))
lrImp <- varImp(lrFit, scale = FALSE)
plot(lrRoc, legacy.axes = TRUE)
################################################################################
### Section 12.3 Linear Discriminant Analysis
## Fit the model to the reduced set
set.seed(476)
ldaFit <- train(x = training[,reducedSet],
y = training$Class,
method = "lda",
preProc = c("center","scale"),
metric = "ROC",
trControl = ctrl)
ldaFit
ldaFit$pred <- merge(ldaFit$pred, ldaFit$bestTune)
ldaCM <- confusionMatrix(ldaFit, norm = "none")
ldaCM
ldaRoc <- roc(response = ldaFit$pred$obs,
predictor = ldaFit$pred$successful,
levels = rev(levels(ldaFit$pred$obs)))
plot(lrRoc, type = "s", col = rgb(.2, .2, .2, .2), legacy.axes = TRUE)
plot(ldaRoc, add = TRUE, type = "s", legacy.axes = TRUE)
################################################################################
### Section 12.4 Partial Least Squares Discriminant Analysis
## This model uses all of the predictors
set.seed(476)
plsFit <- train(x = training[,fullSet],
y = training$Class,
method = "pls",
tuneGrid = expand.grid(ncomp = 1:10),
preProc = c("center","scale"),
metric = "ROC",
probMethod = "Bayes",
trControl = ctrl)
plsFit
plsImpGrant <- varImp(plsFit, scale = FALSE)
bestPlsNcomp <- plsFit$results[best(plsFit$results, "ROC", maximize = TRUE), "ncomp"]
bestPlsROC <- plsFit$results[best(plsFit$results, "ROC", maximize = TRUE), "ROC"]
## Only keep the final tuning parameter data
plsFit$pred <- merge(plsFit$pred, plsFit$bestTune)
plsRoc <- roc(response = plsFit$pred$obs,
predictor = plsFit$pred$successful,
levels = rev(levels(plsFit$pred$obs)))
### PLS confusion matrix information
plsCM <- confusionMatrix(plsFit, norm = "none")
plsCM
## Now fit a model that uses a smaller set of predictors chosen by unsupervised
## filtering.
set.seed(476)
plsFit2 <- train(x = training[,reducedSet],
y = training$Class,
method = "pls",
tuneGrid = expand.grid(ncomp = 1:10),
preProc = c("center","scale"),
metric = "ROC",
probMethod = "Bayes",
trControl = ctrl)
plsFit2
bestPlsNcomp2 <- plsFit2$results[best(plsFit2$results, "ROC", maximize = TRUE), "ncomp"]
bestPlsROC2 <- plsFit2$results[best(plsFit2$results, "ROC", maximize = TRUE), "ROC"]
plsFit2$pred <- merge(plsFit2$pred, plsFit2$bestTune)
plsRoc2 <- roc(response = plsFit2$pred$obs,
predictor = plsFit2$pred$successful,
levels = rev(levels(plsFit2$pred$obs)))
plsCM2 <- confusionMatrix(plsFit2, norm = "none")
plsCM2
pls.ROC <- cbind(plsFit$results,Descriptors="Full Set")
pls2.ROC <- cbind(plsFit2$results,Descriptors="Reduced Set")
plsCompareROC <- data.frame(rbind(pls.ROC,pls2.ROC))
xyplot(ROC ~ ncomp,
data = plsCompareROC,
xlab = "# Components",
ylab = "ROC (2008 Hold-Out Data)",
auto.key = list(columns = 2),
groups = Descriptors,
type = c("o", "g"))
## Plot ROC curves and variable importance scores
plot(ldaRoc, type = "s", col = rgb(.2, .2, .2, .2), legacy.axes = TRUE)
plot(lrRoc, type = "s", col = rgb(.2, .2, .2, .2), add = TRUE, legacy.axes = TRUE)
plot(plsRoc2, type = "s", add = TRUE, legacy.axes = TRUE)
plot(plsImpGrant, top=20, scales = list(y = list(cex = .95)))
################################################################################
### Section 12.5 Penalized Models
## The glmnet model
glmnGrid <- expand.grid(alpha = c(0, .1, .2, .4, .6, .8, 1),
lambda = seq(.01, .2, length = 40))
set.seed(476)
glmnFit <- train(x = training[,fullSet],
y = training$Class,
method = "glmnet",
tuneGrid = glmnGrid,
preProc = c("center", "scale"),
metric = "ROC",
trControl = ctrl)
glmnFit
glmnet2008 <- merge(glmnFit$pred, glmnFit$bestTune)
glmnetCM <- confusionMatrix(glmnFit, norm = "none")
glmnetCM
glmnetRoc <- roc(response = glmnet2008$obs,
predictor = glmnet2008$successful,
levels = rev(levels(glmnet2008$obs)))
glmnFit0 <- glmnFit
glmnFit0$results$lambda <- format(round(glmnFit0$results$lambda, 3))
glmnPlot <- plot(glmnFit0,
plotType = "level",
cuts = 15,
scales = list(x = list(rot = 90, cex = .65)))
update(glmnPlot,
ylab = "Mixing Percentage\nRidge <---------> Lasso",
sub = "",
main = "Area Under the ROC Curve",
xlab = "Amount of Regularization")
plot(plsRoc2, type = "s", col = rgb(.2, .2, .2, .2), legacy.axes = TRUE)
plot(ldaRoc, type = "s", add = TRUE, col = rgb(.2, .2, .2, .2), legacy.axes = TRUE)
plot(lrRoc, type = "s", col = rgb(.2, .2, .2, .2), add = TRUE, legacy.axes = TRUE)
plot(glmnetRoc, type = "s", add = TRUE, legacy.axes = TRUE)
## Sparse logistic regression
set.seed(476)
spLDAFit <- train(x = training[,fullSet],
y = training$Class,
"sparseLDA",
tuneGrid = expand.grid(lambda = c(.1),
NumVars = c(1:20, 50, 75, 100, 250, 500, 750, 1000)),
preProc = c("center", "scale"),
metric = "ROC",
trControl = ctrl)
spLDAFit
spLDA2008 <- merge(spLDAFit$pred, spLDAFit$bestTune)
spLDACM <- confusionMatrix(spLDAFit, norm = "none")
spLDACM
spLDARoc <- roc(response = spLDA2008$obs,
predictor = spLDA2008$successful,
levels = rev(levels(spLDA2008$obs)))
update(plot(spLDAFit, scales = list(x = list(log = 10))),
ylab = "ROC AUC (2008 Hold-Out Data)")
plot(plsRoc2, type = "s", col = rgb(.2, .2, .2, .2), legacy.axes = TRUE)
plot(glmnetRoc, type = "s", add = TRUE, col = rgb(.2, .2, .2, .2), legacy.axes = TRUE)
plot(ldaRoc, type = "s", add = TRUE, col = rgb(.2, .2, .2, .2), legacy.axes = TRUE)
plot(lrRoc, type = "s", col = rgb(.2, .2, .2, .2), add = TRUE, legacy.axes = TRUE)
plot(spLDARoc, type = "s", add = TRUE, legacy.axes = TRUE)
################################################################################
### Section 12.6 Nearest Shrunken Centroids
set.seed(476)
nscFit <- train(x = training[,fullSet],
y = training$Class,
method = "pam",
preProc = c("center", "scale"),
tuneGrid = data.frame(threshold = seq(0, 25, length = 30)),
metric = "ROC",
trControl = ctrl)
nscFit
nsc2008 <- merge(nscFit$pred, nscFit$bestTune)
nscCM <- confusionMatrix(nscFit, norm = "none")
nscCM
nscRoc <- roc(response = nsc2008$obs,
predictor = nsc2008$successful,
levels = rev(levels(nsc2008$obs)))
update(plot(nscFit), ylab = "ROC AUC (2008 Hold-Out Data)")
plot(plsRoc2, type = "s", col = rgb(.2, .2, .2, .2), legacy.axes = TRUE)
plot(glmnetRoc, type = "s", add = TRUE, col = rgb(.2, .2, .2, .2), legacy.axes = TRUE)
plot(ldaRoc, type = "s", add = TRUE, col = rgb(.2, .2, .2, .2), legacy.axes = TRUE)
plot(lrRoc, type = "s", col = rgb(.2, .2, .2, .2), add = TRUE, legacy.axes = TRUE)
plot(spLDARoc, type = "s", col = rgb(.2, .2, .2, .2), add = TRUE, legacy.axes = TRUE)
plot(nscRoc, type = "s", add = TRUE, legacy.axes = TRUE)
sessionInfo()
q("no")
| /scratch/gouwar.j/cran-all/cranData/AppliedPredictiveModeling/inst/chapters/12_Discriminant_Analysis.R |
################################################################################
### R code from Applied Predictive Modeling (2013) by Kuhn and Johnson.
### Copyright 2013 Kuhn and Johnson
### Web Page: http://www.appliedpredictivemodeling.com
### Contact: Max Kuhn ([email protected])
###
### Chapter 13 Non-Linear Classification Models
###
### Required packages: AppliedPredictiveModeling, caret, doMC (optional),
### kernlab, klaR, lattice, latticeExtra, MASS, mda, nnet,
### pROC
###
### Data used: The grant application data. See the file 'CreateGrantData.R'
###
### Notes:
### 1) This code is provided without warranty.
###
### 2) This code should help the user reproduce the results in the
### text. There will be differences between this code and what is in
### the computing section. For example, the computing sections show
### how the source functions work (e.g. randomForest() or plsr()),
### which were not directly used when creating the book. Also, there may be
### syntax differences that occur over time as packages evolve. These files
### will reflect those changes.
###
### 3) In some cases, the calculations in the book were run in
### parallel. The sub-processes may reset the random number seed.
### Your results may slightly vary.
###
################################################################################
################################################################################
### Section 13.1 Nonlinear Discriminant Analysis
load("grantData.RData")
library(caret)
### Optional: parallel processing can be used via the 'do' packages,
### such as doMC, doMPI etc. We used doMC (not on Windows) to speed
### up the computations.
### WARNING: Be aware of how much memory is needed to parallel
### process. It can very quickly overwhelm the available hardware. We
### estimate the memory usage (VSIZE = total memory size) to be
### 2700M/core.
library(doMC)
registerDoMC(12)
## This control object will be used across multiple models so that the
## data splitting is consistent
ctrl <- trainControl(method = "LGOCV",
summaryFunction = twoClassSummary,
classProbs = TRUE,
index = list(TrainSet = pre2008),
savePredictions = TRUE)
set.seed(476)
mdaFit <- train(x = training[,reducedSet],
y = training$Class,
method = "mda",
metric = "ROC",
tries = 40,
tuneGrid = expand.grid(subclasses = 1:8),
trControl = ctrl)
mdaFit
mdaFit$results <- mdaFit$results[!is.na(mdaFit$results$ROC),]
mdaFit$pred <- merge(mdaFit$pred, mdaFit$bestTune)
mdaCM <- confusionMatrix(mdaFit, norm = "none")
mdaCM
mdaRoc <- roc(response = mdaFit$pred$obs,
predictor = mdaFit$pred$successful,
levels = rev(levels(mdaFit$pred$obs)))
mdaRoc
update(plot(mdaFit,
ylab = "ROC AUC (2008 Hold-Out Data)"))
################################################################################
### Section 13.2 Neural Networks
nnetGrid <- expand.grid(size = 1:10, decay = c(0, .1, 1, 2))
maxSize <- max(nnetGrid$size)
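### For reference: a single-hidden-layer network with H hidden units, P
### predictors and one output has H * (P + 1) + H + 1 weights. The MaxNWts
### values below apply this count to the largest candidate H (and ten times
### that for the model-averaging fits, which combine 10 networks).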
## Four different models are evaluated based on the data pre-processing and
## whether a single or multiple models are used
set.seed(476)
nnetFit <- train(x = training[,reducedSet],
y = training$Class,
method = "nnet",
metric = "ROC",
preProc = c("center", "scale"),
tuneGrid = nnetGrid,
trace = FALSE,
maxit = 2000,
MaxNWts = 1*(maxSize * (length(reducedSet) + 1) + maxSize + 1),
trControl = ctrl)
nnetFit
set.seed(476)
nnetFit2 <- train(x = training[,reducedSet],
y = training$Class,
method = "nnet",
metric = "ROC",
preProc = c("center", "scale", "spatialSign"),
tuneGrid = nnetGrid,
trace = FALSE,
maxit = 2000,
MaxNWts = 1*(maxSize * (length(reducedSet) + 1) + maxSize + 1),
trControl = ctrl)
nnetFit2
nnetGrid$bag <- FALSE
set.seed(476)
nnetFit3 <- train(x = training[,reducedSet],
y = training$Class,
method = "avNNet",
metric = "ROC",
preProc = c("center", "scale"),
tuneGrid = nnetGrid,
repeats = 10,
trace = FALSE,
maxit = 2000,
MaxNWts = 10*(maxSize * (length(reducedSet) + 1) + maxSize + 1),
                  allowParallel = FALSE, ## otherwise too many workers would be launched
trControl = ctrl)
nnetFit3
set.seed(476)
nnetFit4 <- train(x = training[,reducedSet],
y = training$Class,
method = "avNNet",
metric = "ROC",
preProc = c("center", "scale", "spatialSign"),
tuneGrid = nnetGrid,
trace = FALSE,
maxit = 2000,
repeats = 10,
MaxNWts = 10*(maxSize * (length(reducedSet) + 1) + maxSize + 1),
allowParallel = FALSE,
trControl = ctrl)
nnetFit4
nnetFit4$pred <- merge(nnetFit4$pred, nnetFit4$bestTune)
nnetCM <- confusionMatrix(nnetFit4, norm = "none")
nnetCM
nnetRoc <- roc(response = nnetFit4$pred$obs,
predictor = nnetFit4$pred$successful,
levels = rev(levels(nnetFit4$pred$obs)))
nnet1 <- nnetFit$results
nnet1$Transform <- "No Transformation"
nnet1$Model <- "Single Model"
nnet2 <- nnetFit2$results
nnet2$Transform <- "Spatial Sign"
nnet2$Model <- "Single Model"
nnet3 <- nnetFit3$results
nnet3$Transform <- "No Transformation"
nnet3$Model <- "Model Averaging"
nnet3$bag <- NULL
nnet4 <- nnetFit4$results
nnet4$Transform <- "Spatial Sign"
nnet4$Model <- "Model Averaging"
nnet4$bag <- NULL
nnetResults <- rbind(nnet1, nnet2, nnet3, nnet4)
nnetResults$Model <- factor(as.character(nnetResults$Model),
levels = c("Single Model", "Model Averaging"))
library(latticeExtra)
useOuterStrips(
xyplot(ROC ~ size|Model*Transform,
data = nnetResults,
groups = decay,
as.table = TRUE,
type = c("p", "l", "g"),
lty = 1,
ylab = "ROC AUC (2008 Hold-Out Data)",
xlab = "Number of Hidden Units",
auto.key = list(columns = 4,
title = "Weight Decay",
cex.title = 1)))
plot(nnetRoc, type = "s", legacy.axes = TRUE)
################################################################################
### Section 13.3 Flexible Discriminant Analysis
set.seed(476)
fdaFit <- train(x = training[,reducedSet],
y = training$Class,
method = "fda",
metric = "ROC",
tuneGrid = expand.grid(degree = 1, nprune = 2:25),
trControl = ctrl)
fdaFit
fdaFit$pred <- merge(fdaFit$pred, fdaFit$bestTune)
fdaCM <- confusionMatrix(fdaFit, norm = "none")
fdaCM
fdaRoc <- roc(response = fdaFit$pred$obs,
predictor = fdaFit$pred$successful,
levels = rev(levels(fdaFit$pred$obs)))
update(plot(fdaFit), ylab = "ROC AUC (2008 Hold-Out Data)")
plot(nnetRoc, type = "s", col = rgb(.2, .2, .2, .2), legacy.axes = TRUE)
plot(fdaRoc, type = "s", add = TRUE, legacy.axes = TRUE)
################################################################################
### Section 13.4 Support Vector Machines
library(kernlab)
set.seed(201)
sigmaRangeFull <- sigest(as.matrix(training[,fullSet]))
svmRGridFull <- expand.grid(sigma = as.vector(sigmaRangeFull)[1],
C = 2^(-3:4))
set.seed(476)
svmRFitFull <- train(x = training[,fullSet],
y = training$Class,
method = "svmRadial",
metric = "ROC",
preProc = c("center", "scale"),
tuneGrid = svmRGridFull,
trControl = ctrl)
svmRFitFull
set.seed(202)
sigmaRangeReduced <- sigest(as.matrix(training[,reducedSet]))
svmRGridReduced <- expand.grid(sigma = sigmaRangeReduced[1],
C = 2^(seq(-4, 4)))
set.seed(476)
svmRFitReduced <- train(x = training[,reducedSet],
y = training$Class,
method = "svmRadial",
metric = "ROC",
preProc = c("center", "scale"),
tuneGrid = svmRGridReduced,
trControl = ctrl)
svmRFitReduced
svmPGrid <- expand.grid(degree = 1:2,
scale = c(0.01, .005),
C = 2^(seq(-6, -2, length = 10)))
set.seed(476)
svmPFitFull <- train(x = training[,fullSet],
y = training$Class,
method = "svmPoly",
metric = "ROC",
preProc = c("center", "scale"),
tuneGrid = svmPGrid,
trControl = ctrl)
svmPFitFull
svmPGrid2 <- expand.grid(degree = 1:2,
scale = c(0.01, .005),
C = 2^(seq(-6, -2, length = 10)))
set.seed(476)
svmPFitReduced <- train(x = training[,reducedSet],
y = training$Class,
method = "svmPoly",
metric = "ROC",
preProc = c("center", "scale"),
tuneGrid = svmPGrid2,
fit = FALSE,
trControl = ctrl)
svmPFitReduced
svmPFitReduced$pred <- merge(svmPFitReduced$pred, svmPFitReduced$bestTune)
svmPCM <- confusionMatrix(svmPFitReduced, norm = "none")
svmPRoc <- roc(response = svmPFitReduced$pred$obs,
predictor = svmPFitReduced$pred$successful,
levels = rev(levels(svmPFitReduced$pred$obs)))
svmRadialResults <- rbind(svmRFitReduced$results,
svmRFitFull$results)
svmRadialResults$Set <- c(rep("Reduced Set", nrow(svmRFitReduced$results)),
                          rep("Full Set", nrow(svmRFitFull$results)))
svmRadialResults$Sigma <- paste("sigma = ",
format(svmRadialResults$sigma,
scientific = FALSE, digits= 5))
svmRadialResults <- svmRadialResults[!is.na(svmRadialResults$ROC),]
xyplot(ROC ~ C|Set, data = svmRadialResults,
groups = Sigma, type = c("g", "o"),
xlab = "Cost",
ylab = "ROC (2008 Hold-Out Data)",
auto.key = list(columns = 2),
scales = list(x = list(log = 2)))
svmPolyResults <- rbind(svmPFitReduced$results,
svmPFitFull$results)
svmPolyResults$Set <- c(rep("Reduced Set", nrow(svmPFitReduced$results)),
                        rep("Full Set", nrow(svmPFitFull$results)))
svmPolyResults <- svmPolyResults[!is.na(svmPolyResults$ROC),]
svmPolyResults$scale <- paste("scale = ",
format(svmPolyResults$scale,
scientific = FALSE))
svmPolyResults$Degree <- "Linear"
svmPolyResults$Degree[svmPolyResults$degree == 2] <- "Quadratic"
useOuterStrips(xyplot(ROC ~ C|Degree*Set, data = svmPolyResults,
groups = scale, type = c("g", "o"),
xlab = "Cost",
ylab = "ROC (2008 Hold-Out Data)",
auto.key = list(columns = 2),
scales = list(x = list(log = 2))))
plot(nnetRoc, type = "s", col = rgb(.2, .2, .2, .2), legacy.axes = TRUE)
plot(fdaRoc, type = "s", add = TRUE, col = rgb(.2, .2, .2, .2), legacy.axes = TRUE)
plot(svmPRoc, type = "s", add = TRUE, legacy.axes = TRUE)
################################################################################
### Section 13.5 K-Nearest Neighbors
set.seed(476)
knnFit <- train(x = training[,reducedSet],
y = training$Class,
method = "knn",
metric = "ROC",
preProc = c("center", "scale"),
tuneGrid = data.frame(k = c(4*(0:5)+1,20*(1:5)+1,50*(2:9)+1)),
trControl = ctrl)
knnFit
knnFit$pred <- merge(knnFit$pred, knnFit$bestTune)
knnCM <- confusionMatrix(knnFit, norm = "none")
knnCM
knnRoc <- roc(response = knnFit$pred$obs,
predictor = knnFit$pred$successful,
levels = rev(levels(knnFit$pred$obs)))
update(plot(knnFit, ylab = "ROC (2008 Hold-Out Data)"))
plot(fdaRoc, type = "s", col = rgb(.2, .2, .2, .2), legacy.axes = TRUE)
plot(nnetRoc, type = "s", add = TRUE, col = rgb(.2, .2, .2, .2), legacy.axes = TRUE)
plot(svmPRoc, type = "s", add = TRUE, col = rgb(.2, .2, .2, .2), legacy.axes = TRUE)
plot(knnRoc, type = "s", add = TRUE, legacy.axes = TRUE)
################################################################################
### Section 13.6 Naive Bayes
## Create factor versions of some of the predictors so that they are treated
## as categories and not dummy variables
factors <- c("SponsorCode", "ContractValueBand", "Month", "Weekday")
nbPredictors <- factorPredictors[factorPredictors %in% reducedSet]
nbPredictors <- c(nbPredictors, factors)
nbPredictors <- nbPredictors[nbPredictors != "SponsorUnk"]
nbTraining <- training[, c("Class", nbPredictors)]
nbTesting <- testing[, c("Class", nbPredictors)]
for(i in nbPredictors)
{
if(length(unique(training[,i])) <= 15)
{
nbTraining[, i] <- factor(nbTraining[,i], levels = paste(sort(unique(training[,i]))))
nbTesting[, i] <- factor(nbTesting[,i], levels = paste(sort(unique(training[,i]))))
}
}
set.seed(476)
nBayesFit <- train(x = nbTraining[,nbPredictors],
y = nbTraining$Class,
method = "nb",
metric = "ROC",
tuneGrid = data.frame(usekernel = c(TRUE, FALSE), fL = 2),
trControl = ctrl)
nBayesFit
nBayesFit$pred <- merge(nBayesFit$pred, nBayesFit$bestTune)
nBayesCM <- confusionMatrix(nBayesFit, norm = "none")
nBayesCM
nBayesRoc <- roc(response = nBayesFit$pred$obs,
predictor = nBayesFit$pred$successful,
levels = rev(levels(nBayesFit$pred$obs)))
nBayesRoc
sessionInfo()
q("no")
| /scratch/gouwar.j/cran-all/cranData/AppliedPredictiveModeling/inst/chapters/13_Non-Linear_Class.R |
################################################################################
### R code from Applied Predictive Modeling (2013) by Kuhn and Johnson.
### Copyright 2013 Kuhn and Johnson
### Web Page: http://www.appliedpredictivemodeling.com
### Contact: Max Kuhn ([email protected])
###
### Chapter 14 Classification Trees and Rule Based Models
###
### Required packages: AppliedPredictiveModeling, C50, caret, doMC (optional),
### gbm, lattice, partykit, pROC, randomForest, reshape2,
### rpart, RWeka
###
### Data used: The grant application data. See the file 'CreateGrantData.R'
###
### Notes:
### 1) This code is provided without warranty.
###
### 2) This code should help the user reproduce the results in the
### text. There will be differences between this code and what is in
### the computing section. For example, the computing sections show
### how the source functions work (e.g. randomForest() or plsr()),
### which were not directly used when creating the book. Also, there may be
### syntax differences that occur over time as packages evolve. These files
### will reflect those changes.
###
### 3) In some cases, the calculations in the book were run in
### parallel. The sub-processes may reset the random number seed.
### Your results may slightly vary.
###
################################################################################
### NOTE: Many of the models here are computationally expensive. If
### this script is run as-is, the memory requirements will accumulate
### until they exceed 32 GB.
################################################################################
### Section 14.1 Basic Classification Trees
library(caret)
load("grantData.RData")
ctrl <- trainControl(method = "LGOCV",
summaryFunction = twoClassSummary,
classProbs = TRUE,
index = list(TrainSet = pre2008),
savePredictions = TRUE)
set.seed(476)
rpartFit <- train(x = training[,fullSet],
y = training$Class,
method = "rpart",
tuneLength = 30,
metric = "ROC",
trControl = ctrl)
rpartFit
library(partykit)
plot(as.party(rpartFit$finalModel))
rpart2008 <- merge(rpartFit$pred, rpartFit$bestTune)
rpartCM <- confusionMatrix(rpartFit, norm = "none")
rpartCM
rpartRoc <- roc(response = rpartFit$pred$obs,
predictor = rpartFit$pred$successful,
levels = rev(levels(rpartFit$pred$obs)))
set.seed(476)
rpartFactorFit <- train(x = training[,factorPredictors],
y = training$Class,
method = "rpart",
tuneLength = 30,
metric = "ROC",
trControl = ctrl)
rpartFactorFit
plot(as.party(rpartFactorFit$finalModel))
rpartFactor2008 <- merge(rpartFactorFit$pred, rpartFactorFit$bestTune)
rpartFactorCM <- confusionMatrix(rpartFactorFit, norm = "none")
rpartFactorCM
rpartFactorRoc <- roc(response = rpartFactorFit$pred$obs,
predictor = rpartFactorFit$pred$successful,
levels = rev(levels(rpartFactorFit$pred$obs)))
plot(rpartRoc, type = "s", print.thres = c(.5),
print.thres.pch = 3,
print.thres.pattern = "",
print.thres.cex = 1.2,
col = "red", legacy.axes = TRUE,
print.thres.col = "red")
plot(rpartFactorRoc,
type = "s",
add = TRUE,
print.thres = c(.5),
print.thres.pch = 16, legacy.axes = TRUE,
print.thres.pattern = "",
print.thres.cex = 1.2)
legend(.75, .2,
c("Grouped Categories", "Independent Categories"),
lwd = c(1, 1),
col = c("black", "red"),
pch = c(16, 3))
set.seed(476)
j48FactorFit <- train(x = training[,factorPredictors],
y = training$Class,
method = "J48",
metric = "ROC",
trControl = ctrl)
j48FactorFit
j48Factor2008 <- merge(j48FactorFit$pred, j48FactorFit$bestTune)
j48FactorCM <- confusionMatrix(j48FactorFit, norm = "none")
j48FactorCM
j48FactorRoc <- roc(response = j48FactorFit$pred$obs,
predictor = j48FactorFit$pred$successful,
levels = rev(levels(j48FactorFit$pred$obs)))
set.seed(476)
j48Fit <- train(x = training[,fullSet],
y = training$Class,
method = "J48",
metric = "ROC",
trControl = ctrl)
j482008 <- merge(j48Fit$pred, j48Fit$bestTune)
j48CM <- confusionMatrix(j48Fit, norm = "none")
j48CM
j48Roc <- roc(response = j48Fit$pred$obs,
predictor = j48Fit$pred$successful,
levels = rev(levels(j48Fit$pred$obs)))
plot(j48FactorRoc, type = "s", print.thres = c(.5),
print.thres.pch = 16, print.thres.pattern = "",
print.thres.cex = 1.2, legacy.axes = TRUE)
plot(j48Roc, type = "s", print.thres = c(.5),
print.thres.pch = 3, print.thres.pattern = "",
print.thres.cex = 1.2, legacy.axes = TRUE,
add = TRUE, col = "red", print.thres.col = "red")
legend(.75, .2,
c("Grouped Categories", "Independent Categories"),
lwd = c(1, 1),
col = c("black", "red"),
pch = c(16, 3))
plot(rpartFactorRoc, type = "s", add = TRUE,
col = rgb(.2, .2, .2, .2), legacy.axes = TRUE)
################################################################################
### Section 14.2 Rule-Based Models
set.seed(476)
partFit <- train(x = training[,fullSet],
y = training$Class,
method = "PART",
metric = "ROC",
trControl = ctrl)
partFit
part2008 <- merge(partFit$pred, partFit$bestTune)
partCM <- confusionMatrix(partFit, norm = "none")
partCM
partRoc <- roc(response = partFit$pred$obs,
predictor = partFit$pred$successful,
levels = rev(levels(partFit$pred$obs)))
partRoc
set.seed(476)
partFactorFit <- train(training[,factorPredictors], training$Class,
method = "PART",
metric = "ROC",
trControl = ctrl)
partFactorFit
partFactor2008 <- merge(partFactorFit$pred, partFactorFit$bestTune)
partFactorCM <- confusionMatrix(partFactorFit, norm = "none")
partFactorCM
partFactorRoc <- roc(response = partFactorFit$pred$obs,
predictor = partFactorFit$pred$successful,
levels = rev(levels(partFactorFit$pred$obs)))
partFactorRoc
################################################################################
### Section 14.3 Bagged Trees
set.seed(476)
treebagFit <- train(x = training[,fullSet],
y = training$Class,
method = "treebag",
nbagg = 50,
metric = "ROC",
trControl = ctrl)
treebagFit
treebag2008 <- merge(treebagFit$pred, treebagFit$bestTune)
treebagCM <- confusionMatrix(treebagFit, norm = "none")
treebagCM
treebagRoc <- roc(response = treebagFit$pred$obs,
predictor = treebagFit$pred$successful,
levels = rev(levels(treebagFit$pred$obs)))
set.seed(476)
treebagFactorFit <- train(x = training[,factorPredictors],
y = training$Class,
method = "treebag",
nbagg = 50,
metric = "ROC",
trControl = ctrl)
treebagFactorFit
treebagFactor2008 <- merge(treebagFactorFit$pred, treebagFactorFit$bestTune)
treebagFactorCM <- confusionMatrix(treebagFactorFit, norm = "none")
treebagFactorCM
treebagFactorRoc <- roc(response = treebagFactorFit$pred$obs,
predictor = treebagFactorFit$pred$successful,
levels = rev(levels(treebagFactorFit$pred$obs)))
plot(rpartRoc, type = "s", col = rgb(.2, .2, .2, .2), legacy.axes = TRUE)
plot(j48FactorRoc, type = "s", add = TRUE, col = rgb(.2, .2, .2, .2),
legacy.axes = TRUE)
plot(treebagRoc, type = "s", add = TRUE, print.thres = c(.5),
print.thres.pch = 3, legacy.axes = TRUE, print.thres.pattern = "",
print.thres.cex = 1.2,
col = "red", print.thres.col = "red")
plot(treebagFactorRoc, type = "s", add = TRUE, print.thres = c(.5),
print.thres.pch = 16, print.thres.pattern = "", legacy.axes = TRUE,
print.thres.cex = 1.2)
legend(.75, .2,
c("Grouped Categories", "Independent Categories"),
lwd = c(1, 1),
col = c("black", "red"),
pch = c(16, 3))
################################################################################
### Section 14.4 Random Forests
### For the book, this model was run with only 500 trees (by
### accident). More than 1000 trees are usually required to get consistent
### results.
mtryValues <- c(5, 10, 20, 32, 50, 100, 250, 500, 1000)
set.seed(476)
rfFit <- train(x = training[,fullSet],
y = training$Class,
method = "rf",
ntree = 500,
tuneGrid = data.frame(mtry = mtryValues),
importance = TRUE,
metric = "ROC",
trControl = ctrl)
rfFit
rf2008 <- merge(rfFit$pred, rfFit$bestTune)
rfCM <- confusionMatrix(rfFit, norm = "none")
rfCM
rfRoc <- roc(response = rfFit$pred$obs,
predictor = rfFit$pred$successful,
levels = rev(levels(rfFit$pred$obs)))
gc()
## The randomForest package cannot handle factors with more than 32
## levels, so we make a new set of predictors where the sponsor code
## factor is entered as dummy variables instead of a single factor.
sponsorVars <- grep("Sponsor", names(training), value = TRUE)
sponsorVars <- sponsorVars[sponsorVars != "SponsorCode"]
rfPredictors <- factorPredictors
rfPredictors <- rfPredictors[rfPredictors != "SponsorCode"]
rfPredictors <- c(rfPredictors, sponsorVars)
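## Optional check (not in the original text): confirm that no remaining factor
## predictor exceeds randomForest's 32-level limit.
max(sapply(training[, rfPredictors],
           function(x) if(is.factor(x)) nlevels(x) else 0))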
set.seed(476)
rfFactorFit <- train(x = training[,rfPredictors],
y = training$Class,
method = "rf",
ntree = 1500,
tuneGrid = data.frame(mtry = mtryValues),
importance = TRUE,
metric = "ROC",
trControl = ctrl)
rfFactorFit
rfFactor2008 <- merge(rfFactorFit$pred, rfFactorFit$bestTune)
rfFactorCM <- confusionMatrix(rfFactorFit, norm = "none")
rfFactorCM
rfFactorRoc <- roc(response = rfFactorFit$pred$obs,
predictor = rfFactorFit$pred$successful,
levels = rev(levels(rfFactorFit$pred$obs)))
plot(treebagRoc, type = "s", col = rgb(.2, .2, .2, .2), legacy.axes = TRUE)
plot(rpartRoc, type = "s", add = TRUE, col = rgb(.2, .2, .2, .2), legacy.axes = TRUE)
plot(j48FactorRoc, type = "s", add = TRUE, col = rgb(.2, .2, .2, .2),
legacy.axes = TRUE)
plot(rfRoc, type = "s", add = TRUE, print.thres = c(.5),
print.thres.pch = 3, legacy.axes = TRUE, print.thres.pattern = "",
print.thres.cex = 1.2,
col = "red", print.thres.col = "red")
plot(rfFactorRoc, type = "s", add = TRUE, print.thres = c(.5),
print.thres.pch = 16, print.thres.pattern = "", legacy.axes = TRUE,
print.thres.cex = 1.2)
legend(.75, .2,
c("Grouped Categories", "Independent Categories"),
lwd = c(1, 1),
col = c("black", "red"),
pch = c(16, 3))
################################################################################
### Section 14.5 Boosting
gbmGrid <- expand.grid(interaction.depth = c(1, 3, 5, 7, 9),
n.trees = (1:20)*100,
shrinkage = c(.01, .1))
set.seed(476)
gbmFit <- train(x = training[,fullSet],
y = training$Class,
method = "gbm",
tuneGrid = gbmGrid,
metric = "ROC",
verbose = FALSE,
trControl = ctrl)
gbmFit
gbmFit$pred <- merge(gbmFit$pred, gbmFit$bestTune)
gbmCM <- confusionMatrix(gbmFit, norm = "none")
gbmCM
gbmRoc <- roc(response = gbmFit$pred$obs,
predictor = gbmFit$pred$successful,
levels = rev(levels(gbmFit$pred$obs)))
set.seed(476)
gbmFactorFit <- train(x = training[,factorPredictors],
y = training$Class,
method = "gbm",
tuneGrid = gbmGrid,
verbose = FALSE,
metric = "ROC",
trControl = ctrl)
gbmFactorFit
gbmFactorFit$pred <- merge(gbmFactorFit$pred, gbmFactorFit$bestTune)
gbmFactorCM <- confusionMatrix(gbmFactorFit, norm = "none")
gbmFactorCM
gbmFactorRoc <- roc(response = gbmFactorFit$pred$obs,
predictor = gbmFactorFit$pred$successful,
levels = rev(levels(gbmFactorFit$pred$obs)))
gbmROCRange <- extendrange(cbind(gbmFactorFit$results$ROC,gbmFit$results$ROC))
plot(gbmFactorFit, ylim = gbmROCRange,
auto.key = list(columns = 4, lines = TRUE))
plot(gbmFit, ylim = gbmROCRange,
auto.key = list(columns = 4, lines = TRUE))
plot(treebagRoc, type = "s", col = rgb(.2, .2, .2, .2), legacy.axes = TRUE)
plot(rpartRoc, type = "s", add = TRUE, col = rgb(.2, .2, .2, .2), legacy.axes = TRUE)
plot(j48FactorRoc, type = "s", add = TRUE, col = rgb(.2, .2, .2, .2), legacy.axes = TRUE)
plot(rfFactorRoc, type = "s", add = TRUE, col = rgb(.2, .2, .2, .2), legacy.axes = TRUE)
plot(gbmRoc, type = "s", print.thres = c(.5), print.thres.pch = 3,
print.thres.pattern = "", print.thres.cex = 1.2,
add = TRUE, col = "red", print.thres.col = "red", legacy.axes = TRUE)
plot(gbmFactorRoc, type = "s", print.thres = c(.5), print.thres.pch = 16,
legacy.axes = TRUE, print.thres.pattern = "", print.thres.cex = 1.2,
add = TRUE)
legend(.75, .2,
c("Grouped Categories", "Independent Categories"),
lwd = c(1, 1),
col = c("black", "red"),
pch = c(16, 3))
################################################################################
### Section 14.5 C5.0
c50Grid <- expand.grid(trials = c(1:9, (1:10)*10),
model = c("tree", "rules"),
winnow = c(TRUE, FALSE))
set.seed(476)
c50FactorFit <- train(training[,factorPredictors], training$Class,
method = "C5.0",
tuneGrid = c50Grid,
verbose = FALSE,
metric = "ROC",
trControl = ctrl)
c50FactorFit
c50FactorFit$pred <- merge(c50FactorFit$pred, c50FactorFit$bestTune)
c50FactorCM <- confusionMatrix(c50FactorFit, norm = "none")
c50FactorCM
c50FactorRoc <- roc(response = c50FactorFit$pred$obs,
predictor = c50FactorFit$pred$successful,
levels = rev(levels(c50FactorFit$pred$obs)))
set.seed(476)
c50Fit <- train(training[,fullSet], training$Class,
method = "C5.0",
tuneGrid = c50Grid,
metric = "ROC",
verbose = FALSE,
trControl = ctrl)
c50Fit
c50Fit$pred <- merge(c50Fit$pred, c50Fit$bestTune)
c50CM <- confusionMatrix(c50Fit, norm = "none")
c50CM
c50Roc <- roc(response = c50Fit$pred$obs,
predictor = c50Fit$pred$successful,
levels = rev(levels(c50Fit$pred$obs)))
update(plot(c50FactorFit), ylab = "ROC AUC (2008 Hold-Out Data)")
plot(treebagRoc, type = "s", col = rgb(.2, .2, .2, .2), legacy.axes = TRUE)
plot(rpartRoc, type = "s", add = TRUE, col = rgb(.2, .2, .2, .2), legacy.axes = TRUE)
plot(j48FactorRoc, type = "s", add = TRUE, col = rgb(.2, .2, .2, .2), legacy.axes = TRUE)
plot(rfFactorRoc, type = "s", add = TRUE, col = rgb(.2, .2, .2, .2), legacy.axes = TRUE)
plot(gbmRoc, type = "s", col = rgb(.2, .2, .2, .2), add = TRUE, legacy.axes = TRUE)
plot(c50Roc, type = "s", print.thres = c(.5), print.thres.pch = 3,
print.thres.pattern = "", print.thres.cex = 1.2,
add = TRUE, col = "red", print.thres.col = "red", legacy.axes = TRUE)
plot(c50FactorRoc, type = "s", print.thres = c(.5), print.thres.pch = 16,
print.thres.pattern = "", print.thres.cex = 1.2,
add = TRUE, legacy.axes = TRUE)
legend(.75, .2,
c("Grouped Categories", "Independent Categories"),
lwd = c(1, 1),
col = c("black", "red"),
pch = c(16, 3))
################################################################################
### Section 14.7 Comparing Two Encodings of Categorical Predictors
## Pull the hold-out results from each model and merge
rp1 <- caret:::getTrainPerf(rpartFit)
names(rp1) <- gsub("Train", "Independent", names(rp1))
rp2 <- caret:::getTrainPerf(rpartFactorFit)
rp2$Label <- "CART"
names(rp2) <- gsub("Train", "Grouped", names(rp2))
rp <- cbind(rp1, rp2)
j481 <- caret:::getTrainPerf(j48Fit)
names(j481) <- gsub("Train", "Independent", names(j481))
j482 <- caret:::getTrainPerf(j48FactorFit)
j482$Label <- "J48"
names(j482) <- gsub("Train", "Grouped", names(j482))
j48 <- cbind(j481, j482)
part1 <- caret:::getTrainPerf(partFit)
names(part1) <- gsub("Train", "Independent", names(part1))
part2 <- caret:::getTrainPerf(partFactorFit)
part2$Label <- "PART"
names(part2) <- gsub("Train", "Grouped", names(part2))
part <- cbind(part1, part2)
tb1 <- caret:::getTrainPerf(treebagFit)
names(tb1) <- gsub("Train", "Independent", names(tb1))
tb2 <- caret:::getTrainPerf(treebagFactorFit)
tb2$Label <- "Bagged Tree"
names(tb2) <- gsub("Train", "Grouped", names(tb2))
tb <- cbind(tb1, tb2)
rf1 <- caret:::getTrainPerf(rfFit)
names(rf1) <- gsub("Train", "Independent", names(rf1))
rf2 <- caret:::getTrainPerf(rfFactorFit)
rf2$Label <- "Random Forest"
names(rf2) <- gsub("Train", "Grouped", names(rf2))
rf <- cbind(rf1, rf2)
gbm1 <- caret:::getTrainPerf(gbmFit)
names(gbm1) <- gsub("Train", "Independent", names(gbm1))
gbm2 <- caret:::getTrainPerf(gbmFactorFit)
gbm2$Label <- "Boosted Tree"
names(gbm2) <- gsub("Train", "Grouped", names(gbm2))
bst <- cbind(gbm1, gbm2)
c501 <- caret:::getTrainPerf(c50Fit)
names(c501) <- gsub("Train", "Independent", names(c501))
c502 <- caret:::getTrainPerf(c50FactorFit)
c502$Label <- "C5.0"
names(c502) <- gsub("Train", "Grouped", names(c502))
c5 <- cbind(c501, c502)
trainPerf <- rbind(rp, j48, part, tb, rf, bst, c5)
library(lattice)
library(reshape2)
trainPerf <- melt(trainPerf)
trainPerf$metric <- "ROC"
trainPerf$metric[grepl("Sens", trainPerf$variable)] <- "Sensitivity"
trainPerf$metric[grepl("Spec", trainPerf$variable)] <- "Specificity"
trainPerf$model <- "Grouped"
trainPerf$model[grepl("Independent", trainPerf$variable)] <- "Independent"
trainPerf$Label <- factor(trainPerf$Label,
levels = rev(c("CART", "Cond. Trees", "J48", "Ripper",
"PART", "Bagged Tree", "Random Forest",
"Boosted Tree", "C5.0")))
dotplot(Label ~ value|metric,
data = trainPerf,
groups = model,
horizontal = TRUE,
auto.key = list(columns = 2),
between = list(x = 1),
xlab = "")
sessionInfo()
q("no")
| /scratch/gouwar.j/cran-all/cranData/AppliedPredictiveModeling/inst/chapters/14_Class_Trees.R |
################################################################################
### R code from Applied Predictive Modeling (2013) by Kuhn and Johnson.
### Copyright 2013 Kuhn and Johnson
### Web Page: http://www.appliedpredictivemodeling.com
### Contact: Max Kuhn ([email protected])
###
### Chapter 16: Remedies for Severe Class Imbalance
###
### Required packages: AppliedPredictiveModeling, caret, C50, earth, DMwR,
###                    DWD, kernlab, mda, pROC, randomForest, rpart
###
### Data used: The insurance data from the DWD package.
###
### Notes:
### 1) This code is provided without warranty.
###
### 2) This code should help the user reproduce the results in the
### text. There will be differences between this code and what is in
### the computing section. For example, the computing sections show
### how the source functions work (e.g. randomForest() or plsr()),
### which were not directly used when creating the book. Also, there may be
### syntax differences that occur over time as packages evolve. These files
### will reflect those changes.
###
### 3) In some cases, the calculations in the book were run in
### parallel. The sub-processes may reset the random number seed.
### Your results may slightly vary.
###
################################################################################
################################################################################
### Section 16.1 Case Study: Predicting Caravan Policy Ownership
library(DWD)
data(ticdata)
### Some of the predictor names and levels have characters that would results in
### illegal variable names. We convert then to more generic names and treat the
### ordered factors as nominal (i.e. unordered) factors.
isOrdered <- unlist(lapply(ticdata, function(x) any(class(x) == "ordered")))
recodeLevels <- function(x)
{
x <- gsub("f ", "", as.character(x))
x <- gsub(" - ", "_to_", x)
x <- gsub("-", "_to_", x)
x <- gsub("%", "", x)
x <- gsub("?", "Unk", x, fixed = TRUE)
x <- gsub("[,'\\(\\)]", "", x)
x <- gsub(" ", "_", x)
factor(paste("_", x, sep = ""))
}
convertCols <- c("STYPE", "MGEMLEEF", "MOSHOOFD",
names(isOrdered)[isOrdered])
for(i in convertCols) ticdata[,i] <- factor(gsub(" ", "0",format(as.numeric(ticdata[,i]))))
ticdata$CARAVAN <- factor(as.character(ticdata$CARAVAN),
levels = rev(levels(ticdata$CARAVAN)))
### Split the data into three sets: training, test and evaluation.
library(caret)
library(pROC)
set.seed(156)
split1 <- createDataPartition(ticdata$CARAVAN, p = .7)[[1]]
other <- ticdata[-split1,]
training <- ticdata[ split1,]
set.seed(934)
split2 <- createDataPartition(other$CARAVAN, p = 1/3)[[1]]
evaluation <- other[ split2,]
testing <- other[-split2,]
predictors <- names(training)[names(training) != "CARAVAN"]
testResults <- data.frame(CARAVAN = testing$CARAVAN)
evalResults <- data.frame(CARAVAN = evaluation$CARAVAN)
trainingInd <- data.frame(model.matrix(CARAVAN ~ ., data = training))[,-1]
evaluationInd <- data.frame(model.matrix(CARAVAN ~ ., data = evaluation))[,-1]
testingInd <- data.frame(model.matrix(CARAVAN ~ ., data = testing))[,-1]
trainingInd$CARAVAN <- training$CARAVAN
evaluationInd$CARAVAN <- evaluation$CARAVAN
testingInd$CARAVAN <- testing$CARAVAN
isNZV <- nearZeroVar(trainingInd)
noNZVSet <- names(trainingInd)[-isNZV]
testResults <- data.frame(CARAVAN = testing$CARAVAN)
evalResults <- data.frame(CARAVAN = evaluation$CARAVAN)
################################################################################
### Section 16.2 The Effect of Class Imbalance
### These functions are used to measure performance
fiveStats <- function(...) c(twoClassSummary(...), defaultSummary(...))
fourStats <- function (data, lev = levels(data$obs), model = NULL)
{
accKapp <- postResample(data[, "pred"], data[, "obs"])
out <- c(accKapp,
sensitivity(data[, "pred"], data[, "obs"], lev[1]),
specificity(data[, "pred"], data[, "obs"], lev[2]))
names(out)[3:4] <- c("Sens", "Spec")
out
}
ctrl <- trainControl(method = "cv",
classProbs = TRUE,
summaryFunction = fiveStats)
ctrlNoProb <- ctrl
ctrlNoProb$summaryFunction <- fourStats
ctrlNoProb$classProbs <- FALSE
set.seed(1410)
rfFit <- train(CARAVAN ~ ., data = trainingInd,
method = "rf",
trControl = ctrl,
ntree = 1500,
tuneLength = 5,
metric = "ROC")
rfFit
evalResults$RF <- predict(rfFit, evaluationInd, type = "prob")[,1]
testResults$RF <- predict(rfFit, testingInd, type = "prob")[,1]
rfROC <- roc(evalResults$CARAVAN, evalResults$RF,
levels = rev(levels(evalResults$CARAVAN)))
rfROC
rfEvalCM <- confusionMatrix(predict(rfFit, evaluationInd), evalResults$CARAVAN)
rfEvalCM
set.seed(1410)
lrFit <- train(CARAVAN ~ .,
data = trainingInd[, noNZVSet],
method = "glm",
trControl = ctrl,
metric = "ROC")
lrFit
evalResults$LogReg <- predict(lrFit, evaluationInd[, noNZVSet], type = "prob")[,1]
testResults$LogReg <- predict(lrFit, testingInd[, noNZVSet], type = "prob")[,1]
lrROC <- roc(evalResults$CARAVAN, evalResults$LogReg,
levels = rev(levels(evalResults$CARAVAN)))
lrROC
lrEvalCM <- confusionMatrix(predict(lrFit, evaluationInd), evalResults$CARAVAN)
lrEvalCM
set.seed(1401)
fdaFit <- train(CARAVAN ~ ., data = training,
method = "fda",
tuneGrid = data.frame(degree = 1, nprune = 1:25),
metric = "ROC",
trControl = ctrl)
fdaFit
evalResults$FDA <- predict(fdaFit, evaluation[, predictors], type = "prob")[,1]
testResults$FDA <- predict(fdaFit, testing[, predictors], type = "prob")[,1]
fdaROC <- roc(evalResults$CARAVAN, evalResults$FDA,
levels = rev(levels(evalResults$CARAVAN)))
fdaROC
fdaEvalCM <- confusionMatrix(predict(fdaFit, evaluation[, predictors]), evalResults$CARAVAN)
fdaEvalCM
labs <- c(RF = "Random Forest", LogReg = "Logistic Regression",
FDA = "FDA (MARS)")
lift1 <- lift(CARAVAN ~ RF + LogReg + FDA, data = evalResults,
labels = labs)
plotTheme <- caretTheme()
plot(fdaROC, type = "S", col = plotTheme$superpose.line$col[3], legacy.axes = TRUE)
plot(rfROC, type = "S", col = plotTheme$superpose.line$col[1], add = TRUE, legacy.axes = TRUE)
plot(lrROC, type = "S", col = plotTheme$superpose.line$col[2], add = TRUE, legacy.axes = TRUE)
legend(.7, .25,
c("Random Forest", "Logistic Regression", "FDA (MARS)"),
cex = .85,
col = plotTheme$superpose.line$col[1:3],
lwd = rep(2, 3),
lty = rep(1, 3))
xyplot(lift1,
ylab = "%Events Found",
xlab = "%Customers Evaluated",
lwd = 2,
type = "l")
################################################################################
### Section 16.4 Alternate Cutoffs
rfThresh <- coords(rfROC, x = "best", ret="threshold",
best.method="closest.topleft")
rfThreshY <- coords(rfROC, x = "best", ret="threshold",
best.method="youden")
cutText <- ifelse(rfThresh == rfThreshY,
"is the same as",
"is similar to")
evalResults$rfAlt <- factor(ifelse(evalResults$RF > rfThresh,
"insurance", "noinsurance"),
levels = levels(evalResults$CARAVAN))
testResults$rfAlt <- factor(ifelse(testResults$RF > rfThresh,
"insurance", "noinsurance"),
levels = levels(testResults$CARAVAN))
rfAltEvalCM <- confusionMatrix(evalResults$rfAlt, evalResults$CARAVAN)
rfAltEvalCM
rfAltTestCM <- confusionMatrix(testResults$rfAlt, testResults$CARAVAN)
rfAltTestCM
rfTestCM <- confusionMatrix(predict(rfFit, testingInd), testResults$CARAVAN)
plot(rfROC, print.thres = c(.5, .3, .10, rfThresh), type = "S",
print.thres.pattern = "%.3f (Spec = %.2f, Sens = %.2f)",
print.thres.cex = .8, legacy.axes = TRUE)
################################################################################
### Section 16.5 Adjusting Prior Probabilities
priors <- table(ticdata$CARAVAN)/nrow(ticdata)*100
fdaPriors <- fdaFit
fdaPriors$finalModel$prior <- c(insurance = .6, noinsurance = .4)
fdaPriorPred <- predict(fdaPriors, evaluation[,predictors])
evalResults$FDAprior <- predict(fdaPriors, evaluation[,predictors], type = "prob")[,1]
testResults$FDAprior <- predict(fdaPriors, testing[,predictors], type = "prob")[,1]
fdaPriorCM <- confusionMatrix(fdaPriorPred, evaluation$CARAVAN)
fdaPriorCM
fdaPriorROC <- roc(testResults$CARAVAN, testResults$FDAprior,
levels = rev(levels(testResults$CARAVAN)))
fdaPriorROC
################################################################################
### Section 16.7 Sampling Methods
set.seed(1237)
downSampled <- downSample(trainingInd[, -ncol(trainingInd)], training$CARAVAN)
set.seed(1237)
upSampled <- upSample(trainingInd[, -ncol(trainingInd)], training$CARAVAN)
library(DMwR)
set.seed(1237)
smoted <- SMOTE(CARAVAN ~ ., data = trainingInd)
set.seed(1410)
rfDown <- train(Class ~ ., data = downSampled,
"rf",
trControl = ctrl,
ntree = 1500,
tuneLength = 5,
metric = "ROC")
rfDown
evalResults$RFdown <- predict(rfDown, evaluationInd, type = "prob")[,1]
testResults$RFdown <- predict(rfDown, testingInd, type = "prob")[,1]
rfDownROC <- roc(evalResults$CARAVAN, evalResults$RFdown,
levels = rev(levels(evalResults$CARAVAN)))
rfDownROC
set.seed(1401)
rfDownInt <- train(CARAVAN ~ ., data = trainingInd,
"rf",
ntree = 1500,
tuneLength = 5,
strata = training$CARAVAN,
sampsize = rep(sum(training$CARAVAN == "insurance"), 2),
metric = "ROC",
trControl = ctrl)
rfDownInt
evalResults$RFdownInt <- predict(rfDownInt, evaluationInd, type = "prob")[,1]
testResults$RFdownInt <- predict(rfDownInt, testingInd, type = "prob")[,1]
rfDownIntRoc <- roc(evalResults$CARAVAN,
evalResults$RFdownInt,
levels = rev(levels(training$CARAVAN)))
rfDownIntRoc
set.seed(1410)
rfUp <- train(Class ~ ., data = upSampled,
"rf",
trControl = ctrl,
ntree = 1500,
tuneLength = 5,
metric = "ROC")
rfUp
evalResults$RFup <- predict(rfUp, evaluationInd, type = "prob")[,1]
testResults$RFup <- predict(rfUp, testingInd, type = "prob")[,1]
rfUpROC <- roc(evalResults$CARAVAN, evalResults$RFup,
levels = rev(levels(evalResults$CARAVAN)))
rfUpROC
set.seed(1410)
rfSmote <- train(CARAVAN ~ ., data = smoted,
"rf",
trControl = ctrl,
ntree = 1500,
tuneLength = 5,
metric = "ROC")
rfSmote
evalResults$RFsmote <- predict(rfSmote, evaluationInd, type = "prob")[,1]
testResults$RFsmote <- predict(rfSmote, testingInd, type = "prob")[,1]
rfSmoteROC <- roc(evalResults$CARAVAN, evalResults$RFsmote,
levels = rev(levels(evalResults$CARAVAN)))
rfSmoteROC
rfSmoteCM <- confusionMatrix(predict(rfSmote, evaluationInd), evalResults$CARAVAN)
rfSmoteCM
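### samplingSummary() computes, for a fitted model, the areas under the ROC
### curve for the evaluation and test sets, plus the sensitivity and
### specificity at the evaluation set's "closest to top-left" cutoff.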
samplingSummary <- function(x, evl, tst)
{
lvl <- rev(levels(tst$CARAVAN))
evlROC <- roc(evl$CARAVAN,
predict(x, evl, type = "prob")[,1],
levels = lvl)
rocs <- c(auc(evlROC),
auc(roc(tst$CARAVAN,
predict(x, tst, type = "prob")[,1],
levels = lvl)))
cut <- coords(evlROC, x = "best", ret="threshold",
best.method="closest.topleft")
bestVals <- coords(evlROC, cut, ret=c("sensitivity", "specificity"))
out <- c(rocs, bestVals*100)
names(out) <- c("evROC", "tsROC", "tsSens", "tsSpec")
out
}
rfResults <- rbind(samplingSummary(rfFit, evaluationInd, testingInd),
samplingSummary(rfDown, evaluationInd, testingInd),
samplingSummary(rfDownInt, evaluationInd, testingInd),
samplingSummary(rfUp, evaluationInd, testingInd),
samplingSummary(rfSmote, evaluationInd, testingInd))
rownames(rfResults) <- c("Original", "Down--Sampling", "Down--Sampling (Internal)",
"Up--Sampling", "SMOTE")
rfResults
rocCols <- c("black", rgb(1, 0, 0, .5), rgb(0, 0, 1, .5))
plot(roc(testResults$CARAVAN, testResults$RF, levels = rev(levels(testResults$CARAVAN))),
type = "S", col = rocCols[1], legacy.axes = TRUE)
plot(roc(testResults$CARAVAN, testResults$RFdownInt, levels = rev(levels(testResults$CARAVAN))),
type = "S", col = rocCols[2],add = TRUE, legacy.axes = TRUE)
plot(roc(testResults$CARAVAN, testResults$RFsmote, levels = rev(levels(testResults$CARAVAN))),
type = "S", col = rocCols[3], add = TRUE, legacy.axes = TRUE)
legend(.6, .4,
c("Normal", "Down-Sampling (Internal)", "SMOTE"),
lty = rep(1, 3),
lwd = rep(2, 3),
cex = .8,
col = rocCols)
xyplot(lift(CARAVAN ~ RF + RFdownInt + RFsmote,
data = testResults),
type = "l",
ylab = "%Events Found",
xlab = "%Customers Evaluated")
################################################################################
### Section 16.8 Cost–Sensitive Training
library(kernlab)
set.seed(1157)
sigma <- sigest(CARAVAN ~ ., data = trainingInd[, noNZVSet], frac = .75)
names(sigma) <- NULL
svmGrid1 <- data.frame(sigma = sigma[2],
C = 2^c(2:10))
set.seed(1401)
svmFit <- train(CARAVAN ~ .,
data = trainingInd[, noNZVSet],
method = "svmRadial",
tuneGrid = svmGrid1,
preProc = c("center", "scale"),
metric = "Kappa",
trControl = ctrl)
svmFit
evalResults$SVM <- predict(svmFit, evaluationInd[, noNZVSet], type = "prob")[,1]
testResults$SVM <- predict(svmFit, testingInd[, noNZVSet], type = "prob")[,1]
svmROC <- roc(evalResults$CARAVAN, evalResults$SVM,
levels = rev(levels(evalResults$CARAVAN)))
svmROC
svmTestROC <- roc(testResults$CARAVAN, testResults$SVM,
levels = rev(levels(testResults$CARAVAN)))
svmTestROC
confusionMatrix(predict(svmFit, evaluationInd[, noNZVSet]), evalResults$CARAVAN)
confusionMatrix(predict(svmFit, testingInd[, noNZVSet]), testingInd$CARAVAN)
set.seed(1401)
svmWtFit <- train(CARAVAN ~ .,
data = trainingInd[, noNZVSet],
method = "svmRadial",
tuneGrid = svmGrid1,
preProc = c("center", "scale"),
metric = "Kappa",
class.weights = c(insurance = 18, noinsurance = 1),
trControl = ctrlNoProb)
svmWtFit
svmWtEvalCM <- confusionMatrix(predict(svmWtFit, evaluationInd[, noNZVSet]), evalResults$CARAVAN)
svmWtEvalCM
svmWtTestCM <- confusionMatrix(predict(svmWtFit, testingInd[, noNZVSet]), testingInd$CARAVAN)
svmWtTestCM
library(rpart)
initialRpart <- rpart(CARAVAN ~ ., data = training,
control = rpart.control(cp = 0.0001))
rpartGrid <- data.frame(cp = initialRpart$cptable[, "CP"])
cmat <- list(loss = matrix(c(0, 1, 20, 0), ncol = 2))
set.seed(1401)
cartWMod <- train(x = training[,predictors],
y = training$CARAVAN,
method = "rpart",
trControl = ctrlNoProb,
tuneGrid = rpartGrid,
metric = "Kappa",
parms = cmat)
cartWMod
library(C50)
c5Grid <- expand.grid(model = c("tree", "rules"),
trials = c(1, (1:10)*10),
winnow = FALSE)
finalCost <- matrix(c(0, 20, 1, 0), ncol = 2)
rownames(finalCost) <- colnames(finalCost) <- levels(training$CARAVAN)
set.seed(1401)
C5CostFit <- train(training[, predictors],
training$CARAVAN,
method = "C5.0",
metric = "Kappa",
tuneGrid = c5Grid,
cost = finalCost,
control = C5.0Control(earlyStopping = FALSE),
trControl = ctrlNoProb)
C5CostCM <- confusionMatrix(predict(C5CostFit, testing), testing$CARAVAN)
C5CostCM
################################################################################
### Session Information
sessionInfo()
q("no")
| /scratch/gouwar.j/cran-all/cranData/AppliedPredictiveModeling/inst/chapters/16_Class_Imbalance.R |
################################################################################
### R code from Applied Predictive Modeling (2013) by Kuhn and Johnson.
### Copyright 2013 Kuhn and Johnson
### Web Page: http://www.appliedpredictivemodeling.com
### Contact: Max Kuhn ([email protected])
###
### Chapter 17: Case Study: Job Scheduling
###
### Required packages: AppliedPredictiveModeling, C50, caret, doMC (optional),
### earth, Hmisc, ipred, tabplot, kernlab, lattice, MASS,
### mda, nnet, pls, randomForest, rpart, sparseLDA,
###
### Data used: The HPC job scheduling data in the AppliedPredictiveModeling
### package.
###
### Notes:
### 1) This code is provided without warranty.
###
### 2) This code should help the user reproduce the results in the
### text. There will be differences between this code and what is in
### the computing section. For example, the computing sections show
### how the source functions work (e.g. randomForest() or plsr()),
### which were not directly used when creating the book. Also, there may be
### syntax differences that occur over time as packages evolve. These files
### will reflect those changes.
###
### 3) In some cases, the calculations in the book were run in
### parallel. The sub-processes may reset the random number seed.
### Your results may slightly vary.
###
################################################################################
library(AppliedPredictiveModeling)
data(schedulingData)
### Make a vector of predictor names
predictors <- names(schedulingData)[!(names(schedulingData) %in% c("Class"))]
### A few summaries and plots of the data
library(Hmisc)
describe(schedulingData)
library(tabplot)
tableplot(schedulingData[, c( "Class", predictors)])
mosaicplot(table(schedulingData$Protocol,
schedulingData$Class),
main = "")
library(lattice)
xyplot(Compounds ~ InputFields|Protocol,
data = schedulingData,
scales = list(x = list(log = 10), y = list(log = 10)),
groups = Class,
xlab = "Input Fields",
auto.key = list(columns = 4),
aspect = 1,
as.table = TRUE)
################################################################################
### Section 17.1 Data Splitting and Model Strategy
## Split the data
library(caret)
set.seed(1104)
inTrain <- createDataPartition(schedulingData$Class, p = .8, list = FALSE)
### There are a lot of zeros and the distribution is skewed. We add
### one so that we can log transform the data
schedulingData$NumPending <- schedulingData$NumPending + 1
trainData <- schedulingData[ inTrain,]
testData <- schedulingData[-inTrain,]
### Create a main effects only model formula to use
### repeatedly. Another formula with nonlinear effects is created
### below.
modForm <- as.formula(Class ~ Protocol + log10(Compounds) +
log10(InputFields)+ log10(Iterations) +
log10(NumPending) + Hour + Day)
### Create an expanded set of predictors with interactions.
modForm2 <- as.formula(Class ~ (Protocol + log10(Compounds) +
log10(InputFields)+ log10(Iterations) +
log10(NumPending) + Hour + Day)^2)
### Some of these terms will not be estimable. For example, if there
### are no data points where a particular protocol was run on a
### particular day, the full interaction cannot be computed. We use
### model.matrix() to create the whole set of predictor columns, then
### remove those that are zero variance
expandedTrain <- model.matrix(modForm2, data = trainData)
expandedTest <- model.matrix(modForm2, data = testData)
expandedTrain <- as.data.frame(expandedTrain)
expandedTest <- as.data.frame(expandedTest)
### Some models have issues when there is a zero variance predictor
### within the data of a particular class, so we used caret's
### checkConditionalX() function to find the offending columns and
### remove them
zv <- checkConditionalX(expandedTrain, trainData$Class)
### Keep the expanded set to use for models where we must manually add
### more complex terms (such as logistic regression)
expandedTrain <- expandedTrain[,-zv]
expandedTest <- expandedTest[, -zv]
### Create the cost matrix
costMatrix <- ifelse(diag(4) == 1, 0, 1)
costMatrix[4, 1] <- 10
costMatrix[3, 1] <- 5
costMatrix[4, 2] <- 5
costMatrix[3, 2] <- 5
rownames(costMatrix) <- colnames(costMatrix) <- levels(trainData$Class)
### Create a cost function
cost <- function(pred, obs)
{
isNA <- is.na(pred)
if(!all(isNA))
{
pred <- pred[!isNA]
obs <- obs[!isNA]
cost <- ifelse(pred == obs, 0, 1)
if(any(pred == "VF" & obs == "L")) cost[pred == "L" & obs == "VF"] <- 10
if(any(pred == "F" & obs == "L")) cost[pred == "F" & obs == "L"] <- 5
if(any(pred == "F" & obs == "M")) cost[pred == "F" & obs == "M"] <- 5
if(any(pred == "VF" & obs == "M")) cost[pred == "VF" & obs == "M"] <- 5
out <- mean(cost)
} else out <- NA
out
}
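### Illustrative check (added here; not part of the original script): with the
### penalties above, predicting "VF" for a true "L" costs 10 and "F" for a
### true "L" costs 5, so the mean cost of these two errors should be 7.5.
cost(factor(c("VF", "F"), levels = c("VF", "F", "M", "L")),
     factor(c("L", "L"), levels = c("VF", "F", "M", "L")))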
### Make a summary function that can be used with caret's train() function
costSummary <- function (data, lev = NULL, model = NULL)
{
if (is.character(data$obs)) data$obs <- factor(data$obs, levels = lev)
c(postResample(data[, "pred"], data[, "obs"]),
Cost = cost(data[, "pred"], data[, "obs"]))
}
### Create a control object for the models
ctrl <- trainControl(method = "repeatedcv",
repeats = 5,
summaryFunction = costSummary)
### Optional: parallel processing can be used via the 'do' packages,
### such as doMC, doMPI etc. We used doMC (not on Windows) to speed
### up the computations.
### WARNING: Be aware of how much memory is needed to parallel
### process. It can very quickly overwhelm the available hardware. The
### estimate of the median memory usage (VSIZE = total memory size)
### was 3300-4100M per core although some calculations require as
### much as 3400M without parallel processing.
library(doMC)
registerDoMC(14)
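### As a hedged, cross-platform alternative (an assumption on our part; the
### book's results were generated with doMC), the doParallel backend could be
### registered instead:
### library(doParallel)
### cl <- makeCluster(14)
### registerDoParallel(cl)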
### Fit the CART model with and without costs
set.seed(857)
rpFit <- train(x = trainData[, predictors],
y = trainData$Class,
method = "rpart",
metric = "Cost",
maximize = FALSE,
tuneLength = 20,
trControl = ctrl)
rpFit
set.seed(857)
rpFitCost <- train(x = trainData[, predictors],
y = trainData$Class,
method = "rpart",
metric = "Cost",
maximize = FALSE,
tuneLength = 20,
parms =list(loss = costMatrix),
trControl = ctrl)
rpFitCost
set.seed(857)
ldaFit <- train(x = expandedTrain,
y = trainData$Class,
method = "lda",
metric = "Cost",
maximize = FALSE,
trControl = ctrl)
ldaFit
sldaGrid <- expand.grid(NumVars = seq(2, 112, by = 5),
lambda = c(0, 0.01, .1, 1, 10))
set.seed(857)
sldaFit <- train(x = expandedTrain,
y = trainData$Class,
method = "sparseLDA",
tuneGrid = sldaGrid,
preProc = c("center", "scale"),
metric = "Cost",
maximize = FALSE,
trControl = ctrl)
sldaFit
set.seed(857)
nnetGrid <- expand.grid(decay = c(0, 0.001, 0.01, .1, .5),
size = (1:10)*2 - 1)
nnetFit <- train(modForm,
data = trainData,
method = "nnet",
metric = "Cost",
maximize = FALSE,
tuneGrid = nnetGrid,
trace = FALSE,
MaxNWts = 2000,
maxit = 1000,
preProc = c("center", "scale"),
trControl = ctrl)
nnetFit
set.seed(857)
plsFit <- train(x = expandedTrain,
y = trainData$Class,
method = "pls",
metric = "Cost",
maximize = FALSE,
tuneLength = 100,
preProc = c("center", "scale"),
trControl = ctrl)
plsFit
set.seed(857)
fdaFit <- train(modForm, data = trainData,
method = "fda",
metric = "Cost",
maximize = FALSE,
tuneLength = 25,
trControl = ctrl)
fdaFit
set.seed(857)
rfFit <- train(x = trainData[, predictors],
y = trainData$Class,
method = "rf",
metric = "Cost",
maximize = FALSE,
tuneLength = 10,
ntree = 2000,
importance = TRUE,
trControl = ctrl)
rfFit
set.seed(857)
rfFitCost <- train(x = trainData[, predictors],
y = trainData$Class,
method = "rf",
metric = "Cost",
maximize = FALSE,
tuneLength = 10,
ntree = 2000,
classwt = c(VF = 1, F = 1, M = 5, L = 10),
importance = TRUE,
trControl = ctrl)
rfFitCost
c5Grid <- expand.grid(trials = c(1, (1:10)*10),
model = "tree",
winnow = c(TRUE, FALSE))
set.seed(857)
c50Fit <- train(x = trainData[, predictors],
y = trainData$Class,
method = "C5.0",
metric = "Cost",
maximize = FALSE,
tuneGrid = c5Grid,
trControl = ctrl)
c50Fit
set.seed(857)
c50Cost <- train(x = trainData[, predictors],
y = trainData$Class,
method = "C5.0",
metric = "Cost",
maximize = FALSE,
costs = costMatrix,
tuneGrid = c5Grid,
trControl = ctrl)
c50Cost
set.seed(857)
bagFit <- train(x = trainData[, predictors],
y = trainData$Class,
method = "treebag",
metric = "Cost",
maximize = FALSE,
nbagg = 50,
trControl = ctrl)
bagFit
### Use the caret bag() function to bag the cost-sensitive CART model
rpCost <- function(x, y)
{
costMatrix <- ifelse(diag(4) == 1, 0, 1)
costMatrix[4, 1] <- 10
costMatrix[3, 1] <- 5
costMatrix[4, 2] <- 5
costMatrix[3, 2] <- 5
library(rpart)
tmp <- x
tmp$y <- y
rpart(y~., data = tmp, control = rpart.control(cp = 0),
parms =list(loss = costMatrix))
}
rpPredict <- function(object, x) predict(object, x)
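### rpAgg() below aggregates the class probability matrices produced by the
### bagged CART fits: for each class it takes the per-sample median
### probability across the fits, renormalizes each row to sum to one, and
### returns the class with the largest aggregated probability.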
rpAgg <- function (x, type = "class")
{
pooled <- x[[1]] * NA
n <- nrow(pooled)
classes <- colnames(pooled)
for (i in 1:ncol(pooled))
{
tmp <- lapply(x, function(y, col) y[, col], col = i)
tmp <- do.call("rbind", tmp)
pooled[, i] <- apply(tmp, 2, median)
}
pooled <- apply(pooled, 1, function(x) x/sum(x))
if (n != nrow(pooled)) pooled <- t(pooled)
out <- factor(classes[apply(pooled, 1, which.max)], levels = classes)
out
}
set.seed(857)
rpCostBag <- train(trainData[, predictors],
trainData$Class,
"bag",
B = 50,
bagControl = bagControl(fit = rpCost,
predict = rpPredict,
aggregate = rpAgg,
downSample = FALSE,
allowParallel = FALSE),
trControl = ctrl)
rpCostBag
set.seed(857)
svmRFit <- train(modForm ,
data = trainData,
method = "svmRadial",
metric = "Cost",
maximize = FALSE,
preProc = c("center", "scale"),
tuneLength = 15,
trControl = ctrl)
svmRFit
set.seed(857)
svmRFitCost <- train(modForm, data = trainData,
method = "svmRadial",
metric = "Cost",
maximize = FALSE,
preProc = c("center", "scale"),
class.weights = c(VF = 1, F = 1, M = 5, L = 10),
tuneLength = 15,
trControl = ctrl)
svmRFitCost
modelList <- list(C5.0 = c50Fit,
"C5.0 (Costs)" = c50Cost,
CART =rpFit,
"CART (Costs)" = rpFitCost,
"Bagging (Costs)" = rpCostBag,
FDA = fdaFit,
SVM = svmRFit,
"SVM (Weights)" = svmRFitCost,
PLS = plsFit,
"Random Forests" = rfFit,
LDA = ldaFit,
"LDA (Sparse)" = sldaFit,
"Neural Networks" = nnetFit,
Bagging = bagFit)
################################################################################
### Section 17.2 Results
rs <- resamples(modelList)
summary(rs)
confusionMatrix(rpFitCost, "none")
confusionMatrix(rfFit, "none")
plot(bwplot(rs, metric = "Cost"))
rfPred <- predict(rfFit, testData)
rpPred <- predict(rpFitCost, testData)
confusionMatrix(rfPred, testData$Class)
confusionMatrix(rpPred, testData$Class)
################################################################################
### Session Information
sessionInfo()
q("no")
| /scratch/gouwar.j/cran-all/cranData/AppliedPredictiveModeling/inst/chapters/17_Job_Scheduling.R |
################################################################################
### R code from Applied Predictive Modeling (2013) by Kuhn and Johnson.
### Copyright 2013 Kuhn and Johnson
### Web Page: http://www.appliedpredictivemodeling.com
### Contact: Max Kuhn ([email protected])
###
### Chapter 18: Measuring Predictor Importance
###
### Required packages: AppliedPredictiveModeling, caret, CORElearn, corrplot,
### pROC, minerva, randomForest
###
###
### Data used: The solubility data from the AppliedPredictiveModeling
### package, the segmentation data in the caret package and the
### grant data (created using "CreateGrantData.R" in the same
### directory as this file).
###
### Notes:
### 1) This code is provided without warranty.
###
### 2) This code should help the user reproduce the results in the
### text. There will be differences between this code and what is in
### the computing section. For example, the computing sections show
### how the source functions work (e.g. randomForest() or plsr()),
### which were not directly used when creating the book. Also, there may be
### syntax differences that occur over time as packages evolve. These files
### will reflect those changes.
###
### 3) In some cases, the calculations in the book were run in
### parallel. The sub-processes may reset the random number seed.
### Your results may slightly vary.
###
################################################################################
################################################################################
### Section 18.1 Numeric Outcomes
## Load the solubility data
library(AppliedPredictiveModeling)
data(solubility)
trainData <- solTrainXtrans
trainData$y <- solTrainY
## keep the continuous predictors and append the outcome to the data frame
SolContPred <- solTrainXtrans[, !grepl("FP", names(solTrainXtrans))]
numSolPred <- ncol(SolContPred)
SolContPred$Sol <- solTrainY
## Get the LOESS smoother and the summary measure
library(caret)
smoother <- filterVarImp(x = SolContPred[, -ncol(SolContPred)],
y = solTrainY,
nonpara = TRUE)
smoother$Predictor <- rownames(smoother)
names(smoother)[1] <- "Smoother"
## Calculate the correlation matrices and keep the columns with the correlations
## between the predictors and the outcome
correlations <- cor(SolContPred)[-(numSolPred+1),(numSolPred+1)]
rankCorrelations <- cor(SolContPred, method = "spearman")[-(numSolPred+1),(numSolPred+1)]
corrs <- data.frame(Predictor = names(SolContPred)[1:numSolPred],
Correlation = correlations,
RankCorrelation = rankCorrelations)
## The maximal information coefficient (MIC) values can be obtained from the
### minerva package:
library(minerva)
MIC <- mine(x = SolContPred[, 1:numSolPred], y = solTrainY)$MIC
MIC <- data.frame(Predictor = rownames(MIC),
MIC = MIC[,1])
## The Relief values for regression can be computed using the CORElearn
## package:
library(CORElearn)
ReliefF <- attrEval(Sol ~ ., data = SolContPred,
estimator = "RReliefFequalK")
ReliefF <- data.frame(Predictor = names(ReliefF),
Relief = ReliefF)
## Combine them all together for a plot
contDescrScores <- merge(smoother, corrs)
contDescrScores <- merge(contDescrScores, MIC)
contDescrScores <- merge(contDescrScores, ReliefF)
rownames(contDescrScores) <- contDescrScores$Predictor
contDescrScores
contDescrSplomData <- contDescrScores
contDescrSplomData$Correlation <- abs(contDescrSplomData$Correlation)
contDescrSplomData$RankCorrelation <- abs(contDescrSplomData$RankCorrelation)
contDescrSplomData$Group <- "Other"
contDescrSplomData$Group[grepl("Surface", contDescrSplomData$Predictor)] <- "SA"
featurePlot(solTrainXtrans[, c("NumCarbon", "SurfaceArea2")],
solTrainY,
between = list(x = 1),
type = c("g", "p", "smooth"),
df = 3,
aspect = 1,
labels = c("", "Solubility"))
splom(~contDescrSplomData[,c(3, 4, 2, 5)],
groups = contDescrSplomData$Group,
varnames = c("Correlation", "Rank\nCorrelation", "LOESS", "MIC"))
## Now look at the categorical (i.e. binary) predictors
SolCatPred <- solTrainXtrans[, grepl("FP", names(solTrainXtrans))]
SolCatPred$Sol <- solTrainY
numSolCatPred <- ncol(SolCatPred) - 1
tests <- apply(SolCatPred[, 1:numSolCatPred], 2,
function(x, y)
{
tStats <- t.test(y ~ x)[c("statistic", "p.value", "estimate")]
unlist(tStats)
},
y = solTrainY)
## The results are a matrix with predictors in columns, so we transpose it
tests <- as.data.frame(t(tests))
names(tests) <- c("t.Statistic", "t.test_p.value", "mean0", "mean1")
tests$difference <- tests$mean1 - tests$mean0
tests
## Create a volcano plot
xyplot(-log10(t.test_p.value) ~ difference,
data = tests,
xlab = "Mean With Structure - Mean Without Structure",
ylab = "-log(p-Value)",
type = "p")
################################################################################
### Section 18.2 Categorical Outcomes
## Load the segmentation data
data(segmentationData)
segTrain <- subset(segmentationData, Case == "Train")
segTrain$Case <- segTrain$Cell <- NULL
segTest <- subset(segmentationData, Case != "Train")
segTest$Case <- segTest$Cell <- NULL
## Compute the areas under the ROC curve
aucVals <- filterVarImp(x = segTrain[, -1], y = segTrain$Class)
aucVals$Predictor <- rownames(aucVals)
## Calculate the t-tests as before but with x and y switched
segTests <- apply(segTrain[, -1], 2,
function(x, y)
{
tStats <- t.test(x ~ y)[c("statistic", "p.value", "estimate")]
unlist(tStats)
},
y = segTrain$Class)
segTests <- as.data.frame(t(segTests))
names(segTests) <- c("t.Statistic", "t.test_p.value", "mean0", "mean1")
segTests$Predictor <- rownames(segTests)
## Fit a random forest model and get the importance scores
library(randomForest)
set.seed(791)
rfImp <- randomForest(Class ~ ., data = segTrain,
ntree = 2000,
importance = TRUE)
rfValues <- data.frame(RF = importance(rfImp)[, "MeanDecreaseGini"],
Predictor = rownames(importance(rfImp)))
## Now compute the Relief scores
set.seed(791)
ReliefValues <- attrEval(Class ~ ., data = segTrain,
estimator="ReliefFequalK", ReliefIterations = 50)
ReliefValues <- data.frame(Relief = ReliefValues,
Predictor = names(ReliefValues))
## and the MIC statistics
set.seed(791)
segMIC <- mine(x = segTrain[, -1],
## Pass the outcome as 0/1
y = ifelse(segTrain$Class == "PS", 1, 0))$MIC
segMIC <- data.frame(Predictor = rownames(segMIC),
MIC = segMIC[,1])
rankings <- merge(segMIC, ReliefValues)
rankings <- merge(rankings, rfValues)
rankings <- merge(rankings, segTests)
rankings <- merge(rankings, aucVals)
rankings
rankings$channel <- "Channel 1"
rankings$channel[grepl("Ch2$", rankings$Predictor)] <- "Channel 2"
rankings$channel[grepl("Ch3$", rankings$Predictor)] <- "Channel 3"
rankings$channel[grepl("Ch4$", rankings$Predictor)] <- "Channel 4"
rankings$t.Statistic <- abs(rankings$t.Statistic)
splom(~rankings[, c("PS", "t.Statistic", "RF", "Relief", "MIC")],
groups = rankings$channel,
varnames = c("ROC\nAUC", "Abs\nt-Stat", "Random\nForest", "Relief", "MIC"),
auto.key = list(columns = 2))
## Load the grant data. A script to create and save these data is contained
## in the same directory as this file.
load("grantData.RData")
dataSubset <- training[pre2008, c("Sponsor62B", "ContractValueBandUnk", "RFCD240302")]
## This is a simple function to compute several statistics for binary predictors
tableCalcs <- function(x, y)
{
tab <- table(x, y)
fet <- fisher.test(tab)
out <- c(OR = fet$estimate,
P = fet$p.value,
Gain = attrEval(y ~ x, estimator = "GainRatio"))
}
## lapply() is used to execute the function on each column
tableResults <- lapply(dataSubset, tableCalcs, y = training[pre2008, "Class"])
## The results come back as a list of vectors, and "rbind" is used to join
## them together as rows of a table
tableResults <- do.call("rbind", tableResults)
tableResults
## The permuted Relief scores can be computed using a function from the
## AppliedPredictiveModeling package.
permuted <- permuteRelief(x = training[pre2008, c("Sponsor62B", "Day", "NumCI")],
y = training[pre2008, "Class"],
nperm = 500,
### the remaining options are passed to attrEval()
estimator="ReliefFequalK",
ReliefIterations= 50)
## The original Relief scores:
permuted$observed
## The number of standard deviations away from the permuted mean:
permuted$standardized
## The distributions of the scores if there were no relationship between the
## predictors and outcomes
histogram(~value|Predictor,
data = permuted$permutations,
xlim = extendrange(permuted$permutations$value),
xlab = "Relief Score")
################################################################################
### Session Information
sessionInfo()
q("no")
| /scratch/gouwar.j/cran-all/cranData/AppliedPredictiveModeling/inst/chapters/18_Importance.R |
################################################################################
### R code from Applied Predictive Modeling (2013) by Kuhn and Johnson.
### Copyright 2013 Kuhn and Johnson
### Web Page: http://www.appliedpredictivemodeling.com
### Contact: Max Kuhn ([email protected])
###
### Chapter 19: An Introduction to Feature Selection
###
### Required packages: AppliedPredictiveModeling, caret, MASS, corrplot,
###                    RColorBrewer, randomForest, kernlab, klaR, pROC
###
###
### Data used: The Alzheimer disease data from the AppliedPredictiveModeling
### package
###
### Notes:
### 1) This code is provided without warranty.
###
### 2) This code should help the user reproduce the results in the
### text. There will be differences between this code and what is in
### the computing section. For example, the computing sections show
### how the source functions work (e.g. randomForest() or plsr()),
### which were not directly used when creating the book. Also, there may be
### syntax differences that occur over time as packages evolve. These files
### will reflect those changes.
###
### 3) In some cases, the calculations in the book were run in
### parallel. The sub-processes may reset the random number seed.
### Your results may slightly vary.
###
################################################################################
################################################################################
### Section 19.6 Case Study: Predicting Cognitive Impairment
library(AppliedPredictiveModeling)
data(AlzheimerDisease)
## The baseline set of predictors
bl <- c("Genotype", "age", "tau", "p_tau", "Ab_42", "male")
## The set of new assays
newAssays <- colnames(predictors)
newAssays <- newAssays[!(newAssays %in% c("Class", bl))]
## Decompose the genotype factor into binary dummy variables
predictors$E2 <- predictors$E3 <- predictors$E4 <- 0
predictors$E2[grepl("2", predictors$Genotype)] <- 1
predictors$E3[grepl("3", predictors$Genotype)] <- 1
predictors$E4[grepl("4", predictors$Genotype)] <- 1
genotype <- predictors$Genotype
## Partition the data
library(caret)
library(pROC)
set.seed(730)
split <- createDataPartition(diagnosis, p = .8, list = FALSE)
adData <- predictors
adData$Class <- diagnosis
training <- adData[ split, ]
testing <- adData[-split, ]
predVars <- names(adData)[!(names(adData) %in% c("Class", "Genotype"))]
## This summary function is used to evaluate the models.
fiveStats <- function(...) c(twoClassSummary(...), defaultSummary(...))
## We create the cross-validation files as a list to use with different
## functions
set.seed(104)
index <- createMultiFolds(training$Class, times = 5)
## The candidate set of the number of predictors to evaluate
varSeq <- seq(1, length(predVars)-1, by = 2)
## We can also use parallel processing to run each resampled RFE
## iteration (or resampled model with train()) using different
## workers.
library(doMC)
registerDoMC(15)
## The rfe() function in the caret package is used for recursive feature
## elimination. We set up control functions for this and train() that use
## the same cross-validation folds. The 'ctrl' object will be modified several
## times as we try different models
ctrl <- rfeControl(method = "repeatedcv", repeats = 5,
saveDetails = TRUE,
index = index,
returnResamp = "final")
fullCtrl <- trainControl(method = "repeatedcv",
repeats = 5,
summaryFunction = fiveStats,
classProbs = TRUE,
index = index)
## The correlation matrix of the new data
predCor <- cor(training[, newAssays])
library(RColorBrewer)
cols <- c(rev(brewer.pal(7, "Blues")),
brewer.pal(7, "Reds"))
library(corrplot)
corrplot(predCor,
order = "hclust",
tl.pos = "n",addgrid.col = rgb(1,1,1,.01),
col = colorRampPalette(cols)(51))
## Fit a series of models with the full set of predictors
set.seed(721)
rfFull <- train(training[, predVars],
training$Class,
method = "rf",
metric = "ROC",
tuneGrid = data.frame(mtry = floor(sqrt(length(predVars)))),
ntree = 1000,
trControl = fullCtrl)
rfFull
set.seed(721)
ldaFull <- train(training[, predVars],
training$Class,
method = "lda",
metric = "ROC",
## The 'tol' argument helps lda() know when a matrix is
## singular. One of the predictors has values very close to
                 ## zero, so we set the value to be smaller than the default
## value of 1.0e-4.
tol = 1.0e-12,
trControl = fullCtrl)
ldaFull
set.seed(721)
svmFull <- train(training[, predVars],
training$Class,
method = "svmRadial",
metric = "ROC",
tuneLength = 12,
preProc = c("center", "scale"),
trControl = fullCtrl)
svmFull
set.seed(721)
nbFull <- train(training[, predVars],
training$Class,
method = "nb",
metric = "ROC",
trControl = fullCtrl)
nbFull
lrFull <- train(training[, predVars],
training$Class,
method = "glm",
metric = "ROC",
trControl = fullCtrl)
lrFull
set.seed(721)
knnFull <- train(training[, predVars],
training$Class,
method = "knn",
metric = "ROC",
tuneLength = 20,
preProc = c("center", "scale"),
trControl = fullCtrl)
knnFull
## Now fit the RFE versions. To do this, the 'functions' argument of the rfe()
## object is modified to the appropriate functions. For more details about
## these functions and their arguments, see
##
## http://caret.r-forge.r-project.org/featureSelection.html
##
## for more information.
ctrl$functions <- rfFuncs
ctrl$functions$summary <- fiveStats
set.seed(721)
rfRFE <- rfe(training[, predVars],
training$Class,
sizes = varSeq,
metric = "ROC",
ntree = 1000,
rfeControl = ctrl)
rfRFE
ctrl$functions <- ldaFuncs
ctrl$functions$summary <- fiveStats
set.seed(721)
ldaRFE <- rfe(training[, predVars],
training$Class,
sizes = varSeq,
metric = "ROC",
tol = 1.0e-12,
rfeControl = ctrl)
ldaRFE
ctrl$functions <- nbFuncs
ctrl$functions$summary <- fiveStats
set.seed(721)
nbRFE <- rfe(training[, predVars],
training$Class,
sizes = varSeq,
metric = "ROC",
rfeControl = ctrl)
nbRFE
## Here, the caretFuncs list allows for a model to be tuned at each iteration
## of feature selection.
ctrl$functions <- caretFuncs
ctrl$functions$summary <- fiveStats
## This option tells train() to run its model tuning
## sequentially. Otherwise, there would be parallel processing at two
## levels, which is possible but requires W^2 workers. On our machine,
## it was more efficient to only run the RFE process in parallel.
cvCtrl <- trainControl(method = "cv",
verboseIter = FALSE,
classProbs = TRUE,
allowParallel = FALSE)
set.seed(721)
svmRFE <- rfe(training[, predVars],
training$Class,
sizes = varSeq,
rfeControl = ctrl,
metric = "ROC",
## Now arguments to train() are used.
method = "svmRadial",
tuneLength = 12,
preProc = c("center", "scale"),
trControl = cvCtrl)
svmRFE
ctrl$functions <- lrFuncs
ctrl$functions$summary <- fiveStats
set.seed(721)
lrRFE <- rfe(training[, predVars],
training$Class,
sizes = varSeq,
metric = "ROC",
rfeControl = ctrl)
lrRFE
ctrl$functions <- caretFuncs
ctrl$functions$summary <- fiveStats
set.seed(721)
knnRFE <- rfe(training[, predVars],
training$Class,
sizes = varSeq,
metric = "ROC",
method = "knn",
tuneLength = 20,
preProc = c("center", "scale"),
trControl = cvCtrl,
rfeControl = ctrl)
knnRFE
## Each of these models can be evaluated using the plot() function to see
## the profile across subset sizes.
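## For example (a sketch added here; these plots were not part of the original
## output), the resampled ROC profiles for two of the RFE models:
plot(rfRFE, metric = "ROC")
plot(svmRFE, metric = "ROC")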
## Test set ROC results:
rfROCfull <- roc(testing$Class,
predict(rfFull, testing[,predVars], type = "prob")[,1])
rfROCfull
rfROCrfe <- roc(testing$Class,
predict(rfRFE, testing[,predVars])$Impaired)
rfROCrfe
ldaROCfull <- roc(testing$Class,
predict(ldaFull, testing[,predVars], type = "prob")[,1])
ldaROCfull
ldaROCrfe <- roc(testing$Class,
predict(ldaRFE, testing[,predVars])$Impaired)
ldaROCrfe
nbROCfull <- roc(testing$Class,
predict(nbFull, testing[,predVars], type = "prob")[,1])
nbROCfull
nbROCrfe <- roc(testing$Class,
predict(nbRFE, testing[,predVars])$Impaired)
nbROCrfe
svmROCfull <- roc(testing$Class,
predict(svmFull, testing[,predVars], type = "prob")[,1])
svmROCfull
svmROCrfe <- roc(testing$Class,
predict(svmRFE, testing[,predVars])$Impaired)
svmROCrfe
lrROCfull <- roc(testing$Class,
predict(lrFull, testing[,predVars], type = "prob")[,1])
lrROCfull
lrROCrfe <- roc(testing$Class,
predict(lrRFE, testing[,predVars])$Impaired)
lrROCrfe
knnROCfull <- roc(testing$Class,
predict(knnFull, testing[,predVars], type = "prob")[,1])
knnROCfull
knnROCrfe <- roc(testing$Class,
predict(knnRFE, testing[,predVars])$Impaired)
knnROCrfe
## For filter methods, the sbf() function (named for Selection By Filter) is
## used. It has similar arguments to rfe() to control the model fitting and
## filtering methods.
## P-values are created for filtering.
## A set of four LDA models are fit based on two factors: p-value adjustment
## using a Bonferroni adjustment and whether the predictors should be
## pre-screened for high correlations.
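## Two small helpers (described here for clarity): sbfResamp() reports how many
## predictors survived the filter in each resample (the mean by default) and
## sbfROC() computes the test set area under the ROC curve for an sbf fit.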
sbfResamp <- function(x, fun = mean)
{
x <- unlist(lapply(x$variables, length))
fun(x)
}
sbfROC <- function(mod) auc(roc(testing$Class, predict(mod, testing)$Impaired))
## This function calculates p-values using either a t-test (when the predictor
## has more than two distinct values) or using Fisher's Exact Test otherwise.
pScore <- function(x, y)
{
numX <- length(unique(x))
if(numX > 2)
{
out <- t.test(x ~ y)$p.value
} else {
out <- fisher.test(factor(x), y)$p.value
}
out
}
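## Illustrative check (added here; not part of the original script): a
## continuous predictor such as age takes the t-test branch, while a binary
## predictor such as male falls through to Fisher's exact test.
pScore(training$age, training$Class)
pScore(training$male, training$Class)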
ldaWithPvalues <- ldaSBF
ldaWithPvalues$score <- pScore
ldaWithPvalues$summary <- fiveStats
## Predictors are retained if their p-value is less than the completely
## subjective cut-off of 0.05.
ldaWithPvalues$filter <- function (score, x, y)
{
keepers <- score <= 0.05
keepers
}
sbfCtrl <- sbfControl(method = "repeatedcv",
repeats = 5,
verbose = TRUE,
functions = ldaWithPvalues,
index = index)
rawCorr <- sbf(training[, predVars],
training$Class,
tol = 1.0e-12,
sbfControl = sbfCtrl)
rawCorr
ldaWithPvalues$filter <- function (score, x, y)
{
score <- p.adjust(score, "bonferroni")
keepers <- score <= 0.05
keepers
}
sbfCtrl <- sbfControl(method = "repeatedcv",
repeats = 5,
verbose = TRUE,
functions = ldaWithPvalues,
index = index)
adjCorr <- sbf(training[, predVars],
training$Class,
tol = 1.0e-12,
sbfControl = sbfCtrl)
adjCorr
ldaWithPvalues$filter <- function (score, x, y)
{
keepers <- score <= 0.05
corrMat <- cor(x[,keepers])
tooHigh <- findCorrelation(corrMat, .75)
if(length(tooHigh) > 0) keepers[tooHigh] <- FALSE
keepers
}
sbfCtrl <- sbfControl(method = "repeatedcv",
repeats = 5,
verbose = TRUE,
functions = ldaWithPvalues,
index = index)
rawNoCorr <- sbf(training[, predVars],
training$Class,
tol = 1.0e-12,
sbfControl = sbfCtrl)
rawNoCorr
ldaWithPvalues$filter <- function (score, x, y)
{
score <- p.adjust(score, "bonferroni")
keepers <- score <= 0.05
corrMat <- cor(x[,keepers])
tooHigh <- findCorrelation(corrMat, .75)
if(length(tooHigh) > 0) keepers[tooHigh] <- FALSE
keepers
}
sbfCtrl <- sbfControl(method = "repeatedcv",
repeats = 5,
verbose = TRUE,
functions = ldaWithPvalues,
index = index)
adjNoCorr <- sbf(training[, predVars],
training$Class,
tol = 1.0e-12,
sbfControl = sbfCtrl)
adjNoCorr
## Filter methods test set ROC results:
sbfROC(rawCorr)
sbfROC(rawNoCorr)
sbfROC(adjCorr)
sbfROC(adjNoCorr)
## Get the resampling results for all the models
rfeResamples <- resamples(list(RF = rfRFE,
"Logistic Reg." = lrRFE,
"SVM" = svmRFE,
"$K$--NN" = knnRFE,
"N. Bayes" = nbRFE,
"LDA" = ldaRFE))
summary(rfeResamples)
fullResamples <- resamples(list(RF = rfFull,
"Logistic Reg." = lrFull,
"SVM" = svmFull,
"$K$--NN" = knnFull,
"N. Bayes" = nbFull,
"LDA" = ldaFull))
summary(fullResamples)
filteredResamples <- resamples(list("No Adjustment, Corr Vars" = rawCorr,
"No Adjustment, No Corr Vars" = rawNoCorr,
"Bonferroni, Corr Vars" = adjCorr,
"Bonferroni, No Corr Vars" = adjNoCorr))
summary(filteredResamples)
sessionInfo()
| /scratch/gouwar.j/cran-all/cranData/AppliedPredictiveModeling/inst/chapters/19_Feature_Select.R |
################################################################################
### R code from Applied Predictive Modeling (2013) by Kuhn and Johnson.
### Copyright 2013 Kuhn and Johnson
### Web Page: http://www.appliedpredictivemodeling.com
### Contact: Max Kuhn ([email protected])
###
### R code to process the Kaggle grant application data.
###
### Required packages: plyr, caret, lubridate
###
###
### Data used: The file unimelb_training.csv
###
### Notes:
### 1) This code is provided without warranty.
###
### 2) This code should help the user reproduce the results in the
### text. There will be differences between this code and what is in
### the computing section. For example, the computing sections show
### how the source functions work (e.g. randomForest() or plsr()),
### which were not directly used when creating the book. Also, there may be
### syntax differences that occur over time as packages evolve. These files
### will reflect those changes.
###
### 3) In some cases, the calculations in the book were run in
### parallel. The sub-processes may reset the random number seed.
### Your results may slightly vary.
###
################################################################################
## The plyr, caret and lubridate packages are used in this script. The
## code can also be run using multiple cores using the ddply()
## function. See ?ddply to get more information.
##
## The user will need the unimelb_training.csv file from the
## competition.
##
## These computations will take a fair amount of time and may consume
## a non-trivial amount of memory in the process.
##
## Load required libraries
library(plyr)
library(caret)
library(lubridate)
## How many cores on the machine should be used for the data
## processing. Making cores > 1 will speed things up (depending on your
## machine) but will consume more memory.
cores <- 3
if(cores > 1) {
library(doMC)
registerDoMC(cores)
}
## Read in the data in its raw form. Some of the column headings do
## not convert to proper R variable names, so many will contain dots,
## such as "Dept.No" instead of "Dept No"
raw <- read.csv("unimelb_training.csv")
## In many cases, missing values in categorical data will be converted
## to a value of "Unk"
raw$Sponsor.Code <- as.character(raw$Sponsor.Code)
raw$Sponsor.Code[raw$Sponsor.Code == ""] <- "Unk"
raw$Sponsor.Code <- factor(paste("Sponsor", raw$Sponsor.Code, sep = ""))
raw$Grant.Category.Code <- as.character(raw$Grant.Category.Code)
raw$Grant.Category.Code[raw$Grant.Category.Code == ""] <- "Unk"
raw$Grant.Category.Code <- factor(paste("GrantCat", raw$Grant.Category.Code, sep = ""))
raw$Contract.Value.Band...see.note.A <- as.character(raw$Contract.Value.Band...see.note.A)
raw$Contract.Value.Band...see.note.A[raw$Contract.Value.Band...see.note.A == ""] <- "Unk"
raw$Contract.Value.Band...see.note.A <- factor(paste("ContractValueBand", raw$Contract.Value.Band...see.note.A, sep = ""))
## Change missing Role.1 information to Unk
raw$Role.1 <- as.character(raw$Role.1)
raw$Role.1[raw$Role.1 == ""] <- "Unk"
## Get the unique values of the birth years and department
## codes. These will be used later to make factor variables
bYears <- unique(do.call("c", raw[,grep("Year.of.Birth", names(raw), fixed = TRUE)]))
bYears <- bYears[!is.na(bYears)]
dpmt <- unique(do.call("c", raw[,grep("Dept.No", names(raw), fixed = TRUE)]))
dpmt <- sort(dpmt[!is.na(dpmt)])
## At this point, the data for investigators is in different
## columns. We'll take this "horizontal" format and convert it to a
## "vertical" format where the data are stacked. This will make some
## of the data processing easier.
## Split up the data by role number (1-15) and add any missing columns
## (roles 1-5 have more columns than the others)
tmp <- vector(mode = "list", length = 15)
for(i in 1:15) {
tmpData <- raw[, c("Grant.Application.ID", grep(paste("\\.", i, "$", sep = ""), names(raw), value = TRUE))]
names(tmpData) <- gsub(paste("\\.", i, "$", sep = ""), "", names(tmpData))
if(i == 1) nms <- names(tmpData)
if(all(names(tmpData) != "RFCD.Code")) tmpData$RFCD.Code <- NA
if(all(names(tmpData) != "RFCD.Percentage")) tmpData$RFCD.Percentage <- NA
if(all(names(tmpData) != "SEO.Code")) tmpData$SEO.Code <- NA
if(all(names(tmpData) != "SEO.Percentage")) tmpData$SEO.Percentage <- NA
tmp[[i]] <- tmpData[,nms]
rm(tmpData)
}
## Stack them up and remove any rows without role information
vertical <- do.call("rbind", tmp)
vertical <- subset(vertical, Role != "")
## Reformat some of the variables to make complete factors, correctly
## encode missing data or to make the factor levels more descriptive.
vertical$Role <- factor(as.character(vertical$Role))
vertical$Year.of.Birth <- factor(paste(vertical$Year.of.Birth), levels = paste(sort(bYears)))
vertical$Country.of.Birth <- gsub(" ", "", as.character(vertical$Country.of.Birth))
vertical$Country.of.Birth[vertical$Country.of.Birth == ""] <- NA
vertical$Country.of.Birth <- factor(vertical$Country.of.Birth)
vertical$Home.Language <- gsub("Other", "OtherLang", as.character(vertical$Home.Language))
vertical$Home.Language[vertical$Home.Language == ""] <- NA
vertical$Home.Language <- factor(vertical$Home.Language)
vertical$Dept.No. <- paste("Dept", vertical$Dept.No., sep = "")
vertical$Dept.No.[vertical$Dept.No. == "DeptNA"] <- NA
vertical$Dept.No. <- factor(vertical$Dept.No.)
vertical$Faculty.No. <- paste("Faculty", vertical$Faculty.No., sep = "")
vertical$Faculty.No.[vertical$Faculty.No. == "FacultyNA"] <- NA
vertical$Faculty.No. <- factor(vertical$Faculty.No.)
vertical$RFCD.Code <- paste("RFCD", vertical$RFCD.Code, sep = "")
vertical$RFCD.Percentage[vertical$RFCD.Code == "RFCDNA"] <- NA
vertical$RFCD.Code[vertical$RFCD.Code == "RFCDNA"] <- NA
vertical$RFCD.Percentage[vertical$RFCD.Code == "RFCD0"] <- NA
vertical$RFCD.Code[vertical$RFCD.Code == "RFCD0"] <- NA
vertical$RFCD.Percentage[vertical$RFCD.Code == "RFCD999999"] <- NA
vertical$RFCD.Code[vertical$RFCD.Code == "RFCD999999"] <- NA
vertical$RFCD.Code <- factor(vertical$RFCD.Code)
vertical$SEO.Code <- paste("SEO", vertical$SEO.Code, sep = "")
vertical$SEO.Percentage[vertical$SEO.Code == "SEONA"] <- NA
vertical$SEO.Code[vertical$SEO.Code == "SEONA"] <- NA
vertical$SEO.Percentage[vertical$SEO.Code == "SEO0"] <- NA
vertical$SEO.Code[vertical$SEO.Code == "SEO0"] <- NA
vertical$SEO.Percentage[vertical$SEO.Code == "SEO999999"] <- NA
vertical$SEO.Code[vertical$SEO.Code== "SEO999999"] <- NA
vertical$SEO.Code <- factor(vertical$SEO.Code)
vertical$No..of.Years.in.Uni.at.Time.of.Grant <- as.character(vertical$No..of.Years.in.Uni.at.Time.of.Grant)
vertical$No..of.Years.in.Uni.at.Time.of.Grant[vertical$No..of.Years.in.Uni.at.Time.of.Grant == ""] <- "DurationUnk"
vertical$No..of.Years.in.Uni.at.Time.of.Grant[vertical$No..of.Years.in.Uni.at.Time.of.Grant == ">=0 to 5"] <- "Duration0to5"
vertical$No..of.Years.in.Uni.at.Time.of.Grant[vertical$No..of.Years.in.Uni.at.Time.of.Grant == ">5 to 10"] <- "Duration5to10"
vertical$No..of.Years.in.Uni.at.Time.of.Grant[vertical$No..of.Years.in.Uni.at.Time.of.Grant == ">10 to 15"] <- "Duration10to15"
vertical$No..of.Years.in.Uni.at.Time.of.Grant[vertical$No..of.Years.in.Uni.at.Time.of.Grant == "more than 15"] <- "DurationGT15"
vertical$No..of.Years.in.Uni.at.Time.of.Grant[vertical$No..of.Years.in.Uni.at.Time.of.Grant == "Less than 0"] <- "DurationLT0"
vertical$No..of.Years.in.Uni.at.Time.of.Grant <- factor(vertical$No..of.Years.in.Uni.at.Time.of.Grant)
######################################################################
## A function to shorten the role titles
shortNames <- function(x, pre = ""){
x <- gsub("EXT_CHIEF_INVESTIGATOR", "ECI", x)
x <- gsub("STUD_CHIEF_INVESTIGATOR", "SCI", x)
x <- gsub("CHIEF_INVESTIGATOR", "CI", x)
x <- gsub("DELEGATED_RESEARCHER", "DR", x)
x <- gsub("EXTERNAL_ADVISOR", "EA", x)
x <- gsub("HONVISIT", "HV", x)
x <- gsub("PRINCIPAL_SUPERVISOR", "PS", x)
x <- gsub("STUDRES", "SR", x)
x <- gsub("Unk", "UNK", x)
other <- x[x != "Grant.Application.ID"]
c("Grant.Application.ID", paste(pre, other, sep = ""))
}
## A function to find and remove zero-variance ("ZV") predictors
noZV <- function(x) {
keepers <- unlist(lapply(x, function(x) length(unique(x)) > 1))
x[,keepers,drop = FALSE]
}
######################################################################
## Calculate the total number of people identified on the grant
people <- ddply(vertical, .(Grant.Application.ID), function(x) c(numPeople = nrow(x)))
######################################################################
## Calculate the number of people per role
investCount <- ddply(vertical, .(Grant.Application.ID),
function(x) as.data.frame(t(as.matrix(table(x$Role)))),
.parallel = cores > 1)
## Clean up the names
names(investCount) <- shortNames(names(investCount), "Num")
######################################################################
## For each role, calculate the frequency of people in each age group
investDOB <- ddply(vertical, .(Grant.Application.ID),
function(x) {
tabDF <- as.data.frame(table(x$Role, x$Year.of.Birth))
out <- data.frame(t(tabDF$Freq))
names(out) <- paste(tabDF$Var1, tabDF$Var2, sep = ".")
out
},
.parallel = cores > 1)
names(investDOB) <- shortNames(names(investDOB))
investDOB <- noZV(investDOB)
######################################################################
## For each role, calculate the frequency of people from each country
investCountry <- ddply(vertical, .(Grant.Application.ID),
function(x) {
tabDF <- as.data.frame(table(x$Role, x$Country.of.Birth))
out <- data.frame(t(tabDF$Freq))
names(out) <- paste(tabDF$Var1, tabDF$Var2, sep = ".")
out
},
.parallel = cores > 1)
names(investCountry) <- shortNames(names(investCountry))
investCountry <- noZV(investCountry)
######################################################################
## For each role, calculate the frequency of people for each language
investLang <- ddply(vertical, .(Grant.Application.ID),
function(x) {
tabDF <- as.data.frame(table(x$Role, x$Home.Language))
out <- data.frame(t(tabDF$Freq))
names(out) <- paste(tabDF$Var1, tabDF$Var2, sep = ".")
out
},
.parallel = cores > 1)
names(investLang) <- shortNames(names(investLang))
investLang <- noZV(investLang)
######################################################################
## For each role, determine who has a Ph.D.
investPhD <- ddply(vertical, .(Grant.Application.ID),
function(x) {
tabDF <- as.data.frame(table(x$Role, x$With.PHD))
out <- data.frame(t(tabDF$Freq))
names(out) <- paste(tabDF$Var1, tabDF$Var2, sep = ".")
out
},
.parallel = cores > 1)
investPhD <- investPhD[,-grep("\\.$", names(investPhD))]
names(investPhD) <- shortNames(names(investPhD))
names(investPhD) <- gsub("Yes ", "PhD", names(investPhD))
investPhD <- noZV(investPhD)
######################################################################
## For each role, calculate the number of successful and unsuccessful
## grants
investGrants <- ddply(vertical, .(Grant.Application.ID, Role),
function(x) {
data.frame(Success = sum(x$Number.of.Successful.Grant, na.rm = TRUE),
Unsuccess = sum(x$Number.of.Unsuccessful.Grant, na.rm = TRUE))
},
.parallel = cores > 1)
investGrants <- reshape(investGrants, direction = "wide", idvar = "Grant.Application.ID", timevar = "Role")
investGrants[is.na(investGrants)] <- 0
names(investGrants) <- shortNames(names(investGrants))
investGrants <- noZV(investGrants)
######################################################################
## Create variables for each role/department combination
investDept <- ddply(vertical, .(Grant.Application.ID),
function(x) {
tabDF <- as.data.frame(table(x$Role, x$Dept.No.))
out <- data.frame(t(tabDF$Freq))
names(out) <- paste(tabDF$Var1, tabDF$Var2, sep = ".")
out
},
.parallel = cores > 1)
names(investDept) <- shortNames(names(investDept))
investDept <- noZV(investDept)
######################################################################
## Create variables for each role/faculty #
investFaculty <- ddply(vertical, .(Grant.Application.ID),
function(x) {
tabDF <- as.data.frame(table(x$Role, x$Faculty.No.))
out <- data.frame(t(tabDF$Freq))
names(out) <- paste(tabDF$Var1, tabDF$Var2, sep = ".")
out
},
.parallel = cores > 1)
names(investFaculty) <- shortNames(names(investFaculty))
investFaculty <- noZV(investFaculty)
######################################################################
## Create dummy variables for each tenure length
investDuration <- ddply(vertical, .(Grant.Application.ID),
function(x) as.data.frame(t(as.matrix(table(x$No..of.Years.in.Uni.at.Time.of.Grant)))),
.parallel = cores > 1)
investDuration[is.na(investDuration)] <- 0
######################################################################
## Create variables for the number of publications per journal
## type. Note that we also compute the total number, which should be
## removed for models that cannot deal with such a linear dependency
totalPub <- ddply(vertical, .(Grant.Application.ID),
function(x) {
data.frame(AstarTotal = sum(x$A., na.rm = TRUE),
ATotal = sum(x$A, na.rm = TRUE),
BTotal = sum(x$B, na.rm = TRUE),
CTotal = sum(x$C, na.rm = TRUE),
allPub = sum(c(x$A., x$A, x$B, x$C), na.rm = TRUE))
},
.parallel = cores > 1)
######################################################################
## Create variables for the number of publications per journal
## type per role.
investPub <- ddply(vertical, .(Grant.Application.ID, Role),
function(x) {
data.frame(Astar = sum(x$A., na.rm = TRUE),
A = sum(x$A, na.rm = TRUE),
B = sum(x$B, na.rm = TRUE),
C = sum(x$C, na.rm = TRUE))
},
.parallel = cores > 1)
investPub <- reshape(investPub, direction = "wide", idvar = "Grant.Application.ID", timevar = "Role")
investPub[is.na(investPub)] <- 0
names(investPub) <- shortNames(names(investPub))
investPub <- noZV(investPub)
######################################################################
## Create variables for each RFCD code
RFCDcount <- ddply(vertical, .(Grant.Application.ID),
function(x) as.data.frame(t(as.matrix(table(x$RFCD.Code)))),
.parallel = cores > 1)
RFCDcount <- noZV(RFCDcount)
######################################################################
## Create variables for each SEO code
SEOcount <- ddply(vertical, .(Grant.Application.ID),
function(x) as.data.frame(t(as.matrix(table(x$SEO.Code)))),
.parallel = cores > 1)
SEOcount <- noZV(SEOcount)
######################################################################
### Make dummy vars out of grant-specific data
grantData <- raw[, c("Sponsor.Code", "Contract.Value.Band...see.note.A", "Grant.Category.Code")]
## Make a lubridate object for the time, then derive the day, week and month info
startTime <- dmy(raw$Start.date)
grantData$Month <- factor(as.character(month(startTime, label = TRUE)))
grantData$Weekday <- factor(as.character(wday(startTime, label = TRUE)))
grantData$Day <- day(startTime)
grantYear <- year(startTime)
######################################################################
### Use the dummyVars function to create binary variables for
### grant-specific variables
dummies <- dummyVars(~., data = grantData, levelsOnly = TRUE)
grantData <- as.data.frame(predict(dummies, grantData))
names(grantData) <- gsub(" ", "", names(grantData))
grantData$Grant.Application.ID <- raw$Grant.Application.ID
grantData$Class <- factor(ifelse(raw$Grant.Status, "successful", "unsuccessful"))
grantData$Grant.Application.ID <- raw$Grant.Application.ID
grantData$is2008 <- year(startTime) == 2008
grantData <- noZV(grantData)
######################################################################
### Merge all the predictors together, remove zero variance columns
### and merge in the outcome data
summarized <- merge(investCount, investDOB)
summarized <- merge(summarized, investCountry)
summarized <- merge(summarized, investLang)
summarized <- merge(summarized, investPhD)
summarized <- merge(summarized, investGrants)
summarized <- merge(summarized, investDept)
summarized <- merge(summarized, investFaculty)
summarized <- merge(summarized, investDuration)
summarized <- merge(summarized, investPub)
summarized <- merge(summarized, totalPub)
summarized <- merge(summarized, people)
summarized <- merge(summarized, RFCDcount)
summarized <- merge(summarized, SEOcount)
summarized <- merge(summarized, grantData)
## Remove the ID column
summarized$Grant.Application.ID <- NULL
print(str(summarized))
######################################################################
### We'll split all of the pre-2008 data into the training set and a
### portion of the 2008 data too
training <- subset(summarized, !is2008)
pre2008 <- 1:nrow(training)
year2008 <- subset(summarized, is2008)
## Now randomly select some 2008 data for model training and add it
## back into the existing training data
set.seed(568)
inTrain <- createDataPartition(year2008$Class, p = 3/4)[[1]]
training2 <- year2008[ inTrain,]
testing <- year2008[-inTrain,]
training <- rbind(training, training2)
training$is2008 <- testing$is2008 <- NULL
training <- noZV(training)
testing <- testing[, names(training)]
######################################################################
### Create two character vectors for different predictor sets. One
### will have all the predictors (called 'fullSet').
##
### Another has some of the sparse predictors removed for models that
### require such filtering. This will be called 'reducedSet'
### (predictors without sparse or Near Zero Variance predictors). This
### set will also have predictors removed that are almost completely
### correlated with other predictors
fullSet <- names(training)[names(training) != "Class"]
###################################################################
### In the classification tree chapter, there is a different set
### of predictors that use factor encodings of some of the
### predictors
factorPredictors <- names(training)[names(training) != "Class"]
factorPredictors <- factorPredictors[!grepl("Sponsor[0-9]", factorPredictors)]
factorPredictors <- factorPredictors[!grepl("SponsorUnk", factorPredictors)]
factorPredictors <- factorPredictors[!grepl("ContractValueBand[A-Z]", factorPredictors)]
factorPredictors <- factorPredictors[!grepl("GrantCat", factorPredictors)]
factorPredictors <- factorPredictors[!(factorPredictors %in% levels(training$Month))]
factorPredictors <- factorPredictors[!(factorPredictors %in% levels(training$Weekday))]
factorForm <- paste("Class ~ ", paste(factorPredictors, collapse = "+"))
factorForm <- as.formula(factorForm)
### Some are extremely correlated, so remove
predCorr <- cor(training[,fullSet])
highCorr <- findCorrelation(predCorr, .99)
fullSet <- fullSet[-highCorr]
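### nearZeroVar (from caret) flags a predictor as near-zero variance when the
### frequency ratio of its most common to second most common value exceeds
### freqCut (here floor(nrow(training)/5)) and it has few unique values
### (caret's default uniqueCut)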
isNZV <- nearZeroVar(training[,fullSet], saveMetrics = TRUE, freqCut = floor(nrow(training)/5))
fullSet <- rownames(subset(isNZV, !nzv))
str(fullSet)
reducedSet <- rownames(subset(isNZV, !nzv & freqRatio < floor(nrow(training)/50)))
### Perfectly collinear predictors (due to their construction) March
### and Sunday were selected because they have the lowest frequency of
### all months and days
reducedSet <- reducedSet[(reducedSet != "allPub") &
(reducedSet != "numPeople") &
(reducedSet != "Mar") &
(reducedSet != "Sun")
]
str(reducedSet)
sessionInfo()
| /scratch/gouwar.j/cran-all/cranData/AppliedPredictiveModeling/inst/chapters/CreateGrantData.R |
# AgroClimateData.R Example AgroClimateData data used for Eto and soil
# water balance calculations
#
# Copyright (C) 2022 Center of Plant Sciences, Scuola Superiore Sant’Anna (
# http://www.capitalisegenetics.santannapisa.it)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
#' @title Example AgroClimate data from AgERA5
#'
#' @description The AgERA5 dataset provides daily surface meteorological data
#' for the period from 1979 to present as input for agriculture
#' and agro-ecological studies. This dataset is based on the hourly ECMWF ERA5
#' data at surface level; the data from 01/01/1982 through 12/31/2022 were
#' extracted for a grid located in Angochen, Nampula province of Mozambique.
#'
#' @format A data frame containing daily observations of AgroClimate parameters:
#' \describe{
#' \item{\code{GridID}}{Grid id of the location.}
#' \item{\code{Lat}}{latitude of the site in decimal degrees.}
#' \item{\code{Lon}}{longitude of the site in decimal degrees.}
#' \item{\code{Elev}}{elevation above sea level in (meters).}
#' \item{\code{WHC}}{water holding capacity in (mm).}
#' \item{\code{Year}}{year of record "YYYY".}
#' \item{\code{Month}}{month of record "MM".}
#' \item{\code{Day}}{day of record "DD".}
#' \item{\code{Rain}}{Precipitation (mm/day).}
#' \item{\code{Tmax}}{Temperature at 2 Meters Maximum (°C).}
#' \item{\code{Tmin}}{Temperature at 2 Meters Minimum (°C).}
#' \item{\code{Rs}}{All Sky Surface Shortwave Downward Irradiance
#' (MJ/m^2/day).}
#' \item{\code{Tdew}}{Dew/Frost Point at 2 Meters (°C).}
#' \item{\code{Uz}}{Wind Speed at 2 Meters (m/s).}
#'}
#'
#' @source \url{https://cds.climate.copernicus.eu/cdsapp#!/dataset/sis-agrometeorological-indicators?tab=overview}
#'
#' @keywords datasets
#'
#' @references AgERA5, 2021, Copernicus Climate Change Service (C3S), Fifth
#' generation of ECMWF atmospheric reanalysis of the global
#' climate for agriculture and agro-ecological studies. Copernicus Climate
#' Change Service Climate Data Store (CDS), July-2021.
#'
#' @seealso \code{\link{climateData}, \link{calcEto}}
#'
#' @examples
#' # load example data:
#' data(AgroClimateData)
#'
#' # Get the structure of the data frame:
#' str(AgroClimateData)
#'
#' # Get the head of the data frame:
#' head(AgroClimateData)
#'
"AgroClimateData"
# ********** end of code **********
###############################################################################
| /scratch/gouwar.j/cran-all/cranData/AquaBEHER/R/AgroClimateData.R |
# calcEto.R Potential Evapotranspiration
#
# Copyright (C) 2022 Center of Plant Sciences, Scuola Superiore Sant’Anna (
# http://www.capitalisegenetics.santannapisa.it)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
#' @title Potential Evapotranspiration
#'
#' @description This function calculates Penman-Monteith, Priestley Taylor and
#' Hargreaves-Samani Potential Evapotranspiration
#' using the method described by Allen et al, (1998)
#'
#' @param data a dataframe containing the required climate variables: columns
#' must contain the following parameters:
#'
#' \verb{ }\emph{\strong{\code{Lat:}} latitude of the site in decimal
#' degrees.}
#'
#' \verb{ }\emph{\strong{\code{Lon:}} longitude of the site in decimal
#' degrees.}
#'
#' \verb{ }\emph{\strong{\code{Elev:}} elevation above sea level in (
#' meters).}
#'
#' \verb{ }\emph{\strong{\code{Year:}} year of record "YYYY".}
#'
#' \verb{ }\emph{\strong{\code{Month:}} month of record "MM".}
#'
#' \verb{ }\emph{\strong{\code{Day:}} day of record "DD".}
#'
#' \verb{ }\emph{\strong{\code{Tmax:}} daily maximum temperature at 2-m
#' height in (°C).}
#'
#' \verb{ }\emph{\strong{\code{Tmin:}} daily minimum temperature at 2-m
#' height in (°C).}
#'
#' \verb{ }\emph{\strong{\code{Rs:}} daily surface incoming solar
#' radiation in (MJ/m^2/day).}
#'
#' \verb{ }\emph{\strong{\code{RH or RHmax and RHmin:}} daily relative
#' humidity at 2-m height.}
#'
#' \verb{ }\emph{\strong{\code{Tdew:}} daily dew point temperature at
#' 2-m height in (°C).}
#'
#' \verb{ }\emph{\strong{\code{U2 or Uz:}} daily wind speed at 2-m or
#' Z-m(custom) height (m/s).}
#'
#' \verb{ }
#' @param method \verb{ }the formulation used to compute Eto; default is
#' \emph{method = "PM"} gives the the Penman-Monteith formulation;
#' \emph{method = "PT"} gives \verb{ }the Priestley-Taylor formulation and
#' \emph{method = "HS"} gives the Hargreaves-Samani formulation.
#'
#' @param crop \verb{ }either \emph{crop = "short"} (default) or
#' \emph{crop = "tall"}; short indicates the method for FAO-56 hypothetical
#' short grass will be applied \verb{ }(Allen et al.1998); tall indicates that
#' the method for ASCE-EWRI standard crop will be applied (ASCE, 2005).
#'
#' @param Zh \verb{   }height of wind speed measurement in meters (required
#' when wind speed is supplied as \code{Uz} rather than \code{U2}).
#'
#' @return The function generates a list containing the following objects:
#'
#' \code{ET.Daily:} {daily estimations of reference crop evapotranspiration
#' (mm/day)}
#'
#' \code{Ra.Daily:} {daily estimations of extraterristrial radiation
#' (MJ/m2/day)}
#'
#' \code{Slope.Daily:} {daily estimations of slope of vapour pressure curve
#' (kPa/°C)}
#'
#' \code{ET.type:} {type of the estimation obtained}
#'
#' @references Allen, R.G., L.S. Pereira, D. Raes, and M. Smith. 1998. ‘Crop
#' evapotranspiration-Guidelines for Computing Crop Water requirements FAO
#' Irrigation and Drainage Paper 56’. FAO, Rome 300: 6541.
#'
#' Allen, R. G. 2005. The ASCE standardized reference evapotranspiration
#' equation. Amer Society of Civil Engineers.
#'
#' Guo, Danlu & Westra, Seth & Maier, Holger. (2016). An R package for
#' modelling actual, potential and reference evapotranspiration. Environmental
#' Modelling & Software. 78. 216-224. 10.1016/j.envsoft.2015.12.019.
#'
#' Hargreaves, G.H.Samani, Z.A. 1985, Reference crop evapotranspiration from
#' ambient air temperature. American Society of Agricultural Engineers.
#'
#' Priestley, C. & Taylor, R. 1972, On the assessment of surface heat flux and
#' evaporation using large-scale parameters'. Monthly Weather Review, vol. 100,
#' no. 2, pp. 81-92.
#'
#' @details
#'
#' \strong{Penman-Monteith:}
#' If all variables of Tmax, Tmin, Rs, either U2 or Uz, and either RHmax and
#' RHmin or RH or Tdew are available and crop surface (short or tall) is
#' specified in argument the Penman-Monteith FAO56 formulation is used
#' (Allen et al.1998).
#'
#' \strong{Priestley-Taylor:}
#' If all variables of Tmax, Tmin, Rs and either RHmax and RHmin or RH or
#' Tdew are available the Priestley-Taylor formulation is used (Priestley and
#' Taylor, 1972).
#'
#' \strong{Hargreaves-Samani:}
#' If only Tmax and Tmin are available, the Hargreaves-Samani formulation is
#' used or estimating reference crop evapotranspiration (Hargreaves and.
#' Samani, 1985).
#'
#' @seealso \code{\link{climateData}, \link{calcWatBal}, \link{calcSeasCal}}
#'
#' @examples
#' # load example data:
#' data(climateData)
#'
#' calcEto(climateData, method = "HS")
#'
#' # load example data:
#' data(AgroClimateData)
#'
#' calcEto(AgroClimateData, method = "PM", crop = "short", Zh = 10)
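#'
#' # The Priestley-Taylor formulation can also be applied here (illustrative;
#' # AgroClimateData already provides the Rs and Tdew columns it needs):
#' calcEto(AgroClimateData, method = "PT")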
#'
#' @export
###############################################################################
calcEto <- function(data, method = "PM", crop = "short", Zh = NULL) {
if (method == "HS") {
###############################################################################
# ***** Hargreaves-Samani
# ***** universal constants *****
lambda <- 2.45 ## latent heat of evaporation = 2.45 MJ.kg^-1 at
# 20 degree Celsius
Cp <- 1.013 * 10^-3 ## specific heat at constant pressure = MJ kg^-1 °C^-1
e <- 0.622 ## ratio molecular weight of water vapor/dry air
lat.rad <- data$Lat * (pi/180)
Gsc <- 0.082 # ***** solar constant = 0.0820 MJ.m^-2.min^-1
date.vec <- as.Date.character(paste0(data$Year, "-", data$Month, "-",
data$Day))
data$J <- strftime(as.POSIXlt(date.vec), "%j") ## Julian day of the year
Elev <- unique(data$Elev)
ts<-"daily"
message <- "yes"
# ***** Check of specific data requirement
if (is.null(data$Tmax)|is.null(data$Tmin)) {
stop("Required data missing for 'Tmax' and 'Tmin'")
}
# ***** Calculating mean temperature (°C)
Tavg <- (data$Tmax + data$Tmin) / 2
# ***** Atmospheric pressure (kPa) as a function of altitude
P <- 101.3 * ((293 - 0.0065 * Elev) / 293)^5.26
## Slope of saturation vapor pressure curve at air temperature Tavg (kPa/ °C)
delta <- 4098 * (0.6108 *
exp((17.27 * Tavg)/(Tavg + 237.3))) / ((Tavg + 237.3)^2)
# ***** psychrometric constant (kPa/°C)
gamma <- (Cp * P) / (lambda * e)
# ***** Inverse relative distance Earth-Sun
dr <- 1 + 0.033*cos(2*pi/365 * as.numeric(data$J))
# ***** Solar declination (rad)
SDc <- 0.409 * sin(2*pi/365 * as.numeric(data$J) - 1.39)
# ***** sunset hour angle (rad)
Ws <- acos(-tan(lat.rad) * tan(SDc))
# ***** Daylight hours (hour)
N <- 24/pi * Ws
# ***** Extraterrestrial radiation (MJ m-2 day-1)
Ra <- (1440/pi) * dr * Gsc * (Ws * sin(lat.rad) * sin(SDc) + cos(lat.rad) *
cos(SDc) * sin(Ws))
# ***** empirical coefficient by Hargreaves and Samani (1985)
C.HS <- 0.00185 * (data$Tmax - data$Tmin)^2 - 0.0433 *
(data$Tmax - data$Tmin) + 0.4023
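# Hargreaves-Samani (1985) reference ETo as implemented below:
#   ETo = 0.0135 * C.HS * (Ra / lambda) * sqrt(Tmax - Tmin) * (Tavg + 17.8)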
# reference crop evapotranspiration by Hargreaves and Samani (1985)
ET.HS.Daily <- 0.0135 * C.HS * Ra / lambda * (data$Tmax - data$Tmin)^0.5 *
(Tavg + 17.8)
ET.Daily <- ET.HS.Daily
# Generate summary message for results
ET.formulation <- "Hargreaves-Samani"
ET.type <- "Reference Crop ET"
# message(ET.formulation, " ", ET.type)
# message("Evaporative surface: reference crop")
results <- list(ET.Daily = ET.Daily,
Ra.Daily = Ra,
Slope.Daily = delta,
ET.formulation = ET.formulation,
ET.type = ET.type)
# message("Timestep: ", ts)
# message("Units: mm")
# message("Time duration: ", date.vec[1], " to ", date.vec[length(date.vec)])
return(results)
###############################################################################
} else if (method == "PT") {
alpha <- 0.23
# ***** universal constants *****
lambda <- 2.45 ## latent heat of evaporation = 2.45 MJ.kg^-1 at 20 °C
Cp <- 1.013 * 10^-3 # specific heat at constant pressure = MJ kg^-1 °C^-1
e <- 0.622 # ratio molecular weight of water vapour/dry air
Sigma <- 4.903e-09 # Stefan-Boltzmann constant =
# 4.903*10^-9 MJ.K^-4.m^-2.day^-1
lat.rad <- data$Lat * (pi/180)
Gsc <- 0.082 # ***** solar constant = 0.0820 MJ.m^-2.min^-1
G <- 0 # soil heat flux negligible for daily time-step = 0
# (Allen et al., 1998, page 68)
alphaPT <- 1.26 # Priestley-Taylor coefficient
date.vec <- as.Date.character(paste0(data$Year, "-", data$Month, "-",
data$Day))
data$J <- strftime(as.POSIXlt(date.vec), "%j") # Julian day of the year
Elev <- unique(data$Elev)
ts <- "daily"
message <- "yes"
# ***** Check of specific data requirement
if (is.null(data$Tmax)|is.null(data$Tmin)) {
stop("Required data missing for 'Tmax' and 'Tmin'")
}
if (is.null(data$Va)|is.null(data$Vs)) {
if (is.null(data$RHmax)|is.null(data$RHmin)) {
if (is.null(data$RH)) {
if (is.null(data$Tdew)) {
stop("Required data missing: need either 'Tdew', or 'Va' and 'Vs',
or 'RHmax' and 'RHmin', r 'RH'")
}
}
}
}
if (is.null(data$Rs)) { # solar radiation data is required
stop("Required data missing for 'Rs'")
}
# check user-input albedo
if (is.na(as.numeric(alpha))) {
stop("Please use a numeric value for the alpha (albedo of evaporative
surface)")
}
if (!is.na(as.numeric(alpha))) {
if (as.numeric(alpha) < 0 | as.numeric(alpha) > 1) {
stop("Please use a value between 0 and 1 for the alpha (albedo of
evaporative surface)")
}
}
# ***** Calculating mean temperature (°C)
Tavg <- (data$Tmax + data$Tmin) / 2
# ***** calculating actual vapor pressure (kPa)
if (!is.null(data$Va) & !is.null(data$Vs)) {
Ea <- data$Va
Es <- data$Vs
} else if (!is.null(data$RHmax) & !is.null(data$RHmin)) {
# Saturation vapor pressure from temperature (kPa)
EsTmax <- 0.6108 * exp(17.27 * data$Tmax / (data$Tmax + 237.3))
EsTmin <- 0.6108 * exp(17.27 * data$Tmin / (data$Tmin + 237.3))
Es <- (EsTmax + EsTmin)/2
# Actual vapor pressure derived from max and min relative humidity data
Ea <- (EsTmin * data$RHmax/100 + EsTmax * data$RHmin/100)/2
} else if (!is.null(data$RH)) {
# Saturation vapor pressure from temperature (kPa)
EsTmax <- 0.6108 * exp(17.27 * data$Tmax / (data$Tmax + 237.3))
EsTmin <- 0.6108 * exp(17.27 * data$Tmin / (data$Tmin + 237.3))
Es <- (EsTmax + EsTmin)/2
# Actual vapor pressure derived from max and min relative humidity data
Ea <- (data$RH/100) * Es
} else if (!is.null(data$Tdew)) {
# Saturation vapor pressure from temperature (kPa)
EsTmax <- 0.6108 * exp(17.27 * data$Tmax / (data$Tmax + 237.3))
EsTmin <- 0.6108 * exp(17.27 * data$Tmin / (data$Tmin + 237.3))
Es <- (EsTmax + EsTmin)/2
# Actual vapour pressure derived from dewpoint temperature
Ea <- 0.6108 * exp((17.27 * data$Tdew)/(data$Tdew + 237.3))
}
# ***** Atmospheric pressure (kPa) as a function of altitude
P <- 101.3 * ((293 - 0.0065 * Elev) / 293)^5.26
## Slope of saturation vapor pressure curve at air temperature Tavg (kPa/ °C)
delta <- 4098 * (0.6108 * exp((17.27 * Tavg)/(Tavg + 237.3))) /
((Tavg + 237.3)^2)
# ***** psychrometric constant (kPa/°C)
gamma <- (Cp * P) / (lambda * e)
# ***** Inverse relative distance Earth-Sun
dr <- 1 + 0.033*cos(2*pi/365 * as.numeric(data$J))
# ***** Solar declination (rad)
SDc <- 0.409 * sin(2*pi/365 * as.numeric(data$J) - 1.39)
# ***** sunset hour angle (rad)
Ws <- acos(-tan(lat.rad) * tan(SDc))
# ***** Daylight hours (hour)
N <- 24/pi * Ws
# ***** Extraterrestrial radiation (MJ m-2 day-1)
Ra <- (1440/pi) * dr * Gsc * (Ws * sin(lat.rad) * sin(SDc) + cos(lat.rad) *
cos(SDc) * sin(Ws))
# ***** Clear-sky solar radiation (MJ m-2 day-1)
Rso <- (0.75 + (2*10^-5)*Elev) * Ra
Rs <- data$Rs # ***** solar or shortwave radiation (MJ m-2 day-1)
# ***** estimated net outgoing longwave radiation (MJ m-2 day-1)
Rnl <- Sigma * (0.34 - 0.14 * sqrt(Ea)) *
((data$Tmax+273.2)^4 + (data$Tmin+273.2)^4)/2 * (1.35 * Rs / Rso - 0.35)
# net incoming shortwave radiation (MJ m-2 day-1)
Rnsg <- (1 - alpha) * Rs # ***** for grass
# ***** net radiation
Rng <- Rnsg - Rnl
# well-watered crop evapotranspiration in a semi-arid and windy location
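# Priestley-Taylor form implemented below:
#   PET = alphaPT * [ (delta / (delta + gamma)) * Rn / lambda - G / lambda ]
# with alphaPT = 1.26 set above; G = 0 for daily time steps, so the last term
# vanishes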
E.PT.Daily <- alphaPT * (delta/(delta + gamma) * Rng / lambda - G / lambda)
ET.Daily <- E.PT.Daily
# Generate summary message for results
ET.formulation <- "Priestley-Taylor"
ET.type <- "Potential ET"
if (alpha != 0.08) {
Surface <- paste("user-defined, albedo =", alpha)
} else if (alpha == 0.08) {
Surface <- paste("water, albedo =", alpha)
}
# message(ET.formulation, " ", ET.type)
# message("Evaporative surface: ", Surface)
#
# message("Timestep: ", ts)
# message("Units: mm")
# message("Time duration: ", date.vec[1], " to ", date.vec[length(date.vec)])
results <- list(ET.Daily = ET.Daily,
Ra.Daily = Ra,
Slope.Daily = delta,
Ea.Daily = Ea,
Es.Daily = Es,
ET.formulation = ET.formulation,
ET.type = ET.type)
return(results)
###############################################################################
} else if (method == "PM") {
# ***** universal constants *****
lambda <- 2.45 ## latent heat of evaporation = 2.45 MJ.kg^-1 at
# 20 degree Celsius
Cp <- 1.013 * 10^-3 ## specific heat at constant pressure = MJ kg^-1 °C^-1
e <- 0.622 ## ratio molecular weight of water vapour/dry air
Sigma <- 4.903e-09 # Stefan-Boltzmann constant =
# 4.903*10^-9 MJ.K^-4.m^-2.day^-1
lat.rad <- data$Lat * (pi/180)
Gsc <- 0.082 # ***** solar constant = 0.0820 MJ.m^-2.min^-1
G <- 0 # soil heat flux negligible for daily time-step = 0
# (Allen et al., 1998, page 68)
date.vec <- as.Date.character(paste0(data$Year, "-", data$Month, "-",
data$Day))
data$J <- strftime(as.POSIXlt(date.vec), "%j") # Julian day of the year
Elev <- unique(data$Elev)
ts <- "daily"
message <- "yes"
# ***** Check of specific data requirement
if (is.null(data$Tmax)|is.null(data$Tmin)) {
stop("Required data missing for 'Tmax' and 'Tmin'")
}
if (is.null(data$Rs)) { # solar radiation data is required
stop("Required data missing for 'Rs'")
}
if (is.null(data$U2) & is.null(data$Uz)) {
stop("Required data missing for 'Uz' or 'U2'")
}
if (is.null(data$Va)|is.null(data$Vs)) {
if (is.null(data$RHmax)|is.null(data$RHmin)) {
if (is.null(data$RH)) {
if (is.null(data$Tdew)) {
stop("Required data missing: need either 'Tdew', or 'Va' and 'Vs', or
'RHmax' and 'RHmin', r 'RH'")
}
}
}
}
# ***** check user-input crop type and specify albedo
if (crop != "short" & crop != "tall") {
stop("Please enter 'short' or 'tall' for the desired reference crop type")
} else {
alpha <- 0.23 # albedo for both short and tall crop
if (crop == "short") {
z0 <- 0.02 # roughness height for short grass
} else {
z0 <- 0.1 # roughness height for tall grass
}
}
# ***** Calculating mean temperature (°C)
Tavg <- (data$Tmax + data$Tmin) / 2
# ***** calculating actual vapor pressure (kPa)
if (!is.null(data$Va) & !is.null(data$Vs)) {
Ea <- data$Va
Es <- data$Vs
} else if (!is.null(data$RHmax) & !is.null(data$RHmin)) {
# Saturation vapor pressure from temperature (kPa)
EsTmax <- 0.6108 * exp(17.27 * data$Tmax / (data$Tmax + 237.3))
EsTmin <- 0.6108 * exp(17.27 * data$Tmin / (data$Tmin + 237.3))
Es <- (EsTmax + EsTmin)/2
# Actual vapor pressure derived from max and min relative humidity data
Ea <- (EsTmin * data$RHmax/100 + EsTmax * data$RHmin/100)/2
} else if (!is.null(data$RH)) {
# Saturation vapor pressure from temperature (kPa)
EsTmax <- 0.6108 * exp(17.27 * data$Tmax / (data$Tmax + 237.3))
EsTmin <- 0.6108 * exp(17.27 * data$Tmin / (data$Tmin + 237.3))
Es <- (EsTmax + EsTmin)/2
# Actual vapor pressure derived from max and min relative humidity data
Ea <- (data$RH/100) * Es
} else if (!is.null(data$Tdew)) {
# Saturation vapor pressure from temperature (kPa)
EsTmax <- 0.6108 * exp(17.27 * data$Tmax / (data$Tmax + 237.3))
EsTmin <- 0.6108 * exp(17.27 * data$Tmin / (data$Tmin + 237.3))
Es <- (EsTmax + EsTmin)/2
# Actual vapour pressure derived from dewpoint temperature
Ea <- 0.6108 * exp((17.27 * data$Tdew)/(data$Tdew + 237.3))
}
# ***** Atmospheric pressure (kPa) as a function of altitude
P <- 101.3 * ((293 - 0.0065 * Elev) / 293)^5.26
## Slope of saturation vapor pressure curve at air temperature Tavg (kPa/ °C)
delta <- 4098 * (0.6108 * exp((17.27 * Tavg)/(Tavg + 237.3))) /
((Tavg + 237.3)^2)
# ***** psychrometric constant (kPa/°C)
gamma <- (Cp * P) / (lambda * e)
# ***** Inverse relative distance Earth-Sun
dr <- 1 + 0.033*cos(2*pi/365 * as.numeric(data$J))
# ***** Solar declination (rad)
SDc <- 0.409 * sin(2*pi/365 * as.numeric(data$J) - 1.39)
# ***** sunset hour angle (rad)
Ws <- acos(-tan(lat.rad) * tan(SDc))
# ***** Daylight hours (hour)
N <- 24/pi * Ws
# ***** Extraterrestrial radiation (MJ m-2 day-1)
Ra <- (1440/pi) * dr * Gsc * (Ws * sin(lat.rad) * sin(SDc) + cos(lat.rad) *
cos(SDc) * sin(Ws))
# ***** Clear-sky solar radiation (MJ m-2 day-1)
Rso <- (0.75 + (2*10^-5)*Elev) * Ra
Rs <- data$Rs # ***** solar or shortwave radiation (MJ m-2 day-1)
# ***** estimated net outgoing longwave radiation (MJ m-2 day-1)
Rnl <- Sigma * (0.34 - 0.14 * sqrt(Ea)) *
((data$Tmax+273.2)^4 + (data$Tmin+273.2)^4)/2 * (1.35 * Rs / Rso - 0.35)
# net incoming shortwave radiation (MJ m-2 day-1)
Rnsg <- (1 - alpha) * Rs # ***** for grass
# ***** net radiation
Rng <- Rnsg - Rnl
# ***** wind speed
if (is.null(data$U2)) {
if (is.null(Zh)) {
stop("Required input missing: 'Zh' (wind measurement height, m) must be
supplied when wind speed is given as 'Uz'")
}
# convert wind speed measured at height Zh to the 2-m value (FAO-56 Eq. 47)
U2 <- data$Uz * 4.87 / log(67.8*Zh - 5.42)
} else {
U2 <- data$U2
}
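# FAO-56 Penman-Monteith reference ETo (Allen et al. 1998), the form
# implemented below for the short (grass) reference crop:
#   ETo = [0.408*delta*(Rn - G) + gamma*(900/(Tavg + 273))*U2*(Es - Ea)] /
#         [delta + gamma*(1 + 0.34*U2)]
# the ASCE-EWRI tall-crop variant swaps 900 -> 1600 and 0.34 -> 0.38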
if (crop == "short") {
# FAO-56 reference crop evapotranspiration from short grass
ET.RC.Daily <- (0.408 * delta * (Rng - G) +
gamma * 900 * U2 * (Es - Ea)/
(Tavg + 273)) / (delta + gamma * (1 + 0.34*U2))
} else {
# ASCE-EWRI standardised Penman-Monteith for long grass
ET.RC.Daily <- (0.408 * delta * (Rng - G) + gamma * 1600 * U2 * (Es - Ea)/
(Tavg + 273)) / (delta + gamma * (1 + 0.38*U2))
}
ET.Daily <- ET.RC.Daily
# Generate summary message for results
if (crop == "short") {
r_s <- 70 # will not be used for calculation - just informative
CH <- 0.12 # will not be used for calculation - just informative
ET.formulation <- "Penman-Monteith FAO56"
ET.type <- "Reference Crop ET"
Surface <- paste("FAO-56 hypothetical short grass, albedo =",
alpha, "; surface resistance =", r_s, "sm^-1;
crop height =", CH, " m; roughness height =", z0, "m")
} else {
r_s <- 45 # will not be used for calculation - just informative
CH <- 0.50 # will not be used for calculation - just informative
ET.formulation <- "Penman-Monteith ASCE-EWRI Standardised"
ET.type <- "Reference Crop ET"
Surface <- paste("ASCE-EWRI hypothetical tall grass, albedo =",
alpha, "; surface resistance =", r_s, "sm^-1;
crop height =", CH, " m; roughness height =", z0, "m")
}
results <- list(ET.Daily = ET.Daily,
Ra.Daily = Ra,
Slope.Daily = delta,
Ea.Daily = Ea,
Es.Daily = Es,
ET.formulation = ET.formulation,
ET.type = ET.type)
# message(ET.formulation, " ", ET.type)
# message("Evaporative surface: ", Surface)
#
# message("Timestep: ", ts)
# message("Units: mm")
# message("Time duration: ", date.vec[1], " to ", date.vec[length(date.vec)])
return(results)
}
###############################################################################
}
###############################################################################
###############################################################################
###############################################################################
| /scratch/gouwar.j/cran-all/cranData/AquaBEHER/R/calcEto.R |
# calcSeasCal.R Rainy season calandar
#
# Copyright (C) 2022 Center of Plant Sciences, Scuola Superiore Sant’Anna
# (http://www.capitalisegenetics.santannapisa.it)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
#' @title Rainy Season Calendar
#'
#' @description This function estimates the rainy season calendar, i.e. onset
#' date, cessation date and duration of the rainy season, based on an agroclimatic
#' approach. The agroclimatic approach defines the onset of the rainy season as
#' the optimal date that ensures sufficient soil moisture during planting and
#' early growing periods to avoid crop failure after sowing and requires
#' information on rainfall, reference evapotranspiration and accounting of the
#' daily soil water balance parameters.
#'
#' @param data an R object of dataframe as returned by \code{\link{calcWatBal}}
#' or a dataframe having similar parameters.
#' @param onsetWind.start Earliest possible start date of the onset window.
#' @param onsetWind.end The latest possible date for end of the onset window.
#' @param cessaWind.end The latest possible date for end of the cessation window.
#' @param soilWHC Water holding capacity of the soil at root zone depth in (mm).
#'
#' @return The function generates list of dataframes with columns of variables:
#'
#' \strong{\code{Onset.dF:}} a data frame with columns of onset variables:
#'
#' \verb{ } \emph{Year:} year of the season under investigation, "YYYY".
#'
#' \verb{ } \emph{onset.Year:} year in which the onset of the season occurred, "YYYY".
#'
#' \verb{ } \emph{onset.Month:} month of the onset of the season in "MM".
#'
#' \verb{ } \emph{onset.Day:} day of the onset of the season in "DD".
#'
#' \verb{ } \emph{onset.JD:} onset date of the season in Julian day, "DOY".
#'
#' \verb{ } \emph{onset.Value:} onset day given as an index within the onset
#' search window.
#'
#' \strong{\code{Cessation.dF:}} a data frame with columns of onset variables:
#'
#' \verb{ } \emph{Year:} year of the season under investigation, "YYYY".
#'
#' \verb{ } \emph{cessation.Year:} year in which the cessation of the season
#' occurred, "YYYY".
#'
#' \verb{ } \emph{cessation.Month:} month of the cessation of the season in
#' "MM".
#'
#' \verb{ } \emph{cessation.Day:} day of the cessation of the season in "DD".
#'
#' \verb{ } \emph{cessation.JD:} cessation date of the season in Julian day,
#' "DOY".
#'
#' \verb{ } \emph{cessation.Value:} cessation day given as an index counted
#' from the start of the onset search window.
#'
#' \strong{\code{Duration.dF:}} a data frame with columns of onset variables:
#'
#' \verb{ } \emph{Year:} year of the season under investigation, "YYYY".
#'
#' \verb{ } \emph{onset.YYYYDOY:} onset date of the season in "YYYY-DOY".
#'
#' \verb{ } \emph{cessation.YYYYDOY:} cessation date of the season in
#' "YYYY-DOY".
#'
#' \verb{ } \emph{Duration:} duration of the season in "days".
#'
#' @details
#'
#' As per the agroclimatic approach, a normal rainy season (growing season) is
#' defined as one in which there is an excess of precipitation over potential
#' evapotranspiration (PET). Such a period meets the evapotranspiration demands
#' of crops and recharges the moisture of the soil profile (FAO 1977; 1978;
#' 1986). Thus, the rainy season calendar is defined accordingly:
#'
#' \strong{Onset}
#'
#' The \emph{onset} of the rainy season will start on the first day after
#' \emph{onsetWind.start}, when the actual-to-potential evapotranspiration ratio
#' is greater than 0.5 for 7 consecutive days, followed by a 20-day period in
#' which plant available water remains above wilting over the root zone of the
#' soil layer.
#'
#' \strong{Cessation}
#'
#' The rainy season will end, \emph{cessation}, on the first day after
#' \emph{onsetWind.end}, when the actual-to-potential evapotranspiration ratio
#' is less than 0.5 for 7 consecutive days, followed by 12 consecutive
#' non-growing days in which plant available water remains below wilting over
#' the root zone of the soil layer.
#'
#' \strong{Duration}
#'
#' The \emph{duration} of the rainy season is the total number of days from
#' onset to cessation of the season.
#'
#' @references FAO, 1977. Crop water requirements. FAO Irrigation and Drainage
#' Paper No. 24, by Doorenbos J and W.O. Pruitt. FAO, Rome, Italy.
#'
#' FAO 1978. Forestry for Local Community Development Food and Agriculture
#' Organization of the United Nation (FAO), FAO Forestry paper, No 7, Rome.
#'
#' FAO, 1986. Early Agrometeorological crop yield forecasting. FAO Plant
#' Production and Protection paper No. 73, by M. Frère and G.F. Popov. FAO,
#' Rome, Italy
#'
#' @seealso \code{\link{calcEto}, \link{calcWatBal}}
#'
#' @importFrom ggplot2 ggplot geom_line geom_area scale_x_date scale_y_continuous labs theme_linedraw theme
#' @importFrom dplyr group_by summarize
#' @importFrom lubridate as_date
#'
#' @examples
#'
#' \donttest{
#' # load example data:
#' data(AgroClimateData)
#'
#' # Estimate daily PET:
#' PET <- calcEto(AgroClimateData, method = "PM", Zh = 10)
#'
#' # Add the estimated PET 'ET.Daily' to a new column in AgroClimateData:
#' AgroClimateData$Eto <- PET$ET.Daily
#'
#' # Estimate daily water balance for the soil having 100mm of WHC:
#' watBal <- calcWatBal(AgroClimateData, soilWHC = 100)
#'
#' # estimate the rainy season calandar (Onset, Cessation and Duration):
#' onsetWind.start = "1996-09-01" # earliest possible start date of the onset window
#' onsetWind.end = "1997-01-31" # the latest possible date for end of the onset window
#' cessaWind.end = "1997-06-30" # the latest possible date for end of the cessation window
#'
#' seasCal.lst <- calcSeasCal(watBal, onsetWind.start, onsetWind.end, cessaWind.end, soilWHC = 100)
#'
#' str(seasCal.lst)
#'
#' }
#' @export
###############################################################################
# ***** function to estimate
calcSeasCal <- function(data, onsetWind.start, onsetWind.end,
cessaWind.end, soilWHC) {
# ***** Check of specific data requirement
if ((is.null(data$Year) & is.null(data$Month) & is.null(data$Day)) |
((length(which(is.na(data$Year)))>0)&
(length(which(is.na(data$Month)))>0)&
(length(which(is.na(data$Day)))>0))) {
stop("Required data column for [Year], [Month] and [Day] is missing!")
}
if (is.null(soilWHC) | is.na(soilWHC)) {
stop("Required data for water holding capacity of the soil [soilWHC]
is missing! ")
}
# if (is.null(data$Rain) | length(which(is.na(data$Rain)))> 0) {
#
# stop("Required data column for daily rainfall amount [Rain] is missing! ")
#
# }
if (is.null(data$R) | length(which(is.na(data$R)))> 0) {
stop("Required data column for actual-to-potential evapotranspiration
ratio [R] is missing! ")
}
if (is.null(data$AVAIL) | length(which(is.na(data$AVAIL)))> 0) {
stop("Required data column for available soil water amount [AVAIL]
is missing! ")
}
if (is.null(onsetWind.start) | is.na(onsetWind.start)) {
stop("Required date for start of the onset window [onsetWind.start]
is missing! ")
}
if (is.null(onsetWind.end) | is.na(onsetWind.end)) {
stop("Required date for end of the onset window [onsetWind.end]
is missing! ")
}
if (is.null(cessaWind.end) | is.na(cessaWind.end)) {
stop("Required date for end of the cessation window [cessaWind.end]
is missing! ")
}
# ****************************************************************************
data$date <- lubridate::as_date(paste0(data$Year, "-", data$Month, "-",
data$Day))
data$DOY <- strftime(as.POSIXlt(lubridate::as_date(paste0(data$Year, "-",
data$Month, "-",
data$Day))), "%j")
data$YYDOY <- paste0(data$Year, "-", data$DOY)
year.vec <- as.numeric(sort(unique(data$Year)))
Rindex.thr = 0.5
PAW.thr = min(max((0.25 * soilWHC), 15), 30)
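# PAW.thr: plant-available-water threshold = 25% of soilWHC, bounded to the
# range [15, 30] mm (e.g. soilWHC = 100 mm gives min(max(25, 15), 30) = 25 mm)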
# data$Rain[data$Rain < 1] <- 0
if (lubridate::as_date(onsetWind.start) < lubridate::as_date(onsetWind.end) |
lubridate::as_date(onsetWind.start) < lubridate::as_date(cessaWind.end)) {
year.len <- length(year.vec) -1
} else {
year.len <- length(year.vec)
}
# *****************************************************************************
Onset.dF <- data.frame(Year = rep(NA, year.len),
onset.Year = rep(NA, year.len),
onset.Month = rep(NA, year.len),
onset.Day = rep(NA, year.len),
onset.JD = rep(NA, year.len),
onset.Value = rep(NA, year.len))
Cessation.dF <- data.frame(Year = rep(NA, year.len),
cessation.Year = rep(NA, year.len),
cessation.Month = rep(NA, year.len),
cessation.Day = rep(NA, year.len),
cessation.JD = rep(NA, year.len),
cessation.Value = rep(NA, year.len))
Duration.dF <- data.frame(Year = rep(NA, year.len),
onset.YYYYDOY = rep(NA, year.len),
cessation.YYYYDOY = rep(NA, year.len),
Duration = rep(NA, year.len))
for (yr in 1:(year.len)) {
if (!is.null(onsetWind.start) & !is.null(cessaWind.end)) {
ons = as.numeric(format(lubridate::as_date(onsetWind.start), "%j"))
one = as.numeric(format(lubridate::as_date(onsetWind.end), "%j"))
cne = as.numeric(format(lubridate::as_date(cessaWind.end), "%j"))
onsetWind.start.yr <- lubridate::as_date(paste0(year.vec[yr],
substr(onsetWind.start,5,10)))
if (ons < one) {
onsetWind.end.yr <- lubridate::as_date(paste0(year.vec[yr],
substr(onsetWind.end, 5, 10)))
} else {
onsetWind.end.yr <- lubridate::as_date(paste0(year.vec[yr+1],
substr(onsetWind.end,5,10)))
}
data.onset.yr =
data[which(onsetWind.start.yr ==
data$date):which((onsetWind.end.yr + 20) == data$date), ]
# ***** onset date
onset <- NA
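# Onset criterion as implemented in the loop below: the first day within the
# onset window on which the ratio R stays >= Rindex.thr (0.5) for 11
# consecutive days (day .. day+10) and at least 16 of the following 21 days
# (day .. day+20) keep AVAIL above PAW.thr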
for (day in 1:(nrow(data.onset.yr)- 20)) {
if (is.na(onset) & (length(which(is.na(data.onset.yr$R))) < 1) &
(data.onset.yr$R[day] >= Rindex.thr) &
(data.onset.yr$R[day+1] >= Rindex.thr) &
(data.onset.yr$R[day+2] >= Rindex.thr) &
(data.onset.yr$R[day+3] >= Rindex.thr) &
(data.onset.yr$R[day+4] >= Rindex.thr) &
(data.onset.yr$R[day+5] >= Rindex.thr) &
(data.onset.yr$R[day+6] >= Rindex.thr) &
(data.onset.yr$R[day+7] >= Rindex.thr) &
(data.onset.yr$R[day+8] >= Rindex.thr) &
(data.onset.yr$R[day+9] >= Rindex.thr) & (
data.onset.yr$R[day+10] >= Rindex.thr)) {
avail.vec <- data.onset.yr$AVAIL[day:(day+20)]
avail.grDay <- length(which(avail.vec > PAW.thr))
if (avail.grDay > 15) {
onset <- day
} else (next)
}
}
if (is.na(onset)){onset.index = NA} else
{onset.index = data.onset.yr$YYDOY[onset]}
if (is.na(onset)){onset.Year = NA} else
{onset.Year = as.numeric(substr(onset.index, 1, 4))}
if (is.na(onset)){onset.Month = NA} else
{onset.Month = as.numeric(data.onset.yr$Month[onset])}
if (is.na(onset)){onset.Day = NA} else
{onset.Day = as.numeric(data.onset.yr$Day[onset])}
onset.yr.dF = data.frame(Year = as.numeric(year.vec[yr]),
onset.Year = onset.Year,
onset.Month = onset.Month,
onset.Day = onset.Day,
onset.JD = as.numeric(substr(onset.index, 6, 8)),
onset.Value = onset)
Onset.dF[yr,] <- onset.yr.dF
# *****************************************************************************
if (!is.na(onset)) {
cessaWind.start.yr <- lubridate::as_date(paste0(onset.Year, "-",
onset.Month, "-",
onset.Day)) + 35
}
if (!is.na(onset) & (ons < cne)) {
cessaWind.end.yr <- lubridate::as_date(paste0(year.vec[yr],
substr(cessaWind.end,5,10)))
} else {
cessaWind.end.yr <- lubridate::as_date(paste0(year.vec[yr+1],
substr(cessaWind.end, 5, 10)))
}
if (!is.na(onset)) {
data.cessation.yr = data[which(cessaWind.start.yr ==
data$date):which((cessaWind.end.yr + 20) ==
data$date), ]
}
cessation <- NA
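# Cessation criterion as implemented in the loop below: searched from 35 days
# after the onset date; cessation is set to the day following the first day on
# which R stays < Rindex.thr (0.5) for 11 consecutive days and at least 13 of
# the following 21 days keep AVAIL at or below PAW.thr + 10 mm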
if (!is.na(onset)) {
for (day in 1:(nrow(data.cessation.yr)- 20)) {
if (is.na(cessation) & (length(which(is.na(data.cessation.yr$R))) < 1) &
(data.cessation.yr$R[day] < Rindex.thr) &
(data.cessation.yr$R[day+1] < Rindex.thr) &
(data.cessation.yr$R[day+2] < Rindex.thr) &
(data.cessation.yr$R[day+3] < Rindex.thr) &
(data.cessation.yr$R[day+4] < Rindex.thr) &
(data.cessation.yr$R[day+5] < Rindex.thr) &
(data.cessation.yr$R[day+6] < Rindex.thr) &
(data.cessation.yr$R[day+7] < Rindex.thr) &
(data.cessation.yr$R[day+8] < Rindex.thr) &
(data.cessation.yr$R[day+9] < Rindex.thr)&
(data.cessation.yr$R[day+10] < Rindex.thr)) {
avail.vec <- data.cessation.yr$AVAIL[(day):(day+20)]
avail.grDay <- length(which(avail.vec <= (PAW.thr+10)))
if (avail.grDay > 12) {
cessation <- day+1
} else (next)
}
}
}
if (is.na(onset)) {cessation = NA}
if (is.na(cessation)){cessation.index = NA}else
{cessation.index = data.cessation.yr$YYDOY[cessation]}
if (is.na(cessation)){cessation.Year = NA} else
{cessation.Year = as.numeric(substr(cessation.index, 1, 4))}
if (is.na(cessation)){cessation.Month = NA} else
{cessation.Month = as.numeric(data.cessation.yr$Month[cessation])}
if (is.na(cessation)){cessation.Day = NA} else
{cessation.Day = as.numeric(data.cessation.yr$Day[cessation])}
cessation.yr.dF <- data.frame(Year = as.numeric(year.vec[yr]),
cessation.Year = cessation.Year,
cessation.Month = cessation.Month,
cessation.Day = cessation.Day,
cessation.JD = as.numeric(substr(
cessation.index, 6, 8)),
cessation.Value = (cessation +
(Onset.dF$onset.Value[yr]
+ 35)))
Cessation.dF[yr,] <- cessation.yr.dF
# *****************************************************************************
duration <- NA
if (!is.na(onset) & !is.na(cessation)) {
duration <- length(seq(from =
lubridate::as_date(paste0(onset.yr.dF$onset.Year, "-",
onset.yr.dF$onset.Month, "-",
onset.yr.dF$onset.Day)),
to = lubridate::as_date(paste0(cessation.yr.dF$cessation.Year,"-",
cessation.yr.dF$cessation.Month,"-",
cessation.yr.dF$cessation.Day)),
by = "day"))
}
if (is.na(onset)) {duration = NA}
Duration.yr.dF <- data.frame(Year = as.numeric(year.vec[yr]),
onset.YYYYDOY = onset.index,
cessation.YYYYDOY = cessation.index,
Duration = duration)
Duration.dF[yr,] <- Duration.yr.dF
}
# *****************************************************************************
} # for yr
seasCal.lst <- list(Onset.dF, Cessation.dF, Duration.dF )
return(seasCal.lst)
###############################################################################
}
###############################################################################
###############################################################################
###############################################################################
| /scratch/gouwar.j/cran-all/cranData/AquaBEHER/R/calcSeasCal.R |
# calcWatBal.R Soil Water Balance
#
# Copyright (C) 2022 Center of Plant Sciences, Scuola Superiore Sant’Anna
# (http://www.capitalisegenetics.santannapisa.it)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
#' @title Soil Water Balance
#'
#' @description Function to estimate a budget-based daily soil water balance.
#' It calculates the amount of water present in the root zone of a homogeneous
#' grass canopy growing on a well-drained and homogeneous soil.
#'
#' @param data a dataframe containing the required variables: Columns must
#' contain the following parameters:
#'
#' Lat: latitude of the site in decimal degrees.
#' Lon: longitude of the site in decimal degrees.
#' Elev: elevation above sea level in (meters).
#' Year: year of record "YYYY".
#' Month: month of record "MM".
#' Day: day of record "DD".
#' Rain: daily rainfall in (mm).
#' Eto: daily potential evapotranspiration in (mm).
#'
#' @param soilWHC \verb{ }Water holding capacity of the soil in (mm).
#'
#' @return The function generates a data frame containing the following
#' components:
#'
#' \emph{\code{DRAIN: amount of deep drainage in (mm).}}
#'
#' \emph{\code{TRAN: amount of water lost by transpiration in (mm).}}
#'
#' \emph{\code{RUNOFF: surface runoff in (mm).}}
#'
#' \emph{\code{AVAIL: available soil moisture storage in (mm).}}
#'
#' \emph{\code{R: actual-to-potential evapotranspiration ratio.}}
#'
#'
#' @references Allen, R.G.; Pereira, L.S.; Raes, D.; Smith, M. Crop
#' Evapotranspiration: Guidelines for Computing Crop Water Requirements; FAO
#' Irrigation and Drainage Paper no. 56; FAO: Rome, Italy, 1998;
#' ISBN 92-5-104219-5.
#'
#' Doorenbos, J. and Pruitt, W.O. 1975. Guidelines for predicting crop water
#' requirements, Irrigation and Drainage Paper 24, Food and Agriculture
#' Organization of the United Nations, Rome, 179 p.
#'
#' @seealso \code{\link{calcEto}, \link{calcSeasCal}}
#'
#' @importFrom graphics legend par
#' @importFrom graphics lines
#' @importFrom raster raster extract
#' @importFrom sp coordinates CRS proj4string
#'
#' @examples
#'
#' \donttest{
#' # load example data:
#' data(AgroClimateData)
#'
#' # Estimate daily PET:
#' PET <- calcEto(AgroClimateData, method = "PM", Zh = 10)
#'
#' # Add the estimated PET 'ET.Daily' to a new column in AgroClimateData:
#' AgroClimateData$Eto <- PET$ET.Daily
#'
#' # Estimate daily water balance for the soil having 100mm of WHC:
#' watBal<- calcWatBal(AgroClimateData, soilWHC = 100)
#'
#' # Visualizing water balance parameters for 2019/20 season
#' watBal.19T20 <- watBal[watBal$Year %in% c(2019, 2020),]
#' date.vec <- as.Date.character(paste0(watBal.19T20$Year, "-",
#' watBal.19T20$Month, "-",
#' watBal.19T20$Day))
#'
#' plot(y = watBal.19T20$AVAIL, x = date.vec, ty="l", col="black", xlab="",
#' ylab=" Water (mm)",
#' main="Daily Water Balance Parameters", lwd = 1, lty = 2)
#' lines(y = watBal.19T20$Eto, x = date.vec, col="red", lwd = 3, lty = 1)
#' lines(y = watBal.19T20$Rain, x = date.vec, col="blue", lwd = 1, lty = 1)
#'
#' legend("bottomright",c("Rain","Eto","Available Moisture"),
#' horiz=FALSE, bty='n', cex=1.2,lty=c(1,1,2),lwd=c(1,3,1),
#' inset=c(0,0.7),
#' xpd=TRUE, col=c("blue","red","black"))
#' }
#'@export
###############################################################################
# ***** function to estimate Rindex: actual to potential evapotranspiration
# ratio based on Jones (1987)
calcWatBal <- function(data, soilWHC) {
# ***** initialize parameters
# CN : Runoff curve number
# DC : Drainage coefficient (mm3.mm-3)
# MUF : Water Uptake coefficient (mm^3 mm^-3)
# WATfc : Maximum Water content at field capacity (mm) >>>>> WHC
# WATfc = FC*z
# WATwp : Water content at wilting Point (mm)
# WATwp = WP*z
# data(rcn, envir = environment())
rcn <- raster::raster(system.file("extdata/rcn.tif", package = "AquaBEHER"))
pts.dF <- data.frame(Lat = as.numeric(data$Lat[1]),
Lon = as.numeric(data$Lon[1]))
pts.sp <- pts.dF
sp::coordinates(pts.sp) <- ~Lon+Lat
sp::proj4string(pts.sp) <- sp::CRS("+proj=longlat")
rcn.pts <- raster::extract(rcn, pts.sp)
if (!is.null(rcn.pts) & !is.na(rcn.pts)) {
CN <- rcn.pts
} else {
CN <- 65 # *** well managed grass
}
DC <- 0.55 #
MUF <- 0.1
WATwp <- 0.15 * soilWHC
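# wilting-point storage assumed at 15% of soilWHC
# (e.g. soilWHC = 100 mm gives WATwp = 15 mm)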
# Maximum abstraction (for run off)
S <- 25400/CN-254
# Initial Abstraction (for run off)
IA <- 0.2*S
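# SCS curve-number relations above, illustrative check for the default CN = 65:
# S = 25400/65 - 254 ~ 136.8 mm and IA = 0.2*S ~ 27.4 mm, i.e. daily rainfall
# below about 27 mm produces no runoff with that curve number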
date.vec <- as.Date(paste0(data$Year, "-", data$Month, "-", data$Day))
data$RUNOFF <- data$DRAIN <- data$TRAN <- data$AVAIL <- data$R <- NA
data$Rain[data$Rain < 2] <- 0
for (day in seq_along(date.vec)) {
if (day == 1) {
WAT0 <- 0
# Change in water before drainage (Precipitation - Runoff)
if (data$Rain[day] > IA){
data$RUNOFF[day] <- (data$Rain[day]-0.2*S)^2/(data$Rain[day]+0.8*S)
}else{
data$RUNOFF[day] <- 0
}
data$RUNOFF[day] <- max(data$RUNOFF[day], 0)
# Calculating the amount of deep drainage
if ((WAT0+data$Rain[day]-data$RUNOFF[day]) > soilWHC){
data$DRAIN[day] <- DC*(WAT0+data$Rain[day]-data$RUNOFF[day]-soilWHC)
}else{
data$DRAIN[day] <- 0
}
data$DRAIN[day] <- max(data$DRAIN[day], 0)
# Calculating the amount of water lost by transpiration (after drainage)
data$TRAN[day] <- min(MUF*(WAT0+data$Rain[day]-data$RUNOFF[day]-
data$DRAIN[day]- WATwp), data$Eto[day])
data$TRAN[day] <- max(data$TRAN[day], 0)
data$TRAN[day] <- min(data$TRAN[day], soilWHC)
data$R[day] <- data$TRAN[day] / data$Eto[day]
data$TRAN[day] <- max(data$TRAN[day], (data$R[day] * data$Eto[day]))
data$AVAIL[day] <- WAT0 + (data$Rain[day]-data$RUNOFF[day]-
data$DRAIN[day] - data$TRAN[day])
data$AVAIL[day] <- min(data$AVAIL[day], soilWHC)
data$AVAIL[day] <- max(data$AVAIL[day], 0)
} else {
WAT0 <- data$AVAIL[day-1]
# Change in water before drainage (Precipitation - Runoff)
if (data$Rain[day] > IA){
data$RUNOFF[day] <- (data$Rain[day]-0.2*S)^2/(data$Rain[day]+0.8*S)
}else{
data$RUNOFF[day] <- 0
}
data$RUNOFF[day] <- max(data$RUNOFF[day], 0)
# Calculating the amount of deep drainage
if ((WAT0+data$Rain[day]-data$RUNOFF[day]) > soilWHC){
data$DRAIN[day] <- DC*(WAT0+data$Rain[day]-data$RUNOFF[day]-soilWHC)
}else{
data$DRAIN[day] <- 0
}
data$DRAIN[day] <- max(data$DRAIN[day], 0)
# Calculating the amount of water lost by transpiration (after drainage)
data$TRAN[day] <- min(MUF*(WAT0+data$Rain[day]-data$RUNOFF[day]-
data$DRAIN[day]- WATwp), data$Eto[day])
data$TRAN[day] <- max(data$TRAN[day], 0)
data$TRAN[day] <- min(data$TRAN[day], soilWHC)
data$R[day] <- data$TRAN[day] / data$Eto[day]
data$TRAN[day] <- max(data$TRAN[day], (data$R[day] * data$Eto[day]))
data$AVAIL[day] <- WAT0 + (data$Rain[day]-data$RUNOFF[day]-
data$DRAIN[day] - data$TRAN[day])
data$AVAIL[day] <- min(data$AVAIL[day], soilWHC)
data$AVAIL[day] <- max(data$AVAIL[day], 0)
}
}
data$R <- round(data$R, 3)
data$AVAIL <- round(data$AVAIL, 3)
data$TRAN <- round(data$TRAN, 3)
data$DRAIN <- round(data$DRAIN, 3)
data$RUNOFF <- round(data$RUNOFF, 3)
return(data)
}
##############################################################################
##############################################################################
##############################################################################
| /scratch/gouwar.j/cran-all/cranData/AquaBEHER/R/calcWatBal.R |
# climateData.R Example climate data
#
# Copyright (C) 2022 Center of Plant Sciences, Scuola Superiore Sant’Anna
# (http://www.capitalisegenetics.santannapisa.it)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
#' @title A dataframe containing raw climate data
#'
#' @description The R data object was obtained from Instituto Nacional de
#' Meteorologia (INAM). This example data set contains the daily raw climate
#' data over the period between 1/1/1996 and 12/31/2020 from a weather station
#' located in Angochen, Nampula province of Mozambique.
#'
#' @docType data
#'
#' @usage data(climateData)
#'
#' @format A data frame containing daily observations of climate parameters:
#'
#' \verb{ }\emph{\strong{\code{Station_Name:}} name of the weather
#' station.}
#'
#' \verb{ }\emph{\strong{\code{Lat:}} latitude of the site in decimal
#' degrees.}
#'
#' \verb{ }\emph{\strong{\code{Lon:}} longitude of the site in decimal
#' degrees.}
#'
#' \verb{ }\emph{\strong{\code{Elev:}} elevation above sea level in
#' (meters).}
#'
#' \verb{ }\emph{\strong{\code{Year:}} year of record "YYYY".}
#'
#' \verb{ }\emph{\strong{\code{Month:}} month of record "MM".}
#'
#' \verb{ }\emph{\strong{\code{Day:}} day of record "DD".}
#'
#' \verb{ }\emph{\strong{\code{Rain:}} daily rainfall in (mm).}
#'
#' \verb{ }\emph{\strong{\code{Tmax:}} daily maximum temperature at 2-m
#' height in (°C).}
#'
#' \verb{ }\emph{\strong{\code{Tmin:}} daily minimum temperature at 2-m
#' height in (°C).}
#'
#' @source INAM - Instituto Nacional de Meteorologia, Mozambique
#'
#' @keywords datasets
#'
#' @seealso \code{\link{AgroClimateData}, \link{calcEto}}
#'
#' @examples
#' # load example data:
#' data(climateData)
#'
#' # Get the structure of the data frame:
#' str(climateData)
#'
#' # Get the head of the data frame:
#' head(climateData)
#'
"climateData"
# ********** end of code **********
###############################################################################
| /scratch/gouwar.j/cran-all/cranData/AquaBEHER/R/climateData.R |
#' Data which is used internally in the package (runoff curve number)
#' @name rcn
#' @importFrom raster raster extract
#' @export
"rcn"
| /scratch/gouwar.j/cran-all/cranData/AquaBEHER/R/rcn.R |
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----AquaBEHER setup----------------------------------------------------------
# install.packages("devtools")
# devtools::install_github("RobelTakele/AquaBEHER", dependencies = TRUE, type = "source",
# build_manual = TRUE, build_vignettes = TRUE)
library(AquaBEHER)
library(ggplot2)
## ----climateData--------------------------------------------------------------
data(AgroClimateData)
str(AgroClimateData)
head(AgroClimateData)
## -----------------------------------------------------------------------------
PET <- calcEto(AgroClimateData, method = "PM", crop = "short")
str(PET)
## -----------------------------------------------------------------------------
# Compute Eto using hargreves-samani formulation using the example data from 'AgroClimateData':
data(AgroClimateData)
Eto.HS <- calcEto(AgroClimateData, method = "HS")
# Now compute Eto using Penman-Monteith formulation for hypothetical grass (short crop):
Eto.PM <- calcEto(AgroClimateData, method = "PM", Zh = 10)
plot(Eto.PM$ET.Daily[1:1000], ty="l", xlab="Days since 1996", ylab="Eto (mm/day)", col="black", lwd = 1, lty = 2)
lines(Eto.HS$ET.Daily[1:1000], col="blue", lwd = 2, lty = 1)
legend("bottom",c("Eto: Penman–Monteith ","Eto: Hargreaves-Samani"),
horiz=TRUE, bty='n', cex=1,lty=c(2,1),lwd=c(2,2), inset=c(1,1),
xpd=TRUE, col=c("black","blue"))
## -----------------------------------------------------------------------------
PET <- calcEto(AgroClimateData, method = "PM", Zh = 10)
# Add the estimated PET 'ET.Daily' to a new column in AgroClimateData:
AgroClimateData$Eto <- PET$ET.Daily
# Estimate daily water balance for the soil having 100mm of WHC:
soilWHC <- 100
watBal<- calcWatBal(data = AgroClimateData, soilWHC)
str(watBal )
# Plotting the water balance output for the climatological year from 2019 to 2020 using ggplot2:
watBal.19T20 <- watBal[watBal$Year %in% c(2019, 2020),]
date.vec <- as.Date.character(paste0(watBal.19T20$Year, "-", watBal.19T20$Month, "-", watBal.19T20$Day))
ggplot(data = watBal.19T20) +
geom_line(aes(y = AVAIL, x = date.vec, fill = "AVAIL"), size = 0.8, color = "red") +
geom_col(aes(y = Rain, x = date.vec, fill = "Rain"), size = 1) +
scale_x_date(date_breaks = "1 month", date_labels = "%b-%Y") +
scale_fill_manual(name = " ", values = c('AVAIL' = "red", 'Rain' = "blue")) +
scale_y_continuous(expand = c(0, 2)) +
labs(y="Moisture (mm)", x=NULL) +
theme_linedraw() +
theme(axis.title = element_text(size = 14, colour = "black", family = "Times New Roman"),
axis.text = element_text(size = 10, colour = "black", family = "Times New Roman"),
axis.text.x = element_text(size = 10, colour = "black", family = "Times New Roman", angle = 45, vjust = 0.5))
## -----------------------------------------------------------------------------
# The seasonal calendar is estimated for an onset window ranging from 01-September to 31-January, for a soil with 100 mm of WHC
soilWHC <- 100
onsetWind.start <- "1996-09-01" # earliest possible start date of the onset window
onsetWind.end <- "1997-01-31" # the latest possible date for end of the onset window
cessaWind.end <- "1997-06-30" # the latest possible date for end of the cessation window
seasCal.lst <- calcSeasCal(watBal, onsetWind.start, onsetWind.end, cessaWind.end, soilWHC = 100)
str(seasCal.lst)
# plotting year to year variation of onset cessation and seasonal duration
seasCal.dF <- data.frame(Year = seasCal.lst[[1]][,c("Year")],
Onset = seasCal.lst[[1]][,c("onset.JD")],
Cessation = seasCal.lst[[2]][,c("cessation.JD")],
Duration = seasCal.lst[[3]][,c("Duration")])
ggplot(data = seasCal.dF) +
geom_line(aes(y = Onset, x = Year, color = "Onset"), size = 1) +
geom_line(aes(y = Cessation, x = Year, color = "Cessation"), size = 1) +
geom_area(aes(y = Duration, x = Year, color = "Duration"), size = 0.8, alpha = 0.4)+
scale_color_manual(name = "Calendar", values = c('Onset' = "blue", 'Cessation' = "red", 'Duration' = "grey")) +
labs(y="Day of a year (DOY)", x=NULL) +
theme_bw()
| /scratch/gouwar.j/cran-all/cranData/AquaBEHER/inst/doc/AquaBEHER.R |
---
title: Estimation of rainy season calendar and soil water balance for agricultural
crops using AquaBEHER
author: "Robel Takele and Matteo Dell'Acqua"
date: "`r Sys.Date()`"
output:
prettydoc::html_pretty:
theme: architect
highlight: github
math: mathjax
fig_caption: yes
toc: yes
toc_depth: 1
anchor_sections: yes
number_sections: no
# css: tools/vignette.css
df_print: default
keep_tex: yes
pdf_document:
toc: yes
toc_depth: 1
vignette: >
%\VignetteIndexEntry{AquaBEHER}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteDepends{prettydoc}
%\VignetteDepends{ggplot2}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
# 1. Introduction
This vignette is a short tutorial on the usage and functionalities of the AquaBEHER R package. It is directed to first-time package users who are familiar with the basic concepts of R. The vignette presents the use of several key functions of the package, along with some useful hints and guidelines. AquaBEHER computes and integrates daily reference evapotranspiration (Eto) and a soil water balance model to estimate parameters of crop and soil water balances for agricultural crops. Using the computed daily soil water balance parameters, the package can estimate the rainy season calendar (Onset, Cessation and Duration) based on an agroclimatic approach for a predefined window.
# 2. Installation and Loading
Installing the latest development version from the online repository using the devtools package. Note that to utilize the full functionality of devtools on Windows, [Rtools](https://cran.r-project.org/bin/windows/Rtools/) must be installed.
```{r AquaBEHER setup}
# install.packages("devtools")
# devtools::install_github("RobelTakele/AquaBEHER", dependencies = TRUE, type = "source",
# build_manual = TRUE, build_vignettes = TRUE)
library(AquaBEHER)
library(ggplot2)
```
# 3. Required Climate Data
The methods for calculating evapotranspiration from meteorological data require various physical parameters. Some of the data are measured directly in weather stations. Other parameters are related to commonly measured data and can be derived with the help of a direct or empirical relationship.
The meteorological factors determining evapotranspiration are weather parameters which provide energy for vaporization and remove water vapor from the evaporating surface. The principal weather parameters to consider are presented below.
* Maximum temperature
* Minimum temperature
* Solar radiation
* Dew point temperature or relative humidity
* Wind speed
In addition, georeferenced information on the location of the climate record is required:
* Latitude
* Longitude
* Elevation
```{r climateData}
data(AgroClimateData)
str(AgroClimateData)
head(AgroClimateData)
```
# 4. Potential Evapotranspiration
For many agricultural applications, it is relevant to get an estimate of the potential evapotranspiration (PET). Different methods are developed for estimating Eto. Most of them use empirical equations to determine the value of PET from weather variables. The AquaBEHER package provides options for estimating reference evapotranspiration (Eto) using the FAO Penman-Monteith, Priestley Taylor and Hargreaves-Samani formulations.
<img align="right" width="300" src=tools/Evapotranspiration.gif>
### Usage
calcEto(AgroClimateData, method = "PM", crop = "short")
* The function gives a list of
+ daily estimations of Eto in (mm/day)
+ daily estimations of extraterrestrial radiation (MJ/m2/day)
+ daily estimations of slope of vapor pressure curve (kPa/°C)
### Example
The calcEto function computes Eto from a data frame containing daily values of meteorological parameters:
```{r}
PET <- calcEto(AgroClimateData, method = "PM", crop = "short")
str(PET)
```
Graphical comparison of the evapotranspiration (mm/day) calculated using the FAO Penman–Monteith formulation and the Hargreaves-Samani formulation:
```{r}
# Compute Eto using the Hargreaves-Samani formulation with the example data from 'AgroClimateData':
data(AgroClimateData)
Eto.HS <- calcEto(AgroClimateData, method = "HS")
# Now compute Eto using Penman-Monteith formulation for hypothetical grass (short crop):
Eto.PM <- calcEto(AgroClimateData, method = "PM", Zh = 10)
plot(Eto.PM$ET.Daily[1:1000], ty="l", xlab="Days since 1996", ylab="Eto (mm/day)", col="black", lwd = 1, lty = 2)
lines(Eto.HS$ET.Daily[1:1000], col="blue", lwd = 2, lty = 1)
legend("bottom",c("Eto: Penman–Monteith ","Eto: Hargreaves-Samani"),
horiz=TRUE, bty='n', cex=1,lty=c(2,1),lwd=c(2,2), inset=c(1,1),
xpd=TRUE, col=c("black","blue"))
```
**Notice:** It appears that the FAO Penman–Monteith formulation presents greater day-to-day variation in evapotranspiration than the Hargreaves-Samani formulation.
# 5. Soil Water Balance
This function performs daily computations of soil water balance parameters for the root zone. Soil water changes daily in response to rainfall, evapotranspiration, runoff and deep drainage.
**Assumptions**
* Atmospheric conditions affect the rate at which crops use water.
* The soil has uniform cross-section of homogeneous volume with a measured depth and a unit area.
* A well-established, dense grass crop is growing, which completely covers the soil surface.
### Usage
calcWatBal(data, soilWHC)
### Example
The calcWatBal function computes the water balance from a data frame containing daily values of Rain and Eto, together with the soil water holding capacity.
```{r}
PET <- calcEto(AgroClimateData, method = "PM", Zh = 10)
# Add the estimated PET 'ET.Daily' to a new column in AgroClimateData:
AgroClimateData$Eto <- PET$ET.Daily
# Estimate daily water balance for the soil having 100mm of WHC:
soilWHC <- 100
watBal<- calcWatBal(data = AgroClimateData, soilWHC)
str(watBal )
# Plotting the water balance output for the climatological year from 2019 to 2020 using ggplot2:
watBal.19T20 <- watBal[watBal$Year %in% c(2019, 2020),]
date.vec <- as.Date.character(paste0(watBal.19T20$Year, "-", watBal.19T20$Month, "-", watBal.19T20$Day))
ggplot(data = watBal.19T20) +
geom_line(aes(y = AVAIL, x = date.vec, fill = "AVAIL"), size = 0.8, color = "red") +
geom_col(aes(y = Rain, x = date.vec, fill = "Rain"), size = 1) +
scale_x_date(date_breaks = "1 month", date_labels = "%b-%Y") +
scale_fill_manual(name = " ", values = c('AVAIL' = "red", 'Rain' = "blue")) +
scale_y_continuous(expand = c(0, 2)) +
labs(y="Moisture (mm)", x=NULL) +
theme_linedraw() +
theme(axis.title = element_text(size = 14, colour = "black", family = "Times New Roman"),
axis.text = element_text(size = 10, colour = "black", family = "Times New Roman"),
axis.text.x = element_text(size = 10, colour = "black", family = "Times New Roman", angle = 45, vjust = 0.5))
```
# 6. Rainy Season Calendar
The onset and cessation dates of the rainy season are determined for each climatological year (`Figure 1`). The term climatological year represents the period between two driest periods; it is traditionally defined based on a calendar year starting from the driest month and has a fixed length of 12 months.

Various methods have been developed to estimate the rainy season calendar, i.e. the onset, cessation and duration of the rainy season. A common method used for crop production applications is the agroclimatic approach. As per the agroclimatic approach, a normal rainy season (growing season) is defined as one in which there is an excess of precipitation over potential evapotranspiration (PET). Such a period meets the evapotranspiration demands of crops and recharges the moisture of the soil profile (FAO 1977; 1978; 1986). Thus, the rainy season calendar is defined accordingly:
**Onset**
The onset of the rainy season will start on the first day after `onsetWind.start`, when the actual-to-potential evapotranspiration ratio is greater than 0.5 for 7 consecutive days, followed by a 20-day period in which plant available water remains above wilting over the root zone of the soil layer.
**Cessation**
The rainy season will end (cessation) on the first day after `onsetWind.end`, when the actual-to-potential evapotranspiration ratio is less than 0.5 for 7 consecutive days, followed by 12 consecutive non-growing days in which plant available water remains below wilting over the root zone of the soil layer.
**Duration**
The duration of the rainy season is the total number of days from onset to cessation of the season.

### Example
Using the example climate data provided by the AquaBEHER package, compute the rainy season calendar:
```{r}
# The seasonal calendar is estimated for an onset window ranging from 01-September to 31-January, for a soil with 100 mm of WHC
soilWHC <- 100
onsetWind.start <- "1996-09-01" # earliest possible start date of the onset window
onsetWind.end <- "1997-01-31" # the latest possible date for end of the onset window
cessaWind.end <- "1997-06-30" # the latest possible date for end of the cessation window
seasCal.lst <- calcSeasCal(watBal, onsetWind.start, onsetWind.end, cessaWind.end, soilWHC = 100)
str(seasCal.lst)
# plotting year to year variation of onset cessation and seasonal duration
seasCal.dF <- data.frame(Year = seasCal.lst[[1]][,c("Year")],
Onset = seasCal.lst[[1]][,c("onset.JD")],
Cessation = seasCal.lst[[2]][,c("cessation.JD")],
Duration = seasCal.lst[[3]][,c("Duration")])
ggplot(data = seasCal.dF) +
geom_line(aes(y = Onset, x = Year, color = "Onset"), size = 1) +
geom_line(aes(y = Cessation, x = Year, color = "Cessation"), size = 1) +
geom_area(aes(y = Duration, x = Year, color = "Duration"), size = 0.8, alpha = 0.4)+
scale_color_manual(name = "Calendar", values = c('Onset' = "blue", 'Cessation' = "red", 'Duration' = "grey")) +
labs(y="Day of a year (DOY)", x=NULL) +
theme_bw()
```
# 7. References
Allen, R.G.; Pereira, L.S.; Raes, D.; Smith, M. Crop Evapotranspiration: Guidelines for Computing Crop Water Requirements; FAO Irrigation and Drainage Paper no. 56; FAO: Rome, Italy, 1998; ISBN 92-5-104219-5.
Doorenbos, J. and Pruitt, W.O. 1975. Guidelines for predicting crop water requirements, Irrigation and Drainage Paper 24, Food and Agriculture Organization of the United Nations, Rome, 179 p.
Hargreaves, G.H. and Samani, Z.A. (1985) Reference Crop Evapotranspiration from Temperature. Applied Engineering in Agriculture, 1, 96-99.
$$\\[0.2in]$$
<img align="right" width="300" src="http://www.capitalisegenetics.santannapisa.it/sites/default/files/u65/Logo%20plant%20sciences.png">
The Genetics Group at the **Center of Plant Sciences** is a geographically and culturally diverse research team working on data-driven agricultural innovation combining crop genetics, climate, and participatory approaches. We are based at **Scuola Superiore Sant’Anna**, Pisa, Italy.
You can contact us sending an email to Matteo Dell'Acqua (mailto:[email protected]) or Mario Enrico Pè (mailto:[email protected]).
You can find out more about us visiting the group web page (http://www.capitalisegenetics.santannapisa.it/) and following us on Twitter [@GenLab_SSA](https://twitter.com/genlab_ssa?lang=en)
We are committed to the
[free software](https://www.fsf.org/about/what-is-free-software) and
[FAIR](https://www.go-fair.org/fair-principles/) principles.
This set of repositories collects our latest developments and provide reusable code.
| /scratch/gouwar.j/cran-all/cranData/AquaBEHER/inst/doc/AquaBEHER.Rmd |
---
title: Estimation of rainy season calendar and soil water balance for agricultural
crops using AquaBEHER
author: "Robel Takele and Matteo Dell'Acqua"
date: "`r Sys.Date()`"
output:
prettydoc::html_pretty:
theme: architect
highlight: github
math: mathjax
fig_caption: yes
toc: yes
toc_depth: 1
anchor_sections: yes
number_sections: no
# css: tools/vignette.css
df_print: default
keep_tex: yes
pdf_document:
toc: yes
toc_depth: 1
vignette: >
%\VignetteIndexEntry{AquaBEHER}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteDepends{prettydoc}
%\VignetteDepends{ggplot2}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
# 1. Introduction
This vignette is a short tutorial on the usage and functionalities of the AquaBEHER R package. It is directed to first-time package users who are familiar with the basic concepts of R. The vignette presents the use of several key functions of the package, along with some useful hints and guidelines. AquaBEHER computes and integrates daily reference evapotranspiration (Eto) and a soil water balance model to estimate parameters of crop and soil water balances for agricultural crops. Using the computed daily soil water balance parameters, the package can estimate the rainy season calendar (Onset, Cessation and Duration) based on an agroclimatic approach for a predefined window.
# 2. Installation and Loading
Installing the latest development version from the online repository using the devtools package. Note that to utilize the full functionality of devtools on Windows, [Rtools](https://cran.r-project.org/bin/windows/Rtools/) must be installed.
```{r AquaBEHER setup}
# install.packages("devtools")
# devtools::install_github("RobelTakele/AquaBEHER", dependencies = TRUE, type = "source",
# build_manual = TRUE, build_vignettes = TRUE)
library(AquaBEHER)
library(ggplot2)
```
# 3. Required Climate Data
The methods for calculating evapotranspiration from meteorological data require various physical parameters. Some of the data are measured directly in weather stations. Other parameters are related to commonly measured data and can be derived with the help of a direct or empirical relationship.
The meteorological factors determining evapotranspiration are weather parameters which provide energy for vaporization and remove water vapor from the evaporating surface. The principal weather parameters to consider are presented below.
* Maximum temperature
* Minimum temperature
* Solar radiation
* Dew point temperature or relative humidity
* Wind speed
In addition, georeferenced information on the location of the climate record is required:
* Latitude
* Longitude
* Elevation
```{r climateData}
data(AgroClimateData)
str(AgroClimateData)
head(AgroClimateData)
```
# 4. Potential Evapotranspiration
For many agricultural applications, it is relevant to get an estimate of the potential evapotranspiration (PET). Different methods are developed for estimating Eto. Most of them use empirical equations to determine the value of PET from weather variables. The AquaBEHER package provides options for estimating reference evapotranspiration (Eto) using the FAO Penman-Monteith, Priestley Taylor and Hargreaves-Samani formulations.
<img align="right" width="300" src=tools/Evapotranspiration.gif>
### Usage
calcEto(AgroClimateData, method = "PM", crop = "short")
* The function gives a list of
+ daily estimations of Eto in (mm/day)
+ daily estimations of extraterrestrial radiation (MJ/m2/day)
+ daily estimations of slope of vapor pressure curve (kPa/°C)
### Example
The calcEto function computes Eto from a data frame containing daily values of meteorological parameters:
```{r}
PET <- calcEto(AgroClimateData, method = "PM", crop = "short")
str(PET)
```
Graphical comparison of the evapotranspiration (mm/day) calculated using the FAO Penman–Monteith formulation and the Hargreaves-Samani formulation:
```{r}
# Compute Eto using the Hargreaves-Samani formulation with the example data from 'AgroClimateData':
data(AgroClimateData)
Eto.HS <- calcEto(AgroClimateData, method = "HS")
# Now compute Eto using Penman-Monteith formulation for hypothetical grass (short crop):
Eto.PM <- calcEto(AgroClimateData, method = "PM", Zh = 10)
plot(Eto.PM$ET.Daily[1:1000], ty="l", xlab="Days since 1996", ylab="Eto (mm/day)", col="black", lwd = 1, lty = 2)
lines(Eto.HS$ET.Daily[1:1000], col="blue", lwd = 2, lty = 1)
legend("bottom",c("Eto: Penman–Monteith ","Eto: Hargreaves-Samani"),
horiz=TRUE, bty='n', cex=1,lty=c(2,1),lwd=c(2,2), inset=c(1,1),
xpd=TRUE, col=c("black","blue"))
```
**Notice:** It appears that the FAO Penman–Monteith formulation presents greater day-to-day variation in evapotranspiration than the Hargreaves-Samani formulation.
# 5. Soil Water Balance
This function performs daily computations of soil water balance parameters for the root zone. Soil water changes daily in response to rainfall, evapotranspiration, runoff and deep drainage.
**Assumptions**
* Atmospheric conditions affect the rate at which crops use water.
* The soil has uniform cross-section of homogeneous volume with a measured depth and a unit area.
* A well-established, dense grass crop is growing, which completely covers the soil surface.
### Usage
calcWatBal(data, soilWHC)
### Example
The calcWatBal function computes the water balance from a data frame containing daily values of Rain and Eto, together with the soil water holding capacity.
```{r}
PET <- calcEto(AgroClimateData, method = "PM", Zh = 10)
# Add the estimated PET 'ET.Daily' to a new column in AgroClimateData:
AgroClimateData$Eto <- PET$ET.Daily
# Estimate daily water balance for the soil having 100mm of WHC:
soilWHC <- 100
watBal<- calcWatBal(data = AgroClimateData, soilWHC)
str(watBal )
# Plotting the water balance output for the climatological year from 2019 to 2020 using ggplot2:
watBal.19T20 <- watBal[watBal$Year %in% c(2019, 2020),]
date.vec <- as.Date.character(paste0(watBal.19T20$Year, "-", watBal.19T20$Month, "-", watBal.19T20$Day))
ggplot(data = watBal.19T20) +
geom_line(aes(y = AVAIL, x = date.vec, fill = "AVAIL"), size = 0.8, color = "red") +
geom_col(aes(y = Rain, x = date.vec, fill = "Rain"), size = 1) +
scale_x_date(date_breaks = "1 month", date_labels = "%b-%Y") +
scale_fill_manual(name = " ", values = c('AVAIL' = "red", 'Rain' = "blue")) +
scale_y_continuous(expand = c(0, 2)) +
labs(y="Moisture (mm)", x=NULL) +
theme_linedraw() +
theme(axis.title = element_text(size = 14, colour = "black", family = "Times New Roman"),
axis.text = element_text(size = 10, colour = "black", family = "Times New Roman"),
axis.text.x = element_text(size = 10, colour = "black", family = "Times New Roman", angle = 45, vjust = 0.5))
```
# 6. Rainy Season Calendar
The onset and cessation dates of the rainy season are determined for each climatological year (`Figure 1`). The term climatological year represents the period between two driest periods; it is traditionally defined based on a calendar year starting from the driest month and has a fixed length of 12 months.

Various methods have been developed to estimate the rainy season calendar, i.e. the onset, cessation and duration of the rainy season. A common method used for crop production applications is the agroclimatic approach. As per the agroclimatic approach, a normal rainy season (growing season) is defined as one in which there is an excess of precipitation over potential evapotranspiration (PET). Such a period meets the evapotranspiration demands of crops and recharges the moisture of the soil profile (FAO 1977; 1978; 1986). Thus, the rainy season calendar is defined accordingly:
**Onset**
The onset of the rainy season will start on the first day after `onsetWind.start`, when the actual-to-potential evapotranspiration ratio is greater than 0.5 for 7 consecutive days, followed by a 20-day period in which plant available water remains above wilting over the root zone of the soil layer.
**Cessation**
The rainy season will end (cessation) on the first day after `onsetWind.end`, when the actual-to-potential evapotranspiration ratio is less than 0.5 for 7 consecutive days, followed by 12 consecutive non-growing days in which plant available water remains below wilting over the root zone of the soil layer.
**Duration**
The duration of the rainy season is the total number of days from onset to cessation of the season.

### Example
Using the example climate data provided by the AquaBEHER package, compute the rainy season calendar:
```{r}
# The seasonal calendar is estimated for an onset window ranging from 01-September to 31-January, for a soil with 100 mm of WHC
soilWHC <- 100
onsetWind.start <- "1996-09-01" # earliest possible start date of the onset window
onsetWind.end <- "1997-01-31" # the latest possible date for end of the onset window
cessaWind.end <- "1997-06-30" # the latest possible date for end of the cessation window
seasCal.lst <- calcSeasCal(watBal, onsetWind.start, onsetWind.end, cessaWind.end, soilWHC = 100)
str(seasCal.lst)
# plotting year to year variation of onset cessation and seasonal duration
seasCal.dF <- data.frame(Year = seasCal.lst[[1]][,c("Year")],
Onset = seasCal.lst[[1]][,c("onset.JD")],
Cessation = seasCal.lst[[2]][,c("cessation.JD")],
Duration = seasCal.lst[[3]][,c("Duration")])
ggplot(data = seasCal.dF) +
geom_line(aes(y = Onset, x = Year, color = "Onset"), size = 1) +
geom_line(aes(y = Cessation, x = Year, color = "Cessation"), size = 1) +
geom_area(aes(y = Duration, x = Year, color = "Duration"), size = 0.8, alpha = 0.4)+
scale_color_manual(name = "Calendar", values = c('Onset' = "blue", 'Cessation' = "red", 'Duration' = "grey")) +
labs(y="Day of a year (DOY)", x=NULL) +
theme_bw()
```
# 7. References
Allen, R.G.; Pereira, L.S.; Raes, D.; Smith, M. Crop Evapotranspiration: Guidelines for Computing Crop Water Requirements; FAO Irrigation and Drainage Paper no. 56; FAO: Rome, Italy, 1998; ISBN 92-5-104219-5.
Doorenbos, J. and Pruitt, W.O. 1975. Guidelines for predicting crop water requirements, Irrigation and Drainage Paper 24, Food and Agriculture Organization of the United Nations, Rome, 179 p.
Hargreaves, G.H. and Samani, Z.A. (1985) Reference Crop Evapotranspiration from Temperature. Applied Engineering in Agriculture, 1, 96-99.
$$\\[0.2in]$$
<img align="right" width="300" src="http://www.capitalisegenetics.santannapisa.it/sites/default/files/u65/Logo%20plant%20sciences.png">
The Genetics Group at the **Center of Plant Sciences** is a geographically and culturally diverse research team working on data-driven agricultural innovation combining crop genetics, climate, and participatory approaches. We are based at **Scuola Superiore Sant’Anna**, Pisa, Italy.
You can contact us sending an email to Matteo Dell'Acqua (mailto:[email protected]) or Mario Enrico Pè (mailto:[email protected]).
You can find out more about us visiting the group web page (http://www.capitalisegenetics.santannapisa.it/) and following us on Twitter [@GenLab_SSA](https://twitter.com/genlab_ssa?lang=en)
We are committed to the
[free software](https://www.fsf.org/about/what-is-free-software) and
[FAIR](https://www.go-fair.org/fair-principles/) principles.
This set of repositories collects our latest developments and provide reusable code.
| /scratch/gouwar.j/cran-all/cranData/AquaBEHER/vignettes/AquaBEHER.Rmd |
# derivatives of species concentrations of a univalent acid with respect to [H+]
dHAdH_uni <- function(H, SumA, K)
{
dHAdH_uni <- (1/(H+K) - H/(H^2 + 2*H*K + K^2))*SumA
}
dAdH_uni <- function(H, SumA, K)
{
dAdH_uni <- - K/(H^2 + 2*H*K + K^2)*SumA
}
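# Illustrative check (not part of the original AquaEnv code): because
# [HA] + [A-] = SumA is constant, the two univalent derivatives above must sum
# to zero for any H, K and SumA. The numbers below are arbitrary example values.
# H <- 1e-8; SumA <- 1e-3; K <- 1e-9
# dHAdH_uni(H, SumA, K) + dAdH_uni(H, SumA, K)   # should evaluate to ~0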
# derivatives of species concentrations of a bivalent acid with respect to [H+]
dH2AdH_bi <- function(H, SumA, K1, K2)
{
dH2AdH_bi <- ((2*H/(H*K1 + K1*K2 + H^2)) - ((H^2*(2*H+K1))/(H*K1 + K1*K2 + H^2)^2)) * SumA
}
dHAdH_bi <- function(H, SumA, K1, K2)
{
dHAdH_bi <- ((K1/(H*K1 + K1*K2 + H^2)) - ((H*K1*(2*H+K1))/(H*K1 + K1*K2 + H^2)^2)) * SumA
}
dAdH_bi <- function(H, SumA, K1, K2)
{
dAdH_bi <- (- ((K1*K2*(2*H+K1))/(H*K1 + K1*K2 + H^2)^2)) * SumA
}
# derivatives of species concentrations of a trivalent acid with respect to [H+]
dH3AdH_tri <- function(H, SumA, K1, K2, K3)
{
dH3AdH_tri <- ((3*H^2/(H*K1*K2 + K1*K2*K3 + H^2*K1 + H^3)) - ((H^3*(2*H*K1 + K1*K2 + 3*H^2))/(H*K1*K2 + K1*K2*K3 + H^2*K1 + H^3)^2)) * SumA
}
dH2AdH_tri <- function(H, SumA, K1, K2, K3)
{
dH2AdH_tri <- ((2*H*K1/(H*K1*K2 + K1*K2*K3 + H^2*K1 + H^3)) - ((H^2*K1*(2*H*K1 + K1*K2 + 3*H^2))/(H*K1*K2 + K1*K2*K3 + H^2*K1 + H^3)^2)) * SumA
}
dHAdH_tri <- function(H, SumA, K1, K2, K3)
{
dHAdH_tri <- ((K1*K2/(H*K1*K2 + K1*K2*K3 + H^2*K1 + H^3)) - ((H*K1*K2*(2*H*K1 + K1*K2 + 3*H^2))/(H*K1*K2 + K1*K2*K3 + H^2*K1 + H^3)^2)) * SumA
}
dAdH_tri <- function(H, SumA, K1, K2, K3)
{
dAdH_tri <- (- ((K1*K2*K3*(2*H*K1 + K1*K2 + 3*H^2))/(H*K1*K2 + K1*K2*K3 + H^2*K1 + H^3)^2)) * SumA
}
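# Illustrative sanity check (not part of the original AquaEnv code): dAdH_tri
# should match a central finite difference of the trivalent speciation formula
# [A3-] = K1*K2*K3*SumA / (H*K1*K2 + K1*K2*K3 + H^2*K1 + H^3).
# All numerical values below are arbitrary example values.
# A_tri <- function(H, SumA, K1, K2, K3) K1*K2*K3*SumA / (H*K1*K2 + K1*K2*K3 + H^2*K1 + H^3)
# H <- 1e-8; SumA <- 1e-3; K1 <- 1e-2; K2 <- 1e-7; K3 <- 1e-12; eps <- H*1e-6
# (A_tri(H+eps, SumA, K1, K2, K3) - A_tri(H-eps, SumA, K1, K2, K3)) / (2*eps)  # numerical
# dAdH_tri(H, SumA, K1, K2, K3)                                                # analytical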
# PRIVATE function:
# calculates the derivative of [TA] with respect to [H+]: the buffer factor
dTAdH <- function(ae) # object of class aquaenv
{
with (ae,
{
H <- 10^(-pH)
res <- ( dHAdH_bi (H, SumCO2, K_CO2, K_HCO3)
+ 2 * dAdH_bi (H, SumCO2, K_CO2, K_HCO3)
+ (- K_W/H^2)
+ dAdH_uni (H, SumBOH3, K_BOH3)
+ dHAdH_tri (H, SumH3PO4, K_H3PO4, K_H2PO4, K_HPO4)
+ 2 * dAdH_tri (H, SumH3PO4, K_H3PO4, K_H2PO4, K_HPO4)
+ dH2AdH_bi (H, SumSiOH4, K_SiOH4, K_SiOOH3)
+ 2 * dHAdH_bi (H, SumSiOH4, K_SiOH4, K_SiOOH3)
+ dAdH_uni (H, SumNH4, K_NH4)
+ dHAdH_bi (H, SumH2S, K_H2S, K_HS)
+ 2 * dAdH_bi (H, SumH2S, K_H2S, K_HS)
- 1
- dH3AdH_tri(H, SumH3PO4, K_H3PO4, K_H2PO4, K_HPO4)
- dHAdH_bi (H, SumH2SO4, K_H2SO4, K_HSO4)
- dHAdH_uni (H, SumHF, K_HF)
- dHAdH_uni (H, SumHNO3, K_HNO3)
- dHAdH_uni (H, SumHNO2, K_HNO2)
- 2 * dH2AdH_bi (H, SumH2SO4, K_H2SO4, K_HSO4)
)
return (res) # derivative of [TA] with respect to [H+]: the buffer factor
})
}
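# Illustrative cross-check (not part of the original AquaEnv code): the
# analytical buffer factor returned by dTAdH(ae) can be compared against a
# central finite difference of TA with respect to [H+], obtained by perturbing
# the pH of an aquaenv object. The input values below are arbitrary example
# values; the element names (pH, TA) follow the aquaenv object as used in this file.
# ae  <- aquaenv(S=35, t=15, SumCO2=0.002, pH=8.1)
# H0  <- 10^(-ae$pH); eps <- H0 * 1e-6
# TAp <- aquaenv(S=35, t=15, SumCO2=0.002, pH=-log10(H0 + eps))$TA
# TAm <- aquaenv(S=35, t=15, SumCO2=0.002, pH=-log10(H0 - eps))$TA
# (TAp - TAm) / (2*eps)   # numerical estimate of dTA/dH
# dTAdH(ae)               # analytical buffer factor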
# PRIVATE function:
# calculates the derivative of [TA] with respect to changes in the dissociation constants (Ks) times the derivative of the dissociation constants with respect to salinity S
dTAdKdKdS <- function(ae) # object of class aquaenv
{
with (ae,
{
epsilon <- S * Technicals$epsilon_fraction
TAplus <- c()
TAminus <- c()
if (((length(SumCO2) > 1) || (length(pH) > 1)) && (length(S)==1) && (length(p)==1) && (length(t)==1))
{
for (i in 1:max(length(SumCO2), length(pH)))
{
if (length(SumCO2) > 1) {co2 <- i} else {co2 <- 1}
if (length(pH) > 1) {ph <- i} else {ph <- 1}
TAplus <- c(TAplus, aquaenv(S=(S+epsilon), t=t, p=p,
SumCO2=SumCO2[[co2]], SumNH4=SumNH4, SumH2S=SumH2S, SumH3PO4=SumH3PO4, SumSiOH4=SumSiOH4, SumHNO3=SumHNO3, SumHNO2=SumHNO2,
SumBOH3=SumBOH3, SumH2SO4=SumH2SO4, SumHF=SumHF,
TA=NULL, pH=pH[[ph]], fCO2=NULL, CO2=NULL, speciation=FALSE, dsa=FALSE, ae=NULL, from.data.frame=FALSE, revelle=FALSE)$TA)
TAminus <- c(TAminus, aquaenv(S=(S-epsilon), t=t, p=p,
SumCO2=SumCO2[[co2]], SumNH4=SumNH4, SumH2S=SumH2S, SumH3PO4=SumH3PO4, SumSiOH4=SumSiOH4, SumHNO3=SumHNO3, SumHNO2=SumHNO2,
SumBOH3=SumBOH3, SumH2SO4=SumH2SO4, SumHF=SumHF,
TA=NULL, pH=pH[[ph]], fCO2=NULL, CO2=NULL, speciation=FALSE, dsa=FALSE, ae=NULL, from.data.frame=FALSE, revelle=FALSE)$TA)
}
}
else
{
for (s in 1:length(S))
{
for (pe in 1:length(p))
{
if (length(pH) > 1) {i <- max(s,pe)} else {i <- 1}
if (length(SumCO2) > 1) {x <- max(s,pe)} else {x <- 1}
TAplus <- c(TAplus, aquaenv(S=(S[[s]]+epsilon[[s]]), t=t, p=p[[pe]],
SumCO2=SumCO2[[x]], SumNH4=SumNH4, SumH2S=SumH2S, SumH3PO4=SumH3PO4, SumSiOH4=SumSiOH4, SumHNO3=SumHNO3, SumHNO2=SumHNO2,
SumBOH3=SumBOH3[[s]], SumH2SO4=SumH2SO4[[s]], SumHF=SumHF[[s]],
TA=NULL, pH=pH[[i]], fCO2=NULL, CO2=NULL, speciation=FALSE, dsa=FALSE, ae=NULL, from.data.frame=FALSE, revelle=FALSE)$TA)
TAminus <- c(TAminus, aquaenv(S=(S[[s]]-epsilon[[s]]), t=t, p=p[[pe]],
SumCO2=SumCO2[[x]], SumNH4=SumNH4, SumH2S=SumH2S, SumH3PO4=SumH3PO4, SumSiOH4=SumSiOH4, SumHNO3=SumHNO3, SumHNO2=SumHNO2,
SumBOH3=SumBOH3[[s]], SumH2SO4=SumH2SO4[[s]], SumHF=SumHF[[s]],
TA=NULL, pH=pH[[i]], fCO2=NULL, CO2=NULL, speciation=FALSE, dsa=FALSE, ae=NULL, from.data.frame=FALSE, revelle=FALSE)$TA)
}
}
}
return ((TAplus - TAminus)/(2*epsilon)) # derivative of [TA] with respect to changes in the dissociation constants (Ks) times the derivative of the dissociation constants with respect to salinity S
})
}
# PRIVATE function:
# calculates the derivative of [TA] with respect to changes in the dissociation constants (Ks) times the derivative of the dissociation constants with respect to temperature T
dTAdKdKdT <- function(ae) # object of class aquaenv
{
with (ae,
{
epsilon <- t * Technicals$epsilon_fraction
TAplus <- c()
TAminus <- c()
if (((length(SumCO2) > 1) || (length(pH) > 1)) && (length(S)==1) && (length(p)==1) && (length(t)==1))
{
for (i in 1:max(length(SumCO2), length(pH)))
{
if (length(SumCO2) > 1) {co2 <- i} else {co2 <- 1}
if (length(pH) > 1) {ph <- i} else {ph <- 1}
TAplus <- c(TAplus, aquaenv(S=S, t=(t+epsilon), p=p,
SumCO2=SumCO2[[co2]], SumNH4=SumNH4, SumH2S=SumH2S, SumH3PO4=SumH3PO4, SumSiOH4=SumSiOH4, SumHNO3=SumHNO3, SumHNO2=SumHNO2,
SumBOH3=SumBOH3, SumH2SO4=SumH2SO4, SumHF=SumHF,
TA=NULL, pH=pH[[ph]], fCO2=NULL, CO2=NULL, speciation=FALSE, dsa=FALSE, ae=NULL, from.data.frame=FALSE, revelle=FALSE)$TA)
TAminus <- c(TAminus, aquaenv(S=S, t=(t-epsilon), p=p,
SumCO2=SumCO2[[co2]], SumNH4=SumNH4, SumH2S=SumH2S, SumH3PO4=SumH3PO4, SumSiOH4=SumSiOH4, SumHNO3=SumHNO3, SumHNO2=SumHNO2,
SumBOH3=SumBOH3, SumH2SO4=SumH2SO4, SumHF=SumHF,
TA=NULL, pH=pH[[ph]], fCO2=NULL, CO2=NULL, speciation=FALSE, dsa=FALSE, ae=NULL, from.data.frame=FALSE, revelle=FALSE)$TA)
}
}
else
{
for (s in 1:length(S))
{
for (pe in 1:length(p))
{
if (length(pH) > 1) {i <- max(s,pe)} else {i <- 1}
if (length(SumCO2) > 1) {x <- max(s,pe)} else {x <- 1}
TAplus <- c(TAplus, aquaenv(S=S[[s]], t=(t+epsilon), p=p[[pe]],
SumCO2=SumCO2[[x]], SumNH4=SumNH4, SumH2S=SumH2S, SumH3PO4=SumH3PO4, SumSiOH4=SumSiOH4, SumHNO3=SumHNO3, SumHNO2=SumHNO2,
SumBOH3=SumBOH3[[s]], SumH2SO4=SumH2SO4[[s]], SumHF=SumHF[[s]],
TA=NULL, pH=pH[[i]], fCO2=NULL, CO2=NULL, speciation=FALSE, dsa=FALSE, ae=NULL, from.data.frame=FALSE, revelle=FALSE)$TA)
TAminus <- c(TAminus, aquaenv(S=S[[s]], t=(t-epsilon), p=p[[pe]],
SumCO2=SumCO2[[x]], SumNH4=SumNH4, SumH2S=SumH2S, SumH3PO4=SumH3PO4, SumSiOH4=SumSiOH4, SumHNO3=SumHNO3, SumHNO2=SumHNO2,
SumBOH3=SumBOH3[[s]], SumH2SO4=SumH2SO4[[s]], SumHF=SumHF[[s]],
TA=NULL, pH=pH[[i]], fCO2=NULL, CO2=NULL, speciation=FALSE, dsa=FALSE, ae=NULL, from.data.frame=FALSE, revelle=FALSE)$TA)
}
}
}
return ((TAplus - TAminus)/(2*epsilon)) # derivative of [TA] with respect to changes in the dissociation constants (Ks) times the derivative of the dissociation constants with respect to temperature T
})
}
# PRIVATE function:
# calculates the derivative of [TA] with respect to changes in the dissociation constants (Ks) times the derivative of the dissociation constants with respect to gauge pressure p
dTAdKdKdp <- function(ae) # object of class aquaenv
{
with (ae,
{
if (min(p) == 0)
{
epsilon <- rep(Technicals$epsilon_fraction, length(p))
}
else
{
epsilon <- p * Technicals$epsilon_fraction
}
TAplus <- c()
TAminus <- c()
if (((length(SumCO2) > 1) || (length(pH) > 1)) && (length(S)==1) && (length(p)==1) && (length(t)==1))
{
for (i in 1:max(length(SumCO2), length(pH)))
{
if (length(SumCO2) > 1) {co2 <- i} else {co2 <- 1}
if (length(pH) > 1) {ph <- i} else {ph <- 1}
TAplus <- c(TAplus, aquaenv(S=S, t=t, p=(p+epsilon),
SumCO2=SumCO2[[co2]], SumNH4=SumNH4, SumH2S=SumH2S, SumH3PO4=SumH3PO4, SumSiOH4=SumSiOH4, SumHNO3=SumHNO3, SumHNO2=SumHNO2,
SumBOH3=SumBOH3, SumH2SO4=SumH2SO4, SumHF=SumHF,
TA=NULL, pH=pH[[ph]], fCO2=NULL, CO2=NULL, speciation=FALSE, dsa=FALSE, ae=NULL, from.data.frame=FALSE, revelle=FALSE)$TA)
if(p == 0) { ptemp <- 0 } else { ptemp <- (p-epsilon) }
TAminus <- c(TAminus, aquaenv(S=S, t=t, p=ptemp,
SumCO2=SumCO2[[co2]], SumNH4=SumNH4, SumH2S=SumH2S, SumH3PO4=SumH3PO4, SumSiOH4=SumSiOH4, SumHNO3=SumHNO3, SumHNO2=SumHNO2,
SumBOH3=SumBOH3, SumH2SO4=SumH2SO4, SumHF=SumHF,
TA=NULL, pH=pH[[ph]], fCO2=NULL, CO2=NULL, speciation=FALSE, dsa=FALSE, ae=NULL, from.data.frame=FALSE, revelle=FALSE)$TA)
}
}
else
{
for (s in 1:length(S))
{
for (pe in 1:length(p))
{
if (length(pH) > 1) {i <- max(s,pe)} else {i <- 1}
if (length(SumCO2) > 1) {x <- max(s,pe)} else {x <- 1}
TAplus <- c(TAplus, aquaenv(S=S[[s]], t=t, p=(p[[pe]]+epsilon[[pe]]),
SumCO2=SumCO2[[x]], SumNH4=SumNH4, SumH2S=SumH2S, SumH3PO4=SumH3PO4, SumSiOH4=SumSiOH4, SumHNO3=SumHNO3, SumHNO2=SumHNO2,
SumBOH3=SumBOH3[[s]], SumH2SO4=SumH2SO4[[s]], SumHF=SumHF[[s]],
TA=NULL, pH=pH[[i]], fCO2=NULL, CO2=NULL, speciation=FALSE, dsa=FALSE, ae=NULL, from.data.frame=FALSE, revelle=FALSE)$TA)
if(p[[pe]] == 0) { ptemp <- 0 } else { ptemp <- (p[[pe]]-epsilon[[pe]]) }
TAminus <- c(TAminus, aquaenv(S=S[[s]], t=t, p=ptemp,
SumCO2=SumCO2[[x]], SumNH4=SumNH4, SumH2S=SumH2S, SumH3PO4=SumH3PO4, SumSiOH4=SumSiOH4, SumHNO3=SumHNO3, SumHNO2=SumHNO2,
SumBOH3=SumBOH3[[s]], SumH2SO4=SumH2SO4[[s]], SumHF=SumHF[[s]],
TA=NULL, pH=pH[[i]], fCO2=NULL, CO2=NULL, speciation=FALSE, dsa=FALSE, ae=NULL, from.data.frame=FALSE, revelle=FALSE)$TA)
}
}
}
return ((TAplus - TAminus)/(2*epsilon)) # derivative of [TA] with respect to changes in the dissociation constants (Ks) times the derivative of the dissociation constants with respect to depth d
})
}
# PRIVATE function:
# calculates the derivative of [TA] with respect to changes in the dissociation constants (Ks) times the derivative of the dissociation constants with respect to the total sulfate concentration (influence via scale conversion)
dTAdKdKdSumH2SO4 <- function(ae) # object of class aquaenv
{
with (ae,
{
epsilon <- SumH2SO4 * Technicals$epsilon_fraction
TAplus <- c()
TAminus <- c()
if (((length(SumCO2) > 1) || (length(pH) > 1)) && (length(S)==1) && (length(p)==1) && (length(t)==1))
{
for (i in 1:max(length(SumCO2), length(pH)))
{
if (length(SumCO2) > 1) {co2 <- i} else {co2 <- 1}
if (length(pH) > 1) {ph <- i} else {ph <- 1}
TAplus <- c(TAplus, aquaenv(S=S, t=t, p=p,
SumCO2=SumCO2[[co2]], SumNH4=SumNH4, SumH2S=SumH2S, SumH3PO4=SumH3PO4, SumSiOH4=SumSiOH4, SumHNO3=SumHNO3, SumHNO2=SumHNO2,
SumBOH3=SumBOH3, SumH2SO4=SumH2SO4, SumHF=SumHF,
TA=NULL, pH=pH[[ph]], fCO2=NULL, CO2=NULL, speciation=FALSE, dsa=FALSE, ae=NULL, from.data.frame=FALSE,
SumH2SO4_Koffset=epsilon, revelle=FALSE)$TA)
TAminus <- c(TAminus, aquaenv(S=S, t=t, p=p,
SumCO2=SumCO2[[co2]], SumNH4=SumNH4, SumH2S=SumH2S, SumH3PO4=SumH3PO4, SumSiOH4=SumSiOH4, SumHNO3=SumHNO3, SumHNO2=SumHNO2,
SumBOH3=SumBOH3, SumH2SO4=SumH2SO4, SumHF=SumHF,
TA=NULL, pH=pH[[ph]], fCO2=NULL, CO2=NULL, speciation=FALSE, dsa=FALSE, ae=NULL, from.data.frame=FALSE,
SumH2SO4_Koffset=-epsilon, revelle=FALSE)$TA)
}
}
else
{
for (s in 1:length(S))
{
for (pe in 1:length(p))
{
if (length(pH) > 1) {i <- max(s,pe)} else {i <- 1}
if (length(SumCO2) > 1) {x <- max(s,pe)} else {x <- 1}
TAplus <- c(TAplus, aquaenv(S=S[[s]], t=t, p=p[[pe]],
SumCO2=SumCO2[[x]], SumNH4=SumNH4, SumH2S=SumH2S, SumH3PO4=SumH3PO4, SumSiOH4=SumSiOH4, SumHNO3=SumHNO3, SumHNO2=SumHNO2,
SumBOH3=SumBOH3[[s]], SumH2SO4=SumH2SO4[[s]], SumHF=SumHF[[s]],
TA=NULL, pH=pH[[i]], fCO2=NULL, CO2=NULL, speciation=FALSE, dsa=FALSE, ae=NULL, from.data.frame=FALSE,
SumH2SO4_Koffset=epsilon[[s]], revelle=FALSE)$TA)
TAminus <- c(TAminus, aquaenv(S=S[[s]], t=t, p=p[[pe]],
SumCO2=SumCO2[[x]], SumNH4=SumNH4, SumH2S=SumH2S, SumH3PO4=SumH3PO4, SumSiOH4=SumSiOH4, SumHNO3=SumHNO3, SumHNO2=SumHNO2,
SumBOH3=SumBOH3[[s]], SumH2SO4=SumH2SO4[[s]], SumHF=SumHF[[s]],
TA=NULL, pH=pH[[i]], fCO2=NULL, CO2=NULL, speciation=FALSE, dsa=FALSE, ae=NULL, from.data.frame=FALSE,
SumH2SO4_Koffset=-epsilon[[s]], revelle=FALSE)$TA)
}
}
}
return ((TAplus - TAminus)/(2*epsilon)) # derivative of [TA] with respect to changes in the dissociation constants (Ks) times the derivative of the dissociation constants with respect to the total sulfate concentration (influence via scale conversion)
})
}
# PRIVATE function:
# calculates the derivative of [TA] with respect to changes in the dissociation constants (Ks) times the derivative of the dissociation constants with respect to the total fluoride concentration (influence via scale conversion)
dTAdKdKdSumHF <- function(ae) # object of class aquaenv
{
with (ae,
{
epsilon <- SumHF * Technicals$epsilon_fraction
TAplus <- c()
TAminus <- c()
if (((length(SumCO2) > 1) || (length(pH) > 1)) && (length(S)==1) && (length(p)==1) && (length(t)==1))
{
for (i in 1:max(length(SumCO2), length(pH)))
{
if (length(SumCO2) > 1) {co2 <- i} else {co2 <- 1}
if (length(pH) > 1) {ph <- i} else {ph <- 1}
TAplus <- c(TAplus, aquaenv(S=S, t=t, p=p,
SumCO2=SumCO2[[co2]], SumNH4=SumNH4, SumH2S=SumH2S, SumH3PO4=SumH3PO4, SumSiOH4=SumSiOH4, SumHNO3=SumHNO3, SumHNO2=SumHNO2,
SumBOH3=SumBOH3, SumH2SO4=SumH2SO4, SumHF=SumHF,
TA=NULL, pH=pH[[ph]], fCO2=NULL, CO2=NULL, speciation=FALSE, dsa=FALSE, ae=NULL, from.data.frame=FALSE,
SumHF_Koffset=epsilon, revelle=FALSE)$TA)
TAminus <- c(TAminus, aquaenv(S=S, t=t, p=p,
SumCO2=SumCO2[[co2]], SumNH4=SumNH4, SumH2S=SumH2S, SumH3PO4=SumH3PO4, SumSiOH4=SumSiOH4, SumHNO3=SumHNO3, SumHNO2=SumHNO2,
SumBOH3=SumBOH3, SumH2SO4=SumH2SO4, SumHF=SumHF,
TA=NULL, pH=pH[[ph]], fCO2=NULL, CO2=NULL, speciation=FALSE, dsa=FALSE, ae=NULL, from.data.frame=FALSE,
SumHF_Koffset=-epsilon, revelle=FALSE)$TA)
}
}
else
{
for (s in 1:length(S))
{
for (pe in 1:length(p))
{
if (length(pH) > 1) {i <- max(s,pe)} else {i <- 1}
if (length(SumCO2) > 1) {x <- max(s,pe)} else {x <- 1}
TAplus <- c(TAplus, aquaenv(S=S[[s]], t=t, p=p[[pe]],
SumCO2=SumCO2[[x]], SumNH4=SumNH4, SumH2S=SumH2S, SumH3PO4=SumH3PO4, SumSiOH4=SumSiOH4, SumHNO3=SumHNO3, SumHNO2=SumHNO2,
SumBOH3=SumBOH3[[s]], SumH2SO4=SumH2SO4[[s]], SumHF=SumHF[[s]],
TA=NULL, pH=pH[[i]], fCO2=NULL, CO2=NULL, speciation=FALSE, dsa=FALSE, ae=NULL, from.data.frame=FALSE,
SumHF_Koffset=epsilon[[s]], revelle=FALSE)$TA)
TAminus <- c(TAminus, aquaenv(S=S[[s]], t=t, p=p[[pe]],
SumCO2=SumCO2[[x]], SumNH4=SumNH4, SumH2S=SumH2S, SumH3PO4=SumH3PO4, SumSiOH4=SumSiOH4, SumHNO3=SumHNO3, SumHNO2=SumHNO2,
SumBOH3=SumBOH3[[s]], SumH2SO4=SumH2SO4[[s]], SumHF=SumHF[[s]],
TA=NULL, pH=pH[[i]], fCO2=NULL, CO2=NULL, speciation=FALSE, dsa=FALSE, ae=NULL, from.data.frame=FALSE,
SumHF_Koffset=-epsilon[[s]], revelle=FALSE)$TA)
}
}
}
return ((TAplus - TAminus)/(2*epsilon)) # derivative of [TA] with respect to changes in the dissociation constants (Ks) times the derivative of the dissociation constants with respect to the total fluoride concentration (influence via scale conversion)
})
}
# PRIVATE function:
# calculates the revelle factor
revelle <- function(ae) # object of class aquaenv
{
CO2_0 <- ae$CO2
SumCO2_0 <- ae$SumCO2
dSumCO2 <- SumCO2_0*Technicals$revelle_fraction
dCO2 <- c()
CO2new <- c()
with (ae,
{
if (((length(SumCO2) > 1) || (length(TA) > 1)) && (length(S)==1) && (length(p)==1) && (length(t)==1))
{
for (i in 1:max(length(SumCO2), length(TA)))
{
if (length(TA) > 1) {ta <- i} else {ta <- 1}
if (length(SumCO2) > 1) {co2 <- i} else {co2 <- 1}
CO2new <<- c(CO2new, aquaenv(t=t, S=S, p=p,
SumCO2=(SumCO2[[co2]]+dSumCO2[[co2]]), SumNH4=SumNH4, SumH2S=SumH2S, SumH3PO4=SumH3PO4, SumSiOH4=SumSiOH4, SumHNO3=SumHNO3, SumHNO2=SumHNO2,
SumBOH3=SumBOH3, SumH2SO4=SumH2SO4, SumHF=SumHF, k_co2=K_CO2, k_hco3 = K_HCO3,
TA=TA[[ta]], pH=NULL, fCO2=NULL, CO2=NULL, speciation=TRUE, dsa=FALSE, ae=NULL, from.data.frame=FALSE, revelle=FALSE)$CO2)
}
}
else
{
for (s in 1:length(S))
{
for (pe in 1:length(p))
{
for (te in 1:length(t))
{
if (length(TA) > 1) {i <- max(s,pe,te)} else {i <- 1}
if (length(SumCO2) > 1) {x <- max(s,pe,te)} else {x <- 1}
j <- max(s,pe,te)
CO2new <<- c(CO2new, aquaenv(t=t[[te]], S=S[[s]], p=p[[pe]],
SumCO2=(SumCO2[[x]]+dSumCO2[[x]]), SumNH4=SumNH4, SumH2S=SumH2S, SumH3PO4=SumH3PO4, SumSiOH4=SumSiOH4, SumHNO3=SumHNO3, SumHNO2=SumHNO2,
SumBOH3=SumBOH3[[s]], SumH2SO4=SumH2SO4[[s]], SumHF=SumHF[[s]], k_co2=K_CO2[[j]], k_hco3 = K_HCO3[[j]],
TA=TA[[i]], pH=NULL, fCO2=NULL, CO2=NULL, speciation=TRUE, dsa=FALSE, ae=NULL, from.data.frame=FALSE, revelle=FALSE)$CO2)
}
}
}
}
})
dCO2 <- CO2new - CO2_0
return((dCO2/dSumCO2) * (SumCO2_0/CO2_0)) # the revelle factor
}
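# Typical usage sketch (hedged, not part of the original AquaEnv code):
# revelle() is a private helper and is normally invoked through the public
# constructor by setting revelle=TRUE; the element name "revelle" in the
# returned object is assumed here, and the input values are arbitrary examples.
# ae <- aquaenv(S=35, t=15, SumCO2=0.002, TA=0.0022, revelle=TRUE)
# ae$revelle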
| /scratch/gouwar.j/cran-all/cranData/AquaEnv/R/aquaenv_private_DSApHfunctions.R |