content|filename
---|---|
compute.confusion.matrix <- function(true.classes,predicted.classes){
#confusion matrix with predicted classes in rows and true classes in columns
return(table(predicted.classes,true.classes))
}
wrongly.classified <- function(dattable,predicted.classes){
#despite its name, this returns TRUE for correctly classified rows:
#the observed class (last column of dattable) equals the prediction
return(dattable[,ncol(dattable)]==predicted.classes)
}
select.process <- function(dattable,method="InformationGain",disc.method="MDL",threshold=0.2,threshold.consis=0.05,attrs.nominal=numeric(),max.no.features=10)
{
inddat <- 1:nrow(dattable)
#CorrSF--------------------
if(method=="CorrSF")
{
sel.feat <- select.forward.Corr(dattable,disc.method,attrs.nominal)
sel.feat<-sapply(sel.feat, function(z) which(names(dattable)==z))
if(length(sel.feat)==0)
sel.feat=numeric()
else
sel.feat=sel.feat[1:min(c(max.no.features,length(sel.feat)))]
}
#ForwardSearch------------------
if (method=="ForwardSearch"){
sel.feat=select.forward.wrapper(dattable)
sel.feat<-sapply(sel.feat, function(z) which(names(dattable)==z))
sel.feat=sel.feat[1:min(c(max.no.features,length(sel.feat)))]
}
#Chi2-algorithm------------------
if(method=="Chi2-algorithm")
{
out<-chi2.algorithm(dattable,attrs.nominal,threshold.consis)
sel.feat<-sapply(out$subset, function(z) which(names(dattable)==z))
sel.feat=sel.feat[1:min(c(max.no.features,length(sel.feat)))]
}
#FastFilter------------------
if (method=="FastFilter"){
sel.feat=select.fast.filter(dattable,disc.method,threshold,attrs.nominal)
if(length(sel.feat)==0)
sel.feat=numeric()
else
sel.feat=sel.feat[1:min(c(max.no.features,nrow(sel.feat))),3]
}
#auc------------------
if (method=="auc"){
aucs.all=rep(-1,ncol(dattable)-1)
if(length(attrs.nominal)>0)
{
aucs <- compute.aucs(dattable[,-attrs.nominal,drop=FALSE])
aucs.all[-attrs.nominal]=aucs[,2]
}
else
{
aucs <- compute.aucs(dattable)
aucs.all=aucs[,2]
}
sel.feat <- order(aucs.all,decreasing=T)[1:min(c(max.no.features),length(aucs.all))]
}
#----------------HUM
if (method=="HUM"){
indexF=1:(ncol(dattable)-1)
indexClass=ncol(dattable)
indexLabel=levels(dattable[,indexClass])
index=setdiff(indexF,attrs.nominal)
out=CalculateHUM_seq(dattable,indexF[index],indexClass,indexLabel)
out.all=rep(-1,ncol(dattable)-1)
out.all[index]=out$HUM
sel.feat <- order(out.all,decreasing=T)[1:min(c(max.no.features),length(out.all))]
}
#CFS-----------------------
if (method=="CFS"){
sel.feat=select.cfs(dattable)
if(length(sel.feat)==0)
sel.feat=numeric()
else
sel.feat=sel.feat[1:min(c(max.no.features,nrow(sel.feat))),2]
}
#Relief---------------------
if (method=="Relief"){
#val <- relief(as.formula(paste(names(dattable)[ncol(dattable)]," ~ .")), dattable,neighbours.count = 5, sample.size = 10)
sel.feat<-select.relief(dattable)
sel.feat=sel.feat[1:min(c(max.no.features,nrow(sel.feat))),3]
}
#InformationGain---------------
if(method=="InformationGain")
{
sel.feat<-select.inf.gain(dattable,disc.method,attrs.nominal)
sel.feat=sel.feat[1:min(c(max.no.features,nrow(sel.feat))),3]
}
#symmetrical.uncertainty-----------------
if(method=="symmetrical.uncertainty")
{
sel.feat<-select.inf.symm(dattable,disc.method,attrs.nominal)
sel.feat=sel.feat[1:min(c(max.no.features,nrow(sel.feat))),3]
}
#ch2-----------------
if(method=="Chi-square")
{
sel.feat<-select.inf.chi2(dattable,disc.method,attrs.nominal)
sel.feat=sel.feat[1:min(c(max.no.features,nrow(sel.feat))),3]
}
return(sel.feat)
}
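#Usage sketch (illustrative only, with hypothetical synthetic data): the class
#label is expected in the last column of the data frame.
#dat <- data.frame(f1=rnorm(40), f2=rnorm(40), f3=rnorm(40),
#                  class=factor(rep(c("a","b"),each=20)))
#select.process(dat, method="auc", max.no.features=2)  #indices of the top-AUC features
#select.process(dat, method="InformationGain", disc.method="MDL", max.no.features=2)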
wrongly.classified <- function(dattable,predicted.classes){
return(dattable[,ncol(dattable)]==predicted.classes)
}
general.fun<- function(classifiers,q,selfea,dti.tr,class.tr,dti.test,class.test)
{
all.class.sample=NULL
all.misclass.sample=NULL
for (j in 1:length(classifiers)){
### Nearest shrunken centroids
switch(classifiers[j],
#pamr.train raises an error when only one feature is selected, so in that
#case the test labels are returned unchanged as the prediction
nsc = {
if(q==1)
{
class.sample=class.test
}
else
{
weight.train<-100/table(class.tr)
in.data=t(dti.tr[,selfea[1:q],drop=FALSE])
train.dat <- list(x = in.data, y = class.tr, genenames = row.names(in.data),
geneid = row.names(in.data), sampleid = colnames(in.data))
cv.out<-NULL
cv.out<-capture.output({ # To disable the output message from 'pamr.train' and 'pamr.cv'
mod.pam <- pamr.train(train.dat, threshold.scale=weight.train)
mod.cv <- pamr.cv(mod.pam, train.dat)
})
#mod.pam <- pamr.train(train.dat, threshold.scale=weight.train)
#mod.cv <- pamr.cv(mod.pam, train.dat)
Delta=0
# to find the optimized threshold
min0.pos<-which(mod.cv$error==min(mod.cv$error))
#min.pos<-min(min0.pos)
min.pos<-max(min0.pos)
if (mod.cv$size[min.pos]==1){
cat( "one element\n")
min.pos=min(min0.pos)
if (mod.cv$size[min.pos]==1){
min.pos=1
}
}
Delta=mod.cv$threshold[min.pos]
g.lst<-NULL
g.out<-NULL
#g.out<-capture.output({ # To disable the output message from 'pamr.listgenes' function
# g.lst<-pamr.listgenes(mod.pam, train.dat, Delta, genenames = FALSE)
#})
#g.lst<-pamr.listgenes(mod.pam, train.dat, Delta, genenames = FALSE)
#g.lst<-list(as.vector(g.lst[,"id"]))
#names(g.lst)<-"GENE"
res.pam<-pamr.predict(mod.pam, t(dti.test[,selfea[1:q],drop=FALSE]), Delta)
#gene.lst[i]<-list(g.lst)
class.sample=sapply(res.pam, function(z) toString(z))
}
misclass.sample <- wrongly.classified(dti.test,class.sample)
all.class.sample=cbind(all.class.sample,class.sample)
all.misclass.sample=cbind(all.misclass.sample,misclass.sample)
},
### Support vector machine
svm = {
svm.mod <- svm(as.formula("class.label~."),dti.tr[,c(selfea[1:q],ncol(dti.tr))])
class.sample <- predict(svm.mod,dti.test[,selfea[1:q],drop=FALSE],type="class")
class.sample=sapply(class.sample, function(z) toString(z))
misclass.sample <- wrongly.classified(dti.test,class.sample)
all.class.sample=cbind(all.class.sample,class.sample)
all.misclass.sample=cbind(all.misclass.sample,misclass.sample)
},
### Linear discriminant analysis
lda={
lda.mod <- lda(as.formula("class.label~."),dti.tr[,c(selfea[1:q],ncol(dti.tr))])
class.sample <- predict(lda.mod,dti.test[,selfea[1:q],drop=FALSE])$class
class.sample=sapply(class.sample, function(z) toString(z))
misclass.sample <- wrongly.classified(dti.test,class.sample)
all.class.sample=cbind(all.class.sample,class.sample)
all.misclass.sample=cbind(all.misclass.sample,misclass.sample)
},
### Random forest
rf={
rf.mod <- randomForest(as.formula("class.label~."),dti.tr[,c(selfea[1:q],ncol(dti.tr))])
class.sample <- predict(rf.mod,dti.test[,selfea[1:q],drop=FALSE],type="class")
class.sample=sapply(class.sample, function(z) toString(z))
misclass.sample <- wrongly.classified(dti.test,class.sample)
all.class.sample=cbind(all.class.sample,class.sample)
all.misclass.sample=cbind(all.misclass.sample,misclass.sample)
},
###Naive Bayes
nbc={
nbc.mod= naiveBayes(as.formula("class.label~."),dti.tr[,c(selfea[1:q],ncol(dti.tr))])
class.sample <- predict(nbc.mod,dti.test[,selfea[1:q],drop=FALSE])
class.sample=sapply(class.sample, function(z) toString(z))
misclass.sample <- wrongly.classified(dti.test,class.sample)
all.class.sample=cbind(all.class.sample,class.sample)
all.misclass.sample=cbind(all.misclass.sample,misclass.sample)
},
#Nearest Neighbour Classifier
nn= {
class.sample= knn(dti.tr[,selfea[1:q],drop=FALSE],dti.test[,selfea[1:q],drop=FALSE],dti.tr[,ncol(dti.tr)],k=5)
class.sample=sapply(class.sample, function(z) toString(z))
misclass.sample <- wrongly.classified(dti.test,class.sample)
all.class.sample=cbind(all.class.sample,class.sample)
all.misclass.sample=cbind(all.misclass.sample,misclass.sample)
},
#Multinomial Logistic Regression
mlr = {
ml=dti.tr[,c(selfea[1:q],ncol(dti.tr))]
#ml$class1 <- relevel(ml$class.label, ref = "1")
vrem=as.formula(paste("class.label ~ ", paste(names(dti.tr[,selfea[1:q],drop=FALSE]), collapse= "+")))
mlr.mod <- multinom(vrem, data = ml,trace=FALSE)
#class.sample=predict(mlr.mod, newdata = dti.test[,selfea[1:q],drop=FALSE], "probs")
class.sample=predict(mlr.mod, newdata = dti.test[,selfea[1:q],drop=FALSE])
class.sample=sapply(class.sample, function(z) toString(z))
misclass.sample <- wrongly.classified(dti.test,class.sample)
all.class.sample=cbind(all.class.sample,class.sample)
all.misclass.sample=cbind(all.misclass.sample,misclass.sample)
}
)
}
return(list( all.class.sample= all.class.sample,all.misclass.sample=all.misclass.sample))
}
classifier.loop <- function(dattable,classifiers=c("svm","lda","rf","nsc"),feature.selection=c("auc","InformationGain"),disc.method="MDL",threshold=0.3,
threshold.consis=0,attrs.nominal=numeric(),no.feat=20,flag.feature=TRUE,method.cross=c("leaveOneOut","sub-sampling","fold-crossval"))
{
dattable[,ncol(dattable)]=as.factor(dattable[,ncol(dattable)])
names(dattable)[ncol(dattable)] <- "class.label"
if(flag.feature)
{
feature.subset=no.feat #validate all feature subsets
}
else
{
feature.subset=1 #validate one whole subset
}
times.selected <- matrix(0,ncol(dattable)-1,feature.subset)
up=seq(feature.subset,1,-1)
dimnames(times.selected)=c(list(colnames(dattable)[-ncol(dattable)]),list(as.character(up)))
#for leave one out and cross-validation
classi <- array(NA,c(nrow(dattable),feature.subset,length(classifiers)))
misclassified <- array(NA,c(nrow(dattable),feature.subset,length(classifiers)))
if(length(classifiers)==1)
{
dim3=list(classifiers)
}
else
{
dim3=classifiers
}
attr(classi, "dimnames")[[3]]=dim3
attr(misclassified, "dimnames")[[3]]=dim3
#for sub-sampling
class.error<-array(0,c(nrow(dattable),feature.subset,length(classifiers)))
attr(class.error, "dimnames")[[3]]=dim3
if(method.cross=="fold-crossval")
{
num.group=10
gr=NULL
num.sel=floor(nrow(dattable)/num.group)
num.add=nrow(dattable)%%num.group
range=1:nrow(dattable)
rr=nrow(dattable)
for(i in 1:num.group)
{
vrem=sample(1:rr,size=num.sel)
sel=range[vrem]
gr=c(gr,list(sel))
range=range[-vrem]
rr=rr-num.sel
}
if(num.add>0)
{
vrem=sample(1:num.group,num.add)
for(i in 1:num.add)
{
gr[[vrem[i]]]=c(gr[[vrem[i]]],range[i])
}
}
#ptm <- proc.time()
error=array(0,c(num.group,feature.subset,length(classifiers)))
attr(error, "dimnames")[[3]]=dim3
for (i in 1:num.group){
dti.tr <- dattable[-gr[[i]],]
class.tr=dattable[-gr[[i]],ncol(dattable)]
dti.test<- dattable[gr[[i]],]
class.test=dattable[gr[[i]],ncol(dattable)]
selfea <- select.process(dti.tr,method=feature.selection,disc.method=disc.method,threshold=threshold,threshold.consis=threshold.consis,attrs.nominal=attrs.nominal,max.no.features=no.feat)
if(!is.na(selfea[1]))
{
if(flag.feature)
{
start=1
}
else
{
start=length(selfea)
}
for(q in start:length(selfea))
{
times.selected[selfea[1:q],length(selfea)-q+1] <- times.selected[selfea[1:q],length(selfea)-q+1] + 1
out=general.fun(classifiers,q,selfea,dti.tr,class.tr,dti.test,class.test)
classi[gr[[i]],length(selfea)-q+1,classifiers] <- out$all.class.sample
misclassified[gr[[i]],length(selfea)-q+1,classifiers] <- out$all.misclass.sample
error[i,length(selfea)-q+1,classifiers]=(length(gr[[i]])-colSums(out$all.misclass.sample))/length(gr[[i]])
}
}
cat(paste("Iteration",i))
}
#cat(proc.time() - ptm)
#select on threshold for feature
if(flag.feature)
{
true.classified=NULL
for(i in 1:length(selfea))
{
true.classified=cbind(true.classified,1-error[,i,classifiers])
}
}
else
{
true.classified=1-error[,1,classifiers]
dim(true.classified)=c(num.group,length(classifiers))
colnames(true.classified)=attr(error, "dimnames")[[3]]
}
classscores <- data.frame(dattable[,ncol(dattable)],classi[,1,],misclassified[,1,])
misclnames <- rep(".correct",length(classifiers))
for (i in 1:length(classifiers)){
misclnames[i] <- paste(classifiers[i],misclnames[i],sep="")
}
names(classscores) <- c("True class",classifiers,misclnames)
res <- list(predictions=classscores,no.selected=times.selected,true.classified=true.classified)
}
if(method.cross=="sub-sampling")
{
num.sample=100
num.test=1/10
times.select.inst <- rep(0,nrow(dattable))
inddat <- 1:nrow(dattable)
label=levels(dattable[,ncol(dattable)])
confus=array(0,c(length(label),length(label),length(classifiers)))
dimnames(confus)=c(list(label),list(label),list(classifiers))
index.class=NULL
size.sample=0
for(i in 1:length(label))
{
index <-subset(inddat,dattable[,ncol(dattable)]==levels(dattable[,ncol(dattable)])[i])
index.class=c(index.class,list(index))
size.sample=size.sample+ceiling(length(index)*num.test)
}
error=array(0,c(num.sample,feature.subset,length(classifiers)))
attr(error, "dimnames")[[3]]=dim3
for (ib in 1:num.sample)
{
index.test=NULL
for(i in 1:length(label))
{
index=sample(index.class[[i]],ceiling(num.test*length(index.class[[i]]))) #test cases
index.test=c(index.test,index)
}
times.select.inst[index.test] <- times.select.inst[index.test] + 1
dti.tr <- dattable[-index.test,]
class.tr=dattable[-index.test,ncol(dattable)]
dti.test<- dattable[index.test,]
class.test=dattable[index.test,ncol(dattable)]
#make the new function
selfea <- select.process(dti.tr,method=feature.selection,disc.method=disc.method,threshold=threshold,attrs.nominal=attrs.nominal,max.no.features=no.feat)
if(!is.na(selfea[1]))
{
if(flag.feature)
{
start=1
}
else
{
start=length(selfea)
}
for(q in start:length(selfea))
{
dtisel <- dti.tr[,c(selfea[1:q],ncol(dti.tr))]
times.selected[selfea[1:q],length(selfea)-q+1] <- times.selected[selfea[1:q],length(selfea)-q+1] + 1
out=general.fun(classifiers,q,selfea,dti.tr,class.tr,dti.test,class.test)
class.error[index.test,length(selfea)-q+1,classifiers]=class.error[index.test,length(selfea)-q+1,classifiers]+out$all.misclass.sample
error[ib,length(selfea)-q+1,classifiers]=length(index.test)-colSums(out$all.misclass.sample)
if(q==length(selfea))
{
for(j in 1:length(classifiers))
{
vrem=table(class.test,out$all.class.sample[,j])
confus[rownames(vrem),colnames(vrem),classifiers[j]]=confus[rownames(vrem),colnames(vrem),classifiers[j]]+vrem
}
}
}
}
cat(paste("Iteration",ib))
}
#select on threshold for feature
if(flag.feature)
{
true.classified=NULL
for(i in 1:length(selfea))
{
true.classified=cbind(true.classified,1-error[,i,classifiers]/size.sample)
}
# if(length(classifiers)==1)
# {
# vrem=matrix(1:ncol(true.classified),ncol(true.classified),1)
# }
# else
# {
# vrem=sapply(classifiers,function(z) which(colnames(true.classified)==z))
# }
# windows()
# par(mfrow=c(length(classifiers),1))
# for(i in 1:length(classifiers))
# {
# values=true.classified[,vrem[,i]]
# colnames(values)=seq(length(selfea),1,-1)
# boxplot(values,main = paste("Cross validation",classifiers[i]),ylab = "Classification accuracy",xlab="n of features",col=i+1)
# }
classscores <- data.frame(dattable[,ncol(dattable)],times.select.inst,class.error[,1,])
}
else
{
# par(mfrow=c(1,1))
true.classified=1-error[,1,classifiers]/size.sample
dim(true.classified)=c(num.sample,length(classifiers))
colnames(true.classified)=attr(error, "dimnames")[[3]]
#boxplot(true.classified,main = "Cross validation",ylab = "Classification accuracy",xlab="Classifiers",col=3)
classscores <- data.frame(dattable[,ncol(dattable)],times.select.inst,class.error[,1,])
}
time.correct <- rep(".time_correct",length(classifiers))
for (i in 1:length(classifiers)){
time.correct[i] <- paste(classifiers[i],time.correct[i],sep="")
}
names(classscores) <- c("true.label","time.selected",time.correct)
res <- list(predictions=classscores,no.selected=times.selected,true.classified=true.classified,confus=confus)
#to plot the ordered features according number of selections (for no.fea features)
#ordered=order(times.selected[,1],decreasing=T)
#data.frame(colnames(dattable)[ordered[1:no.feat]],times.selected[ordered[1:no.feat],1])
}
if(method.cross=="leaveOneOut")
{
#ptm <- proc.time()
for (i in 1:nrow(dattable)){
dti.tr <- dattable[-i,]
class.tr=dattable[-i,ncol(dattable)]
dti.test<- dattable[i,]
class.test=dattable[i,ncol(dattable)]
selfea <- select.process(dti.tr,method=feature.selection,disc.method=disc.method,threshold=threshold,attrs.nominal=attrs.nominal,max.no.features=no.feat)
if(!is.na(selfea[1]))
{
if(flag.feature)
{
start=1
}
else
{
start=length(selfea)
}
for(q in start:length(selfea))
{
times.selected[selfea[1:q],length(selfea)-q+1] <- times.selected[selfea[1:q],length(selfea)-q+1] + 1
out=general.fun(classifiers,q,selfea,dti.tr,class.tr,dti.test,class.test)
classi[i,length(selfea)-q+1,classifiers] <- out$all.class.sample
misclassified[i,length(selfea)-q+1,classifiers] <- out$all.misclass.sample
}
}
cat(paste("Iteration",i))
}
#cat(proc.time() - ptm)
#select on threshold for feature
if(flag.feature)
{
true.classified=c()
for(i in 1:length(selfea))
{
if(length(classifiers)==1)
{
true.classified=cbind(true.classified,colSums(misclassified[,i,,drop=FALSE])/nrow(dattable))
}
else
{
true.classified=cbind(true.classified,colSums(misclassified[,i,])/nrow(dattable))
}
}
# dim(true.classified)=c(length(classifiers),length(selfea))
# matplot(1:length(selfea),t(true.classified),xlab="n of features",ylab="Accuracy",type="b", col=1:length(classifiers), lty=1, pch=1:length(classifiers),
# bty="n", las=1, main="Classification with n of features",xaxt="n")
# axis(1,1:length(selfea),seq(length(selfea),1,-1))
# legend("bottomright", col=1:length(classifiers), classifiers, bg="white", lwd=1, pch=1:length(classifiers))
}
else
{
if(length(classifiers)==1)
{
true.classified=colSums(misclassified[,1,,drop=FALSE])/nrow(dattable)
}
else
{
true.classified=colSums(misclassified[,1,])/nrow(dattable)
}
# matplot(1,true.classified,pch=1:2,ylim=c(0,1))
# legend("bottomright", col=1:length(classifiers), paste(classifiers,format(true.classified,digits=2)),bg="white", lwd=1, pch=1:length(classifiers))
}
classscores <- data.frame(dattable[,ncol(dattable)],classi[,1,],misclassified[,1,])
misclnames <- rep(".correct",length(classifiers))
for (i in 1:length(classifiers)){
misclnames[i] <- paste(classifiers[i],misclnames[i],sep="")
}
names(classscores) <- c("True class",classifiers,misclnames)
res <- list(predictions=classscores,no.selected=times.selected,true.classified=true.classified)
}
return(res)
}
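#Usage sketch (illustrative only, using the hypothetical 'dat' from the sketch
#above): 10-fold cross-validation of an SVM and a random forest with AUC-based
#feature selection.
#res <- classifier.loop(dat, classifiers=c("svm","rf"), feature.selection="auc",
#                       no.feat=2, flag.feature=FALSE, method.cross="fold-crossval")
#res$true.classified   #accuracy per fold and classifier
#res$no.selected       #how often each feature was selected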
|
/scratch/gouwar.j/cran-all/cranData/Biocomb/R/class_cross.R
|
################################AUC.P-value.permutation
compute.permutation.table <- function(dattable){
datr <- dattable
for (j in 1:(ncol(dattable)-1)){
datr[,j] <- sample(datr[,j],nrow(datr),replace=F)
}
return(datr)
}
compute.auc.permutation <- function(aucs,dattable,repetitions=1000){
minp <- 1/repetitions
aucmax <- rep(0,repetitions)
for (i in 1:repetitions){
datr <- compute.permutation.table(dattable)
aucvalsrs <- compute.aucs(datr)
aucmax[i] <- max(aucvalsrs[,2])
#aucmax[i] <- max(aucvalsrs)
}
p.values <- rep(1,length(aucs))
for (i in 1:length(aucs)){
p.values[i] <- max(c(length(subset(aucmax,aucmax>aucs[i]))/repetitions,minp))
}
return(p.values)
}
################################AUC.P-value.random
multi.random.aucs.lab <- function(labs,repetitions){
n<-length(labs)
random.aucs.n <- function(scores){
aucv<-multiclass.roc(labs,scores)$auc
return(max(c(aucv,1-aucv)))
}
x <- matrix(runif(n*repetitions),nrow=n)
aucs <- sapply(data.frame(x),random.aucs.n)
ord <- order(aucs,decreasing=T)
return(aucs[ord])
}
#Holm (step-down Bonferroni) adjustment of a vector of raw p-values
bh.correction <- function(p.values){
psort <- sort(p.values,index.return=T)
p.values.bh <- p.values
previous.p.value <- 0
for (i in 1:length(p.values)){
p.values.bh[psort$ix[i]] <- p.values[psort$ix[i]]*(length(p.values)+1-i)
if (p.values.bh[psort$ix[i]]>1){p.values.bh[psort$ix[i]]<- 1}
if (p.values.bh[psort$ix[i]]<previous.p.value){p.values.bh[psort$ix[i]]<- previous.p.value}
previous.p.value <- p.values.bh[psort$ix[i]]
}
return(p.values.bh)
}
compute.auc.random <- function(aucs,dattable,repetitions=10000,correction="none"){###repetitions correction
####n and prevalence <- get.n.and.prevalence: n.prev[1]
####aucs <- compute.aucs: auc.val[,2]
labs=dattable[,ncol(dattable)]
aucs.rand <-multi.random.aucs.lab(labs,repetitions)
minpv <- 1/repetitions
pvalues.raw <- rep(1,length(aucs))
for (i in 1:length(aucs)){
pvalues.raw[i] <- max(c(length(subset(aucs.rand,aucs.rand>aucs[i]))/repetitions,minpv))
}
if (correction=="bonferroniholm"){return(bh.correction(pvalues.raw))}
if (correction=="bonferroni"){return(ifelse(pvalues.raw*length(pvalues.raw)>1,1,pvalues.raw*length(pvalues.raw)))}
return(pvalues.raw)#####just return the raw (unadjusted) p-values
}
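#Usage sketch (illustrative only, hypothetical data): p-values for the per-feature
#AUC values, from permuted features and from random scores respectively.
#dat <- data.frame(f1=rnorm(40), f2=rnorm(40),
#                  class=factor(rep(c("a","b"),each=20)))
#auc.tab <- compute.aucs(dat)
#compute.auc.permutation(auc.tab[,"AUC"], dat, repetitions=100)
#compute.auc.random(auc.tab[,"AUC"], dat, repetitions=1000, correction="bonferroni")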
|
/scratch/gouwar.j/cran-all/cranData/Biocomb/R/compute.auc.pvalues.R
|
CalcDist<-function(data,index,attrs.nominal)
{
dd=dim(data)
len=0
if(length(attrs.nominal)==0)
{
data.num=data
}
else
{
index.nominal=which(colnames(data)%in%colnames(data[,attrs.nominal,drop=FALSE]))
data.num=data[,-index.nominal]
}
#handle the numeric features
if(length(attrs.nominal)<(dd[2]-1))
{
data.num=data.num[,-ncol(data.num)]
#data.num=normalize(data.num)
dn=dim(data.num)
na.feature=which(is.na(data.num[index,]))
}
if(length(attrs.nominal)>0)
{
no.feature=which(is.na(data[index,attrs.nominal]))
}
vrem.dist=sapply(1:dd[1], function(z) {
rast=0
len=0
if(length(attrs.nominal)<(dd[2]-1))
{
na.feature1=which(is.na(data.num[z,]))
na.end=union(na.feature,na.feature1)
len=dn[2]-length(na.end)
#check na.end==dn[2]
if(len!=0)
{
if(length(na.end)==0)
{
rast=data.num[z,]-data.num[index,]
}
else
{
rast=data.num[z,-na.end]-data.num[index,-na.end]
}
rast=rast**2
rast=sum(rast)
}
}
sum=0
len.no=0
if(length(attrs.nominal)>0)
{
no.feature1=which(is.na(data[z,attrs.nominal]))
na.end=union(no.feature,no.feature1)
len.no=length(attrs.nominal)-length(na.end)
#check na.end==length(attrs.nominal)
if(len.no!=0)
{
if(length(na.end)==0)
{
sum=data[index,attrs.nominal]!=data[z,attrs.nominal]
}
else
{
sum=data[index,attrs.nominal[-na.end]]!=data[z,attrs.nominal[-na.end]]
}
sum=length(which(sum))
}
}
if((len==0)&&(len.no==0))
{
dist=NA
}
else
{
len=len+len.no
dist=(rast+sum)/len
dist=sqrt(dist)
}
})
vrem.dist
}
generate.data.miss<-function(data,percent=5,filename=NULL)
{
#add factors - nominal attributes
n.nom=sample(1:2,nrow(data),replace=TRUE)
data=cbind(data[,-ncol(data)],n.nom,data[,ncol(data)])
colnames(data)[c(ncol(data)-1,ncol(data))]=c("Nominal","Class")
data[,ncol(data)-1]=as.factor(data[,ncol(data)-1])
dd=dim(data)
percent=ceiling(percent*dd[1]*(dd[2]-1)/100)
set.seed(123)
index.row=sample(1:dd[1],percent,replace=TRUE)
vrem=table(index.row)
for(i in 1:length(vrem))
{
index.col=sample(1:(dd[2]-1),vrem[i])
data[as.numeric(names(vrem)[i]),index.col]=NA
}
len=which(is.na(data))
#write.table(data,file="leukemia_miss.txt",sep='\t',row.names = FALSE)
if(length(filename)>0)
{
write.table(data,file=filename,sep='\t',row.names = FALSE)
}
return(data)
}
input_miss<-function(matrix,method.subst="near.value",attrs.nominal=numeric(),delThre=0.2)
{
dd=dim(matrix)
if(length(attrs.nominal)>0)
{
for(i in 1:length(attrs.nominal))
{
matrix[,attrs.nominal[i]]=as.factor(matrix[,attrs.nominal[i]])
}
}
# data=matrix[,-c(attrs.nominal,dd[2])]
data=matrix
dd=dim(data)
flag.miss=TRUE
#delete the genes
na_values=sapply(1:(dd[2]-1), function(z) length(which(is.na(data[,z]))) )
index=which(na_values>delThre*dd[1])
#global matrix
if(length(index)<(dd[2]-1))
{
if(length(index)>0)
{
data=data[,-index]
}
#substitute missing values
dd=dim(data)
switch(method.subst,
#there is the error when the number of features=1
del = {
},
mean.value={
index.nominal=which(colnames(data)%in%colnames(matrix[,attrs.nominal,drop=FALSE]))
if(length(index.nominal)>0)
{
index.number=setdiff(1:(dd[2]-1),index.nominal)
for(ij in 1:length(index.nominal))
{
na.feature=which(is.na(data[,index.nominal[ij]]))
if(length(na.feature)>0)
{
tt=table(data[-na.feature,index.nominal[ij]])
tt1=names(tt)[which.max(tt)]
data[na.feature,index.nominal[ij]]=tt1
}
}
}
else
{
index.number=1:(dd[2]-1)
}
if(length(index.number)>0)
{
mean.value=apply(data[,index.number,drop=FALSE],2,mean,na.rm=TRUE)
for(ij in 1:length(index.number))
{
na.feature=which(is.na(data[,index.number[ij]]))
if(length(na.feature)>0)
{
data[na.feature,index.number[ij]]=mean.value[ij]
}
}
}
},
median.value={
},
near.value={
index.nominal=which(colnames(data)%in%colnames(matrix[,attrs.nominal,drop=FALSE]))
d.vrem=data[,-c(index.nominal,ncol(data))]
#normalize the numeric data
normalize <- function(x) {
x <- as.matrix(x)
minAttr=apply(x, 2, min,na.rm=TRUE)
maxAttr=apply(x, 2, max,na.rm=TRUE)
#x<-x-rep(minAttr,each=nrow(x))
#x<-x/rep(maxAttr-minAttr,each=nrow(x))
x <- sweep(x, 2, minAttr, FUN="-")
x=sweep(x, 2, maxAttr-minAttr, "/")
attr(x, 'normalized:min') = minAttr
attr(x, 'normalized:max') = maxAttr
return (x)
}
d.vrem=normalize(d.vrem)
data[,-c(index.nominal,ncol(data))]=d.vrem
for(i in 1:dd[1])
{
if(any(is.na(data[i,-ncol(data)])))
{
na.feature=which(is.na(data[i,-ncol(data)]))
dist.value=CalcDist(data,i,attrs.nominal)
index.no.na=which(!is.na(dist.value))
index.no.na=setdiff(index.no.na,i) #drop the row currently being imputed (by value, not position)
sort.dist=sort(dist.value[index.no.na],index.return=TRUE)
vrem=0
len=length(index.no.na)
#check for len==0
for(k in 1:length(na.feature))
{
vrem=0
vrem1=0
iter=0
nom.value=NULL
for(jk in 1:len)
{
#10 nearest neighbors
if(iter==10) break
dat.vrem=data[index.no.na[sort.dist$ix[jk]],na.feature[k]]
if(!is.na(dat.vrem))
{
if(names(data)[na.feature[k]]%in%colnames(data[,attrs.nominal,drop=FALSE]))
{
nom.value=c(nom.value,dat.vrem)
}
else
{
vrem=vrem+(1/(sort.dist$x[jk]+0.01))*dat.vrem
vrem1=vrem1+1/(sort.dist$x[jk]+0.01)
}
iter=iter+1
}
}
if(iter==0) {
#vrem=NA
flag.miss=FALSE
}
else
{
if(names(data)[na.feature[k]]%in%colnames(data[,attrs.nominal,drop=FALSE]))
{
n.value=table(nom.value)
n.index=which.max(n.value)
data[i,na.feature[k]]=names(n.value)[n.index]
}
else
{
data[i,na.feature[k]]=vrem/vrem1
}
}
}
}
#Iteration
#cat(paste("Iteration",i,"\n"))
}
#reverse normalization
#normalize the numeric data
unnormalize <- function(x,minx,maxx) {
x <- as.matrix(x)
#x<-x*rep(maxAttr-minAttr,each=nrow(x))
#x<-x+rep(minAttr,each=nrow(x))
x=sweep(x, 2, maxx-minx, "*")
x <- sweep(x, 2, minx, FUN="+")
return (x)
}
minAttr=attr(d.vrem,'normalized:min')
maxAttr=attr(d.vrem,'normalized:max')
data[,-c(index.nominal,ncol(data))]=unnormalize(data[,-c(index.nominal,ncol(data))],minAttr,maxAttr)
}
)
}
else
{
#delete all features
flag.miss=FALSE
}
return(list(data=data,flag.miss=flag.miss))
}
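#Usage sketch (illustrative only, hypothetical data): introduce ~5% missing values
#into a complete data set and impute them with the nearest-neighbour scheme.
#dat <- data.frame(f1=rnorm(40), f2=rnorm(40),
#                  class=factor(rep(c("a","b"),each=20)))
#dat.miss <- generate.data.miss(dat, percent=5)
#filled <- input_miss(dat.miss, method.subst="near.value",
#                     attrs.nominal=ncol(dat.miss)-1, delThre=0.2)
#filled$data      #imputed data
#filled$flag.miss #FALSE if some values could not be imputed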
|
/scratch/gouwar.j/cran-all/cranData/Biocomb/R/input_miss.R
|
pauc <- function(auc,n=100,n.plus=0.5,labels=numeric(),pos=numeric()){
pauc<-array(0,length(auc))
if(length(pos)==0)
{
for(i in 1:length(auc)){
np <- n.plus
if (n.plus<1){
if (n.plus<0){
np <- round(0.5*n)
}else{
np <- round(n.plus*n)
}
}
nm <- n - np
pauc[i]<-pwilcox(nm*np*(1-auc[i]),nm,np)
}
}
else
{
for(i in 1:length(auc)){
np=length(which(labels==pos[i]))
nm <- length(labels) - np
pauc[i]<-pwilcox(nm*np*(1-auc[i]),nm,np)
}
}
return(pauc)
}
pauclog <- function(auc,n=100,n.plus=0.5,labels=numeric(),pos=numeric()){
pauc<-pauc(auc,n=n,n.plus=n.plus,labels,pos)
pauclog<-array(0,length(auc))
for(i in 1:length(auc)){
pauclog[i]<-log10(pauc[i])
}
return(pauclog)
}
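#Usage sketch (illustrative only): Wilcoxon-based p-values of observed AUC values
#under random ranking, assuming 100 cases of which half belong to the positive class.
#pauc(c(0.7, 0.9), n=100, n.plus=0.5)
#pauclog(c(0.7, 0.9), n=100, n.plus=0.5)  #the same on a log10 scale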
|
/scratch/gouwar.j/cran-all/cranData/Biocomb/R/pauc.R
|
plotClass.result<-function(true.classified, cross.method, class.method, flag.feature, feat.num)
{
if((cross.method=="sub-sampling")||(cross.method=="fold-crossval"))
{
if(flag.feature)
{
if(length(class.method)==1)
{
vrem=matrix(1:ncol(true.classified),ncol(true.classified),1)
}
else
{
vrem=sapply(class.method,function(z) which(colnames(true.classified)==z))
}
par(mfrow=c(length(class.method),1))
for(i in 1:length(class.method))
{
if(feat.num==1)
{
values=true.classified[,vrem[i],drop=FALSE]
}
else
{
values=true.classified[,vrem[,i],drop=FALSE]
}
colnames(values)=seq(feat.num,1,-1)
box=boxplot(values,main = paste("Cross validation",class.method[i]),ylab = "Classification accuracy",xlab="n of features",col=i+1)
}
}
else
{
box=boxplot(true.classified,main = "Cross validation",ylab = "Classification accuracy",xlab="Classifiers",col=3)
}
}
if(cross.method=="leaveOneOut")
{
if(flag.feature)
{
dim(true.classified)=c(length(class.method),feat.num)
barplot (true.classified, beside=TRUE, col=(1:length(class.method))+1, border='white'
, xlab="n of features", ylab="Accuracy", names.arg=as.character(seq(feat.num,1,-1))
, ylim=c(0,1), main="Classification with n of features")
legend("bottomright", col=(1:length(class.method))+1, class.method, bg="white", lwd=1, pch=1:length(class.method))
}
else
{
barplot (true.classified, col=(1:length(class.method))+1, border='white'
, space=0.2, xlab="Classifiers", ylab="Accuracy", names.arg=class.method
, ylim=c(0,1), main="Classification results")
legend("bottomright", col=(1:length(class.method))+1, paste(class.method,format(true.classified,digits=3)),bg="white", lwd=1, pch=1:length(class.method))
}
}
}
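#Usage sketch (illustrative only): visualise the accuracies returned by
#classifier.loop() in class_cross.R; the arguments must match the settings used there.
#plotClass.result(res$true.classified, cross.method="fold-crossval",
#                 class.method=c("svm","rf"), flag.feature=FALSE, feat.num=2)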
|
/scratch/gouwar.j/cran-all/cranData/Biocomb/R/plotClass.result.R
|
plotRoc.curves <- function(dattable,file.name=NULL,colours=NULL,ltys=NULL,add.legend=F,
curve.names=NULL,include.auc=F,xaxis="",yaxis="",line.width=2,headline="",ispercent=F)
{
labs <- dattable[,ncol(dattable)]
scores<-dattable[,-ncol(dattable)]
if (is.null(ncol(scores))){scores <- data.frame(scores)}
if (is.null(colours)){colours <- 1:ncol(scores)}
if (is.null(ltys)){
ltys <- rep(1,ncol(scores))
if(ncol(scores)>8){
N<-ncol(scores)%/%8
for(i in 1:N){
ltys[(8*i+1):length(ltys)]<-ltys[(8*i+1):length(ltys)]+1
}
}}#####if ncol(scores)>8, the line types (ltys) are varied so the curves stay distinguishable
aucvals <- rep(0,ncol(scores))
pred <- prediction(scores[,1],labs)
aucv <- performance(pred,"tpr", "fpr",measure="auc")
aucval <- attr(aucv,"y.values")[[1]]
if (aucval<0.5){
aucval <- 1-aucval
pred <- prediction(-scores[,1],labs)
}
aucvals[1] <- round(1000*aucval)/1000
perf <- performance(pred,"tpr", "fpr")
if (!is.null(file.name)){pdf(file=file.name)}
if (xaxis=="" & yaxis==""){
xaxis = "False positive rate"
yaxis = "True positive rate"
if (ispercent){
xaxis <- paste(xaxis,"(%)",sep=" ")
yaxis <- paste(yaxis,"(%)",sep=" ")
}
}
if (ispercent){
attr(perf,"x.values")[[1]] <- attr(perf,"x.values")[[1]]*100
attr(perf,"y.values")[[1]] <- attr(perf,"y.values")[[1]]*100
}
plot(perf,lwd=line.width,col=colours[1],lty=ltys[1],xlab=xaxis,ylab=yaxis,main=headline)
if (ncol(scores)>1){
for (i in 2:ncol(scores)){
pred <- prediction(scores[,i],labs)
aucv <- performance(pred,"tpr", "fpr",measure="auc")
aucval <- attr(aucv,"y.values")[[1]]
if (aucval<0.5){
aucval <- 1-aucval
pred <- prediction(-scores[,i],labs)
}
aucvals[i] <- round(1000*aucval)/1000
perf <- performance(pred,"tpr", "fpr")
if (ispercent){
attr(perf,"x.values")[[1]] <- attr(perf,"x.values")[[1]]*100
attr(perf,"y.values")[[1]] <- attr(perf,"y.values")[[1]]*100
}
plot(perf,lwd=line.width,col=colours[i],lty=ltys[i],add=T)
}
}
if (add.legend){
if (is.null(curve.names)){curve.names=names(scores)}
leg.text <- curve.names
if (include.auc){
for (i in 1:ncol(scores)){
leg.text[i] <- paste(curve.names[i],", AUC=",aucvals[i],sep="")
}
}
legend("bottomright",leg.text,lwd=line.width,lty=ltys,col=colours,cex =(0.3+1.4/(ncol(scores)%/%15+2)))
}
if (!is.null(file.name)){dev.off()}
}
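#Usage sketch (illustrative only, hypothetical data): ROC curves for two scores
#with the class label in the last column; AUC values are added to the legend.
#dat <- data.frame(score1=rnorm(40), score2=rnorm(40),
#                  class=factor(rep(c(0,1),each=20)))
#plotRoc.curves(dat, add.legend=TRUE, include.auc=TRUE, headline="Example ROC")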
|
/scratch/gouwar.j/cran-all/cranData/Biocomb/R/plotRoc.curves.R
|
#########
#1)
#########
pareto.front<- function(scores, classes){
if (length(scores)!=length(classes)){return(print("scores and classes must be the same length"))}
if (length(classes)!=length(classes[classes==0])+length(classes[classes==1])){return(print("class values must be 0 or 1"))}
sorted.scores <- sort(scores,index.return=T)
ones.at.score <- c()
zeros.at.score <- c()
scores.no.duplicates <- c(sorted.scores$x[1])
if (classes[sorted.scores$ix[1]]==0){
ones.at.score <- c(0)
zeros.at.score <- c(1)
}else{
ones.at.score <- c(1)
zeros.at.score <- c(0)
}
last.score <- sorted.scores$x[1]
for (i in 2:length(sorted.scores$x)){
if (last.score<sorted.scores$x[i]){
scores.no.duplicates <- c(scores.no.duplicates,sorted.scores$x[i])
if (classes[sorted.scores$ix[i]]==0){
ones.at.score <- c(ones.at.score,0)
zeros.at.score <- c(zeros.at.score,1)
}else{
ones.at.score <- c(ones.at.score,1)
zeros.at.score <- c(zeros.at.score,0)
}
last.score <- sorted.scores$x[i]}else{
if (classes[sorted.scores$ix[i]]==0){
zeros.at.score[length(zeros.at.score)] <- zeros.at.score[length(zeros.at.score)]+1}else{
ones.at.score[length(ones.at.score)] <- ones.at.score[length(ones.at.score)]+1
}
}
}
increased.max.score <- scores.no.duplicates[length(scores.no.duplicates)] + 0.1
scores.no.duplicates <- c(scores.no.duplicates,increased.max.score)
ones.at.score <- c(ones.at.score,0)
zeros.at.score <- c(zeros.at.score,0)
change.at.score.zero <- !(ones.at.score==0)
change.at.score.zero[length(change.at.score.zero)] <- T
change.at.score.one.a <- !(ones.at.score>0 & zeros.at.score==0)
change.at.score.one <- c(T,change.at.score.one.a[1:(length(change.at.score.one.a)-1)])
change.at.score <- change.at.score.zero & change.at.score.one
dup.temp<-scores.no.duplicates
for (i in 2:length(scores.no.duplicates))
{scores.no.duplicates[i]<-(dup.temp[i-1]+dup.temp[i])/2 }
tp <- c()
tn <- c()
fp <- c()
fn <- c()
pareto.scores <- c()
tp.next <- sum(classes)
tn.next <- 0
fp.next <- length(classes) - tp.next
fn.next <- 0
for (i in 1:(length(change.at.score)-1)){
if (change.at.score[i]){
pareto.scores <- c(pareto.scores,scores.no.duplicates[i])
tp <- c(tp,tp.next)
tn <- c(tn,tn.next)
fp <- c(fp,fp.next)
fn <- c(fn,fn.next)
}
tp.next <- tp.next - ones.at.score[i]
tn.next <- tn.next + zeros.at.score[i]
fp.next <- fp.next - zeros.at.score[i]
fn.next <- fn.next + ones.at.score[i]
}
if (change.at.score[length(change.at.score)]){
pareto.scores <- c(pareto.scores,scores.no.duplicates[length(change.at.score)])
tp <- c(tp,tp.next)
tn <- c(tn,tn.next)
fp <- c(fp,fp.next)
fn <- c(fn,fn.next)
}
threshold <- c(pareto.scores[length(pareto.scores)])
cost.factor <- c()
tp.at.threshold <- c(tp[length(tp)])
tn.at.threshold <- c(tn[length(tn)])
fp.at.threshold <- c(fp[length(fp)])
fn.at.threshold <- c(fn[length(fn)])
if (length(pareto.scores)==1){cost.factor<-Inf}
ind <- length(pareto.scores)
while (ind>1){
cmin <- Inf
next.ind <- ind
for (i in 1:(ind-1)){
cik <- (fp[i]-fp[ind])/(fn[ind]-fn[i])
if (cik < cmin & cik > 0){
cmin <- cik
next.ind <- i
}
}
if (cmin!=Inf){
ind <- next.ind
threshold <- c(threshold,pareto.scores[ind])
cost.factor <- c(cost.factor,cmin)
tp.at.threshold <- c(tp.at.threshold,tp[ind])
tn.at.threshold <- c(tn.at.threshold,tn[ind])
fp.at.threshold <- c(fp.at.threshold,fp[ind])
fn.at.threshold <- c(fn.at.threshold,fn[ind])
}
}
cost.factor[length(threshold)]=Inf
return(data.frame(t=threshold,FP=fp.at.threshold,FN=fn.at.threshold,c=cost.factor))
}
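#Usage sketch (illustrative only): thresholds on the Pareto front of false positives
#versus false negatives for a toy score vector; the classes must be coded 0/1.
#scores  <- c(0.1, 0.4, 0.35, 0.8, 0.7, 0.2)
#classes <- c(0,   0,   1,    1,   1,   0)
#pareto.front(scores, classes)  #data frame with columns t, FP, FN, c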
#########
#2)
#########
datRCC<-function(dat,attrs, pos.class){
datRCC<-matrix(ncol=length(attrs)+1,nrow=nrow(dat))
for(i in 1:length(attrs)){
datRCC[,(i+1)]<-10^dat[,attrs[i]]
}
for(i in 1:nrow(datRCC)){
if(grepl(pos.class,dat[i,ncol(dat)])){
datRCC[i,1]<-0
}else{
datRCC[i,1]<-1
}
}
colnames(datRCC)<-c("classes",colnames(dat)[attrs])
return(datRCC)
}
cost.curve<-function(data, attrs.no, pos.Class, AAC=TRUE, n=101, add=FALSE,xlab="log2(c)",ylab="relative costs",
main="RCC",lwd=2,col="black",xlim=c(-4,4), ylim=(c(20,120)))
{
medians=FALSE
dat.rcc<-datRCC(data, attrs.no, pos.Class)
scores=dat.rcc[,2]
classes=dat.rcc[,1]
minc <- 2^(xlim[1])
maxc <- 2^(xlim[2])
at.threshold<-pareto.front(scores,classes)
cost.naive.switch <- (length(classes)-sum(classes))/sum(classes)
cost.function <- function(x){
tc <- exp(x*log(2))
denom <- length(classes)-sum(classes)
if (tc < cost.naive.switch){denom <- tc*sum(classes)}
for (i in 1:length(at.threshold$c)){
if (tc<=at.threshold$c[i]){return(100*(at.threshold$FP[i]+tc*at.threshold$FN[i])/denom)}
} }
cost.function.v <- Vectorize(cost.function)
if (medians==TRUE){
dt<-data.frame(scores,classes)
t1<-median(dt$scores[dt$classes==0])
t2<-median(dt$scores[dt$classes==1])
ind<-which.min(abs(at.threshold$t-t1))
if (t1<at.threshold$t[length(at.threshold$t)-1]){
maxc<-2*at.threshold$c[length(at.threshold$t)-1]
} else if (at.threshold$t[ind]<=t1) {
maxc<-at.threshold$c[ind]-(t1-at.threshold$t[ind])*(at.threshold$c[ind]-at.threshold$c[ind-1])/(at.threshold$t[ind-1]-at.threshold$t[ind])
} else {
maxc<-at.threshold$c[ind+1]-(t1-at.threshold$t[ind+1])*(at.threshold$c[ind+1]-at.threshold$c[ind])/(at.threshold$t[ind]-at.threshold$t[ind+1])
}
ind<-which.min(abs(at.threshold$t-t2))
if (t2>at.threshold$t[1]){
minc<-0.0001
} else if (at.threshold$t[ind]<=t2) {
minc<-at.threshold$c[ind]-(t2-at.threshold$t[ind])*(at.threshold$c[ind]-at.threshold$c[ind-1])/(at.threshold$t[ind-1]-at.threshold$t[ind])
} else {
minc<-at.threshold$c[ind+1]-(t2-at.threshold$t[ind+1])*(at.threshold$c[ind+1]-at.threshold$c[ind])/(at.threshold$t[ind]-at.threshold$t[ind+1])
}
}
c<-seq(log(minc,base=2),log(maxc,base=2),length=n)
costs<-cost.function.v(c)
if (add==FALSE){plot(c,costs,type="l",xlim=xlim, ylim=ylim, xlab=xlab,ylab=ylab, main=main,col=col,lwd=lwd)}
if (add==TRUE){lines(c,costs,col=col,lwd=lwd)}
if(AAC==TRUE){
idx = 2:length(c)
int<- as.double( (c[idx] - c[idx-1]) %*% (costs[idx] + costs[idx-1]))/2
return(((c[length(c)]-c[1])*100-int)/((c[length(c)]-c[1])*100))
}
}
aac.value<-function(rcc){
return(round(1000*rcc)/1000)
}
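#Usage sketch (illustrative only, hypothetical data): relative cost curve for one
#marker (note that datRCC() back-transforms the scores with 10^x); with AAC=TRUE the
#return value is the area above the curve, which aac.value() rounds to 3 decimals.
#dat <- data.frame(marker=rnorm(40), class=factor(rep(c("a","b"),each=20)))
#rcc <- cost.curve(dat, attrs.no=1, pos.Class="a")
#aac.value(rcc)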
|
/scratch/gouwar.j/cran-all/cranData/Biocomb/R/rcc.aac.R
|
compute.aucs <- function(dattable){
labs <- dattable[,ncol(dattable)]
aucvals <- rep(0,ncol(dattable)-1)
val=levels(labs)
#pos <- rep(val[2],ncol(dattable)-1)
pos<-factor(rep(val[2],ncol(dattable)-1),levels=val)
if(length(val)==2)
{
for (i in 1:(ncol(dattable)-1)){
pred <- prediction(dattable[,i],labs)
aucv <- performance(pred,"tpr", "fpr",measure="auc")
aucval <- attr(aucv,"y.values")[[1]]
if (aucval<0.5){
aucval <- 1-aucval
pos[i] <- val[1] ####the positive class is switched so that the reported AUC value is >=0.5 rather than <0.5
}
aucvals[i] <- aucval
}
auctab<-data.frame(names(dattable)[1:(ncol(dattable)-1)],aucvals,pos)
names(auctab)<-c("Biomarker","AUC","Positive class")
}
else
{
for (i in 1:(ncol(dattable)-1)){
#multi-class AUC from pROC::multiclass.roc, computed once per feature
aucval <- multiclass.roc(labs,dattable[,i])$auc
aucvals[i] <- aucval
}
auctab<-data.frame(names(dattable)[1:(ncol(dattable)-1)],aucvals)
names(auctab)<-c("Biomarker","AUC")
}
return(auctab)
}
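#Usage sketch (illustrative only, hypothetical data): per-feature AUC values; for
#two-class problems the level treated as the positive class is reported as well.
#dat <- data.frame(f1=rnorm(40), f2=rnorm(40),
#                  class=factor(rep(c("a","b"),each=20)))
#compute.aucs(dat)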
chi2.algorithm<- function(matrix,attrs.nominal,threshold)
{
dd=dim(matrix)
if(length(attrs.nominal)>0)
{
for(i in 1:length(attrs.nominal))
{
matrix[,attrs.nominal[i]]=as.factor(matrix[,attrs.nominal[i]])
}
}
#for inconsistency for nominal
vrem.nominal=matrix[,attrs.nominal,drop=FALSE]
if(length(attrs.nominal)>0)
{
for(i in 1:length(attrs.nominal))
{
vrem.nominal[,i]=as.numeric(vrem.nominal[,i])
}
}
#-------
data=matrix[,-c(attrs.nominal,dd[2]),drop=FALSE]
data.start=data
class=matrix[,dd[2]]
class=as.character(class)
d1=dim(data)
label=unique(class)
mat.int=matrix(0,2,length(label))
colnames(mat.int)=label
int.list=fun1_chi(data,class)
int.list.start=int.list
#Phase 1
sig.value=0.6
df=length(label)-1
chi.value=qchisq(1-sig.value, df=df)
chi.stat=fun2_chi(int.list,mat.int)
chi.stat.start=chi.stat
len_chi=sapply(chi.stat, function(z) length(z))
incons=0
step=0.1
delta=6
shag=1
calc=0
while(incons<=threshold)
{
sig.value0=sig.value
if(shag==delta)
{
step=step*0.1
delta=delta+9
}
sig.value=sig.value-step
shag=shag+1
if(sig.value<0.000000000011)
{
#browser()
}
chi.value=qchisq(1-sig.value, df=df)
check=sapply(chi.stat,function(z) length(z))
if(all(check==0))
{
break
}
out3=fun3_chi(chi.stat,int.list,data, chi.value, mat.int)
data=out3$data
chi.stat=out3$chi_stat
int.list=out3$int_list
incons=check_incons(data, vrem.nominal,class)
calc=calc+1
}
#Phase 2
data=data.start
sig.attr=rep(sig.value0,d1[2])
chi.value=qchisq(1-sig.value0, df=df)
chi.attr=rep(chi.value,d1[2])
int.list=int.list.start
chi.stat=chi.stat.start
data=fun4_chi(chi.stat,int.list,data,vrem.nominal,chi.attr,sig.attr,class,mat.int,threshold,df,step,delta,shag)
rr=sapply(1:d1[2],function(z) length(unique(data[,z])))
data.out=data[,which(rr>1),drop=FALSE]
data.out=cbind(data.out,matrix[,attrs.nominal,drop=FALSE])
return(list(data.out=data.out,subset=colnames(data.out)))
}
select.forward.Corr<- function(matrix,disc.method,attrs.nominal)
{
out=ProcessData(matrix,disc.method,attrs.nominal,FALSE)
m3=out$m3
dd=dim(m3)
if(dd[2]>1)
{
subset <- forward_path(0:(ncol(m3)-2),m3)
subset=subset+1
subset<-names(m3)[subset]
}
else
{
subset <- NULL
}
return(subset)
}
select.forward.wrapper<- function(dattable)
{
evaluator <- function(subset) {
#k-fold cross validation
results = sapply(1:k, function(i) {
test.idx <- testind[,i]
train.idx <- !test.idx
test <- dattable[test.idx, , drop=FALSE]
train <- dattable[train.idx, , drop=FALSE]
tree <- rpart(as.simple.formula(subset, names(dattable)[ncol(dattable)]), train,method = "class")
error.rate = sum(test[,ncol(dattable)] != predict(tree, test, type="c")) / nrow(test)
return(1 - error.rate)
})
return(mean(results))
}
k <- 5
splits <- runif(nrow(dattable))
testind <- sapply(1:k, function(i) {(splits >= (i - 1) / k) & (splits < i / k)})
subset <- forward.search(names(dattable)[-ncol(dattable)], evaluator)
}
CalcGain<-function(m1,m2,symm)
{
dd=length(m1)
fq1=table(m1)
fq1=fq1/dd[1]
entropyF1=-sapply(fq1, function(z) if(z==0) 0 else z*log(z))
entropyF1=sum(entropyF1)
fq2=table(m2)
fq2=fq2/dd[1]
entropyF2=-sapply(fq2, function(z) if(z==0) 0 else z*log(z))
entropyF2=sum(entropyF2)
fq=table(m1,m2)
entropyF12=0
for(i in 1:length(fq2))
{
fq0=fq[,i]/sum(fq[,i])
vrem=-sapply(fq0,function(z) if(z==0) 0 else z*log(z))
entropyF12=entropyF12+(fq2[i])*sum(vrem)
}
entropy=entropyF1-entropyF12
if(symm)
{
if((entropyF1+entropyF2)==0)
{
entropy=0
}
else
{
entropy=2*entropy/(entropyF1+entropyF2)
}
}
return(entropy)
}
ProcessData1<-function(matrix,disc.method,attrs.nominal)
{
dd=dim(matrix)
matrix=data.frame(matrix)
matrix[,dd[2]]=as.factor(matrix[,dd[2]])
#data=matrix
if(disc.method=="MDL")
{
m3 <- Discretize(as.formula(paste(names(matrix)[dd[2]],"~.")), data = matrix)
#m3<-mdlp(matrix)$Disc.data
}
if(disc.method=="equal frequency")
{
m3=matrix
for(i in 1:(dd[2]-1))
{
if(!(i%in%attrs.nominal))
{
m3[,i] <- discretize(matrix[,i], method="frequency",categories=3)
}
}
}
if(disc.method=="equal interval width")
{
m3=matrix
for(i in 1:(dd[2]-1))
{
if(!(i%in%attrs.nominal))
{
m3[,i] <- discretize(matrix[,i], categories=3)
}
}
}
#-------------------
#extract the features with one interval
sel.one=lapply(m3, function(z) (length(levels(z))==1)&&(levels(z)=="'All'"))
sel.one=which(unlist(sel.one)==TRUE)
#selected features
sel.feature=1:dd[2]
if(length(sel.one)>0)
{
sel.feature=sel.feature[-sel.one]
matrix=matrix[,-sel.one,drop=FALSE]
m3=m3[,-sel.one,drop=FALSE]
}
return (list(m3=m3,sel.feature=sel.feature))
}
ProcessData<-function(matrix,disc.method,attrs.nominal,flag=FALSE)
{
dd=dim(matrix)
matrix=data.frame(matrix)
matrix[,dd[2]]=as.factor(matrix[,dd[2]])
#data=matrix
if(disc.method=="MDL")
{
m3 <- Discretize(as.formula(paste(names(matrix)[dd[2]],"~.")), data = matrix)
#m3<-mdlp(matrix)$Disc.data
}
if(disc.method=="equal frequency")
{
m3=matrix
for(i in 1:(dd[2]-1))
{
if(!(i%in%attrs.nominal))
{
m3[,i] <- discretize(matrix[,i], breaks=3)
}
}
}
if(disc.method=="equal interval width")
{
m3=matrix
for(i in 1:(dd[2]-1))
{
if(!(i%in%attrs.nominal))
{
m3[,i] <- discretize(matrix[,i], breaks=3,method="interval")
}
}
}
#-------------------
sel.feature=1:dd[2]
if(flag)
{
#extract the features with one interval
sel.one=lapply(m3, function(z) (length(levels(z))==1)&&(levels(z)=="'All'"))
sel.one=which(unlist(sel.one)==TRUE)
#selected features
if(length(sel.one)>0)
{
sel.feature=sel.feature[-sel.one]
matrix=matrix[,-sel.one,drop=FALSE]
m3=m3[,-sel.one,drop=FALSE]
}
}
return (list(m3=m3,sel.feature=sel.feature))
}
select.cfs<-function(matrix)
{
val <- cfs(as.formula(paste(names(matrix)[ncol(matrix)]," ~ .")), matrix)
val<-sapply(val, function(z) which(names(matrix)==z))
info.val <- data.frame(names(matrix)[val],val)
names(info.val) <- c("Biomarker","Index")
return(info.val)
}
select.relief<-function(matrix)
{
val <- relief(as.formula(paste(names(matrix)[ncol(matrix)]," ~ .")), matrix,neighbours.count = 5, sample.size = 10)
val <- sort(val[[1]],decreasing=T,index.return=TRUE)
info.val <- data.frame(names(matrix)[val$ix[1:(ncol(matrix)-1)]],val$x,val$ix)
names(info.val) <- c("Biomarker","Weights","NumberFeature")
return(info.val)
}
select.inf.chi2<-function(matrix,disc.method,attrs.nominal)
{
out=ProcessData(matrix,disc.method,attrs.nominal,FALSE)
m3=out$m3
sel.feature=out$sel.feature
#algorithm
dd=dim(m3)
if(dd[2]>1)
{
#stat=sapply(1:(dd[2]-1), function(z) chisq.test(table(m3[,z],m3[,dd[2]]))$statistic)
#names(stat)=colnames(m3[,1:(dd[2]-1)])
#to compare
weights <- chi.squared(as.formula(paste(names(m3)[dd[2]],"~.")), m3)
#what features are selected
res=sort(weights$attr_importance,decreasing = TRUE,index.return=TRUE)
val=res$ix
weights.sort=res$x
num.feature=sel.feature[val] #val - sorting
info=data.frame(names(m3)[val],weights.sort,num.feature)
}
else
{
info=data.frame(character(),numeric(),numeric())
}
names(info) <- c("Biomarker","ChiSquare","NumberFeature")
return(info)
}
select.inf.symm<-function(matrix,disc.method,attrs.nominal)
{
out=ProcessData(matrix,disc.method,attrs.nominal,FALSE)
m3=out$m3
sel.feature=out$sel.feature
#algorithm
dd=dim(m3)
if(dd[2]>1)
{
#SU1=information.gain(names(matrix)[dd[2]]~., matrix) #package "FSelect"
#entropy of feature
entropy=c()
class=m3[,dd[2]]
for(j in 1:(dd[2]-1))
{
feature=m3[,j]
#Function
out=CalcGain(feature,class,TRUE)
entropy=c(entropy,out)
#--------
}
#what features are selected
res=sort(entropy,decreasing = TRUE,index.return=TRUE)
val=res$ix
entropy.sort=res$x
num.feature=sel.feature[val] #val - sorting
info=data.frame(names(m3)[val],entropy.sort,num.feature)
}
else
{
info=data.frame(character(),numeric(),numeric())
}
names(info) <- c("Biomarker","SymmetricalUncertainty","NumberFeature")
return(info)
}
select.inf.gain<-function(matrix,disc.method,attrs.nominal)
{
out=ProcessData(matrix,disc.method,attrs.nominal,FALSE)
m3=out$m3
sel.feature=out$sel.feature
#algorithm
dd=dim(m3)
if(dd[2]>1)
{
#SU1=information.gain(names(matrix)[dd[2]]~., matrix) #package "FSelect"
#entropy of feature
entropy=c()
class=m3[,dd[2]]
for(j in 1:(dd[2]-1))
{
feature=m3[,j]
#Function
out=CalcGain(feature,class,FALSE)
entropy=c(entropy,out)
#--------
}
#what features are selected
res=sort(entropy,decreasing = TRUE,index.return=TRUE)
val=res$ix
entropy.sort=res$x
num.feature=sel.feature[val] #val - sorting
info=data.frame(names(m3)[val],entropy.sort,num.feature)
}
else
{
info=data.frame(character(),numeric(),numeric())
}
names(info) <- c("Biomarker","Information.Gain","NumberFeature")
return(info)
}
select.fast.filter<-function(matrix,disc.method,threshold,attrs.nominal)
{
#second package "RWeka"
out=ProcessData(matrix,disc.method,attrs.nominal,FALSE)
m3=out$m3
sel.feature=out$sel.feature
#algorithm
dd=dim(m3)
if(dd[2]>1)
{
#SU1=information.gain(names(matrix)[dd[2]]~., matrix) #package "FSelect"
#entropy of feature
entropy=c()
class=m3[,dd[2]]
for(j in 1:(dd[2]-1))
{
feature=m3[,j]
#Function
out=CalcGain(feature,class,FALSE)
entropy=c(entropy,out)
#--------
}
ind=sapply(entropy,function(z) z>=threshold)
entropy=entropy[ind]
m3=m3[,ind,drop=FALSE]
index.F1=1
res=sort(entropy,decreasing = TRUE,index.return=TRUE)
val=res$ix
entropy.sort=res$x
while(index.F1<=length(val))
{
Fp=m3[,val[index.F1]]
index.F2=index.F1+1
while(index.F2<=length(val))
{
Fq=m3[,val[index.F2]]
SUpq=CalcGain(Fp,Fq,FALSE)
if(SUpq>=entropy.sort[index.F2])
{
val=val[-index.F2]
entropy.sort=entropy.sort[-index.F2]
index.F2=index.F2-1
}
index.F2=index.F2+1
}
index.F1=index.F1+1
}
#what features are selected, ind-features with SU(p,c)>threshold
num.feature=sel.feature[ind]
num.feature=num.feature[val] #val - sorting
info=data.frame(names(m3)[val],entropy.sort,num.feature)
}
else
{
info=data.frame(character(),numeric(),numeric())
}
names(info) <- c("Biomarker","Information.Gain","NumberFeature")
return(info)
}
|
/scratch/gouwar.j/cran-all/cranData/Biocomb/R/select.feature.info.R
|
"Fst" <- function(rval,N){
k<-N/sum(N)
Fst.val<-k%*%diag(rval)
}
|
/scratch/gouwar.j/cran-all/cranData/Biodem/R/Fst.R
|
#' Turns a Migration Matrix into a Column Stochastic Matrix
#'
#' Calculates the column stochastic matrix starting from the raw migration matrix \code{x}. For each column, it divides each term by the column sum. Then it returns the thus "normalized by column" matrix, ready to be used in the Malecot migration model.
#' @usage col.sto(x)
#' @param x the raw data migration matrix
#' @return col.sto is used on a an object of class "matrix" and returns an object of class "matrix".
#' @details The Malecot model uses a transformation of the raw migration data; in the "Malecot" library the use of a column stochastic matrix follows Imaizumi 1970 and Swedlund 1984.
#' @references Imaizumi, Y., N. E. Morton and D. E. Harris. 1970. Isolation by distance in artificial populations. Genetics 66: 569-582.
#' @references Jorde, L. B. 1982. The genetic structure of the Utah mormons: migration analysis. Human Biology 54(3): 583-597.
#' @author Federico C. F. Calboli \email{[email protected]}
#' @examples
#' data(raw.mig)
#' new.mig.mat<-col.sto(raw.mig)
#' new.mig.mat
#'
"col.sto" <- function(x){
y<-apply(x,2,sum)
x1<-t(t(x)/y)
x1
}
|
/scratch/gouwar.j/cran-all/cranData/Biodem/R/col.sto.R
|
hedrick <- function(x){
somme <- colSums(x)
M <- matrix(data=0,nrow=length(somme),ncol=length(somme))
rownames(M) <- dimnames(x)[[2]]
colnames(M) <- dimnames(x)[[2]]
for(i in 1:length(somme)){
for(j in 1:length(somme)){
M[i,j] <- (sum((x[,i]/somme[i])*(x[,j]/somme[j])))/
(0.5*(sum((x[,i]/somme[i])^2+(x[,j]/somme[j])^2)))
}
}
M
}
|
/scratch/gouwar.j/cran-all/cranData/Biodem/R/hedrick.R
|
lasker <- function(x){
somme <- colSums(x)
M <- matrix(data=0,nrow=length(somme),ncol=length(somme))
rownames(M) <- dimnames(x)[[2]]
colnames(M) <- dimnames(x)[[2]]
for(i in 1:length(somme)){
for(j in 1:length(somme)){
M[i,j] <- (sum(x[,i]*x[,j]))/(2*(somme[i]*somme[j]))
}
}
M
}
|
/scratch/gouwar.j/cran-all/cranData/Biodem/R/lasker.R
|
mal.cond <- function(PHI,N){
k<-N/sum(N) ## the relative size of each of the k populations with respect to the total population of the study area
rmu<-PHI%*%k ## k is coerced to a column vector; here the row-weighted mean phi is calculated
mu<-k%*%rmu ## k is now coerced to a row vector; here the overall mean phi is calculated
az<-matrix(rep(rmu,length(rmu)),ncol=length(rmu))
ax<-az+t(az)
mu<-as.numeric(mu)
r.mat<-(PHI+mu-ax)/(1-mu)
}
|
/scratch/gouwar.j/cran-all/cranData/Biodem/R/mal.cond.R
|
mal.eq <- function(S,P,N){
phi<-diag(0/N)
Pt<-t(P)
x<-0
repeat{
x<-x+1
S1<-mtx.exp(S,x)
P1<-mtx.exp(P,x)
Pt1<-mtx.exp(Pt,x)
D<-(1-phi)/(2*N)
D<-diag(D)
D<-diag(D) ## everything till here is similar to a normal phi calculation
toll<-phi ## I use toll as a comparison mark. toll is phi at n-1 cycles
toll1<-signif(toll,6) ## optional. I set the number of significant digits to 6
phi<-phi+(S1%*%Pt1%*%D%*%P1%*%S1) ## that's phi at n cycles
phi1<-signif(phi,6) ## optional. As for toll
if (identical(toll1,phi1)){ ## logical condition. If toll (that is, phi for n-1) and phi are identical
return(x-1) ## return the value of n-1
break ## and stop, because the Malecot model has reached its asymptote
}
}
}
|
/scratch/gouwar.j/cran-all/cranData/Biodem/R/mal.eq.R
|
mal.phi <- function(S,P,N,n){
if (n < 1){
return("Number of cycles too low!!!")
}
phi<-diag(0/N) ## creating the first phi matrix
Pt<-t(P)
x<-0 ## needed for a correct counting cycle
for (i in 1:n){
x<-x+1 ## start the counting cycle
S1<-mtx.exp(S,x) ## powering S
P1<-mtx.exp(P,x) ## powering P
Pt1<-mtx.exp(Pt,x) ## powering the transpose of P
D<-(1-phi)/(2*N) ## calculating the diagonal of the D matrix
D<-diag(D) ## extracting the diagonal of the above
D<-diag(D) ## creating the REAL D matrix, which is a diagonal matrix
phi<-phi+(S1%*%Pt1%*%D%*%P1%*%S1) ## Malecot model
}
phi
}
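## Usage sketch (illustrative only, toy inputs): 10 iterations of the Malecot model
## for two populations; P is a column stochastic migration matrix, S a diagonal
## matrix for the systematic-pressure term and N the population sizes (all values hypothetical).
## P <- col.sto(matrix(c(90, 10, 20, 80), nrow = 2))
## S <- diag(c(0.98, 0.98))
## N <- c(500, 300)
## mal.phi(S, P, N, n = 10)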
|
/scratch/gouwar.j/cran-all/cranData/Biodem/R/mal.phi.R
|
mar.iso=function(x){
Pt=rep(0,dim(x)[3])
Pr=rep(0,dim(x)[3])
for(i in 1:dim(x)[3]){
Pt[i]=(sum(diag(x[,,i])))/sum(x[,,i])
Pr[i]=(sum(rowSums(x[,,i])* colSums(x[,,i])))/
((sum(rowSums(x[,,i])))*(sum(colSums(x[,,i]))))
}
pop=dimnames(x)[[3]]
data.frame(pop,Pt,Pr)
}
|
/scratch/gouwar.j/cran-all/cranData/Biodem/R/mar.iso.R
|
"mtx.exp" <- function(X,n){
## Function to calculate the n-th power of a matrix X
if(n != round(n)) {
n <- round(n)
warning("rounding exponent `n' to", n)
}
phi <- diag(nrow = nrow(X))
pot <- X # the first power of the matrix.
while (n > 0)
{
if (n %% 2)
phi <- phi %*% pot
n <- n %/% 2
pot <- pot %*% pot
}
return(phi)
}
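## Usage sketch (illustrative only): exponentiation by squaring gives the same result
## as repeated multiplication, e.g. for a small migration-like matrix.
## M <- matrix(c(0.9, 0.1, 0.2, 0.8), nrow = 2)
## mtx.exp(M, 3) ## equals M %*% M %*% M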
|
/scratch/gouwar.j/cran-all/cranData/Biodem/R/mtx.exp.R
|
r.pairs = function(x){
RP = rep(0,dim(x)[3])
RPr = rep(0,dim(x)[3])
perc.diff = rep(0,dim(x)[3])
for (i in 1:dim(x)[3]){
RP[i] = (sum(x[,,i]*(x[,,i]-1)))/(sum(x[,,i])*(sum(x[,,i])-1))
RPr[i] = (((1/(sum(x[,,i])*(sum(x[,,i])-1)))
*sum((rowSums(x[,,i]))^2))-(1/(sum(x[,,i])-1)))*
(((1/(sum(x[,,i])*(sum(x[,,i])-1)))
*sum((colSums(x[,,i]))^2))-(1/(sum(x[,,i])-1)))
perc.diff[i] = ((RP[i]-RPr[i])/RPr[i])
}
pop = dimnames(x)[[3]]
data.frame(pop,RP,RPr,perc.diff)
}
|
/scratch/gouwar.j/cran-all/cranData/Biodem/R/r.pairs.R
|
rel.cond <- function(x,R,method="A"){
metodi <- c("A","B")
method <- pmatch(method, metodi)
if (is.na(method))
stop("not valid method")
if (method==1){
x1 <- (x-R)/(4-R)
x1
}
else{
x1 <- (x-R)/(4*(1-R))
x1
}
}
|
/scratch/gouwar.j/cran-all/cranData/Biodem/R/rel.cond.R
|
rel.phi <- function(x,R,method="A"){
metodi <- c("A","B")
method <- pmatch(method, metodi)
if (is.na(method))
stop("not valid method")
if (method==1){
x1 <- x/4
x1
}
else{
x1 <- x/4+(3*R*((x-R))/(16*(1-R)))
x1
}
}
|
/scratch/gouwar.j/cran-all/cranData/Biodem/R/rel.phi.R
|
rri <- function(x){
somme <- colSums(x)
x1 <- uri(x)
dg <- diag(x1)
conta <- matrix(0, nrow = nrow(x1), ncol = ncol(x1))
preI1 <- rep(0, length(dg))
for(i in 1:length(dg)){
preI1[i] <- (dg[i]*somme[i]*(somme[i]-1))
}
I1 <- sum(preI1)
for(i in 1:nrow(x1)){
for(j in 1:ncol(x1)){
if(i!=j)
conta[i,j] <- (x1[i,j]*somme[i]*somme[j])
else
conta[i,j] <- 0
}
}
preI2 <- rowSums(conta)
I2 <- sum(preI2)
R <- (I1 + I2)/(sum(somme) * (sum(somme)-1))
}
|
/scratch/gouwar.j/cran-all/cranData/Biodem/R/rri.R
|
sur.freq = function(x,pop,mal.sur,fem.sur,freq.table="total"){
#attach(x)
#on.exit(detach(x))
pop = factor(pop)
sur.lev = union(levels(mal.sur),levels(fem.sur))
mal.sur = factor(mal.sur,levels=sur.lev)
fem.sur = factor(fem.sur,levels=sur.lev)
tables = c("males","females","total","marriages")
freq.table = pmatch(freq.table,tables)
if (is.na(freq.table))
stop("this one does not exist!")
if (freq.table==1)
tab=table(mal.sur,pop)
if (freq.table==2)
tab=table(fem.sur,pop)
if (freq.table==3){
tot.sur = data.frame(c(as.character(mal.sur),as.character(fem.sur)),rep(pop,2))
names(tot.sur) = NULL
names(tot.sur) = c("surname","pop")
tab = table(tot.sur$surname,tot.sur$pop)
}
if (freq.table==4)
tab = table(mal.sur,fem.sur,pop)
tab
}
|
/scratch/gouwar.j/cran-all/cranData/Biodem/R/sur.freq.R
|
sur.inbr = function(x,method="B"){
metodi = c("A","B")
method = pmatch(method, metodi)
if (is.na(method))
stop("not valid method")
if (method==1){
Ft = x$Pt/4
Fr = x$Pr/4
Fn = (x$Pt-x$Pr)/(4-x$Pr)
data.frame(x$pop,Ft,Fr,Fn)
}
else{
Ft = x$Pt/4+(3*x$Pr*(x$Pt-x$Pr))/(16*(1-x$Pr))
Fr = x$Pr/4
Fn = (x$Pt-x$Pr)/(4*(1-x$Pr))
data.frame(x$pop,Ft,Fr,Fn)
}
}
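## Usage sketch (illustrative only, hypothetical vectors 'pop', 'mal.sur' and 'fem.sur'
## holding population labels and husband/wife surnames for each marriage):
## mar <- sur.freq(data.frame(pop,mal.sur,fem.sur), pop, mal.sur, fem.sur, freq.table="marriages")
## iso <- mar.iso(mar)        ## total and random isonymy per population
## sur.inbr(iso, method="B")  ## inbreeding components Ft, Fr, Fn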
|
/scratch/gouwar.j/cran-all/cranData/Biodem/R/sur.inbr.R
|
"sym.P" <- function(x){
alpha<-x[upper.tri(x)]
x1<-t(x)
beta<-x1[upper.tri(x1)]
gamma<-(alpha+beta)/2
x[upper.tri(x)]<-gamma
x2<-t(x)
x[lower.tri(x)]<-x2[lower.tri(x2)]
x
}
|
/scratch/gouwar.j/cran-all/cranData/Biodem/R/sym.P.R
|
uri <- function(x){
somme <- colSums(x)
M <- matrix(data=0,nrow=length(somme),ncol=length(somme))
for(i in 1:length(somme)){
for(j in 1:length(somme)){
if(i==j)
M[i,j] <- (sum(x[,i]*(x[,i]-1)))/(somme[i]*(somme[i]-1))
else
M[i,j] <- (sum(x[,i]*x[,j]))/(somme[i]*somme[j])
}
}
M
}
|
/scratch/gouwar.j/cran-all/cranData/Biodem/R/uri.R
|
`BiodiversityR.changeLog` <-
function()
{
change.file <- file.path(system.file(package = "BiodiversityR"), "ChangeLog")
file.show(change.file)
}
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/BiodiversityR.changeLog.R
|
`BiodiversityRGUI` <-
function(changeLog=FALSE, backward.compatibility.messages=FALSE)
{
if (backward.compatibility.messages == T) {
cat(paste("\n", "Notes on backward compatiblity from BiodiversityR version 2.8.0", "\n"))
cat(paste("\n", "In prior versions, function ensemble.calibrate.models was function ensemble.test"))
cat(paste("\n", "For possible backward compatibility assign ensemble.test <- ensemble.calibrate.models"))
cat(paste("\n", "In prior versions, function ensemble.calibrate.weights was function ensemble.test.splits"))
cat(paste("\n", "In prior versions, slot ensemble.calibrate.weights$AUC.table was ensemble.calibrate.weights$table"))
cat(paste("\n", "In prior versions, argument SSB.reduce was CIRCLES.at"))
cat(paste("\n\n", "(The earlier name of ensemble.test originated from the first [2012] version of ensemble suitability"))
cat(paste("\n", "modelling where both ensemble.raster and ensemble.test internally calibrated and evaluated [tested]"))
cat(paste("\n", "models, but only ensemble.raster went ahead with creating suitability raster layers.)", "\n\n\n"))
}
if (changeLog == T) {BiodiversityR.changeLog()}
if (! requireNamespace("vegan")) {stop("Please install the vegan package")}
# if (! requireNamespace("vegan3d")) {stop("Please install the vegan3d package")}
if (! requireNamespace("dismo")) {stop("Please install the dismo package")}
if (! requireNamespace("colorspace")) {stop("Please install the colorspace package")}
options(Rcmdr=list(etc=file.path(path.package(package="BiodiversityR"),
"etc"), sort.names=FALSE))
if ("Rcmdr" %in% .packages()) {
stop("R commander should not have been loaded yet")
}else{
Rcmdr::Commander()
}
}
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/BiodiversityRGUI.R
|
`CAPdiscrim` <-
function(formula, data, dist="bray", axes=4, m=0, mmax=10,
add=FALSE, permutations=0, aitchison_pseudocount=1) {
# if (!require(MASS)) {stop("Requires package MASS")}
CAPresult=function(points, y, group, axes=axes, m=1, eig) {
lda1 <- MASS::lda(y[,group]~points[,1:m], CV=T, tol=1.0e-25)
lda2 <- MASS::lda(y[,group]~points[,1:m], tol=1.0e-25)
matches <- (lda1$class == y[,group])
correct <- sum(matches) / length(matches) * 100
lda3 <- predict(lda2, y[, group])
rownames(lda3$x) <- rownames(points)
tot <- sum(eig)
varm <- sum(eig[1:m])/tot*100
result <- list(PCoA=points[,1:axes], m=m, tot=tot, varm=varm, group=y[,group], CV=lda1$class, percent=correct,
x=lda3$x, F=(lda2$svd)^2, lda.CV=lda1, lda.other=lda2)
return(result)
}
x <- eval(as.name((all.vars(formula)[1])))
group <- all.vars(formula)[2]
y <- data
if (inherits(x, "dist")) {
distmatrix <- x
x <- data.frame(as.matrix(x))
}else{
distmatrix <- vegdist(x, method = dist, pseudocount=aitchison_pseudocount)
}
distmatrix <- as.dist(distmatrix, diag=F, upper=F)
pcoa <- cmdscale(distmatrix, k=nrow(x)-1, eig=T, add=add)
points <- pcoa$points
mmax <- min(ncol(points), mmax)
rownames(points) <- rownames(x)
eig <- pcoa$eig
if (m==0) {
correct <- -1
for (i in 1:mmax) {
if (eig[i] > 0) {
result1 <- CAPresult(points=points, y=y, group=group, axes=axes, m=i, eig=eig)
if (result1$percent > correct) {
correct <- result1$percent
result <- result1
}
}
}
}else{
result <- CAPresult(points=points, y=y, group=group, axes=axes, m=m, eig=eig)
}
if (permutations>0) {
permutations <- permutations-1
permresult <- numeric(permutations)
y1 <- y
n <- nrow(y1)
for (j in 1: permutations){
y1[1:n,] <- y[sample(n),]
if (m==0) {
correct <- -1
for (i in 1:(nrow(x)-1)) {
if (eig[i] > 0) {
result1 <- CAPresult(points=points,y=y1,group=group,axes=axes,m=i,eig=eig)
if (result1$percent > correct) {
correct <- result1$percent
resultp <- result1
}
}
}
}else{
resultp <- CAPresult(points=points,y=y1,group=group,axes=axes,m=m,eig=eig)
}
permresult[j] <- resultp$percent
}
signi <- sum(permresult > result$percent)
signi <- (1+signi)/(1+permutations)
cat("Percentage of correct classifications was", result$percent, "\n")
cat("Significance of this percentage was", signi, "\n\n")
permresult <- summary(permresult)
}else{
signi <- NA
permresult <- NULL
}
m <- result$m
if (m>1) {
result1 <- summary(manova(points[,1:m]~y[,group]))
}else{
result1 <- summary(lm(points[,1]~y[,group]))
}
# Classification success
cat(paste("Overall classification success (m=", m, ") : ", result$percent, " percent", "\n", sep=""))
level.percent <- numeric(length(levels(y[, group])))
for (l in 1:length(levels(y[, group]))) {
level <- levels(y[, group])[l]
index <- result$group == level
classification <- result$CV[index]
correct <- length(which(classification == level)) / length(classification) * 100
cat(paste(level, " (n=", length(classification), ") correct: ", correct, " percent", "\n", sep=""))
level.percent[l] <- correct
names(level.percent)[l] <- level
}
#
result2 <- list(PCoA=result$PCoA, m=m, tot=result$tot, varm=result$varm, group=result$group, CV=result$CV,
percent=result$percent, percent.level=level.percent,
x=result$x, F=result$F, lda.CV=result$lda.CV, lda.other=result$lda.other,
manova=result1, signi=signi, permutations=permresult)
return(result2)
}
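# Illustrative sketch (not run), using the dune data sets that ship with vegan;
# Management is a factor of dune.env and MASS is needed for the internal lda calls.
# library(vegan)
# data(dune); data(dune.env)
# cap.result <- CAPdiscrim(dune ~ Management, data=dune.env, dist="bray",
#                          axes=2, m=0, permutations=99)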
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/CAPdiscrim.R
|
`NMSrandom` <-
function(x,perm=100,k=2,stressresult=F,method="isoMDS"){
# if (!require(MASS)) {stop("Requires package MASS")}
minstress <- 100
stress <- array(dim=perm)
for (j in 1:perm) {
if (method=="isoMDS") {result <- MASS::isoMDS(x,initMDS(x,k=k),k=k,maxit=1000,trace=F)}
if (method=="sammon") {result <- MASS::sammon(x,initMDS(x,k=k),k=k,niter=1000,trace=F)}
stress[j] <- result$stress
if (result$stress < minstress) {
minstress <- result$stress
minresult <- result
}
}
rownames(minresult$points) <- rownames(as.matrix(x))
if (stressresult==F) {return(minresult)}
if (stressresult==T) {return(stress)}
}
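# Illustrative sketch (not run): repeat non-metric multidimensional scaling from
# random starting configurations and keep the solution with the lowest stress.
# library(vegan)
# data(dune)
# d <- vegdist(dune, method="bray")
# nms.best <- NMSrandom(d, perm=50, k=2, method="isoMDS")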
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/NMSrandom.R
|
`PCAsignificance` <-
function(pca,axes=8) {
eigen <- pca$CA$eig
tot <- sum(eigen)
p <- length(eigen)
if (p < axes) {axes <- p}
varexplained <- array(dim=c(7,p))
varexplained[1,] <- eigen[1:p]
varexplained[2,] <- varexplained[1,]/tot*100
varexplained[3,1] <- varexplained[2,1]
for (i in 2:p) {varexplained[3,i] <- varexplained[3,i-1]+varexplained[2,i]}
for (i in 1:p) {varexplained[6,i] <- 1/i}
for (i in 1:p) {varexplained[4,i] <- sum(varexplained[6,i:p])/p*100}
varexplained[5,1] <- varexplained[4,1]
for (i in 2:p) {varexplained[5,i] <- varexplained[5,i-1]+varexplained[4,i]}
for (i in 1:p) {
if(varexplained[2,i]>varexplained[4,i]) {
varexplained[6,i] <- TRUE
}else{
varexplained[6,i] <- FALSE
}
if(varexplained[3,i]>varexplained[5,i]) {
varexplained[7,i] <- TRUE
}else{
varexplained[7,i] <- FALSE
}
}
rownames(varexplained) <- c("eigenvalue","percentage of variance","cumulative percentage of variance",
"broken-stick percentage","broken-stick cumulative %","% > bs%","cum% > bs cum%")
colnames(varexplained) <- c(1:p)
return(varexplained[,1:axes])
}
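# Illustrative sketch (not run): compare the variance explained by PCA axes with
# the broken-stick model, using an unconstrained rda from vegan.
# library(vegan)
# data(dune)
# pca.result <- rda(dune)
# PCAsignificance(pca.result, axes=4)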
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/PCAsignificance.R
|
`accumcomp` <-
function(x, y="", factor, scale="", method="exact", permutations=100, conditioned=T, gamma="boot",
plotit=T, labelit=T, legend=T, rainbow=T, xlim=c(1,max), ylim=c(0,rich), type="p",
xlab="sites", ylab="species richness", cex.lab=1, cex.axis=1, ...)
{
groups <- table(y[,factor])
min <- min(groups)
max <- max(groups)
m <- length(groups)
levels <- names(groups)
result <- array(NA,dim=c(m,max,3))
dimnames(result) <- list(level=levels, obs=c(1:max), c("Sites","Richness","sd"))
names(dimnames(result)) <- c(factor,"obs","")
for (i in 1:m) {
result1 <- accumresult(x, y, factor, level=levels[i], scale=scale, method=method, permutations=permutations, conditioned=conditioned, gamma=gamma)
l <- length(result1$sites)
result[i,c(1:l),1] <- result1$sites
result[i,c(1:l),2] <- result1$richness
if (method!="collector" && method!="poisson" && method!="binomial" && method!="negbinomial") {result[i,c(1:l),3] <- result1$sd}
}
if (plotit == T) {
max <- max(result[,,1],na.rm=T)
rich <- max(result[,,2],na.rm=T)
for (i in 1:m) {
result1 <- accumresult(x, y, factor, level=levels[i], scale=scale, method=method, permutations=permutations, conditioned=conditioned, gamma=gamma)
if (plotit == T) {
if (i == 1) {addit <- F}
if (i > 1) {addit <- T}
if (labelit==T) {
labels <- levels[i]
}else{
labels <- ""
}
if (rainbow==T) {
grDevices::palette(colorspace::rainbow_hcl(m, c=90, l=50))
accumplot(result1, addit=addit, xlab=xlab, ylab=ylab, xlim=xlim, ylim=ylim, labels=labels,
col=i, pch=i, type=type, cex.lab=cex.lab, cex.axis=cex.axis,...)
}else{
accumplot(result1, addit=addit, xlab=xlab, ylab=ylab, xlim=xlim, ylim=ylim, labels=labels, pch=i, type=type,...)
}
}
}
if (rainbow==T && legend==T) {legend(graphics::locator(1), legend=levels, pch=c(1:m), col=c(1:m))}
if (rainbow==F && legend==T) {legend(graphics::locator(1), legend=levels, pch=c(1:m))}
}
grDevices::palette("default")
return(result)
}
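# Illustrative sketch (not run): compare species accumulation curves among the
# levels of a factor; legend=FALSE avoids the interactive locator() call.
# library(vegan)
# data(dune); data(dune.env)
# accumcomp(dune, y=dune.env, factor="Management", method="exact", legend=FALSE)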
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/accumcomp.R
|
`accumplot` <-
function(xr, addit=F, labels="", col=1, ci=2, pch=1, type="p", cex=1,
xlim=c(1,xmax), ylim=c(1,rich), xlab="sites", ylab="species richness",
cex.lab=1, cex.axis=1, ...)
{
x <- xr
xmax <- max(x$sites)
rich <- max(x$richness)
if(addit==F) {graphics::plot(x$sites, x$richness, xlab=xlab, ylab=ylab, bty="l",
type=type, col=col, pch=pch, cex=cex, xlim=xlim, ylim=ylim, cex.lab=cex.lab, cex.axis=cex.axis)}
if(addit==T) {graphics::points(x$sites, x$richness, type=type, col=col, pch=pch, cex=cex)}
graphics::plot(x, add=T, ci=ci, col=col, ...)
if(labels!="") {
l <- length(x$sites)
graphics::text(x$sites[1], x$richness[1], labels=labels, col=col, pos=2, cex=cex)
graphics::text(x$sites[l], x$richness[l], labels=labels, col=col, pos=4, cex=cex)
}
}
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/accumplot.R
|
`accumresult` <-
function(x,y="",factor="",level,scale="",method="exact",permutations=100,conditioned=T,gamma="boot",...){
op <- options()
options(warn=-1)
subs <- c(1:nrow(x))
if(inherits(y, "data.frame") && factor != "") {
subs <- y[,factor]==level
for (q in 1:length(subs)) {
if(is.na(subs[q])) {subs[q]<-F}
}
x <- x[subs,,drop=F]
freq <- apply(x,2,sum)
subs2 <- freq>0
x <- x[,subs2,drop=F]
}
if(dim(as.matrix(x))[1]==0) {
result <- list(call = match.call(), method = method, sites = 0, richness = NA, sd = NA, perm = NA)
return(result)
}
result <- specaccum(x,method=method,permutations=permutations,conditioned=conditioned,gamma=gamma,...)
if (scale != "") {
y <- y[subs,,drop=F]
tot <- mean(y[,scale])
result$sites <- result$sites * tot
}
options(op)
return(result)
}
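# Illustrative sketch (not run): species accumulation restricted to the sites of
# one factor level ("NM" is one of the Management levels in dune.env).
# library(vegan)
# data(dune); data(dune.env)
# acc.NM <- accumresult(dune, y=dune.env, factor="Management", level="NM",
#                       method="exact")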
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/accumresult.R
|
`add.spec.scores` <-
function(ordi,comm,method="cor.scores",multi=1,Rscale=F,scaling="1") {
ordiscores <- scores(ordi,display="sites")
n <- ncol(comm)
p <- ncol(ordiscores)
specscores <- array(NA,dim=c(n,p))
rownames(specscores) <- colnames(comm)
colnames(specscores) <- colnames(ordiscores)
if (method == "cor.scores") {
for (i in 1:n) {
for (j in 1:p) {specscores[i,j] <- cor(comm[,i],ordiscores[,j],method="pearson")}
}
}
if (method == "wa.scores") {specscores <- wascores(ordiscores,comm)}
if (method == "pcoa.scores") {
rownames(ordiscores) <- rownames(comm)
eigenv <- ordi$eig
accounted <- sum(eigenv)
tot <- 2*(accounted/ordi$GOF[2])-(accounted/ordi$GOF[1])
eigen.var <- eigenv/(nrow(comm)-1)
neg <- length(eigenv[eigenv<0])
pos <- length(eigenv[eigenv>0])
tot <- tot/(nrow(comm)-1)
eigen.percen <- 100*eigen.var/tot
eigen.cumpercen <- cumsum(eigen.percen)
constant <- ((nrow(comm)-1)*tot)^0.25
ordiscores <- ordiscores * (nrow(comm)-1)^-0.5 * tot^-0.5 * constant
p1 <- min(p, pos)
for (i in 1:n) {
for (j in 1:p1) {
specscores[i,j] <- cor(comm[,i],ordiscores[,j])*sd(comm[,i])/sd(ordiscores[,j])
if(is.na(specscores[i,j])) {specscores[i,j]<-0}
}
}
if (Rscale==T && scaling=="2") {
percen <- eigen.var/tot
percen <- percen^0.5
ordiscores <- sweep(ordiscores,2,percen,"/")
specscores <- sweep(specscores,2,percen,"*")
}
if (Rscale==F) {
specscores <- specscores / constant
ordiscores <- ordi$points
}
ordi$points <- ordiscores
ordi$eig <- eigen.var
ordi$eig.percen <- eigen.percen
ordi$eig.cumpercen <- eigen.cumpercen
ordi$eigen.total <- tot
ordi$R.constant <- constant
ordi$Rscale <- Rscale
ordi$scaling <- scaling
}
specscores <- specscores * multi
ordi$cproj <- specscores
return(ordi)
}
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/add.spec.scores.R
|
`balanced.specaccum` <-
function (comm, permutations=100, strata=strata, grouped=TRUE, reps=0, scale=NULL) {
accumulator <- function(x, ind) {
rowSums(apply(x[ind, ], 2, cumsum) > 0)
}
stratified.sample <- function(factor,grouped=TRUE,reps=0) {
n <- length(factor)
levs <- levels(droplevels(factor))
minimum <- min(summary(factor))
if (reps > 0) {
alllevs <- summary(factor)
goodlevs <- alllevs > (reps-1)
levs <- names(alllevs[goodlevs])
minimum <- reps
}
nl <- length(levs)
seq2 <- array(nl*minimum)
seq1 <- sample(n)
strat <- sample(nl)
count <- 0
for (i in 1:nl) {
for (j in 1:n) {
if (factor[seq1[j]]==levs[strat[i]]) {
count <- count+1
if (count > i*minimum) {count <- count-1}
seq2[count] <- seq1[j]
}
}
}
if (grouped==FALSE) {
seq3 <- sample(seq2)
seq2 <- seq3
}
return(seq2)
}
x <- comm
x <- as.matrix(x)
n <- nrow(x)
p <- ncol(x)
if (p == 1) {
x <- t(x)
n <- nrow(x)
p <- ncol(x)
}
specaccum <- sdaccum <- sites <- perm <- NULL
if (n == 1)
stop(paste("only 1 site provided"))
if (is.factor(strata) != TRUE)
stop(paste("strata should be a categorical variable"))
n1 <- length(stratified.sample(strata,grouped,reps))
perm <- array(dim = c(n1, permutations))
for (i in 1:permutations) {
perm[, i] <- accumulator(x, stratified.sample(strata,grouped,reps))
}
sites <- 1:n1
specaccum <- apply(perm, 1, mean)
sdaccum <- apply(perm, 1, sd)
out <- list(call = match.call(), method = "balanced species accumulation", sites = sites,
richness = specaccum, sd = sdaccum, perm = perm)
class(out) <- "specaccum"
if (is.null(scale)!=TRUE) {
n <- length(strata)
levs <- levels(droplevels(strata))
if (reps > 0) {
alllevs <- summary(strata)
goodlevs <- alllevs > (reps-1)
levs <- names(alllevs[goodlevs])
}
nlevs <- length(levs)
tot <- 0
for (i in 1:nlevs) {
ind <- strata==levs[i]
tot <- tot + mean(scale[ind])
}
tot <- tot/nlevs
out$sites <- out$sites * tot
}
out
}
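# Illustrative sketch (not run): species accumulation where every permutation
# draws an equal number of sites from each stratum.
# library(vegan)
# data(dune); data(dune.env)
# bal.acc <- balanced.specaccum(dune, permutations=100,
#                               strata=dune.env$Management, grouped=TRUE)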
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/balanced.specaccum.R
|
`caprescale` <-
function (x, verbose = FALSE)
{
if (is.null(x$CCA)) {
nr <- nrow(x$CA$u)
nunpeiv <- x$CA$rank
neiv <- npeiv <- length(x$CA$eig)
sumpeiv <- sum(x$CA$eig[1:nunpeiv])
sumeiv <- sum(x$CA$eig)
}
else {
nr <- nrow(x$CCA$u)
nceiv <- length(x$CCA$eig)
nunpeiv <- x$CA$rank
nuneiv <- length(x$CA$length)
npeiv <- nceiv + nunpeiv
neiv <- nceiv + nuneiv
sumpeiv <- sum(x$CA$eig[1:nunpeiv]) + sum(x$CCA$eig)
sumeiv <- sum(x$CA$eig) + sum(x$CCA$eig)
}
const <- attributes(scores(x))$const
if (is.null(x$CCA) == F) {
x$CCA$v <- x$CCA$v * const
x$CCA$wa <- x$CCA$wa * const
x$CCA$u <- x$CCA$u * const
x$CCA$biplot <- x$CCA$biplot * const
x$CCA$centroids <- x$CCA$centroids * const
}
x$CA$v <- x$CA$v * const
x$CA$u <- x$CA$u * const
if (verbose == T) {
distmat <- as.matrix(vegdist(summary(x, axes = npeiv,
scaling = 1)$sites, method = "euc"))
ssdist <- sum((distmat)^2)/(2 * nrow(distmat))
if (substr(x$inertia, 1, 4) == "mean") {
sstot <- sumpeiv * (nr - 1)
sumeiv <- sumeiv * (nr - 1)
}else {
sstot <- sumpeiv
ssdist <- ssdist / (nr-1)
}
cat("SSTot obtained from sum of all eigenvalues:", sumeiv, "\n")
cat("SSTot obtained from sum of all positive eigenvalues:", sstot, "\n")
cat("SSTot reflected by distances among site scores on all axes:",
ssdist, "\n")
if (is.null(x$CCA) == F) {
distmat <- as.matrix(vegdist(summary(x, axes = nceiv,
scaling = 1)$constraints, method = "euc"))
ssdistcf <- sum((distmat)^2)/(2 * nrow(distmat))
distmat <- as.matrix(vegdist(summary(x, axes = nceiv,
scaling = 1)$sites, method = "euc"))
ssdistcs <- sum((distmat)^2)/(2 * nrow(distmat))
distmat <- as.matrix(vegdist(summary(x, axes = npeiv,
scaling = 1)$sites[, ((nceiv + 1):npeiv)], method = "euc"))
ssdistus <- sum((distmat)^2)/(2 * nrow(distmat))
if (substr(x$inertia, 1, 4) == "mean") {
sstotc <- sum(x$CCA$eig) * (nr - 1)
sstotu <- sum(x$CA$eig[1:nunpeiv]) * (nr - 1)
}
else {
sstotc <- sum(x$CCA$eig)
ssdistcs <- ssdistcs / (nr-1)
ssdistcf <- ssdistcf / (nr-1)
sstotu <- sum(x$CA$eig[1:nunpeiv])
ssdistus <- ssdistus / (nr-1)
}
cat("SSExpl obtained from eigenvalues of constrained axes:",
sstotc, "\n")
cat("SSExpl reflected by distances among site scores on constrained axes (scaling 1):",
ssdistcs, "\n")
cat("SSExpl reflected by distances among fitted site scores on constrained axes (scaling 1):",
ssdistcf, "\n")
cat("SSRes obtained from eigenvalues of positive unconstrained axes:",
sstotu, "\n")
cat("SSRes reflected by distances among site scores on positive unconstrained axes (scaling 1):",
ssdistus, "\n")
}
}
return(x)
}
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/caprescale.R
|
`check.datasets` <-
function(x, y) {
factor.count <- 0
factor.species <- NULL
na.count <- 0
na.species <- NULL
neg.count <- 0
neg.species <- NULL
all.good <- TRUE
for (i in 1:ncol(x)) {
if(is.factor(x[,i])) {
factor.count <- factor.count+1
factor.species <- c(factor.species, colnames(x)[i])
}
if(any(is.na(x[,i]))) {
na.count <- na.count+1
na.species <- c(na.species, colnames(x)[i])
}
if(any(x[,i] < 0, na.rm=T)) {
neg.count <- neg.count+1
neg.species <- c(neg.species, colnames(x)[i])
}
}
if (factor.count > 0) {
all.good <- FALSE
cat("Warning:", factor.count, "variable(s) of the community dataset ( out of a total of", ncol(x), ") are factors\n")
print(factor.species)
}
if (na.count > 0) {
all.good <- FALSE
cat("Warning:", na.count, "variable(s) of the community dataset ( out of a total of", ncol(x), ") with missing values\n")
print(na.species)
}
if (neg.count > 0) {
all.good <- FALSE
cat("Warning:", neg.count, "variable(s) of the community dataset ( out of a total of", ncol(x), ") with negative values\n")
print(neg.species)
}
if(nrow(x)!=nrow(y)){
all.good <- FALSE
cat("Warning: community and environmental datasets have different numbers of rows\n")
}else{
if(any(rownames(x)!=rownames(y))){
all.good <- FALSE
cat("Warning: rownames for community and environmental datasets are different\n")
}
}
if (all.good == TRUE) {cat("OK\n")}
}
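# Illustrative sketch (not run): basic consistency checks between a community
# matrix and the matching environmental data frame.
# library(vegan)
# data(dune); data(dune.env)
# check.datasets(dune, dune.env)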
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/check.datasets.R
|
`check.ordiscores` <-
function(x, ord, check.species=TRUE) {
sitescores <- scores(ord,display="sites")
if(nrow(x)!=nrow(sitescores)){
cat("Warning: community data set and ordination result have different number of sites\n")
}else{
if(any(rownames(x)!=rownames(sitescores))){
cat("Warning: names for sites are different in community data set and ordination result\n")
}
}
if (check.species==T){
specscores <- scores(ord,display="species")
if(ncol(x)!=nrow(specscores)){
cat("Warning: community data set and ordination result have different number of species\n")
}else{
if(any(colnames(x)!=rownames(specscores))){
cat("Warning: names for species are different in community data set and ordination result\n")
}
}
}
}
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/check.ordiscores.R
|
`crosstabanalysis` <-
function(x,variable,factor){
cross <- table(x[,variable]>0,x[,factor])
result <- chisq.test(cross)
return(result)
}
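# Illustrative sketch (not run): chi-square test of presence/absence of one
# column against a categorical factor from the same data frame; 'site.data' with
# columns 'Poatriv' and 'Management' is hypothetical here.
# crosstabanalysis(site.data, variable="Poatriv", factor="Management")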
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/crosstabanalysis.R
|
`deviancepercentage` <-
function(x,data,test="F",digits=2){
nexpl <- x$deviance
tot <- x$null.deviance
expl <- tot-nexpl
ratio <- round((expl/tot*100),digits=1)
expl <- round(expl,digits=digits)
tot <- round(tot,digits=digits)
cat("Deviance explained: ",expl,"/",tot,"(",ratio, "percent)\n\n")
formula0 <- paste(names(x$model)[1],"~ 1")
model0 <- glm(as.formula(formula0),family=x$family,data=na.omit(data))
result <- anova(model0,x,test=test)
return(result)
}
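# Illustrative sketch (not run): percentage of the null deviance explained by a
# GLM, followed by an analysis of deviance against the null model; 'glm.fit' and
# 'site.data' are hypothetical.
# glm.fit <- glm(Poatriv ~ Management, family=poisson, data=site.data)
# deviancepercentage(glm.fit, data=site.data, test="Chisq")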
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/deviancepercentage.R
|
`dist.eval` <-
function(x, dist){
x <- as.matrix(x)
tots <- rowSums(x)
if(any(tots==0)) {
cat("Warning: the community matrix contains some sites with only zero abundances\n")
cat("You may want to use functions removezerospecies or dist.zeroes from Biodiversity.R\n")
}else{
tests <- c("manhattan", "euclidean", "canberra", "clark", "bray", "kulczynski", "jaccard", "gower", "altGower", "morisita", "horn", "mountford", "raup" , "binomial",
"chao", "cao", "mahalanobis", "hellinger")
op <- options()
options(warn=-1)
if (any(dist==tests)) {
dist1 <- vegdist(x, method=dist)
}else{
if((dist %in% c("w", "-1", "c", "wb", "r", "I", "e", "t", "me", "j", "sor", "m", "-2", "co", "cc", "g", "-3", "l", "19", "hk", "rlb", "sim", "gl", "z")) == F) {
stop("Provide acceptable method for betadiver")
}
dist1 <- betadiver(x, method=dist)
}
dist2 <- no.shared(x)
list1 <- (dist2==0)
list2 <- (dist2==1)
max <- max(dist1[list1])
min <- min(dist1[list2])
options(op)
if(min<max) {
cat("Warning: min distance for sites with no shared species(",min,") < max dist for other sites(", max, ")\n")
cat("Choose other distance measure or stepacross\n")
}
return(distconnected(dist1))
}
}
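# Illustrative sketch (not run): check whether sites that share no species could
# distort the chosen distance measure and whether the distance matrix is connected.
# library(vegan)
# data(dune)
# dist.eval(dune, dist="bray")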
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/dist.eval.R
|
`dist.zeroes` <-
function(comm,dist) {
tots <- rowSums(comm)
dist <- as.matrix(dist)
l <- length(tots)
for (i in 1:(l-1)) {
if (tots[i]==0) {
for (j in 2:l) {
if ((tots[j]==0) && (is.finite(dist[i,j])==F)) {
dist[i,j] <- 0
dist[j,i] <- 0
}
if ((tots[j]>0) && (is.finite(dist[i,j])==F)) {
dist[i,j] <- 1
dist[j,i] <- 1
}
}
}
}
dist <- as.dist(dist)
return(dist)
}
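# Illustrative sketch (not run): replace undefined distances caused by empty
# sites; between two empty sites the distance becomes 0, between an empty and a
# non-empty site it becomes 1.
# library(vegan)
# comm <- matrix(c(1, 0, 0, 2, 3, 0, 0, 1), nrow=4)   # sites 2 and 3 are empty
# d <- vegdist(comm, method="bray")                   # NaN for the empty pair
# dist.zeroes(comm, d)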
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/dist.zeroes.R
|
`distdisplayed` <-
function(x, ordiplot, distx="bray", plotit=T, addit=F, method="spearman", permutations=100, abline=F, gam=T,...) {
if (gam==T) {
# if (!require(mgcv)) {stop("Requires package mgcv")}
}
if (inherits(x, "dist")) {
dist1 <- x
xlab <- attr(x,"method")
if (is.null(xlab)) {
xlab <- "distance matrix 1"
}else{
xlab <- paste(xlab, "distance")
}
}else{
dist1 <- vegdist(x, method = distx)
xlab <- "original distance"
}
if (inherits(ordiplot, "dist")) {
dist2 <- ordiplot
ylab <- attr(dist2,"method")
if (is.null(ylab)) {
ylab <- "distance matrix 2"
}else{
ylab <- paste(ylab, "distance")
}
}else{
ordiscores <- scores(ordiplot,display="sites")
dist2 <- vegdist(ordiscores,method="euclidean")
ylab <- "distance in ordination plot"
}
if (plotit==T) {
if (addit==F) {
graphics::plot(dist1, dist2, xlab=xlab, ylab=ylab)
}
if (abline==T) {abline(0,1)}
if (gam==T){
data <- data.frame(cbind(as.numeric(dist1), as.numeric(dist2)))
names(data) <- c("dist1", "dist2")
seq <- order(data[,1])
sorted <- data
sorted[1:nrow(data),] <- data[seq,]
gamresult <- mgcv::gam(as.formula(dist2 ~ s(dist1)), data=sorted)
newdata <- data.frame(seq(min(sorted[,1]), max(sorted[,1]), length = 1000))
colnames(newdata) <- "dist1"
gamresult2 <- predict(gamresult, newdata)
graphics::points(newdata$dist1,gamresult2,type="l",lwd=2,col="red")
}
}
result2 <- mantel(dist1, dist2, method=method, permutations=permutations,...)
if (gam==T) {
return(list(gamanalysis=summary(gamresult), mantelanalysis=result2))
}else{
return(result2)
}
}
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/distdisplayed.R
|
`disttransform` <-
function(x, method="hellinger") {
x <- as.matrix(x)
METHODS <- c("hellinger", "chord", "profiles", "chi.square", "log", "square", "pa",
"Braun.Blanquet", "Domin", "Hult", "Hill", "fix", "coverscale.log", "dispweight")
method <- match.arg(method,METHODS)
switch(method, hellinger = {
x <- decostand(x,"hellinger")
}, profiles = {
x <- decostand(x,"total")
}, chord = {
x2 <- x^2
rowtot <- apply(x2,1,sum)
for (i in 1:length(rowtot)) {if (rowtot[i]==0) {rowtot[i] <- 1}}
rowtot <- rowtot^0.5
x <- x/rowtot
}, chi.square = {
x <- decostand(x, "chi.square")
}, log = {
x <- log(x+1)
}, square = {
x <- x^0.5
}, pa = {
x <- decostand(x, "pa")
#
}, Braun.Blanquet = {
x <- coverscale(x, "Braun.Blanquet")
}, Domin = {
x <- coverscale(x, "Domin")
}, Hult = {
x <- coverscale(x, "Hult")
}, Hill = {
x <- coverscale(x, "Hill")
}, fix = {
x <- coverscale(x, "fix")
}, coverscale.log = {
x <- coverscale(x, "log")
}, dispweight = {
x <- dispweight(x)
})
#
for (i in 1:ncol(x)) {x[,i] <- as.numeric(x[,i])}
x <- data.frame(x)
return(x)
}
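# Illustrative sketch (not run): transform a community matrix before calculating
# ecological distances.
# library(vegan)
# data(dune)
# dune.hel <- disttransform(dune, method="hellinger")
# dune.pa  <- disttransform(dune, method="pa")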
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/disttransform.R
|
`diversitycomp` <- function(
x, y=NULL, factor1=NULL, factor2=NULL,
index=c("Shannon", "Simpson", "inverseSimpson", "Logalpha", "Berger",
"simpson.unb", "simpson.unb.inverse",
"richness", "abundance", "Jevenness", "Eevenness",
"jack1", "jack2", "chao", "boot"),
method=c("pooled", "mean", "sd", "max", "jackknife"),
sortit=FALSE, digits=8)
{
INDEX <- c("Shannon", "Simpson", "inverseSimpson", "Logalpha", "Berger",
"simpson.unb", "simpson.unb.inverse",
"richness", "abundance", "Jevenness", "Eevenness",
"jack1", "jack2", "chao", "boot")
if ((index %in% INDEX) == F) {stop(paste("choose an accepted index, not index: ", index, sep=""))}
METHOD <- c("pooled", "mean", "sd", "max", "jackknife")
if ((method %in% METHOD) == F) {stop(paste("choose an accepted method, not method: ", method, sep=""))}
if (is.null(y) == F) {
if((factor1 %in% names(y)) == F) {stop("specified factor1 '", factor1, "' is not a variable of the environmental data frame")}
if(is.factor(y[, factor1]) == F) {stop("specified factor1 '", factor1, "' is not a factor")}
y[, factor1] <- as.factor(as.character(y[, factor1]))
if (is.null(factor2) == F) {
if((factor2 %in% names(y)) == F) {stop("specified factor2 '", factor2, "' is not a variable of the environmental data frame")}
if(is.factor(y[, factor2]) == F) {stop("specified factor2 '", factor2, "' is not a factor")}
y[, factor2] <- as.factor(as.character(y[, factor2]))
}
}
if (is.null(factor2) == T) {
groups <- table(y[, factor1])
m <- length(groups)
levels <- as.character(names(groups))
result <- array(NA, dim=c(m, 2))
result[, 1] <- groups
dimnames(result) <- list(factor1=levels, c("n", index))
names(dimnames(result)) <- c(factor1, "")
for (i in 1:m) {
if (method %in% c("pooled", "mean", "max", "sd")) {result[i, 2] <- as.numeric(diversityresult(x, y, factor=factor1, level=levels[i], method=method, index=index, digits=digits))}
if (method=="jackknife") {
resultx <- diversityresult(x, y, factor=factor1, level=levels[i], method="jackknife", index=index, digits=digits)
result[i, 2] <- as.numeric(resultx$jack.estimate)
}
}
if (sortit == T) {
result2 <- result
seq <- order(result[, 2])
for (i in 1:m) {
result[1:m, ] <- result2[seq, ]
}
rownames(result) <- rownames(result2)[seq]
}
return(result)
}else{
if (method == "jackknife") {stop("jackknife analysis problematic with two factors")}
groups <- table(y[, factor1], y[, factor2])
levels1 <- rownames(groups)
levels2 <- colnames(groups)
m1 <- length(levels1)
m2 <- length(levels2)
result <- array(NA, dim=c(m1, m2, 2))
result[,,1] <- groups
dimnames(result) <- list(factor1=levels1, factor2=levels2, c("n", index))
names(dimnames(result)) <- c(factor1, factor2, "")
for (i in 1:m1) {
for (j in 1:m2) {
if (as.numeric(groups[i, j]) > 0) {
subs <- y[, factor1] == as.character(levels1[i])
x1 <- x[subs, , drop=F]
y1 <- y[subs, , drop=F]
result[i, j, 2] <- as.numeric(diversityresult(x1, y=y1, factor=factor2, level=levels2[j], method=method, index=index, digits=digits))
}
}
}
return(result)
}
}
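# Illustrative sketch (not run): Shannon diversity calculated for each level of
# a factor, pooling the sites within each level.
# library(vegan)
# data(dune); data(dune.env)
# diversitycomp(dune, y=dune.env, factor1="Management", index="Shannon",
#               method="pooled")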
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/diversitycomp.R
|
`diversityresult` <- function(
x, y=NULL, factor=NULL, level=NULL,
index=c("Shannon", "Simpson", "inverseSimpson", "Logalpha", "Berger",
"simpson.unb", "simpson.unb.inverse",
"richness", "abundance", "Jevenness", "Eevenness",
"jack1", "jack2", "chao", "boot"),
method=c("pooled", "each site", "mean", "sd", "max", "jackknife"),
sortit=FALSE, digits=8)
{
INDEX <- c("Shannon", "Simpson", "inverseSimpson", "Logalpha", "Berger",
"simpson.unb", "simpson.unb.inverse",
"richness", "abundance", "Jevenness", "Eevenness",
"jack1", "jack2", "chao", "boot")
if ((index %in% INDEX) == F) {stop(paste("choose an accepted index, not index: ", index, sep=""))}
METHOD <- c("pooled", "each site", "mean", "sd", "max", "jackknife")
if ((method %in% METHOD) == F) {stop(paste("choose an accepted method, not method: ", method, sep=""))}
if (is.null(y) == F) {
if((factor %in% names(y)) == F) {stop("specified factor '", factor, "' is not a variable of the environmental data frame")}
if(is.factor(y[, factor]) == F) {stop("specified factor '", factor, "' is not a factor")}
levels1 <- as.character(levels(as.factor(as.character(y[, factor]))))
if((level %in% levels1) == F) {stop("specified level '", level, "' is not an available factor level")}
}
if (index %in% c("jack1", "jack2", "chao", "boot") && method != "pooled") {
cat(paste("\n", "Note that default method for index '", index, "' is method 'pooled'", "\n\n", sep=""))
method <- "pooled"
}
diversityresult0=function(
x, index, method)
{
marg <- 1
if (method=="pooled" && index!="jack1" && index!="jack2" && index!="chao" && index!="boot") {
x <- apply(x, 2, sum)
marg <- 2
}
if (index == "Shannon") {result <- diversity(x, index="shannon", MARGIN=marg)}
if (index == "Simpson") {result <- diversity(x, index="simpson", MARGIN=marg)}
if (index == "inverseSimpson") {result <- diversity(x, index="invsimpson", MARGIN=marg)}
if (index == "simpson.unb") {result <- simpson.unb(x)}
if (index == "simpson.unb.inverse") {result <- simpson.unb(x, inverse=TRUE)}
if (index == "Logalpha") {result <- fisher.alpha(x, MARGIN=1)}
if (index == "Berger") {
if (marg == 2) {
result <- max(x)/sum(x)
}else{
tots <- as.matrix(apply(x, marg, sum))
result <- as.matrix(apply(x, marg, max))
result <- as.matrix(result/tots)[,1]
}
}
if (index == "richness") {
if (marg == 2) {
result <- sum(x>0)
}else{
result <- as.matrix(apply(x>0, marg, sum))
result <- result[,1]
}
}
if (index == "abundance") {
if (marg == 2) {
result <- sum(x)
}else{
result <- as.matrix(apply(x, marg, sum))
result <- result[,1]
}
}
if (index == "Jevenness") {
result1 <- diversity(x, index="shannon", MARGIN=marg)
if (marg == 2) {
result2 <- sum(x>0)
}else{
result2 <- as.matrix(apply(x>0, marg, sum))
result2 <- result2[,1]
}
result <- result1/log(result2)
}
if (index == "Eevenness") {
result1 <- diversity(x, index="shannon", MARGIN=marg)
if (marg == 2) {
result2 <- sum(x>0)
}else{
result2 <- as.matrix(apply(x>0, marg, sum))
result2 <- result2[,1]
}
result <- exp(result1)/result2
}
if (index == "jack1") {result <- specpool(x)$jack1}
if (index == "jack2") {result <- specpool(x)$jack2}
if (index == "chao") {result <- specpool(x)$chao}
if (index == "boot") {result <- specpool(x)$boot}
return(result)
}
options(digits=digits)
if(is.null(y) == F) {
subs <- y[, factor] == as.character(level)
for (q in 1:length(subs)) {
if(is.na(subs[q])) {subs[q]<-F}
}
x <- x[subs, , drop=F]
freq <- apply(x, 2, sum)
subs <- freq > 0
x <- x[, subs, drop=F]
}
x <- as.matrix(x)
if(dim(x)[1]==0) {
result <- array(NA,dim=c(1,1))
colnames(result) <- index
rownames(result) <- "none"
return(result)
}
if (method == "jackknife") {
# if (! require(bootstrap)) {stop("Please install the bootstrap package")}
thetadiv <- function(x, xdata, index) {
xdata2 <- xdata[x, 1:ncol(xdata)]
diversityresult0(xdata2, index=index, method="pooled")
}
if (nrow(x) > 1) {
result2 <- bootstrap::jackknife(1:nrow(x), thetadiv, x, index=index)
result2$jack.estimate <- mean(as.numeric(result2$jack.values), na.rm=T)
}else{
result2 <- list(jack.values=NA, jack.estimate=NA)
}
}
if (method != "jackknife") {
result <- diversityresult0(x, index=index, method=method)
}
if (method == "mean") {
result2 <- result
result <- result2[1]
result[1] <- mean(result2)
}
if (method == "max") {
result2 <- result
result <- result2[1]
result[1] <- max(result2)
}
if (method == "sd") {
result2 <- result
result <- result2[1]
result[1] <- sd(result2)
}
if (sortit == T && method != "jackknife" && method != "pooled") {result <- sort(result)}
if (method!="jackknife") {
result2 <- round(result, digits=digits)
result2 <- data.frame(result2)
colnames(result2) <- index
}
if (method=="pooled") {rownames(result2) <- "pooled"}
if (method=="mean") {rownames(result2) <- "mean"}
if (method=="max") {rownames(result2) <- "max"}
if (method=="sd") {rownames(result2) <- "sd"}
if (method!="pooled" && method!="jackknife" && method!="mean" && method!="max" && method!="sd") {rownames(result2) <- names(result)}
return(result2)
}
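# Illustrative sketch (not run): Shannon diversity pooled over all sites, and
# separately for the sites of one Management level of dune.env.
# library(vegan)
# data(dune); data(dune.env)
# diversityresult(dune, index="Shannon", method="pooled")
# diversityresult(dune, y=dune.env, factor="Management", level="NM",
#                 index="Shannon", method="each site")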
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/diversityresult.R
|
`diversityvariables` <-
function(x, y, digits=8){
y$richness <- diversityresult(x, index='richness', method='each site', digits=digits)[,1]
y$Shannon <- diversityresult(x, index='Shannon', method='each site', digits=digits)[,1]
y$Simpson <- diversityresult(x, index='Simpson', method='each site', digits=digits)[,1]
y$inverseSimpson <- diversityresult(x, index='inverseSimpson', method='each site', digits=digits)[,1]
y$simpson.unb <- diversityresult(x, index='simpson.unb', method='each site', digits=digits)[,1]
y$simpson.unb.inverse <- diversityresult(x, index='simpson.unb.inverse', method='each site', digits=digits)[,1]
y$Logalpha <- diversityresult(x, index='Logalpha', method='each site', digits=digits)[,1]
y$Berger <- diversityresult(x, index='Berger', method='each site', digits=digits)[,1]
y$Jevenness <- diversityresult(x, index='Jevenness', method='each site', digits=digits)[,1]
y$Eevenness <- diversityresult(x, index='Eevenness', method='each site', digits=digits)[,1]
return(y)
}
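# Illustrative sketch (not run): append the per-site diversity statistics as new
# columns to the environmental data frame.
# library(vegan)
# data(dune); data(dune.env)
# dune.env2 <- diversityvariables(dune, dune.env)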
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/diversityvariables.R
|
`ensemble.PET.season` <- function(
PREC.stack=NULL, PET.stack=NULL,
filename=NULL, overwrite=TRUE,
CATCH.OFF=FALSE, ...
)
{
.BiodiversityR <- new.env()
# if (! require(dismo)) {stop("Please install the dismo package")}
if(inherits(PREC.stack, "RasterStack") == FALSE && inherits(PREC.stack, "SpatRaster") == FALSE) {stop("PREC.stack is not a RasterStack or SpatRaster object")}
if(inherits(PET.stack, "RasterStack") == FALSE && inherits(PET.stack, "SpatRaster") == FALSE) {stop("PET.stack is not a RasterStack or SpatRaster object")}
names(PREC.stack) <- paste0("PREC", 1:length(names(PREC.stack)))
names(PET.stack) <- paste0("PET", 1:length(names(PET.stack)))
if (inherits(PREC.stack, "RasterStack")) {
x <- raster::stack(c(PREC.stack, PET.stack))
}else{
x <- terra::rast(list(PREC.stack, PET.stack))
}
PET.season.object <- list(PREC.names=names(PREC.stack), PET.names=names(PET.stack))
predict.PET.season <- function(object=PET.season.object, newdata=newdata) {
PREC.names <- object$PREC.names
PET.names <- object$PET.names
result <- array(nrow(newdata))
for (i in 1:nrow(newdata)) {
datai <- newdata[i,,drop=F]
datai.PREC <- datai[, PREC.names]
datai.PET <- datai[, PET.names]
datai.BAL <- datai.PET - datai.PREC
datai.DRY <- datai.BAL > 0
period.DRY <- rep(0, length(datai.DRY))
count.period <- 1
for (j in 1:length(period.DRY)) {
if (datai.DRY[j] == 1) {
period.DRY[j] <- count.period
}else{
count.period <- count.period+1
}
}
if (datai.DRY[1] == TRUE && datai.DRY[length(period.DRY)] == TRUE) {
old.period <- period.DRY[length(period.DRY)]
period.DRY[period.DRY == old.period] <- 1
}
unique.periods <- c(unique(period.DRY[period.DRY !=0]))
bal.max <- 0
if (length(unique.periods) > 0) {
for (j in 1:length(unique.periods)) {
bal.period <- sum(datai.BAL[period.DRY == unique.periods[j]])
if (bal.period > bal.max) {bal.max <- bal.period}
}
}
result[i] <- -1 * bal.max
}
return(result)
}
#
# predict
if (inherits(PREC.stack, "RasterStack")) {
if (CATCH.OFF == F) {
tryCatch(PET.season.raster <- raster::predict(object=x, model=PET.season.object, fun=predict.PET.season, na.rm=TRUE,
filename=filename, overwrite=overwrite, ...),
error= function(err) {print(paste("prediction of aridity deficit failed"))},
silent=F)
}else{
PET.season.raster <- raster::predict(object=x, model=PET.season.object, fun=predict.PET.season, na.rm=TRUE,
filename=filename, overwrite=overwrite, ...)
}
}else{
if (CATCH.OFF == F) {
tryCatch(PET.season.raster <- terra::predict(object=x, model=PET.season.object, fun=predict.PET.season, na.rm=TRUE,
filename=filename, overwrite=overwrite, ...),
error= function(err) {print(paste("prediction of aridity deficit failed"))},
silent=F)
}else{
PET.season.raster <- terra::predict(object=x, model=PET.season.object, fun=predict.PET.season, na.rm=TRUE,
filename=filename, overwrite=overwrite, ...)
}
}
names(PET.season.raster) <- "PET.season"
return(PET.season.raster)
}
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/ensemble.PET.season.R
|
`ensemble.PET.seasons` <- function(
PREC.stack=NULL, PET.stack=NULL,
index=c("seasons", "start1", "length1", "start2", "length2", "start3", "length3"),
filename=NULL, overwrite=TRUE,
CATCH.OFF=FALSE, ...
)
{
.BiodiversityR <- new.env()
# if (! require(dismo)) {stop("Please install the dismo package")}
if(inherits(PREC.stack, "RasterStack") == FALSE && inherits(PREC.stack, "SpatRaster") == FALSE) {stop("PREC.stack is not a RasterStack or SpatRaster object")}
if(inherits(PET.stack, "RasterStack") == FALSE && inherits(PET.stack, "SpatRaster") == FALSE) {stop("PET.stack is not a RasterStack or SpatRaster object")}
names(PREC.stack) <- paste0("PREC", 1:length(names(PREC.stack)))
names(PET.stack) <- paste0("PET", 1:length(names(PET.stack)))
if (inherits(PREC.stack, "RasterStack") == TRUE) {
x <- raster::stack(c(PREC.stack, PET.stack))
}else{
x <- terra::rast(list(PREC.stack, PET.stack))
}
PET.season.object <- list(PREC.names=names(PREC.stack), PET.names=names(PET.stack))
indices <- c("seasons", "start1", "length1", "start2", "length2", "start3", "length3")
index1 <- indices[match(index, indices)]
predict.PET.seasons <- function(object=PET.season.object, newdata=newdata, index=index1) {
PREC.names <- object$PREC.names
PET.names <- object$PET.names
result <- array(nrow(newdata))
for (i in 1:nrow(newdata)) {
datai <- newdata[i,,drop=F]
datai.PREC <- datai[, PREC.names]
datai.PET <- datai[, PET.names]
datai.BAL <- datai.PET - 2 * datai.PREC
datai.DRY <- datai.BAL > 0
period.DRY <- rep(0, length(datai.DRY))
count.period <- 1
for (j in 1:length(period.DRY)) {
if (datai.DRY[j] == 1) {
period.DRY[j] <- count.period
}else{
count.period <- count.period+1
}
}
if (datai.DRY[1] == TRUE && datai.DRY[length(period.DRY)] == TRUE) {
old.period <- period.DRY[length(period.DRY)]
period.DRY[period.DRY == old.period] <- 1
}
unique.periods <- sort(unique(period.DRY))
unique.periods <- unique.periods[unique.periods != 0]
for (j in 1:length(unique.periods)) {
period.DRY[period.DRY == unique.periods[j]] <- -1*j
}
if (period.DRY[1] == 0) {period.DRY[1] <- abs(min(period.DRY))}
if (period.DRY[1] == 0) {period.DRY[1] <- 1}
for (j in 2:length(period.DRY)) {
if (period.DRY[j] == 0) {period.DRY[j] <- abs(period.DRY[j-1])}
}
names(result) <- paste0("PE", c(1:length(result)))
if (i == 1) {
result <- period.DRY
}else{
result <- rbind(result, period.DRY)
}
}
result2 <- data.frame(result)
result2$seasons <- apply(result, FUN="max", MARGIN=1)
result2[result2$seasons == -1, "seasons"] <- 0
result2$start3 <- result2$start2 <- result2$start1 <- NA
result2$length3 <- result2$length2 <- result2$length1 <- NA
for (i in 1:nrow(result2)) {
resulti <- result[i, ]
resulti.pos <- resulti
resulti.pos[resulti.pos < 0] <- NA
if (result2[i, "seasons"] !=0) {
resulti.pos1 <- resulti.pos
resulti.pos1[resulti.pos1 > 1] <- NA
s1 <- which.min(resulti.pos1 == 1)
if (s1 == 1) {
resulti.neg <- resulti
resulti.neg[resulti.neg > 0] <- 0
resulti.neg[resulti.neg < 0] <- 1
resulti.neg <- data.frame(t(resulti.neg))
e1 <- max(0, max.col(resulti.neg, ties.method="last"), na.rm=T)
if (e1 < length(resulti.neg)) {s1 <- e1+1}
}
result2[i, "start1"] <- s1
result2[i, "length1"] <- sum(resulti.pos1 == 1, na.rm=T)
}
if (result2[i, "seasons"] > 1) {
resulti.pos2 <- resulti.pos
resulti.pos2[resulti.pos2 != 2] <- NA
s2 <- which.min(resulti.pos2 == 2)
result2[i, "start2"] <- s2
result2[i, "length2"] <- sum(resulti.pos2 == 2, na.rm=T)
}
if (result2[i, "seasons"] > 2) {
resulti.pos3 <- resulti.pos
resulti.pos3[resulti.pos3 != 3] <- NA
s3 <- which.min(resulti.pos3 == 3)
result2[i, "start3"] <- s3
result2[i, "length3"] <- sum(resulti.pos3 == 3, na.rm=T)
}
}
rownames(result2) <- NULL
result3 <- result2[, index]
return(result3)
}
#
# predict
if (inherits(PREC.stack, "RasterStack") == TRUE) {
if (CATCH.OFF == F) {
tryCatch(PET.seasons.raster <- raster::predict(object=x, model=PET.season.object, fun=predict.PET.seasons, na.rm=TRUE,
filename=filename, overwrite=overwrite, ...),
error= function(err) {print(paste("prediction of", index1, "failed"))},
silent=F)
}else{
PET.seasons.raster <- raster::predict(object=x, model=PET.season.object, fun=predict.PET.seasons, na.rm=TRUE,
filename=filename, overwrite=overwrite, ...)
}
}else{
if (CATCH.OFF == F) {
tryCatch(PET.seasons.raster <- terra::predict(object=x, model=PET.season.object, fun=predict.PET.seasons, na.rm=TRUE,
filename=filename, overwrite=overwrite, ...),
error= function(err) {print(paste("prediction of", index1, "failed"))},
silent=F)
}else{
PET.seasons.raster <- terra::predict(object=x, model=PET.season.object, fun=predict.PET.seasons, na.rm=TRUE,
filename=filename, overwrite=overwrite, ...)
}
}
names(PET.seasons.raster) <- index1
return(PET.seasons.raster)
}
`ensemble.prec.season` <- function(
PREC.stack=NULL, start.layer=NULL, length.layer=NULL,
filename=NULL, overwrite=TRUE,
CATCH.OFF=FALSE, ...
)
{
.BiodiversityR <- new.env()
# if (! require(dismo)) {stop("Please install the dismo package")}
if(inherits(PREC.stack, "RasterStack") == FALSE && inherits(PREC.stack, "SpatRaster") == FALSE) {stop("PREC.stack is not a RasterStack or SpatRaster object")}
if(inherits(start.layer, "RasterLayer") == FALSE && inherits(start.layer, "SpatRaster") == FALSE) {stop("start.layer is not a RasterLayer or SpatRaster object")}
if(inherits(length.layer, "RasterLayer") == FALSE && inherits(length.layer, "SpatRaster") == FALSE) {stop("length.layer is not a RasterLayer or SpatRaster object")}
names(PREC.stack) <- paste0("PREC", 1:length(names(PREC.stack)))
names(start.layer) <- "start"
names(length.layer) <- "length"
if(inherits(PREC.stack, "RasterStack") == TRUE) {
x <- raster::stack(c(PREC.stack, start.layer, length.layer))
}else{
x <- terra::rast(list(PREC.stack, start.layer, length.layer))
}
prec.season.object <- list(PREC.names=names(PREC.stack))
predict.prec.season <- function(object=prec.season.object, newdata=newdata) {
PREC.names <- object$PREC.names
n.mts <- length(PREC.names)
result <- array(nrow(newdata))
for (i in 1:nrow(newdata)) {
datai <- newdata[i, , drop=F]
datai.PREC <- datai[, PREC.names]
mts <- seq(from=datai[, "start"], length=datai[, "length"])
mts[mts > n.mts] <- mts[mts > n.mts] - n.mts
result[i] <- sum(datai.PREC[mts])
}
return(result)
}
#
# predict
if(inherits(PREC.stack, "RasterStack") == TRUE) {
if (CATCH.OFF == F) {
tryCatch(prec.season.raster <- raster::predict(object=x, model=prec.season.object, fun=predict.prec.season, na.rm=TRUE,
filename=filename, overwrite=overwrite, ...),
error= function(err) {print(paste("prediction failed"))},
silent=F)
}else{
prec.season.raster <- raster::predict(object=x, model=prec.season.object, fun=predict.prec.season, na.rm=TRUE,
filename=filename, overwrite=overwrite, ...)
}
}else{
if (CATCH.OFF == F) {
tryCatch(prec.season.raster <- terra::predict(object=x, model=prec.season.object, fun=predict.prec.season, na.rm=TRUE,
filename=filename, overwrite=overwrite, ...),
error= function(err) {print(paste("prediction failed"))},
silent=F)
}else{
prec.season.raster <- terra::predict(object=x, model=prec.season.object, fun=predict.prec.season, na.rm=TRUE,
filename=filename, overwrite=overwrite, ...)
}
}
names(prec.season.raster) <- "prec.season"
return(prec.season.raster)
}
`ensemble.tmean.season` <- function(
TMEAN.stack=NULL, start.layer=NULL, length.layer=NULL,
filename=NULL, overwrite=TRUE,
CATCH.OFF=FALSE, ...
)
{
.BiodiversityR <- new.env()
# if (! require(dismo)) {stop("Please install the dismo package")}
if(inherits(TMEAN.stack, "RasterStack") == FALSE && inherits(TMEAN.stack, "SpatRaster") == FALSE) {stop("TMEAN.stack is not a RasterStack or SpatRaster object")}
if(inherits(start.layer, "RasterLayer") == FALSE && inherits(start.layer, "SpatRaster") == FALSE) {stop("start.layer is not a RasterLayer or SpatRaster object")}
if(inherits(length.layer, "RasterLayer") == FALSE && inherits(length.layer, "SpatRaster") == FALSE) {stop("length.layer is not a RasterLayer or SpatRaster object")}
names(TMEAN.stack) <- paste0("TMEAN", 1:length(names(TMEAN.stack)))
names(start.layer) <- "start"
names(length.layer) <- "length"
if(inherits(TMEAN.stack, "RasterStack") == TRUE) {
x <- raster::stack(c(TMEAN.stack, start.layer, length.layer))
}else{
x <- terra::rast(list(TMEAN.stack, start.layer, length.layer))
}
tmean.season.object <- list(TMEAN.names=names(TMEAN.stack))
predict.tmean.season <- function(object=tmean.season.object, newdata=newdata) {
tmean.names <- object$TMEAN.names
n.mts <- length(tmean.names)
result <- array(nrow(newdata))
for (i in 1:nrow(newdata)) {
datai <- newdata[i, , drop=F]
datai.TMEAN <- datai[, tmean.names]
mts <- seq(from=datai[, "start"], length=datai[, "length"])
mts[mts > n.mts] <- mts[mts > n.mts] - n.mts
result[i] <- sum(datai.TMEAN[mts])/length(mts)
}
return(result)
}
#
# predict
if(inherits(TMEAN.stack, "RasterStack") == TRUE) {
if (CATCH.OFF == F) {
tryCatch(tmean.season.raster <- raster::predict(object=x, model=tmean.season.object, fun=predict.tmean.season, na.rm=TRUE,
filename=filename, overwrite=overwrite, ...),
error= function(err) {print(paste("prediction failed"))},
silent=F)
}else{
tmean.season.raster <- raster::predict(object=x, model=tmean.season.object, fun=predict.tmean.season, na.rm=TRUE,
filename=filename, overwrite=overwrite, ...)
}
}else{
if (CATCH.OFF == F) {
tryCatch(tmean.season.raster <- terra::predict(object=x, model=tmean.season.object, fun=predict.tmean.season, na.rm=TRUE,
filename=filename, overwrite=overwrite, ...),
error= function(err) {print(paste("prediction failed"))},
silent=F)
}else{
tmean.season.raster <- terra::predict(object=x, model=tmean.season.object, fun=predict.tmean.season, na.rm=TRUE,
filename=filename, overwrite=overwrite, ...)
}
}
#
names(tmean.season.raster) <- "tmean.season"
return(tmean.season.raster)
}
`ensemble.season.suitability` <- function(
season.raster=NULL, thresholds=NULL,
filename=NULL, overwrite=TRUE,
CATCH.OFF=FALSE, ...
)
{
.BiodiversityR <- new.env()
# if (! require(dismo)) {stop("Please install the dismo package")}
if(inherits(season.raster, "RasterLayer") == FALSE && inherits(season.raster, "SpatRaster") == FALSE) {stop("season.raster is not a RasterLayer or SpatRaster object")}
suit.object <- list(thresholds=thresholds[order(thresholds)])
predict.suit <- function(suit.object=suit.object, newdata=newdata) {
VMIN <- as.numeric(suit.object$thresholds[1])
VOPMN <- as.numeric(suit.object$thresholds[2])
VOPMX <- as.numeric(suit.object$thresholds[3])
VMAX <- as.numeric(suit.object$thresholds[4])
result <- array(nrow(newdata))
for (i in 1:nrow(newdata)) {
datai <- newdata[i, , drop=F]
R.out <- 1
if (datai <= VMIN) {R.out <- 0}
if (datai > VMIN && datai < VOPMN) {
R.out <- 1 - ((VOPMN - datai) / (VOPMN - VMIN))
}
if (datai >= VMAX) {R.out <- 0}
if (datai > VOPMX && datai < VMAX) {
R.out <- 1 - ((datai - VOPMX) / (VMAX - VOPMX))
}
result[i] <- R.out
}
return(as.numeric(result))
}
#
# predict
if(inherits(season.raster, "RasterLayer") == TRUE) {
if (CATCH.OFF == F) {
tryCatch(suit.raster <- raster::predict(object=season.raster, model=suit.object, fun=predict.suit, na.rm=TRUE,
filename=filename, overwrite=overwrite, ...),
error= function(err) {print(paste("prediction failed"))},
silent=F)
}else{
suit.raster <- raster::predict(object=season.raster, model=suit.object, fun=predict.suit, na.rm=TRUE,
filename=filename, overwrite=overwrite, ...)
}
}else{
if (CATCH.OFF == F) {
tryCatch(suit.raster <- terra::predict(object=season.raster, model=suit.object, fun=predict.suit, na.rm=TRUE,
filename=filename, overwrite=overwrite, ...),
error= function(err) {print(paste("prediction failed"))},
silent=F)
}else{
suit.raster <- terra::predict(object=season.raster, model=suit.object, fun=predict.suit, na.rm=TRUE,
filename=filename, overwrite=overwrite, ...)
}
}
#
names(suit.raster) <- "suitability"
return(suit.raster)
}
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/ensemble.PET.seasons.R
|
`ensemble.VIF` <- function(
x=NULL, a=NULL, an=10000,
VIF.max=10, keep=NULL,
layer.drops=NULL, factors=NULL, dummy.vars=NULL
)
{
if(is.null(x) == T) {stop("value for parameter x is missing (RasterStack object)")}
if(inherits(x,"RasterStack") == F) {stop("x is not a RasterStack object")}
if(is.null(a) == F) {names(a) <- c("x", "y")}
#
layer.drops <- as.character(layer.drops)
factors <- as.character(factors)
dummy.vars <- as.character(dummy.vars)
#
vars <- names(x)
#
if (length(layer.drops) > 0) {
var.drops <- layer.drops
nd <- length(layer.drops)
for (i in 1:nd) {
if (any(vars == layer.drops[i]) == FALSE) {
cat(paste("\n", "WARNING: variable to exclude '", layer.drops[i], "' not among grid layers", sep = ""))
}else{
cat(paste("\n", "NOTE: variable '", layer.drops[i], "' will not be included as explanatory variable", sep = ""))
x <- raster::dropLayer(x, which(names(x) %in% c(layer.drops[i]) ))
x <- raster::stack(x)
vars <- names(x)
if (length(factors) > 0) {
factors <- factors[factors != layer.drops[i]]
}
if (length(dummy.vars) > 0) {
dummy.vars <- dummy.vars[dummy.vars != layer.drops[i]]
}
}
}
}else{
var.drops <- NULL
}
#
var.drops <- as.character(var.drops)
#
vars <- names(x)
if (length(factors) > 0) {
var.drops <- c(var.drops, factors)
for (i in 1:length(factors)) {vars <- vars[which(vars != factors[i])]}
}
#
nv <- length(vars)
#
result <- data.frame(array(dim=c(nv, nv)))
names(result) <- vars
row.names(result) <- paste("step_", c(1:nv), sep="")
#
if (is.null(a) == T) {a <- dismo::randomPoints(x[[1]], n=an, p=NULL, excludep=F)}
# presence locations will not be used, but are required for the ensemble.calibrate.models function
p <- dismo::randomPoints(x[[1]], n=30, p=NULL, excludep=F)
#
i <- 0
VIF.result.max <- Inf
# for ensemble.calibrate.models, need to use NULL again for factors etc.
if (length(var.drops) == 0) {var.drops <- NULL}
if (length(dummy.vars) == 0) {dummy.vars <- NULL}
cat(paste("\n", "Selection of explanatory variables based on the Variance Explanation Factor", sep = ""))
cat(paste("\n", "Data obtained from ", nrow(a), " point locations", sep = ""))
cat(paste("\n", "If some variables have VIF > VIF.max, then the variable with largest VIF is excluded", sep = ""))
cat(paste("\n", "The procedure stops when all variables have VIF <= VIF.max", "\n", sep = ""))
while(VIF.result.max >= VIF.max) {
VIF.result <- ensemble.calibrate.models(x, p=p, a=a,
layer.drops=var.drops, factors=NULL,
VIF=T, ENSEMBLE.tune=F,
MAXENT=0, MAXNET=0, MAXLIKE=0, GBM=0, GBMSTEP=0, RF=0, CF=0,
GLM=0, GLMSTEP=0, GAM=0,
GAMSTEP=0, MGCV=0, MGCVFIX=0,EARTH=0, RPART=0, NNET=0, FDA=0,
SVM=0, SVME=0, GLMNET=0,
BIOCLIM.O=0, BIOCLIM=0, DOMAIN=0, MAHAL=0, MAHAL01=0)$VIF
i <- i+1
for (v in 1:length(VIF.result)) {result[i, which(names(result) == names(VIF.result)[v])] <- VIF.result[which(names(VIF.result) == names(VIF.result)[v])]}
j <- 1
while(names(VIF.result[j]) %in% keep && j <= length(VIF.result)) {j <- j+1}
if (j <= length(VIF.result)){
VIF.result.max <- VIF.result[j]
var.drops <- c(var.drops, names(VIF.result)[j])
}else{
VIF.result.max <- VIF.max-1
}
}
# remove last variable included
if (length(var.drops) == 1) {
var.drops <- as.character(NULL)
}else{
nvd <- length(var.drops)-1
var.drops <- var.drops[1:nvd]
}
# include factors again as no information to exclude (drop)
if (length(factors) > 0) {
for (i in 1:length(factors)) {var.drops <- var.drops[which(var.drops != factors[i])]}
vars.included <- c(names(VIF.result), factors)
}else{
vars.included <- names(VIF.result)
factors <- NULL
}
dummy.vars <- as.character(dummy.vars)
if (length(dummy.vars) > 0) {
for (i in 1:length(var.drops)) {
if (var.drops[i] %in% dummy.vars) {dummy.vars <- dummy.vars[which(dummy.vars != var.drops[i])]}
}
}else{
dummy.vars <- NULL
}
if (length(var.drops) == 0) {var.drops <- NULL}
result <- result[rowSums(result, na.rm=T) > 0, , drop=F]
cat(paste("Summary of VIF selection process:", "\n", sep = ""))
print(result)
cat(paste("\n", sep = ""))
cat(paste("Final selection of variables:", "\n", sep = ""))
print(vars.included)
cat(paste("\n", sep = ""))
return(list(stepwise.results=result, var.drops=var.drops, vars.included=vars.included,
factors=factors, dummy.vars.included=dummy.vars, VIF.final=VIF.result))
}
`ensemble.VIF.dataframe` <- function(
x=NULL, VIF.max=10, keep=NULL,
car=TRUE, silent=F
)
{
if(is.null(x) == T) {stop("value for parameter x is missing (data.frame)")}
if(inherits(x, "data.frame") == F) {stop("x is not a data.frame")}
#
VIFcalc <- function(x, var.drops=NULL, car=T, silent=F) {
x1 <- x[, (names(x) %in% var.drops) == F]
varnames <- names(x1)
x1$RandomizedResponse <- x1[sample(nrow(x1)), 1]
LM.formula <- as.formula(paste("RandomizedResponse ~ ", paste(varnames, sep="", collapse="+"), sep="", collapse="+"))
if (car==T && silent==F) {
vifresult <- car::vif(lm(formula=LM.formula, data=x1))
if (is.null(vifresult) == F) {
cat(paste("\n", "Variance inflation (package: car)", "\n", sep = ""))
print(vifresult)
cat(paste("\n"))
}else{
cat(paste("\n", "NOTE: no result from car::vif", "\n", sep = ""))
}
}
if (silent == F) {
cat(paste("VIF directly calculated from linear model with focal numeric variable as response", "\n", sep = ""))
}
newVIF <- numeric(length=length(varnames))
newVIF[] <- NA
names(newVIF) <- varnames
for (i in 1:length(varnames)) {
response.name <- varnames[i]
explan.names <- varnames[-i]
if (is.factor(x[, varnames[i]]) == F) {
LM.formula <- as.formula(paste(response.name, "~", paste(explan.names, collapse="+"), sep=""))
newVIF[i] <- summary(lm(formula=LM.formula, data=x1))$r.squared
}
}
newVIF <- 1/(1-newVIF)
newVIF <- sort(newVIF, decreasing=T, na.last=T)
if (silent == F) {print(newVIF)}
return(newVIF)
}
#
vars <- names(x)
nv <- length(vars)
#
result <- data.frame(array(dim=c(nv, nv)))
names(result) <- vars
row.names(result) <- paste("step_", c(1:nv), sep="")
#
i <- 0
VIF.result.max <- Inf
var.drops <- NULL
while(VIF.result.max >= VIF.max) {
VIF.result <- VIFcalc(x, car=car, silent=silent, var.drops=var.drops)
i <- i+1
for (v in 1:length(VIF.result)) {result[i, which(names(result) == names(VIF.result)[v])] <- VIF.result[which(names(VIF.result) == names(VIF.result)[v])]}
j <- 1
while(names(VIF.result[j]) %in% keep && j <= length(VIF.result)) {j <- j+1}
if (j <= length(VIF.result)){
VIF.result.max <- VIF.result[j]
var.drops <- c(var.drops, names(VIF.result)[j])
}else{
VIF.result.max <- VIF.max-1
}
}
# remove last variable included
if (length(var.drops) == 1) {
var.drops <- as.character(NULL)
}else{
nvd <- length(var.drops)-1
var.drops <- var.drops[1:nvd]
}
result <- result[rowSums(result, na.rm=T) > 0, , drop=F]
vars.included <- names(VIF.result)
if (silent == F) {
cat(paste("\n", "Summary of VIF selection process:", "\n", sep = ""))
print(result)
cat(paste("\n", "Final selection of variables:", "\n", sep = ""))
print(vars.included)
cat(paste("\n", sep = ""))
}
return(list(stepwise.results=result, var.drops=var.drops, vars.included=vars.included,
VIF.final=VIF.result))
}
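# Illustrative sketch (not run): stepwise removal of the variable with the
# largest variance inflation factor until all remaining VIF <= VIF.max;
# 'env.data' is a hypothetical data frame of candidate explanatory variables.
# vif.sel <- ensemble.VIF.dataframe(env.data, VIF.max=10, car=FALSE)
# vif.sel$vars.included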
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/ensemble.VIF.R
|
`ensemble.accepted.categories` <- function(
xcat=NULL, categories=NULL,
filename=NULL, overwrite=TRUE, ...
)
{
# if (! require(dismo)) {stop("Please install the dismo package")}
if(inherits(xcat,"RasterLayer") == F) {stop("parameter xcat is expected to be a RasterLayer")}
if(is.null(categories) == T) {stop("accepted categories are missing")}
if(is.null(filename) == T) {
cat(paste("\n", "No new filename was provided", sep = ""))
# if (! require(tools)) {stop("tools package not available")}
filename1 <- filename(xcat)
extension1 <- paste(".", tools::file_ext(filename1), sep="")
extension2 <- paste("_new.", tools::file_ext(filename1), sep="")
filename <- gsub(pattern=extension1, replacement=extension2, x=filename1)
cat(paste("\n", "New raster will be saved as: ", filename, "\n", sep = ""))
}
#
all.categories <- raster::freq(xcat)[,1]
all.categories <- all.categories[is.na(all.categories) == F]
new.categories <- all.categories[is.na(match(all.categories, categories) )]
cat(paste("\n", "categories that will be reclassified as 'NA'", "\n", sep = ""))
print(new.categories)
#
replace.frame <- data.frame(id=categories, v=categories)
colnames(replace.frame)[2] <- names(xcat)
new.x <- raster::subs(xcat, replace.frame, by=1, which=2, subsWithNA=TRUE, filename=filename, overwrite=overwrite, ...)
return(filename)
}
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/ensemble.accepted.categories.R
|
`ensemble.analogue.object` <- function(
ref.location, future.stack, current.stack, name="reference1",
method="mahal", an=10000, probs=c(0.025, 0.975), weights=NULL, z=2)
{
target.values <- as.numeric(raster::extract(future.stack, ref.location))
names(target.values) <- names(future.stack)
if ((method %in% c("mahal", "quantile", "sd")) == F) {method <- "none"}
if (method == "mahal") {
a <- dismo::randomPoints(current.stack, n=an, p=NULL, excludep=F)
a <- data.frame(a)
background.data <- raster::extract(current.stack, a)
background.data <- data.frame(background.data)
TrainValid <- complete.cases(background.data)
background.data <- background.data[TrainValid,]
cov.mahal <- cov(background.data)
out <- list(name=name, ref.location=ref.location, stack.name=future.stack@title,
method=method, target.values=target.values, cov.mahal=cov.mahal)
return(out)
}
if (method == "quantile") {
lower.interval <- data.frame(t(quantile(current.stack, probs[1])))
upper.interval <- data.frame(t(quantile(current.stack, probs[2])))
norm.values <- as.numeric(upper.interval - lower.interval)
}
if (method == "sd") {
norm.values <- raster::cellStats(current.stack, stat="sd")
}
if (method == "none") {
norm.values <- rep(1, length=length(names(current.stack)))
}
names(norm.values) <- names(current.stack)
# problem if some of the norm values are zero
zero.norm.values <- which(norm.values == 0)
if(length(zero.norm.values) > 0) {
cat(paste("WARNING: some of the normalizing values were zero", "\n\n", sep=""))
print(names(zero.norm.values))
cat(paste("\n", "respective values were now set to one", "\n", sep=""))
norm.values[names(norm.values) %in% names(zero.norm.values)] <- 1
}
#
if (is.null(weights)==T || isTRUE(all.equal(names(weights), names(current.stack)))==F) {
cat(paste("WARNING: weights were not provided or their names do not match the variables of the current RasterStack", "\n", sep=""))
weight.values <- rep(1, raster::nlayers(current.stack))
names(weight.values) <- names(current.stack)
}else{
weight.values <- weights
}
weight.values <- weight.values / sum(weight.values)
out <- list(name=name, ref.location=ref.location, stack.name=future.stack@title,
method=method, target.values=target.values, norm.values=norm.values, weights=weight.values, z=z)
return(out)
}
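# For methods other than "mahal", ensemble.analogue (below) uses the information in this
# object to compute, for every raster cell, a weighted and normalized distance to the
# reference site:
# distance = ( sum_i weights_i * ( |value_i - target_i| / norm_i )^z )^(1/z)
# with target.values as targets, norm.values as the normalizing constants (quantile
# range, standard deviation or 1, depending on the method) and z the exponent stored above.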
`ensemble.analogue` <- function(
x=NULL, analogue.object=NULL, analogues=1,
RASTER.object.name=analogue.object$name, RASTER.stack.name=x@title,
RASTER.format="GTiff", RASTER.datatype="INT2S", RASTER.NAflag=-32767,
# KML.out=T, KML.blur=10, KML.maxpixels = 100000,
limits=c(1, 5, 20, 50), limit.colours=c('red', 'orange', 'blue', 'grey'),
CATCH.OFF=FALSE
)
{
.BiodiversityR <- new.env()
# if (! require(dismo)) {stop("Please install the dismo package")}
if(is.null(x) == T) {stop("value for parameter x is missing (RasterStack object)")}
if(inherits(x, "RasterStack") == F) {stop("x is not a RasterStack object")}
if (is.null(analogue.object) == T) {stop("value for parameter analogue.object is missing (hint: use the ensemble.analogue.object function)")}
#
# if (KML.out==T && raster::isLonLat(x)==F) {
# cat(paste("\n", "NOTE: not possible to generate KML files as Coordinate Reference System (CRS) of stack ", x@title , " is not longitude and latitude", "\n", sep = ""))
# KML.out <- FALSE
# }
#
predict.analogue <- function(object=analogue.object, newdata=newdata) {
method <- object$method
if (method == "mahal") {
centroid <- object$target.values
cov.mahal <- object$cov.mahal
p <- mahalanobis(newdata, center=centroid, cov=cov.mahal)
p <- as.numeric(p)
return(p)
}else{
targetdata <- object$target.values
normdata <- object$norm.values
weightdata <- object$weights
z <- object$z
out <- newdata
for (i in 1:ncol(out)) {
out[,i] <- as.numeric(out[,i]) - as.numeric(targetdata[as.numeric(na.omit(match(names(out)[i], names(targetdata))))])
out[,i] <- abs(out[,i])
out[,i] <- as.numeric(out[,i]) / as.numeric(normdata[as.numeric(na.omit(match(names(out)[i], names(normdata))))])
out[,i] <- (out[,i]) ^ z
out[,i] <- as.numeric(out[,i]) * as.numeric(weightdata[as.numeric(na.omit(match(names(out)[i], names(weightdata))))])
}
p <- rowSums(out)
z2 <- 1/z
p <- p ^ z2
p <- round(p, digits=8)
return(p)
}
}
# avoid problems with non-existing directories and prepare for output
dir.create("ensembles", showWarnings = F)
dir.create("ensembles/analogue", showWarnings = F)
# if (KML.out == T) {
# dir.create("kml", showWarnings = F)
# dir.create("kml/analogue", showWarnings = F)
# }
if(length(x@title) == 0) {x@title <- "stack1"}
stack.title <- RASTER.stack.name
if (gsub(".", "_", stack.title, fixed=T) != stack.title) {cat(paste("\n", "WARNING: title of stack (", stack.title, ") contains '.'", "\n\n", sep = ""))}
rasterfull <- paste("ensembles/analogue/", RASTER.object.name, "_", stack.title , "_analogue", sep="")
kmlfull <- paste("kml/analogue/", RASTER.object.name, "_", stack.title , "_analogue", sep="")
#
# predict
if (CATCH.OFF == F) {
tryCatch(analogue.raster <- raster::predict(object=x, model=analogue.object, fun=predict.analogue, na.rm=TRUE,
filename=rasterfull, progress='text', overwrite=T, format=RASTER.format),
error= function(err) {print(paste("prediction of analogue raster failed"))},
silent=F)
}else{
analogue.raster <- raster::predict(object=x, model=analogue.object, fun=predict.analogue, na.rm=TRUE,
filename=rasterfull, progress='text', overwrite=T, format=RASTER.format)
}
# modified DEC-2022, rounding done within the function
# analogue.raster <- round(analogue.raster, digits=8)
# raster::setMinMax(analogue.raster)
print(analogue.raster)
#
# avoid possible problems with saving of names of the raster layers
# not done from DEC-2022 as saving in GTiff format
# raster::writeRaster(analogue.raster, filename="working.grd", overwrite=T)
# working.raster <- raster::raster("working.grd")
# names(working.raster) <- paste(RASTER.object.name, "_", stack.title , "_", analogue.object$method, "_z", analogue.object$z, "_analogue", sep="")
# raster::writeRaster(working.raster, filename=rasterfull, progress='text', overwrite=T, format=)
#
limits.data <- data.frame(cbind(limits, limits))
names(limits.data) <- c("threshold", "count")
freqs <- raster::freq(analogue.raster, digits=8)
for (i in 1:length(limits)) {
j <- 1
while (sum(freqs[1:j, 2]) < limits[i]) {j <- j+1}
limits.data[i, 1] <- freqs[j, 1]
limits.data[i, 2] <- sum(freqs[1:j, 2])
}
cat(paste("\n", "suggested breaks in colour scheme", "\n", sep=""))
print(limits.data)
#
# if (KML.out == T) {
# breaks1 <- c(raster::minValue(analogue.raster), limits.data[,1])
# raster::KML(working.raster, filename=kmlfull, overwrite=T, blur=KML.blur, col=limit.colours, breaks=breaks1)
# }
#
# get locations
j <- 1
while (sum(freqs[1:j, 2]) < analogues) {j <- j+1}
threshold <- freqs[j, 1]
cat(paste("\n", "Threshold (n =", j, "): ", threshold, "\n", sep=""))
index1 <- which(analogue.raster[,] <= threshold)
pres1 <- raster::xyFromCell(analogue.raster, index1)
vars <- length(names(x))
output1 <- data.frame(array(dim=c(length(index1), 5+vars)))
output2 <- data.frame(array(dim=c(1, 5+vars)))
names(output1) <- names(output2) <- c("model", "method", "lon", "lat", "distance", names(x))
output1[, 1] <- rep(analogue.object$stack.name, nrow(output1))
output2[1, 1] <- analogue.object$stack.name
if (analogue.object$method == "mahal") {
method1 <- "mahal"
}else{
method1 <- paste(analogue.object$method, "_z_", analogue.object$z, sep="")
}
output1[, 2] <- rep(method1, nrow(output1))
output1[, c(3:4)] <- pres1
point.data1 <- raster::extract(x, pres1)
output1[, c(6:(5+vars))] <- point.data1
point.data2 <- raster::extract(analogue.raster, pres1)
output1[, 5] <- point.data2
output1 <- output1[order(output1[,"distance"], decreasing=F),]
output2[1, 1] <- analogue.object$stack.name
output2[1, 2] <- "target"
output2[1 ,3] <- as.numeric(analogue.object$ref.location[,1])
output2[1 ,4] <- as.numeric(analogue.object$ref.location[,2])
output2[, c(6:(5+vars))] <- analogue.object$target.values
output3 <- rbind(output2, output1)
cat(paste("\n", "analogue raster provided in folder: ", getwd(), "//ensembles//analogue", "\n\n", sep=""))
return(output3)
}
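# Hedged usage sketch (not part of the package code): locate current-climate analogues of
# the future climate at a reference site. The objects current.stack and future.stack
# (RasterStack) and ref.point (1-row data.frame with x and y) are illustrative names.
if (FALSE) {
    analogue.obj <- ensemble.analogue.object(ref.location=ref.point,
        future.stack=future.stack, current.stack=current.stack,
        name="reference1", method="quantile", z=2)
    analogue.points <- ensemble.analogue(x=current.stack, analogue.object=analogue.obj,
        analogues=25)
    head(analogue.points)
}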
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/ensemble.analogue.R
|
`ensemble.area` <- function(
x=NULL, km2=TRUE
)
{
# if (! require(dismo)) {stop("Please install the dismo package")}
if(inherits(x, "RasterLayer") == F) {stop("x is not a RasterLayer object")}
if(raster::isLonLat(x) == F) {stop("x is not in longitude-latitude coordinates")}
cat(paste("\n", "Cell frequencies", "\n", sep = ""))
print(raster::freq(x))
count.polygon <- raster::rasterToPolygons(x, dissolve=T)
result <- cbind(count.polygon@data, area=rep(NA, nrow(count.polygon@data)))
result[,2] <- geosphere::areaPolygon(count.polygon)
# convert from square m to square km
if (km2 == T) {
result[,2] <- result[,2]/1000000
names(result)[2] <- "area.km2"
}else{
names(result)[2] <- "area.m2"
}
return(result)
}
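# Hedged usage sketch (not part of the package code): calculate the area (km2) covered by
# each category of a presence-absence raster in longitude-latitude coordinates. The
# synthetic raster below is illustrative only.
if (FALSE) {
    pa.raster <- raster::raster(nrows=18, ncols=36)  # default extent is longitude-latitude
    raster::values(pa.raster) <- sample(c(0, 1), size=raster::ncell(pa.raster), replace=TRUE)
    ensemble.area(x=pa.raster, km2=TRUE)
}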
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/ensemble.area.R
|
`ensemble.batch` <- function(
x=NULL, xn=c(x),
species.presence=NULL, species.absence=NULL,
presence.min=20, thin.km=0.1,
an=1000, excludep=FALSE, target.groups=FALSE,
get.block=FALSE, block.default=runif(1)>0.5, get.subblocks=FALSE,
SSB.reduce=FALSE, CIRCLES.d=250000,
k.splits=4, k.test=0,
n.ensembles=1,
VIF.max=10, VIF.keep=NULL,
SINK=FALSE, CATCH.OFF=FALSE,
RASTER.datatype="INT2S", RASTER.NAflag=-32767,
# KML.out=FALSE, KML.maxpixels=100000, KML.blur=10,
models.save=FALSE,
threshold.method="spec_sens", threshold.sensitivity=0.9, threshold.PresenceAbsence=FALSE,
ENSEMBLE.best=0, ENSEMBLE.min=0.7, ENSEMBLE.exponent=1, ENSEMBLE.weight.min=0.05,
input.weights=NULL,
MAXENT=1, MAXNET=1, MAXLIKE=1, GBM=1, GBMSTEP=0, RF=1, CF=1,
GLM=1, GLMSTEP=1, GAM=1, GAMSTEP=1, MGCV=1, MGCVFIX=0,
EARTH=1, RPART=1, NNET=1, FDA=1, SVM=1, SVME=1, GLMNET=1,
BIOCLIM.O=0, BIOCLIM=1, DOMAIN=1,
MAHAL=1, MAHAL01=1,
PROBIT=FALSE,
Yweights="BIOMOD",
layer.drops=NULL, factors=NULL, dummy.vars=NULL,
formulae.defaults=TRUE, maxit=100,
MAXENT.a=NULL, MAXENT.an=10000, MAXENT.path=paste(getwd(), "/models/maxent", sep=""),
MAXNET.classes="default", MAXNET.clamp=FALSE, MAXNET.type="cloglog",
MAXLIKE.formula=NULL, MAXLIKE.method="BFGS",
GBM.formula=NULL, GBM.n.trees=2001,
# GBMSTEP.gbm.x=c(2:(1+raster::nlayers(x))),
GBMSTEP.tree.complexity=5, GBMSTEP.learning.rate=0.005,
GBMSTEP.bag.fraction=0.5, GBMSTEP.step.size=100,
RF.formula=NULL, RF.ntree=751, RF.mtry=floor(sqrt(raster::nlayers(x))),
CF.formula=NULL, CF.ntree=751, CF.mtry=floor(sqrt(raster::nlayers(x))),
GLM.formula=NULL, GLM.family=binomial(link="logit"),
GLMSTEP.steps=1000, STEP.formula=NULL, GLMSTEP.scope=NULL, GLMSTEP.k=2,
GAM.formula=NULL, GAM.family=binomial(link="logit"),
GAMSTEP.steps=1000, GAMSTEP.scope=NULL, GAMSTEP.pos=1,
MGCV.formula=NULL, MGCV.select=FALSE,
MGCVFIX.formula=NULL,
EARTH.formula=NULL, EARTH.glm=list(family=binomial(link="logit"), maxit=maxit),
RPART.formula=NULL, RPART.xval=50,
NNET.formula=NULL, NNET.size=8, NNET.decay=0.01,
FDA.formula=NULL,
SVM.formula=NULL, SVME.formula=NULL,
GLMNET.nlambda=100, GLMNET.class=FALSE,
BIOCLIM.O.fraction=0.9,
MAHAL.shape=1
)
{
.BiodiversityR <- new.env()
RASTER.format <- "GTiff"
#
k.test <- as.integer(k.test)
k.splits <- as.integer(k.splits)
if (k.splits < 1) {
cat(paste("\n", "NOTE: parameter k.splits was set to be smaller than 1", sep = ""))
cat(paste("\n", "default value of 4 therefore set for parameter k.splits", sep = ""))
k.splits <- 4
}
n.ensembles <- as.integer(n.ensembles)
if (n.ensembles < 1) {n.ensembles <- 1}
# if (! require(dismo)) {stop("Please install the dismo package")}
if (is.null(xn) == T) {
cat(paste("\n", "NOTE: new rasterStack assumed to be equal to the base rasterStack", sep = ""))
xn <- x
}
xn <- c(xn)
# need to recalculate threshold for mean of ensembles
# therefore put x as first of new stacks
if (n.ensembles > 1) {
xn <- c(x, xn)
i <- 1
while (i < length(xn)) {
i <- i+1
if(identical(x, xn[[i]])) {xn[[i]] <- NULL}
}
}
species.presence <- data.frame(species.presence)
if (ncol(species.presence) < 2) {stop("species.presence expected to be 3-column data.frame with species, x (e.g., lon) and y (e.g., lat) columns")}
if (ncol(species.presence) == 2) {
cat(paste("\n", "species.presence was expected to be 3-column data.frame with columns representing species, x (e.g., lon) and y (e.g., lat)", sep = ""))
cat(paste("\n", "only two columns were provided, it is therefore assumed that these reflect x and y coordinates for a single species", "\n\n", sep = ""))
species.name <- rep("Species001", nrow(species.presence))
species.presence <- cbind(species.name, species.presence)
species.presence <- data.frame(species.presence)
species.presence[,2] <- as.numeric(species.presence[,2])
species.presence[,3] <- as.numeric(species.presence[,3])
names(species.presence) <- c("species", "x", "y")
}
if (ncol(species.presence) > 3) {
cat(paste("\n", "species.presence was expected to be 3-column data.frame with species, x (e.g., lon) and y (e.g., lat) columns", sep = ""))
cat(paste("\n", "only first three columns used", "\n\n", sep = ""))
species.presence <- species.presence[,c(1:3)]
species.presence[,2] <- as.numeric(species.presence[,2])
species.presence[,3] <- as.numeric(species.presence[,3])
names(species.presence) <- c("species", "x", "y")
}
if (is.null(species.absence)==F) {species.absence <- data.frame(species.absence)}
if (is.null(species.absence)==F && ncol(species.absence) < 2) {stop("species.absence expected to be a 2-column data.frame with x (e.g., lon) and y (e.g., lat), or 3-column data.frame with species, x (e.g., lon) and y (e.g., lat) columns")}
if (is.null(species.absence)==F && ncol(species.absence)> 3) {
cat(paste("\n", "species.absence was expected to be 3-column data.frame with species, x (e.g., lon) and y (e.g., lat) columns", sep = ""))
cat(paste("\n", "only first three columns used", "\n\n", sep = ""))
species.absence <- species.absence[,c(1:3)]
species.absence[,2] <- as.numeric(species.absence[,2])
species.absence[,3] <- as.numeric(species.absence[,3])
names(species.absence) <- c("species", "x", "y")
}
as.loc <- NULL
if (is.null(species.absence)==F && ncol(species.absence) == 2) {
cat(paste("\n", "species.absence was expected to be 3-column data.frame with species, x (e.g., lon) and y (e.g., lat) columns", sep = ""))
cat(paste("\n", "only two columns were provided, it is therefore assumed that these reflect x and y coordinates for absence locations to be used for each species run", "\n\n", sep = ""))
species.absence[,1] <- as.numeric(species.absence[,1])
species.absence[,2] <- as.numeric(species.absence[,2])
names(species.absence) <- c("x", "y")
as.loc <- species.absence
}
#
# check for variables below the maximum VIF
# note that the ensemble.VIF function does not remove factor variables
#
VIF.result <- ensemble.VIF(x=x, VIF.max=VIF.max, keep=VIF.keep,
layer.drops=layer.drops, factors=factors, dummy.vars=dummy.vars)
layer.drops <- VIF.result$var.drops
factors <- VIF.result$factors
dummy.vars <- VIF.result$dummy.vars
#
# process species by species
species.names <- levels(droplevels(factor(species.presence[,1])))
AUC.table.out <- AUC.ensemble.out <- output.weights.out <- ensemble.highest <- NULL
for (s in 1:length(species.names)) {
focal.species <- species.names[s]
ps <- species.presence[species.presence[,1]==focal.species, c(2:3)]
n.pres <- nrow(ps)
# check after spatial thinning if species has required minimum number of presence points
# already calculate all the spatially thinned data sets for each run
if (thin.km > 0) {
cat(paste("\n", "Generation of spatially thinned presence data sets for each ensemble", "\n\n", sep = ""))
ps.thins <- vector("list", n.ensembles)
for (i in 1:n.ensembles) {
ps.thins[[i]] <- ensemble.spatialThin(ps, thin.km=thin.km)
if (nrow(ps.thins[[i]]) < n.pres) {n.pres <- nrow(ps.thins[[i]])}
}
}
if (n.pres < presence.min) {
if (thin.km > 0) {
cat(paste("\n", "Species: ", focal.species, " only has ", n.pres, " presence locations in one of the spatially thinned data sets", sep = ""))
}else{
cat(paste("\n", "Species: ", focal.species, " only has ", n.pres, " presence locations", sep = ""))
}
cat(paste("\n", "This species therefore not included in batch processing", "\n\n", sep = ""))
}else{
# create output file
if (s==1) {dir.create("outputs", showWarnings = F)}
paste.file <- paste(getwd(), "/outputs/", focal.species, "_output.txt", sep="")
OLD.SINK <- TRUE
if (sink.number(type="output") == 0) {OLD.SINK <- F}
if (SINK==T && OLD.SINK==F) {
if (file.exists(paste.file) == F) {
cat(paste("\n", "NOTE: results captured in file: ", paste.file, "\n", sep = ""))
}else{
cat(paste("\n", "NOTE: results appended in file: ", paste.file, "\n", sep = ""))
}
cat(paste("\n\n", "RESULTS (ensemble.batch function)", "\n\n", sep=""), file=paste.file, append=T)
sink(file=paste.file, append=T)
cat(paste(date(), "\n", sep=""))
print(match.call())
}
#
cat(paste("\n", "Evaluations for species: ", focal.species, "\n", sep = ""))
ps <- species.presence[species.presence[,1]==focal.species, c(2:3)]
# repeat the whole process for n.ensembles
RASTER.species.name1 <- focal.species
for (runs in 1:n.ensembles) {
if (n.ensembles > 1) {
cat(paste("\n", focal.species, ": ENSEMBLE ", runs, "\n\n", sep = ""))
RASTER.species.name1 <- paste(focal.species, "_ENSEMBLE_", runs, sep="")
}
if (thin.km > 0) {
ps <- ps.thins[[runs]]
}
if (is.null(species.absence)==F && ncol(species.absence) == 3) {
as.loc <- species.absence[species.absence[,1]==focal.species, c(2:3)]
}
# target group sampling
if (is.null(as.loc)==F && target.groups==T) {
cat(paste("\n", "target group (biased pseudo-absence locations) in centres of cells with locations of all target group species ('species.absence')", "\n\n", sep = ""))
p.cell <- unique(raster::cellFromXY(x[[1]], ps))
a.cell <- unique(raster::cellFromXY(x[[1]], as.loc))
if (excludep == T) {a.cell <- a.cell[!(a.cell %in% p.cell)]}
as.loc <- raster::xyFromCell(x[[1]], cell=a.cell, spatial=F)
}
# random selection of background locations for each run
if (is.null(as.loc)==T) {
if (target.groups == T) {
cat(paste("\n", "WARNING: not possible for target group pseudo-absence data as 'species.absence' (locations of all species) not specified", sep = ""))
cat(paste("\n", "Instead background locations selected randomly", "\n\n", sep = ""))
}
if (excludep == T) {
as.loc <- dismo::randomPoints(x[[1]], n=an, p=ps, excludep=T)
}else{
as.loc <- dismo::randomPoints(x[[1]], n=an, p=NULL, excludep=F)
}
}
assign("ps", ps, envir=.BiodiversityR)
assign("as.loc", as.loc, envir=.BiodiversityR)
#1. first ensemble tests
calibration1 <- ensemble.calibrate.weights(x=x, p=ps, a=as.loc, k=k.splits,
get.block=get.block, block.default=block.default, get.subblocks=get.subblocks,
SSB.reduce=SSB.reduce, CIRCLES.d=CIRCLES.d,
CATCH.OFF=CATCH.OFF,
ENSEMBLE.tune=T,
ENSEMBLE.best=ENSEMBLE.best, ENSEMBLE.min=ENSEMBLE.min,
ENSEMBLE.exponent=ENSEMBLE.exponent, ENSEMBLE.weight.min=ENSEMBLE.weight.min,
species.name = RASTER.species.name1,
threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity, threshold.PresenceAbsence=threshold.PresenceAbsence,
input.weights=input.weights,
MAXENT=MAXENT, MAXNET=MAXNET, MAXLIKE=MAXLIKE, GBM=GBM, GBMSTEP=GBMSTEP, RF=RF, CF=CF,
GLM=GLM, GLMSTEP=GLMSTEP, GAM=GAM, GAMSTEP=GAMSTEP, MGCV=MGCV, MGCVFIX=MGCVFIX,
EARTH=EARTH, RPART=RPART, NNET=NNET, FDA=FDA, SVM=SVM, SVME=SVME, GLMNET=GLMNET,
BIOCLIM.O=BIOCLIM.O, BIOCLIM=BIOCLIM, DOMAIN=DOMAIN, MAHAL=MAHAL, MAHAL01=MAHAL01,
PROBIT=PROBIT, VIF=T,
Yweights=Yweights,
layer.drops=layer.drops, factors=factors, dummy.vars=dummy.vars,
maxit=maxit,
MAXENT.a=MAXENT.a, MAXENT.an=MAXENT.an, MAXENT.path=MAXENT.path,
MAXNET.classes=MAXNET.classes, MAXNET.clamp=MAXNET.clamp, MAXNET.type=MAXNET.type,
MAXLIKE.formula=MAXLIKE.formula, MAXLIKE.method=MAXLIKE.method,
GBM.formula=GBM.formula, GBM.n.trees=GBM.n.trees,
# GBMSTEP.gbm.x=GBMSTEP.gbm.x,
GBMSTEP.tree.complexity=GBMSTEP.tree.complexity,
GBMSTEP.learning.rate=GBMSTEP.learning.rate, GBMSTEP.bag.fraction=GBMSTEP.bag.fraction,
GBMSTEP.step.size=GBMSTEP.step.size,
RF.formula=RF.formula, RF.ntree=RF.ntree, RF.mtry=RF.mtry,
CF.formula=CF.formula, CF.ntree=CF.ntree, CF.mtry=CF.mtry,
GLM.formula=GLM.formula, GLM.family=GLM.family,
GLMSTEP.k=GLMSTEP.k, GLMSTEP.steps=GLMSTEP.steps, STEP.formula=STEP.formula, GLMSTEP.scope=GLMSTEP.scope,
GAM.formula=GAM.formula, GAM.family=GAM.family,
GAMSTEP.steps=GAMSTEP.steps, GAMSTEP.scope=GAMSTEP.scope, GAMSTEP.pos=GAMSTEP.pos,
MGCV.formula=MGCV.formula, MGCV.select=MGCV.select,
MGCVFIX.formula=MGCVFIX.formula,
EARTH.formula=EARTH.formula, EARTH.glm=EARTH.glm,
RPART.formula=RPART.formula, RPART.xval=RPART.xval,
NNET.formula=NNET.formula, NNET.size=NNET.size, NNET.decay=NNET.decay,
FDA.formula=FDA.formula, SVM.formula=SVM.formula, SVME.formula=SVME.formula,
GLMNET.nlambda=GLMNET.nlambda, GLMNET.class=GLMNET.class,
BIOCLIM.O.fraction=BIOCLIM.O.fraction,
MAHAL.shape=MAHAL.shape)
x.batch <- calibration1$x
p.batch <- calibration1$p
a.batch <- calibration1$a
MAXENT.a.batch <- calibration1$MAXENT.a
var.names.batch <- calibration1$var.names
factors.batch <- calibration1$factors
dummy.vars.batch <- calibration1$dummy.vars
dummy.vars.noDOMAIN.batch <- calibration1$dummy.vars.noDOMAIN
AUC.table <- calibration1$AUC.table
rownames(AUC.table) <- paste(rownames(AUC.table), "_", runs, sep="")
AUC.table <- cbind(rep(runs, nrow(AUC.table)), AUC.table, rep(NA, nrow(AUC.table)))
colnames(AUC.table)[1] <- "ensemble"
colnames(AUC.table)[ncol(AUC.table)] <- "final.calibration"
if (runs == 1) {
AUC.table.out <- AUC.table
}else{
AUC.table.out <- rbind(AUC.table.out, AUC.table)
}
AUC.ensemble <- calibration1$AUC.with.suggested.weights
AUC.ensemble <- c(runs, AUC.ensemble)
names(AUC.ensemble)[1] <- "ensemble"
if (runs == 1) {
AUC.ensemble.out <- AUC.ensemble
}else{
AUC.ensemble.out <- rbind(AUC.ensemble.out, AUC.ensemble)
}
#2. calibrate final model
# xn.f <- eval(as.name(xn.focal))
cat(paste("\n", "Final model calibrations for species: ", RASTER.species.name1, "\n", sep = ""))
cat(paste("\n\n", "Input weights for ensemble.calibrate.models are average weights determined by ensemble.calibrate.weights function", "\n", sep=""))
output.weights <- calibration1$output.weights
print(output.weights)
output.weights <- c(runs, output.weights)
names(output.weights)[1] <- "ensemble"
if (runs == 1) {
output.weights.out <- output.weights
}else{
output.weights.out <- rbind(output.weights.out, output.weights)
rownames(output.weights.out) <- c(1:nrow(output.weights.out))
}
if (sum(output.weights) > 0) {
calibration2 <- ensemble.calibrate.models(
x=x.batch, p=p.batch, a=a.batch, k=k.test, pt=NULL, at=NULL,
models.keep=TRUE, evaluations.keep=TRUE,
PLOTS=FALSE, CATCH.OFF=CATCH.OFF,
models.save=models.save, species.name=RASTER.species.name1,
ENSEMBLE.tune=F,
input.weights=output.weights,
threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity, threshold.PresenceAbsence=threshold.PresenceAbsence,
PROBIT=PROBIT, VIF=T,
Yweights=Yweights,
factors=factors.batch, dummy.vars=dummy.vars.batch,
maxit=maxit,
MAXENT.a=MAXENT.a.batch, MAXENT.path=MAXENT.path,
MAXNET.classes=MAXNET.classes, MAXNET.clamp=MAXNET.clamp, MAXNET.type=MAXNET.type,
MAXLIKE.formula=MAXLIKE.formula, MAXLIKE.method=MAXLIKE.method,
GBM.formula=GBM.formula, GBM.n.trees=GBM.n.trees,
# GBMSTEP.gbm.x=GBMSTEP.gbm.x,
GBMSTEP.tree.complexity=GBMSTEP.tree.complexity,
GBMSTEP.learning.rate=GBMSTEP.learning.rate, GBMSTEP.bag.fraction=GBMSTEP.bag.fraction,
GBMSTEP.step.size=GBMSTEP.step.size,
RF.formula=RF.formula, RF.ntree=RF.ntree, RF.mtry=RF.mtry,
CF.formula=CF.formula, CF.ntree=CF.ntree, CF.mtry=CF.mtry,
GLM.formula=GLM.formula, GLM.family=GLM.family,
GLMSTEP.k=GLMSTEP.k, GLMSTEP.steps=GLMSTEP.steps, STEP.formula=STEP.formula, GLMSTEP.scope=GLMSTEP.scope,
GAM.formula=GAM.formula, GAM.family=GAM.family,
GAMSTEP.steps=GAMSTEP.steps, GAMSTEP.scope=GAMSTEP.scope, GAMSTEP.pos=GAMSTEP.pos,
MGCV.formula=MGCV.formula, MGCV.select=MGCV.select,
MGCVFIX.formula=MGCVFIX.formula,
EARTH.formula=EARTH.formula, EARTH.glm=EARTH.glm,
RPART.formula=RPART.formula, RPART.xval=RPART.xval,
NNET.formula=NNET.formula, NNET.size=NNET.size, NNET.decay=NNET.decay,
FDA.formula=FDA.formula, SVM.formula=SVM.formula, SVME.formula=SVME.formula,
GLMNET.nlambda=GLMNET.nlambda, GLMNET.class=GLMNET.class,
BIOCLIM.O.fraction=BIOCLIM.O.fraction,
MAHAL.shape=MAHAL.shape)
# transfer the final-calibration AUC of each algorithm (and of the ensemble) into the output table
for (model.name in c("MAXENT", "MAXNET", "MAXLIKE", "GBM", "GBMSTEP", "RF", "CF",
    "GLM", "GLMSTEP", "GAM", "GAMSTEP", "MGCV", "MGCVFIX",
    "EARTH", "RPART", "NNET", "FDA", "SVM", "SVME", "GLMNET",
    "BIOCLIM.O", "BIOCLIM", "DOMAIN", "MAHAL", "MAHAL01", "ENSEMBLE")) {
    AUC.table.out[which(rownames(AUC.table.out) == paste(model.name, "_", runs, sep="")), ncol(AUC.table.out)] <- calibration2$AUC.calibration[model.name]
}
#3. predict for all the rasters
for (n in 1:length(xn)) {
xn.f <- raster::stack(xn[[n]])
xn.f <- raster::subset(xn.f, subset=var.names.batch)
xn.f <- raster::stack(xn.f)
if(length(xn.f@title) == 0) {xn.f@title <- paste("stack_", n, sep="")}
if (gsub(".", "_", xn.f@title, fixed=T) != xn.f@title) {cat(paste("\n", "WARNING: title of stack (", xn.f@title, ") contains '.'", "\n\n", sep = ""))}
cat(paste("\n", "Predictions for species: ", RASTER.species.name1, " for rasterStack: ", xn.f@title, "\n\n", sep = ""))
if (n == 1) {
rasters2 <- ensemble.raster(xn=xn.f,
models.list=calibration2$models,
RASTER.species.name=RASTER.species.name1,
evaluate=T, p=p.batch, a=a.batch,
RASTER.format=RASTER.format, RASTER.datatype=RASTER.datatype, RASTER.NAflag=RASTER.NAflag)
# KML.out=KML.out, KML.maxpixels=KML.maxpixels, KML.blur=KML.blur)
}else{
rasters2 <- ensemble.raster(xn=xn.f,
models.list=calibration2$models,
RASTER.species.name=RASTER.species.name1,
RASTER.format=RASTER.format, RASTER.datatype=RASTER.datatype, RASTER.NAflag=RASTER.NAflag)
# KML.out=KML.out, KML.maxpixels=KML.maxpixels, KML.blur=KML.blur)
}
if(runs==n.ensembles && n.ensembles>1) {
# recalculate threshold for mean of predictions with calibration stack (xn[[1]])
# use threshold to calculate mean ensemble, ensemble count, ensemble presence and ensemble sd
if (n == 1) {
calibrate.mean <- NULL
calibrate.mean <- ensemble.mean(RASTER.species.name=focal.species, RASTER.stack.name=xn.f@title,
positive.filters = c("tif", "_ENSEMBLE_"), negative.filters = c("xml"),
RASTER.format=RASTER.format, RASTER.datatype=RASTER.datatype, RASTER.NAflag=RASTER.NAflag,
# KML.out=KML.out, KML.maxpixels=KML.maxpixels, KML.blur=KML.blur,
p=p.batch, a=a.batch,
pt = NULL, at = NULL,
threshold = -1,
threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity, threshold.PresenceAbsence=threshold.PresenceAbsence)
cat(paste("\n", "threshold for mean suitability: ", calibrate.mean$threshold, "\n", sep = ""))
}else{
ensemble.mean(RASTER.species.name=focal.species, RASTER.stack.name=xn.f@title,
positive.filters = c("tif", "_ENSEMBLE_"), negative.filters = c("xml"),
RASTER.format=RASTER.format, RASTER.datatype=RASTER.datatype, RASTER.NAflag=RASTER.NAflag,
# KML.out=KML.out, KML.maxpixels=KML.maxpixels, KML.blur=KML.blur,
p=NULL, a=NULL,
pt = NULL, at = NULL,
threshold = calibrate.mean$threshold,
threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity, threshold.PresenceAbsence=threshold.PresenceAbsence)
}
}
}
# sum output weights > 0 loop
}
# n ensembles loop
}
cat(paste("\n\n", "All AUC results for species: ", RASTER.species.name1, " for rasterStack: ", xn.f@title, "\n\n", sep=""))
print(AUC.table.out)
if (n.ensembles > 1) {
ensemble.highest <- AUC.ensemble.out[which.max(AUC.ensemble.out[, "MEAN.T"]), "ensemble"]
cat(paste("\n", "ensemble with highest average AUC is ensemble: ", ensemble.highest, "\n", sep = ""))
}else{
ensemble.highest <- 1
}
# if (sufficient presence locations) loop
if (SINK==T && OLD.SINK==F) {sink(file=NULL, append=T)}
}
# s (species) loop
}
result <- list(species=species.names, AUC.table=AUC.table.out, AUC.ensemble.selected.weights=AUC.ensemble.out, output.weights=output.weights.out, ensemble.highest.AUC=ensemble.highest, call=match.call())
cat(paste("\n\n", "(all calibrations and projections finalized by function ensemble.batch)", "\n\n", sep=""))
return(result)
}
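# Hedged usage sketch (not part of the package code): calibrate ensembles for the species
# in a 3-column presence data.frame and project them to a current and a future RasterStack.
# predictor.stack, future.stack and presence.data are illustrative names.
if (FALSE) {
    batch.result <- ensemble.batch(x=predictor.stack, xn=list(predictor.stack, future.stack),
        species.presence=presence.data,
        an=1000, k.splits=4, n.ensembles=2,
        MAXENT=0, MAHAL=0, MAHAL01=0)
    batch.result$AUC.table
    batch.result$ensemble.highest.AUC
}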
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/ensemble.batch.R
|
`ensemble.batch.presence` <- function(
p=NULL, species.name="Species001"
)
{
p <- as.matrix(p)
p <- cbind(rep(species.name, nrow(p)), p)
p <- data.frame(p)
names(p) <- c("species", "x", "y")
return(p)
}
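# Hedged usage sketch (not part of the package code): format a two-column set of
# coordinates as the 3-column presence data.frame expected by ensemble.batch.
if (FALSE) {
    xy <- data.frame(x=c(10.1, 10.3, 10.8), y=c(-1.2, -1.5, -1.9))
    ensemble.batch.presence(p=xy, species.name="Species001")
}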
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/ensemble.batch.presence.R
|
`ensemble.bioclim.object` <- function(
x=NULL, p=NULL, fraction=0.9,
quantiles=TRUE,
species.name="Species001",
factors=NULL
)
{
if(is.null(x) == T) {stop("value for parameter x is missing (data.frame or RasterStack object)")}
if(inherits(x, "RasterStack")==F && inherits(x, "data.frame")==F) {stop("x should be a data.frame or RasterStack object")}
if(fraction < 0 || fraction > 1) {stop("fraction should be in range 0-1")}
factors <- as.character(factors)
# cutoff parameter based on the normal distribution
cutoff <- qnorm(0.5+fraction/2)
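# for example, fraction=0.9 gives cutoff = qnorm(0.95), approximately 1.645, so that
# the interval mean +/- cutoff*sd spans about 90% of a normal distribution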
probs <- c(0.5-fraction/2, 0.5+fraction/2)
if(inherits(x, "RasterStack")==T && is.null(p)==F) {
clim.values <- data.frame(raster::extract(x, y=p))
if (length(names(x)) == 1) {
xdouble <- raster::stack(x, x)
clim.values <- raster::extract(x=xdouble, y=p)
clim.values <- data.frame(clim.values)
clim.values <- clim.values[, 1, drop=F]
}
names(clim.values) <- names(x)
x <- clim.values
}
if(inherits(x, "data.frame") == F) {
vars <- names(x)
if (length(factors) > 0) {for (i in 1:length(factors)) {vars <- vars[which(vars != factors[i])]}}
nv <- length(vars)
lower.limitsq <- upper.limitsq <- lower.limits <- upper.limits <- minima <- maxima <- clim.sd <- clim.median <- clim.mean <- numeric(length=nv)
names(lower.limitsq) <- names(upper.limitsq) <- names(lower.limits) <- names(upper.limits) <- names(minima) <- names(maxima) <- names(clim.sd) <- names(clim.median) <- names(clim.mean) <- vars
for (i in 1:nv) {
vari <- vars[i]
raster.focus <- x[[which(names(x) == vari)]]
raster::setMinMax(raster.focus)
meanV <- raster::cellStats(raster.focus, 'mean')
sdV <- raster::cellStats(raster.focus, 'sd')
minV <- raster::minValue(raster.focus)
maxV <- raster::maxValue(raster.focus)
lowerV <- as.numeric(raster::quantile(raster.focus, probs=probs[1], na.rm=T))
upperV <- as.numeric(raster::quantile(raster.focus, probs=probs[2], na.rm=T))
medianV <- as.numeric(raster::quantile(raster.focus, probs=0.5, na.rm=T))
lower.limitsq[which(names(lower.limitsq) == vari)] <- lowerV
upper.limitsq[which(names(upper.limitsq) == vari)] <- upperV
clim.mean[which(names(clim.mean) == vari)] <- meanV
clim.sd[which(names(clim.sd) == vari)] <- sdV
minima[which(names(minima) == vari)] <- minV
maxima[which(names(maxima) == vari)] <- maxV
clim.median[which(names(clim.median) == vari)] <- medianV
}
}else{
clim.values <- x
for (i in 1:length(names(clim.values))) {if (is.factor(clim.values[, i]) == T) {factors <- c(factors, names(clim.values)[i])} }
factors <- unique(factors)
if (length(factors) > 0) {for (i in 1:length(factors)) {clim.values <- clim.values[, which(names(clim.values) != factors[i]), drop=F]}}
clim.mean <- apply(clim.values, 2, "mean", na.rm=T)
clim.sd <- apply(clim.values, 2, "sd", na.rm=T)
lower.limitsq <- upper.limitsq <- lower.limits <- upper.limits <- minima <- maxima <- clim.median <- numeric(length=length(clim.mean))
names(lower.limitsq) <- names(upper.limitsq) <- names(lower.limits) <- names(upper.limits) <- names(minima) <- names(maxima) <- names(clim.median) <- names(clim.values)
minima <- apply(clim.values, 2, "min", na.rm=T)
maxima <- apply(clim.values, 2, "max", na.rm=T)
lower.limitsq <- apply(clim.values, 2, "quantile", probs[1], na.rm=T)
upper.limitsq <- apply(clim.values, 2, "quantile", probs[2], na.rm=T)
clim.median <- apply(clim.values, 2, "quantile", 0.5, na.rm=T)
}
if (quantiles == F){
lower.limits <- clim.mean - cutoff*clim.sd
upper.limits <- clim.mean + cutoff*clim.sd
}else{
lower.limits <- lower.limitsq
upper.limits <- upper.limitsq
}
# deal with asymmetrical distributions
for (i in 1:length(lower.limits)) {
if (lower.limits[i] < minima[i]) {
cat(paste("\n", "WARNING: lower limit of ", lower.limits[i], " for ", names(lower.limits)[i], " was smaller than minimum of ", minima[i], sep = ""))
cat(paste("\n", "lower limit therefore replaced by quantile value of ", lower.limitsq[i], "\n", sep = ""))
lower.limits[i] <- lower.limitsq[i]
}
if (upper.limits[i] > maxima[i]) {
cat(paste("\n", "WARNING: upper limit of ", upper.limits[i], " for ", names(upper.limits)[i], " was larger than maximum of ", maxima[i], sep = ""))
cat(paste("\n", "upper limit therefore replaced by quantile value of ", upper.limitsq[i], "\n", sep = ""))
upper.limits[i] <- upper.limitsq[i]
}
}
return(list(lower.limits=lower.limits, upper.limits=upper.limits, minima=minima, maxima=maxima,
means=clim.mean, medians=clim.median, sds=clim.sd, cutoff=cutoff, fraction=fraction, species.name=species.name))
}
`ensemble.bioclim` <- function(
x=NULL, bioclim.object=NULL,
RASTER.object.name=bioclim.object$species.name, RASTER.stack.name = x@title,
RASTER.format="GTiff",
# KML.out=TRUE, KML.blur=10, KML.maxpixels=100000,
CATCH.OFF=FALSE
)
{
.BiodiversityR <- new.env()
# if (! require(dismo)) {stop("Please install the dismo package")}
if(is.null(x) == T) {stop("value for parameter x is missing (RasterStack object)")}
if(inherits(x, "RasterStack") == F) {stop("x is not a RasterStack object")}
if (is.null(bioclim.object) == T) {stop("value for parameter bioclim.object is missing (hint: use the ensemble.bioclim.object function)")}
#
#
# if (KML.out==T && raster::isLonLat(x)==F) {
# cat(paste("\n", "NOTE: not possible to generate KML files as Coordinate Reference System (CRS) of stack ", x@title , " is not longitude and latitude", "\n", sep = ""))
# KML.out <- FALSE
# }
#
predict.bioclim <- function(object=bioclim.object, newdata=newdata) {
lower.limits <- object$lower.limits
upper.limits <- object$upper.limits
minima <- object$minima
maxima <- object$maxima
newdata <- newdata[, which(names(newdata) %in% names(lower.limits)), drop=F]
result <- as.numeric(rep(NA, nrow(newdata)))
varnames <- names(newdata)
nvars <- ncol(newdata)
for (i in 1:nrow(newdata)) {
datai <- newdata[i,,drop=F]
resulti <- 1
j <- 0
while (resulti > 0 && j <= (nvars-1)) {
j <- j+1
focal.var <- varnames[j]
if (resulti == 1) {
lowerj <- lower.limits[which(names(lower.limits) == focal.var)]
if (datai[, j] < lowerj) {resulti <- 0.5}
upperj <- upper.limits[which(names(upper.limits) == focal.var)]
if (datai[, j] > upperj) {resulti <- 0.5}
}
minj <- minima[which(names(minima) == focal.var)]
if (datai[, j] < minj) {resulti <- 0}
maxj <- maxima[which(names(maxima) == focal.var)]
if (datai[, j] > maxj) {resulti <- 0}
}
result[i] <- resulti
}
p <- as.numeric(result)
return(p)
}
# avoid problems with non-existing directories and prepare for output
dir.create("ensembles", showWarnings = F)
# if (KML.out == T) {dir.create("kml", showWarnings = F)}
if(length(x@title) == 0) {x@title <- "stack1"}
stack.title <- RASTER.stack.name
rasterfull <- paste("ensembles//", RASTER.object.name, "_", stack.title , "_BIOCLIM_orig", sep="")
kmlfull <- paste("kml//", RASTER.object.name, "_", stack.title , "_BIOCLIM_orig", sep="")
#
# predict
if (CATCH.OFF == F) {
tryCatch(bioclim.raster <- raster::predict(object=x, model=bioclim.object, fun=predict.bioclim, na.rm=TRUE,
filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format),
error= function(err) {print(paste("prediction of bioclim failed"))},
silent=F)
}else{
bioclim.raster <- raster::predict(object=x, model=bioclim.object, fun=predict.bioclim, na.rm=TRUE,
filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format)
}
# bioclim.raster <- trunc(1000*bioclim.raster)
# cat(paste("\n", "raster layer created (probabilities multiplied by 1000)", "\n", sep = ""))
# raster::setMinMax(bioclim.raster)
print(bioclim.raster)
#
# avoid possible problems with saving of names of the raster layers
# not done any longer as default is GTiff from DEC-2022
# raster::writeRaster(bioclim.raster, filename="working.grd", overwrite=T)
# working.raster <- raster::raster("working.grd")
# names(working.raster) <- paste(RASTER.object.name, "_", stack.title , "_BIOCLIM_orig", sep="")
# raster::writeRaster(working.raster, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format)
#
# if (KML.out == T) {
# working.raster <- trunc(1000*working.raster)
# raster::KML(working.raster, filename=kmlfull, col = c("grey", "blue", "green"), colNA = 0,
# blur=KML.blur, maxpixels=KML.maxpixels, overwrite=T, breaks = c(-0.1, 0, 0.5, 1.0))
# }
cat(paste("\n", "bioclim raster provided in folder: ", getwd(), "//ensembles", "\n", sep=""))
return(bioclim.raster)
}
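# Hedged usage sketch (not part of the package code): fit the original BIOCLIM envelope
# from presence locations and map it. predictor.stack (RasterStack of continuous layers)
# and presence.xy (2-column data.frame of coordinates) are illustrative names.
if (FALSE) {
    bc.object <- ensemble.bioclim.object(x=predictor.stack, p=presence.xy,
        fraction=0.9, quantiles=TRUE, species.name="Species001")
    bc.raster <- ensemble.bioclim(x=predictor.stack, bioclim.object=bc.object)
    # cell values: 1 = within the limits, 0.5 = outside the limits but within the
    # observed range, 0 = outside the observed range
    raster::plot(bc.raster)
}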
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/ensemble.bioclim.R
|
`ensemble.bioclim.graph.data` <- function(
x=NULL, p=NULL, fraction = 0.9,
species.climate.name="Species001_base", factors = NULL
)
{
.BiodiversityR <- new.env()
# if (! require(dismo)) {stop("Please install the dismo package")}
if (is.null(x) == T) {stop("value for parameter xn is missing (RasterStack object)")}
if(inherits(x, "RasterStack") == F) {stop("x is not a RasterStack object")}
if(fraction < 0 || fraction > 1) {stop("fraction should be in range 0-1")}
bioclim.object <- ensemble.bioclim.object(x=x, p=p, fraction=fraction, quantiles=T, species.name=species.climate.name, factors=factors)
vars <- names(bioclim.object$means)
nv <- length(vars)
range.data <- data.frame(array(dim=c(nv, 8)) )
names(range.data) <- c("species_climate", "biovar", "mean", "median", "min", "max", "lower.limits", "upper.limits")
range.data[, "species_climate"] <- rep(species.climate.name, nv)
range.data[, "biovar"] <- names(bioclim.object$means)
range.data[ , "mean"] <- bioclim.object$means
range.data[ , "median"] <- bioclim.object$medians
range.data[ , "min"] <- bioclim.object$minima
range.data[ , "max"] <- bioclim.object$maxima
range.data[ , "lower.limits"] <- bioclim.object$lower.limits
range.data[ , "upper.limits"] <- bioclim.object$upper.limits
return(range.data)
}
`ensemble.bioclim.graph` <- function(
graph.data=NULL, focal.var=NULL, species.climates.subset=NULL, cols=NULL,
var.multiply=1.0, ref.lines=TRUE
)
{
if (is.null(species.climates.subset) == T) {
species.climate.subset <- as.character(graph.data[, "species_climate"])
species.climate.subset <- species.climate.subset[duplicated(species.climate.subset) == F]
}else{
species.climate.subset <- as.character(species.climates.subset)
}
if (is.null(cols) == F) {
if (length(cols) != length(species.climate.subset)) {stop("different number of colours than number of species and climates to be plotted")}
}else{
cols <- grDevices::rainbow(n=length(species.climate.subset), start = 0, end = 5/6)
}
graph.data <- graph.data[which(graph.data[, "biovar"] == focal.var), , drop=F]
graph.data <- graph.data[match(species.climate.subset, graph.data[, "species_climate"]), , drop=F]
graph.data[, "min"] <- graph.data[, "min"] * var.multiply
graph.data[, "max"] <- graph.data[, "max"] * var.multiply
graph.data[, "median"] <- graph.data[, "median"] * var.multiply
graph.data[, "mean"] <- graph.data[, "mean"] * var.multiply
graph.data[, "lower.limits"] <- graph.data[, "lower.limits"] * var.multiply
graph.data[, "upper.limits"] <- graph.data[, "upper.limits"] * var.multiply
nsc <- length(species.climate.subset)
x1pos <- length(species.climate.subset)+0.5
graphics::plot(graph.data[, "min"] ~ c(1:nsc), main=focal.var, xlim=c(0.5, x1pos), ylim=c(min(graph.data[, "min"]), max(graph.data[, "max"])), axes=F, xlab="", ylab="", type="n", pch=4, cex=1.2)
graphics::axis(1, pos=min(graph.data[, "min"]), labels=species.climate.subset, at=c(1:nsc), las=2, cex.axis=1)
graphics::segments(x0=0.5, y0=min(graph.data[, "min"]), x1=x1pos, y1=min(graph.data[, "min"]))
graphics::axis(2, pos=0.5, labels=T, las=1, cex.axis=1)
graphics::segments(x0=0.5, y0=min(graph.data[, "min"]), x1=0.5, y1=max(graph.data[, "max"]))
if (ref.lines == T) {
graphics::segments(x0=0.5, y0=graph.data[1, "lower.limits"], x1=x1pos, y1=graph.data[1, "lower.limits"], lty=2, lwd=1, col="grey")
graphics::segments(x0=0.5, y0=graph.data[1, "upper.limits"], x1=x1pos, y1=graph.data[1, "upper.limits"], lty=2, lwd=1, col="grey")
}
# different colours for the species.climate subsets
for (i in 1:nsc) {
graphics::points(graph.data[i, "mean"] ~ i, pch=8, cex=2.5, col=cols[i])
graphics::points(graph.data[i, "median"] ~ i, pch=1, cex=2.5, col=cols[i])
graphics::points(graph.data[i, "min"] ~ i, pch=1, cex=1.5, col=cols[i])
graphics::points(graph.data[i, "max"] ~ i, pch=1, cex=1.5, col=cols[i])
graphics::segments(x0=i, y0=graph.data[i, "lower.limits"], x1=i, y1=graph.data[i, "upper.limits"], lty=1, col=cols[i])
graphics::segments(x0=i-0.4, y0=graph.data[i, "lower.limits"], x1=i+0.4, y1=graph.data[i, "lower.limits"], lty=1, col=cols[i])
graphics::segments(x0=i-0.4, y0=graph.data[i, "upper.limits"], x1=i+0.4, y1=graph.data[i, "upper.limits"], lty=1, col=cols[i])
}
}
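# Hedged usage sketch (not part of the package code): compare the range of one bioclimatic
# variable between a current and a future climate. current.stack, future.stack and
# presence.xy are illustrative names; var.multiply=0.1 assumes the focal variable was
# stored multiplied by 10 (as in some WorldClim temperature layers).
if (FALSE) {
    graph.current <- ensemble.bioclim.graph.data(x=current.stack, p=presence.xy,
        fraction=0.9, species.climate.name="Species001_current")
    graph.future <- ensemble.bioclim.graph.data(x=future.stack, p=presence.xy,
        fraction=0.9, species.climate.name="Species001_future")
    graph.all <- rbind(graph.current, graph.future)
    ensemble.bioclim.graph(graph.data=graph.all, focal.var="bio5", var.multiply=0.1)
}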
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/ensemble.bioclim.graph.R
|
`ensemble.spatialBlock` <- function(
x=NULL, p=NULL,
a=NULL, an=1000, EPSG=NULL,
excludep=FALSE, target.groups=FALSE, k=4,
factors=NULL,
theRange=NULL, return.object=FALSE, ...
)
{
# Function to assign presence and background data to spatially separated folds via blockCV::spatialBlock
ensemble.data <- ensemble.calibrate.models(x=x, p=p, a=a, an=an,
SSB.reduce=FALSE,
excludep=excludep, target.groups=target.groups, k=0,
ENSEMBLE.tune=F,
MAXENT=0, MAXNET=0, MAXLIKE=0, GBM=0, GBMSTEP=0, RF=0, CF=0,
GLM=0, GLMSTEP=0, GAM=0, GAMSTEP=0, MGCV=0, MGCVFIX=0,
EARTH=0, RPART=0, NNET=0, FDA=0, SVM=0, SVME=0, GLMNET=0,
BIOCLIM.O=0, BIOCLIM=0, DOMAIN=0, MAHAL=0, MAHAL01=0,
factors=factors,
evaluations.keep=TRUE)
p.new <- as.data.frame(ensemble.data$evaluations$p)
a.new <- as.data.frame(ensemble.data$evaluations$a)
names(a.new) <- names(p.new)
PA.input <- data.frame(pb=c(rep(1, nrow(p.new)), rep(0, nrow(a.new))), rbind(p.new, a.new))
# PA.Spatial <- sp::SpatialPointsDataFrame(PA.input[, c(2:3)], data=PA.input, proj4string=raster::crs(x))
PA.Spatial <- sf::st_as_sf(PA.input, coords=names(a.new), crs=raster::crs(x))
if (is.null(EPSG) == FALSE) {sf::st_crs(PA.Spatial) <- EPSG}
sb1 <- blockCV::spatialBlock(speciesData=PA.Spatial, species="pb", theRange=theRange, k=k, ...)
k <- list(p=p.new, a=a.new, groupp=sb1$foldID[PA.input$pb == 1], groupa=sb1$foldID[PA.input$pb == 0])
if (return.object == F) {
return(k)
}else{
results <- list(k=k, block.object=sb1, speciesData=PA.Spatial)
return(results)
}
}
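# Hedged usage sketch (not part of the package code): assign presence and background
# points to 4 spatially separated folds with 200-km blocks. predictor.stack and
# presence.xy are illustrative names; the returned k list can be passed as the k
# argument of ensemble.calibrate.weights.
if (FALSE) {
    block.result <- ensemble.spatialBlock(x=predictor.stack, p=presence.xy,
        an=1000, k=4, theRange=200000, return.object=TRUE)
    table(block.result$k$groupp)
}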
`ensemble.envBlock` <- function(
x=NULL, p=NULL,
a=NULL, an=1000, EPSG=NULL,
excludep=FALSE, target.groups=FALSE, k=4,
factors=NULL,
return.object=FALSE, ...
)
{
# Function to assign presence and background data to spatially separated folds via blockCV::envBlock
ensemble.data <- ensemble.calibrate.models(x=x, p=p, a=a, an=an,
SSB.reduce=FALSE,
excludep=excludep, target.groups=target.groups, k=0,
ENSEMBLE.tune=F,
MAXENT=0, MAXNET=0, MAXLIKE=0, GBM=0, GBMSTEP=0, RF=0, CF=0,
GLM=0, GLMSTEP=0, GAM=0, GAMSTEP=0, MGCV=0, MGCVFIX=0,
EARTH=0, RPART=0, NNET=0, FDA=0, SVM=0, SVME=0, GLMNET=0,
BIOCLIM.O=0, BIOCLIM=0, DOMAIN=0, MAHAL=0, MAHAL01=0,
factors=factors,
evaluations.keep=TRUE)
p.new <- as.data.frame(ensemble.data$evaluations$p)
a.new <- as.data.frame(ensemble.data$evaluations$a)
names(a.new) <- names(p.new)
PA.input <- data.frame(pb=c(rep(1, nrow(p.new)), rep(0, nrow(a.new))), rbind(p.new, a.new))
PA.Spatial <- sp::SpatialPointsDataFrame(PA.input[, c(2:3)], data=PA.input, proj4string=raster::crs(x))
if (is.null(EPSG) == FALSE) {sf::st_crs(PA.Spatial) <- EPSG}
eb1 <- blockCV::envBlock(rasterLayer=x, speciesData=PA.Spatial, species="pb", k=k, ...)
k <- list(p=p.new, a=a.new, groupp=eb1$foldID[PA.input$pb == 1], groupa=eb1$foldID[PA.input$pb == 0])
if (return.object == F) {
return(k)
}else{
results <- list(k=k, block.object=eb1, speciesData=PA.Spatial)
return(results)
}
}
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/ensemble.blockCV.R
|
`ensemble.calibrate.models` <- function(
x=NULL, p=NULL,
a=NULL, an=1000, excludep=FALSE, target.groups=FALSE,
k=0, pt=NULL, at=NULL, SSB.reduce=FALSE, CIRCLES.d=250000,
TrainData=NULL, TestData=NULL,
VIF=FALSE, COR=FALSE,
SINK=FALSE, PLOTS=FALSE, CATCH.OFF=FALSE,
threshold.method="spec_sens", threshold.sensitivity=0.9, threshold.PresenceAbsence=FALSE,
evaluations.keep=FALSE,
models.list=NULL, models.keep=FALSE,
models.save=FALSE, species.name="Species001",
ENSEMBLE.tune=FALSE,
ENSEMBLE.best=0, ENSEMBLE.min=0.7, ENSEMBLE.exponent=1.0, ENSEMBLE.weight.min=0.05,
input.weights=NULL,
MAXENT=1, MAXNET=1, MAXLIKE=1, GBM=1, GBMSTEP=1, RF=1, CF=1,
GLM=1, GLMSTEP=1, GAM=1, GAMSTEP=1, MGCV=1, MGCVFIX=0,
EARTH=1, RPART=1, NNET=1, FDA=1, SVM=1, SVME=1, GLMNET=1,
BIOCLIM.O=0, BIOCLIM=1, DOMAIN=1, MAHAL=1, MAHAL01=1,
PROBIT=FALSE,
Yweights="BIOMOD",
layer.drops=NULL, factors=NULL, dummy.vars=NULL,
formulae.defaults=TRUE, maxit=100,
MAXENT.a=NULL, MAXENT.an=10000,
MAXENT.path=paste(getwd(), "/models/maxent_", species.name, sep=""),
MAXNET.classes="default", MAXNET.clamp=FALSE, MAXNET.type="cloglog",
MAXLIKE.formula=NULL, MAXLIKE.method="BFGS",
GBM.formula=NULL, GBM.n.trees=2001,
GBMSTEP.gbm.x=2:(ncol(TrainData.orig)), GBMSTEP.tree.complexity=5, GBMSTEP.learning.rate=0.005,
GBMSTEP.bag.fraction=0.5, GBMSTEP.step.size=100,
RF.formula=NULL, RF.ntree=751, RF.mtry=floor(sqrt(ncol(TrainData.vars))),
CF.formula=NULL, CF.ntree=751, CF.mtry=floor(sqrt(ncol(TrainData.vars))),
GLM.formula=NULL, GLM.family=binomial(link="logit"),
GLMSTEP.steps=1000, STEP.formula=NULL, GLMSTEP.scope=NULL, GLMSTEP.k=2,
GAM.formula=NULL, GAM.family=binomial(link="logit"),
GAMSTEP.steps=1000, GAMSTEP.scope=NULL, GAMSTEP.pos=1,
MGCV.formula=NULL, MGCV.select=FALSE,
MGCVFIX.formula=NULL,
EARTH.formula=NULL, EARTH.glm=list(family=binomial(link="logit"), maxit=maxit),
RPART.formula=NULL, RPART.xval=50,
NNET.formula=NULL, NNET.size=8, NNET.decay=0.01,
FDA.formula=NULL,
SVM.formula=NULL,
SVME.formula=NULL,
GLMNET.nlambda=100, GLMNET.class=FALSE,
BIOCLIM.O.fraction=0.9,
MAHAL.shape=1
)
{
.BiodiversityR <- new.env()
# if (! require(dismo)) {stop("Please install the dismo package")}
#
if (is.list(k) == F) {
k <- as.integer(k)
k.listed <- FALSE
}else{
k.listed <- TRUE
k.list <- k
k <- max(k.list$groupp)
}
x.was.terra <- FALSE
# check data
if (is.null(TrainData) == T) {
if(is.null(x) == T) {stop("value for parameter x is missing (raster::RasterStack or terra::rast object)")}
if(inherits(x,"RasterStack") == FALSE && inherits(x, "SpatRaster") == FALSE) {stop("x is not a RasterStack or SpatRaster object")}
if(inherits(x, "SpatRaster")) {
cat(paste("\n", "NOTE: raster procedures will be done via the raster package, not terra", "\n", sep = ""))
cat(paste("This also allows usage of dismo::prepareData internally.", "\n", sep = ""))
x <- raster::stack(x)
x.was.terra <- TRUE
}
if(is.null(p) == T) {stop("presence locations are missing (parameter p)")}
}
if(is.null(p) == F) {
p <- data.frame(p)
names(p) <- c("x", "y")
}
if(is.null(a) == F) {
a <- data.frame(a)
names(a) <- c("x", "y")
}
if(is.null(pt) == F) {
pt <- data.frame(pt)
names(pt) <- c("x", "y")
}
if(is.null(at) == F) {
at <- data.frame(at)
names(at) <- c("x", "y")
}
if(is.null(MAXENT.a) == F) {
MAXENT.a <- data.frame(MAXENT.a)
names(MAXENT.a) <- c("x", "y")
}
#
if(models.save==T) {
models.keep <- TRUE
dir.create("models", showWarnings = F)
}
# create output file
dir.create("outputs", showWarnings = F)
paste.file <- paste(getwd(), "/outputs/", species.name, "_output.txt", sep="")
OLD.SINK <- TRUE
if (sink.number(type="output") == 0) {OLD.SINK <- F}
if (SINK==T && OLD.SINK==F) {
if (file.exists(paste.file) == F) {
cat(paste("\n", "NOTE: results captured in file: ", paste.file, "\n", sep = ""))
}else{
cat(paste("\n", "NOTE: results appended in file: ", paste.file, "\n", sep = ""))
}
cat(paste(sep="\n\n", "RESULTS (ensemble.calibrate.models function)"), file=paste.file, sep="\n\n", append=T)
sink(file=paste.file, append=T)
cat(paste(date()), sep="\n")
print(match.call())
cat(paste(" "), sep="\n")
}
# check TrainData
if (is.null(TrainData) == F) {
TrainData <- data.frame(TrainData)
if (names(TrainData)[1] !="pb") {stop("first column for TrainData should be 'pb' containing presence (1) and absence (0) data")}
if (raster::nlayers(x) != (ncol(TrainData)-1)) {
cat(paste("\n", "WARNING: different number of explanatory variables in rasterStack and TrainData", sep = ""))
}
}
# modify list of variables
# if TrainData is provided, then this data set takes precedence over raster x in the selection of variables
#
# modify TrainData if layer.drops
if (is.null(TrainData) == F) {
if (is.null(layer.drops) == F) {
vars <- names(TrainData)
layer.drops <- as.character(layer.drops)
dummy.vars <- as.character(dummy.vars)
nd <- length(layer.drops)
for (i in 1:nd) {
if (any(vars==layer.drops[i]) == FALSE) {
cat(paste("\n", "WARNING: variable to exclude '", layer.drops[i], "' not among columns of TrainData", sep = ""))
}else{
cat(paste("\n", "NOTE: variable '", layer.drops[i], "' will not be included as explanatory variable", sep = ""))
TrainData <- TrainData[, which(names(TrainData) != layer.drops[i]), drop=F]
if (is.null(TestData) == F) {TestData <- TestData[, which(names(TestData) != layer.drops[i]), drop=F]}
vars <- names(TrainData)
if (length(factors) > 0) {
factors <- factors[factors != layer.drops[i]]
}
if (length(dummy.vars) > 0) {
dummy.vars <- dummy.vars[dummy.vars != layer.drops[i]]
}
}
}
if(length(layer.drops) == 0) {layer.drops <- NULL}
if(length(factors) == 0) {factors <- NULL}
if(length(dummy.vars) == 0) {dummy.vars <- NULL}
}
if (is.null(factors) == F) {
vars <- names(TrainData)
factors <- as.character(factors)
nf <- length(factors)
old.factors <- factors
for (i in 1:nf) {
if (any(vars==old.factors[i]) == FALSE) {
cat(paste("\n", "WARNING: categorical variable '", old.factors[i], "' not among columns of TrainData", sep = ""))
factors <- factors[factors != old.factors[i]]
}
}
if(length(factors) == 0) {factors <- NULL}
}
if (is.null(dummy.vars) == F) {
vars <- names(TrainData)
dummy.vars <- as.character(dummy.vars)
nf <- length(dummy.vars)
old.dummy.vars <- dummy.vars
for (i in 1:nf) {
if (any(vars==old.dummy.vars[i]) == FALSE) {
cat(paste("\n", "WARNING: dummy variable '", old.dummy.vars[i], "' not among columns of TrainData", sep = ""))
dummy.vars <- dummy.vars[dummy.vars != old.dummy.vars[i]]
}
}
if(length(dummy.vars) == 0) {dummy.vars <- NULL}
}
}
#
# modify RasterStack x only if this RasterStack was provided
if (is.null(x) == F) {
# same variables as TrainData in the rasterstack
if (is.null(TrainData) == F) {
vars <- names(TrainData)
vars <- vars[which(vars!="pb")]
x <- raster::subset(x, subset=vars)
x <- raster::stack(x)
}
if (is.null(TrainData) == T) {
if (is.null(layer.drops) == F) {
vars <- names(x)
layer.drops <- as.character(layer.drops)
factors <- as.character(factors)
dummy.vars <- as.character(dummy.vars)
nd <- length(layer.drops)
for (i in 1:nd) {
if (any(vars==layer.drops[i])==FALSE) {
cat(paste("\n", "WARNING: variable to exclude '", layer.drops[i], "' not among grid layers", sep = ""))
}else{
cat(paste("\n", "NOTE: variable '", layer.drops[i], "' will not be included as explanatory variable", sep = ""))
x <- raster::dropLayer(x, which(names(x) %in% c(layer.drops[i]) ))
x <- raster::stack(x)
vars <- names(x)
if (length(factors) > 0) {
factors <- factors[factors != layer.drops[i]]
}
if (length(dummy.vars) > 0) {
dummy.vars <- dummy.vars[dummy.vars != layer.drops[i]]
}
}
}
if(length(layer.drops) == 0) {layer.drops <- NULL}
if(length(factors) == 0) {factors <- NULL}
if(length(dummy.vars) == 0) {dummy.vars <- NULL}
}
if (is.null(factors) == F) {
vars <- names(x)
factors <- as.character(factors)
nf <- length(factors)
old.factors <- factors
for (i in 1:nf) {
if (any(vars==old.factors[i])==FALSE) {
cat(paste("\n", "WARNING: categorical variable '", old.factors[i], "' not among grid layers", sep = ""))
factors <- factors[factors != old.factors[i]]
}
}
if(length(factors) == 0) {factors <- NULL}
}
if (is.null(dummy.vars) == F) {
vars <- names(x)
dummy.vars <- as.character(dummy.vars)
nf <- length(dummy.vars)
old.dummy.vars <- dummy.vars
for (i in 1:nf) {
if (any(vars==old.dummy.vars[i]) == FALSE) {
cat(paste("\n", "WARNING: dummy variable '", old.dummy.vars[i], "' not among grid layers", sep = ""))
dummy.vars <- dummy.vars[dummy.vars != old.dummy.vars[i]]
}
}
if(length(dummy.vars) == 0) {dummy.vars <- NULL}
}
}
# set minimum and maximum values
for (i in 1:raster::nlayers(x)) {
x[[i]] <- raster::setMinMax(x[[i]])
}
# declare factor layers
if(is.null(factors)==F) {
for (i in 1:length(factors)) {
j <- which(names(x) == factors[i])
x[[j]] <- raster::as.factor(x[[j]])
}
}
}
#
if (is.null(input.weights) == F) {
MAXENT <- max(c(input.weights["MAXENT"], -1), na.rm=T)
MAXNET <- max(c(input.weights["MAXNET"], -1), na.rm=T)
MAXLIKE <- max(c(input.weights["MAXLIKE"], -1), na.rm=T)
GBM <- max(c(input.weights["GBM"], -1), na.rm=T)
GBMSTEP <- max(c(input.weights["GBMSTEP"], -1), na.rm=T)
RF <- max(c(input.weights["RF"], -1), na.rm=T)
CF <- max(c(input.weights["CF"], -1), na.rm=T)
GLM <- max(c(input.weights["GLM"], -1), na.rm=T)
GLMSTEP <- max(c(input.weights["GLMSTEP"], -1), na.rm=T)
GAM <- max(c(input.weights["GAM"], -1), na.rm=T)
GAMSTEP <- max(c(input.weights["GAMSTEP"], -1), na.rm=T)
MGCV <- max(c(input.weights["MGCV"], -1), na.rm=T)
MGCVFIX <- max(c(input.weights["MGCVFIX"], -1), na.rm=T)
EARTH <- max(c(input.weights["EARTH"], -1), na.rm=T)
RPART <- max(c(input.weights["RPART"], -1), na.rm=T)
NNET <- max(c(input.weights["NNET"], -1), na.rm=T)
FDA <- max(c(input.weights["FDA"], -1), na.rm=T)
SVM <- max(c(input.weights["SVM"], -1), na.rm=T)
SVME <- max(c(input.weights["SVME"], -1), na.rm=T)
GLMNET <- max(c(input.weights["GLMNET"], -1), na.rm=T)
BIOCLIM.O <- max(c(input.weights["BIOCLIM.O"], -1), na.rm=T)
BIOCLIM <- max(c(input.weights["BIOCLIM"], -1), na.rm=T)
DOMAIN <- max(c(input.weights["DOMAIN"], -1), na.rm=T)
MAHAL <- max(c(input.weights["MAHAL"], -1), na.rm=T)
MAHAL01 <- max(c(input.weights["MAHAL01"], -1), na.rm=T)
}
ws <- as.numeric(c(MAXENT, MAXNET, MAXLIKE, GBM, GBMSTEP, RF, CF, GLM, GLMSTEP, GAM, GAMSTEP, MGCV,
MGCVFIX, EARTH, RPART, NNET, FDA, SVM, SVME, GLMNET,
BIOCLIM.O, BIOCLIM, DOMAIN, MAHAL, MAHAL01))
names(ws) <- c("MAXENT", "MAXNET", "MAXLIKE", "GBM", "GBMSTEP", "RF", "CF", "GLM", "GLMSTEP", "GAM", "GAMSTEP", "MGCV",
"MGCVFIX", "EARTH", "RPART", "NNET", "FDA", "SVM", "SVME", "GLMNET",
"BIOCLIM.O", "BIOCLIM", "DOMAIN", "MAHAL", "MAHAL01")
ws <- ensemble.weights(weights=ws, exponent=1, best=0, min.weight=0)
#
thresholds <- c(ws, -1)
names(thresholds) <- c(names(ws), "ENSEMBLE")
AUC.calibration <- thresholds
AUC.calibration[] <- NA
AUC.testing <- AUC.calibration
#
#
MAXENT.OLD <- MAXNET.OLD <- MAXLIKE.OLD <- GBM.OLD <- GBMSTEP.OLD <- RF.OLD <- CF.OLD <- GLM.OLD <- GLMSTEP.OLD <- GAM.OLD <- GAMSTEP.OLD <- MGCV.OLD <- NULL
MGCVFIX.OLD <- EARTH.OLD <- RPART.OLD <- NNET.OLD <- FDA.OLD <- SVM.OLD <- SVME.OLD <- GLMNET.OLD <- BIOCLIM.O.OLD <- BIOCLIM.OLD <- DOMAIN.OLD <- MAHAL.OLD <- MAHAL01.OLD <- NULL
# probit models, NULL if no probit model fitted
MAXENT.PROBIT.OLD <- MAXNET.PROBIT.OLD <- MAXLIKE.PROBIT.OLD <- GBM.PROBIT.OLD <- GBMSTEP.PROBIT.OLD <- RF.PROBIT.OLD <- CF.PROBIT.OLD <- GLM.PROBIT.OLD <- GLMSTEP.PROBIT.OLD <- GAM.PROBIT.OLD <- GAMSTEP.PROBIT.OLD <- MGCV.PROBIT.OLD <- NULL
MGCVFIX.PROBIT.OLD <- EARTH.PROBIT.OLD <- RPART.PROBIT.OLD <- NNET.PROBIT.OLD <- FDA.PROBIT.OLD <- SVM.PROBIT.OLD <- SVME.PROBIT.OLD <- GLMNET.PROBIT.OLD <- BIOCLIM.O.PROBIT.OLD <- BIOCLIM.PROBIT.OLD <- DOMAIN.PROBIT.OLD <- MAHAL.PROBIT.OLD <- MAHAL01.PROBIT.OLD <- NULL
if (is.null(models.list) == F) {
if (is.null(models.list$MAXENT) == F) {MAXENT.OLD <- models.list$MAXENT}
if (is.null(models.list$MAXNET) == F) {MAXNET.OLD <- models.list$MAXNET}
if (is.null(models.list$MAXLIKE) == F) {MAXLIKE.OLD <- models.list$MAXLIKE}
if (is.null(models.list$GBM) == F) {GBM.OLD <- models.list$GBM}
if (is.null(models.list$GBMSTEP) == F) {GBMSTEP.OLD <- models.list$GBMSTEP}
if (is.null(models.list$RF) == F) {RF.OLD <- models.list$RF}
if (is.null(models.list$CF) == F) {CF.OLD <- models.list$CF}
if (is.null(models.list$GLM) == F) {GLM.OLD <- models.list$GLM}
if (is.null(models.list$GLMSTEP) == F) {GLMSTEP.OLD <- models.list$GLMSTEP}
if (is.null(models.list$GAM) == F) {GAM.OLD <- models.list$GAM}
if (is.null(models.list$GAMSTEP) == F) {GAMSTEP.OLD <- models.list$GAMSTEP}
if (is.null(models.list$MGCV) == F) {MGCV.OLD <- models.list$MGCV}
if (is.null(models.list$MGCVFIX) == F) {MGCVFIX.OLD <- models.list$MGCVFIX}
if (is.null(models.list$EARTH) == F) {EARTH.OLD <- models.list$EARTH}
if (is.null(models.list$RPART) == F) {RPART.OLD <- models.list$RPART}
if (is.null(models.list$NNET) == F) {NNET.OLD <- models.list$NNET}
if (is.null(models.list$FDA) == F) {FDA.OLD <- models.list$FDA}
if (is.null(models.list$SVM) == F) {SVM.OLD <- models.list$SVM}
if (is.null(models.list$SVME) == F) {SVME.OLD <- models.list$SVME}
if (is.null(models.list$GLMNET) == F) {GLMNET.OLD <- models.list$GLMNET}
if (is.null(models.list$BIOCLIM.O) == F) {BIOCLIM.O.OLD <- models.list$BIOCLIM.O}
if (is.null(models.list$BIOCLIM) == F) {BIOCLIM.OLD <- models.list$BIOCLIM}
if (is.null(models.list$DOMAIN) == F) {DOMAIN.OLD <- models.list$DOMAIN}
if (is.null(models.list$MAHAL) == F) {MAHAL.OLD <- models.list$MAHAL}
if (is.null(models.list$MAHAL01) == F) {MAHAL01.OLD <- models.list$MAHAL01}
# probit models
if (is.null(models.list$MAXENT.PROBIT) == F) {MAXENT.PROBIT.OLD <- models.list$MAXENT.PROBIT}
if (is.null(models.list$MAXNET.PROBIT) == F) {MAXNET.PROBIT.OLD <- models.list$MAXNET.PROBIT}
if (is.null(models.list$MAXLIKE.PROBIT) == F) {MAXLIKE.PROBIT.OLD <- models.list$MAXLIKE.PROBIT}
if (is.null(models.list$GBM.PROBIT) == F) {GBM.PROBIT.OLD <- models.list$GBM.PROBIT}
if (is.null(models.list$GBMSTEP.PROBIT) == F) {GBMSTEP.PROBIT.OLD <- models.list$GBMSTEP.PROBIT}
if (is.null(models.list$RF.PROBIT) == F) {RF.PROBIT.OLD <- models.list$RF.PROBIT}
if (is.null(models.list$CF.PROBIT) == F) {CF.PROBIT.OLD <- models.list$CF.PROBIT}
if (is.null(models.list$GLM.PROBIT) == F) {GLM.PROBIT.OLD <- models.list$GLM.PROBIT}
if (is.null(models.list$GLMSTEP.PROBIT) == F) {GLMSTEP.PROBIT.OLD <- models.list$GLMSTEP.PROBIT}
if (is.null(models.list$GAM.PROBIT) == F) {GAM.PROBIT.OLD <- models.list$GAM.PROBIT}
if (is.null(models.list$GAMSTEP.PROBIT) == F) {GAMSTEP.PROBIT.OLD <- models.list$GAMSTEP.PROBIT}
if (is.null(models.list$MGCV.PROBIT) == F) {MGCV.PROBIT.OLD <- models.list$MGCV.PROBIT}
if (is.null(models.list$MGCVFIX.PROBIT) == F) {MGCVFIX.PROBIT.OLD <- models.list$MGCVFIX.PROBIT}
if (is.null(models.list$EARTH.PROBIT) == F) {EARTH.PROBIT.OLD <- models.list$EARTH.PROBIT}
if (is.null(models.list$RPART.PROBIT) == F) {RPART.PROBIT.OLD <- models.list$RPART.PROBIT}
if (is.null(models.list$NNET.PROBIT) == F) {NNET.PROBIT.OLD <- models.list$NNET.PROBIT}
if (is.null(models.list$FDA.PROBIT) == F) {FDA.PROBIT.OLD <- models.list$FDA.PROBIT}
if (is.null(models.list$SVM.PROBIT) == F) {SVM.PROBIT.OLD <- models.list$SVM.PROBIT}
if (is.null(models.list$SVME.PROBIT) == F) {SVME.PROBIT.OLD <- models.list$SVME.PROBIT}
if (is.null(models.list$GLMNET.PROBIT) == F) {GLMNET.PROBIT.OLD <- models.list$GLMNET.PROBIT}
if (is.null(models.list$BIOCLIM.O.PROBIT) == F) {BIOCLIM.O.PROBIT.OLD <- models.list$BIOCLIM.O.PROBIT}
if (is.null(models.list$BIOCLIM.PROBIT) == F) {BIOCLIM.PROBIT.OLD <- models.list$BIOCLIM.PROBIT}
if (is.null(models.list$DOMAIN.PROBIT) == F) {DOMAIN.PROBIT.OLD <- models.list$DOMAIN.PROBIT}
if (is.null(models.list$MAHAL.PROBIT) == F) {MAHAL.PROBIT.OLD <- models.list$MAHAL.PROBIT}
if (is.null(models.list$MAHAL01.PROBIT) == F) {MAHAL01.PROBIT.OLD <- models.list$MAHAL01.PROBIT}
}
# check formulae and packages
if (formulae.defaults == T) {
if (is.null(TrainData) == T) {
formulae <- ensemble.formulae(x, layer.drops=layer.drops, factors=factors, dummy.vars=dummy.vars, weights=ws)
}else{
formulae <- ensemble.formulae(TrainData, layer.drops=layer.drops, factors=factors, dummy.vars=dummy.vars, weights=ws)
}
}
if (ws["MAXENT"] > 0) {
jar <- paste(system.file(package="dismo"), "/java/maxent.jar", sep='')
if (!file.exists(jar)) {stop('maxent program is missing: ', jar, '\n', 'Please download it here: http://www.cs.princeton.edu/~schapire/maxent/')}
}
if (ws["MAXNET"] > 0) {
if (! requireNamespace("maxnet")) {stop("Please install the maxnet package")}
predict.maxnet2 <- function(object, newdata, clamp=F, type=c("cloglog")) {
p <- predict(object=object, newdata=newdata, clamp=clamp, type=type)
return(as.numeric(p))
}
}
if (ws["MAXLIKE"] > 0) {
if (! requireNamespace("maxlike")) {stop("Please install the maxlike package")}
if (is.null(MAXLIKE.formula) == T && formulae.defaults == T) {MAXLIKE.formula <- formulae$MAXLIKE.formula}
if (is.null(MAXLIKE.formula) == T) {
cat(paste("\n", "NOTE: not possible to calibrate MAXLIKE as no explanatory variables available" ,"\n", sep = ""))
ws["MAXLIKE"] <- 0
}else{
environment(MAXLIKE.formula) <- .BiodiversityR
}
}
if (ws["GBM"] > 0) {
if (! requireNamespace("gbm")) {stop("Please install the gbm package")}
requireNamespace("splines")
if (is.null(GBM.formula) == T && formulae.defaults == T) {GBM.formula <- formulae$GBM.formula}
if (is.null(GBM.formula) == T) {
cat(paste("\n", "NOTE: not possible to calibrate GBM as no explanatory variables available" ,"\n", sep = ""))
ws["GBM"] <- 0
}else{
environment(GBM.formula) <- .BiodiversityR
}
}
if (ws["GBMSTEP"] > 0) {
# if (! require(gbm)) {stop("Please install the gbm package")}
}
if (ws["RF"] > 0) {
# if (! require(randomForest)) {stop("Please install the randomForest package")}
if (is.null(RF.formula) == T && formulae.defaults == T) {RF.formula <- formulae$RF.formula}
if (is.null(RF.formula) == T) {
cat(paste("\n", "NOTE: not possible to calibrate RF as no explanatory variables available" ,"\n", sep = ""))
ws["RF"] <- 0
}else{
environment(RF.formula) <- .BiodiversityR
if (identical(RF.ntree, trunc(RF.ntree/2)) == F) {RF.ntree <- RF.ntree + 1}
# get the probabilities from RF
predict.RF <- function(object, newdata) {
p <- predict(object=object, newdata=newdata, type="response")
return(as.numeric(p))
}
}
}
if (ws["CF"] > 0) {
if (! requireNamespace("party")) {stop("Please install the party package")}
if (is.null(CF.formula) == T && formulae.defaults == T) {CF.formula <- formulae$CF.formula}
if (is.null(CF.formula) == T) {
cat(paste("\n", "NOTE: not possible to calibrate CF as no explanatory variables available" ,"\n", sep = ""))
ws["CF"] <- 0
}else{
environment(CF.formula) <- .BiodiversityR
if (identical(CF.ntree, trunc(CF.ntree/2)) == F) {CF.ntree <- CF.ntree + 1}
# get the probabilities from CF
# note that randomForest uses predict.randomForest
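# the wrapper below returns, for each observation, the second element of the class probabilities
# from party::cforest, which should correspond to the probability of presence when 'pb' is
# modelled as a binary factor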
predict.CF <- function(object, newdata) {
# avoid problems with single variables, especially with raster::predict
for (i in 1:ncol(newdata)) {
if (is.integer(newdata[, i])) {newdata[, i] <- as.numeric(newdata[, i])}
}
p1 <- predict(object=object, newdata=newdata, type="prob")
p <- numeric(length(p1))
for (i in 1:length(p1)) {p[i] <- p1[[i]][2]}
return(as.numeric(p))
}
}
}
if (ws["GLM"] > 0) {
if (is.null(GLM.formula) == T && formulae.defaults == T) {GLM.formula <- formulae$GLM.formula}
if (is.null(GLM.formula) == T) {
cat(paste("\n", "NOTE: not possible to calibrate GLM as no explanatory variables available" ,"\n", sep = ""))
ws["GLM"] <- 0
}else{
environment(GLM.formula) <- .BiodiversityR
assign("GLM.family", GLM.family, envir=.BiodiversityR)
}
}
if (ws["GLMSTEP"] > 0) {
# if (! require(MASS)) {stop("Please install the MASS package")}
if (is.null(STEP.formula) == T && formulae.defaults == T) {STEP.formula <- formulae$STEP.formula}
if (is.null(GLMSTEP.scope) == T && formulae.defaults == T) {GLMSTEP.scope <- formulae$GLMSTEP.scope}
if (is.null(STEP.formula) == T) {
cat(paste("\n", "NOTE: not possible to calibrate GLMSTEP as no explanatory variables available" ,"\n", sep = ""))
ws["GLMSTEP"] <- 0
}else{
environment(STEP.formula) <- .BiodiversityR
assign("GLM.family", GLM.family, envir=.BiodiversityR)
}
}
if (ws["GAM"] > 0) {
if (is.null(GAM.formula) == T && formulae.defaults == T) {GAM.formula <- formulae$GAM.formula}
if (is.null(GAM.formula) == T) {
cat(paste("\n", "NOTE: not possible to calibrate GAM as no explanatory variables available" ,"\n", sep = ""))
ws["GAM"] <- 0
}else{
environment(GAM.formula) <- .BiodiversityR
assign("GAM.family", GAM.family, envir=.BiodiversityR)
}
}
if (ws["GAMSTEP"] > 0) {
if (is.null(STEP.formula) == T && formulae.defaults == T) {STEP.formula <- formulae$STEP.formula}
if (is.null(GAMSTEP.scope) == T && formulae.defaults == T) {GAMSTEP.scope <- formulae$GAMSTEP.scope}
if (is.null(STEP.formula) == T) {
cat(paste("\n", "NOTE: not possible to calibrate GAMSTEP as no explanatory variables available" ,"\n", sep = ""))
ws["GAMSTEP"] <- 0
}else{
environment(STEP.formula) <- .BiodiversityR
assign("GAM.family", GAM.family, envir=.BiodiversityR)
}
}
if (ws["MGCV"] > 0 || ws["MGCVFIX"] > 0) {
cat(paste("\n\n"))
# get the probabilities from MGCV
predict.MGCV <- function(object, newdata, type="response") {
p <- mgcv::predict.gam(object=object, newdata=newdata, type=type)
return(as.numeric(p))
}
# options(warn=0)
}
if (ws["MGCV"] > 0) {
if (is.null(MGCV.formula) == T && formulae.defaults == T) {MGCV.formula <- formulae$MGCV.formula}
if (is.null(MGCV.formula) == T) {
cat(paste("\n", "NOTE: not possible to calibrate MGCV as no explanatory variables available" ,"\n", sep = ""))
ws["MGCV"] <- 0
}else{
environment(MGCV.formula) <- .BiodiversityR
assign("GAM.family", GAM.family, envir=.BiodiversityR)
}
}
if (ws["MGCVFIX"] > 0) {
if (is.null(MGCVFIX.formula) == T && formulae.defaults == T) {MGCVFIX.formula <- formulae$MGCVFIX.formula}
if (is.null(MGCVFIX.formula) == T) {
cat(paste("\n", "NOTE: not possible to calibrate MGCVFIX as no explanatory variables available" ,"\n", sep = ""))
ws["MGCVFIX"] <- 0
}else{
environment(MGCVFIX.formula) <- .BiodiversityR
assign("GAM.family", GAM.family, envir=.BiodiversityR)
}
}
if (ws["EARTH"] > 0) {
# if (! require(earth)) {stop("Please install the earth package")}
if (is.null(EARTH.formula) == T && formulae.defaults == T) {EARTH.formula <- formulae$EARTH.formula}
if (is.null(EARTH.formula) == T) {
cat(paste("\n", "NOTE: not possible to calibrate EARTH as no explanatory variables available" ,"\n", sep = ""))
ws["EARTH"] <- 0
}else{
environment(EARTH.formula) <- .BiodiversityR
# get the probabilities from earth
predict.EARTH <- function(object, newdata, type="response") {
p <- predict(object=object, newdata=newdata, type=type)
return(as.numeric(p))
}
}
}
if (ws["RPART"] > 0) {
# if (! require(rpart)) {stop("Please install the rpart package")}
if (is.null(RPART.formula) == T && formulae.defaults == T) {RPART.formula <- formulae$RPART.formula}
if (is.null(RPART.formula) == T) {
cat(paste("\n", "NOTE: not possible to calibrate RPART as no explanatory variables available" ,"\n", sep = ""))
ws["RPART"] <- 0
}else{
environment(RPART.formula) <- .BiodiversityR
}
}
if (ws["NNET"] > 0) {
# if (! require(nnet)) {stop("Please install the nnet package")}
if (is.null(NNET.formula) == T && formulae.defaults == T) {NNET.formula <- formulae$NNET.formula}
if (is.null(NNET.formula) == T) {
cat(paste("\n", "NOTE: not possible to calibrate NNET as no explanatory variables available" ,"\n", sep = ""))
ws["NNET"] <- 0
}else{
environment(NNET.formula) <- .BiodiversityR
# get the probabilities from nnet
predict.NNET <- function(object, newdata, type="raw") {
p <- predict(object=object, newdata=newdata, type=type)
return(as.numeric(p))
}
}
}
if (ws["FDA"] > 0) {
# if (! require(mda)) {stop("Please install the mda package")}
if (is.null(FDA.formula) == T && formulae.defaults == T) {FDA.formula <- formulae$FDA.formula}
if (is.null(FDA.formula) == T) {
cat(paste("\n", "NOTE: not possible to calibrate FDA as no explanatory variables available" ,"\n", sep = ""))
ws["FDA"] <- 0
}else{
environment(FDA.formula) <- .BiodiversityR
}
}
if (ws["SVM"] > 0) {
# if (! require(kernlab)) {stop("Please install the kernlab package")}
if (is.null(SVM.formula) == T && formulae.defaults == T) {SVM.formula <- formulae$SVM.formula}
if (is.null(SVM.formula) == T) {
cat(paste("\n", "NOTE: not possible to calibrate SVM as no explanatory variables available" ,"\n", sep = ""))
ws["SVM"] <- 0
}else{
environment(SVM.formula) <- .BiodiversityR
}
}
if (ws["SVME"] > 0) {
# if (! require(e1071)) {stop("Please install the e1071 package")}
if (is.null(SVME.formula) == T && formulae.defaults == T) {SVME.formula <- formulae$SVME.formula}
if (is.null(SVME.formula) == T) {
cat(paste("\n", "NOTE: not possible to calibrate SVME as no explanatory variables available" ,"\n", sep = ""))
ws["SVME"] <- 0
}else{
environment(SVME.formula) <- .BiodiversityR
# get the probabilities from svm
predict.SVME <- function(model, newdata) {
p <- predict(model, newdata, probability=T)
return(attr(p, "probabilities")[,1])
}
}
}
if (ws["GLMNET"] > 0) {
if (! requireNamespace("glmnet")) {stop("Please install the glmnet package")}
# get the mean probabilities from glmnet
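# glmnet returns one prediction column per value of the regularization parameter lambda; the
# wrapper below therefore averages the predictions across the whole lambda path (or, with
# GLMNET.class=TRUE, reports the proportion of lambda values that vote for presence)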
predict.GLMNET <- function(model, newdata, GLMNET.class=FALSE) {
newdata <- as.matrix(newdata)
if (GLMNET.class == TRUE) {
p <- predict(model, newx=newdata, type="class", exact=T)
n.obs <- nrow(p)
nv <- ncol(p)
result <- numeric(n.obs)
for (i in 1:n.obs) {
for (j in 1:nv) {
if(p[i, j] == 1) {result[i] <- result[i] + 1}
}
}
result <- result/nv
return(result)
}else{
p <- predict(model, newx=newdata, type="response", exact=T)
n.obs <- nrow(p)
nv <- ncol(p)
result <- numeric(n.obs)
for (i in 1:n.obs) {
for (j in 1:nv) {
result[i] <- result[i] + p[i, j]
}
}
result <- result/nv
return(result)
}
}
}
if (ws["BIOCLIM.O"] > 0) {
predict.BIOCLIM.O <- function(object, newdata) {
lower.limits <- object$lower.limits
upper.limits <- object$upper.limits
minima <- object$minima
maxima <- object$maxima
#
newdata <- newdata[, which(names(newdata) %in% names(lower.limits)), drop=F]
result <- as.numeric(rep(NA, nrow(newdata)))
varnames <- names(newdata)
nvars <- ncol(newdata)
#
for (i in 1:nrow(newdata)) {
datai <- newdata[i,]
resulti <- 1
j <- 0
while (resulti > 0 && j <= (nvars-1)) {
j <- j+1
focal.var <- varnames[j]
if (resulti == 1) {
lowerj <- lower.limits[which(names(lower.limits) == focal.var)]
if (datai[, j] < lowerj) {resulti <- 0.5}
upperj <- upper.limits[which(names(upper.limits) == focal.var)]
if (datai[, j] > upperj) {resulti <- 0.5}
}
minj <- minima[which(names(minima) == focal.var)]
if (datai[, j] < minj) {resulti <- 0}
maxj <- maxima[which(names(maxima) == focal.var)]
if (datai[, j] > maxj) {resulti <- 0}
}
result[i] <- resulti
}
p <- as.numeric(result)
return(p)
}
}
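# note: the predict.BIOCLIM.O wrapper above scores a location 1 when all variables fall within
# the lower and upper (optimal) limits, 0.5 when at least one variable falls outside these limits
# but still within the overall minima and maxima, and 0 when any variable falls outside the
# minima or maxima of the calibration data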
if (ws["MAHAL"] > 0) {
# get the probabilities from mahal
predict.MAHAL <- function(model, newdata, PROBIT) {
p <- dismo::predict(object=model, x=newdata)
if (PROBIT == F) {
p[p<0] <- 0
p[p>1] <- 1
}
return(as.numeric(p))
}
}
if (ws["MAHAL01"] > 0) {
# get the probabilities from transformed mahal
predict.MAHAL01 <- function(model, newdata, MAHAL.shape) {
p <- dismo::predict(object=model, x=newdata)
p <- p - 1 - MAHAL.shape
p <- abs(p)
p <- MAHAL.shape / p
return(p)
}
}
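# note: predict.MAHAL01 above rescales the dismo::mahal output (expected to equal 1 at zero
# Mahalanobis distance and to decrease without a lower bound for poorer matches) to the 0-1
# interval via MAHAL.shape / abs(p - 1 - MAHAL.shape), which returns 1 at zero distance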
# create TrainData and TestData
#
# tuning takes precedence over different exponents
# if(length(ENSEMBLE.exponent) > 1 || length(ENSEMBLE.best) > 1 || length(ENSEMBLE.min) > 1) {ENSEMBLE.tune <- TRUE}
no.tests <- FALSE
if (is.null(pt)==T && is.null(at)==T && is.null(TestData)==T && k < 2) {
no.tests <- TRUE
if (ENSEMBLE.tune == T) {
cat(paste("\n", "WARNING: not possible to tune (ENSEMBLE.tune=FALSE) as no Test Data available or will be created", sep = ""))
ENSEMBLE.tune <- FALSE
}
}
if (is.null(TrainData) == F) {
if(any(is.na(TrainData))) {
cat(paste("\n", "WARNING: sample units with missing data removed from calibration data","\n\n",sep = ""))
}
TrainValid <- complete.cases(TrainData)
TrainData <- TrainData[TrainValid,,drop=F]
if (is.null(p) == F) {
TrainValid <- complete.cases(TrainData[TrainData[,"pb"]==1,])
p <- p[TrainValid,]
}
if (is.null(a) == F) {
TrainValid <- complete.cases(TrainData[TrainData[,"pb"]==0,])
a <- a[TrainValid,]
}
if(is.null(TestData) == T) {
TestData <- TrainData
if (k > 1) {
TrainData.p <- TrainData[TrainData$pb == 1, ]
TrainData.a <- TrainData[TrainData$pb == 0, ]
if (k.listed == F) {
groupp <- dismo::kfold(TrainData.p, k=k)
groupa <- dismo::kfold(TrainData.a, k=k)
}else{
groupp <- k.list$groupp
groupa <- k.list$groupa
}
TrainData.pc <- TrainData.p[groupp != 1,]
TrainData.ac <- TrainData.a[groupa != 1,]
TestData.pc <- TrainData.p[groupp == 1,]
TestData.ac <- TrainData.a[groupa == 1,]
TrainData <- rbind(TrainData.pc, TrainData.ac)
TestData <- rbind(TestData.pc, TestData.ac)
}
}
}else{
if (is.null(a)==T) {
if (target.groups == T) {
cat(paste("\n", "WARNING: not possible for target group pseudo-absence data as 'a' (locations of all species) not specified", sep = ""))
cat(paste("\n", "Instead background locations selected randomly", "\n\n", sep = ""))
}
if (excludep == T) {
a <- dismo::randomPoints(x[[1]], n=an, p=p, excludep=T)
}else{
a <- dismo::randomPoints(x[[1]], n=an, p=NULL, excludep=F)
}
}else{
if (target.groups == T) {
cat(paste("\n", "target group (biased pseudo-absence locations) in centres of cells with locations of all target group species ('a')", "\n\n", sep = ""))
p.cell <- unique(raster::cellFromXY(x[[1]], p))
a.cell <- unique(raster::cellFromXY(x[[1]], a))
if (excludep == T) {a.cell <- a.cell[!(a.cell %in% p.cell)]}
a <- raster::xyFromCell(x[[1]], cell=a.cell, spatial=F)
}
}
if (is.null(pt)==T && is.null(TestData)) {pt <- p}
if (k > 1 && identical(pt, p) == T) {
if (k.listed == F) {
groupp <- dismo::kfold(p, k=k)
}else{
groupp <- k.list$groupp
}
pc <- p[groupp != 1,]
pt <- p[groupp == 1,]
p <- pc
}
if (is.null(at)==T && is.null(TestData)) {at <- a}
if (k > 1 && identical(at, a) == T) {
if (k.listed == F) {
groupa <- dismo::kfold(a, k=k)
}else{
groupa <- k.list$groupa
}
ac <- a[groupa != 1,]
at <- a[groupa == 1,]
a <- ac
}
# check for spatial sorting bias (are the testing absences farther away than testing presences)
if (is.null(p)==F && identical(pt, p)==F) {
sb.bias <- dismo::ssb(p=pt, a=at, reference=p)
sb.bias2 <- sb.bias[, 1]/sb.bias[, 2]
cat(paste("\n", "Spatial sorting bias (dismo package, no bias=1, extreme bias=0): ", sb.bias2, "\n", sep = ""))
}
# attempt to reduce spatial sorting bias by sampling testing absences within circles around all known presences
if (SSB.reduce == T) {
if (identical(pt, p) == T) {
cat(paste("\n", "No search for testing absences in circular neighbourhoods since no separate testing presences", sep = ""))
SSB.reduce <- FALSE
}else{
d.km <- CIRCLES.d/1000
cat(paste("\n", "Random selection of testing absences in circular neighbourhoods of ", d.km, " km", "\n", sep = ""))
pres_all <- rbind(pt, p)
circles.calibrate <- dismo::circles(p=pres_all, lonlat=raster::isLonLat(x[[1]]), d=CIRCLES.d)
circles.predicted <- dismo::predict(circles.calibrate, x[[1]])
at <- dismo::randomPoints(circles.predicted, n=nrow(at), p=pres_all, excludep=T)
sb.bias <- dismo::ssb(p=pt, a=at, reference=p)
sb.bias2 <- sb.bias[, 1]/sb.bias[, 2]
cat(paste("\n", "Spatial sorting bias with new testing absences: ", sb.bias2, "\n", sep = ""))
}
}
#
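# where only a single explanatory layer is available, the layer is temporarily duplicated with
# raster::stack(x, x) (dismo::prepareData appears to require more than one layer); the duplicate
# column is dropped again immediately afterwards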
if (length(names(x)) == 1) {
xdouble <- raster::stack(x, x)
TrainData <- dismo::prepareData(x=xdouble, p, b=a, factors=factors, xy=FALSE)
TrainData <- TrainData[, -3]
names(TrainData)[2] <- names(x)
}else{
TrainData <- dismo::prepareData(x, p, b=a, factors=factors, xy=FALSE)
}
if(any(is.na(TrainData[TrainData[,"pb"]==1,]))) {
cat(paste("\n", "WARNING: presence locations with missing data removed from calibration data","\n\n",sep = ""))
}
TrainValid <- complete.cases(TrainData[TrainData[,"pb"]==1,])
p <- p[TrainValid,]
if(any(is.na(TrainData[TrainData[,"pb"]==0,]))) {
cat(paste("\n", "WARNING: background locations with missing data removed from calibration data","\n\n",sep = ""))
}
TrainValid <- complete.cases(TrainData[TrainData[,"pb"]==0,])
a <- a[TrainValid,]
if (length(names(x)) == 1) {
xdouble <- raster::stack(x, x)
TrainData <- dismo::prepareData(x=xdouble, p, b=a, factors=factors, xy=FALSE)
TrainData <- TrainData[, -3]
names(TrainData)[2] <- names(x)
}else{
TrainData <- dismo::prepareData(x, p, b=a, factors=factors, xy=FALSE)
}
}
#
if (no.tests == F) {
if (is.null(TestData) == F) {
TestData <- data.frame(TestData)
if(any(is.na(TestData))) {
cat(paste("\n", "WARNING: sample units with missing data removed from testing data","\n\n",sep = ""))
}
TestValid <- complete.cases(TestData)
TestData <- TestData[TestValid,,drop=F]
if (is.null(pt) == F) {
TestValid <- complete.cases(TestData[TestData[,"pb"]==1,])
pt <- pt[TestValid,]
}
if (is.null(at) == F) {
TestValid <- complete.cases(TestData[TestData[,"pb"]==0,])
at <- at[TestValid,]
}
if (all(names(TestData)!="pb") == T) {stop("TestData requires a column 'pb' indicating presence (1) and absence (0)")}
}else{
if (length(names(x)) == 1) {
xdouble <- raster::stack(x, x)
TestData <- dismo::prepareData(x=xdouble, p=pt, b=at, factors=factors, xy=FALSE)
TestData <- TestData[, -3]
names(TestData)[2] <- names(x)
}else{
TestData <- dismo::prepareData(x, p=pt, b=at, factors=factors, xy=FALSE)
}
if(any(is.na(TestData[TestData[,"pb"]==1,]))) {
cat(paste("\n", "WARNING: presence locations with missing data removed from evaluation data","\n\n",sep = ""))
}
TestValid <- complete.cases(TestData[TestData[,"pb"]==1,])
pt <- pt[TestValid,]
if(any(is.na(TestData[TestData[,"pb"]==0,]))) {
cat(paste("\n", "WARNING: background locations with missing data removed from evaluation data","\n\n",sep = ""))
}
TestValid <- complete.cases(TestData[TestData[,"pb"]==0,])
at <- at[TestValid,]
if (length(names(x)) == 1) {
xdouble <- raster::stack(x, x)
TestData <- dismo::prepareData(x=xdouble, p=pt, b=at, factors=factors, xy=FALSE)
TestData <- TestData[, -3]
names(TestData)[2] <- names(x)
}else{
TestData <- dismo::prepareData(x, p=pt, b=at, factors=factors, xy=FALSE)
}
}
}
#
# check if TestData is different from TrainData
if (identical(TrainData, TestData) == T) {no.tests <- TRUE}
#
# include all possible factor levels in TrainData (especially important if models are kept)
if (is.null(factors)==F && is.null(x)==T && no.tests==F) {
for (i in 1:length(factors)) {
if (identical(levels(droplevels(TrainData[,factors[i]])), levels(droplevels(TestData[,factors[i]])))==F) {
cat(paste("\n", "WARNING: differences in factor levels between calibration and evaluation data (variable ", factors[i], ")", "\n", sep = ""))
cat(paste("Same levels set for both data sets to avoid problems with some evaluations", "\n", sep = ""))
cat(paste("However, some predictions may still fail", "\n", sep = ""))
uniquelevels <- unique(c(levels(droplevels(TrainData[,factors[i]])), levels(droplevels(TestData[,factors[i]]))))
levels(TrainData[,factors[i]]) <- uniquelevels
levels(TestData[,factors[i]]) <- uniquelevels
}
}
}
if(is.null(factors)==F && is.null(x)==F) {
if(models.keep==T) {
categories <- as.list(factors)
names(categories) <- factors
}
for (i in 1:length(factors)) {
all.categories <- raster::freq(x[[which(names(x) == factors[i])]])[,1]
all.categories <- all.categories[is.na(all.categories) == F]
all.categories <- as.character(all.categories)
if(models.keep==T) {
categories[[as.name(factors[i])]] <- all.categories
}
train.categories <- levels(droplevels(TrainData[,factors[i]]))
new.categories <- c(all.categories[is.na(match(all.categories, train.categories))])
if (length(new.categories) > 0) {
cat(paste("\n", "The following levels were initially not captured by TrainData for factor '", factors[i], "'\n", sep = ""))
print(new.categories)
if (is.null(x)==F && is.null(p)==F && is.null(a)==F) {
# step 1: search if suitable presence locations in TestData
if (no.tests == F){
for (j in 1:length(new.categories)) {
if (any(TestData[TestData[,"pb"]==1, factors[i]] == new.categories[j])) {
cat(paste("Warning: level '", new.categories[j], "' available for presence location in Test Data", "\n", sep = ""))
}
}
}
# step 2: stratified background sample
strat1 <- raster::sampleStratified(x[[which(names(x) == factors[i])]], size=1, exp=1, na.rm=TRUE, xy=FALSE)
strat1 <- strat1[which(strat1[,2] %in% new.categories), 1]
xy1 <- raster::xyFromCell(x[[which(names(x) == factors[i])]], cell=strat1, spatial=FALSE)
a <- rbind(a, xy1)
if (length(names(x)) == 1) {
xdouble <- raster::stack(x, x)
TrainData <- dismo::prepareData(x=xdouble, p, b=a, factors=factors, xy=FALSE)
TrainData <- TrainData[, -3]
names(TrainData)[2] <- names(x)
}else{
TrainData <- dismo::prepareData(x, p, b=a, factors=factors, xy=FALSE)
}
TrainValid <- complete.cases(TrainData[TrainData[,"pb"]==0,])
a <- a[TrainValid,]
if (length(names(x)) == 1) {
xdouble <- raster::stack(x, x)
TrainData <- dismo::prepareData(x=xdouble, p, b=a, factors=factors, xy=FALSE)
TrainData <- TrainData[, -3]
names(TrainData)[2] <- names(x)
}else{
TrainData <- dismo::prepareData(x, p, b=a, factors=factors, xy=FALSE)
}
train.categories <- levels(droplevels(TrainData[,factors[i]]))
new.categories <- all.categories[is.na(match(all.categories, train.categories))]
if (length(new.categories) == 0) {
cat(paste("All levels have now been included as background data for TrainData for factor '", factors[i], "'\n", sep = ""))
}else{
cat(paste("The following levels were not captured by TrainData for factor '", factors[i], "'\n", sep = ""))
print(new.categories)
cat(paste("\n", "Attempt to include these levels was complicated by missing values in other layers", "\n", sep = ""))
}
}
}
# step 3: also modify test data, but only if no circular neighbourhood
if (no.tests == F) {
test.categories <- levels(droplevels(TestData[,factors[i]]))
new.categories <- c(all.categories[is.na(match(all.categories, test.categories))])
if (length(new.categories)>0 && SSB.reduce==F) {
cat(paste("\n", "The following levels were initially not captured by TestData for factor '", factors[i], "'\n", sep = ""))
print(new.categories)
if (is.null(x)==F && is.null(pt)==F && is.null(at)==F) {
strat1 <- raster::sampleStratified(x[[which(names(x) == factors[i])]], size=1, exp=1, na.rm=TRUE, xy=FALSE)
strat1 <- strat1[which(strat1[,2] %in% new.categories), 1]
xy1 <- raster::xyFromCell(x[[which(names(x) == factors[i])]], cell=strat1, spatial=FALSE)
at <- rbind(at, xy1)
if (length(names(x)) == 1) {
xdouble <- raster::stack(x, x)
TestData <- dismo::prepareData(x=xdouble, p=pt, b=at, factors=factors, xy=FALSE)
TestData <- TestData[, -3]
names(TestData)[2] <- names(x)
}else{
TestData <- dismo::prepareData(x, p=pt, b=at, factors=factors, xy=FALSE)
}
TestValid <- complete.cases(TestData[TestData[,"pb"]==0,])
at <- at[TestValid,]
if (length(names(x)) == 1) {
xdouble <- raster::stack(x, x)
TestData <- dismo::prepareData(x=xdouble, p=pt, b=at, factors=factors, xy=FALSE)
TestData <- TestData[, -3]
names(TestData)[2] <- names(x)
}else{
TestData <- dismo::prepareData(x, p=pt, b=at, factors=factors, xy=FALSE)
}
test.categories <- levels(droplevels(TestData[,factors[i]]))
new.categories <- all.categories[is.na(match(all.categories, test.categories))]
if (length(new.categories) == 0) {
cat(paste("All levels have now been included as background data for TestData for factor '", factors[i], "'\n", sep = ""))
}else{
cat(paste("The following levels were not captured by TestData for factor '", factors[i], "'\n", sep = ""))
print(new.categories)
cat(paste("\n", "Attempt to include these levels was complicated by missing values in other layers", "\n", sep = ""))
}
}
}
if (length(new.categories)>0 && SSB.reduce==T) {
cat(paste("\n", "Note that the following levels were not captured in the circular neighbourhood by TestData for factor '", factors[i], "'\n", sep = ""))
print(new.categories)
}
}
}
}
#
factlevels <- NULL
if (is.null(factors) == F) {
factlevels <- list()
for (i in 1:length(factors)) {
factlevels[[i]] <- levels(TrainData[, factors[i]])
names(factlevels)[i] <- factors[i]
}
}
#
if (sum(ws, na.rm=T) > 0) {
cat(paste("\n", "Summary of Training data set used for calibrations (rows: ", nrow(TrainData), ")\n", sep = ""))
print(summary(TrainData))
cat(paste("\n"))
utils::str(TrainData)
if (no.tests == F) {
cat(paste("\n", "Summary of Testing data set used for evaluations (rows: ", nrow(TestData), ")\n", sep = ""))
print(summary(TestData))
cat(paste("\n"))
utils::str(TestData)
}else{
cat(paste("\n", "(no tests with separate data set)", "\n", sep = ""))
}
}
#
dummy.vars.noDOMAIN <- NULL
#
if(models.keep == T) {
models <- list(MAXENT=NULL, MAXNET=NULL, MAXLIKE=NULL, GBM=NULL, GBMSTEP=NULL, RF=NULL, CF=NULL, GLM=NULL,
GLMSTEP=NULL, GAM=NULL, GAMSTEP=NULL, MGCV=NULL, MGCVFIX=NULL, EARTH=NULL, RPART=NULL,
NNET=NULL, FDA=NULL, SVM=NULL, SVME=NULL, GLMNET=NULL, BIOCLIM.O=NULL, BIOCLIM=NULL, DOMAIN=NULL, MAHAL=NULL, MAHAL01=NULL,
formulae=NULL, output.weights=NULL,
TrainData=NULL, TestData=NULL, p=NULL, a=NULL, pt=NULL, at=NULL,
MAXENT.a=NULL,
vars=NULL, factors=NULL, categories=NULL, dummy.vars=NULL, dummy.vars.noDOMAIN=NULL,
thresholds=NULL, threshold.method=NULL, threshold.sensitivity=NULL, threshold.PresenceAbsence=NULL,
species.name=NULL)
models$TrainData <- TrainData
models$p <- p
models$a <- a
models$MAXENT.a <- MAXENT.a
if (no.tests==F) {models$pt <- pt}
if (no.tests==F) {models$at <- at}
vars <- names(TrainData)
vars <- vars[which(vars!="pb")]
models$vars <- vars
models$factors <- factors
if(is.null(factors)==F) {models$categories <- categories}
models$dummy.vars <- dummy.vars
models$threshold.method <- threshold.method
models$threshold.sensitivity <- threshold.sensitivity
if (threshold.PresenceAbsence == T) {
models$threshold.PresenceAbsence <- TRUE
}else{
models$threshold.PresenceAbsence <- FALSE
}
models$species.name <- species.name
if (no.tests == F) {models$TestData <- TestData}
}else{
models <- NULL
}
#
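# dismo::gbm.step is called later with gbm.y=1 and column indices gbm.x, so a copy of the
# original calibration data (with 'pb' as the first column) is kept before additional columns
# are appended to TrainData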
if(ws["GBMSTEP"] > 0) {
TrainData.orig <- TrainData
assign("TrainData.orig", TrainData.orig, envir=.BiodiversityR)
}
#
# Data frames for distance-based methods, maxnet, SVME and GLMNET
TrainData.vars <- TrainData[, names(TrainData) != "pb", drop=F]
assign("TrainData.vars", TrainData.vars, envir=.BiodiversityR)
TrainData.pa <- as.vector(TrainData[ , "pb"])
assign("TrainData.pa", TrainData.pa, envir=.BiodiversityR)
TrainData.numvars <- TrainData.vars
if (is.null(factors) == F) {
for (i in 1:length(factors)) {
TrainData.numvars <- TrainData.numvars[, which(names(TrainData.numvars) != factors[i]), drop=F]
}
}
num.vars <- names(TrainData.numvars)
assign("TrainData.numvars", TrainData.numvars, envir=.BiodiversityR)
TrainData.pres <- TrainData[TrainData[,"pb"]==1,,drop=F]
TrainData.pres <- TrainData.pres[,names(TrainData.pres) != "pb", drop=F]
assign("TrainData.pres", TrainData.pres, envir=.BiodiversityR)
var.names <- names(TrainData.pres)
if (no.tests == F) {
TestData.vars <- TestData[,names(TestData) != "pb", drop=F]
assign("TestData.vars", TestData.vars, envir=.BiodiversityR)
TestData.numvars <- TestData.vars
if (is.null(factors) == F) {
for (i in 1:length(factors)) {
TestData.numvars <- TestData.numvars[, which(names(TestData.numvars) != factors[i]), drop=F]
}
}
assign("TestData.numvars", TestData.numvars, envir=.BiodiversityR)
}
#
# make MAXENT.TrainData
if (ws["MAXENT"] > 0 || ws["MAXLIKE"] > 0) {
# make these work when x is not provided (14-JUNE-2020)
if (is.null(x) == FALSE) {
if (is.null(MAXENT.a)==T) {
MAXENT.a <- dismo::randomPoints(x[[1]], n=MAXENT.an, p=p, excludep=T)
}
colnames(MAXENT.a) <- colnames(p)
if (length(names(x)) == 1) {
xdouble <- raster::stack(x, x)
MAXENT.TrainData <- dismo::prepareData(x=xdouble, p, b=MAXENT.a, factors=factors, xy=FALSE)
MAXENT.TrainData <- MAXENT.TrainData[, -3]
names(MAXENT.TrainData)[2] <- names(x)
}else{
MAXENT.TrainData <- dismo::prepareData(x, p, b=MAXENT.a, factors=factors, xy=FALSE)
}
if(any(is.na(MAXENT.TrainData[MAXENT.TrainData[,"pb"]==0,]))) {
cat(paste("\n", "WARNING: background locations with missing data removed from MAXENT calibration data","\n\n",sep = ""))
}
TrainValid <- complete.cases(MAXENT.TrainData[MAXENT.TrainData[,"pb"]==0,])
MAXENT.a <- MAXENT.a[TrainValid,]
if (length(names(x)) == 1) {
xdouble <- raster::stack(x, x)
MAXENT.TrainData <- dismo::prepareData(x=xdouble, p, b=MAXENT.a, factors=factors, xy=FALSE)
MAXENT.TrainData <- MAXENT.TrainData[, -3]
names(MAXENT.TrainData)[2] <- names(x)
}else{
MAXENT.TrainData <- dismo::prepareData(x, p, b=MAXENT.a, factors=factors, xy=FALSE)
}
MAXENT.pa <- as.vector(MAXENT.TrainData[ , "pb"])
MAXENT.TrainData <- MAXENT.TrainData[, which(names(MAXENT.TrainData) != "pb"), drop=F]
}else{
MAXENT.pa <- as.vector(TrainData[ , "pb"])
MAXENT.TrainData <- TrainData[, which(names(TrainData) != "pb"), drop=F]
}
cat(paste("\n", "Summary of Training data set used for calibration of MAXENT or MAXLIKE model (rows: ", nrow(MAXENT.TrainData), ", presence locations: ", sum(MAXENT.pa), ")\n", sep = ""))
print(summary(MAXENT.TrainData))
assign("MAXENT.TrainData", MAXENT.TrainData, envir=.BiodiversityR)
assign("MAXENT.pa", MAXENT.pa, envir=.BiodiversityR)
}
#
eval.table <- as.numeric(rep(NA, 8))
names(eval.table) <- c("AUC", "TSS", "SEDI", "TSS.fixed", "SEDI.fixed", "FNR.fixed", "MCR.fixed", "AUCdiff")
eval.table <- as.data.frame(t(eval.table))[FALSE, ]
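# eval.table starts as an empty data frame with these eight evaluation statistics as columns;
# one row is appended per calibrated algorithm from the output of ensemble.evaluate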
#
newVIF <- NULL
if (VIF == T && length(names(TrainData.vars)) > 1 ) {
# if (! require(car)) {stop("Please install the car package")}
# only use background data
TrainDataNum <- TrainData[TrainData[,"pb"]==0,]
LM.formula <- ensemble.formulae(TrainData, factors=factors)$RF.formula
# create an artificial response so that a linear model can be fitted for car::vif
TrainDataNum[,"pb"] <- mean(as.numeric(TrainDataNum[,2]))
vifresult <- NULL
if (CATCH.OFF == F) {
tryCatch(vifresult <- car::vif(lm(formula=LM.formula, data=TrainDataNum)),
error= function(err) {print(paste("\n", "WARNING: VIF (package: car) evaluation failed", "\n", sep=""))},
silent=F)
}else{
vifresult <- car::vif(lm(formula=LM.formula, data=TrainDataNum))
}
if (is.null(vifresult) == F) {
cat(paste("\n", "Variance inflation (package: car)", "\n", sep = ""))
print(vifresult)
}
cat(paste("\n", "VIF directly calculated from linear model with focal numeric variable as response", "\n", sep = ""))
TrainDataNum <- TrainDataNum[, names(TrainDataNum)!="pb"]
varnames <- names(TrainDataNum)
newVIF <- numeric(length=length(varnames))
newVIF[] <- NA
names(newVIF) <- varnames
for (i in 1:length(varnames)) {
response.name <- varnames[i]
explan.names <- varnames[-i]
if ((response.name %in% factors) == F) {
LM.formula <- as.formula(paste(response.name, "~", paste(explan.names, collapse="+"), sep=""))
newVIF[i] <- summary(lm(formula=LM.formula, data=TrainDataNum))$r.squared
}
}
newVIF <- 1/(1-newVIF)
newVIF <- sort(newVIF, decreasing=T, na.last=T)
print(newVIF)
}
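# the 'direct' variance inflation factors above are obtained as VIF = 1 / (1 - R.squared), with
# R.squared taken from a linear model that regresses each numeric explanatory variable on all
# the other explanatory variables (background data only)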
if (COR == T) {
TrainDataNum <- TrainData[, names(TrainData) != "pb", drop=F]
if(is.null(factors) == F) {
for (i in 1:length(factors)) {
TrainDataNum <- TrainDataNum[, names(TrainDataNum) != factors[i], drop=F]
}
}
corresult <- cor(TrainDataNum)
corresult <- round(100*corresult, digits=2)
cat(paste("\n", "Correlation between numeric variables (as percentage)", "\n", sep = ""))
print(corresult)
}
#
modelresults <- data.frame(array(dim=c(nrow(TrainData), length(ws)+1), 0))
names(modelresults) <- c("MAXENT", "MAXNET", "MAXLIKE", "GBM", "GBMSTEP", "RF", "CF", "GLM", "GLMSTEP", "GAM", "GAMSTEP", "MGCV",
"MGCVFIX", "EARTH", "RPART", "NNET", "FDA", "SVM", "SVME", "GLMNET",
"BIOCLIM.O", "BIOCLIM", "DOMAIN", "MAHAL", "MAHAL01", "ENSEMBLE")
TrainData <- cbind(TrainData, modelresults)
assign("TrainData", TrainData, envir=.BiodiversityR)
if (no.tests == F) {
modelresults <- data.frame(array(dim=c(nrow(TestData), length(ws)+1), 0))
names(modelresults) <- c("MAXENT", "MAXNET", "MAXLIKE", "GBM", "GBMSTEP", "RF", "CF", "GLM", "GLMSTEP", "GAM", "GAMSTEP", "MGCV",
"MGCVFIX", "EARTH", "RPART", "NNET", "FDA", "SVM", "SVME", "GLMNET",
"BIOCLIM.O", "BIOCLIM", "DOMAIN", "MAHAL", "MAHAL01", "ENSEMBLE")
TestData <- cbind(TestData, modelresults)
assign("TestData", TestData, envir=.BiodiversityR)
}
weights <- as.numeric(array(dim=length(ws), 0))
names(weights) <- c("MAXENT", "MAXNET", "MAXLIKE", "GBM", "GBMSTEP", "RF", "CF", "GLM", "GLMSTEP", "GAM", "GAMSTEP", "MGCV", "MGCVFIX",
"EARTH", "RPART", "NNET", "FDA", "SVM", "SVME", "GLMNET", "BIOCLIM.O", "BIOCLIM", "DOMAIN", "MAHAL", "MAHAL01")
#
if(evaluations.keep==T) {
evaluations <- list(MAXENT.C=NULL, MAXENT.T=NULL, MAXNET.C=NULL, MAXNET.T=NULL, MAXLIKE.C=NULL, MAXLIKE.T=NULL,
GBM.trees=NULL, GBM.C=NULL, GBM.T=NULL, GBMSTEP.trees=NULL, GBMSTEP.C=NULL, GBMSTEP.T=NULL,
RF.C=NULL, RF.T=NULL, CF.C=NULL, CF.T=NULL, GLM.C=NULL, GLM.T=NULL, GLMS.C=NULL, GLMS.T=NULL,
GAM.C=NULL, GAM.T=NULL, GAMS.C=NULL, GAMS.T=NULL, MGCV.C=NULL, MGCV.T=NULL, MGCVF.C=NULL, MGCVF.T=NULL,
EARTH.C=NULL, EARTH.T=NULL, RPART.C=NULL, RPART.T=NULL,
NNET.C=NULL, NNET.T=NULL, FDA.C=NULL, FDA.T=NULL, SVM.C=NULL, SVM.T=NULL, SVME.C=NULL, SVME.T=NULL, GLMNET.C=NULL, GLMNET.T=NULL,
BIOCLIM.O.C=NULL, BIOCLIM.O.T=NULL, BIOCLIM.C=NULL, BIOCLIM.T=NULL, DOMAIN.C=NULL, DOMAIN.T=NULL, MAHAL.C=NULL, MAHAL.T=NULL, MAHAL01.C=NULL, MAHAL01.T=NULL,
ENSEMBLE.C=NULL, ENSEMBLE.T=NULL, STRATEGY.weights=NULL,
TrainData=NULL, TestData=NULL, MAXENT.a=NULL,
factors=NULL, dummy.vars=NULL)
evaluations$factors <- factors
evaluations$dummy.vars <- dummy.vars
}else{
evaluations <- NULL
}
#
Yweights1 <- Yweights
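# with Yweights="BIOMOD", presence locations keep a weight of 1 and each background location is
# down-weighted by the ratio of presences to background points, so that both groups contribute
# the same total weight to the calibrations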
if (Yweights == "BIOMOD") {
#have equal weight of presence vs. background
Yweights1 <- numeric(length = nrow(TrainData))
pressum <- sum(TrainData[,"pb"]==1)
abssum <- sum(TrainData[,"pb"]==0)
Yweights1[which(TrainData[, "pb"] == 1)] <- 1
Yweights1[which(TrainData[, "pb"] == 0)] <- pressum/abssum
}
if (Yweights == "equal") {
Yweights1 <- numeric(length = nrow(TrainData))
Yweights1[] <- 1
}
assign("Yweights1", Yweights1, envir=.BiodiversityR)
#
# prepare for calculation of deviance
obs1 <- TrainData[, "pb"]
#
# count models
mc <- 0
#
# Different modelling algorithms
#
if(ws["MAXENT"] > 0 && length(names(MAXENT.TrainData)) == 0) {
cat(paste("\n", "WARNING: no explanatory variables available", sep = ""))
cat(paste("\n", "MAXENT model can therefore not be calibrated", "\n", sep = ""))
ws["MAXENT"] <- weights["MAXENT"] <- 0
}
if (ws["MAXENT"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Maximum entropy algorithm (package: dismo)\n", sep=""))
eval1 <- eval2 <- results <- results2 <- TrainPres <- TrainAbs <- TestPres <- TestAbs <- NULL
# Put the file 'maxent.jar' in the 'java' folder of dismo
# the file 'maxent.jar' can be obtained from http://www.cs.princeton.edu/~schapire/maxent/.
jar <- paste(system.file(package="dismo"), "/java/maxent.jar", sep='')
if(is.null(MAXENT.OLD) == T) {
if (CATCH.OFF == F) {
tryCatch(results <- dismo::maxent(x=MAXENT.TrainData, p=MAXENT.pa, factors=factors, path=MAXENT.path),
error= function(err) {print(paste("MAXENT calibration failed"))},
silent=F)
}else{
results <- dismo::maxent(x=MAXENT.TrainData, p=MAXENT.pa, factors=factors, path=MAXENT.path)
}
}else{
results <- MAXENT.OLD
}
if (is.null(results) == F) {
cat(paste("\n", "Evaluation with calibration data of other algorithms","\n\n", sep = ""))
TrainData[,"MAXENT"] <- dismo::predict(object=results, x=TrainData.vars)
if (PROBIT == T) {
if(is.null(MAXENT.PROBIT.OLD) == T) {
probit.formula <- as.formula(paste("pb ~ MAXENT"))
results2 <- glm(probit.formula, family=binomial(link="probit"), data=TrainData, weights=Yweights1, control=glm.control(maxit=maxit))
}else{
results2 <- MAXENT.PROBIT.OLD
}
cat(paste("(Predictions transformed with probit link)","\n", sep = ""))
TrainData[,"MAXENT.step1"] <- TrainData[,"MAXENT"]
TrainData[,"MAXENT"] <- predict.glm(object=results2, newdata=TrainData, type="response")
}else{
TrainData[which(TrainData[, "MAXENT"] < 0), "MAXENT"] <- 0
TrainData[which(TrainData[, "MAXENT"] > 1), "MAXENT"] <- 1
}
pred1 <- TrainData[, "MAXENT"]
pred1[pred1 < 0.0000000001] <- 0.0000000001
pred1[pred1 > 0.9999999999] <- 0.9999999999
cat(paste("Residual deviance (dismo package): ", dismo::calc.deviance(obs=obs1, pred=pred1, calc.mean=F), "\n\n", sep = ""))
TrainPres <- TrainData[TrainData[,"pb"]==1,"MAXENT"]
TrainAbs <- TrainData[TrainData[,"pb"]==0,"MAXENT"]
eval1 <- dismo::evaluate(p=TrainPres, a=TrainAbs)
print(eval1)
# thresholds["MAXENT"] <- threshold(eval1, sensitivity=threshold.sensitivity)[[threshold.method]]
thresholds["MAXENT"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity, threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=TrainPres, Abs=TrainAbs)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds["MAXENT"]))
weights["MAXENT"] <- max(c(eval1@auc, 0), na.rm=T)
AUC.calibration["MAXENT"] <- max(c(eval1@auc, 0), na.rm=T)
if (no.tests == F) {
cat(paste("\n", "Evaluation with test data","\n\n", sep = ""))
TestData[,"MAXENT"] <- dismo::predict(object=results, x=TestData.vars)
if (PROBIT == T) {
TestData[,"MAXENT.step1"] <- TestData[,"MAXENT"]
TestData[,"MAXENT"] <- predict.glm(object=results2, newdata=TestData, type="response")
}else{
TestData[which(TestData[, "MAXENT"] < 0), "MAXENT"] <- 0
TestData[which(TestData[, "MAXENT"] > 1), "MAXENT"] <- 1
}
TestPres <- TestData[TestData[,"pb"]==1,"MAXENT"]
TestAbs <- TestData[TestData[,"pb"]==0,"MAXENT"]
eval2 <- dismo::evaluate(p=TestPres, a=TestAbs)
if (is.null(eval2) == F) {
print(eval2)
weights["MAXENT"] <- max(c(eval2@auc, 0), na.rm=T)
AUC.testing["MAXENT"] <- max(c(eval2@auc, 0), na.rm=T)
cat(paste("\n", "Results with ensemble.evaluate", "\n\n", sep = ""))
eval3 <- ensemble.evaluate(eval=eval2, eval.train=eval1)
print(eval3)
eval.table <- data.frame(rbind(eval.table, t(eval3)))
rownames(eval.table)[nrow(eval.table)] <- "MAXENT"
}else{
cat(paste("\n", "WARNING: MAXENT evaluation failed","\n\n",sep = ""))
ws["MAXENT"] <- 0
weights["MAXENT"] <- 0
TrainData[,"MAXENT"] <- 0
TestData[,"MAXENT"] <- 0
}
}
if(evaluations.keep ==T) {
evaluations$MAXENT.C <- eval1
evaluations$MAXENT.T <- eval2
}
if (models.keep==T) {
models$MAXENT <- results
models$MAXENT.PROBIT <- results2
}
}else{
cat(paste("\n", "WARNING: MAXENT calibration failed", "\n", "\n"))
ws["MAXENT"] <- weights["MAXENT"] <- 0
TrainData[,"MAXENT"] <- 0
if (no.tests == F) {TestData[,"MAXENT"] <- 0}
}
}
if(ws["MAXNET"] > 0 && length(names(TrainData.vars)) == 0) {
cat(paste("\n", "WARNING: no explanatory variables available", sep = ""))
cat(paste("\n", "MAXNET model can therefore not be calibrated", "\n", sep = ""))
ws["MAXNET"] <- weights["MAXNET"] <- 0
}
if (ws["MAXNET"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Maximum entropy algorithm (package: maxnet)\n", sep=""))
eval1 <- eval2 <- results <- results2 <- TrainPres <- TrainAbs <- TestPres <- TestAbs <- NULL
if(is.null(MAXNET.OLD) == T) {
if (CATCH.OFF == F) {
tryCatch(results <- maxnet::maxnet(p=TrainData.pa, data=TrainData.vars, f=maxnet::maxnet.formula(p=TrainData.pa, data=TrainData.vars, classes=MAXNET.classes)),
error= function(err) {print(paste("MAXNET calibration failed"))},
silent=F)
}else{
results <- maxnet::maxnet(p=TrainData.pa, data=TrainData.vars, f=maxnet::maxnet.formula(p=TrainData.pa, data=TrainData.vars, classes=MAXNET.classes))
}
}else{
results <- MAXNET.OLD
}
if (is.null(results) == F) {
TrainData[,"MAXNET"] <- predict.maxnet2(object=results, newdata=TrainData.vars, clamp=MAXNET.clamp, type=MAXNET.type)
if (PROBIT == T) {
if(is.null(MAXNET.PROBIT.OLD) == T) {
probit.formula <- as.formula(paste("pb ~ MAXNET"))
results2 <- glm(probit.formula, family=binomial(link="probit"), data=TrainData, weights=Yweights1, control=glm.control(maxit=maxit))
}else{
results2 <- MAXNET.PROBIT.OLD
}
cat(paste("(Predictions transformed with probit link)","\n", sep = ""))
TrainData[,"MAXNET.step1"] <- TrainData[,"MAXNET"]
TrainData[,"MAXNET"] <- predict.glm(object=results2, newdata=TrainData, type="response")
}else{
TrainData[which(TrainData[, "MAXNET"] < 0), "MAXNET"] <- 0
TrainData[which(TrainData[, "MAXNET"] > 1), "MAXNET"] <- 1
}
pred1 <- TrainData[, "MAXNET"]
pred1[pred1 < 0.0000000001] <- 0.0000000001
pred1[pred1 > 0.9999999999] <- 0.9999999999
cat(paste("Residual deviance (dismo package): ", dismo::calc.deviance(obs=obs1, pred=pred1, calc.mean=F), "\n\n", sep = ""))
TrainPres <- TrainData[TrainData[,"pb"]==1,"MAXNET"]
TrainAbs <- TrainData[TrainData[,"pb"]==0,"MAXNET"]
eval1 <- dismo::evaluate(p=TrainPres, a=TrainAbs)
print(eval1)
# thresholds["MAXNET"] <- threshold(eval1, sensitivity=threshold.sensitivity)[[threshold.method]]
thresholds["MAXNET"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity, threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=TrainPres, Abs=TrainAbs)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds["MAXNET"]))
weights["MAXNET"] <- max(c(eval1@auc, 0), na.rm=T)
AUC.calibration["MAXNET"] <- max(c(eval1@auc, 0), na.rm=T)
if (no.tests == F) {
cat(paste("\n", "Evaluation with test data","\n\n", sep = ""))
TestData[,"MAXNET"] <- predict.maxnet2(object=results, newdata=TestData.vars, clamp=MAXNET.clamp, type=MAXNET.type)
if (PROBIT == T) {
TestData[,"MAXNET.step1"] <- TestData[,"MAXNET"]
TestData[,"MAXNET"] <- predict.glm(object=results2, newdata=TestData, type="response")
}else{
TestData[which(TestData[, "MAXNET"] < 0), "MAXNET"] <- 0
TestData[which(TestData[, "MAXNET"] > 1), "MAXNET"] <- 1
}
TestPres <- TestData[TestData[,"pb"]==1,"MAXNET"]
TestAbs <- TestData[TestData[,"pb"]==0,"MAXNET"]
eval2 <- dismo::evaluate(p=TestPres, a=TestAbs)
if (is.null(eval2) == F) {
print(eval2)
weights["MAXNET"] <- max(c(eval2@auc, 0), na.rm=T)
AUC.testing["MAXNET"] <- max(c(eval2@auc, 0), na.rm=T)
cat(paste("\n", "Results with ensemble.evaluate", "\n\n", sep = ""))
eval3 <- ensemble.evaluate(eval=eval2, eval.train=eval1)
print(eval3)
eval.table <- data.frame(rbind(eval.table, t(eval3)))
rownames(eval.table)[nrow(eval.table)] <- "MAXNET"
}else{
cat(paste("\n", "WARNING: MAXNET evaluation failed","\n\n",sep = ""))
ws["MAXNET"] <- 0
weights["MAXNET"] <- 0
TrainData[,"MAXNET"] <- 0
TestData[,"MAXNET"] <- 0
}
}
if(evaluations.keep ==T) {
evaluations$MAXNET.C <- eval1
evaluations$MAXNET.T <- eval2
}
if (models.keep==T) {
models$MAXNET <- results
models$MAXNET.PROBIT <- results2
models$formulae$MAXNET.clamp <- MAXNET.clamp
models$formulae$MAXNET.type <- MAXNET.type
}
}else{
cat(paste("\n", "WARNING: MAXNET calibration failed", "\n", "\n"))
ws["MAXNET"] <- weights["MAXNET"] <- 0
TrainData[,"MAXNET"] <- 0
if (no.tests == F) {TestData[,"MAXNET"] <- 0}
}
}
if(ws["MAXLIKE"] > 0 && length(names(MAXENT.TrainData)) == 0) {
cat(paste("\n", "WARNING: no explanatory variables available", sep = ""))
cat(paste("\n", "MAXLIKE model can therefore not be calibrated", "\n", sep = ""))
ws["MAXLIKE"] <- weights["MAXLIKE"] <- 0
}
if (ws["MAXLIKE"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Maxlike algorithm (package: maxlike)\n", sep=""))
eval1 <- eval2 <- results <- results2 <- TrainPres <- TrainAbs <- TestPres <- TestAbs <- NULL
MAXLIKE.x <- MAXENT.TrainData[MAXENT.pa == 1, ]
MAXLIKE.z <- MAXENT.TrainData[MAXENT.pa == 0, ]
if(is.null(MAXLIKE.OLD) == T) {
if (CATCH.OFF == F) {
tryCatch(results <- maxlike::maxlike(formula=MAXLIKE.formula, rasters=NULL, points=NULL, x=MAXLIKE.x, z=MAXLIKE.z,
method=MAXLIKE.method, control=list(maxit=maxit)),
error= function(err) {print(paste("MAXLIKE calibration failed"))},
silent=F)
}else{
results <- maxlike::maxlike(formula=MAXLIKE.formula, rasters=NULL, points=NULL, x=MAXLIKE.x, z=MAXLIKE.z,
method=MAXLIKE.method, control=list(maxit=maxit))
}
}else{
results <- MAXLIKE.OLD
}
if (is.null(results) == F) {
cat(paste("\n", "Evaluation with calibration data of other algorithms", "\n\n", sep = ""))
TrainData[, "MAXLIKE"] <- predict(results, newdata=TrainData.numvars)
if (PROBIT == T) {
if(is.null(MAXLIKE.PROBIT.OLD) == T) {
probit.formula <- as.formula(paste("pb ~ MAXLIKE"))
results2 <- glm(probit.formula, family=binomial(link="probit"), data=TrainData, weights=Yweights1, control=glm.control(maxit=maxit))
}else{
results2 <- MAXLIKE.PROBIT.OLD
}
cat(paste("(Predictions transformed with probit link)","\n", sep = ""))
TrainData[,"MAXLIKE.step1"] <- TrainData[,"MAXLIKE"]
TrainData[,"MAXLIKE"] <- predict.glm(object=results2, newdata=TrainData, type="response")
}else{
TrainData[which(TrainData[, "MAXLIKE"] < 0), "MAXLIKE"] <- 0
TrainData[which(TrainData[, "MAXLIKE"] > 1), "MAXLIKE"] <- 1
}
pred1 <- TrainData[, "MAXLIKE"]
pred1[pred1 < 0.0000000001] <- 0.0000000001
pred1[pred1 > 0.9999999999] <- 0.9999999999
cat(paste("Residual deviance (dismo package): ", dismo::calc.deviance(obs=obs1, pred=pred1, calc.mean=F), "\n\n", sep = ""))
TrainPres <- TrainData[TrainData[,"pb"]==1, "MAXLIKE"]
TrainAbs <- TrainData[TrainData[,"pb"]==0, "MAXLIKE"]
eval1 <- dismo::evaluate(p=TrainPres, a=TrainAbs)
print(eval1)
# thresholds["MAXLIKE"] <- threshold(eval1, sensitivity=threshold.sensitivity)[[threshold.method]]
thresholds["MAXLIKE"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity, threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=TrainPres, Abs=TrainAbs)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds["MAXLIKE"]))
weights["MAXLIKE"] <- max(c(eval1@auc, 0), na.rm=T)
AUC.calibration["MAXLIKE"] <- max(c(eval1@auc, 0), na.rm=T)
if (no.tests == F) {
cat(paste("\n", "Evaluation with test data","\n\n", sep = ""))
TestData[, "MAXLIKE"] <- predict(results, newdata=TestData.numvars)
if (PROBIT == T) {
TestData[,"MAXLIKE.step1"] <- TestData[,"MAXLIKE"]
TestData[,"MAXLIKE"] <- predict.glm(object=results2, newdata=TestData, type="response")
}else{
TestData[which(TestData[, "MAXLIKE"] < 0), "MAXLIKE"] <- 0
TestData[which(TestData[, "MAXLIKE"] > 1), "MAXLIKE"] <- 1
}
TestPres <- TestData[TestData[,"pb"]==1, "MAXLIKE"]
TestAbs <- TestData[TestData[,"pb"]==0, "MAXLIKE"]
eval2 <- dismo::evaluate(p=TestPres, a=TestAbs)
if (is.null(eval2) == F) {
print(eval2)
weights["MAXLIKE"] <- max(c(eval2@auc, 0), na.rm=T)
AUC.testing["MAXLIKE"] <- max(c(eval2@auc, 0), na.rm=T)
cat(paste("\n", "Results with ensemble.evaluate", "\n\n", sep = ""))
eval3 <- ensemble.evaluate(eval=eval2, eval.train=eval1)
print(eval3)
eval.table <- data.frame(rbind(eval.table, t(eval3)))
rownames(eval.table)[nrow(eval.table)] <- "MAXLIKE"
}else{
cat(paste("\n", "WARNING: MAXLIKE evaluation failed","\n\n",sep = ""))
ws["MAXLIKE"] <- 0
weights["MAXLIKE"] <- 0
TrainData[,"MAXLIKE"] <- 0
TestData[,"MAXLIKE"] <- 0
}
}
if(evaluations.keep ==T) {
evaluations$MAXLIKE.C <- eval1
evaluations$MAXLIKE.T <- eval2
}
if (models.keep==T) {
models$MAXLIKE <- results
models$MAXLIKE.PROBIT <- results2
models$formulae$MAXLIKE.formula <- MAXLIKE.formula
}
}else{
cat(paste("\n", "WARNING: MAXLIKE calibration failed", "\n", "\n"))
ws["MAXLIKE"] <- weights["MAXLIKE"] <- 0
TrainData[,"MAXLIKE"] <- 0
if (no.tests == F) {TestData[,"MAXLIKE"] <- 0}
}
}
if(ws["GBM"] > 0 && length(names(TrainData.vars)) == 0) {
cat(paste("\n", "WARNING: no explanatory variables available", sep = ""))
cat(paste("\n", "GBM model can therefore not be calibrated", "\n", sep = ""))
ws["GBM"] <- weights["GBM"] <- 0
}
if (ws["GBM"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Generalized boosted regression modeling (package: gbm) \n", sep=""))
eval1 <- eval2 <- results <- results2 <- TrainPres <- TrainAbs <- TestPres <- TestAbs <- NULL
if(is.null(GBM.OLD) == T) {
if (CATCH.OFF == F) {
tryCatch(results <- gbm::gbm(formula=GBM.formula, data=TrainData, weights=Yweights1, distribution="bernoulli",
interaction.depth=7, shrinkage=0.001, bag.fraction=0.5, train.fraction=1,
n.trees=GBM.n.trees, verbose=F, cv.folds=5),
error= function(err) {print(paste("GBM calibration failed"))},
silent=F)
}else{
results <- gbm::gbm(formula=GBM.formula, data=TrainData, weights=Yweights1, distribution="bernoulli",
interaction.depth=7, shrinkage=0.001, bag.fraction=0.5, train.fraction=1,
n.trees=GBM.n.trees, verbose=F, cv.folds=5)
}
}else{
results <- GBM.OLD
}
if (is.null(results) == F) {
cat(paste("\n", "Evaluation with calibration data","\n",sep = ""))
TrainData[,"GBM"] <- gbm::predict.gbm(object=results, newdata=TrainData.vars, n.trees=GBM.n.trees, type="response")
if (PROBIT == T) {
if(is.null(GBM.PROBIT.OLD) == T) {
probit.formula <- as.formula(paste("pb ~ GBM"))
results2 <- glm(probit.formula, family=binomial(link="probit"), data=TrainData, weights=Yweights1, control=glm.control(maxit=maxit))
}else{
results2 <- GBM.PROBIT.OLD
}
cat(paste("(Predictions transformed with probit link)", "\n\n", sep = ""))
TrainData[,"GBM.step1"] <- TrainData[,"GBM"]
TrainData[,"GBM"] <- predict.glm(object=results2, newdata=TrainData, type="response")
}else{
TrainData[which(TrainData[, "GBM"] < 0), "GBM"] <- 0
TrainData[which(TrainData[, "GBM"] > 1), "GBM"] <- 1
}
pred1 <- TrainData[, "GBM"]
pred1[pred1 < 0.0000000001] <- 0.0000000001
pred1[pred1 > 0.9999999999] <- 0.9999999999
cat(paste("Residual deviance (dismo package): ", dismo::calc.deviance(obs=obs1, pred=pred1, calc.mean=F), "\n\n", sep = ""))
TrainPres <- TrainData[TrainData[,"pb"]==1,"GBM"]
TrainAbs <- TrainData[TrainData[,"pb"]==0,"GBM"]
eval1 <- dismo::evaluate(p=TrainPres, a=TrainAbs)
print(eval1)
thresholds["GBM"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity, threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=TrainPres, Abs=TrainAbs)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds["GBM"]))
weights["GBM"] <- max(c(eval1@auc, 0), na.rm=T)
AUC.calibration["GBM"] <- max(c(eval1@auc, 0), na.rm=T)
if (no.tests == F) {
cat(paste("\n", "Evaluation with test data","\n\n",sep = ""))
TestData[,"GBM"] <- gbm::predict.gbm(object=results, newdata=TestData.vars, n.trees=GBM.n.trees, type="response")
if (PROBIT == T) {
TestData[,"GBM.step1"] <- TestData[,"GBM"]
TestData[,"GBM"] <- predict.glm(object=results2, newdata=TestData, type="response")
}else{
TestData[which(TestData[, "GBM"] < 0), "GBM"] <- 0
TestData[which(TestData[, "GBM"] > 1), "GBM"] <- 1
}
TestPres <- TestData[TestData[,"pb"]==1,"GBM"]
TestAbs <- TestData[TestData[,"pb"]==0,"GBM"]
eval2 <- dismo::evaluate(p=TestPres, a=TestAbs)
if (is.null(eval2) == F) {
print(eval2)
weights["GBM"] <- max(c(eval2@auc, 0), na.rm=T)
AUC.testing["GBM"] <- max(c(eval2@auc, 0), na.rm=T)
cat(paste("\n", "Results with ensemble.evaluate", "\n\n", sep = ""))
eval3 <- ensemble.evaluate(eval=eval2, eval.train=eval1)
print(eval3)
eval.table <- data.frame(rbind(eval.table, t(eval3)))
rownames(eval.table)[nrow(eval.table)] <- "GBM"
}else{
cat(paste("\n", "WARNING: GBM evaluation failed","\n\n",sep = ""))
ws["GBM"] <- 0
weights["GBM"] <- 0
TrainData[,"GBM"] <- 0
TestData[,"GBM"] <- 0
}
}
if(evaluations.keep ==T) {
evaluations$GBM.trees <- results$n.trees
evaluations$GBM.C <- eval1
evaluations$GBM.T <- eval2
}
if (models.keep==T) {
models$GBM <- results
models$GBM.PROBIT <- results2
models$formulae$GBM.formula <- GBM.formula
}
}else{
cat(paste("\n", "WARNING: GBM calibration failed", "\n", "\n"))
ws["GBM"] <- weights["GBM"] <- 0
TrainData[,"GBM"] <- 0
if (no.tests == F) {TestData[,"GBM"] <- 0}
}
}
if(ws["GBMSTEP"] > 0 && length(names(TrainData.orig)) == 0) {
cat(paste("\n", "WARNING: no explanatory variables available", sep = ""))
cat(paste("\n", "GBMSTEP model can therefore not be calibrated", "\n", sep = ""))
ws["GBMSTEP"] <- weights["GBMSTEP"]<- 0
}
if (ws["GBMSTEP"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". gbm step algorithm (package: dismo)\n", sep=""))
# require(gbm, quietly=T)
eval1 <- eval2 <- results <- results2 <- TrainPres <- TrainAbs <- TestPres <- TestAbs <- NULL
if(is.null(GBMSTEP.OLD) == T) {
if (CATCH.OFF == F) {
tryCatch(results <- dismo::gbm.step(data=TrainData.orig, gbm.y=1, gbm.x=GBMSTEP.gbm.x, family="bernoulli",
site.weights=Yweights1, tree.complexity=GBMSTEP.tree.complexity, learning.rate = GBMSTEP.learning.rate,
bag.fraction=GBMSTEP.bag.fraction, step.size=GBMSTEP.step.size, verbose=F, silent=F, plot.main=F),
error= function(err) {print(paste("stepwise GBM calibration failed"))},
silent=F)
}else{
results <- dismo::gbm.step(data=TrainData.orig, gbm.y=1, gbm.x=GBMSTEP.gbm.x, family="bernoulli",
site.weights=Yweights1, tree.complexity=GBMSTEP.tree.complexity, learning.rate = GBMSTEP.learning.rate,
bag.fraction=GBMSTEP.bag.fraction, step.size=GBMSTEP.step.size, verbose=F, silent=F, plot.main=F)
}
}else{
results <- GBMSTEP.OLD
}
if (is.null(results) == F) {
cat(paste("\n", "stepwise GBM trees (target > 1000)", "\n", sep = ""))
print(results$n.trees)
cat(paste("\n", "Evaluation with calibration data","\n",sep = ""))
TrainData[,"GBMSTEP"] <- gbm::predict.gbm(object=results, newdata=TrainData.vars, n.trees=GBM.n.trees, type="response")
if (PROBIT == T) {
if(is.null(GBMSTEP.PROBIT.OLD) == T) {
probit.formula <- as.formula(paste("pb ~ GBMSTEP"))
results2 <- glm(probit.formula, family=binomial(link="probit"), data=TrainData, weights=Yweights1, control=glm.control(maxit=maxit))
}else{
results2 <- GBMSTEP.PROBIT.OLD
}
cat(paste("(Predictions transformed with probit link)","\n\n", sep = ""))
TrainData[,"GBMSTEP.step1"] <- TrainData[,"GBMSTEP"]
TrainData[,"GBMSTEP"] <- predict.glm(object=results2, newdata=TrainData, type="response")
}else{
TrainData[which(TrainData[, "GBMSTEP"] < 0), "GBMSTEP"] <- 0
TrainData[which(TrainData[, "GBMSTEP"] > 1), "GBMSTEP"] <- 1
}
pred1 <- TrainData[, "GBMSTEP"]
pred1[pred1 < 0.0000000001] <- 0.0000000001
pred1[pred1 > 0.9999999999] <- 0.9999999999
cat(paste("Residual deviance (dismo package): ", dismo::calc.deviance(obs=obs1, pred=pred1, calc.mean=F), "\n\n", sep = ""))
TrainPres <- TrainData[TrainData[,"pb"]==1,"GBMSTEP"]
TrainAbs <- TrainData[TrainData[,"pb"]==0,"GBMSTEP"]
eval1 <- dismo::evaluate(p=TrainPres, a=TrainAbs)
print(eval1)
thresholds["GBMSTEP"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity, threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=TrainPres, Abs=TrainAbs)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds["GBMSTEP"]))
weights["GBMSTEP"] <- max(c(eval1@auc, 0), na.rm=T)
AUC.calibration["GBMSTEP"] <- max(c(eval1@auc, 0), na.rm=T)
if (no.tests == F) {
cat(paste("\n", "Evaluation with test data","\n\n",sep = ""))
TestData[,"GBMSTEP"] <- gbm::predict.gbm(object=results, newdata=TestData.vars, n.trees=GBM.n.trees, type="response")
if (PROBIT == T) {
TestData[,"GBMSTEP.step1"] <- TestData[,"GBMSTEP"]
TestData[,"GBMSTEP"] <- predict.glm(object=results2, newdata=TestData, type="response")
}else{
TestData[which(TestData[, "GBMSTEP"] < 0), "GBMSTEP"] <- 0
TestData[which(TestData[, "GBMSTEP"] > 1), "GBMSTEP"] <- 1
}
TestPres <- TestData[TestData[,"pb"]==1,"GBMSTEP"]
TestAbs <- TestData[TestData[,"pb"]==0,"GBMSTEP"]
eval2 <- dismo::evaluate(p=TestPres, a=TestAbs)
if (is.null(eval2) == F) {
print(eval2)
weights["GBMSTEP"] <- max(c(eval2@auc, 0), na.rm=T)
AUC.testing["GBMSTEP"] <- max(c(eval2@auc, 0), na.rm=T)
cat(paste("\n", "Results with ensemble.evaluate", "\n\n", sep = ""))
eval3 <- ensemble.evaluate(eval=eval2, eval.train=eval1)
print(eval3)
eval.table <- data.frame(rbind(eval.table, t(eval3)))
rownames(eval.table)[nrow(eval.table)] <- "GBMSTEP"
}else{
cat(paste("\n", "WARNING: stepwise GBM evaluation failed","\n\n",sep = ""))
ws["GBMSTEP"] <- 0
weights["GBMSTEP"] <- 0
TrainData[,"GBMSTEP"] <- 0
TestData[,"GBMSTEP"] <- 0
}
}
if(evaluations.keep ==T) {
evaluations$GBMSTEP.trees <- results$n.trees
evaluations$GBMSTEP.C <- eval1
evaluations$GBMSTEP.T <- eval2
}
if (models.keep==T) {
models$GBMSTEP <- results
models$GBMSTEP.PROBIT <- results2
}
}else{
cat(paste("\n", "WARNING: stepwise GBM calibration failed", "\n", "\n"))
ws["GBMSTEP"] <- weights["GBMSTEP"] <- 0
TrainData[,"GBMSTEP"] <- 0
if (no.tests == F) {TestData[,"GBMSTEP"] <- 0}
}
}
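# RF: random forest (randomForest::randomForest)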
if(ws["RF"] > 0 && length(names(TrainData.vars)) == 0) {
cat(paste("\n", "WARNING: no explanatory variables available", sep = ""))
cat(paste("\n", "RF model can therefore not be calibrated", "\n", sep = ""))
ws["RF"] <- weights["RF"] <- 0
}
if (ws["RF"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Random forest algorithm (package: randomForest)\n", sep=""))
eval1 <- eval2 <- results <- results2 <- TrainPres <- TrainAbs <- TestPres <- TestAbs <- NULL
if(is.null(RF.OLD) == T) {
if (CATCH.OFF == F) {
tryCatch(results <- randomForest::randomForest(formula=RF.formula, ntree=RF.ntree, mtry=RF.mtry, data=TrainData, na.action=na.omit),
error= function(err) {print(paste("random forest calibration failed"))},
silent=F)
}else{
results <- randomForest::randomForest(formula=RF.formula, ntree=RF.ntree, mtry=RF.mtry, data=TrainData, na.action=na.omit)
}
}else{
results <- RF.OLD
}
if (is.null(results) == F) {
cat(paste("\n", "Evaluation with calibration data","\n",sep = ""))
TrainData[,"RF"] <- predict.RF(object=results, newdata=TrainData.vars)
if (PROBIT == T) {
if(is.null(RF.PROBIT.OLD) == T) {
probit.formula <- as.formula(paste("pb ~ RF"))
results2 <- glm(probit.formula, family=binomial(link="probit"), data=TrainData, weights=Yweights1, control=glm.control(maxit=maxit))
}else{
results2 <- RF.PROBIT.OLD
}
cat(paste("(Predictions transformed with probit link)","\n\n", sep = ""))
TrainData[,"RF.step1"] <- TrainData[,"RF"]
TrainData[,"RF"] <- predict.glm(object=results2, newdata=TrainData, type="response")
}else{
TrainData[which(TrainData[, "RF"] < 0), "RF"] <- 0
TrainData[which(TrainData[, "RF"] > 1), "RF"] <- 1
}
pred1 <- TrainData[, "RF"]
pred1[pred1 < 0.0000000001] <- 0.0000000001
pred1[pred1 > 0.9999999999] <- 0.9999999999
cat(paste("Residual deviance (dismo package): ", dismo::calc.deviance(obs=obs1, pred=pred1, calc.mean=F), "\n\n", sep = ""))
TrainPres <- TrainData[TrainData[,"pb"]==1,"RF"]
TrainAbs <- TrainData[TrainData[,"pb"]==0,"RF"]
eval1 <- dismo::evaluate(p=TrainPres, a=TrainAbs)
print(eval1)
thresholds["RF"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity, threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=TrainPres, Abs=TrainAbs)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds["RF"]))
weights["RF"] <- max(c(eval1@auc, 0), na.rm=T)
AUC.calibration["RF"] <- max(c(eval1@auc, 0), na.rm=T)
if (no.tests == F) {
cat(paste("\n", "Evaluation with test data","\n\n",sep = ""))
TestData[,"RF"] <- predict.RF(object=results, newdata=TestData.vars)
if (PROBIT == T) {
TestData[,"RF.step1"] <- TestData[,"RF"]
TestData[,"RF"] <- predict.glm(object=results2, newdata=TestData, type="response")
}else{
TestData[which(TestData[, "RF"] < 0), "RF"] <- 0
TestData[which(TestData[, "RF"] > 1), "RF"] <- 1
}
TestPres <- TestData[TestData[,"pb"]==1,"RF"]
TestAbs <- TestData[TestData[,"pb"]==0,"RF"]
eval2 <- dismo::evaluate(p=TestPres, a=TestAbs)
if (is.null(eval2) == F) {
print(eval2)
weights["RF"] <- max(c(eval2@auc, 0), na.rm=T)
AUC.testing["RF"] <- max(c(eval2@auc, 0), na.rm=T)
cat(paste("\n", "Results with ensemble.evaluate", "\n\n", sep = ""))
eval3 <- ensemble.evaluate(eval=eval2, eval.train=eval1)
print(eval3)
eval.table <- data.frame(rbind(eval.table, t(eval3)))
rownames(eval.table)[nrow(eval.table)] <- "RF"
}else{
cat(paste("\n", "WARNING: random forest evaluation failed","\n\n",sep = ""))
ws["RF"] <- 0
weights["RF"] <- 0
TrainData[,"RF"] <- 0
TestData[,"RF"] <- 0
}
}
if(evaluations.keep ==T) {
evaluations$RF.C <- eval1
evaluations$RF.T <- eval2
}
if (models.keep==T) {
models$RF <- results
models$RF.PROBIT <- results2
models$formulae$RF.formula <- RF.formula
}
}else{
cat(paste("\n", "WARNING: random forest calibration failed", "\n", "\n"))
ws["RF"] <- weights["RF"] <- 0
TrainData[,"RF"] <- 0
if (no.tests == F) {TestData[,"RF"] <- 0}
}
}
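# CF: conditional inference forest (party::cforest with cforest_unbiased controls)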
if(ws["CF"] > 0 && length(names(TrainData.vars)) == 0) {
cat(paste("\n", "WARNING: no explanatory variables available", sep = ""))
cat(paste("\n", "CF model can therefore not be calibrated", "\n", sep = ""))
ws["CF"] <- weights["CF"] <- 0
}
if (ws["CF"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Random forest algorithm (package: party)\n", sep=""))
eval1 <- eval2 <- results <- results2 <- TrainPres <- TrainAbs <- TestPres <- TestAbs <- NULL
if(is.null(CF.OLD) == T) {
if (CATCH.OFF == F) {
tryCatch(results <- party::cforest(formula=CF.formula, data=TrainData, weights=Yweights1, control=party::cforest_unbiased(ntree=CF.ntree, mtry=CF.mtry)),
error= function(err) {print(paste("random forest calibration failed"))},
silent=F)
}else{
results <- party::cforest(formula=CF.formula, data=TrainData, weights=Yweights1, control=party::cforest_unbiased(ntree=CF.ntree, mtry=CF.mtry))
}
}else{
results <- CF.OLD
}
if (is.null(results) == F) {
cat(paste("\n", "Evaluation with calibration data","\n",sep = ""))
TrainData[,"CF"] <- predict.CF(object=results, newdata=TrainData.vars)
if (PROBIT == T) {
if(is.null(CF.PROBIT.OLD) == T) {
probit.formula <- as.formula(paste("pb ~ CF"))
results2 <- glm(probit.formula, family=binomial(link="probit"), data=TrainData, weights=Yweights1, control=glm.control(maxit=maxit))
}else{
results2 <- CF.PROBIT.OLD
}
cat(paste("(Predictions transformed with probit link)","\n\n", sep = ""))
TrainData[,"CF.step1"] <- TrainData[,"CF"]
TrainData[,"CF"] <- predict.glm(object=results2, newdata=TrainData, type="response")
}else{
TrainData[which(TrainData[, "CF"] < 0), "CF"] <- 0
TrainData[which(TrainData[, "CF"] > 1), "CF"] <- 1
}
pred1 <- TrainData[, "CF"]
pred1[pred1 < 0.0000000001] <- 0.0000000001
pred1[pred1 > 0.9999999999] <- 0.9999999999
cat(paste("Residual deviance (dismo package): ", dismo::calc.deviance(obs=obs1, pred=pred1, calc.mean=F), "\n\n", sep = ""))
TrainPres <- TrainData[TrainData[,"pb"]==1,"CF"]
TrainAbs <- TrainData[TrainData[,"pb"]==0,"CF"]
eval1 <- dismo::evaluate(p=TrainPres, a=TrainAbs)
print(eval1)
thresholds["CF"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity, threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=TrainPres, Abs=TrainAbs)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds["CF"]))
weights["CF"] <- max(c(eval1@auc, 0), na.rm=T)
AUC.calibration["CF"] <- max(c(eval1@auc, 0), na.rm=T)
if (no.tests == F) {
cat(paste("\n", "Evaluation with test data","\n\n",sep = ""))
TestData[,"CF"] <- predict.CF(object=results, newdata=TestData.vars)
if (PROBIT == T) {
TestData[,"CF.step1"] <- TestData[,"CF"]
TestData[,"CF"] <- predict.glm(object=results2, newdata=TestData, type="response")
}else{
TestData[which(TestData[, "CF"] < 0), "CF"] <- 0
TestData[which(TestData[, "CF"] > 1), "CF"] <- 1
}
TestPres <- TestData[TestData[,"pb"]==1,"CF"]
TestAbs <- TestData[TestData[,"pb"]==0,"CF"]
eval2 <- dismo::evaluate(p=TestPres, a=TestAbs)
if (is.null(eval2) == F) {
print(eval2)
weights["CF"] <- max(c(eval2@auc, 0), na.rm=T)
AUC.testing["CF"] <- max(c(eval2@auc, 0), na.rm=T)
cat(paste("\n", "Results with ensemble.evaluate", "\n\n", sep = ""))
eval3 <- ensemble.evaluate(eval=eval2, eval.train=eval1)
print(eval3)
eval.table <- data.frame(rbind(eval.table, t(eval3)))
rownames(eval.table)[nrow(eval.table)] <- "CF"
}else{
cat(paste("\n", "WARNING: random forest evaluation failed","\n\n",sep = ""))
ws["CF"] <- 0
weights["CF"] <- 0
TrainData[,"CF"] <- 0
TestData[,"CF"] <- 0
}
}
if(evaluations.keep ==T) {
evaluations$CF.C <- eval1
evaluations$CF.T <- eval2
}
if (models.keep==T) {
models$CF <- results
models$CF.PROBIT <- results2
models$formulae$CF.formula <- CF.formula
}
}else{
cat(paste("\n", "WARNING: random forest calibration failed", "\n", "\n"))
ws["CF"] <- weights["CF"] <- 0
TrainData[,"CF"] <- 0
if (no.tests == F) {TestData[,"CF"] <- 0}
}
}
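# GLM: generalized linear model (stats::glm)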
if(ws["GLM"] > 0 && length(names(TrainData.vars)) == 0) {
cat(paste("\n", "WARNING: no explanatory variables available", sep = ""))
cat(paste("\n", "GLM model can therefore not be calibrated", "\n", sep = ""))
ws["GLM"] <- weights["GLM"] <- 0
}
if (ws["GLM"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Generalized Linear Model \n", sep=""))
eval1 <- eval2 <- results <- results2 <- TrainPres <- TrainAbs <- TestPres <- TestAbs <- NULL
if(is.null(GLM.OLD) == T) {
if (CATCH.OFF == F) {
tryCatch(results <- glm(formula=GLM.formula, family=GLM.family, data=TrainData, weights=Yweights1, control=glm.control(maxit=maxit)),
error= function(err) {print(paste("GLM calibration failed"))},
silent=F)
}else{
results <- glm(formula=GLM.formula, family=GLM.family, data=TrainData, weights=Yweights1, control=glm.control(maxit=maxit))
}
}else{
results <- GLM.OLD
}
if (is.null(results) == F) {
cat(paste("\n", "Evaluation with calibration data","\n",sep = ""))
TrainData[,"GLM"] <- predict.glm(object=results, newdata=TrainData.vars, type="response")
if (PROBIT == T) {
if(is.null(GLM.PROBIT.OLD) == T) {
probit.formula <- as.formula(paste("pb ~ GLM"))
results2 <- glm(probit.formula, family=binomial(link="probit"), data=TrainData, weights=Yweights1, control=glm.control(maxit=maxit))
}else{
results2 <- GLM.PROBIT.OLD
}
cat(paste("(Predictions transformed with probit link)","\n\n", sep = ""))
TrainData[,"GLM.step1"] <- TrainData[,"GLM"]
TrainData[,"GLM"] <- predict.glm(object=results2, newdata=TrainData, type="response")
}else{
TrainData[which(TrainData[, "GLM"] < 0), "GLM"] <- 0
TrainData[which(TrainData[, "GLM"] > 1), "GLM"] <- 1
}
pred1 <- TrainData[, "GLM"]
pred1[pred1 < 0.0000000001] <- 0.0000000001
pred1[pred1 > 0.9999999999] <- 0.9999999999
cat(paste("Residual deviance (dismo package): ", dismo::calc.deviance(obs=obs1, pred=pred1, calc.mean=F), "\n\n", sep = ""))
TrainPres <- TrainData[TrainData[,"pb"]==1,"GLM"]
TrainAbs <- TrainData[TrainData[,"pb"]==0,"GLM"]
eval1 <- dismo::evaluate(p=TrainPres, a=TrainAbs)
print(eval1)
thresholds["GLM"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity, threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=TrainPres, Abs=TrainAbs)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds["GLM"]))
weights["GLM"] <- max(c(eval1@auc, 0), na.rm=T)
AUC.calibration["GLM"] <- max(c(eval1@auc, 0), na.rm=T)
if (no.tests == F) {
cat(paste("\n", "Evaluation with test data","\n\n",sep = ""))
TestData[,"GLM"] <- predict.glm(object=results, newdata=TestData.vars, type="response")
if (PROBIT == T) {
TestData[,"GLM.step1"] <- TestData[,"GLM"]
TestData[,"GLM"] <- predict.glm(object=results2, newdata=TestData, type="response")
}else{
TestData[which(TestData[, "GLM"] < 0), "GLM"] <- 0
TestData[which(TestData[, "GLM"] > 1), "GLM"] <- 1
}
TestPres <- TestData[TestData[,"pb"]==1,"GLM"]
TestAbs <- TestData[TestData[,"pb"]==0,"GLM"]
eval2 <- dismo::evaluate(p=TestPres, a=TestAbs)
if (is.null(eval2) == F) {
print(eval2)
weights["GLM"] <- max(c(eval2@auc, 0), na.rm=T)
AUC.testing["GLM"] <- max(c(eval2@auc, 0), na.rm=T)
cat(paste("\n", "Results with ensemble.evaluate", "\n\n", sep = ""))
eval3 <- ensemble.evaluate(eval=eval2, eval.train=eval1)
print(eval3)
eval.table <- data.frame(rbind(eval.table, t(eval3)))
rownames(eval.table)[nrow(eval.table)] <- "GLM"
}else{
cat(paste("\n", "WARNING: GLM evaluation failed","\n\n",sep = ""))
ws["GLM"] <- 0
weights["GLM"] <- 0
TrainData[,"GLM"] <- 0
TestData[,"GLM"] <- 0
}
}
if(evaluations.keep ==T) {
evaluations$GLM.C <- eval1
evaluations$GLM.T <- eval2
}
if (models.keep==T) {
models$GLM <- results
models$GLM.PROBIT <- results2
models$formulae$GLM.formula <- GLM.formula
}
}else{
cat(paste("\n", "WARNING: GLM calibration failed", "\n", "\n"))
ws["GLM"] <- weights["GLM"] <- 0
TrainData[,"GLM"] <- 0
if (no.tests == F) {TestData[,"GLM"] <- 0}
}
}
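# GLMSTEP: stepwise GLM; the starting model (STEP.formula) is fitted first and then selected with MASS::stepAIC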
if(ws["GLMSTEP"] > 0 && length(names(TrainData.vars)) == 0) {
cat(paste("\n", "WARNING: no explanatory variables available", sep = ""))
cat(paste("\n", "GLMSTEP model can therefore not be calibrated", "\n", sep = ""))
ws["GLMSTEP"] <- weights["GLMSTEP"] <-0
}
if (ws["GLMSTEP"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Stepwise Generalized Linear Model \n", sep=""))
eval1 <- eval2 <- results <- results2 <- TrainPres <- TrainAbs <- TestPres <- TestAbs <- NULL
if(is.null(GLMSTEP.OLD) == T) {
if (CATCH.OFF == F) {
tryCatch(results <- glm(formula=STEP.formula, family=GLM.family, data=TrainData, weights=Yweights1, control=glm.control(maxit=maxit)),
error= function(err) {print(paste("first step of stepwise GLM calibration failed"))},
silent=F)
}else{
results <- glm(formula=STEP.formula, family=GLM.family, data=TrainData, weights=Yweights1, control=glm.control(maxit=maxit))
}
if (CATCH.OFF == F) {
tryCatch(results2 <- MASS::stepAIC(results, scope=GLMSTEP.scope, direction="both", trace=F, steps=GLMSTEP.steps, k=GLMSTEP.k),
error= function(err) {print(paste("stepwise GLM calibration failed"))},
silent=F)
}else{
results2 <- MASS::stepAIC(results, scope=GLMSTEP.scope, direction="both", trace=F, steps=GLMSTEP.steps, k=GLMSTEP.k)
}
}else{
results2 <- GLMSTEP.OLD
}
if (is.null(results2) == F) {
results <- results2
results2 <- NULL
cat(paste("\n", "stepwise GLM formula","\n\n",sep = ""))
print(formula(results))
cat(paste("\n", "Evaluation with calibration data","\n",sep = ""))
TrainData[,"GLMSTEP"] <- predict.glm(object=results, newdata=TrainData.vars, type="response")
if (PROBIT == T) {
if(is.null(GLMSTEP.PROBIT.OLD) == T) {
probit.formula <- as.formula(paste("pb ~ GLMSTEP"))
results2 <- glm(probit.formula, family=binomial(link="probit"), data=TrainData, weights=Yweights1, control=glm.control(maxit=maxit))
}else{
results2 <- GLMSTEP.PROBIT.OLD
}
cat(paste("(Predictions transformed with probit link)","\n\n", sep = ""))
TrainData[,"GLMSTEP.step1"] <- TrainData[,"GLMSTEP"]
TrainData[,"GLMSTEP"] <- predict.glm(object=results2, newdata=TrainData, type="response")
}else{
TrainData[which(TrainData[, "GLMSTEP"] < 0), "GLMSTEP"] <- 0
TrainData[which(TrainData[, "GLMSTEP"] > 1), "GLMSTEP"] <- 1
}
pred1 <- TrainData[, "GLMSTEP"]
pred1[pred1 < 0.0000000001] <- 0.0000000001
pred1[pred1 > 0.9999999999] <- 0.9999999999
cat(paste("Residual deviance (dismo package): ", dismo::calc.deviance(obs=obs1, pred=pred1, calc.mean=F), "\n\n", sep = ""))
TrainPres <- TrainData[TrainData[,"pb"]==1,"GLMSTEP"]
TrainAbs <- TrainData[TrainData[,"pb"]==0,"GLMSTEP"]
eval1 <- dismo::evaluate(p=TrainPres, a=TrainAbs)
print(eval1)
thresholds["GLMSTEP"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity, threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=TrainPres, Abs=TrainAbs)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds["GLMSTEP"]))
weights["GLMSTEP"] <- max(c(eval1@auc, 0), na.rm=T)
AUC.calibration["GLMSTEP"] <- max(c(eval1@auc, 0), na.rm=T)
if (no.tests == F) {
cat(paste("\n", "Evaluation with test data","\n\n",sep = ""))
TestData[,"GLMSTEP"] <- predict.glm(object=results, newdata=TestData.vars, type="response")
if (PROBIT == T) {
TestData[,"GLMSTEP.step1"] <- TestData[,"GLMSTEP"]
TestData[,"GLMSTEP"] <- predict.glm(object=results2, newdata=TestData, type="response")
}else{
TestData[which(TestData[, "GLMSTEP"] < 0), "GLMSTEP"] <- 0
TestData[which(TestData[, "GLMSTEP"] > 1), "GLMSTEP"] <- 1
}
TestPres <- TestData[TestData[,"pb"]==1,"GLMSTEP"]
TestAbs <- TestData[TestData[,"pb"]==0,"GLMSTEP"]
eval2 <- dismo::evaluate(p=TestPres, a=TestAbs)
if (is.null(eval2) == F) {
print(eval2)
weights["GLMSTEP"] <- max(c(eval2@auc, 0), na.rm=T)
AUC.testing["GLMSTEP"] <- max(c(eval2@auc, 0), na.rm=T)
cat(paste("\n", "Results with ensemble.evaluate", "\n\n", sep = ""))
eval3 <- ensemble.evaluate(eval=eval2, eval.train=eval1)
print(eval3)
eval.table <- data.frame(rbind(eval.table, t(eval3)))
rownames(eval.table)[nrow(eval.table)] <- "GLMSTEP"
}else{
cat(paste("\n", "WARNING: stepwise GLM evaluation failed","\n\n",sep = ""))
ws["GLMSTEP"] <- 0
weights["GLMSTEP"] <- 0
TrainData[,"GLMSTEP"] <- 0
TestData[,"GLMSTEP"] <- 0
}
}
if(evaluations.keep ==T) {
evaluations$GLMS.C <- eval1
evaluations$GLMS.T <- eval2
}
if (models.keep==T) {
models$GLMSTEP <- results
models$GLMSTEP.PROBIT <- results2
models$formulae$STEP.formula <- STEP.formula
models$formulae$GLMSTEP.scope <- GLMSTEP.scope
}
}else{
cat(paste("\n", "WARNING: stepwise GLM calibration failed", "\n", "\n"))
ws["GLMSTEP"] <- weights["GLMSTEP"] <- 0
TrainData[,"GLMSTEP"] <- 0
if (no.tests == F) {TestData[,"GLMSTEP"] <- 0}
}
}
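# GAM: generalized additive model (gam::gam)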
if(ws["GAM"] > 0 && length(names(TrainData.vars)) == 0) {
cat(paste("\n", "WARNING: no explanatory variables available", sep = ""))
cat(paste("\n", "GAM model can therefore not be calibrated", "\n", sep = ""))
ws["GAM"] <- weights["GAM"] <- 0
}
if (ws["GAM"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Generalized Additive Model (package: gam)\n", sep=""))
eval1 <- eval2 <- results <- results2 <- TrainPres <- TrainAbs <- TestPres <- TestAbs <- NULL
if(is.null(GAM.OLD) == T) {
if (CATCH.OFF == F) {
tryCatch(results <- gam::gam(formula=GAM.formula, family=GAM.family, data=TrainData, weights=Yweights1, control=gam::gam.control(maxit=maxit, bf.maxit=50)),
error= function(err) {print(paste("GAM (package: gam) calibration failed"))},
silent=F)
}else{
results <- gam::gam(formula=GAM.formula, family=GAM.family, data=TrainData, weights=Yweights1, control=gam::gam.control(maxit=maxit, bf.maxit=50))
}
}else{
results <- GAM.OLD
}
if (is.null(results) == F) {
cat(paste("\n", "Evaluation with calibration data","\n",sep = ""))
TrainData[,"GAM"] <- gam::predict.Gam(object=results, newdata=TrainData.vars, type="response")
if (PROBIT == T) {
if(is.null(GAM.PROBIT.OLD) == T) {
probit.formula <- as.formula(paste("pb ~ GAM"))
results2 <- glm(probit.formula, family=binomial(link="probit"), data=TrainData, weights=Yweights1, control=glm.control(maxit=maxit))
}else{
results2 <- GAM.PROBIT.OLD
}
cat(paste("(Predictions transformed with probit link)","\n\n", sep = ""))
TrainData[,"GAM.step1"] <- TrainData[,"GAM"]
TrainData[,"GAM"] <- predict.glm(object=results2, newdata=TrainData, type="response")
}else{
TrainData[which(TrainData[, "GAM"] < 0), "GAM"] <- 0
TrainData[which(TrainData[, "GAM"] > 1), "GAM"] <- 1
}
pred1 <- TrainData[, "GAM"]
pred1[pred1 < 0.0000000001] <- 0.0000000001
pred1[pred1 > 0.9999999999] <- 0.9999999999
cat(paste("Residual deviance (dismo package): ", dismo::calc.deviance(obs=obs1, pred=pred1, calc.mean=F), "\n\n", sep = ""))
TrainPres <- TrainData[TrainData[,"pb"]==1,"GAM"]
TrainAbs <- TrainData[TrainData[,"pb"]==0,"GAM"]
eval1 <- dismo::evaluate(p=TrainPres, a=TrainAbs)
print(eval1)
thresholds["GAM"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity, threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=TrainPres, Abs=TrainAbs)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds["GAM"]))
weights["GAM"] <- max(c(eval1@auc, 0), na.rm=T)
AUC.calibration["GAM"] <- max(c(eval1@auc, 0), na.rm=T)
if (no.tests == F) {
cat(paste("\n", "Evaluation with test data","\n\n",sep = ""))
TestData[,"GAM"] <- gam::predict.Gam(object=results, newdata=TestData.vars, type="response")
if (PROBIT == T) {
TestData[,"GAM.step1"] <- TestData[,"GAM"]
TestData[,"GAM"] <- predict.glm(object=results2, newdata=TestData, type="response")
}else{
TestData[which(TestData[, "GAM"] < 0), "GAM"] <- 0
TestData[which(TestData[, "GAM"] > 1), "GAM"] <- 1
}
TestPres <- TestData[TestData[,"pb"]==1,"GAM"]
TestAbs <- TestData[TestData[,"pb"]==0,"GAM"]
eval2 <- dismo::evaluate(p=TestPres, a=TestAbs)
if (is.null(eval2) == F) {
print(eval2)
weights["GAM"] <- max(c(eval2@auc, 0), na.rm=T)
AUC.testing["GAM"] <- max(c(eval2@auc, 0), na.rm=T)
cat(paste("\n", "Results with ensemble.evaluate", "\n\n", sep = ""))
eval3 <- ensemble.evaluate(eval=eval2, eval.train=eval1)
print(eval3)
eval.table <- data.frame(rbind(eval.table, t(eval3)))
rownames(eval.table)[nrow(eval.table)] <- "GAM"
}else{
cat(paste("\n", "WARNING: GAM (package: gam) evaluation failed","\n\n",sep = ""))
ws["GAM"] <- 0
weights["GAM"] <- 0
TrainData[,"GAM"] <- 0
TestData[,"GAM"] <- 0
}
}
if(evaluations.keep ==T) {
evaluations$GAM.C <- eval1
evaluations$GAM.T <- eval2
}
if (models.keep==T) {
models$GAM <- results
models$GAM.PROBIT <- results2
models$formulae$GAM.formula <- GAM.formula
}
}else{
cat(paste("\n", "WARNING: GAM (package: gam) calibration failed", "\n", "\n"))
ws["GAM"] <- weights["GAM"] <- 0
TrainData[,"GAM"] <- 0
if (no.tests == F) {TestData[,"GAM"] <- 0}
}
}
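# GAMSTEP: stepwise GAM via gam::step.Gam; TrainData, GAM.family and maxit are temporarily assigned to GAMSTEP.pos so step.Gam can find them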
if(ws["GAMSTEP"] > 0 && length(names(TrainData.vars)) == 0) {
cat(paste("\n", "WARNING: no explanatory variables available", sep = ""))
cat(paste("\n", "GAMSTEP model can therefore not be calibrated", "\n", sep = ""))
ws["GAMSTEP"] <- weights["GAMSTEP"] <- 0
}
if (ws["GAMSTEP"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Stepwise Generalized Additive Model (package: gam)\n", sep=""))
eval1 <- eval2 <- results <- results2 <- TrainPres <- TrainAbs <- TestPres <- TestAbs <- NULL
if(is.null(GAMSTEP.OLD) == T) {
if (CATCH.OFF == F) {
tryCatch(results <- gam::gam(formula=STEP.formula, family=GAM.family, data=TrainData, weights=Yweights1, control=gam::gam.control(maxit=maxit, bf.maxit=50)),
error= function(err) {print(paste("first step of stepwise GAM (package: gam) calibration failed"))},
silent=F)
}else{
results <- gam::gam(formula=STEP.formula, family=GAM.family, data=TrainData, weights=Yweights1, control=gam::gam.control(maxit=maxit, bf.maxit=50))
}
assign("TrainData", TrainData, pos=GAMSTEP.pos)
assign("GAM.family", GAM.family, pos=GAMSTEP.pos)
assign("maxit", maxit, pos=GAMSTEP.pos)
if (CATCH.OFF == F) {
tryCatch(results2 <- gam::step.Gam(results, scope=GAMSTEP.scope, direction="both", trace=F, steps=GAMSTEP.steps),
error= function(err) {print(paste("stepwise GAM (package: gam) calibration failed"))},
silent=F)
}else{
results2 <- gam::step.Gam(results, scope=GAMSTEP.scope, direction="both", trace=F, steps=GAMSTEP.steps)
}
remove(TrainData, pos=GAMSTEP.pos)
remove(GAM.family, pos=GAMSTEP.pos)
remove(maxit, pos=GAMSTEP.pos)
}else{
results2 <- GAMSTEP.OLD
}
if (is.null(results2) == F) {
results <- results2
results2 <- NULL
cat(paste("\n", "stepwise GAM formula (gam package)","\n\n",sep = ""))
print(formula(results))
cat(paste("\n", "Evaluation with calibration data","\n",sep = ""))
TrainData[,"GAMSTEP"] <- gam::predict.Gam(object=results, newdata=TrainData.vars, type="response")
if (PROBIT == T) {
if(is.null(GAMSTEP.PROBIT.OLD) == T) {
probit.formula <- as.formula(paste("pb ~ GAMSTEP"))
results2 <- glm(probit.formula, family=binomial(link="probit"), data=TrainData, weights=Yweights1, control=glm.control(maxit=maxit))
}else{
results2 <- GAMSTEP.PROBIT.OLD
}
cat(paste("(Predictions transformed with probit link)","\n\n", sep = ""))
TrainData[,"GAMSTEP.step1"] <- TrainData[,"GAMSTEP"]
TrainData[,"GAMSTEP"] <- predict.glm(object=results2, newdata=TrainData, type="response")
}else{
TrainData[which(TrainData[, "GAMSTEP"] < 0), "GAMSTEP"] <- 0
TrainData[which(TrainData[, "GAMSTEP"] > 1), "GAMSTEP"] <- 1
}
pred1 <- TrainData[, "GAMSTEP"]
pred1[pred1 < 0.0000000001] <- 0.0000000001
pred1[pred1 > 0.9999999999] <- 0.9999999999
cat(paste("Residual deviance (dismo package): ", dismo::calc.deviance(obs=obs1, pred=pred1, calc.mean=F), "\n\n", sep = ""))
TrainPres <- TrainData[TrainData[,"pb"]==1,"GAMSTEP"]
TrainAbs <- TrainData[TrainData[,"pb"]==0,"GAMSTEP"]
eval1 <- dismo::evaluate(p=TrainPres, a=TrainAbs)
print(eval1)
thresholds["GAMSTEP"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity, threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=TrainPres, Abs=TrainAbs)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds["GAMSTEP"]))
weights["GAMSTEP"] <- max(c(eval1@auc, 0), na.rm=T)
AUC.calibration["GAMSTEP"] <- max(c(eval1@auc, 0), na.rm=T)
if (no.tests == F) {
cat(paste("\n", "Evaluation with test data","\n\n","\n", sep = ""))
TestData[,"GAMSTEP"] <- gam::predict.Gam(object=results, newdata=TestData.vars, type="response")
if (PROBIT == T) {
TestData[,"GAMSTEP.step1"] <- TestData[,"GAMSTEP"]
TestData[,"GAMSTEP"] <- predict.glm(object=results2, newdata=TestData, type="response")
}else{
TestData[which(TestData[, "GAMSTEP"] < 0), "GAMSTEP"] <- 0
TestData[which(TestData[, "GAMSTEP"] > 1), "GAMSTEP"] <- 1
}
TestPres <- TestData[TestData[,"pb"]==1,"GAMSTEP"]
TestAbs <- TestData[TestData[,"pb"]==0,"GAMSTEP"]
eval2 <- dismo::evaluate(p=TestPres, a=TestAbs)
if (is.null(eval2) == F) {
print(eval2)
weights["GAMSTEP"] <- max(c(eval2@auc, 0), na.rm=T)
AUC.testing["GAMSTEP"] <- max(c(eval2@auc, 0), na.rm=T)
cat(paste("\n", "Results with ensemble.evaluate", "\n\n", sep = ""))
eval3 <- ensemble.evaluate(eval=eval2, eval.train=eval1)
print(eval3)
eval.table <- data.frame(rbind(eval.table, t(eval3)))
rownames(eval.table)[nrow(eval.table)] <- "GAMSTEP"
}else{
cat(paste("\n", "WARNING: stepwise GAM (package: gam) evaluation failed","\n\n",sep = ""))
ws["GAMSTEP"] <- 0
weights["GAMSTEP"] <- 0
TrainData[,"GAMSTEP"] <- 0
TestData[,"GAMSTEP"] <- 0
}
}
if(evaluations.keep ==T) {
evaluations$GAMS.C <- eval1
evaluations$GAMS.T <- eval2
}
if (models.keep==T) {
models$GAMSTEP <- results
models$GAMSTEP.PROBIT <- results2
models$formulae$STEP.formula <- STEP.formula
models$formulae$GAMSTEP.scope <- GAMSTEP.scope
}
}else{
cat(paste("\n", "WARNING: stepwise GAM (package: gam) calibration failed", "\n", "\n"))
ws["GAMSTEP"] <- weights["GAMSTEP"] <- 0
TrainData[,"GAMSTEP"] <- 0
if (no.tests == F) {TestData[,"GAMSTEP"] <- 0}
}
}
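# MGCV: generalized additive model (mgcv::gam)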
if(ws["MGCV"] > 0 && length(names(TrainData.vars)) == 0) {
cat(paste("\n", "WARNING: no explanatory variables available", sep = ""))
cat(paste("\n", "MGCV model can therefore not be calibrated", "\n", sep = ""))
ws["MGCV"] <- weights["MGCV"] <- 0
}
if (ws["MGCV"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Generalized Additive Model (package: mgcv)\n", sep=""))
eval1 <- eval2 <- results <- results2 <- TrainPres <- TrainAbs <- TestPres <- TestAbs <- NULL
if(is.null(MGCV.OLD) == T) {
if (CATCH.OFF == F) {
tryCatch(results <- mgcv::gam(formula=MGCV.formula, family=GAM.family, data=TrainData, weights=Yweights1,
select=MGCV.select, control=mgcv::gam.control(maxit=maxit)),
error= function(err) {print(paste("GAM (package: mgcv) calibration failed"))},
silent=F)
}else{
results <- mgcv::gam(formula=MGCV.formula, family=GAM.family, data=TrainData, weights=Yweights1,
select=MGCV.select, control=mgcv::gam.control(maxit=maxit))
}
}else{
results <- MGCV.OLD
}
if (is.null(results) == F) {
cat(paste("\n", "Evaluation with calibration data","\n",sep = ""))
TrainData[,"MGCV"] <- predict.MGCV(object=results, newdata=TrainData.vars)
if (PROBIT == T) {
if(is.null(MGCV.PROBIT.OLD) == T) {
probit.formula <- as.formula(paste("pb ~ MGCV"))
results2 <- glm(probit.formula, family=binomial(link="probit"), data=TrainData, weights=Yweights1, control=glm.control(maxit=maxit))
}else{
results2 <- MGCV.PROBIT.OLD
}
cat(paste("(Predictions transformed with probit link)","\n\n", sep = ""))
TrainData[,"MGCV.step1"] <- TrainData[,"MGCV"]
TrainData[,"MGCV"] <- predict.glm(object=results2, newdata=TrainData, type="response")
}else{
TrainData[which(TrainData[, "MGCV"] < 0), "MGCV"] <- 0
TrainData[which(TrainData[, "MGCV"] > 1), "MGCV"] <- 1
}
pred1 <- TrainData[, "MGCV"]
pred1[pred1 < 0.0000000001] <- 0.0000000001
pred1[pred1 > 0.9999999999] <- 0.9999999999
cat(paste("Residual deviance (dismo package): ", dismo::calc.deviance(obs=obs1, pred=pred1, calc.mean=F), "\n\n", sep = ""))
TrainPres <- TrainData[TrainData[,"pb"]==1,"MGCV"]
TrainAbs <- TrainData[TrainData[,"pb"]==0,"MGCV"]
eval1 <- dismo::evaluate(p=TrainPres, a=TrainAbs)
print(eval1)
thresholds["MGCV"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity, threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=TrainPres, Abs=TrainAbs)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds["MGCV"]))
weights["MGCV"] <- max(c(eval1@auc, 0), na.rm=T)
AUC.calibration["MGCV"] <- max(c(eval1@auc, 0), na.rm=T)
if (no.tests == F) {
cat(paste("\n", "Evaluation with test data","\n\n",sep = ""))
TestData[,"MGCV"] <- predict.MGCV(object=results, newdata=TestData.vars)
if (PROBIT == T) {
TestData[,"MGCV.step1"] <- TestData[,"MGCV"]
TestData[,"MGCV"] <- predict.glm(object=results2, newdata=TestData, type="response")
}else{
TestData[which(TestData[, "MGCV"] < 0), "MGCV"] <- 0
TestData[which(TestData[, "MGCV"] > 1), "MGCV"] <- 1
}
TestPres <- TestData[TestData[,"pb"]==1,"MGCV"]
TestAbs <- TestData[TestData[,"pb"]==0,"MGCV"]
eval2 <- dismo::evaluate(p=TestPres, a=TestAbs)
if (is.null(eval2) == F) {
print(eval2)
weights["MGCV"] <- max(c(eval2@auc, 0), na.rm=T)
AUC.testing["MGCV"] <- max(c(eval2@auc, 0), na.rm=T)
cat(paste("\n", "Results with ensemble.evaluate", "\n\n", sep = ""))
eval3 <- ensemble.evaluate(eval=eval2, eval.train=eval1)
print(eval3)
eval.table <- data.frame(rbind(eval.table, t(eval3)))
rownames(eval.table)[nrow(eval.table)] <- "MGCV"
}else{
cat(paste("\n", "WARNING: GAM (package: mgcv) evaluation failed","\n\n",sep = ""))
ws["MGCV"] <- 0
weights["MGCV"] <- 0
TrainData[,"MGCV"] <- 0
TestData[,"MGCV"] <- 0
}
}
if(evaluations.keep ==T) {
evaluations$MGCV.C <- eval1
evaluations$MGCV.T <- eval2
}
if (models.keep==T) {
models$MGCV <- results
models$MGCV.PROBIT <- results2
models$formulae$MGCV.formula <- MGCV.formula
}
}else{
cat(paste("\n", "WARNING: GAM (package: mgcv) calibration failed", "\n", "\n"))
ws["MGCV"] <- weights["MGCV"] <- 0
TrainData[,"MGCV"] <- 0
if (no.tests == F) {TestData[,"MGCV"] <- 0}
}
}
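# MGCVFIX: mgcv::gam with fixed d.f. regression splines (select=FALSE)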
if(ws["MGCVFIX"] > 0 && length(names(TrainData.vars)) == 0) {
cat(paste("\n", "WARNING: no explanatory variables available", sep = ""))
cat(paste("\n", "MGCVFIX model can therefore not be calibrated", "\n", sep = ""))
ws["MGCVFIX"] <- weights["MGCVFIX"] <- 0
}
if (ws["MGCVFIX"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". GAM with fixed d.f. regression splines (package: mgcv)\n", sep=""))
eval1 <- eval2 <- results <- results2 <- TrainPres <- TrainAbs <- TestPres <- TestAbs <- NULL
if(is.null(MGCVFIX.OLD) == T) {
if (CATCH.OFF == F) {
tryCatch(results <- mgcv::gam(formula=MGCVFIX.formula, family=GAM.family, data=TrainData, weights=Yweights1, select=FALSE, control=mgcv::gam.control(maxit=maxit)),
error= function(err) {print(paste("GAM with fixed d.f. regression splines (package: mgcv) calibration failed"))},
silent=F)
}else{
results <- mgcv::gam(formula=MGCVFIX.formula, family=GAM.family, data=TrainData, weights=Yweights1, select=FALSE, control=mgcv::gam.control(maxit=maxit))
}
}else{
results <- MGCVFIX.OLD
}
if (is.null(results) == F) {
cat(paste("\n", "Evaluation with calibration data","\n",sep = ""))
TrainData[,"MGCVFIX"] <- predict.MGCV(object=results, newdata=TrainData.vars)
if (PROBIT == T) {
if(is.null(MGCVFIX.PROBIT.OLD) == T) {
probit.formula <- as.formula(paste("pb ~ MGCVFIX"))
results2 <- glm(probit.formula, family=binomial(link="probit"), data=TrainData, weights=Yweights1, control=glm.control(maxit=maxit))
}else{
results2 <- MGCVFIX.PROBIT.OLD
}
cat(paste("(Predictions transformed with probit link)","\n\n", sep = ""))
TrainData[,"MGCVFIX.step1"] <- TrainData[,"MGCVFIX"]
TrainData[,"MGCVFIX"] <- predict.glm(object=results2, newdata=TrainData, type="response")
}else{
TrainData[which(TrainData[, "MGCVFIX"] < 0), "MGCVFIX"] <- 0
TrainData[which(TrainData[, "MGCVFIX"] > 1), "MGCVFIX"] <- 1
}
pred1 <- TrainData[, "MGCVFIX"]
pred1[pred1 < 0.0000000001] <- 0.0000000001
pred1[pred1 > 0.9999999999] <- 0.9999999999
cat(paste("Residual deviance (dismo package): ", dismo::calc.deviance(obs=obs1, pred=pred1, calc.mean=F), "\n\n", sep = ""))
TrainPres <- TrainData[TrainData[,"pb"]==1,"MGCVFIX"]
TrainAbs <- TrainData[TrainData[,"pb"]==0,"MGCVFIX"]
eval1 <- dismo::evaluate(p=TrainPres, a=TrainAbs)
print(eval1)
thresholds["MGCVFIX"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity, threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=TrainPres, Abs=TrainAbs)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds["MGCVFIX"]))
weights["MGCVFIX"] <- max(c(eval1@auc, 0), na.rm=T)
AUC.calibration["MGCVFIX"] <- max(c(eval1@auc, 0), na.rm=T)
if (no.tests == F) {
cat(paste("\n", "Evaluation with test data","\n\n",sep = ""))
TestData[,"MGCVFIX"] <- predict.MGCV(object=results, newdata=TestData)
if (PROBIT == T) {
TestData[,"MGCVFIX.step1"] <- TestData[,"MGCVFIX"]
TestData[,"MGCVFIX"] <- predict.glm(object=results2, newdata=TestData, type="response")
}else{
TestData[which(TestData[, "MGCVFIX"] < 0), "MGCVFIX"] <- 0
TestData[which(TestData[, "MGCVFIX"] > 1), "MGCVFIX"] <- 1
}
TestPres <- TestData[TestData[,"pb"]==1,"MGCVFIX"]
TestAbs <- TestData[TestData[,"pb"]==0,"MGCVFIX"]
eval2 <- dismo::evaluate(p=TestPres, a=TestAbs)
if (is.null(eval2) == F) {
print(eval2)
weights["MGCVFIX"] <- max(c(eval2@auc, 0), na.rm=T)
AUC.testing["MGCVFIX"] <- max(c(eval2@auc, 0), na.rm=T)
cat(paste("\n", "Results with ensemble.evaluate", "\n\n", sep = ""))
eval3 <- ensemble.evaluate(eval=eval2, eval.train=eval1)
print(eval3)
eval.table <- data.frame(rbind(eval.table, t(eval3)))
rownames(eval.table)[nrow(eval.table)] <- "MGCVFIX"
}else{
cat(paste("\n", "WARNING: GAM with fixed d.f. regression splines (package: mgcv) evaluation failed","\n\n",sep = ""))
ws["MGCVFIX"] <- 0
weights["MGCVFIX"] <- 0
TrainData[,"MGCVFIX"] <- 0
TestData[,"MGCVFOX"] <- 0
}
}
if(evaluations.keep ==T) {
evaluations$MGCVF.C <- eval1
evaluations$MGCVF.T <- eval2
}
if (models.keep==T) {
models$MGCVFIX <- results
models$MGCVFIX.PROBIT <- results2
models$formulae$MGCVFIX.formula <- MGCVFIX.formula
}
}else{
cat(paste("\n", "WARNING: GAM with fixed d.f. regression splines (package: mgcv) calibration failed", "\n", "\n"))
ws["MGCVFIX"] <- weights["MGCVFIX"] <- 0
TrainData[,"MGCVFIX"] <- 0
if (no.tests == F) {TestData[,"MGCVFIX"] <- 0}
}
}
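# EARTH: multivariate adaptive regression splines (earth::earth); factors are not used as explanatory variables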
if(ws["EARTH"] > 0 && length(names(TrainData.numvars)) == 0) {
cat(paste("\n", "WARNING: no explanatory variables available", sep = ""))
cat(paste("\n", "MARS (EARTH) model can therefore not be calibrated", "\n", sep = ""))
ws["EARTH"] <- weights["EARTH"] <- 0
}
if (ws["EARTH"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Multivariate Adaptive Regression Splines (package: earth)\n", sep=""))
if (is.null(factors) == F) {
cat(paste("\n", "NOTE: factors could not be used as explanatory variables for MARS (maybe consider dummy variables)", "\n", sep=""))
}
eval1 <- eval2 <- results <- results2 <- TrainPres <- TrainAbs <- TestPres <- TestAbs <- NULL
if(is.null(EARTH.OLD) == T) {
if (CATCH.OFF == F) {
tryCatch(results <- earth::earth(formula=EARTH.formula, glm=EARTH.glm, data=TrainData, degree=2),
error= function(err) {print(paste("MARS (package: earth) calibration failed"))},
silent=F)
}else{
results <- earth::earth(formula=EARTH.formula, glm=EARTH.glm, data=TrainData, degree=2)
}
}else{
results <- EARTH.OLD
}
if (is.null(results) == F) {
cat(paste("\n", "Evaluation with calibration data","\n",sep = ""))
TrainData[,"EARTH"] <- predict.EARTH(object=results, newdata=TrainData.vars)
if (PROBIT == T) {
if(is.null(EARTH.PROBIT.OLD) == T) {
probit.formula <- as.formula(paste("pb ~ EARTH"))
results2 <- glm(probit.formula, family=binomial(link="probit"), data=TrainData, weights=Yweights1, control=glm.control(maxit=maxit))
}else{
results2 <- EARTH.PROBIT.OLD
}
cat(paste("(Predictions transformed with probit link)","\n\n", sep = ""))
TrainData[,"EARTH.step1"] <- TrainData[,"EARTH"]
TrainData[,"EARTH"] <- predict.glm(object=results2, newdata=TrainData, type="response")
}else{
TrainData[which(TrainData[, "EARTH"] < 0), "EARTH"] <- 0
TrainData[which(TrainData[, "EARTH"] > 1), "EARTH"] <- 1
}
pred1 <- TrainData[, "EARTH"]
pred1[pred1 < 0.0000000001] <- 0.0000000001
pred1[pred1 > 0.9999999999] <- 0.9999999999
cat(paste("Residual deviance (dismo package): ", dismo::calc.deviance(obs=obs1, pred=pred1, calc.mean=F), "\n\n", sep = ""))
TrainPres <- TrainData[TrainData[,"pb"]==1,"EARTH"]
TrainAbs <- TrainData[TrainData[,"pb"]==0,"EARTH"]
eval1 <- dismo::evaluate(p=TrainPres, a=TrainAbs)
print(eval1)
thresholds["EARTH"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity, threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=TrainPres, Abs=TrainAbs)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds["EARTH"]))
weights["EARTH"] <- max(c(eval1@auc, 0), na.rm=T)
AUC.calibration["EARTH"] <- max(c(eval1@auc, 0), na.rm=T)
if (no.tests == F) {
cat(paste("\n", "Evaluation with test data","\n\n",sep = ""))
TestData[,"EARTH"] <- predict.EARTH(object=results, newdata=TestData.vars)
if (PROBIT == T) {
TestData[,"EARTH.step1"] <- TestData[,"EARTH"]
TestData[,"EARTH"] <- predict.glm(object=results2, newdata=TestData, type="response")
}else{
TestData[which(TestData[, "EARTH"] < 0), "EARTH"] <- 0
TestData[which(TestData[, "EARTH"] > 1), "EARTH"] <- 1
}
TestPres <- TestData[TestData[,"pb"]==1,"EARTH"]
TestAbs <- TestData[TestData[,"pb"]==0,"EARTH"]
eval2 <- dismo::evaluate(p=TestPres, a=TestAbs)
if (is.null(eval2) == F) {
print(eval2)
weights["EARTH"] <- max(c(eval2@auc, 0), na.rm=T)
AUC.testing["EARTH"] <- max(c(eval2@auc, 0), na.rm=T)
cat(paste("\n", "Results with ensemble.evaluate", "\n\n", sep = ""))
eval3 <- ensemble.evaluate(eval=eval2, eval.train=eval1)
print(eval3)
eval.table <- data.frame(rbind(eval.table, t(eval3)))
rownames(eval.table)[nrow(eval.table)] <- "EARTH"
}else{
cat(paste("\n", "WARNING: MARS (package: earth) evaluation failed","\n\n",sep = ""))
ws["EARTH"] <- 0
weights["EARTH"] <- 0
TrainData[,"EARTH"] <- 0
TestData[,"EARTH"] <- 0
}
}
if(evaluations.keep ==T) {
evaluations$EARTH.C <- eval1
evaluations$EARTH.T <- eval2
}
if (models.keep==T) {
models$EARTH <- results
models$EARTH.PROBIT <- results2
models$formulae$EARTH.formula <- EARTH.formula
}
}else{
cat(paste("\n", "WARNING: MARS (package: earth) calibration failed", "\n", "\n"))
ws["EARTH"] <- weights["EARTH"] <- 0
TrainData[,"EARTH"] <- 0
if (no.tests == F) {TestData[,"EARTH"] <- 0}
}
}
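# RPART: classification tree (rpart::rpart); presence probability taken from the second column of the predicted class probabilities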
if(ws["RPART"] > 0 && length(names(TrainData.vars)) == 0) {
cat(paste("\n", "WARNING: no explanatory variables available", sep = ""))
cat(paste("\n", "RPART model can therefore not be calibrated", "\n", sep = ""))
ws["RPART"] <- weights["rpart"] <- 0
}
if (ws["RPART"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Recursive Partitioning And Regression Trees (package: rpart)\n", sep=""))
eval1 <- eval2 <- results <- results2 <- TrainPres <- TrainAbs <- TestPres <- TestAbs <- NULL
if(is.null(RPART.OLD) == T) {
if (CATCH.OFF == F) {
tryCatch(results <- rpart::rpart(formula=RPART.formula, data=TrainData, weights=Yweights1,
control=rpart::rpart.control(xval=RPART.xval, minbucket=5, minsplit=5, cp=0.001, maxdepth=25)),
error= function(err) {print(paste("RPART calibration failed"))},
silent=F)
}else{
results <- rpart::rpart(formula=RPART.formula, data=TrainData, weights=Yweights1,
control=rpart::rpart.control(xval=RPART.xval, minbucket=5, minsplit=5, cp=0.001, maxdepth=25))
}
}else{
results <- RPART.OLD
}
if (is.null(results) == F) {
cat(paste("\n", "Evaluation with calibration data","\n",sep = ""))
TrainData[,"RPART"] <- predict(object=results, newdata=TrainData.vars, type="prob")[,2]
if (PROBIT == T) {
if(is.null(RPART.PROBIT.OLD) == T) {
probit.formula <- as.formula(paste("pb ~ RPART"))
results2 <- glm(probit.formula, family=binomial(link="probit"), data=TrainData, weights=Yweights1, control=glm.control(maxit=maxit))
}else{
results2 <- RPART.PROBIT.OLD
}
cat(paste("(Predictions transformed with probit link)","\n\n", sep = ""))
TrainData[,"RPART.step1"] <- TrainData[,"RPART"]
TrainData[,"RPART"] <- predict.glm(object=results2, newdata=TrainData, type="response")
}else{
TrainData[which(TrainData[, "RPART"] < 0), "RPART"] <- 0
TrainData[which(TrainData[, "RPART"] > 1), "RPART"] <- 1
}
pred1 <- TrainData[, "RPART"]
pred1[pred1 < 0.0000000001] <- 0.0000000001
pred1[pred1 > 0.9999999999] <- 0.9999999999
cat(paste("Residual deviance (dismo package): ", dismo::calc.deviance(obs=obs1, pred=pred1, calc.mean=F), "\n\n", sep = ""))
TrainPres <- TrainData[TrainData[,"pb"]==1,"RPART"]
TrainAbs <- TrainData[TrainData[,"pb"]==0,"RPART"]
eval1 <- dismo::evaluate(p=TrainPres, a=TrainAbs)
print(eval1)
thresholds["RPART"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity, threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=TrainPres, Abs=TrainAbs)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds["RPART"]))
weights["RPART"] <- max(c(eval1@auc, 0), na.rm=T)
AUC.calibration["RPART"] <- max(c(eval1@auc, 0), na.rm=T)
if (no.tests == F) {
cat(paste("\n", "Evaluation with test data","\n\n",sep = ""))
TestData[,"RPART"] <- predict(object=results, newdata=TestData.vars, type="prob")[,2]
if (PROBIT == T) {
TestData[,"RPART.step1"] <- TestData[,"RPART"]
TestData[,"RPART"] <- predict.glm(object=results2, newdata=TestData, type="response")
}else{
TestData[which(TestData[, "RPART"] < 0), "RPART"] <- 0
TestData[which(TestData[, "RPART"] > 1), "RPART"] <- 1
}
TestPres <- TestData[TestData[,"pb"]==1,"RPART"]
TestAbs <- TestData[TestData[,"pb"]==0,"RPART"]
eval2 <- dismo::evaluate(p=TestPres, a=TestAbs)
if (is.null(eval2) == F) {
print(eval2)
weights["RPART"] <- max(c(eval2@auc, 0), na.rm=T)
AUC.testing["RPART"] <- max(c(eval2@auc, 0), na.rm=T)
cat(paste("\n", "Results with ensemble.evaluate", "\n\n", sep = ""))
eval3 <- ensemble.evaluate(eval=eval2, eval.train=eval1)
print(eval3)
eval.table <- data.frame(rbind(eval.table, t(eval3)))
rownames(eval.table)[nrow(eval.table)] <- "RPART"
}else{
cat(paste("\n", "WARNING: RPART evaluation failed","\n\n",sep = ""))
ws["RPART"] <- 0
weights["RPART"] <- 0
TrainData[,"RPART"] <- 0
TestData[,"RPART"] <- 0
}
}
if(evaluations.keep ==T) {
evaluations$RPART.C <- eval1
evaluations$RPART.T <- eval2
}
if (models.keep==T) {
models$RPART <- results
models$RPART.PROBIT <- results2
models$formulae$RPART.formula <- RPART.formula
}
}else{
cat(paste("\n", "WARNING: RPART calibration failed", "\n", "\n"))
ws["RPART"] <- weights["RPART"] <- 0
TrainData[,"RPART"] <- 0
if (no.tests == F) {TestData[,"RPART"] <- 0}
}
}
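# NNET: single-hidden-layer neural network (nnet::nnet)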
if(ws["NNET"] > 0 && length(names(TrainData.vars)) == 0) {
cat(paste("\n", "WARNING: no explanatory variables available", sep = ""))
cat(paste("\n", "NNET model can therefore not be calibrated", "\n", sep = ""))
ws["NNET"] <- weights["NNET"] <- 0
}
if (ws["NNET"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Artificial Neural Network (package: nnet)\n", sep=""))
eval1 <- eval2 <- results <- results2 <- TrainPres <- TrainAbs <- TestPres <- TestAbs <- NULL
if(is.null(NNET.OLD) == T) {
if (CATCH.OFF == F) {
tryCatch(results <- nnet::nnet(formula=NNET.formula, size=NNET.size, decay=NNET.decay, data=TrainData, weights=Yweights1,
rang=0.1, maxit=maxit, trace=F),
error= function(err) {print(paste("Artificial Neural Network (package: nnet) calibration failed"))},
silent=F)
}else{
results <- nnet::nnet(formula=NNET.formula, size=NNET.size, decay=NNET.decay, data=TrainData, weights=Yweights1,
rang=0.1, maxit=maxit, trace=F)
}
}else{
results <- NNET.OLD
}
if (is.null(results) == F) {
cat(paste("\n", "Evaluation with calibration data","\n",sep = ""))
TrainData[,"NNET"] <- predict.NNET(object=results, newdata=TrainData.vars)
if (PROBIT == T) {
if(is.null(NNET.PROBIT.OLD) == T) {
probit.formula <- as.formula(paste("pb ~ NNET"))
results2 <- glm(probit.formula, family=binomial(link="probit"), data=TrainData, weights=Yweights1, control=glm.control(maxit=maxit))
}else{
results2 <- NNET.PROBIT.OLD
}
cat(paste("(Predictions transformed with probit link)","\n\n", sep = ""))
TrainData[,"NNET.step1"] <- TrainData[,"NNET"]
TrainData[,"NNET"] <- predict.glm(object=results2, newdata=TrainData, type="response")
}else{
TrainData[which(TrainData[, "NNET"] < 0), "NNET"] <- 0
TrainData[which(TrainData[, "NNET"] > 1), "NNET"] <- 1
}
pred1 <- TrainData[, "NNET"]
pred1[pred1 < 0.0000000001] <- 0.0000000001
pred1[pred1 > 0.9999999999] <- 0.9999999999
cat(paste("Residual deviance (dismo package): ", dismo::calc.deviance(obs=obs1, pred=pred1, calc.mean=F), "\n\n", sep = ""))
TrainPres <- TrainData[TrainData[,"pb"]==1,"NNET"]
TrainAbs <- TrainData[TrainData[,"pb"]==0,"NNET"]
eval1 <- dismo::evaluate(p=TrainPres, a=TrainAbs)
print(eval1)
thresholds["NNET"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity, threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=TrainPres, Abs=TrainAbs)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds["NNET"]))
weights["NNET"] <- max(c(eval1@auc, 0), na.rm=T)
AUC.calibration["NNET"] <- max(c(eval1@auc, 0), na.rm=T)
if (no.tests == F) {
cat(paste("\n", "Evaluation with test data","\n\n",sep = ""))
TestData[,"NNET"] <- predict.NNET(object=results, newdata=TestData.vars)
if (PROBIT == T) {
TestData[,"NNET.step1"] <- TestData[,"NNET"]
TestData[,"NNET"] <- predict.glm(object=results2, newdata=TestData, type="response")
}else{
TestData[which(TestData[, "NNET"] < 0), "NNET"] <- 0
TestData[which(TestData[, "NNET"] > 1), "NNET"] <- 1
}
TestPres <- TestData[TestData[,"pb"]==1,"NNET"]
TestAbs <- TestData[TestData[,"pb"]==0,"NNET"]
eval2 <- dismo::evaluate(p=TestPres, a=TestAbs)
if (is.null(eval2) == F) {
print(eval2)
weights["NNET"] <- max(c(eval2@auc, 0), na.rm=T)
AUC.testing["NNET"] <- max(c(eval2@auc, 0), na.rm=T)
cat(paste("\n", "Results with ensemble.evaluate", "\n\n", sep = ""))
eval3 <- ensemble.evaluate(eval=eval2, eval.train=eval1)
print(eval3)
eval.table <- data.frame(rbind(eval.table, t(eval3)))
rownames(eval.table)[nrow(eval.table)] <- "NNET"
}else{
cat(paste("\n", "WARNING: Artificial Neural Network (package: nnet) evaluation failed","\n\n",sep = ""))
ws["NNET"] <- 0
weights["NNET"] <- 0
TrainData[,"NNET"] <- 0
TestData[,"NNET"] <- 0
}
}
if(evaluations.keep ==T) {
evaluations$NNET.C <- eval1
evaluations$NNET.T <- eval2
}
if (models.keep==T) {
models$NNET <- results
models$NNET.PROBIT <- results2
models$formulae$NNET.formula <- NNET.formula
}
}else{
cat(paste("\n", "WARNING: Artificial Neural Network (package: nnet) calibration failed", "\n", "\n"))
ws["NNET"] <- weights["NNET"] <- 0
TrainData[,"NNET"] <- 0
if (no.tests == F) {TestData[,"NNET"] <- 0}
}
}
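# FDA: flexible discriminant analysis (mda::fda with MARS basis functions)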
if(ws["FDA"] > 0 && length(names(TrainData.vars)) == 0) {
cat(paste("\n", "WARNING: no explanatory variables available", sep = ""))
cat(paste("\n", "FDA model can therefore not be calibrated", "\n", sep = ""))
ws["FDA"] <- weights["FDA"] <- 0
}
if (ws["FDA"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Flexible Discriminant Analysis (package: mda)\n", sep=""))
eval1 <- eval2 <- results <- results2 <- TrainPres <- TrainAbs <- TestPres <- TestAbs <- NULL
if(is.null(FDA.OLD) == T) {
if (CATCH.OFF == F) {
tryCatch(results <- mda::fda(formula=FDA.formula, method=mda::mars, data=TrainData, weights=Yweights1),
error= function(err) {print(paste("Flexible Discriminant Analysis calibration failed"))},
silent=F)
}else{
results <- mda::fda(formula=FDA.formula, method=mda::mars, data=TrainData, weights=Yweights1)
}
}else{
results <- FDA.OLD
}
if (is.null(results) == F) {
cat(paste("\n", "Evaluation with calibration data","\n",sep = ""))
TrainData[,"FDA"] <- predict(object=results, newdata=TrainData.vars, type="posterior")[,2]
if (PROBIT == T) {
if(is.null(FDA.PROBIT.OLD) == T) {
probit.formula <- as.formula(paste("pb ~ FDA"))
results2 <- glm(probit.formula, family=binomial(link="probit"), data=TrainData, weights=Yweights1, control=glm.control(maxit=maxit))
}else{
results2 <- FDA.PROBIT.OLD
}
cat(paste("(Predictions transformed with probit link)","\n\n", sep = ""))
TrainData[,"FDA.step1"] <- TrainData[,"FDA"]
TrainData[,"FDA"] <- predict.glm(object=results2, newdata=TrainData, type="response")
}else{
TrainData[which(TrainData[, "FDA"] < 0), "FDA"] <- 0
TrainData[which(TrainData[, "FDA"] > 1), "FDA"] <- 1
}
pred1 <- TrainData[, "FDA"]
pred1[pred1 < 0.0000000001] <- 0.0000000001
pred1[pred1 > 0.9999999999] <- 0.9999999999
cat(paste("Residual deviance (dismo package): ", dismo::calc.deviance(obs=obs1, pred=pred1, calc.mean=F), "\n\n", sep = ""))
TrainPres <- TrainData[TrainData[,"pb"]==1,"FDA"]
TrainAbs <- TrainData[TrainData[,"pb"]==0,"FDA"]
eval1 <- dismo::evaluate(p=TrainPres, a=TrainAbs)
print(eval1)
thresholds["FDA"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity, threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=TrainPres, Abs=TrainAbs)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds["FDA"]))
weights["FDA"] <- max(c(eval1@auc, 0), na.rm=T)
AUC.calibration["FDA"] <- max(c(eval1@auc, 0), na.rm=T)
if (no.tests == F) {
cat(paste("\n", "Evaluation with test data","\n\n",sep = ""))
TestData[,"FDA"] <- predict(object=results, newdata=TestData.vars, type="posterior")[,2]
if (PROBIT == T) {
TestData[,"FDA.step1"] <- TestData[,"FDA"]
TestData[,"FDA"] <- predict.glm(object=results2, newdata=TestData, type="response")
}else{
TestData[which(TestData[, "FDA"] < 0), "FDA"] <- 0
TestData[which(TestData[, "FDA"] > 1), "FDA"] <- 1
}
TestPres <- TestData[TestData[,"pb"]==1,"FDA"]
TestAbs <- TestData[TestData[,"pb"]==0,"FDA"]
eval2 <- dismo::evaluate(p=TestPres, a=TestAbs)
if (is.null(eval2) == F) {
print(eval2)
weights["FDA"] <- max(c(eval2@auc, 0), na.rm=T)
AUC.testing["FDA"] <- max(c(eval2@auc, 0), na.rm=T)
cat(paste("\n", "Results with ensemble.evaluate", "\n\n", sep = ""))
eval3 <- ensemble.evaluate(eval=eval2, eval.train=eval1)
print(eval3)
eval.table <- data.frame(rbind(eval.table, t(eval3)))
rownames(eval.table)[nrow(eval.table)] <- "FDA"
}else{
cat(paste("\n", "WARNING: Flexible Discriminant Analysis evaluation failed","\n\n",sep = ""))
ws["FDA"] <- 0
weights["FDA"] <- 0
TrainData[,"FDA"] <- 0
TestData[,"FDA"] <- 0
}
}
if(evaluations.keep ==T) {
evaluations$FDA.C <- eval1
evaluations$FDA.T <- eval2
}
if (models.keep==T) {
models$FDA <- results
models$FDA.PROBIT <- results2
models$formulae$FDA.formula <- FDA.formula
}
}else{
cat(paste("\n", "WARNING: Flexible Discriminant Analysis calibration failed", "\n", "\n"))
ws["FDA"] <- weights["FDA"] <- 0
TrainData[,"FDA"] <- 0
if (no.tests == F) {TestData[,"FDA"] <- 0}
}
}
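# SVM: support vector machine (kernlab::ksvm, C-svc with probability model)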
if(ws["SVM"] > 0 && length(names(TrainData.vars)) == 0) {
cat(paste("\n", "WARNING: no explanatory variables available", sep = ""))
cat(paste("\n", "SVM model can therefore not be calibrated", "\n", sep = ""))
ws["SVM"] <- weights["SVM"] <- 0
}
if (ws["SVM"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Support Vector Machines (package: kernlab)\n", sep=""))
eval1 <- eval2 <- results <- results2 <- TrainPres <- TrainAbs <- TestPres <- TestAbs <- NULL
if(is.null(SVM.OLD) == T) {
if (CATCH.OFF == F) {
tryCatch(results <- kernlab::ksvm(SVM.formula, data=TrainData, type="C-svc", prob.model=T),
error= function(err) {print(paste("Support Vector Machines (package: kernlab) calibration failed"))},
silent=F)
}else{
results <- kernlab::ksvm(SVM.formula, data=TrainData, type="C-svc", prob.model=T)
}
}else{
results <- SVM.OLD
}
if (is.null(results) == F) {
cat(paste("\n", "Evaluation with calibration data","\n",sep = ""))
TrainData[,"SVM"] <- kernlab::predict(object=results, newdata=TrainData.vars, type="probabilities")[,2]
if (PROBIT == T) {
if(is.null(SVM.PROBIT.OLD) == T) {
probit.formula <- as.formula(paste("pb ~ SVM"))
results2 <- glm(probit.formula, family=binomial(link="probit"), data=TrainData, weights=Yweights1, control=glm.control(maxit=maxit))
}else{
results2 <- SVM.PROBIT.OLD
}
cat(paste("(Predictions transformed with probit link)","\n\n", sep = ""))
TrainData[,"SVM.step1"] <- TrainData[,"SVM"]
TrainData[,"SVM"] <- predict.glm(object=results2, newdata=TrainData, type="response")
}else{
TrainData[which(TrainData[, "SVM"] < 0), "SVM"] <- 0
TrainData[which(TrainData[, "SVM"] > 1), "SVM"] <- 1
}
pred1 <- TrainData[, "SVM"]
pred1[pred1 < 0.0000000001] <- 0.0000000001
pred1[pred1 > 0.9999999999] <- 0.9999999999
cat(paste("Residual deviance (dismo package): ", dismo::calc.deviance(obs=obs1, pred=pred1, calc.mean=F), "\n\n", sep = ""))
TrainPres <- TrainData[TrainData[,"pb"]==1,"SVM"]
TrainAbs <- TrainData[TrainData[,"pb"]==0,"SVM"]
eval1 <- dismo::evaluate(p=TrainPres, a=TrainAbs)
print(eval1)
thresholds["SVM"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity, threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=TrainPres, Abs=TrainAbs)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds["SVM"]))
weights["SVM"] <- max(c(eval1@auc, 0), na.rm=T)
AUC.calibration["SVM"] <- max(c(eval1@auc, 0), na.rm=T)
if (no.tests == F) {
cat(paste("\n", "Evaluation with test data","\n\n",sep = ""))
TestData[,"SVM"] <- kernlab::predict(object=results, newdata=TestData.vars, type="probabilities")[,2]
if (PROBIT == T) {
TestData[,"SVM.step1"] <- TestData[,"SVM"]
TestData[,"SVM"] <- predict.glm(object=results2, newdata=TestData, type="response")
}else{
TestData[which(TestData[, "SVM"] < 0), "SVM"] <- 0
TestData[which(TestData[, "SVM"] > 1), "SVM"] <- 1
}
TestPres <- TestData[TestData[,"pb"]==1,"SVM"]
TestAbs <- TestData[TestData[,"pb"]==0,"SVM"]
eval2 <- dismo::evaluate(p=TestPres, a=TestAbs)
if (is.null(eval2) == F) {
print(eval2)
weights["SVM"] <- max(c(eval2@auc, 0), na.rm=T)
AUC.testing["SVM"] <- max(c(eval2@auc, 0), na.rm=T)
cat(paste("\n", "Results with ensemble.evaluate", "\n\n", sep = ""))
eval3 <- ensemble.evaluate(eval=eval2, eval.train=eval1)
print(eval3)
eval.table <- data.frame(rbind(eval.table, t(eval3)))
rownames(eval.table)[nrow(eval.table)] <- "SVM"
}else{
cat(paste("\n", "WARNING: Support Vector Machines (package: kernlab) evaluation failed","\n\n",sep = ""))
ws["SVM"] <- 0
weights["SVM"] <- 0
TrainData[,"SVM"] <- 0
TestData[,"SVM"] <- 0
}
}
if(evaluations.keep ==T) {
evaluations$SVM.C <- eval1
evaluations$SVM.T <- eval2
}
if (models.keep==T) {
models$SVM <- results
models$SVM.PROBIT <- results2
models$formulae$SVM.formula <- SVM.formula
}
}else{
cat(paste("\n", "WARNING: Support Vector Machines (package: kernlab) calibration failed", "\n", "\n"))
ws["SVM"] <- weights["SVM"] <- 0
TrainData[,"SVM"] <- 0
if (no.tests == F) {TestData[,"SVM"] <- 0}
}
}
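# Support Vector Machines via e1071::svm (C-classification, polynomial kernel of degree 3);
# suitabilities are extracted through the internal predict.SVME wrapper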
if(ws["SVME"] > 0 && length(names(TrainData.vars)) == 0) {
cat(paste("\n", "WARNING: no explanatory variables available", sep = ""))
cat(paste("\n", "SVME model can therefore not be calibrated", "\n", sep = ""))
ws["SVME"] <- weights["SVME"] <- 0
}
if (ws["SVME"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Support Vector Machines (package: e1071)\n", sep=""))
eval1 <- eval2 <- results <- results2 <- TrainPres <- TrainAbs <- TestPres <- TestAbs <- NULL
if(is.null(SVME.OLD) == T) {
if (CATCH.OFF == F) {
tryCatch(results <- e1071::svm(SVME.formula, data=TrainData, type="C-classification", kernel="polynomial", degree=3, probability=TRUE),
error= function(err) {print(paste("Support Vector Machines (package: e1071) calibration failed"))},
silent=F)
}else{
results <- e1071::svm(SVME.formula, data=TrainData, type="C-classification", kernel="polynomial", degree=3, probability=TRUE)
}
}else{
results <- SVME.OLD
}
if (is.null(results) == F) {
cat(paste("\n", "Evaluation with calibration data","\n",sep = ""))
TrainData[,"SVME"] <- predict.SVME(model=results, newdata=TrainData.vars)
if (PROBIT == T) {
if(is.null(SVME.PROBIT.OLD) == T) {
probit.formula <- as.formula(paste("pb ~ SVME"))
results2 <- glm(probit.formula, family=binomial(link="probit"), data=TrainData, weights=Yweights1, control=glm.control(maxit=maxit))
}else{
results2 <- SVME.PROBIT.OLD
}
cat(paste("(Predictions transformed with probit link)","\n\n", sep = ""))
TrainData[,"SVME.step1"] <- TrainData[,"SVME"]
TrainData[,"SVME"] <- predict.glm(object=results2, newdata=TrainData, type="response")
}else{
TrainData[which(TrainData[, "SVME"] < 0), "SVME"] <- 0
TrainData[which(TrainData[, "SVME"] > 1), "SVME"] <- 1
}
pred1 <- TrainData[, "SVME"]
pred1[pred1 < 0.0000000001] <- 0.0000000001
pred1[pred1 > 0.9999999999] <- 0.9999999999
cat(paste("Residual deviance (dismo package): ", dismo::calc.deviance(obs=obs1, pred=pred1, calc.mean=F), "\n\n", sep = ""))
TrainPres <- TrainData[TrainData[,"pb"]==1,"SVME"]
TrainAbs <- TrainData[TrainData[,"pb"]==0,"SVME"]
eval1 <- dismo::evaluate(p=TrainPres, a=TrainAbs)
print(eval1)
thresholds["SVME"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity, threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=TrainPres, Abs=TrainAbs)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds["SVME"]))
weights["SVME"] <- max(c(eval1@auc, 0), na.rm=T)
AUC.calibration["SVME"] <- max(c(eval1@auc, 0), na.rm=T)
if (no.tests == F) {
cat(paste("\n", "Evaluation with test data","\n\n",sep = ""))
TestData[,"SVME"] <- predict.SVME(model=results, newdata=TestData.vars)
if (PROBIT == T) {
TestData[,"SVME.step1"] <- TestData[,"SVME"]
TestData[,"SVME"] <- predict.glm(object=results2, newdata=TestData, type="response")
}else{
TestData[which(TestData[, "SVME"] < 0), "SVME"] <- 0
TestData[which(TestData[, "SVME"] > 1), "SVME"] <- 1
}
TestPres <- TestData[TestData[,"pb"]==1,"SVME"]
TestAbs <- TestData[TestData[,"pb"]==0,"SVME"]
eval2 <- dismo::evaluate(p=TestPres, a=TestAbs)
if (is.null(eval2) == F) {
print(eval2)
weights["SVME"] <- max(c(eval2@auc, 0), na.rm=T)
AUC.testing["SVME"] <- max(c(eval2@auc, 0), na.rm=T)
cat(paste("\n", "Results with ensemble.evaluate", "\n\n", sep = ""))
eval3 <- ensemble.evaluate(eval=eval2, eval.train=eval1)
print(eval3)
eval.table <- data.frame(rbind(eval.table, t(eval3)))
rownames(eval.table)[nrow(eval.table)] <- "SVME"
}else{
cat(paste("\n", "WARNING: Support Vector Machines (package: e1071) evaluation failed","\n\n",sep = ""))
ws["SVME"] <- 0
weights["SVME"] <- 0
TrainData[,"SVME"] <- 0
TestData[,"SVME"] <- 0
}
}
if(evaluations.keep ==T) {
evaluations$SVME.C <- eval1
evaluations$SVME.T <- eval2
}
if (models.keep==T) {
models$SVME <- results
models$SVME.PROBIT <- results2
models$formulae$SVME.formula <- SVME.formula
}
}else{
cat(paste("\n", "WARNING: Support Vector Machines (package: e1071) calibration failed", "\n", "\n"))
ws["SVME"] <- weights["SVME"] <- 0
TrainData[,"SVME"] <- 0
if (no.tests == F) {TestData[,"SVME"] <- 0}
}
}
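# GLMNET (GLM with lasso or elastic-net regularization) is fitted on the numeric explanatory
# variables only (TrainData.numvars); factors are skipped, and predictions are obtained
# through the internal predict.GLMNET wrapper (behaviour controlled by GLMNET.class)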
if(ws["GLMNET"] > 0 && length(names(TrainData.numvars)) == 0) {
cat(paste("\n", "WARNING: no explanatory variables available", sep = ""))
cat(paste("\n", "GLMNET model can therefore not be calibrated", "\n", sep = ""))
ws["GLMNET"] <- weights["GLMNET"] <- 0
}
if (ws["GLMNET"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". GLM with lasso or elasticnet regularization (package: glmnet)\n", sep=""))
if (is.null(factors) == F) {
cat(paste("\n", "NOTE: factors could not be used as explanatory variables for GLMNET (maybe consider dummy variables)", "\n", sep=""))
}
eval1 <- eval2 <- results <- results2 <- TrainPres <- TrainAbs <- TestPres <- TestAbs <- NULL
if(is.null(GLMNET.OLD) == T) {
if (CATCH.OFF == F) {
tryCatch(results <- glmnet::glmnet(x=as.matrix(TrainData.numvars), y=TrainData[, "pb"], family="binomial", weights=Yweights1, nlambda=GLMNET.nlambda),
error= function(err) {print(paste("GLMNET calibration failed"))},
silent=F)
}else{
results <- glmnet::glmnet(x=as.matrix(TrainData.numvars), y=TrainData[, "pb"], family="binomial", weights=Yweights1, nlambda=GLMNET.nlambda)
}
}else{
results <- GLMNET.OLD
}
if (is.null(results) == F) {
cat(paste("\n", "Evaluation with calibration data","\n",sep = ""))
TrainData[,"GLMNET"] <- predict.GLMNET(model=results, newdata=TrainData.numvars, GLMNET.class=GLMNET.class)
if (PROBIT == T) {
if(is.null(GLMNET.PROBIT.OLD) == T) {
probit.formula <- as.formula(paste("pb ~ GLMNET"))
results2 <- glm(probit.formula, family=binomial(link="probit"), data=TrainData, weights=Yweights1, control=glm.control(maxit=maxit))
}else{
results2 <- GLMNET.PROBIT.OLD
}
cat(paste("(Predictions transformed with probit link)","\n\n", sep = ""))
TrainData[,"GLMNET.step1"] <- TrainData[,"GLMNET"]
TrainData[,"GLMNET"] <- predict.glm(object=results2, newdata=TrainData, type="response")
}else{
TrainData[which(TrainData[, "GLMNET"] < 0), "GLMNET"] <- 0
TrainData[which(TrainData[, "GLMNET"] > 1), "GLMNET"] <- 1
}
pred1 <- TrainData[, "GLMNET"]
pred1[pred1 < 0.0000000001] <- 0.0000000001
pred1[pred1 > 0.9999999999] <- 0.9999999999
cat(paste("Residual deviance (dismo package): ", dismo::calc.deviance(obs=obs1, pred=pred1, calc.mean=F), "\n\n", sep = ""))
TrainPres <- TrainData[TrainData[,"pb"]==1,"GLMNET"]
TrainAbs <- TrainData[TrainData[,"pb"]==0,"GLMNET"]
eval1 <- dismo::evaluate(p=TrainPres, a=TrainAbs)
print(eval1)
thresholds["GLMNET"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity, threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=TrainPres, Abs=TrainAbs)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds["GLMNET"]))
weights["GLMNET"] <- max(c(eval1@auc, 0), na.rm=T)
AUC.calibration["GLMNET"] <- max(c(eval1@auc, 0), na.rm=T)
if (no.tests == F) {
cat(paste("\n", "Evaluation with test data","\n\n",sep = ""))
TestData[,"GLMNET"] <- predict.GLMNET(model=results, newdata=TestData.numvars, GLMNET.class=GLMNET.class)
if (PROBIT == T) {
TestData[,"GLMNET.step1"] <- TestData[,"GLMNET"]
TestData[,"GLMNET"] <- predict.glm(object=results2, newdata=TestData, type="response")
}else{
TestData[which(TestData[, "GLMNET"] < 0), "GLMNET"] <- 0
TestData[which(TestData[, "GLMNET"] > 1), "GLMNET"] <- 1
}
TestPres <- TestData[TestData[,"pb"]==1,"GLMNET"]
TestAbs <- TestData[TestData[,"pb"]==0,"GLMNET"]
eval2 <- dismo::evaluate(p=TestPres, a=TestAbs)
if (is.null(eval2) == F) {
print(eval2)
weights["GLMNET"] <- max(c(eval2@auc, 0), na.rm=T)
AUC.testing["GLMNET"] <- max(c(eval2@auc, 0), na.rm=T)
cat(paste("\n", "Results with ensemble.evaluate", "\n\n", sep = ""))
eval3 <- ensemble.evaluate(eval=eval2, eval.train=eval1)
print(eval3)
eval.table <- data.frame(rbind(eval.table, t(eval3)))
rownames(eval.table)[nrow(eval.table)] <- "GLMNET"
}else{
cat(paste("\n", "WARNING: GLMNET evaluation failed","\n\n",sep = ""))
ws["GLMNET"] <- 0
weights["GLMNET"] <- 0
TrainData[,"GLMNET"] <- 0
TestData[,"GLMNET"] <- 0
}
}
if(evaluations.keep == T) {
evaluations$GLMNET.C <- eval1
evaluations$GLMNET.T <- eval2
}
if (models.keep==T) {
models$GLMNET <- results
models$GLMNET.PROBIT <- results2
if (GLMNET.class == F) {models$formulae$GLMNET.class <- FALSE}
if (GLMNET.class == T) {models$formulae$GLMNET.class <- TRUE}
}
}else{
cat(paste("\n", "WARNING: GLMNET calibration failed", "\n", "\n"))
ws["GLMNET"] <- weights["GLMNET"] <- 0
TrainData[,"GLMNET"] <- 0
if (no.tests == F) {TestData[,"GLMNET"] <- 0}
}
}
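# Original BIOCLIM algorithm: an ensemble.bioclim.object is calibrated from the presence
# records only (TrainData.pres), using the BIOCLIM.O.fraction percentile envelope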
if(ws["BIOCLIM.O"] > 0 && length(names(TrainData.pres)) == 0) {
cat(paste("\n", "WARNING: no explanatory variables available", sep = ""))
cat(paste("\n", "BIOCLIM.O model can therefore not be calibrated", "\n", sep = ""))
ws["BIOCLIM.O"] <- weights["BIOCLIM.O"] <- 0
}
if (ws["BIOCLIM.O"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". original BIOCLIM algorithm (package: BiodiversityR)\n", sep=""))
eval1 <- eval2 <- results <- results2 <- TrainPres <- TrainAbs <- TestPres <- TestAbs <- NULL
if(is.null(BIOCLIM.O.OLD) == T) {
if (CATCH.OFF == F) {
tryCatch(results <- BiodiversityR::ensemble.bioclim.object(x=TrainData.pres, fraction=BIOCLIM.O.fraction, species.name=species.name, factors=factors),
error= function(err) {print(paste("original BIOCLIM calibration failed"))},
silent=F)
}else{
results <- BiodiversityR::ensemble.bioclim.object(x=TrainData.pres, fraction=BIOCLIM.O.fraction, species.name=species.name, factors=factors)
}
}else{
results <- BIOCLIM.O.OLD
}
if (is.null(results) == F) {
cat(paste("\n", "Evaluation with calibration data","\n",sep = ""))
TrainData[,"BIOCLIM.O"] <- predict.BIOCLIM.O(object=results, newdata=TrainData.vars)
if (PROBIT == T) {
if(is.null(BIOCLIM.O.PROBIT.OLD) == T) {
probit.formula <- as.formula(paste("pb ~ BIOCLIM.O"))
results2 <- glm(probit.formula, family=binomial(link="probit"), data=TrainData, weights=Yweights1, control=glm.control(maxit=maxit))
}else{
results2 <- BIOCLIM.O.PROBIT.OLD
}
cat(paste("(Predictions transformed with probit link)","\n\n", sep = ""))
TrainData[,"BIOCLIM.O.step1"] <- TrainData[,"BIOCLIM.O"]
TrainData[,"BIOCLIM.O"] <- predict.glm(object=results2, newdata=TrainData, type="response")
}else{
TrainData[which(TrainData[, "BIOCLIM.O"] < 0), "BIOCLIM.O"] <- 0
TrainData[which(TrainData[, "BIOCLIM.O"] > 1), "BIOCLIM.O"] <- 1
}
pred1 <- TrainData[, "BIOCLIM.O"]
pred1[pred1 < 0.0000000001] <- 0.0000000001
pred1[pred1 > 0.9999999999] <- 0.9999999999
cat(paste("Residual deviance (dismo package): ", dismo::calc.deviance(obs=obs1, pred=pred1, calc.mean=F), "\n\n", sep = ""))
TrainPres <- TrainData[TrainData[,"pb"]==1,"BIOCLIM.O"]
TrainAbs <- TrainData[TrainData[,"pb"]==0,"BIOCLIM.O"]
eval1 <- dismo::evaluate(p=TrainPres, a=TrainAbs)
print(eval1)
thresholds["BIOCLIM.O"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity, threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=TrainPres, Abs=TrainAbs)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds["BIOCLIM.O"]))
weights["BIOCLIM.O"] <- max(c(eval1@auc, 0), na.rm=T)
AUC.calibration["BIOCLIM.O"] <- max(c(eval1@auc, 0), na.rm=T)
if (no.tests == F) {
cat(paste("\n", "Evaluation with test data","\n\n",sep = ""))
TestData[,"BIOCLIM.O"] <- predict.BIOCLIM.O(object=results, newdata=TestData.vars)
if (PROBIT == T) {
TestData[,"BIOCLIM.O.step1"] <- TestData[,"BIOCLIM.O"]
TestData[,"BIOCLIM.O"] <- predict.glm(object=results2, newdata=TestData, type="response")
}else{
TestData[which(TestData[, "BIOCLIM.O"] < 0), "BIOCLIM.O"] <- 0
TestData[which(TestData[, "BIOCLIM.O"] > 1), "BIOCLIM.O"] <- 1
}
TestPres <- TestData[TestData[,"pb"]==1,"BIOCLIM.O"]
TestAbs <- TestData[TestData[,"pb"]==0,"BIOCLIM.O"]
eval2 <- dismo::evaluate(p=TestPres, a=TestAbs)
if (is.null(eval2) == F) {
print(eval2)
weights["BIOCLIM.O"] <- max(c(eval2@auc, 0), na.rm=T)
AUC.testing["BIOCLIM.O"] <- max(c(eval2@auc, 0), na.rm=T)
cat(paste("\n", "Results with ensemble.evaluate", "\n\n", sep = ""))
eval3 <- ensemble.evaluate(eval=eval2, eval.train=eval1)
print(eval3)
eval.table <- data.frame(rbind(eval.table, t(eval3)))
rownames(eval.table)[nrow(eval.table)] <- "BIOCLIM.O"
}else{
cat(paste("\n", "WARNING: original BIOCLIM evaluation failed","\n\n",sep = ""))
ws["BIOCLIM.O"] <- 0
weights["BIOCLIM.O"] <- 0
TrainData[,"BIOCLIM.O"] <- 0
TestData[,"BIOCLIM.O"] <- 0
}
}
if(evaluations.keep ==T) {
evaluations$BIOCLIM.O.C <- eval1
evaluations$BIOCLIM.O.T <- eval2
}
if (models.keep==T) {
models$BIOCLIM.O <- results
models$BIOCLIM.O.PROBIT <- results2
}
}else{
cat(paste("\n", "WARNING: original BIOCLIM calibration failed", "\n", "\n"))
ws["BIOCLIM.O"] <- weights["BIOCLIM.O"] <- 0
TrainData[,"BIOCLIM.O"] <- 0
if (no.tests == F) {TestData[,"BIOCLIM.O"] <- 0}
}
}
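# BIOCLIM, DOMAIN, MAHAL and MAHAL01 are envelope- or distance-based algorithms that only
# handle numeric predictors; categorical variables are therefore removed from the
# calibration (and test) data before these models are fitted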
if (ws["BIOCLIM"] > 0 || ws["DOMAIN"] > 0 || ws["MAHAL"] > 0 || ws["MAHAL01"] > 0) {
if(is.null(factors) == F) {
cat(paste("\n", "NOTE: categorical variables removed for BIOCLIM, DOMAIN, MAHAL and MAHAL01", "\n", sep=""))
for (i in 1:length(factors)) {
TrainData.vars <- TrainData.vars[, which(names(TrainData.vars) != factors[i]), drop=F]
TrainData.pres <- TrainData.pres[, which(names(TrainData.pres) != factors[i]), drop=F]
if (no.tests == F) {TestData.vars <- TestData.vars[, which(names(TestData.vars) != factors[i]), drop=F]}
}
}
}
if(ws["BIOCLIM"] > 0 && length(names(TrainData.pres)) == 0) {
cat(paste("\n", "WARNING: no explanatory variables available", sep = ""))
cat(paste("\n", "BIOCLIM model can therefore not be calibrated", "\n", sep = ""))
ws["BIOCLIM"] <- weights["BIOCLIM"] <- 0
}
if (ws["BIOCLIM"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". BIOCLIM algorithm (package: dismo)\n", sep=""))
eval1 <- eval2 <- results <- results2 <- TrainPres <- TrainAbs <- TestPres <- TestAbs <- NULL
if(is.null(BIOCLIM.OLD) == T) {
if (CATCH.OFF == F) {
tryCatch(results <- dismo::bioclim(x=TrainData.pres),
error= function(err) {print(paste("BIOCLIM calibration failed"))},
silent=F)
}else{
results <- dismo::bioclim(x=TrainData.pres)
}
}else{
results <- BIOCLIM.OLD
}
if (is.null(results) == F) {
cat(paste("\n", "Evaluation with calibration data","\n",sep = ""))
TrainData[,"BIOCLIM"] <- dismo::predict(object=results, x=TrainData.vars)
if (PROBIT == T) {
if(is.null(BIOCLIM.PROBIT.OLD) == T) {
probit.formula <- as.formula(paste("pb ~ BIOCLIM"))
results2 <- glm(probit.formula, family=binomial(link="probit"), data=TrainData, weights=Yweights1, control=glm.control(maxit=maxit))
}else{
results2 <- BIOCLIM.PROBIT.OLD
}
cat(paste("(Predictions transformed with probit link)","\n\n", sep = ""))
TrainData[,"BIOCLIM.step1"] <- TrainData[,"BIOCLIM"]
TrainData[,"BIOCLIM"] <- predict.glm(object=results2, newdata=TrainData, type="response")
}else{
TrainData[which(TrainData[, "BIOCLIM"] < 0), "BIOCLIM"] <- 0
TrainData[which(TrainData[, "BIOCLIM"] > 1), "BIOCLIM"] <- 1
}
pred1 <- TrainData[, "BIOCLIM"]
pred1[pred1 < 0.0000000001] <- 0.0000000001
pred1[pred1 > 0.9999999999] <- 0.9999999999
cat(paste("Residual deviance (dismo package): ", dismo::calc.deviance(obs=obs1, pred=pred1, calc.mean=F), "\n\n", sep = ""))
TrainPres <- TrainData[TrainData[,"pb"]==1,"BIOCLIM"]
TrainAbs <- TrainData[TrainData[,"pb"]==0,"BIOCLIM"]
eval1 <- dismo::evaluate(p=TrainPres, a=TrainAbs)
print(eval1)
thresholds["BIOCLIM"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity, threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=TrainPres, Abs=TrainAbs)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds["BIOCLIM"]))
weights["BIOCLIM"] <- max(c(eval1@auc, 0), na.rm=T)
AUC.calibration["BIOCLIM"] <- max(c(eval1@auc, 0), na.rm=T)
if (no.tests == F) {
cat(paste("\n", "Evaluation with test data","\n\n",sep = ""))
TestData[,"BIOCLIM"] <- dismo::predict(object=results, x=TestData.vars)
if (PROBIT == T) {
TestData[,"BIOCLIM.step1"] <- TestData[,"BIOCLIM"]
TestData[,"BIOCLIM"] <- predict.glm(object=results2, newdata=TestData, type="response")
}else{
TestData[which(TestData[, "BIOCLIM"] < 0), "BIOCLIM"] <- 0
TestData[which(TestData[, "BIOCLIM"] > 1), "BIOCLIM"] <- 1
}
TestPres <- TestData[TestData[,"pb"]==1,"BIOCLIM"]
TestAbs <- TestData[TestData[,"pb"]==0,"BIOCLIM"]
eval2 <- dismo::evaluate(p=TestPres, a=TestAbs)
if (is.null(eval2) == F) {
print(eval2)
weights["BIOCLIM"] <- max(c(eval2@auc, 0), na.rm=T)
AUC.testing["BIOCLIM"] <- max(c(eval2@auc, 0), na.rm=T)
cat(paste("\n", "Results with ensemble.evaluate", "\n\n", sep = ""))
eval3 <- ensemble.evaluate(eval=eval2, eval.train=eval1)
print(eval3)
eval.table <- data.frame(rbind(eval.table, t(eval3)))
rownames(eval.table)[nrow(eval.table)] <- "BIOCLIM"
}else{
cat(paste("\n", "WARNING: BIOCLIM evaluation failed","\n\n",sep = ""))
ws["BIOCLIM"] <- 0
weights["BIOCLIM"] <- 0
TrainData[,"BIOCLIM"] <- 0
TestData[,"BIOCLIM"] <- 0
}
}
if(evaluations.keep ==T) {
evaluations$BIOCLIM.C <- eval1
evaluations$BIOCLIM.T <- eval2
}
if (models.keep==T) {
models$BIOCLIM <- results
models$BIOCLIM.PROBIT <- results2
}
}else{
cat(paste("\n", "WARNING: BIOCLIM calibration failed", "\n", "\n"))
ws["BIOCLIM"] <- weights["BIOCLIM"] <- 0
TrainData[,"BIOCLIM"] <- 0
if (no.tests == F) {TestData[,"BIOCLIM"] <- 0}
}
}
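# For DOMAIN, dummy variables that show no variation among the training presences are
# removed as well (see the warning issued below for each variable concerned)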
if (ws["DOMAIN"] > 0) {
dummy.vars.noDOMAIN <- as.character(NULL)
if(is.null(dummy.vars) == F) {
for (i in 1:length(dummy.vars)) {
max.var <- max(TrainData.pres[, which(names(TrainData.pres) == dummy.vars[i])], na.rm=T)
min.var <- min(TrainData.pres[, which(names(TrainData.pres) == dummy.vars[i])], na.rm=T)
if (max.var == min.var) {
dummy.vars.noDOMAIN <- c(dummy.vars.noDOMAIN, dummy.vars[i])
cat(paste("\n", "WARNING: dummy variable ", dummy.vars[i], " was removed for DOMAIN because it has no variation for the training points", sep=""))
TrainData.vars <- TrainData.vars[, which(names(TrainData.vars) != dummy.vars[i]), drop=F]
TrainData.pres <- TrainData.pres[, which(names(TrainData.pres) != dummy.vars[i]), drop=F]
if (no.tests == F) {TestData.vars <- TestData.vars[, which(names(TestData.vars) != dummy.vars[i]), drop=F]}
}
}
if (length(dummy.vars.noDOMAIN) == 0) {dummy.vars.noDOMAIN <- NULL}
cat(paste("\n"))
}
}
if(ws["DOMAIN"] > 0 && length(names(TrainData.vars)) == 0) {
cat(paste("\n", "WARNING: no explanatory variables available", sep = ""))
cat(paste("\n", "DOMAIN model can therefore not be calibrated", "\n", sep = ""))
ws["DOMAIN"] <- weights["DOMAIN"] <- 0
}
if (ws["DOMAIN"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". DOMAIN algorithm (package: dismo)\n", sep=""))
eval1 <- eval2 <- results <- results2 <- TrainPres <- TrainAbs <- TestPres <- TestAbs <- NULL
if(is.null(DOMAIN.OLD) == T) {
if (CATCH.OFF == F) {
tryCatch(results <- dismo::domain(x=TrainData.pres),
error= function(err) {print(paste("DOMAIN calibration failed"))},
silent=F)
}else{
results <- dismo::domain(x=TrainData.pres)
}
}else{
results <- DOMAIN.OLD
}
if (is.null(results) == F) {
cat(paste("\n", "Evaluation with calibration data","\n",sep = ""))
TrainData[,"DOMAIN"] <- dismo::predict(object=results, x=TrainData.vars)
if (PROBIT == T) {
if(is.null(DOMAIN.PROBIT.OLD) == T) {
probit.formula <- as.formula(paste("pb ~ DOMAIN"))
results2 <- glm(probit.formula, family=binomial(link="probit"), data=TrainData, weights=Yweights1, control=glm.control(maxit=maxit))
}else{
results2 <- DOMAIN.PROBIT.OLD
}
cat(paste("(Predictions transformed with probit link)","\n\n", sep = ""))
TrainData[,"DOMAIN.step1"] <- TrainData[,"DOMAIN"]
TrainData[,"DOMAIN"] <- predict.glm(object=results2, newdata=TrainData, type="response")
}else{
TrainData[which(TrainData[, "DOMAIN"] < 0), "DOMAIN"] <- 0
TrainData[which(TrainData[, "DOMAIN"] > 1), "DOMAIN"] <- 1
}
pred1 <- TrainData[, "DOMAIN"]
pred1[pred1 < 0.0000000001] <- 0.0000000001
pred1[pred1 > 0.9999999999] <- 0.9999999999
cat(paste("Residual deviance (dismo package): ", dismo::calc.deviance(obs=obs1, pred=pred1, calc.mean=F), "\n\n", sep = ""))
TrainPres <- TrainData[TrainData[,"pb"]==1,"DOMAIN"]
TrainAbs <- TrainData[TrainData[,"pb"]==0,"DOMAIN"]
eval1 <- dismo::evaluate(p=TrainPres, a=TrainAbs)
print(eval1)
thresholds["DOMAIN"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity, threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=TrainPres, Abs=TrainAbs)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds["DOMAIN"]))
weights["DOMAIN"] <- max(c(eval1@auc, 0), na.rm=T)
AUC.calibration["DOMAIN"] <- max(c(eval1@auc, 0), na.rm=T)
if (no.tests == F) {
cat(paste("\n", "Evaluation with test data","\n\n",sep = ""))
TestData[,"DOMAIN"] <- dismo::predict(object=results, x=TestData.vars)
if (PROBIT == T) {
TestData[,"DOMAIN.step1"] <- TestData[,"DOMAIN"]
TestData[,"DOMAIN"] <- predict.glm(object=results2, newdata=TestData, type="response")
}else{
TestData[which(TestData[, "DOMAIN"] < 0), "DOMAIN"] <- 0
TestData[which(TestData[, "DOMAIN"] > 1), "DOMAIN"] <- 1
}
TestPres <- TestData[TestData[,"pb"]==1,"DOMAIN"]
TestAbs <- TestData[TestData[,"pb"]==0,"DOMAIN"]
eval2 <- dismo::evaluate(p=TestPres, a=TestAbs)
if (is.null(eval2) == F) {
print(eval2)
weights["DOMAIN"] <- max(c(eval2@auc, 0), na.rm=T)
AUC.testing["DOMAIN"] <- max(c(eval2@auc, 0), na.rm=T)
cat(paste("\n", "Results with ensemble.evaluate", "\n\n", sep = ""))
eval3 <- ensemble.evaluate(eval=eval2, eval.train=eval1)
print(eval3)
eval.table <- data.frame(rbind(eval.table, t(eval3)))
rownames(eval.table)[nrow(eval.table)] <- "DOMAIN"
}else{
cat(paste("\n", "WARNING: DOMAIN evaluation failed","\n\n",sep = ""))
ws["DOMAIN"] <- 0
weights["DOMAIN"] <- 0
TrainData[,"DOMAIN"] <- 0
TestData[,"DOMAIN"] <- 0
}
}
if(evaluations.keep ==T) {
evaluations$DOMAIN.C <- eval1
evaluations$DOMAIN.T <- eval2
}
if (models.keep==T) {
models$DOMAIN <- results
if (length(dummy.vars.noDOMAIN) > 0) {
models$dummy.vars.noDOMAIN <- dummy.vars.noDOMAIN
evaluations$dummy.vars.noDOMAIN <- dummy.vars.noDOMAIN
}
models$DOMAIN.PROBIT <- results2
}
}else{
cat(paste("\n", "WARNING: DOMAIN calibration failed", "\n", "\n"))
ws["DOMAIN"] <- weights["DOMAIN"] <- 0
TrainData[,"DOMAIN"] <- 0
if (no.tests == F) {TestData[,"DOMAIN"] <- 0}
}
}
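# The Mahalanobis algorithm is based on the covariance of the numeric predictors at the
# presence locations, so all dummy variables are removed first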
if (ws["MAHAL"] > 0) {
if(is.null(dummy.vars) == F) {
cat(paste("\n", "NOTE: all dummy variables were removed for Mahalanobis algorithm", "\n", sep=""))
for (i in 1:length(dummy.vars)) {
TrainData.vars <- TrainData.vars[, which(names(TrainData.vars) != dummy.vars[i]), drop=F]
TrainData.pres <- TrainData.pres[, which(names(TrainData.pres) != dummy.vars[i]), drop=F]
if (no.tests == F) {TestData.vars <- TestData.vars[, which(names(TestData.vars) != dummy.vars[i]), drop=F]}
}
}
}
if(ws["MAHAL"] > 0 && length(names(TrainData.pres)) == 0) {
cat(paste("\n", "WARNING: no explanatory variables available", sep = ""))
cat(paste("\n", "MAHAL model can therefore not be calibrated", "\n", sep = ""))
ws["MAHAL"] <- weights["MAHAL"] <- 0
}
if (ws["MAHAL"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Mahalanobis algorithm (package: dismo)\n", sep=""))
eval1 <- eval2 <- results <- results2 <- TrainPres <- TrainAbs <- TestPres <- TestAbs <- NULL
if(is.null(MAHAL.OLD) == T) {
if (CATCH.OFF == F) {
tryCatch(results <- dismo::mahal(x=TrainData.pres),
error= function(err) {print(paste("Mahalanobis calibration failed"))},
silent=F)
}else{
results <- dismo::mahal(x=TrainData.pres)
}
}else{
results <- MAHAL.OLD
}
if (is.null(results) == F) {
cat(paste("\n", "Evaluation with calibration data","\n",sep = ""))
TrainData[,"MAHAL"] <- predict.MAHAL(model=results, newdata=TrainData.vars, PROBIT=PROBIT)
if (PROBIT == T) {
if(is.null(MAHAL.PROBIT.OLD) == T) {
probit.formula <- as.formula(paste("pb ~ MAHAL"))
results2 <- glm(probit.formula, family=binomial(link="probit"), data=TrainData, weights=Yweights1, control=glm.control(maxit=maxit))
}else{
results2 <- MAHAL.PROBIT.OLD
}
cat(paste("(Predictions transformed with probit link)","\n\n", sep = ""))
TrainData[,"MAHAL.step1"] <- TrainData[,"MAHAL"]
TrainData[,"MAHAL"] <- predict.glm(object=results2, newdata=TrainData, type="response")
}else{
TrainData[which(TrainData[, "MAHAL"] < 0), "MAHAL"] <- 0
TrainData[which(TrainData[, "MAHAL"] > 1), "MAHAL"] <- 1
}
pred1 <- TrainData[, "MAHAL"]
pred1[pred1 < 0.0000000001] <- 0.0000000001
pred1[pred1 > 0.9999999999] <- 0.9999999999
cat(paste("Residual deviance (dismo package): ", dismo::calc.deviance(obs=obs1, pred=pred1, calc.mean=F), "\n\n", sep = ""))
TrainPres <- TrainData[TrainData[,"pb"]==1,"MAHAL"]
TrainAbs <- TrainData[TrainData[,"pb"]==0,"MAHAL"]
eval1 <- dismo::evaluate(p=TrainPres, a=TrainAbs)
print(eval1)
thresholds["MAHAL"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity, threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=TrainPres, Abs=TrainAbs)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds["MAHAL"]))
weights["MAHAL"] <- max(c(eval1@auc, 0), na.rm=T)
AUC.calibration["MAHAL"] <- max(c(eval1@auc, 0), na.rm=T)
if (no.tests == F) {
cat(paste("\n", "Evaluation with test data","\n\n",sep = ""))
TestData[,"MAHAL"] <- predict.MAHAL(model=results, newdata=TestData.vars, PROBIT=PROBIT)
if (PROBIT == T) {
TestData[,"MAHAL.step1"] <- TestData[,"MAHAL"]
TestData[,"MAHAL"] <- predict.glm(object=results2, newdata=TestData, type="response")
}else{
TestData[which(TestData[, "MAHAL"] < 0), "MAHAL"] <- 0
TestData[which(TestData[, "MAHAL"] > 1), "MAHAL"] <- 1
}
TestPres <- TestData[TestData[,"pb"]==1,"MAHAL"]
TestAbs <- TestData[TestData[,"pb"]==0,"MAHAL"]
eval2 <- dismo::evaluate(p=TestPres, a=TestAbs)
if (is.null(eval2) == F) {
print(eval2)
weights["MAHAL"] <- max(c(eval2@auc, 0), na.rm=T)
AUC.testing["MAHAL"] <- max(c(eval2@auc, 0), na.rm=T)
cat(paste("\n", "Results with ensemble.evaluate", "\n\n", sep = ""))
eval3 <- ensemble.evaluate(eval=eval2, eval.train=eval1)
print(eval3)
eval.table <- data.frame(rbind(eval.table, t(eval3)))
rownames(eval.table)[nrow(eval.table)] <- "MAHAL"
}else{
cat(paste("\n", "WARNING: Mahalanobis evaluation failed","\n\n",sep = ""))
ws["MAHAL"] <- 0
weights["MAHAL"] <- 0
TrainData[,"MAHAL"] <- 0
TestData[,"MAHAL"] <- 0
}
}
if(evaluations.keep ==T) {
evaluations$MAHAL.C <- eval1
evaluations$MAHAL.T <- eval2
}
if (models.keep==T) {
models$MAHAL <- results
models$MAHAL.PROBIT <- results2
}
}else{
cat(paste("\n", "WARNING: Mahalanobis calibration failed", "\n", "\n"))
ws["MAHAL"] <- weights["MAHAL"] <- 0
TrainData[,"MAHAL"] <- 0
if (no.tests == F) {TestData[,"MAHAL"] <- 0}
}
}
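# MAHAL01 fits the same Mahalanobis model, but the internal predict.MAHAL01 wrapper
# rescales the raw output to the 0 to 1 interval via the MAHAL.shape parameter before
# the optional probit step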
if (ws["MAHAL01"] > 0) {
if(is.null(dummy.vars) == F) {
cat(paste("\n", "NOTE: all dummy variables were removed for Mahalanobis algorithm", "\n", sep=""))
for (i in 1:length(dummy.vars)) {
TrainData.vars <- TrainData.vars[, which(names(TrainData.vars) != dummy.vars[i]), drop=F]
TrainData.pres <- TrainData.pres[, which(names(TrainData.pres) != dummy.vars[i]), drop=F]
if (no.tests == F) {TestData.vars <- TestData.vars[, which(names(TestData.vars) != dummy.vars[i]), drop=F]}
}
}
}
if(ws["MAHAL01"] > 0 && length(names(TrainData.pres)) == 0) {
cat(paste("\n", "WARNING: no explanatory variables available", sep = ""))
cat(paste("\n", "MAHAL01 model can therefore not be calibrated", "\n", sep = ""))
ws["MAHAL01"] <- weights["MAHAL01"] <- 0
}
if (ws["MAHAL01"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Mahalanobis algorithm (transformed within 0 to 1 interval)", "\n", sep=""))
eval1 <- eval2 <- results <- results2 <- TrainPres <- TrainAbs <- TestPres <- TestAbs <- NULL
if(is.null(MAHAL01.OLD) == T) {
if (CATCH.OFF == F) {
tryCatch(results <- dismo::mahal(x=TrainData.pres),
error= function(err) {print(paste("transformed Mahalanobis calibration failed"))},
silent=F)
}else{
results <- dismo::mahal(x=TrainData.pres)
}
}else{
results <- MAHAL01.OLD
}
if (is.null(results) == F) {
cat(paste("\n", "Evaluation with calibration data","\n",sep = ""))
TrainData[,"MAHAL01"] <- predict.MAHAL01(model=results, newdata=TrainData.vars, MAHAL.shape=MAHAL.shape)
if (PROBIT == T) {
if(is.null(MAHAL01.PROBIT.OLD) == T) {
probit.formula <- as.formula(paste("pb ~ MAHAL01"))
results2 <- glm(probit.formula, family=binomial(link="probit"), data=TrainData, weights=Yweights1, control=glm.control(maxit=maxit))
}else{
results2 <- MAHAL01.PROBIT.OLD
}
cat(paste("(Predictions transformed with probit link)","\n\n", sep = ""))
TrainData[,"MAHAL01.step1"] <- TrainData[,"MAHAL01"]
TrainData[,"MAHAL01"] <- predict.glm(object=results2, newdata=TrainData, type="response")
}else{
TrainData[which(TrainData[, "MAHAL01"] < 0), "MAHAL01"] <- 0
TrainData[which(TrainData[, "MAHAL01"] > 1), "MAHAL01"] <- 1
}
pred1 <- TrainData[, "MAHAL01"]
pred1[pred1 < 0.0000000001] <- 0.0000000001
pred1[pred1 > 0.9999999999] <- 0.9999999999
cat(paste("Residual deviance (dismo package): ", dismo::calc.deviance(obs=obs1, pred=pred1, calc.mean=F), "\n\n", sep = ""))
TrainPres <- TrainData[TrainData[,"pb"]==1, "MAHAL01"]
TrainAbs <- TrainData[TrainData[,"pb"]==0, "MAHAL01"]
eval1 <- dismo::evaluate(p=TrainPres, a=TrainAbs)
print(eval1)
thresholds["MAHAL01"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity, threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=TrainPres, Abs=TrainAbs)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds["MAHAL01"]))
weights["MAHAL01"] <- max(c(eval1@auc, 0), na.rm=T)
AUC.calibration["MAHAL01"] <- max(c(eval1@auc, 0), na.rm=T)
if (no.tests == F) {
cat(paste("\n", "Evaluation with test data","\n\n",sep = ""))
TestData[,"MAHAL01"] <- predict.MAHAL01(model=results, newdata=TestData.vars, MAHAL.shape=MAHAL.shape)
if (PROBIT == T) {
TestData[,"MAHAL01.step1"] <- TestData[,"MAHAL01"]
TestData[,"MAHAL01"] <- predict.glm(object=results2, newdata=TestData, type="response")
}else{
TestData[which(TestData[, "MAHAL01"] < 0), "MAHAL01"] <- 0
TestData[which(TestData[, "MAHAL01"] > 1), "MAHAL01"] <- 1
}
TestPres <- TestData[TestData[,"pb"]==1, "MAHAL01"]
TestAbs <- TestData[TestData[,"pb"]==0, "MAHAL01"]
eval2 <- dismo::evaluate(p=TestPres, a=TestAbs)
if (is.null(eval2) == F) {
print(eval2)
weights["MAHAL01"] <- max(c(eval2@auc, 0), na.rm=T)
AUC.testing["MAHAL01"] <- max(c(eval2@auc, 0), na.rm=T)
cat(paste("\n", "Results with ensemble.evaluate", "\n\n", sep = ""))
eval3 <- ensemble.evaluate(eval=eval2, eval.train=eval1)
print(eval3)
eval.table <- data.frame(rbind(eval.table, t(eval3)))
rownames(eval.table)[nrow(eval.table)] <- "MAHAL01"
}else{
cat(paste("\n", "WARNING: transformed Mahalanobis evaluation failed","\n\n",sep = ""))
ws["MAHAL01"] <- 0
weights["MAHAL01"] <- 0
TrainData[,"MAHAL01"] <- 0
TestData[,"MAHAL01"] <- 0
}
}
if(evaluations.keep ==T) {
evaluations$MAHAL01.C <- eval1
evaluations$MAHAL01.T <- eval2
}
if (models.keep==T) {
models$MAHAL01 <- results
models$formulae$MAHAL.shape <- MAHAL.shape
models$MAHAL01.PROBIT <- results2
}
}else{
cat(paste("\n", "WARNING: transformed Mahalanobis calibration failed", "\n", "\n"))
ws["MAHAL01"] <- weights["MAHAL01"] <- 0
TrainData[,"MAHAL01"] <- 0
if (no.tests == F) {TestData[,"MAHAL01"] <- 0}
}
}
#
models$thresholds <- thresholds
#
if(ENSEMBLE.tune == F) {
if (sum(ws, na.rm=T) > 0) {
cat(paste("\n", "Ensemble weights based directly on input weights scaled to sum up to 1", "\n", sep = ""))
print(ws)
}else{
# modified as function often used only to create data sets
# cat(paste("\n", "NOTE: no positive input weights", "\n", sep = ""))
}
if(evaluations.keep == T) {evaluations$ensemble.weights <- ws}
if(models.keep==T) {models$output.weights <- ws}
}else{
# use different strategies for calculating the ensemble model
# as with the input AUC values, internal test data are used to select the best ensemble
# recalculating the AUC values is computationally cheap; the initial per-algorithm calculations are kept so that problems with specific algorithms can be spotted
cat(paste("\n", "Weights tuned by ensemble.strategy function", sep=""))
strategy.results <- ensemble.strategy(TrainData=TrainData, TestData=TestData,
ENSEMBLE.exponent=ENSEMBLE.exponent, ENSEMBLE.best=ENSEMBLE.best, ENSEMBLE.min=ENSEMBLE.min)
ws <- strategy.results$weights
if (sum(ws, na.rm=T) > 0) {
cat(paste("\n", "Minimum input weight is ", ENSEMBLE.weight.min, "\n", sep=""))
ws2 <- ws
while(min(ws2) < ENSEMBLE.weight.min) {
ws2 <- ws2[-which.min(ws2)]
ws2 <- ensemble.weights(weights=ws2, exponent=1, best=0, min.weight=0)
}
ws[] <- 0
for (i in 1:length(ws2)) {ws[which(names(ws) == names(ws2)[i])] <- ws2[i]}
cat(paste("\n", "Weights for ensemble forecasting", "\n", sep = ""))
print(ws)
}else{
ENSEMBLE.tune <- FALSE
}
if(evaluations.keep == T) {evaluations$STRATEGY.weights <- ws}
if(models.keep==T) {models$output.weights <- ws}
}
# modified to also calculate ensemble with pre-defined weights
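# The ensemble prediction is simply the weighted sum of the per-algorithm suitabilities.
# Illustrative sketch with hypothetical numbers: if only GLM and RF carry weight,
# ws <- c(GLM=0.4, RF=0.6), and a site has suitabilities 0.2 (GLM) and 0.8 (RF),
# then its ensemble suitability is 0.4*0.2 + 0.6*0.8 = 0.56.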
if(sum(ws > 0, na.rm=T) > 0) {
TrainData[,"ENSEMBLE"] <- ws["MAXENT"]*TrainData[,"MAXENT"] + ws["MAXNET"]*TrainData[,"MAXNET"] + ws["MAXLIKE"]*TrainData[,"MAXLIKE"] + ws["GBM"]*TrainData[,"GBM"] +
ws["GBMSTEP"]*TrainData[,"GBMSTEP"] + ws["RF"]*TrainData[,"RF"] + ws["CF"]*TrainData[,"CF"] + ws["GLM"]*TrainData[,"GLM"] +
ws["GLMSTEP"]*TrainData[,"GLMSTEP"] + ws["GAM"]*TrainData[,"GAM"] + ws["GAMSTEP"]*TrainData[,"GAMSTEP"] +
ws["MGCV"]*TrainData[,"MGCV"] + ws["MGCVFIX"]*TrainData[,"MGCVFIX"] + ws["EARTH"]*TrainData[,"EARTH"] +
ws["RPART"]*TrainData[,"RPART"] + ws["NNET"]*TrainData[,"NNET"] + ws["FDA"]*TrainData[,"FDA"] +
ws["SVM"]*TrainData[,"SVM"] + ws["SVME"]*TrainData[,"SVME"] + ws["GLMNET"]*TrainData[,"GLMNET"] +
ws["BIOCLIM.O"]*TrainData[,"BIOCLIM.O"] + ws["BIOCLIM"]*TrainData[,"BIOCLIM"] +
ws["DOMAIN"]*TrainData[,"DOMAIN"] + ws["MAHAL"]*TrainData[,"MAHAL"] + ws["MAHAL01"]*TrainData[,"MAHAL01"]
pred1 <- TrainData[, "ENSEMBLE"]
pred1[pred1 < 0.0000000001] <- 0.0000000001
pred1[pred1 > 0.9999999999] <- 0.9999999999
pred2 <- rep(mean(obs1), times=length(pred1))
if (no.tests == F) {
TestData[,"ENSEMBLE"] <- ws["MAXENT"]*TestData[,"MAXENT"] + ws["MAXNET"]*TestData[,"MAXNET"] + ws["MAXLIKE"]*TestData[,"MAXLIKE"] + ws["GBM"]*TestData[,"GBM"] +
ws["GBMSTEP"]*TestData[,"GBMSTEP"] + ws["RF"]*TestData[,"RF"] + ws["CF"]*TestData[,"CF"] + ws["GLM"]*TestData[,"GLM"] +
ws["GLMSTEP"]*TestData[,"GLMSTEP"] + ws["GAM"]*TestData[,"GAM"] + ws["GAMSTEP"]*TestData[,"GAMSTEP"] +
ws["MGCV"]*TestData[,"MGCV"] + ws["MGCVFIX"]*TestData[,"MGCVFIX"] + ws["EARTH"]*TestData[,"EARTH"] +
ws["RPART"]*TestData[,"RPART"] + ws["NNET"]*TestData[,"NNET"] + ws["FDA"]*TestData[,"FDA"] +
ws["SVM"]*TestData[,"SVM"] + ws["SVME"]*TestData[,"SVME"] + ws["GLMNET"]*TestData[,"GLMNET"] +
ws["BIOCLIM.O"]*TestData[,"BIOCLIM.O"] + ws["BIOCLIM"]*TestData[,"BIOCLIM"] +
ws["DOMAIN"]*TestData[,"DOMAIN"] + ws["MAHAL"]*TestData[,"MAHAL"] + ws["MAHAL01"]*TestData[,"MAHAL01"]
}
mc <- mc+1
cat(paste("\n\n", mc, ". Ensemble algorithm\n", sep=""))
eval1 <- eval2 <- NULL
cat(paste("\n", "Ensemble evaluation with calibration data", "\n\n", sep = ""))
cat(paste("Residual deviance (dismo package): ", dismo::calc.deviance(obs=obs1, pred=pred1, calc.mean=F), "\n", sep = ""))
cat(paste("Residual deviance if all predictions were ", mean(obs1), " (prevalence): ", dismo::calc.deviance(obs=obs1, pred=pred2, calc.mean=F), "\n", sep = ""))
# worst possible predictions
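# presences are given predictions close to 0 and absences predictions close to 1, which
# provides an upper bound for the residual deviance; this assumes that the presence rows
# of TrainData come before the absence rows (as with data prepared by dismo::prepareData)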
numpres <- sum(TrainData[, "pb"])
numabs <- nrow(TrainData) - numpres
pred1 <- rep(0.000000001, numpres)
pred2 <- rep(0.999999999, numabs)
pred3 <- c(pred1, pred2)
null.dev.cal <- dismo::calc.deviance(obs=obs1, pred=pred3, calc.mean=F)
cat(paste("Residual deviance for worst possible predictions: ", null.dev.cal, "\n\n", sep = ""))
TrainPres <- as.numeric(TrainData[TrainData[,"pb"]==1, "ENSEMBLE"])
TrainAbs <- as.numeric(TrainData[TrainData[,"pb"]==0, "ENSEMBLE"])
if (sum(TrainPres, na.rm=T) <= 0 || sum(TrainAbs, na.rm=T) <= 0) {
cat(paste("\n", "NOTE: not possible to evaluate the ensemble model since calibration probabilities not available", "\n", sep = ""))
}else{
eval1 <- dismo::evaluate(p=TrainPres, a=TrainAbs)
print(eval1)
thresholds["ENSEMBLE"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity, threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=TrainPres, Abs=TrainAbs)
AUC.calibration["ENSEMBLE"] <- max(c(eval1@auc, 0), na.rm=T)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds["ENSEMBLE"]))
if (models.keep == T) {models$thresholds <- thresholds}
if (no.tests == F) {
cat(paste("\n", "Ensemble evaluation with testing data", "\n\n", sep = ""))
TestPres <- as.numeric(TestData[TestData[,"pb"]==1,"ENSEMBLE"])
TestAbs <- as.numeric(TestData[TestData[,"pb"]==0,"ENSEMBLE"])
if (sum(TestPres, na.rm=T) <= 0 || sum(TestAbs, na.rm=T) <= 0) {
cat(paste("\n", "NOTE: not possible to evaluate the ensemble model since evaluation probabilities not available", "\n", sep = ""))
}else{
eval2 <- dismo::evaluate(p=TestPres, a=TestAbs)
print(eval2)
AUC.testing["ENSEMBLE"] <- max(c(eval2@auc, 0), na.rm=T)
cat(paste("\n", "Results with ensemble.evaluate", "\n\n", sep = ""))
eval3 <- ensemble.evaluate(eval=eval2, eval.train=eval1)
print(eval3)
eval.table <- data.frame(rbind(eval.table, t(eval3)))
rownames(eval.table)[nrow(eval.table)] <- "ENSEMBLE"
}
}
if(evaluations.keep==T) {
evaluations$ENSEMBLE.C <- eval1
evaluations$ENSEMBLE.T <- eval2
}
}
}
if(models.keep==T) {
models$TrainData <- TrainData
if (no.tests == F) {models$TestData <- TestData}
models$p <- p
models$pt <- pt
models$a <- a
models$at <- at
models$MAXENT.a <- MAXENT.a
models$var.names <- var.names
models$num.vars <- num.vars
models$factors <- factors
models$factlevels <- factlevels
models$dummy.vars <- dummy.vars
models$dummy.vars.noDOMAIN <- dummy.vars.noDOMAIN
}
if(evaluations.keep == T) {
evaluations$TrainData <- TrainData
if (no.tests == F) {evaluations$TestData <- TestData}
evaluations$p <- p
evaluations$pt <- pt
evaluations$a <- a
evaluations$at <- at
evaluations$MAXENT.a <- MAXENT.a
evaluations$var.names <- var.names
evaluations$num.vars <- num.vars
evaluations$factors <- factors
evaluations$factlevels <- factlevels
evaluations$dummy.vars <- dummy.vars
evaluations$dummy.vars.noDOMAIN <- dummy.vars.noDOMAIN
}
remove(Yweights1, envir=.BiodiversityR)
remove(TrainData, envir=.BiodiversityR)
remove(TrainData.vars, envir=.BiodiversityR)
remove(TrainData.numvars, envir=.BiodiversityR)
remove(TrainData.pres, envir=.BiodiversityR)
if (ws["GBMSTEP"] > 0) {remove(TrainData.orig, envir=.BiodiversityR)}
if (no.tests == F) {
remove(TestData, envir=.BiodiversityR)
remove(TestData.vars, envir=.BiodiversityR)
remove(TestData.numvars, envir=.BiodiversityR)
}
if (models.save==T && models.keep==T) {
ensemble.models <- models
save(ensemble.models, file=paste(getwd(), "/models/", models$species.name, "_models", sep=""), compress="xz")
}
if (models.keep == F) {models <- NULL}
result <- list(evaluations=evaluations, eval.table=eval.table, AUC.calibration=AUC.calibration, AUC.testing=AUC.testing, models=models, VIF=newVIF, call=match.call() )
cat(paste("\n\n"))
if (SINK==T && OLD.SINK==F) {sink(file=NULL, append=T)}
if(sum(ws > 0, na.rm=T) > 0){cat(paste("\n", "finished model calibrations (function ensemble.calibrate.models)", "\n\n", sep = ""))}
return(result)
}
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/ensemble.calibrate.models.R
|
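# ensemble.calibrate.models.gbm: k-fold tuning of the stepwise gbm (GBMSTEP) parameters
# tree.complexity and learning.rate through repeated calls to ensemble.calibrate.models.
#
# Minimal usage sketch (not taken from the package documentation; 'predictors' and 'pres'
# are hypothetical objects: a RasterStack and a two-column matrix of presence coordinates):
# gbm.tune <- ensemble.calibrate.models.gbm(x=predictors, p=pres, k=4,
#     complexity=c(3, 4), learning=c(0.005, 0.001))
# gbm.tune$table   # cross-validation AUC (x 100) for each parameter combination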
`ensemble.calibrate.models.gbm` <- function(
x=NULL, p=NULL, a=NULL, an=1000, excludep=FALSE, k=4,
TrainData=NULL,
VIF=FALSE, COR=FALSE,
SINK=FALSE, PLOTS=FALSE,
species.name="Species001",
Yweights="BIOMOD",
layer.drops=NULL, factors=NULL,
GBMSTEP.gbm.x=2:(ncol(TrainData.orig)),
complexity=c(3:6), learning=c(0.005, 0.002, 0.001),
GBMSTEP.bag.fraction=0.5, GBMSTEP.step.size=100
)
{
.BiodiversityR <- new.env()
# TestData and dummy.vars are not arguments of this function; start from NULL so that the
# layer.drops, factors and dummy.vars checks below do not fail with 'object not found'
TestData <- NULL
dummy.vars <- NULL
# if (! require(dismo)) {stop("Please install the dismo package")}
# if (! require(gbm)) {stop("Please install the gbm package")}
k <- as.integer(k)
if (k < 2) {
cat(paste("\n", "NOTE: parameter k was set to be smaller than 2", sep = ""))
cat(paste("\n", "default value of 5 therefore set for parameter k", "\n", sep = ""))
k <- 5
}
# check data
if (is.null(TrainData) == T) {
if(is.null(x) == T) {stop("value for parameter x is missing (RasterStack object)")}
if(inherits(x,"RasterStack") == F) {stop("x is not a RasterStack object")}
if(raster::projection(x)=="NA") {
raster::projection(x) <- "+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0"
}
if(is.null(p) == T) {stop("presence locations are missing (parameter p)")}
}
# create output file
dir.create("outputs", showWarnings = F)
paste.file <- paste(getwd(), "/outputs/", species.name, "_output.txt", sep="")
OLD.SINK <- TRUE
if (sink.number(type="output") == 0) {OLD.SINK <- F}
if (SINK==T && OLD.SINK==F) {
if (file.exists(paste.file) == F) {
cat(paste("\n", "NOTE: results captured in file: ", paste.file, "\n", sep = ""))
}else{
cat(paste("\n", "NOTE: results appended in file: ", paste.file, "\n", sep = ""))
}
cat(paste("\n\n", "RESULTS (ensemble.calibrate.models.gbm function)", "\n\n", sep=""), file=paste.file, append=T)
sink(file=paste.file, append=T)
cat(paste(date(), "\n", sep=""))
print(match.call())
}
# check TrainData
if (is.null(TrainData) == F) {
TrainData <- data.frame(TrainData)
if (names(TrainData)[1] !="pb") {stop("first column for TrainData should be 'pb' containing presence (1) and absence (0) data")}
if ((is.null(x) == F) && (raster::nlayers(x) != (ncol(TrainData)-1))) {
cat(paste("\n", "WARNING: different number of explanatory variables in rasterStack and TrainData", sep = ""))
}
}
# modify RasterStack x only if this RasterStack was provided
if (is.null(x) == F) {
if (is.null(layer.drops) == F) {
vars <- names(x)
layer.drops <- as.character(layer.drops)
factors <- as.character(factors)
dummy.vars <- as.character(dummy.vars)
nd <- length(layer.drops)
for (i in 1:nd) {
if (any(vars==layer.drops[i])==FALSE) {
cat(paste("\n", "WARNING: variable to exclude '", layer.drops[i], "' not among grid layers", "\n", sep = ""))
}else{
cat(paste("\n", "NOTE: variable '", layer.drops[i], "' will not be included as explanatory variable", "\n", sep = ""))
x <- raster::dropLayer(x, which(names(x) %in% c(layer.drops[i]) ))
x <- raster::stack(x)
vars <- names(x)
if (is.null(factors) == F) {
factors <- factors[factors != layer.drops[i]]
if(length(factors) == 0) {factors <- NULL}
}
if (is.null(dummy.vars) == F) {
dummy.vars <- dummy.vars[dummy.vars != layer.drops[i]]
if(length(dummy.vars) == 0) {dummy.vars <- NULL}
}
}
}
}
if (is.null(factors) == F) {
vars <- names(x)
factors <- as.character(factors)
nf <- length(factors)
for (i in 1:nf) {
if (any(vars==factors[i])==FALSE) {
cat(paste("\n", "WARNING: categorical variable '", factors[i], "' not among grid layers", "\n", sep = ""))
factors <- factors[factors != factors[i]]
if(length(factors) == 0) {factors <- NULL}
}
}
}
if (is.null(dummy.vars) == F) {
vars <- names(x)
dummy.vars <- as.character(dummy.vars)
nf <- length(dummy.vars)
for (i in 1:nf) {
if (any(vars==dummy.vars[i])==FALSE) {
cat(paste("\n", "WARNING: dummy variable '", dummy.vars[i], "' not among grid layers", "\n", sep = ""))
dummy.vars <- dummy.vars[dummy.vars != dummy.vars[i]]
if(length(dummy.vars) == 0) {dummy.vars <- NULL}
}
}
}
# set minimum and maximum values
for (i in 1:raster::nlayers(x)) {
x[[i]] <- raster::setMinMax(x[[i]])
}
# declare factor layers
if(is.null(factors)==F) {
for (i in 1:length(factors)) {
j <- which(names(x) == factors[i])
x[[j]] <- raster::as.factor(x[[j]])
}
}
}
#
# modify TrainData if layer.drops
if (is.null(TrainData) == F) {
if (is.null(layer.drops) == F) {
vars <- names(TrainData)
layer.drops <- as.character(layer.drops)
factors <- as.character(factors)
dummy.vars <- as.character(dummy.vars)
nd <- length(layer.drops)
for (i in 1:nd) {
if (any(vars==layer.drops[i])==FALSE) {
cat(paste("\n", "WARNING: variable to exclude '", layer.drops[i], "' not among columns of TrainData", "\n", sep = ""))
}else{
cat(paste("\n", "NOTE: variable '", layer.drops[i], "' will not be included as explanatory variable", "\n", sep = ""))
TrainData <- TrainData[, which(names(TrainData) != layer.drops[i]), drop=F]
if (is.null(TestData) == F) {TestData <- TestData[, which(names(TestData) != layer.drops[i]), drop=F]}
vars <- names(TrainData)
if (is.null(factors) == F) {
factors <- factors[factors != layer.drops[i]]
if(length(factors) == 0) {factors <- NULL}
}
if (is.null(dummy.vars) == F) {
dummy.vars <- dummy.vars[dummy.vars != layer.drops[i]]
if(length(dummy.vars) == 0) {dummy.vars <- NULL}
}
}
}
}
if (is.null(factors) == F) {
vars <- names(TrainData)
factors <- as.character(factors)
nf <- length(factors)
for (i in 1:nf) {
if (any(vars==factors[i])==FALSE) {
cat(paste("\n", "WARNING: categorical variable '", factors[i], "' not among columns of TrainData", "\n", sep = ""))
factors <- factors[factors != factors[i]]
if(length(factors) == 0) {factors <- NULL}
}
}
}
if (is.null(dummy.vars) == F) {
vars <- names(TrainData)
dummy.vars <- as.character(dummy.vars)
nf <- length(dummy.vars)
for (i in 1:nf) {
if (any(vars==dummy.vars[i])==FALSE) {
cat(paste("\n", "WARNING: dummy variable '", dummy.vars[i], "' not among columns of TrainData", "\n", sep = ""))
dummy.vars <- dummy.vars[dummy.vars != dummy.vars[i]]
if(length(dummy.vars) == 0) {dummy.vars <- NULL}
}
}
}
}
# create TrainData and TestData
if (is.null(TrainData) == F) {
if(any(is.na(TrainData))) {
cat(paste("\n", "WARNING: sample units with missing data removed from calibration data","\n\n",sep = ""))
}
TrainValid <- complete.cases(TrainData)
TrainData <- TrainData[TrainValid,]
}else{
if (is.null(a)==T) {
if (excludep == T) {
a <- dismo::randomPoints(x[[1]], n=an, p=p, excludep=T)
}else{
a <- dismo::randomPoints(x[[1]], n=an, p=NULL, excludep=F)
}
}
TrainData <- dismo::prepareData(x, p, b=a, factors=factors, xy=FALSE)
if (length(names(x)) == 1) {
xdouble <- raster::stack(x, x)
TrainData <- dismo::prepareData(x=xdouble, p, b=a, factors=factors, xy=FALSE)
TrainData <- TrainData[, -3]
names(TrainData)[2] <- names(x)
}
if(any(is.na(TrainData[TrainData[,"pb"]==1,]))) {
cat(paste("\n", "WARNING: presence locations with missing data removed from calibration data","\n\n",sep = ""))
}
TrainValid <- complete.cases(TrainData[TrainData[,"pb"]==1,])
p <- p[TrainValid,]
if(any(is.na(TrainData[TrainData[,"pb"]==0,]))) {
cat(paste("\n", "WARNING: background locations with missing data removed from calibration data","\n\n",sep = ""))
}
TrainValid <- complete.cases(TrainData[TrainData[,"pb"]==0,])
a <- a[TrainValid,]
TrainData <- dismo::prepareData(x, p, b=a, factors=factors, xy=FALSE)
if (length(names(x)) == 1) {
xdouble <- raster::stack(x, x)
TrainData <- dismo::prepareData(x=xdouble, p, b=a, factors=factors, xy=FALSE)
TrainData <- TrainData[, -3]
names(TrainData)[2] <- names(x)
}
}
TrainData.orig <- TrainData
assign("TrainData.orig", TrainData.orig, envir=.BiodiversityR)
#
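# Tuning grid: one row per combination of tree.complexity and learning.rate; columns hold
# the per-fold AUC values (reported x 100 below), their mean ("MEAN"), and the per-fold
# number of trees selected by the stepwise gbm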
nc <- length(complexity)
nl <- length(learning)
nt <- nc*nl
output <- array(NA, dim=c(nt, 2*k+3))
colnames(output) <- c("tree.complexity","learning.rate", 1:k,"MEAN",1:k)
output[,"tree.complexity"] <- rep(complexity, nl)
output[,"learning.rate"] <- rep(learning, each=nc)
#
groupp <- dismo::kfold(TrainData, k=k, by=TrainData[,"pb"])
for (i in 1:k){
cat(paste("\n", "EVALUATION RUN: ", i, "\n\n", sep = ""))
TrainData.c <- TrainData[groupp != i,]
TestData.c <- TrainData[groupp == i,]
assign("TrainData.c", TrainData.c, envir=.BiodiversityR)
assign("TestData.c", TestData.c, envir=.BiodiversityR)
for (j in 1:nt) {
complex <- output[j,"tree.complexity"]
lr <- output[j, "learning.rate"]
cat(paste("\n", "complexity: ", complex, ", learning: ", lr, "\n", sep=""))
tests <- ensemble.calibrate.models(x=x,
TrainData=TrainData.c, TestData=TestData.c,
VIF=VIF, COR=COR,
PLOTS=PLOTS, evaluations.keep=T,
MAXENT=0, MAXLIKE=0, GBM=0, GBMSTEP=1, RF=0, GLM=0, GLMSTEP=0,
GAM=0, GAMSTEP=0, MGCV=0, MGCVFIX=0, EARTH=0, RPART=0,
NNET=0, FDA=0, SVM=0, SVME=0, GLMNET=0,
BIOCLIM=0, DOMAIN=0, MAHAL=0, MAHAL01=0,
Yweights=Yweights, factors=factors,
GBMSTEP.gbm.x=2:(1+raster::nlayers(x)),
GBMSTEP.bag.fraction=GBMSTEP.bag.fraction,
GBMSTEP.tree.complexity=complex, GBMSTEP.learning.rate=lr,
GBMSTEP.step.size=GBMSTEP.step.size)
output[j,2+i] <- tests$GBMSTEP.T@auc
output[j,k+3+i] <- tests$GBMSTEP.trees
}
}
output[,k+3] <- rowMeans(output[,3:(k+2)], na.rm=T)
output <- output[order(output[,"MEAN"], decreasing=T),]
output[,3:(k+3)] <- 100*output[,3:(k+3)]
print(output)
cat(paste("\n\n"))
if (SINK==T && OLD.SINK==F) {sink(file=NULL, append=T)}
return(list(table=output, call=match.call() ))
}
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/ensemble.calibrate.models.gbm.R
|
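# ensemble.calibrate.models.nnet: k-fold tuning of the artificial neural network (NNET)
# parameters size and decay through repeated calls to ensemble.calibrate.models.
#
# Minimal usage sketch (hypothetical 'predictors' RasterStack and 'pres' presence matrix):
# nnet.tune <- ensemble.calibrate.models.nnet(x=predictors, p=pres, k=4,
#     sizes=c(4, 8), decays=c(0.1, 0.01))
# nnet.tune$table   # cross-validation AUC (x 100) for each size / decay combination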
`ensemble.calibrate.models.nnet` <- function(
x=NULL, p=NULL, a=NULL, an=1000, excludep=FALSE, k=4,
TrainData=NULL,
VIF=FALSE, COR=FALSE,
SINK=FALSE, PLOTS=FALSE,
species.name="Species001",
Yweights="BIOMOD",
layer.drops=NULL, factors=NULL,
formulae.defaults=TRUE, maxit=100,
NNET.formula=NULL,
sizes=c(2, 4, 6, 8), decays=c(0.1, 0.05, 0.01, 0.001)
)
{
.BiodiversityR <- new.env()
# TestData and dummy.vars are not arguments of this function; start from NULL so that the
# layer.drops, factors and dummy.vars checks below do not fail with 'object not found'
TestData <- NULL
dummy.vars <- NULL
# if (! require(dismo)) {stop("Please install the dismo package")}
# if (! require(nnet)) {stop("Please install the nnet package")}
k <- as.integer(k)
if (k < 2) {
cat(paste("\n", "NOTE: parameter k was set to be smaller than 2", sep = ""))
cat(paste("\n", "default value of 5 therefore set for parameter k", "\n", sep = ""))
k <- 5
}
# check data
if (is.null(TrainData) == T) {
if(is.null(x) == T) {stop("value for parameter x is missing (RasterStack object)")}
if(inherits(x,"RasterStack") == F) {stop("x is not a RasterStack object")}
if(raster::projection(x)=="NA") {
raster::projection(x) <- "+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0"
}
if(is.null(p) == T) {stop("presence locations are missing (parameter p)")}
}
# create output file
dir.create("outputs", showWarnings = F)
paste.file <- paste(getwd(), "/outputs/", species.name, "_output.txt", sep="")
OLD.SINK <- TRUE
if (sink.number(type="output") == 0) {OLD.SINK <- F}
if (SINK==T && OLD.SINK==F) {
if (file.exists(paste.file) == F) {
cat(paste("\n", "NOTE: results captured in file: ", paste.file, "\n", sep = ""))
}else{
cat(paste("\n", "NOTE: results appended in file: ", paste.file, "\n", sep = ""))
}
cat(paste("\n\n", "RESULTS (ensemble.calibrate.models.nnet function)", "\n\n", sep=""), file=paste.file, append=T)
sink(file=paste.file, append=T)
cat(paste(date(), "\n", sep=""))
print(match.call())
}
# check TrainData
if (is.null(TrainData) == F) {
TrainData <- data.frame(TrainData)
if (names(TrainData)[1] !="pb") {stop("first column for TrainData should be 'pb' containing presence (1) and absence (0) data")}
if ((is.null(x) == F) && (raster::nlayers(x) != (ncol(TrainData)-1))) {
cat(paste("\n", "WARNING: different number of explanatory variables in rasterStack and TrainData", sep = ""))
}
}
# modify RasterStack x only if this RasterStack was provided
if (is.null(x) == F) {
if (is.null(layer.drops) == F) {
vars <- names(x)
layer.drops <- as.character(layer.drops)
factors <- as.character(factors)
dummy.vars <- as.character(dummy.vars)
nd <- length(layer.drops)
for (i in 1:nd) {
if (any(vars==layer.drops[i])==FALSE) {
cat(paste("\n", "WARNING: variable to exclude '", layer.drops[i], "' not among grid layers", "\n", sep = ""))
}else{
cat(paste("\n", "NOTE: variable '", layer.drops[i], "' will not be included as explanatory variable", "\n", sep = ""))
x <- raster::dropLayer(x, which(names(x) %in% c(layer.drops[i]) ))
x <- raster::stack(x)
vars <- names(x)
if (is.null(factors) == F) {
factors <- factors[factors != layer.drops[i]]
if(length(factors) == 0) {factors <- NULL}
}
if (is.null(dummy.vars) == F) {
dummy.vars <- dummy.vars[dummy.vars != layer.drops[i]]
if(length(dummy.vars) == 0) {dummy.vars <- NULL}
}
}
}
}
if (is.null(factors) == F) {
vars <- names(x)
factors <- as.character(factors)
nf <- length(factors)
for (i in 1:nf) {
if (any(vars==factors[i])==FALSE) {
cat(paste("\n", "WARNING: categorical variable '", factors[i], "' not among grid layers", "\n", sep = ""))
factors <- factors[factors != factors[i]]
if(length(factors) == 0) {factors <- NULL}
}
}
}
if (is.null(dummy.vars) == F) {
vars <- names(x)
dummy.vars <- as.character(dummy.vars)
nf <- length(dummy.vars)
for (i in 1:nf) {
if (any(vars==dummy.vars[i])==FALSE) {
cat(paste("\n", "WARNING: dummy variable '", dummy.vars[i], "' not among grid layers", "\n", sep = ""))
dummy.vars <- dummy.vars[dummy.vars != dummy.vars[i]]
if(length(dummy.vars) == 0) {dummy.vars <- NULL}
}
}
}
# set minimum and maximum values
for (i in 1:raster::nlayers(x)) {
x[[i]] <- raster::setMinMax(x[[i]])
}
# declare factor layers
if(is.null(factors)==F) {
for (i in 1:length(factors)) {
j <- which(names(x) == factors[i])
x[[j]] <- raster::as.factor(x[[j]])
}
}
}
#
# modify TrainData if layer.drops
if (is.null(TrainData) == F) {
if (is.null(layer.drops) == F) {
vars <- names(TrainData)
layer.drops <- as.character(layer.drops)
factors <- as.character(factors)
dummy.vars <- as.character(dummy.vars)
nd <- length(layer.drops)
for (i in 1:nd) {
if (any(vars==layer.drops[i])==FALSE) {
cat(paste("\n", "WARNING: variable to exclude '", layer.drops[i], "' not among columns of TrainData", "\n", sep = ""))
}else{
cat(paste("\n", "NOTE: variable '", layer.drops[i], "' will not be included as explanatory variable", "\n", sep = ""))
TrainData <- TrainData[, which(names(TrainData) != layer.drops[i]), drop=F]
if (is.null(TestData) == F) {TestData <- TestData[, which(names(TestData) != layer.drops[i]), drop=F]}
vars <- names(TrainData)
if (is.null(factors) == F) {
factors <- factors[factors != layer.drops[i]]
if(length(factors) == 0) {factors <- NULL}
}
if (is.null(dummy.vars) == F) {
dummy.vars <- dummy.vars[dummy.vars != layer.drops[i]]
if(length(dummy.vars) == 0) {dummy.vars <- NULL}
}
}
}
}
if (is.null(factors) == F) {
vars <- names(TrainData)
factors <- as.character(factors)
nf <- length(factors)
for (i in 1:nf) {
if (any(vars==factors[i])==FALSE) {
cat(paste("\n", "WARNING: categorical variable '", factors[i], "' not among columns of TrainData", "\n", sep = ""))
factors <- factors[factors != factors[i]]
if(length(factors) == 0) {factors <- NULL}
}
}
}
if (is.null(dummy.vars) == F) {
vars <- names(TrainData)
dummy.vars <- as.character(dummy.vars)
nf <- length(dummy.vars)
for (i in 1:nf) {
if (any(vars==dummy.vars[i])==FALSE) {
cat(paste("\n", "WARNING: dummy variable '", dummy.vars[i], "' not among columns of TrainData", "\n", sep = ""))
dummy.vars <- dummy.vars[dummy.vars != dummy.vars[i]]
if(length(dummy.vars) == 0) {dummy.vars <- NULL}
}
}
}
}
#
if (formulae.defaults == T) {
if(is.null(TrainData) == T) {
formulae <- ensemble.formulae(x, factors=factors)
}else{
formulae <- ensemble.formulae(TrainData, factors=factors)
}
}
if (is.null(NNET.formula) == T && formulae.defaults == T) {NNET.formula <- formulae$NNET.formula}
if (is.null(NNET.formula) == T) {stop("Please provide the NNET.formula (hint: use ensemble.formulae function)")}
environment(NNET.formula) <- .BiodiversityR
# create TrainData and TestData
if (is.null(TrainData) == F) {
if(any(is.na(TrainData))) {
cat(paste("\n", "WARNING: sample units with missing data removed from calibration data","\n\n",sep = ""))
}
TrainValid <- complete.cases(TrainData)
TrainData <- TrainData[TrainValid,]
}else{
if (is.null(a)==T) {
if (excludep == T) {
a <- dismo::randomPoints(x[[1]], n=an, p=p, excludep=T)
}else{
a <- dismo::randomPoints(x[[1]], n=an, p=NULL, excludep=F)
}
}
TrainData <- dismo::prepareData(x, p, b=a, factors=factors, xy=FALSE)
if (length(names(x)) == 1) {
xdouble <- raster::stack(x, x)
TrainData <- dismo::prepareData(x=xdouble, p, b=a, factors=factors, xy=FALSE)
TrainData <- TrainData[, -3]
names(TrainData)[2] <- names(x)
}
if(any(is.na(TrainData[TrainData[,"pb"]==1,]))) {
cat(paste("\n", "WARNING: presence locations with missing data removed from calibration data","\n\n",sep = ""))
}
TrainValid <- complete.cases(TrainData[TrainData[,"pb"]==1,])
p <- p[TrainValid,]
if(any(is.na(TrainData[TrainData[,"pb"]==0,]))) {
cat(paste("\n", "WARNING: background locations with missing data removed from calibration data","\n\n",sep = ""))
}
TrainValid <- complete.cases(TrainData[TrainData[,"pb"]==0,])
a <- a[TrainValid,]
TrainData <- dismo::prepareData(x, p, b=a, factors=factors, xy=FALSE)
if (length(names(x)) == 1) {
xdouble <- raster::stack(x, x)
TrainData <- dismo::prepareData(x=xdouble, p, b=a, factors=factors, xy=FALSE)
TrainData <- TrainData[, -3]
names(TrainData)[2] <- names(x)
}
}
TrainData.orig <- TrainData
assign("TrainData.orig", TrainData.orig, envir=.BiodiversityR)
#
ns <- length(sizes)
nd <- length(decays)
nt <- ns*nd
output <- array(NA, dim=c(nt, k+3))
colnames(output) <- c("size","decay", 1:k,"MEAN")
output[,"size"] <- rep(sizes, nd)
output[,"decay"] <- rep(decays, each=ns)
#
groupp <- dismo::kfold(TrainData, k=k, by=TrainData[,"pb"])
for (i in 1:k){
cat(paste("\n", "EVALUATION RUN: ", i, "\n", "\n", sep = ""))
TrainData.c <- TrainData[groupp != i,]
TestData.c <- TrainData[groupp == i,]
assign("TrainData.c", TrainData.c, envir=.BiodiversityR)
assign("TestData.c", TestData.c, envir=.BiodiversityR)
for (j in 1:nt) {
NNET.size <- output[j,"size"]
NNET.decay <- output[j, "decay"]
cat(paste("\n", "size: ", NNET.size, ", decay: ", NNET.decay, "\n", sep=""))
tests <- ensemble.calibrate.models(x=x,
TrainData=TrainData.c, TestData=TestData.c,
VIF=VIF, COR=COR,
PLOTS=PLOTS, evaluations.keep=T,
MAXENT=0, MAXLIKE=0, GBM=0, GBMSTEP=0, RF=0, GLM=0, GLMSTEP=0,
GAM=0, GAMSTEP=0, MGCV=0, MGCVFIX=0, EARTH=0, RPART=0,
NNET=1, FDA=0, SVM=0, SVME=0, GLMNET=0,
BIOCLIM=0, DOMAIN=0, MAHAL=0, MAHAL01=0,
maxit=maxit,
Yweights=Yweights, factors=factors,
NNET.formula=NNET.formula, NNET.size=NNET.size, NNET.decay=NNET.decay)
output[j,2+i] <- tests$NNET.T@auc
}
}
output[,k+3] <- rowMeans(output[,3:(k+2)], na.rm=T)
output <- output[order(output[,"MEAN"], decreasing=T),]
output[,3:(k+3)] <- 100*output[,3:(k+3)]
print(output)
cat(paste("\n\n"))
if (SINK==T && OLD.SINK==F) {sink(file=NULL, append=T)}
return(list(table=output, call=match.call() ))
}
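# Illustrative usage sketch (added here, not part of the original source): how the
# size/decay grid search defined above might be called. 'predictors' (a RasterStack
# of explanatory variables) and 'pres' (a two-column data.frame of presence
# coordinates) are assumed objects.
if (FALSE) {
    nnet.grid <- ensemble.calibrate.models.nnet(x=predictors, p=pres, k=4,
        sizes=c(4, 6, 8), decays=c(0.1, 0.05, 0.01))
    # cross-validated AUC (x 100) for each size/decay combination, sorted by MEAN
    nnet.grid$table
}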
# ---- end of file: ensemble.calibrate.models.nnet.R ----
`ensemble.calibrate.weights` <- function(
x=NULL, p=NULL, TrainTestData=NULL,
a=NULL, an=1000,
get.block=FALSE, block.default=TRUE, get.subblocks=FALSE,
SSB.reduce=FALSE, CIRCLES.d=100000,
excludep=FALSE, target.groups=FALSE, k=4,
VIF=FALSE, COR=FALSE,
SINK=FALSE, PLOTS=FALSE, CATCH.OFF=FALSE,
data.keep=FALSE,
species.name = "Species001",
threshold.method="spec_sens", threshold.sensitivity=0.9, threshold.PresenceAbsence=FALSE,
ENSEMBLE.tune=FALSE,
ENSEMBLE.best=0, ENSEMBLE.min=0.7, ENSEMBLE.exponent=1, ENSEMBLE.weight.min=0.05,
input.weights=NULL,
MAXENT=1, MAXNET=1, MAXLIKE=1, GBM=1, GBMSTEP=1, RF=1, CF=1,
GLM=1, GLMSTEP=1, GAM=1, GAMSTEP=1, MGCV=1, MGCVFIX=0,
EARTH=1, RPART=1, NNET=1, FDA=1, SVM=1, SVME=1, GLMNET=1,
BIOCLIM.O=0, BIOCLIM=1, DOMAIN=1, MAHAL=1, MAHAL01=1,
PROBIT=FALSE,
Yweights="BIOMOD",
layer.drops=NULL, factors=NULL, dummy.vars=NULL,
formulae.defaults=TRUE, maxit=100,
MAXENT.a=NULL, MAXENT.an=10000,
MAXENT.path=paste(getwd(), "/models/maxent_", species.name, sep=""),
MAXNET.classes="default", MAXNET.clamp=FALSE, MAXNET.type="cloglog",
MAXLIKE.formula=NULL, MAXLIKE.method="BFGS",
GBM.formula=NULL, GBM.n.trees=2001,
GBMSTEP.gbm.x=2:(length(var.names)+1), GBMSTEP.tree.complexity=5, GBMSTEP.learning.rate=0.005,
GBMSTEP.bag.fraction=0.5, GBMSTEP.step.size=100,
RF.formula=NULL, RF.ntree=751, RF.mtry=floor(sqrt(length(var.names))),
CF.formula=NULL, CF.ntree=751, CF.mtry=floor(sqrt(length(var.names))),
GLM.formula=NULL, GLM.family=binomial(link="logit"),
GLMSTEP.steps=1000, STEP.formula=NULL, GLMSTEP.scope=NULL, GLMSTEP.k=2,
GAM.formula=NULL, GAM.family=binomial(link="logit"),
GAMSTEP.steps=1000, GAMSTEP.scope=NULL, GAMSTEP.pos=1,
MGCV.formula=NULL, MGCV.select=FALSE,
MGCVFIX.formula=NULL,
EARTH.formula=NULL, EARTH.glm=list(family=binomial(link="logit"), maxit=maxit),
RPART.formula=NULL, RPART.xval=50,
NNET.formula=NULL, NNET.size=8, NNET.decay=0.01,
FDA.formula=NULL,
SVM.formula=NULL,
SVME.formula=NULL,
GLMNET.nlambda=100, GLMNET.class=FALSE,
BIOCLIM.O.fraction=0.9,
MAHAL.shape=1
)
{
.BiodiversityR <- new.env()
# if (! require(dismo)) {stop("Please install the dismo package")}
if (is.list(k) == F) {
k <- as.integer(k)
k.listed <- FALSE
if (k < 2) {
cat(paste("\n", "NOTE: parameter k was set to be smaller than 2", sep = ""))
cat(paste("\n", "default value of 4 therefore set for parameter k", "\n", sep = ""))
k <- 4
}
}else{
k.listed <- TRUE
k.list <- k
k <- max(k.list$groupa)
}
x.was.terra <- FALSE
# new option using data.frame
    if (is.null(TrainTestData) == TRUE) {
        if(inherits(x, "RasterStack") == FALSE && inherits(x, "SpatRaster") == FALSE) {stop("x is not a RasterStack or SpatRaster object")}
# use the raster format for preparing and checking the data
if(inherits(x, "SpatRaster")) {
x <- raster::stack(x)
x.was.terra <- TRUE
}
if(is.null(p) == T) {stop("presence locations are missing (parameter p)")}
if(is.null(p) == F) {
p <- data.frame(p)
names(p) <- c("x", "y")
}
if(is.null(a) == F) {
a <- data.frame(a)
names(a) <- c("x", "y")
}
if(is.null(MAXENT.a) == F) {
MAXENT.a <- data.frame(MAXENT.a)
names(MAXENT.a) <- c("x", "y")
}
if (is.null(layer.drops) == F) {
layer.drops <- as.character(layer.drops)
if (inherits(x, "RasterStack")) {
x <- raster::dropLayer(x, which(names(x) %in% layer.drops))
x <- raster::stack(x)
}
factors <- as.character(factors)
dummy.vars <- as.character(dummy.vars)
nd <- length(layer.drops)
for (i in 1:nd) {
if (is.null(factors) == F) {
factors <- factors[factors != layer.drops[i]]
if(length(factors) == 0) {factors <- NULL}
}
if (is.null(dummy.vars) == F) {
dummy.vars <- dummy.vars[dummy.vars != layer.drops[i]]
if(length(dummy.vars) == 0) {dummy.vars <- NULL}
}
}
if(length(layer.drops) == 0) {layer.drops <- NULL}
}
} # no data.frame
if (is.null(TrainTestData) == F) {
TrainTestData <- data.frame(TrainTestData)
if (names(TrainTestData)[1] !="pb") {stop("first column for TrainTestData should be 'pb' containing presence (1) and absence (0) data")}
if (inherits(x, "RasterStack")) {
if (raster::nlayers(x) != (ncol(TrainTestData)-1)) {
cat(paste("\n", "WARNING: different number of explanatory variables in rasterStack and TrainTestData", sep = ""))
}
}
}
#
output.rownames <- c("MAXENT", "MAXNET", "MAXLIKE", "GBM", "GBMSTEP", "RF", "CF",
"GLM", "GLMSTEP", "GAM", "GAMSTEP", "MGCV", "MGCVFIX",
"EARTH", "RPART", "NNET", "FDA", "SVM", "SVME", "GLMNET",
"BIOCLIM.O", "BIOCLIM", "DOMAIN", "MAHAL", "MAHAL01", "ENSEMBLE")
#
# if(length(ENSEMBLE.exponent) > 1 || length(ENSEMBLE.best) > 1 || length(ENSEMBLE.min) > 1) {ENSEMBLE.tune <- TRUE}
if(ENSEMBLE.tune == F) {
output <- array(0, dim=c(length(output.rownames), k+1))
rownames(output) <- output.rownames
colnames(output) <- c(paste("T_", c(1:k), sep=""),"MEAN")
}else{
output <- array(0, dim=c(length(output.rownames), 2*k+2))
rownames(output) <- output.rownames
colnames(output) <- c(paste("T_", c(1:k), sep=""), "MEAN.T", paste("S_", c(1:k), sep=""), "MEAN")
}
# keep data for final output.weights checks with suggested weights
TrainData.all <- vector("list", k)
TestData.all <- vector("list", k)
# create output file
dir.create("outputs", showWarnings = F)
paste.file <- paste(getwd(), "/outputs/", species.name, "_output.txt", sep="")
OLD.SINK <- TRUE
if (sink.number(type="output") == 0) {OLD.SINK <- F}
if (SINK==T && OLD.SINK==F) {
if (file.exists(paste.file) == F) {
cat(paste("\n", "NOTE: results captured in file: ", paste.file, "\n", sep = ""))
}else{
cat(paste("\n", "NOTE: results appended in file: ", paste.file, "\n", sep = ""))
}
cat(paste("\n\n", "RESULTS (ensemble.calibrate.weights function)", "\n\n", sep=""), file=paste.file, append=T)
sink(file=paste.file, append=T)
cat(paste(date(), "\n", sep=""))
print(match.call())
}
#
# run ensemble.calibrate.models first to obtain MAXENT.a and var.names
# also obtain p and a, possibly via target group sampling
# only do this when no data.frame
if (is.null(TrainTestData) == TRUE) {
tests <- ensemble.calibrate.models(x=x,
p=p, a=a, an=an, pt=NULL, at=NULL, excludep=excludep, target.groups=target.groups,
k=0, TrainData=NULL,
VIF=F, COR=F,
PLOTS=PLOTS, evaluations.keep=T, models.keep=F,
ENSEMBLE.tune=F,
ENSEMBLE.exponent=1, ENSEMBLE.best=1, ENSEMBLE.min=0.7,
MAXENT=0, MAXNET=0, MAXLIKE=0, GBM=0, GBMSTEP=0, RF=0, CF=0, GLM=0, GLMSTEP=0,
GAM=0, GAMSTEP=0, MGCV=0, MGCVFIX=0, EARTH=0, RPART=0,
NNET=0, FDA=0, SVM=0, SVME=0, GLMNET=0,
BIOCLIM.O=0, BIOCLIM=0, DOMAIN=0, MAHAL=0, MAHAL01=0,
MAXENT.a=MAXENT.a, MAXENT.an=MAXENT.an,
factors=factors)
var.names <- tests$evaluations$var.names
MAXENT.a <- tests$evaluations$MAXENT.a
factors2 <- NULL
if (is.null(factors) == F) {
factors2 <- factors[which(factors %in% var.names)]
if (length(factors2) == 0) {factors2 <- NULL}
}
dummy.vars2 <- NULL
if (is.null(dummy.vars) == F) {
dummy.vars2 <- dummy.vars[which(dummy.vars %in% var.names)]
if (length(dummy.vars2) == 0) {dummy.vars2 <- NULL}
}
p.all <- tests$evaluations$p
a.all <- tests$evaluations$a
# new option of k.list surpasses other options of assigning observations to k-folds
if (k.listed == F) {
if (get.subblocks == T) {get.block <- T}
if (get.block == F) {
groupp <- dismo::kfold(p.all, k=k)
groupa <- dismo::kfold(a.all, k=k)
}else{
p2.all <- p.all
a2.all <- a.all
if (block.default == F) {
p2.all[, 1] <- p.all[, 2]
p2.all[, 2] <- p.all[, 1]
a2.all[, 1] <- a.all[, 2]
a2.all[, 2] <- a.all[, 1]
cat(paste("\n", "non-default ENMeval::get.block with first split along longitude", "\n\n", sep = ""))
}else{
cat(paste("\n", "default ENMeval::get.block with first split along latitude", "\n\n", sep = ""))
}
blocks <- ENMeval::get.block(occ=p2.all, bg.coords=a2.all)
groupp <- blocks$occ.grp
groupa <- blocks$bg.grp
k <- 4
# 'subblocking' whereby get.block is applied to each previously determined block
if (get.subblocks == T) {
occ.old <- groupp
backg.old <- groupa
for (i in 1:4) {
occ.i <- p2.all[occ.old == i, ]
backg.i <- a2.all[backg.old == i, ]
block2 <- ENMeval::get.block(occ=occ.i, bg.coords=backg.i)
occ.new <- block2$occ.grp
backg.new <- block2$bg.grp
groupp[occ.old == i] <- occ.new
groupa[backg.old == i] <- backg.new
}
}
}
}else{
groupp <- k.list$groupp
groupa <- k.list$groupa
if (length(groupp) != nrow(p.all)) {cat(paste("WARNING: groupp length (", length(groupp), ") different from number of presence observations (", nrow(p.all), ")", "\n", sep = ""))}
if (length(groupa) != nrow(a.all)) {cat(paste("WARNING: groupa length (", length(groupa), ") different from number of background observations (", nrow(a.all), ")", "\n", sep = ""))}
}
} # no data.frame
if (is.null(TrainTestData) == FALSE) {
TrainTest.p <- TrainTestData[TrainTestData[, "pb"] == 1, ]
TrainTest.a <- TrainTestData[TrainTestData[, "pb"] == 0, ]
var.names <- names(TrainTestData)
var.names <- var.names[which(var.names != "pb")]
p.all <- NULL
a.all <- NULL
factors2 <- NULL
if (is.null(factors) == F) {
factors2 <- factors[which(factors %in% var.names)]
if (length(factors2) == 0) {factors2 <- NULL}
}
dummy.vars2 <- NULL
if (is.null(dummy.vars) == F) {
dummy.vars2 <- dummy.vars[which(dummy.vars %in% var.names)]
if (length(dummy.vars2) == 0) {dummy.vars2 <- NULL}
}
if (k.listed == F) {
groupp <- dismo::kfold(TrainTest.p, k=k)
groupa <- dismo::kfold(TrainTest.a, k=k)
}else{
groupp <- k.list$groupp
groupa <- k.list$groupa
if (length(groupp) != nrow(TrainTest.p)) {cat(paste("WARNING: groupp length (", length(groupp), ") different from number of presence observations (", nrow(TrainTest.p), ")", "\n", sep = ""))}
if (length(groupa) != nrow(TrainTest.a)) {cat(paste("WARNING: groupa length (", length(groupa), ") different from number of background observations (", nrow(TrainTest.a), ")", "\n", sep = ""))}
}
} # data.frame
# Start cross-validations
for (i in 1:k){
cat(paste(species.name, " ", k, "-FOLD CROSS-VALIDATION RUN: ", i, "\n", sep = ""))
if (is.null(TrainTestData) == TRUE) {
p1 <- p.all[groupp != i,]
p2 <- p.all[groupp == i,]
a1 <- a.all[groupa != i,]
a2 <- a.all[groupa == i,]
tests <- ensemble.calibrate.models(x=x,
TrainData=NULL, TestData=NULL,
p=p1, a=a1, pt=p2, at=a2, SSB.reduce=SSB.reduce, CIRCLES.d=CIRCLES.d,
VIF=VIF, COR=COR,
threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity, threshold.PresenceAbsence=threshold.PresenceAbsence,
PLOTS=PLOTS, CATCH.OFF=CATCH.OFF,
evaluations.keep=T, models.keep=F,
ENSEMBLE.tune=ENSEMBLE.tune,
ENSEMBLE.best=ENSEMBLE.best, ENSEMBLE.min=ENSEMBLE.min, ENSEMBLE.exponent=ENSEMBLE.exponent, ENSEMBLE.weight.min=ENSEMBLE.weight.min,
MAXENT=MAXENT, MAXNET=MAXNET, MAXLIKE=MAXLIKE, GBM=GBM, GBMSTEP=GBMSTEP, RF=RF, CF=CF,
GLM=GLM, GLMSTEP=GLMSTEP, GAM=GAM, GAMSTEP=GAMSTEP, MGCV=MGCV, MGCVFIX=MGCVFIX,
EARTH=EARTH, RPART=RPART, NNET=NNET, FDA=FDA, SVM=SVM, SVME=SVME, GLMNET=GLMNET,
BIOCLIM.O=BIOCLIM.O, BIOCLIM=BIOCLIM, DOMAIN=DOMAIN, MAHAL=MAHAL, MAHAL01=MAHAL01,
PROBIT=PROBIT,
Yweights=Yweights,
factors=factors2, dummy.vars=dummy.vars2,
maxit=maxit,
MAXENT.a=MAXENT.a, MAXENT.path=MAXENT.path,
MAXNET.clamp=MAXNET.clamp, MAXNET.type=MAXNET.type,
MAXLIKE.formula=MAXLIKE.formula, MAXLIKE.method=MAXLIKE.method,
GBM.formula=GBM.formula, GBM.n.trees=GBM.n.trees,
GBMSTEP.gbm.x=GBMSTEP.gbm.x, GBMSTEP.tree.complexity=GBMSTEP.tree.complexity,
GBMSTEP.learning.rate=GBMSTEP.learning.rate, GBMSTEP.bag.fraction=GBMSTEP.bag.fraction,
GBMSTEP.step.size=GBMSTEP.step.size,
RF.formula=RF.formula, RF.ntree=RF.ntree, RF.mtry=RF.mtry,
CF.formula=CF.formula, CF.ntree=CF.ntree, CF.mtry=CF.mtry,
GLM.formula=GLM.formula, GLM.family=GLM.family,
GLMSTEP.k=GLMSTEP.k, GLMSTEP.steps=GLMSTEP.steps, STEP.formula=STEP.formula,
GLMSTEP.scope=GLMSTEP.scope,
GAM.formula=GAM.formula, GAM.family=GAM.family,
GAMSTEP.steps=GAMSTEP.steps, GAMSTEP.scope=GAMSTEP.scope, GAMSTEP.pos=GAMSTEP.pos,
MGCV.formula=MGCV.formula, MGCV.select=MGCV.select,
MGCVFIX.formula=MGCVFIX.formula,
EARTH.formula=EARTH.formula, EARTH.glm=EARTH.glm,
RPART.formula=RPART.formula, RPART.xval=RPART.xval,
NNET.formula=NNET.formula, NNET.size=NNET.size, NNET.decay=NNET.decay,
FDA.formula=FDA.formula,
SVM.formula=SVM.formula,
SVME.formula=SVME.formula,
GLMNET.nlambda=GLMNET.nlambda, GLMNET.class=GLMNET.class,
BIOCLIM.O.fraction=BIOCLIM.O.fraction,
MAHAL.shape=MAHAL.shape)
} # no data.frame
if (is.null(TrainTestData) == FALSE) {
TrainTest.p1 <- TrainTest.p[groupp != i,]
TrainTest.p2 <- TrainTest.p[groupp == i,]
TrainTest.a1 <- TrainTest.a[groupa != i,]
TrainTest.a2 <- TrainTest.a[groupa == i,]
TrainTest.train <- rbind(TrainTest.p1, TrainTest.a1)
TrainTest.test <- rbind(TrainTest.p2, TrainTest.a2)
tests <- ensemble.calibrate.models(x=NULL,
TrainData=TrainTest.train, TestData=TrainTest.test,
p=NULL, a=NULL, pt=NULL, at=NULL, SSB.reduce=SSB.reduce, CIRCLES.d=CIRCLES.d,
VIF=VIF, COR=COR,
threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity, threshold.PresenceAbsence=threshold.PresenceAbsence,
PLOTS=PLOTS, CATCH.OFF=CATCH.OFF,
evaluations.keep=T, models.keep=F,
ENSEMBLE.tune=ENSEMBLE.tune,
ENSEMBLE.best=ENSEMBLE.best, ENSEMBLE.min=ENSEMBLE.min, ENSEMBLE.exponent=ENSEMBLE.exponent, ENSEMBLE.weight.min=ENSEMBLE.weight.min,
MAXENT=MAXENT, MAXNET=MAXNET, MAXLIKE=MAXLIKE, GBM=GBM, GBMSTEP=GBMSTEP, RF=RF, CF=CF,
GLM=GLM, GLMSTEP=GLMSTEP, GAM=GAM, GAMSTEP=GAMSTEP, MGCV=MGCV, MGCVFIX=MGCVFIX,
EARTH=EARTH, RPART=RPART, NNET=NNET, FDA=FDA, SVM=SVM, SVME=SVME, GLMNET=GLMNET,
BIOCLIM.O=BIOCLIM.O, BIOCLIM=BIOCLIM, DOMAIN=DOMAIN, MAHAL=MAHAL, MAHAL01=MAHAL01,
PROBIT=PROBIT,
Yweights=Yweights,
factors=factors2, dummy.vars=dummy.vars2,
maxit=maxit,
MAXENT.a=MAXENT.a, MAXENT.path=MAXENT.path,
MAXNET.clamp=MAXNET.clamp, MAXNET.type=MAXNET.type,
MAXLIKE.formula=MAXLIKE.formula, MAXLIKE.method=MAXLIKE.method,
GBM.formula=GBM.formula, GBM.n.trees=GBM.n.trees,
GBMSTEP.gbm.x=GBMSTEP.gbm.x, GBMSTEP.tree.complexity=GBMSTEP.tree.complexity,
GBMSTEP.learning.rate=GBMSTEP.learning.rate, GBMSTEP.bag.fraction=GBMSTEP.bag.fraction,
GBMSTEP.step.size=GBMSTEP.step.size,
RF.formula=RF.formula, RF.ntree=RF.ntree, RF.mtry=RF.mtry,
CF.formula=CF.formula, CF.ntree=CF.ntree, CF.mtry=CF.mtry,
GLM.formula=GLM.formula, GLM.family=GLM.family,
GLMSTEP.k=GLMSTEP.k, GLMSTEP.steps=GLMSTEP.steps, STEP.formula=STEP.formula,
GLMSTEP.scope=GLMSTEP.scope,
GAM.formula=GAM.formula, GAM.family=GAM.family,
GAMSTEP.steps=GAMSTEP.steps, GAMSTEP.scope=GAMSTEP.scope, GAMSTEP.pos=GAMSTEP.pos,
MGCV.formula=MGCV.formula, MGCV.select=MGCV.select,
MGCVFIX.formula=MGCVFIX.formula,
EARTH.formula=EARTH.formula, EARTH.glm=EARTH.glm,
RPART.formula=RPART.formula, RPART.xval=RPART.xval,
NNET.formula=NNET.formula, NNET.size=NNET.size, NNET.decay=NNET.decay,
FDA.formula=FDA.formula,
SVM.formula=SVM.formula,
SVME.formula=SVME.formula,
GLMNET.nlambda=GLMNET.nlambda, GLMNET.class=GLMNET.class,
BIOCLIM.O.fraction=BIOCLIM.O.fraction,
MAHAL.shape=MAHAL.shape)
} # data.frame
dummy.vars.noDOMAIN <- tests$evaluations$dummy.vars.noDOMAIN
if(is.null(tests$evaluations$MAXENT.T)==F) {output["MAXENT",i] <- tests$evaluations$MAXENT.T@auc}
if(is.null(tests$evaluations$MAXNET.T)==F) {output["MAXNET",i] <- tests$evaluations$MAXNET.T@auc}
if(is.null(tests$evaluations$MAXLIKE.T)==F) {output["MAXLIKE",i] <- tests$evaluations$MAXLIKE.T@auc}
if(is.null(tests$evaluations$GBM.T)==F) {output["GBM",i] <- tests$evaluations$GBM.T@auc}
if(is.null(tests$evaluations$GBMSTEP.T)==F) {output["GBMSTEP",i] <- tests$evaluations$GBMSTEP.T@auc}
if(is.null(tests$evaluations$RF.T)==F) {output["RF",i] <- tests$evaluations$RF.T@auc}
if(is.null(tests$evaluations$CF.T)==F) {output["CF",i] <- tests$evaluations$CF.T@auc}
if(is.null(tests$evaluations$GLM.T)==F) {output["GLM",i] <- tests$evaluations$GLM.T@auc}
if(is.null(tests$evaluations$GLMS.T)==F) {output["GLMSTEP",i] <- tests$evaluations$GLMS.T@auc}
if(is.null(tests$evaluations$GAM.T)==F) {output["GAM",i] <- tests$evaluations$GAM.T@auc}
if(is.null(tests$evaluations$GAMS.T)==F) {output["GAMSTEP",i] <- tests$evaluations$GAMS.T@auc}
if(is.null(tests$evaluations$MGCV.T)==F) {output["MGCV",i] <- tests$evaluations$MGCV.T@auc}
if(is.null(tests$evaluations$MGCVF.T)==F) {output["MGCVFIX",i] <- tests$evaluations$MGCVF.T@auc}
if(is.null(tests$evaluations$EARTH.T)==F) {output["EARTH",i] <- tests$evaluations$EARTH.T@auc}
if(is.null(tests$evaluations$RPART.T)==F) {output["RPART",i] <- tests$evaluations$RPART.T@auc}
if(is.null(tests$evaluations$NNET.T)==F) {output["NNET",i] <- tests$evaluations$NNET.T@auc}
if(is.null(tests$evaluations$FDA.T)==F) {output["FDA",i] <- tests$evaluations$FDA.T@auc}
if(is.null(tests$evaluations$SVM.T)==F) {output["SVM",i] <- tests$evaluations$SVM.T@auc}
if(is.null(tests$evaluations$SVME.T)==F) {output["SVME",i] <- tests$evaluations$SVME.T@auc}
if(is.null(tests$evaluations$GLMNET.T)==F) {output["GLMNET",i] <- tests$evaluations$GLMNET.T@auc}
if(is.null(tests$evaluations$BIOCLIM.O.T)==F) {output["BIOCLIM.O",i] <- tests$evaluations$BIOCLIM.O.T@auc}
if(is.null(tests$evaluations$BIOCLIM.T)==F) {output["BIOCLIM",i] <- tests$evaluations$BIOCLIM.T@auc}
if(is.null(tests$evaluations$DOMAIN.T)==F) {output["DOMAIN",i] <- tests$evaluations$DOMAIN.T@auc}
if(is.null(tests$evaluations$MAHAL.T)==F) {output["MAHAL",i] <- tests$evaluations$MAHAL.T@auc}
if(is.null(tests$evaluations$MAHAL01.T)==F) {output["MAHAL01",i] <- tests$evaluations$MAHAL01.T@auc}
if(is.null(tests$evaluations$ENSEMBLE.T)==F) {output["ENSEMBLE",i] <- tests$evaluations$ENSEMBLE.T@auc}
if(ENSEMBLE.tune == T) {
output["MAXENT",k+1+i] <- tests$evaluations$STRATEGY.weights["MAXENT"]
output["MAXNET",k+1+i] <- tests$evaluations$STRATEGY.weights["MAXNET"]
output["MAXLIKE",k+1+i] <- tests$evaluations$STRATEGY.weights["MAXLIKE"]
output["GBM",k+1+i] <- tests$evaluations$STRATEGY.weights["GBM"]
output["GBMSTEP",k+1+i] <- tests$evaluations$STRATEGY.weights["GBMSTEP"]
output["RF",k+1+i] <- tests$evaluations$STRATEGY.weights["RF"]
output["CF",k+1+i] <- tests$evaluations$STRATEGY.weights["CF"]
output["GLM",k+1+i] <- tests$evaluations$STRATEGY.weights["GLM"]
output["GLMSTEP",k+1+i] <- tests$evaluations$STRATEGY.weights["GLMSTEP"]
output["GAM",k+1+i] <- tests$evaluations$STRATEGY.weights["GAM"]
output["GAMSTEP",k+1+i] <- tests$evaluations$STRATEGY.weights["GAMSTEP"]
output["MGCV",k+1+i] <- tests$evaluations$STRATEGY.weights["MGCV"]
output["MGCVFIX",k+1+i] <- tests$evaluations$STRATEGY.weights["MGCVFIX"]
output["EARTH",k+1+i] <- tests$evaluations$STRATEGY.weights["EARTH"]
output["RPART",k+1+i] <- tests$evaluations$STRATEGY.weights["RPART"]
output["NNET",k+1+i] <- tests$evaluations$STRATEGY.weights["NNET"]
output["FDA",k+1+i] <- tests$evaluations$STRATEGY.weights["FDA"]
output["SVM",k+1+i] <- tests$evaluations$STRATEGY.weights["SVM"]
output["SVME",k+1+i] <- tests$evaluations$STRATEGY.weights["SVME"]
output["GLMNET",k+1+i] <- tests$evaluations$STRATEGY.weights["GLMNET"]
output["BIOCLIM.O",k+1+i] <- tests$evaluations$STRATEGY.weights["BIOCLIM.O"]
output["BIOCLIM",k+1+i] <- tests$evaluations$STRATEGY.weights["BIOCLIM"]
output["DOMAIN",k+1+i] <- tests$evaluations$STRATEGY.weights["DOMAIN"]
output["MAHAL",k+1+i] <- tests$evaluations$STRATEGY.weights["MAHAL"]
output["MAHAL01",k+1+i] <- tests$evaluations$STRATEGY.weights["MAHAL01"]
}
TrainData.all[[i]] <- tests$evaluations$TrainData
TestData.all[[i]] <- tests$evaluations$TestData
eval.tablei <- tests$eval.table
eval.tablei$ALGO <- rownames(eval.tablei)
eval.tablei$k <- rep(i, nrow(eval.tablei))
if (i==1) {
eval.table.all <- eval.tablei
}else{
eval.table.all <- rbind(eval.table.all, eval.tablei)
}
}
output[,k+1] <- rowMeans(output[,c(1:k)], na.rm=T)
output[is.na(output[,k+1]),(k+1)] <- 0
#
# Try to use exponent, min and best to calculate final weights
#
# in case there were several exponents, then do not change the weights
ENSEMBLE.exponent1 <- ENSEMBLE.exponent
if (length(ENSEMBLE.exponent) > 1) {ENSEMBLE.exponent1 <- 1}
ENSEMBLE.min1 <- ENSEMBLE.min
if (length(ENSEMBLE.min) > 1) {ENSEMBLE.min1 <- 0.5}
ENSEMBLE.best1 <- ENSEMBLE.best
if (length(ENSEMBLE.best) > 1) {ENSEMBLE.best1 <- 0}
#
if(ENSEMBLE.tune == T) {
output[,2*k+2] <- rowMeans(output[,c((k+2):(2*k+1))], na.rm=T)
output[is.na(output[,2*k+2]),(2*k+2)] <- 0
}
#
output.weights <- output[, "MEAN"]
output.weightsT <- output[, "MEAN.T"]
output.weights <- output.weights[names(output.weights) != "ENSEMBLE"]
output.weightsT <- output.weightsT[names(output.weightsT) != "ENSEMBLE"]
output <- output[order(output[,k+1], decreasing=T),]
cat(paste("Results of ensemble.calibrate.weights sorted by average AUC for tests T_1 to T_", k, "\n", sep = ""))
cat(paste("\n", "columns T_1 to T_", k, " show the AUC for each ", k, "-fold cross-validation run", "\n", sep = ""))
if(ENSEMBLE.tune == T) {
cat(paste("column MEAN.T shows the mean of the AUC", "\n\n", sep = ""))
cat(paste("columns S_1 to S_", k, " show the weights for the ensemble model with best AUC", "\n", sep = ""))
cat(paste("column MEAN shows the mean of these weights", "\n\n", sep = ""))
}else{
cat(paste("column MEAN shows the mean of the AUC", "\n\n", sep = ""))
}
print(output)
if (ENSEMBLE.tune == F) {
# do not modify if ENSEMBLE.tune is not required, simply use same
# cat(paste("\n\n", "parameters for next weighting: ENSEMBLE.min=", ENSEMBLE.min1, ", ENSEMBLE.best=", ENSEMBLE.best1, " and ENSEMBLE.exponent=", ENSEMBLE.exponent1, "\n\n", sep = ""))
# output.weights <- ensemble.weights(weights=output.weights, exponent=ENSEMBLE.exponent1, best=ENSEMBLE.best1, min.weight=ENSEMBLE.min1)
# print(output.weights)
output.weights <- tests$evaluations$ensemble.weights
cat(paste("\n", "final = original weights for ensemble forecasting", "\n", sep = ""))
print(output.weights)
}else{
cat(paste("\n", "input weights for ensemble modelling based on MEAN column", "\n", sep = ""))
print(output.weights)
# if possible, select best models (models with highest weights)
if (ENSEMBLE.best1 > 0) {
cat(paste("\n\n", "parameters for next weighting: ENSEMBLE.min=0, ENSEMBLE.best=", ENSEMBLE.best1, " and ENSEMBLE.exponent=1", "\n\n", sep = ""))
output.weights <- ensemble.weights(weights=output.weights, exponent=1, best=ENSEMBLE.best1, min.weight=0)
print(output.weights)
}
output.weightsT <- ensemble.weights(weights=output.weightsT, exponent=ENSEMBLE.exponent1, best=ENSEMBLE.best1, min.weight=ENSEMBLE.min1)
# remove models with low input weights (mainly to reduce the number of models for final calibrations and mapping)
cat(paste("\n", "Minimum input weight is ", ENSEMBLE.weight.min, "\n", sep=""))
output.weights2 <- output.weights
output.weights2T <- output.weightsT
while(min(output.weights2) < ENSEMBLE.weight.min) {
output.weights2 <- output.weights2[-which.min(output.weights2)]
output.weights2 <- ensemble.weights(weights=output.weights2, exponent=1, best=0, min.weight=0)
}
while(min(output.weights2T) < ENSEMBLE.weight.min) {
output.weights2T <- output.weights2T[-which.min(output.weights2T)]
output.weights2T <- ensemble.weights(weights=output.weights2T, exponent=1, best=0, min.weight=0)
}
output.weights[] <- 0
output.weightsT[] <- 0
for (i in 1:length(output.weights2)) {output.weights[which(names(output.weights) == names(output.weights2)[i])] <- output.weights2[i]}
for (i in 1:length(output.weights2T)) {output.weightsT[which(names(output.weightsT) == names(output.weights2T)[i])] <- output.weights2T[i]}
cat(paste("\n", "final suggested weights for ensemble forecasting", "\n", sep = ""))
print(output.weights)
}
# test with suggested final weights
# was no need to repeat for no-tuning since here we already have those results, but possibly good to recheck
output2 <- numeric(length=k+1)
names(output2)[1:k] <- paste("T_", c(1:k), sep="")
names(output2)[k+1] <- c("MEAN.T")
for (i in 1:k) {
TrainData <- TrainData.all[[i]]
TrainData[,"ENSEMBLE"] <- output.weights["MAXENT"]*TrainData[,"MAXENT"] + output.weights["MAXNET"]*TrainData[,"MAXNET"] +
output.weights["MAXLIKE"]*TrainData[,"MAXLIKE"] + output.weights["GBM"]*TrainData[,"GBM"] +
output.weights["GBMSTEP"]*TrainData[,"GBMSTEP"] + output.weights["RF"]*TrainData[,"RF"] + output.weights["CF"]*TrainData[,"CF"] + output.weights["GLM"]*TrainData[,"GLM"] +
output.weights["GLMSTEP"]*TrainData[,"GLMSTEP"] + output.weights["GAM"]*TrainData[,"GAM"] + output.weights["GAMSTEP"]*TrainData[,"GAMSTEP"] +
output.weights["MGCV"]*TrainData[,"MGCV"] + output.weights["MGCVFIX"]*TrainData[,"MGCVFIX"] + output.weights["EARTH"]*TrainData[,"EARTH"] +
output.weights["RPART"]*TrainData[,"RPART"] + output.weights["NNET"]*TrainData[,"NNET"] + output.weights["FDA"]*TrainData[,"FDA"] +
output.weights["SVM"]*TrainData[,"SVM"] + output.weights["SVME"]*TrainData[,"SVME"] + output.weights["GLMNET"]*TrainData[,"GLMNET"] +
output.weights["BIOCLIM.O"]*TrainData[,"BIOCLIM.O"] + output.weights["BIOCLIM"]*TrainData[,"BIOCLIM"] +
output.weights["DOMAIN"]*TrainData[,"DOMAIN"] + output.weights["MAHAL"]*TrainData[,"MAHAL"]+ output.weights["MAHAL01"]*TrainData[,"MAHAL01"]
TestData <- TestData.all[[i]]
TestData[,"ENSEMBLE"] <- output.weights["MAXENT"]*TestData[,"MAXENT"] + output.weights["MAXNET"]*TestData[,"MAXNET"] +
output.weights["MAXLIKE"]*TestData[,"MAXLIKE"] + output.weights["GBM"]*TestData[,"GBM"] +
output.weights["GBMSTEP"]*TestData[,"GBMSTEP"] + output.weights["RF"]*TestData[,"RF"] + output.weights["CF"]*TestData[,"CF"] + output.weights["GLM"]*TestData[,"GLM"] +
output.weights["GLMSTEP"]*TestData[,"GLMSTEP"] + output.weights["GAM"]*TestData[,"GAM"] + output.weights["GAMSTEP"]*TestData[,"GAMSTEP"] +
output.weights["MGCV"]*TestData[,"MGCV"] + output.weights["MGCVFIX"]*TestData[,"MGCVFIX"] + output.weights["EARTH"]*TestData[,"EARTH"] +
output.weights["RPART"]*TestData[,"RPART"] + output.weights["NNET"]*TestData[,"NNET"] + output.weights["FDA"]*TestData[,"FDA"] +
output.weights["SVM"]*TestData[,"SVM"] + output.weights["SVME"]*TestData[,"SVME"] + output.weights["GLMNET"]*TestData[,"GLMNET"] +
output.weights["BIOCLIM.O"]*TestData[,"BIOCLIM.O"] + output.weights["BIOCLIM"]*TestData[,"BIOCLIM"] +
output.weights["DOMAIN"]*TestData[,"DOMAIN"] + output.weights["MAHAL"]*TestData[,"MAHAL"]+ output.weights["MAHAL01"]*TestData[,"MAHAL01"]
eval1 <- evalT <- NULL
TrainPres <- as.numeric(TrainData[TrainData[,"pb"]==1, "ENSEMBLE"])
TrainAbs <- as.numeric(TrainData[TrainData[,"pb"]==0, "ENSEMBLE"])
evalT <- dismo::evaluate(p=TrainPres, a=TrainAbs)
TestPres <- as.numeric(TestData[TestData[,"pb"]==1, "ENSEMBLE"])
TestAbs <- as.numeric(TestData[TestData[,"pb"]==0, "ENSEMBLE"])
eval1 <- dismo::evaluate(p=TestPres, a=TestAbs)
output2[i] <- eval1@auc
cat(paste("\n", "Results with final weights for ensemble.evaluate for k = ", i, "\n\n", sep = ""))
eval3 <- ensemble.evaluate(eval=eval1, eval.train=evalT)
print(eval3)
if (i==1) {
eval.table.final <- data.frame(t(eval3))
}else{
eval.table.final <- data.frame(rbind(eval.table.final, t(eval3)))
}
}
output2[k+1] <- mean(output2[1:k])
cat(paste("\n", "AUC for ensemble models based on suggested input weights (using presence and background data sets generated for ", k, "-fold cross-validations)", "\n", sep = ""))
print(output2)
cat(paste("\n", "(Results with input weights inferred from MEAN.T column with similar procedures", "\n", sep = ""))
cat(paste("parameters for weighting of MEAN.T: ENSEMBLE.min=", ENSEMBLE.min1, ", ENSEMBLE.best=", ENSEMBLE.best1, " and ENSEMBLE.exponent=", ENSEMBLE.exponent1, "\n\n", sep = ""))
cat(paste("Final suggested weights with this alternative procedure", "\n", sep = ""))
print(output.weightsT)
output3 <- numeric(length=k+1)
names(output3)[1:k] <- paste("T_", c(1:k), sep="")
names(output3)[k+1] <- c("MEAN.T")
for (i in 1:k) {
TrainData <- TrainData.all[[i]]
TrainData[,"ENSEMBLE"] <- output.weightsT["MAXENT"]*TrainData[,"MAXENT"] + output.weightsT["MAXNET"]*TrainData[,"MAXNET"] +
output.weightsT["MAXLIKE"]*TrainData[,"MAXLIKE"] + output.weightsT["GBM"]*TrainData[,"GBM"] +
output.weightsT["GBMSTEP"]*TrainData[,"GBMSTEP"] + output.weightsT["RF"]*TrainData[,"RF"] + output.weightsT["CF"]*TrainData[,"CF"] + output.weightsT["GLM"]*TrainData[,"GLM"] +
output.weightsT["GLMSTEP"]*TrainData[,"GLMSTEP"] + output.weightsT["GAM"]*TrainData[,"GAM"] + output.weightsT["GAMSTEP"]*TrainData[,"GAMSTEP"] +
output.weightsT["MGCV"]*TrainData[,"MGCV"] + output.weightsT["MGCVFIX"]*TrainData[,"MGCVFIX"] + output.weightsT["EARTH"]*TrainData[,"EARTH"] +
output.weightsT["RPART"]*TrainData[,"RPART"] + output.weightsT["NNET"]*TrainData[,"NNET"] + output.weightsT["FDA"]*TrainData[,"FDA"] +
output.weightsT["SVM"]*TrainData[,"SVM"] + output.weightsT["SVME"]*TrainData[,"SVME"] + output.weightsT["GLMNET"]*TrainData[,"GLMNET"] +
output.weightsT["BIOCLIM.O"]*TrainData[,"BIOCLIM.O"] + output.weightsT["BIOCLIM"]*TrainData[,"BIOCLIM"] +
output.weightsT["DOMAIN"]*TrainData[,"DOMAIN"] + output.weightsT["MAHAL"]*TrainData[,"MAHAL"]+ output.weightsT["MAHAL01"]*TrainData[,"MAHAL01"]
TestData <- TestData.all[[i]]
TestData[,"ENSEMBLE"] <- output.weightsT["MAXENT"]*TestData[,"MAXENT"] + output.weightsT["MAXNET"]*TestData[,"MAXNET"] +
output.weightsT["MAXLIKE"]*TestData[,"MAXLIKE"] + output.weightsT["GBM"]*TestData[,"GBM"] +
output.weightsT["GBMSTEP"]*TestData[,"GBMSTEP"] + output.weightsT["RF"]*TestData[,"RF"] + output.weightsT["CF"]*TestData[,"CF"] + output.weightsT["GLM"]*TestData[,"GLM"] +
output.weightsT["GLMSTEP"]*TestData[,"GLMSTEP"] + output.weightsT["GAM"]*TestData[,"GAM"] + output.weightsT["GAMSTEP"]*TestData[,"GAMSTEP"] +
output.weightsT["MGCV"]*TestData[,"MGCV"] + output.weightsT["MGCVFIX"]*TestData[,"MGCVFIX"] + output.weightsT["EARTH"]*TestData[,"EARTH"] +
output.weightsT["RPART"]*TestData[,"RPART"] + output.weightsT["NNET"]*TestData[,"NNET"] + output.weightsT["FDA"]*TestData[,"FDA"] +
output.weightsT["SVM"]*TestData[,"SVM"] + output.weightsT["SVME"]*TestData[,"SVME"] + output.weightsT["GLMNET"]*TestData[,"GLMNET"] +
output.weightsT["BIOCLIM.O"]*TestData[,"BIOCLIM.O"] + output.weightsT["BIOCLIM"]*TestData[,"BIOCLIM"] +
output.weightsT["DOMAIN"]*TestData[,"DOMAIN"] + output.weightsT["MAHAL"]*TestData[,"MAHAL"]+ output.weightsT["MAHAL01"]*TestData[,"MAHAL01"]
        eval1 <- evalT <- NULL
        TrainPres <- as.numeric(TrainData[TrainData[,"pb"]==1, "ENSEMBLE"])
        TrainAbs <- as.numeric(TrainData[TrainData[,"pb"]==0, "ENSEMBLE"])
        evalT <- dismo::evaluate(p=TrainPres, a=TrainAbs)
        TestPres <- as.numeric(TestData[TestData[,"pb"]==1, "ENSEMBLE"])
        TestAbs <- as.numeric(TestData[TestData[,"pb"]==0, "ENSEMBLE"])
        eval1 <- dismo::evaluate(p=TestPres, a=TestAbs)
output3[i] <- eval1@auc
cat(paste("\n", "Results with alternative final weights for ensemble.evaluate for k = ", i, "\n\n", sep = ""))
eval3 <- ensemble.evaluate(eval=eval1, eval.train=evalT)
print(eval3)
}
output3[k+1] <- mean(output3[1:k])
cat(paste("\n", "AUC for ensemble models based on alternative input weights", "\n", sep = ""))
print(output3)
cat(paste(")", "\n", sep=""))
if (SINK==T && OLD.SINK==F) {sink(file=NULL, append=T)}
if (x.was.terra == TRUE) {x <- terra::rast(x)}
if (data.keep == F) {
cat(paste("\n\n"))
return(list(AUC.table=output, table=output, output.weights=output.weights, AUC.with.suggested.weights=output2,
eval.table.all=eval.table.all, eval.table.final=eval.table.final,
x=x, p=p.all, a=a.all, MAXENT.a=MAXENT.a, groupp=groupp, groupa=groupa,
var.names=var.names, factors=factors2, dummy.vars=dummy.vars2, dummy.vars.noDOMAIN=dummy.vars.noDOMAIN,
species.name=species.name,
call=match.call()))
}else{
cat(paste("\n\n"))
return(list(data=TestData.all,
AUC.table=output, table=output, output.weights=output.weights, AUC.with.suggested.weights=output2,
            eval.table.all=eval.table.all, eval.table.final=eval.table.final,
x=x, p=p.all, a=a.all, MAXENT.a=MAXENT.a, groupp=groupp, groupa=groupa,
var.names=var.names, factors=factors2, dummy.vars=dummy.vars2, dummy.vars.noDOMAIN=dummy.vars.noDOMAIN,
species.name=species.name,
call=match.call()))
}
}
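# Usage sketch added for illustration (not original package code): 4-fold
# cross-validation of input weights for a small set of algorithms. 'predictors'
# (RasterStack) and 'pres' (presence coordinates) are assumed objects.
if (FALSE) {
    cv.weights <- ensemble.calibrate.weights(x=predictors, p=pres, an=1000, k=4,
        ENSEMBLE.tune=TRUE, ENSEMBLE.min=0.7,
        MAXENT=0, MAXNET=0, MAXLIKE=0, GBM=1, GBMSTEP=0, RF=1, CF=0,
        GLM=1, GLMSTEP=0, GAM=0, GAMSTEP=0, MGCV=0, MGCVFIX=0,
        EARTH=0, RPART=0, NNET=0, FDA=0, SVM=0, SVME=0, GLMNET=0,
        BIOCLIM.O=0, BIOCLIM=0, DOMAIN=0, MAHAL=0, MAHAL01=0)
    cv.weights$AUC.table       # AUC per algorithm and cross-validation fold
    cv.weights$output.weights  # suggested input weights for ensemble forecasting
}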
# ---- end of file: ensemble.calibrate.weights.R ----
`ensemble.centroids` <- function(
presence.raster=NULL, x=NULL, categories.raster=NULL,
an=10000, ext=NULL, name="Species001",
pca.var=0.95, centers=0, use.silhouette=TRUE,
plotit=FALSE, dev.new.width=7, dev.new.height=7
)
{
.BiodiversityR <- new.env()
# if (! require(dismo)) {stop("Please install the dismo package")}
if (is.null(presence.raster) == T) {stop("value for parameter presence.raster is missing (RasterLayer object)")}
    if(inherits(presence.raster, "RasterLayer") == F) {stop("presence.raster is not a RasterLayer object")}
if(raster::minValue(presence.raster) != 0) {cat(paste("\n", "WARNING: minValue of presence.raster not 0, so this raster layer possibly does not indicate presence-absence", "\n", sep = ""))}
if(raster::maxValue(presence.raster) != 1) {cat(paste("\n", "WARNING: maxValue of presence.raster not 1, so this raster layer possibly does not indicate presence-absence", "\n", sep = ""))}
if(is.null(x) == T) {stop("value for parameter x is missing (RasterStack object)")}
if(inherits(x, "RasterStack") == F) {stop("x is not a RasterStack object")}
# only for plotting
predict.zone <- function(object=centroid.model, newdata=newdata) {
centroids <- object$centroids
cov.mahal <- object$cov.mahal
nc <- nrow(centroids)
result <- data.frame(array(0, dim=c(nrow(newdata), nc)))
for (i in 1:nc) {
result[,i] <- mahalanobis(newdata, center=as.numeric(centroids[i,]), cov=cov.mahal)
}
p <- apply(result[, 1:nc], 1, which.min)
p <- as.numeric(p)
return(p)
}
# same extent for predictors and presence map
if (is.null(ext) == F) {
if(length(x@title) == 0) {x@title <- "stack1"}
title.old <- x@title
x <- raster::crop(x, y=ext, snap="in")
x@title <- title.old
presence.raster <- raster::crop(presence.raster, y=ext, snap="in")
if (is.null(categories.raster) == F) {categories.raster <- raster::crop(categories.raster, y=ext, snap="in")}
}
# mask the presence area
presence.raster <- raster::mask(presence.raster, presence.raster, inverse=T, maskvalue=1)
# create background data
a <- dismo::randomPoints(presence.raster, n=an, p=NULL, excludep=F)
a <- data.frame(a)
background.data <- raster::extract(x, a)
background.data <- data.frame(background.data)
if (length(names(x)) == 1) {
xdouble <- raster::stack(x, x)
background.data <- raster::extract(x=xdouble, y=a)
background.data <- data.frame(background.data)
background.data <- background.data[, 1, drop=F]
names(background.data) <- names(x)
}
TrainValid <- complete.cases(background.data)
a <- a[TrainValid,]
background.data <- background.data[TrainValid,]
# PCA of scaled variables
rda.result <- vegan::rda(X=background.data, scale=T)
# select number of axes
ax <- 2
while ( (sum(vegan::eigenvals(rda.result)[c(1:ax)])/sum(vegan::eigenvals(rda.result))) < pca.var ) {ax <- ax+1}
cat(paste("\n", "Percentage of variance of the selected axes (1 to ", ax, ") of principal components analysis", "\n", sep = ""))
print(100*sum(vegan::eigenvals(rda.result)[c(1:ax)])/sum(vegan::eigenvals(rda.result)))
rda.scores <- vegan::scores(rda.result, display="sites", scaling=1, choices=c(1:ax))
# K-means analysis
if (is.null(categories.raster) == T) {
if (centers < 1) {
cat(paste("\n", "Number of centroids determined with cascadeKM (2 to 16 clusters, Calinski-Harabasz criterion)", "\n", sep = ""))
cascaderesult <- vegan::cascadeKM(rda.scores, inf.gr=2, sup.gr=16, iter=200, criterion="calinski")
print(cascaderesult$results)
groupnumbers <- as.numeric(gsub(" groups", "", colnames(cascaderesult$results)))
w <- cascaderesult$results[2,]
            maxx <- which.max(w)
centers <- groupnumbers[maxx]
cat(paste("\n", "Selected number of centroids: ", centers, "\n", sep = ""))
}
kmeans.result <- kmeans(rda.scores, centers=centers, iter.max=1000)
clusters <- kmeans.result$cluster
clusters.remember <- clusters
clusters.names <- c(1:centers)
# predefined categories
}else{
categories.data <- raster::extract(categories.raster, a)
categories.data <- data.frame(categories.data)
categories <- levels(as.factor(categories.data[,1]))
categories <- categories[is.na(categories) == F]
clusters.names <- as.numeric(categories)
clusters <- match(categories.data[,1], categories)
clusters.remember <- clusters
centers <- length(categories)
}
# centroids
centroid.data <- background.data[1:centers,]
centroid.rda <- rda.scores[1:centers,]
centroid.data[] <- NA
centroid.rda[] <- NA
if (use.silhouette == T) {
background.silhouette <- cluster::silhouette(clusters, vegan::vegdist(rda.scores, method="euc"))
clusters[background.silhouette[,"sil_width"] <= 0] <- -1
}
for (i in c(1:centers)) {
centroid.data[i,] <- colMeans(background.data[as.numeric(clusters)==i,])
centroid.rda[i,] <- colMeans(rda.scores[as.numeric(clusters)==i,])
}
# calculate variance for Mahalanobis distance
cov.mahal <- cov(background.data)
# find analog locations that are closest to the centroids in PCA space
centroid.analogs <- cbind(a[1:centers,], background.data[1:centers,], pca.dist=as.numeric(rep(NA, centers)))
centroid.analogs[] <- NA
remember.closest <- numeric(centers)
for (i in c(1:centers)) {
centroid.rda1 <- rbind(centroid.rda[i,], rda.scores)
euc.dist <- as.matrix(vegan::vegdist(centroid.rda1, "euc"))
euc.dist <- euc.dist[1,]
euc.dist <- euc.dist[-1]
closest <- as.numeric(which.min(euc.dist))
remember.closest[i] <- closest
centroid.analogs[i, as.numeric(na.omit(match(names(a), names(centroid.analogs))))] <- a[closest,]
centroid.analogs[i, as.numeric(na.omit(match(names(background.data), names(centroid.analogs))))] <- background.data[closest,]
centroid.analogs[i, as.numeric(na.omit(match("pca.dist", names(centroid.analogs))))] <- euc.dist[closest]
}
# output
centroid.model <- list(centroids=centroid.data, centroid.analogs=centroid.analogs, cov.mahal=cov.mahal, name=name, clusters.names=clusters.names)
# plotting
if (plotit == T) {
par.old <- graphics::par(no.readonly=T)
if (dev.new.width > 0 && dev.new.height > 0) {grDevices::dev.new(width=dev.new.width, height=dev.new.height)}
graphics::par(mfrow=c(2,2))
zone <- predict.zone(centroid.model, newdata=background.data)
# simple plot in geographic space with K-means clustering
graphics::plot(a[, 2] ~ a[, 1], pch=15, col=grDevices::rainbow(centers)[as.numeric(clusters.remember)],
main="K-means zones in geographical space", xlab=names(a)[1], ylab=names(a)[2])
graphics::points(centroid.analogs[, 2] ~ centroid.analogs[, 1], pch=8)
graphics::text(centroid.analogs[, 2] ~ centroid.analogs[, 1], labels=c(1:centers), pos=3)
graphics::legend(x="topright", legend=c(1:centers), pch=rep(15, centers), col=grDevices::rainbow(centers))
# simple plot in geographic space with Mahalanobis clustering
graphics::plot(a[, 2] ~ a[, 1], pch=15, col=grDevices::rainbow(centers)[as.numeric(zone)],
main="predicted zones from centroid", xlab=names(a)[1], ylab=names(a)[2])
graphics::points(centroid.analogs[, 2] ~ centroid.analogs[, 1], pch=8)
graphics::text(centroid.analogs[, 2] ~ centroid.analogs[, 1], labels=c(1:centers), pos=3)
graphics::legend(x="topright", legend=c(1:centers), pch=rep(15, centers), col=grDevices::rainbow(centers))
# plot in PCA space
graphics::plot(rda.scores[, 2] ~ rda.scores[, 1], pch=15, col=grDevices::rainbow(centers)[as.numeric(zone)], main="K-means zones in environmental space")
graphics::points(centroid.rda[, 2] ~ centroid.rda[, 1], pch=20)
graphics::text(centroid.rda[, 2] ~ centroid.rda[, 1], labels=c(1:centers), pos=3)
graphics::points(rda.scores[remember.closest, 2] ~ rda.scores[remember.closest, 1], pch=8)
graphics::legend(x="topright", legend=c(1:centers), pch=rep(15, centers), col=grDevices::rainbow(centers))
if (ax >= 4) {
graphics::plot(rda.scores[, 4] ~ rda.scores[, 3], pch=15, col=grDevices::rainbow(centers)[as.numeric(zone)], main="K-means zones in environmental space")
graphics::points(centroid.rda[, 4] ~ centroid.rda[, 3], pch=20)
graphics::text(centroid.rda[, 4] ~ centroid.rda[, 3], labels=c(1:centers), pos=3)
graphics::points(rda.scores[remember.closest, 4] ~ rda.scores[remember.closest, 3], pch=8)
graphics::legend(x="topright", legend=c(1:centers), pch=rep(15, centers), col=grDevices::rainbow(centers))
}
graphics::par(par.old)
cat(paste("\n\n", "In graphs, circles are locations of centroids and asterisks locations of analogues of centroids", "\n", sep = ""))
}
# output
return(centroid.model)
}
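# Illustrative usage sketch (not part of the original source): deriving
# environmental centroids within a modelled presence area. 'presence.map'
# (RasterLayer with 0/1 values) and 'predictors' (RasterStack) are assumed objects.
if (FALSE) {
    zones <- ensemble.centroids(presence.raster=presence.map, x=predictors,
        an=5000, pca.var=0.95, centers=0, plotit=TRUE)
    zones$centroids         # environmental values at the centroids
    zones$centroid.analogs  # closest analog locations in PCA space
}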
# ---- end of file: ensemble.centroids.R ----
`ensemble.chull.create` <- function(
x.pres=NULL, p=NULL, buffer.width=0.2, buffer.maxmins=FALSE, lonlat.dist=FALSE,
poly.only=FALSE,
RASTER.format="GTiff", RASTER.datatype="INT1U", RASTER.NAflag=255,
overwrite=TRUE,
...
)
{
# if (! require(dismo)) {stop("Please install the dismo package")}
if (poly.only == F) {
if(is.null(x.pres) == T) {stop("value for argument x.pres is missing (RasterLayer object)")}
if(inherits(x.pres, "RasterLayer") == F) {stop("x.pres is not a RasterLayer object")}
x <- x.pres
if (raster::maxValue(x) > 1) {
cat(paste("Warning: base.raster has values larger than 1, hence does not provide presence-absence", sep=""))
}
}
if(is.null(p) == T) {stop("presence locations are missing")}
names(p) <- c("x", "y")
#
# create convex hull around presence locations
# modified from red::map.habitat with option mcp=T
# modification includes creation of buffer around convex hull
#
#
cat(paste("\n", "Creation of convex hull around presence locations", sep=""))
#
vertices <- grDevices::chull(p)
vertices <- c(vertices, vertices[1])
vertices <- p[vertices, ]
poly <- sp::Polygon(vertices)
poly <- sp::Polygons(list(poly), 1)
poly <- sp::SpatialPolygons(list(poly))
# modification for BiodiversityR
if (buffer.maxmins == FALSE) {
maxdist1 <- max(raster::pointDistance(p, lonlat=F), na.rm=T)
maxdist <- maxdist1 * buffer.width
if (lonlat.dist == TRUE) {maxdist2 <- max(raster::pointDistance(p, lonlat=T), na.rm=T)}
}else{
point.dists <- raster::pointDistance(p, lonlat=F, allpairs=F)
point.dists[point.dists == 0] <- NA
maxdist1 <- max(apply(point.dists, 2, min, na.rm=T))
if (lonlat.dist == TRUE) {
pres.distances <- array(dim=c(nrow(p), nrow(p)))
for (i in 1:nrow(p)) {
pres.distances[, i] <- geosphere::distGeo(p[], p[i, ])
}
pres.distances <- data.frame(pres.distances)
pres.distances[pres.distances == 0] <- NA
min.distances <- apply(pres.distances, 2, min, na.rm=T)
maxdist2 <- max(min.distances)
}
maxdist <- maxdist1 * buffer.width
}
# changed Dec 2022 to remove dependency on rgeos
# poly <- rgeos::gBuffer(poly, width=maxdist)
poly.sf <- sf::st_as_sf(poly)
poly <- sf::st_buffer(poly.sf, dist=maxdist)
#
if (buffer.maxmins == FALSE) {
cat(paste("\n", "Buffer around convex hull of ", maxdist, " (", buffer.width, " * ", maxdist1, ", where ", maxdist1 , " is the maximum distance among presence locations)", "\n", sep=""))
if (lonlat.dist == TRUE) {cat(paste("This maximum distance corresponds to a distance in km of: ", maxdist2/1000, "\n"))}
}else{
cat(paste("\n", "Buffer around convex hull of ", maxdist, " (", buffer.width, " * ", maxdist1, ", where ", maxdist1 , " is the maximum of the distances to the closest neighbour for each presence location)", "\n", sep=""))
if (lonlat.dist == TRUE) {cat(paste("This maximum distance corresponds to a distance in km of: ", maxdist2/1000, "\n"))}
}
if (poly.only == TRUE) {return(list(convex.hull=poly))}
# modification ended
patches <- raster::clump(x, gaps=F)
selPatches <- raster::unique(raster::extract(patches, poly, df=T, weights=T)$clumps)
selPatches <- selPatches[!is.na(selPatches)]
allPatches <- raster::unique(patches)
allPatches <- as.data.frame(cbind(allPatches, rep(0, length(allPatches))))
names(allPatches) <- c("patches", "selected")
allPatches[selPatches, 2] <- 1
patches <- raster::subs(patches, allPatches)
raster::setMinMax(patches)
#
# save
raster.name <- names(x)
names(patches) <- raster.name
dir.create("ensembles", showWarnings = F)
dir.create("ensembles/chull", showWarnings = F)
# modified to save the patches as 'mask'...
filename1 <- paste("ensembles/chull/", "mask", sep="")
raster::writeRaster(patches, filename=filename1, progress='text', format=RASTER.format, overwrite=overwrite, datatype=RASTER.datatype, NAflag=RASTER.NAflag, ...)
# avoid possible problems with saving of names of the raster layers
# no longer used with default format of GTiff since DEC-2022
# raster::writeRaster(patches, filename="working.grd", overwrite=T)
# working.raster <- raster::raster("working.grd")
# names(working.raster) <- raster.name
# raster::writeRaster(working.raster, filename=filename1, progress='text', format=RASTER.format, overwrite=overwrite, datatype=RASTER.datatype, NAflag=RASTER.NAflag, ...)
#
return(list(mask.layer=patches, convex.hull=poly))
}
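# Usage sketch added for illustration (not original package code): creating the
# buffered convex-hull mask around presence locations. 'suitability.map'
# (RasterLayer with 0/1 presence predictions) and 'pres' (presence coordinates)
# are assumed objects.
if (FALSE) {
    chull.out <- ensemble.chull.create(x.pres=suitability.map, p=pres,
        buffer.width=0.2, lonlat.dist=TRUE)
    chull.out$mask.layer  # raster with patches retained inside the buffered hull
}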
`ensemble.chull.apply` <- function(
x.spec=NULL, mask.layer=NULL, keep.old=T,
RASTER.format="GTiff", RASTER.datatype="INT1U", RASTER.NAflag=255,
overwrite=TRUE,
...
)
{
# if (! require(dismo)) {stop("Please install the dismo package")}
if(is.null(x.spec) == T) {stop("value for parameter x.spec is missing (RasterLayer object)")}
if(inherits(x.spec, "RasterLayer") == F) {stop("x.spec is not a RasterLayer object")}
x <- x.spec
if(is.null(mask.layer) == T) {stop("value for parameter mask.layer is missing (RasterLayer object)")}
if(inherits(mask.layer, "RasterLayer") == F) {stop("mask.layer is not a RasterLayer object")}
if (raster::maxValue(mask.layer) > 1) {
cat(paste("Warning: mask.layer has values larger than 1, hence does not provide presence mask layer", sep=""))
}
# if (! require(tools)) {stop("tools package not available")}
filename1 <- raster::filename(x)
#
# modified in DEC-2022 to create a new directory rather than overwriting the presence file
# if (keep.old == T){
# extension1 <- paste(".", tools::file_ext(filename1), sep="")
# extension2 <- paste("_old.", tools::file_ext(filename1), sep="")
# filename2 <- gsub(pattern=extension1, replacement=extension2, x=filename1)
# raster::writeRaster(x, filename=filename2, overwrite=overwrite, ...)
# }
dir.create("ensembles", showWarnings = F)
dir.create("ensembles/chull", showWarnings = F)
filename3 <- paste0(getwd(), "/ensembles/chull/", basename(filename1))
#
# cells that are 0 in the mask should become 0 in the output file
masked.x <- raster::mask(x, mask=mask.layer, maskvalue=0, updatevalue=0)
#
raster.name <- names(x)
names(masked.x) <- raster.name
raster::writeRaster(masked.x, filename=filename3, progress='text', format=RASTER.format, overwrite=overwrite, datatype=RASTER.datatype, NAflag=RASTER.NAflag, ...)
# avoid possible problems with saving of names of the raster layers
# no longer used with default format of GTiff since DEC-2022
# raster::writeRaster(masked.x, filename="working.grd", overwrite=T)
# working.raster <- raster::raster("working.grd")
# names(working.raster) <- raster.name
# raster::writeRaster(working.raster, filename=filename1, progress='text', format=RASTER.format, overwrite=overwrite, datatype=RASTER.datatype, NAflag=RASTER.NAflag, ...)
#
if (keep.old == T) {
# old.raster <- raster::raster(filename2)
return(list(masked.raster=masked.x, old.raster=x))
}else{
return(masked.x)
}
}
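# Illustrative usage sketch (not part of the original source): applying the mask
# produced by ensemble.chull.create to a suitability layer. 'suitability.map' and
# 'chull.out' (result of ensemble.chull.create) are assumed objects.
if (FALSE) {
    masked.out <- ensemble.chull.apply(x.spec=suitability.map,
        mask.layer=chull.out$mask.layer, keep.old=TRUE)
    masked.out$masked.raster
}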
`ensemble.chull.buffer.distances` <- function(
p=NULL, buffer.maxmins=FALSE, lonlat.dist=FALSE
)
{
names(p) <- c("x", "y")
if (buffer.maxmins==FALSE) {
# maximum
point.dists <- raster::pointDistance(p, lonlat=F)
max.distances <- apply(point.dists, 2, max, na.rm=T)
max.1a <- which.max(max.distances)
max.1b <- which.max(point.dists[max.1a, ])
max.1c <- max(max.distances)
cat(paste("Maximum distance is between locations ", max.1a, " and ", max.1b, " with distance in native coordinates of ", max.1c, "\n", sep=""))
if (lonlat.dist == TRUE) {
pres.distances <- array(dim=c(nrow(p), nrow(p)))
for (i in 1:nrow(p)) {
pres.distances[, i] <- geosphere::distGeo(p[], p[i, ])
}
pres.distances <- data.frame(pres.distances)
pres.distances[pres.distances == 0] <- NA
max.distances <- apply(pres.distances, 2, max, na.rm=T)
max.1a <- which.max(max.distances)
max.1b <- which.max(pres.distances[max.1a, ])
max.1c <- max(max.distances)/1000
cat(paste("Maximum distance is between locations ", max.1a, " and ", max.1b, " with distance in km of ", max.1c, "\n", sep=""))
}
result <- c(max.1a, max.1b, max.1c)
names(result) <- c("location.1", "location.2", "distance")
}else{
point.dists <- raster::pointDistance(p, lonlat=F)
point.dists[point.dists == 0] <- NA
min.distances <- apply(point.dists, 2, min, na.rm=T)
max.1a <- which.max(min.distances)
min.1b <- which.min(point.dists[max.1a, ])
max.1c <- max(min.distances)
cat(paste("Maximum closest neighbour distance is between locations ", max.1a, " and ", min.1b, " with distance in native coordinates of ", max.1c, "\n", sep=""))
if (lonlat.dist == TRUE) {
pres.distances <- array(dim=c(nrow(p), nrow(p)))
for (i in 1:nrow(p)) {
pres.distances[, i] <- geosphere::distGeo(p[], p[i, ])
}
pres.distances <- data.frame(pres.distances)
pres.distances[pres.distances == 0] <- NA
min.distances <- apply(pres.distances, 2, min, na.rm=T)
max.1a <- which.max(min.distances)
min.1b <- which.min(pres.distances[max.1a, ])
max.1c <- max(min.distances)/1000
cat(paste("Maximum closest neighbour distance is between locations ", max.1a, " and ", min.1b, " with distance in km of ", max.1c, "\n", sep=""))
}
result <- c(max.1a, min.1b, max.1c)
names(result) <- c("location.1", "location.2", "distance")
}
return(result)
}
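# Usage sketch added for illustration (not original package code): reporting the
# pairwise distance that would determine the buffer width. 'pres' (presence
# coordinates) is an assumed object.
if (FALSE) {
    ensemble.chull.buffer.distances(p=pres, buffer.maxmins=TRUE, lonlat.dist=TRUE)
}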
`ensemble.chull.MSDM` <- function(
p=NULL, a=NULL, species.name=NULL,
suit.file=NULL, suit.divide=1000, MSDM.dir = NULL,
method = "BMCP", threshold = "spec_sens",
buffer = "species_specific"
)
{
if(is.null(p) == T) {stop("Provide p (presence observations)")}
if(ncol(p) != 2) {stop("p (presence observations) is expected to have 2 columns (x and y coordinates)")}
if(is.null(a) == T) {stop("Provide a (absence or background observations)")}
if(ncol(a) != 2) {stop("a (absence observations) is expected to have 2 columns (x and y coordinates)")}
if(is.null(species.name) == T) {stop("Provide species name")}
if(file.exists(suit.file) == F) {stop("Suitability file does not exist")}
if(dir.exists(MSDM.dir) == F) {stop("MSDM directory does not exist")}
name.OK <- gsub(species.name, pattern=" ", replacement="_")
p <- data.frame(sp=rep(name.OK, nrow(p)), p)
a <- data.frame(sp=rep(name.OK, nrow(a)), a)
names(p) <- names(a) <- c("sp", "x", "y")
suit.raster <- raster::raster(suit.file)
    prob.raster <- suit.raster/suit.divide
names(prob.raster) <- name.OK
prob.file <- paste(MSDM.dir, "/", name.OK, ".tif", sep="")
raster::writeRaster(prob.raster, filename=prob.file, overwrite=TRUE)
cat(paste("created file: ", prob.file, "\n", sep=""))
MSDM.script <- paste("MSDM_Posteriori(records=M.out$records, absences=M.out$absences, ", "x='x', y='y', sp='sp',", "dirraster='", MSDM.dir, "', dirsave='", MSDM.dir, "', ", "method='", method, "', buffer='", buffer, "')", sep="")
line1 <- paste("MSDM_Posteriori(records=M.out$records, absences=M.out$absences,", sep="")
line2 <- paste("x='x', y='y', sp='sp',", sep="")
line3 <- paste("dirraster='", MSDM.dir, "', dirsave='", MSDM.dir, "',", sep="")
line4 <- paste("method='", method, "', buffer='", buffer, "')", sep="")
return(list(records=p, absences=a, script=MSDM.script,
line1=line1, line2=line2, line3=line3, line4=line4))
}
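# Illustrative usage sketch (not part of the original source): preparing inputs
# and a script template for MSDM_Posteriori. 'pres', 'backg' and the file and
# directory paths shown are assumptions and must exist beforehand.
if (FALSE) {
    M.out <- ensemble.chull.MSDM(p=pres, a=backg, species.name="Species001",
        suit.file="ensembles/suitability/Species001.tif",
        MSDM.dir="MSDM", method="BMCP", buffer="species_specific")
    cat(M.out$script, "\n")  # script line that uses M.out$records and M.out$absences
}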
# ---- end of file: ensemble.chull.R ----
`ensemble.concave.hull` <- function(
baseline.data,
change.data,
complete.cases=TRUE,
VIF=TRUE, VIF.max=20, VIF.silent=TRUE,
method=c("rda", "pca", "prcomp"),
ax1=1, ax2=2,
concavity=2.5,
buffer.dist=NA,
ggplot=TRUE
)
{
if (complete.cases==TRUE) {
baseline.data <- baseline.data[complete.cases(baseline.data), ]
change.data <- change.data[complete.cases(change.data), ]
}
comm1 <- rbind(baseline.data, change.data)
if (VIF==TRUE) {
VIF.vars <- BiodiversityR::ensemble.VIF.dataframe(baseline.data,
VIF.max=VIF.max,
car=FALSE,
silent=VIF.silent)$vars.included
cat(paste("VIF selection:", "\n"))
print(VIF.vars)
comm1 <- comm1[, VIF.vars]
}
env1 <- data.frame(climate=c(rep("baseline", nrow(baseline.data)),
rep("change", nrow(change.data))))
env1$climate <- factor(env1$climate)
method <- method[1]
if (method == "rda") {
rda1 <- vegan::rda(comm1 ~ climate, data=env1, scale=TRUE)
        envfit.result <- vegan::envfit(rda1, env=comm1, choices=c(ax1, ax2))
envfit.df <- data.frame(variable = names(envfit.result$vectors$r),
R2 = envfit.result$vectors$r,
pvals =envfit.result$vectors$pvals)
envfit.df <- envfit.df[envfit.df$pvals <= 0.05, ]
envfit.df <- envfit.df[envfit.df$R2 >= 0.50, ]
envfit.vars <- envfit.df$variable
cat(paste("Variables selected via envfit:", "\n"))
print(envfit.vars)
comm2 <- comm1[, which(names(comm1) %in% envfit.vars)]
rda1 <- vegan::rda(comm2, scale=TRUE)
rda1 <- BiodiversityR::caprescale(rda1)
}
if (method == "pca") {
rda1 <- vegan::rda(comm1, scale=TRUE)
rda1 <- BiodiversityR::caprescale(rda1)
}
if (method == "prcomp") {
rda1 <- stats::prcomp(comm1, center=TRUE, scale.=TRUE)
}
    rda1.s <- vegan::scores(rda1, choices=c(ax1, ax2),
                            scaling="sites", display="sites")
rda1.b <- as.matrix(rda1.s[env1$climate == "baseline", ])
rda1.c <- as.matrix(rda1.s[env1$climate == "change", ])
baseline.hull <- concaveman::concaveman(rda1.b, concavity=concavity)
baseline.hull <- sf::st_make_valid(sf::st_polygon(list(baseline.hull)))
baseline.area <- sf::st_area(baseline.hull)
change.hull <- concaveman::concaveman(rda1.c, concavity=concavity)
change.hull <- sf::st_make_valid(sf::st_polygon(list(change.hull)))
change.area <- sf::st_area(change.hull)
novel.hull <- sf::st_difference(change.hull, baseline.hull)
novel.area <- sf::st_area(novel.hull)
rda1.base <- data.frame(rda1.b)
names(rda1.base) <- c("X", "Y")
rda1.df <- data.frame(rda1.c)
names(rda1.df) <- c("X", "Y")
rda1.sf <- sf::st_as_sf(rda1.df, coords=c("X", "Y"))
intersect.hull <- sf::st_intersection(change.hull, baseline.hull)
intersect.area <- sf::st_area(intersect.hull)
if (is.na(buffer.dist) == TRUE) {
range.x <- (max(rda1.df$X)-min(rda1.df$X))/10000
range.y <- (max(rda1.df$Y)-min(rda1.df$Y))/10000
buffer.dist <- max(c(range.x, range.y))
}
intersect.buffer <- sf::st_buffer(intersect.hull,
dist=buffer.dist)
rda1.novel <- sf::st_intersects(rda1.sf, intersect.buffer, sparse=FALSE)
# rda1.novel <- st_contains_properly(novel.hull, rda1.sf, sparse=FALSE)
rda1.df$novel <- as.numeric(rda1.novel) == 0
# concavity is returned together with buffer.dist so that
# ensemble.concave.venn() can reuse both as its defaults
out1 <- list(rda.object=rda1,
method=method,
baseline.hull=baseline.hull,
baseline.area=baseline.area,
change.hull=change.hull,
change.area=change.area,
overlap.hull=intersect.hull,
overlap.area=intersect.area,
novel.hull=novel.hull,
novel.area=novel.area,
concavity=concavity,
buffer.dist=buffer.dist,
change.points=rda1.df,
baseline.points=rda1.base)
if (ggplot==TRUE) {
ggplot.out <- ggplot2::ggplot() +
ggplot2::geom_sf(data=baseline.hull,
colour="green", fill=ggplot2::alpha("green", 0.4), size=0.7) +
ggplot2::geom_sf(data=change.hull,
colour="blue", fill=ggplot2::alpha("blue", 0.4), size=0.7) +
ggplot2::geom_sf(data=novel.hull,
colour="orange", fill=NA, size=0.7)
if (nrow(rda1.df[rda1.df$novel == FALSE, ]) > 0) {
points.in <- sf::st_as_sf(rda1.df[rda1.df$novel == FALSE, ],
coords=c("X", "Y"))
ggplot.out <- ggplot.out +
ggplot2::geom_sf(data=points.in,
colour="black", size=1.0)
}else{
cat(paste("Note: No future points inside the baseline hull", "\n"))
}
if (nrow(rda1.df[rda1.df$novel == TRUE, ]) > 0) {
points.out <- sf::st_as_sf(rda1.df[rda1.df$novel == TRUE, ],
coords=c("X", "Y"))
ggplot.out <- ggplot.out +
ggplot2::geom_sf(data=points.out,
colour="red", size=1.0)
}else{
cat(paste("Note: No future points outside the baseline hull", "\n"))
}
out1 <- as.list(c(out1, list(ggplot.out=ggplot.out)))
}
return(out1)
}
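# Illustrative sketch (not part of the package code): a minimal call to
# ensemble.concave.hull, assuming 'baseline.env' and 'future.env' are
# hypothetical data.frames of environmental values for the same variables
# under baseline and changed climates. The result holds the hulls, their
# areas and (optionally) a ggplot object.
#
# chull.result <- ensemble.concave.hull(baseline.data=baseline.env,
#     change.data=future.env,
#     method="rda", VIF=TRUE, concavity=2.5, ggplot=TRUE)
# chull.result$novel.area    # area of changed climate space outside the baseline hull
# chull.result$ggplot.out    # plot of baseline, change and novel hulls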
`ensemble.concave.venn` <- function(
x,
candidate.data,
concavity=x$concavity,
buffer.dist=x$buffer.dist,
ggplot=TRUE,
show.candidate.points=TRUE
)
{
if (x$method == "rda" || x$method == "pca"){
rda2.c2 <- predict(x$rda.object, newdata=candidate.data,
scaling="sites", type="wa", model="CA")
rda2.c <- rda2.c2[, c(1:2)]
}
if (x$method == "prcomp"){
# predict.prcomp applies the centering and scaling stored in the prcomp
# object, so no additional center/scale arguments are needed here
rda2.c1 <- predict(x$rda.object, newdata=candidate.data)
rda2.c <- vegan::scores(rda2.c1[, c(1:2), drop=FALSE])
}
rda2.c <- rda2.c[complete.cases(rda2.c), ]
rda2.c <- as.matrix(rda2.c)
cand.hull <- concaveman::concaveman(rda2.c, concavity=concavity)
cand.hull <- sf::st_make_valid(sf::st_polygon(list(cand.hull)))
cand.area <- sf::st_area(cand.hull)
cand.buffer <- sf::st_buffer(cand.hull,
dist=buffer.dist)
rda1.df <- x$change.points
rda1.sf <- sf::st_as_sf(rda1.df, coords=c("X", "Y"))
rda1.candidate.in <- sf::st_intersects(rda1.sf, cand.buffer, sparse=FALSE)
# rda1.novel <- st_contains_properly(novel.hull, rda1.sf, sparse=FALSE)
rda1.df$candidate.in <- as.numeric(rda1.candidate.in) == 1
rda2.df <- data.frame(rda2.c)
names(rda2.df) <- c("X", "Y")
out1 <- list(change.points=rda1.df,
candidate.points=rda2.df,
candidate.hull=cand.hull,
candidate.area=cand.area)
if (ggplot==TRUE) {
baseline.hull <- x$baseline.hull
change.hull <- x$change.hull
ggplot.out <- ggplot2::ggplot() +
ggplot2::geom_sf(data=baseline.hull,
colour="green", fill=ggplot2::alpha("green", 0.4), size=0.7) +
ggplot2::geom_sf(data=change.hull,
colour="blue", fill=ggplot2::alpha("blue", 0.4), size=0.7) +
ggplot2::geom_sf(data=cand.hull,
colour="orange", fill=NA, size=0.7)
rda1.fut <- rda1.df[rda1.df$novel == TRUE, ]
if (nrow(rda1.fut[rda1.fut$candidate.in == TRUE, ]) > 0) {
points.in <- sf::st_as_sf(rda1.fut[rda1.fut$candidate.in == TRUE, ],
coords=c("X", "Y"))
ggplot.out <- ggplot.out +
ggplot2::geom_sf(data=points.in,
colour="black", size=1.0)
}else{
cat(paste("Note: No future points inside the candidate hull", "\n"))
}
if (nrow(rda1.fut[rda1.fut$candidate.in == FALSE, ]) > 0) {
points.out <- sf::st_as_sf(rda1.fut[rda1.fut$candidate.in == FALSE, ],
coords=c("X", "Y"))
ggplot.out <- ggplot.out +
ggplot2::geom_sf(data=points.out,
colour="red", size=1.0)
}else{
cat(paste("Note: No future points outside the candidate hull", "\n"))
}
if (show.candidate.points == TRUE) {
rda2.sf <- sf::st_as_sf(rda2.df, coords=c("X", "Y"))
ggplot.out <- ggplot.out +
ggplot2::geom_sf(data=rda2.sf,
colour="orange", size=1.0)
}
out1 <- as.list(c(out1, list(ggplot.out=ggplot.out)))
}
return(out1)
}
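# Illustrative sketch (not part of the package code): ensemble.concave.venn
# projects a hypothetical data.frame 'candidate.env' (for example, conditions
# at candidate planting sites) into the ordination stored in a previous
# ensemble.concave.hull result ('chull.result' from the sketch above) and
# checks which novel future points fall inside the candidate hull.
#
# venn.result <- ensemble.concave.venn(chull.result,
#     candidate.data=candidate.env,
#     ggplot=TRUE, show.candidate.points=TRUE)
# venn.result$candidate.area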
`ensemble.concave.union` <- function(
x,
candidate.venns,
buffer.dist=x$buffer.dist,
ggplot=TRUE,
show.candidate.points=TRUE
)
{
cand.hulls <- vector("list", length(candidate.venns))
for (i in 1:length(candidate.venns)) {
cand.hulls[[i]] <- candidate.venns[[i]]$candidate.hull
}
cand.hulls <- sf::st_make_valid(sf::st_sfc(cand.hulls))
cand.hull <- sf::st_make_valid(sf::st_union(cand.hulls, by_feature=FALSE))
cand.area <- sf::st_area(cand.hull)
cand.buffer <- sf::st_buffer(cand.hull,
dist=buffer.dist)
rda1.df <- x$change.points
rda1.sf <- sf::st_as_sf(rda1.df, coords=c("X", "Y"))
rda1.candidate.in <- sf::st_intersects(rda1.sf, cand.buffer, sparse=FALSE)
rda1.df$candidate.in <- as.numeric(rda1.candidate.in) == 1
out1 <- list(change.points=rda1.df,
candidate.hull=cand.hull,
candidate.area=cand.area)
if (ggplot==TRUE) {
baseline.hull <- x$baseline.hull
change.hull <- x$change.hull
ggplot.out <- ggplot2::ggplot() +
ggplot2::geom_sf(data=baseline.hull,
colour="green", fill=ggplot2::alpha("green", 0.4)) +
ggplot2::geom_sf(data=change.hull,
colour="blue", fill=ggplot2::alpha("blue", 0.4)) +
ggplot2::geom_sf(data=cand.hull,
colour="orange", fill=NA, size=0.7)
rda1.fut <- rda1.df[rda1.df$novel == TRUE, ]
if (nrow(rda1.fut[rda1.fut$candidate.in == TRUE, ]) > 0) {
points.in <- sf::st_as_sf(rda1.fut[rda1.fut$candidate.in == TRUE, ],
coords=c("X", "Y"))
ggplot.out <- ggplot.out +
ggplot2::geom_sf(data=points.in,
colour="black", size=1.0)
}else{
cat(paste("Note: No future points inside the candidate hull", "\n"))
}
if (nrow(rda1.fut[rda1.fut$candidate.in == FALSE, ]) > 0) {
points.out <- sf::st_as_sf(rda1.fut[rda1.fut$candidate.in == FALSE, ],
coords=c("X", "Y"))
ggplot.out <- ggplot.out +
ggplot2::geom_sf(data=points.out,
colour="red", size=1.0)
}else{
cat(paste("Note: No future points outside the candidate hull", "\n"))
}
if (show.candidate.points == TRUE) {
rda2.df <- candidate.venns[[1]]$candidate.points
if (length(candidate.venns) > 1) {
for (i in 2:length(candidate.venns)) {
rda2.df <- rbind(rda2.df, candidate.venns[[i]]$candidate.points)
}
}
rda2.sf <- sf::st_as_sf(rda2.df, coords=c("X", "Y"))
ggplot.out <- ggplot.out +
ggplot2::geom_sf(data=rda2.sf,
colour="orange", size=1.0)
}
out1 <- as.list(c(out1, list(ggplot.out=ggplot.out)))
}
return(out1)
}
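# Illustrative sketch (not part of the package code): combine several
# ensemble.concave.venn results (here a hypothetical list 'venn.list', for
# example one element per candidate species or provenance) into a single
# unioned candidate hull and re-check coverage of the novel future points.
#
# union.result <- ensemble.concave.union(chull.result,
#     candidate.venns=venn.list)
# union.result$candidate.area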
ensemble.outliers <- function(x,
ID.var=NULL, bioc.vars=NULL,
fence.k=2.5, n_min=5)
{
y <- x
y$count <- rep(0, nrow(y))
if (is.null(ID.var)) {
y$ID <- row.names(y)
ID.var <- "ID"
}else{
x <- x[, which(names(x) != ID.var), drop=FALSE]
}
if (is.null(bioc.vars) == FALSE) {x <- x[, bioc.vars, drop=FALSE]}
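# For each variable, flag records outside the Tukey-type fences
# [Q1 - fence.k*IQR, Q3 + fence.k*IQR] and count for each record in how many
# variables it is flagged; records flagged for at least n_min variables are
# reported as outliers.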
for (i in 1:ncol(x)) {
iqr <- IQR(x[, i], na.rm=TRUE)
lb <- summary(x[, i], na.rm=TRUE)[2] - iqr * fence.k
ub <- summary(x[, i], na.rm=TRUE)[5] + iqr * fence.k
# earlier version (caused an error when only one variable was used):
# outl <- as.numeric(x[, i] <= lb | x[, i] >= ub)
outl <- as.numeric(x[, i] < lb | x[, i] > ub)
y$count <- y$count + outl
}
y$outlier <- y$count >= n_min
return(y[, c(ID.var, "count", "outlier")])
}
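# Illustrative sketch (not part of the package code): screen a hypothetical
# data.frame 'bioclim.data' of bioclimatic values at presence locations for
# records that have unusual values in many variables at once.
#
# outlier.check <- ensemble.outliers(bioclim.data, fence.k=2.5, n_min=5)
# outlier.check[outlier.check$outlier == TRUE, ]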
# ---- end of source file: /scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/ensemble.concave.R ----
`ensemble.drop1` <- function(
x=NULL, p=NULL,
a=NULL, an=1000, excludep=FALSE, target.groups=FALSE,
k=0, pt=NULL, at=NULL, SSB.reduce=FALSE, CIRCLES.d=100000,
TrainData=NULL, TestData=NULL,
VIF=FALSE, COR=FALSE,
SINK=FALSE, species.name="Species001",
difference=FALSE, variables.alone=FALSE,
ENSEMBLE.tune=FALSE,
ENSEMBLE.best=0, ENSEMBLE.min=0.7, ENSEMBLE.exponent=1,
input.weights=NULL,
MAXENT=1, MAXNET=1, MAXLIKE=1, GBM=1, GBMSTEP=0, RF=1, CF=1,
GLM=1, GLMSTEP=1, GAM=1, GAMSTEP=1, MGCV=1,
MGCVFIX=0, EARTH=1, RPART=1, NNET=1, FDA=1, SVM=1, SVME=1, GLMNET=1,
BIOCLIM.O=0, BIOCLIM=1, DOMAIN=1, MAHAL=1, MAHAL01=1,
PROBIT=FALSE,
Yweights="BIOMOD",
layer.drops=NULL, factors=NULL, dummy.vars=NULL,
maxit=100,
MAXENT.a=NULL, MAXENT.an=10000,
MAXENT.path=paste(getwd(), "/models/maxent_", species.name, sep=""),
MAXNET.classes="default", MAXNET.clamp=FALSE, MAXNET.type="cloglog",
MAXLIKE.method="BFGS",
GBM.n.trees=2001,
GBMSTEP.tree.complexity=5, GBMSTEP.learning.rate=0.005,
GBMSTEP.bag.fraction=0.5, GBMSTEP.step.size=100,
RF.ntree=751,
CF.ntree=751,
GLM.family=binomial(link="logit"),
GLMSTEP.steps=1000, GLMSTEP.scope=NULL, GLMSTEP.k=2,
GAM.family=binomial(link="logit"),
GAMSTEP.steps=1000, GAMSTEP.scope=NULL, GAMSTEP.pos=1,
MGCV.select=FALSE,
EARTH.glm=list(family=binomial(link="logit"), maxit=maxit),
RPART.xval=50,
NNET.size=8, NNET.decay=0.01,
GLMNET.nlambda=100, GLMNET.class=FALSE,
BIOCLIM.O.fraction=0.9,
MAHAL.shape=1
)
{
.BiodiversityR <- new.env()
# if (! require(dismo)) {stop("Please install the dismo package")}
if (MAXLIKE > 0) {
cat(paste("\n", "WARNING: MAXLIKE algorithm will not be implemented as MAXLIKE does not accept (new) data.frames as input", "\n", sep = ""))
MAXLIKE <- 0
}
if (is.null(layer.drops) == F) {
layer.drops <- as.character(layer.drops)
if (is.null(x) == F) {
x <- raster::dropLayer(x, which(names(x) %in% layer.drops))
x <- raster::stack(x)
}
factors <- as.character(factors)
dummy.vars <- as.character(dummy.vars)
nd <- length(layer.drops)
for (i in 1:nd) {
if (is.null(factors) == F) {
factors <- factors[factors != layer.drops[i]]
if(length(factors) == 0) {factors <- NULL}
}
if (is.null(dummy.vars) == F) {
dummy.vars <- dummy.vars[dummy.vars != layer.drops[i]]
if(length(dummy.vars) == 0) {dummy.vars <- NULL}
}
}
if(length(layer.drops) == 0) {layer.drops <- NULL}
}
# create output file
dir.create("outputs", showWarnings = F)
paste.file <- paste(getwd(), "/outputs/", species.name, "_output.txt", sep="")
OLD.SINK <- TRUE
if (sink.number(type="output") == 0) {OLD.SINK <- F}
if (SINK==T && OLD.SINK==F) {
if (file.exists(paste.file) == F) {
cat(paste("\n", "NOTE: results captured in file: ", paste.file, "\n", sep = ""))
}else{
cat(paste("\n", "NOTE: results appended in file: ", paste.file, "\n", sep = ""))
}
cat(paste("\n\n", "RESULTS (ensemble.drop1 function)", "\n\n", sep=""), file=paste.file, append=T)
sink(file=paste.file, append=T)
cat(paste(date(), "\n", sep=""))
print(match.call())
}
# estimate deviance
loglik.calculation <- function(obs=NULL, preds=NULL) {
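# predictions are clamped just inside (0, 1) so that the binomial
# log-likelihood stays finite; calc.mean=F requests the total rather than
# the mean deviance from dismo::calc.deviance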
preds[preds < 0.0000000001] <- 0.0000000001
preds[preds > 0.9999999999] <- 0.9999999999
out <- dismo::calc.deviance(obs=obs, pred=preds, calc.mean=F)
return(out)
}
#
# first fit with all variables
if (raster::nlayers(x) == 0) {
# MAXENT needs x to make MAXENT.TrainData, MAXLIKE can only be calibrated with x
MAXENT <- 0
MAXLIKE <- 0
}
cat(paste("\n\n", "RESULTS WITH ALL VARIABLES", "\n\n", sep=""))
tests <- ensemble.calibrate.models(x=x,
p=p, a=a, an=an, excludep=excludep, target.groups=target.groups,
k=k, pt=pt, at=at, SSB.reduce=SSB.reduce, CIRCLES.d=CIRCLES.d,
TrainData=NULL, TestData=NULL,
PLOTS=FALSE, evaluations.keep=T, models.keep=F,
VIF=VIF, COR=COR,
formulae.defaults=T, maxit=maxit,
ENSEMBLE.tune=ENSEMBLE.tune,
ENSEMBLE.best=ENSEMBLE.best, ENSEMBLE.min=ENSEMBLE.min,
ENSEMBLE.exponent=ENSEMBLE.exponent,
input.weights=input.weights,
MAXENT=MAXENT, MAXNET=MAXNET, MAXLIKE=MAXLIKE, GBM=GBM, GBMSTEP=GBMSTEP, RF=RF, CF=CF,
GLM=GLM, GLMSTEP=GLMSTEP,
GAM=GAM, GAMSTEP=GAMSTEP, MGCV=MGCV, MGCVFIX=MGCVFIX, EARTH=EARTH, RPART=RPART,
NNET=NNET, FDA=FDA, SVM=SVM, SVME=SVME, GLMNET=GLMNET,
BIOCLIM.O=BIOCLIM.O, BIOCLIM=BIOCLIM, DOMAIN=DOMAIN,
MAHAL=MAHAL, MAHAL01=MAHAL01,
PROBIT=PROBIT,
Yweights=Yweights,
layer.drops=layer.drops, factors=factors, dummy.vars=dummy.vars,
MAXENT.a=MAXENT.a, MAXENT.an=MAXENT.an, MAXENT.path=MAXENT.path,
MAXNET.classes=MAXNET.classes, MAXNET.clamp=MAXNET.clamp, MAXNET.type=MAXNET.type,
MAXLIKE.formula=NULL, MAXLIKE.method=MAXLIKE.method,
GBM.formula=NULL, GBM.n.trees=GBM.n.trees,
GBMSTEP.tree.complexity=GBMSTEP.tree.complexity,
GBMSTEP.learning.rate=GBMSTEP.learning.rate, GBMSTEP.bag.fraction=GBMSTEP.bag.fraction,
GBMSTEP.step.size=GBMSTEP.step.size,
RF.formula=NULL, RF.ntree=RF.ntree,
CF.formula=NULL, CF.ntree=CF.ntree,
GLM.formula=NULL, GLM.family=GLM.family,
GLMSTEP.k=GLMSTEP.k, GLMSTEP.steps=GLMSTEP.steps, STEP.formula=NULL, GLMSTEP.scope=NULL,
GAM.formula=NULL, GAM.family=GAM.family,
GAMSTEP.steps=GAMSTEP.steps, GAMSTEP.scope=NULL, GAMSTEP.pos=GAMSTEP.pos,
MGCV.formula=NULL, MGCV.select=MGCV.select,
MGCVFIX.formula=NULL,
EARTH.formula=NULL, EARTH.glm=EARTH.glm,
RPART.formula=NULL, RPART.xval=RPART.xval,
NNET.formula=NULL, NNET.size=NNET.size, NNET.decay=NNET.decay,
FDA.formula=NULL, SVM.formula=NULL, SVME.formula=NULL,
GLMNET.nlambda=GLMNET.nlambda, GLMNET.class=GLMNET.class,
BIOCLIM.O.fraction=BIOCLIM.O.fraction,
MAHAL.shape=MAHAL.shape)
# use output to get names of the variables
var.names <- tests$evaluations$var.names
nv <- length(var.names)
# get locations for MAXLIKE
p1 <- tests$evaluations$p
p2 <- tests$evaluations$pt
a1 <- tests$evaluations$a
a2 <- tests$evaluations$at
model.names <- c("MAXENT", "MAXNET", "MAXLIKE", "GBM", "GBMSTEP", "RF", "CF",
"GLM", "GLMSTEP", "GAM", "GAMSTEP", "MGCV",
"MGCVFIX", "EARTH", "RPART", "NNET", "FDA", "SVM", "SVME", "GLMNET",
"BIOCLIM.O", "BIOCLIM", "DOMAIN", "MAHAL", "MAHAL01", "ENSEMBLE")
output.C <- array(NA, dim=c(length(model.names), nv+1))
rownames(output.C) <- model.names
colnames(output.C) <- c("all_vars", paste("without_", var.names, sep=""))
output.T <- array(NA, dim=c(length(model.names), nv+1))
rownames(output.T) <- model.names
colnames(output.T) <- c("all_vars", paste("without_", var.names, sep=""))
output.LLC <- array(NA, dim=c(length(model.names), nv+1))
rownames(output.LLC) <- model.names
colnames(output.LLC) <- c("all_vars", paste("without_", var.names, sep=""))
output.LLT <- array(NA, dim=c(length(model.names), nv+1))
rownames(output.LLT) <- model.names
colnames(output.LLT) <- c("all_vars", paste("without_", var.names, sep=""))
if(is.null(tests$evaluations$MAXENT.C)==F) {output.C["MAXENT",1] <- tests$evaluations$MAXENT.C@auc}
if(is.null(tests$evaluations$MAXENT.T)==F) {output.T["MAXENT",1] <- tests$evaluations$MAXENT.T@auc}
if(sum(tests$evaluations$TrainData$MAXENT) > 0) {output.LLC["MAXENT",1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$MAXENT)}
if(sum(tests$evaluations$TestData$MAXENT) > 0) {output.LLT["MAXENT",1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$MAXENT)}
if(is.null(tests$evaluations$MAXNET.C)==F) {output.C["MAXNET",1] <- tests$evaluations$MAXNET.C@auc}
if(is.null(tests$evaluations$MAXNET.T)==F) {output.T["MAXNET",1] <- tests$evaluations$MAXNET.T@auc}
if(sum(tests$evaluations$TrainData$MAXNET) > 0) {output.LLC["MAXNET",1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$MAXNET)}
if(sum(tests$evaluations$TestData$MAXNET) > 0) {output.LLT["MAXNET",1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$MAXNET)}
if(is.null(tests$evaluations$MAXLIKE.C)==F) {output.C["MAXLIKE",1] <- tests$evaluations$MAXLIKE.C@auc}
if(is.null(tests$evaluations$MAXLIKE.T)==F) {output.T["MAXLIKE",1] <- tests$evaluations$MAXLIKE.T@auc}
if(sum(tests$evaluations$TrainData$MAXLIKE) > 0) {output.LLC["MAXLIKE",1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$MAXLIKE)}
if(sum(tests$evaluations$TestData$MAXLIKE) > 0) {output.LLT["MAXLIKE",1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$MAXLIKE)}
if(is.null(tests$evaluations$GBM.C)==F) {output.C["GBM",1] <- tests$evaluations$GBM.C@auc}
if(is.null(tests$evaluations$GBM.T)==F) {output.T["GBM",1] <- tests$evaluations$GBM.T@auc}
if(sum(tests$evaluations$TrainData$GBM) > 0) {output.LLC["GBM",1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$GBM)}
if(sum(tests$evaluations$TestData$GBM) > 0) {output.LLT["GBM",1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$GBM)}
if(is.null(tests$evaluations$GBMSTEP.C)==F) {output.C["GBMSTEP",1] <- tests$evaluations$GBMSTEP.C@auc}
if(is.null(tests$evaluations$GBMSTEP.T)==F) {output.T["GBMSTEP",1] <- tests$evaluations$GBMSTEP.T@auc}
if(sum(tests$evaluations$TrainData$GBMSTEP) > 0) {output.LLC["GBMSTEP",1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$GBMSTEP)}
if(sum(tests$evaluations$TestData$GBMSTEP) > 0) {output.LLT["GBMSTEP",1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$GBMSTEP)}
if(is.null(tests$evaluations$RF.C)==F) {output.C["RF",1] <- tests$evaluations$RF.C@auc}
if(is.null(tests$evaluations$RF.T)==F) {output.T["RF",1] <- tests$evaluations$RF.T@auc}
if(sum(tests$evaluations$TrainData$RF) > 0) {output.LLC["RF",1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$RF)}
if(sum(tests$evaluations$TestData$RF) > 0) {output.LLT["RF",1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$RF)}
if(is.null(tests$evaluations$CF.C)==F) {output.C["CF",1] <- tests$evaluations$CF.C@auc}
if(is.null(tests$evaluations$CF.T)==F) {output.T["CF",1] <- tests$evaluations$CF.T@auc}
if(sum(tests$evaluations$TrainData$CF) > 0) {output.LLC["CF",1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$CF)}
if(sum(tests$evaluations$TestData$CF) > 0) {output.LLT["CF",1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$CF)}
if(is.null(tests$evaluations$GLM.C)==F) {output.C["GLM",1] <- tests$evaluations$GLM.C@auc}
if(is.null(tests$evaluations$GLM.T)==F) {output.T["GLM",1] <- tests$evaluations$GLM.T@auc}
if(sum(tests$evaluations$TrainData$GLM) > 0) {output.LLC["GLM",1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$GLM)}
if(sum(tests$evaluations$TestData$GLM) > 0) {output.LLT["GLM",1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$GLM)}
if(is.null(tests$evaluations$GLMS.C)==F) {output.C["GLMSTEP",1] <- tests$evaluations$GLMS.C@auc}
if(is.null(tests$evaluations$GLMS.T)==F) {output.T["GLMSTEP",1] <- tests$evaluations$GLMS.T@auc}
if(sum(tests$evaluations$TrainData$GLMSTEP) > 0) {output.LLC["GLMSTEP",1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$GLMSTEP)}
if(sum(tests$evaluations$TestData$GLMSTEP) > 0) {output.LLT["GLMSTEP",1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$GLMSTEP)}
if(is.null(tests$evaluations$GAM.C)==F) {output.C["GAM",1] <- tests$evaluations$GAM.C@auc}
if(is.null(tests$evaluations$GAM.T)==F) {output.T["GAM",1] <- tests$evaluations$GAM.T@auc}
if(sum(tests$evaluations$TrainData$GAM) > 0) {output.LLC["GAM",1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$GAM)}
if(sum(tests$evaluations$TestData$GAM) > 0) {output.LLT["GAM",1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$GAM)}
if(is.null(tests$evaluations$GAMS.C)==F) {output.C["GAMSTEP",1] <- tests$evaluations$GAMS.C@auc}
if(is.null(tests$evaluations$GAMS.T)==F) {output.T["GAMSTEP",1] <- tests$evaluations$GAMS.T@auc}
if(sum(tests$evaluations$TrainData$GAMSTEP) > 0) {output.LLC["GAMSTEP",1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$GAMSTEP)}
if(sum(tests$evaluations$TestData$GAMSTEP) > 0) {output.LLT["GAMSTEP",1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$GAMSTEP)}
if(is.null(tests$evaluations$MGCV.C)==F) {output.C["MGCV",1] <- tests$evaluations$MGCV.C@auc}
if(is.null(tests$evaluations$MGCV.T)==F) {output.T["MGCV",1] <- tests$evaluations$MGCV.T@auc}
if(sum(tests$evaluations$TrainData$MGCV) > 0) {output.LLC["MGCV",1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$MGCV)}
if(sum(tests$evaluations$TestData$MGCV) > 0) {output.LLT["MGCV",1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$MGCV)}
if(is.null(tests$evaluations$MGCVF.C)==F) {output.C["MGCVFIX",1] <- tests$evaluations$MGCVF.C@auc}
if(is.null(tests$evaluations$MGCVF.T)==F) {output.T["MGCVFIX",1] <- tests$evaluations$MGCVF.T@auc}
if(sum(tests$evaluations$TrainData$MGCVFIX) > 0) {output.LLC["MGCVFIX",1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$MGCVFIX)}
if(sum(tests$evaluations$TestData$MGCVFIX) > 0) {output.LLT["MGCVFIX",1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$MGCVFIX)}
if(is.null(tests$evaluations$EARTH.C)==F) {output.C["EARTH",1] <- tests$evaluations$EARTH.C@auc}
if(is.null(tests$evaluations$EARTH.T)==F) {output.T["EARTH",1] <- tests$evaluations$EARTH.T@auc}
if(sum(tests$evaluations$TrainData$EARTH) > 0) {output.LLC["EARTH",1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$EARTH)}
if(sum(tests$evaluations$TestData$EARTH) > 0) {output.LLT["EARTH",1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$EARTH)}
if(is.null(tests$evaluations$RPART.C)==F) {output.C["RPART",1] <- tests$evaluations$RPART.C@auc}
if(is.null(tests$evaluations$RPART.T)==F) {output.T["RPART",1] <- tests$evaluations$RPART.T@auc}
if(sum(tests$evaluations$TrainData$RPART) > 0) {output.LLC["RPART",1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$RPART)}
if(sum(tests$evaluations$TestData$RPART) > 0) {output.LLT["RPART",1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$RPART)}
if(is.null(tests$evaluations$NNET.C)==F) {output.C["NNET",1] <- tests$evaluations$NNET.C@auc}
if(is.null(tests$evaluations$NNET.T)==F) {output.T["NNET",1] <- tests$evaluations$NNET.T@auc}
if(sum(tests$evaluations$TrainData$NNET) > 0) {output.LLC["NNET",1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$NNET)}
if(sum(tests$evaluations$TestData$NNET) > 0) {output.LLT["NNET",1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$NNET)}
if(is.null(tests$evaluations$FDA.C)==F) {output.C["FDA",1] <- tests$evaluations$FDA.C@auc}
if(is.null(tests$evaluations$FDA.T)==F) {output.T["FDA",1] <- tests$evaluations$FDA.T@auc}
if(sum(tests$evaluations$TrainData$FDA) > 0) {output.LLC["FDA",1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$FDA)}
if(sum(tests$evaluations$TestData$FDA) > 0) {output.LLT["FDA",1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$FDA)}
if(is.null(tests$evaluations$SVM.C)==F) {output.C["SVM",1] <- tests$evaluations$SVM.C@auc}
if(is.null(tests$evaluations$SVM.T)==F) {output.T["SVM",1] <- tests$evaluations$SVM.T@auc}
if(sum(tests$evaluations$TrainData$SVM) > 0) {output.LLC["SVM",1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$SVM)}
if(sum(tests$evaluations$TestData$SVM) > 0) {output.LLT["SVM",1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$SVM)}
if(is.null(tests$evaluations$SVME.C)==F) {output.C["SVME",1] <- tests$evaluations$SVME.C@auc}
if(is.null(tests$evaluations$SVME.T)==F) {output.T["SVME",1] <- tests$evaluations$SVME.T@auc}
if(sum(tests$evaluations$TrainData$SVME) > 0) {output.LLC["SVME",1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$SVME)}
if(sum(tests$evaluations$TestData$SVME) > 0) {output.LLT["SVME",1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$SVME)}
if(is.null(tests$evaluations$GLMNET.C)==F) {output.C["GLMNET",1] <- tests$evaluations$GLMNET.C@auc}
if(is.null(tests$evaluations$GLMNET.T)==F) {output.T["GLMNET",1] <- tests$evaluations$GLMNET.T@auc}
if(sum(tests$evaluations$TrainData$GLMNET) > 0) {output.LLC["GLMNET",1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$GLMNET)}
if(sum(tests$evaluations$TestData$GLMNET) > 0) {output.LLT["GLMNET",1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$GLMNET)}
if(is.null(tests$evaluations$BIOCLIM.O.C)==F) {output.C["BIOCLIM.O",1] <- tests$evaluations$BIOCLIM.O.C@auc}
if(is.null(tests$evaluations$BIOCLIM.O.T)==F) {output.T["BIOCLIM.O",1] <- tests$evaluations$BIOCLIM.O.T@auc}
if(sum(tests$evaluations$TrainData$BIOCLIM.O) > 0) {output.LLC["BIOCLIM.O",1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$BIOCLIM.O)}
if(sum(tests$evaluations$TestData$BIOCLIM.O) > 0) {output.LLT["BIOCLIM.O",1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$BIOCLIM.O)}
if(is.null(tests$evaluations$BIOCLIM.C)==F) {output.C["BIOCLIM",1] <- tests$evaluations$BIOCLIM.C@auc}
if(is.null(tests$evaluations$BIOCLIM.T)==F) {output.T["BIOCLIM",1] <- tests$evaluations$BIOCLIM.T@auc}
if(sum(tests$evaluations$TrainData$BIOCLIM) > 0) {output.LLC["BIOCLIM",1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$BIOCLIM)}
if(sum(tests$evaluations$TestData$BIOCLIM) > 0) {output.LLT["BIOCLIM",1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$BIOCLIM)}
if(is.null(tests$evaluations$DOMAIN.C)==F) {output.C["DOMAIN",1] <- tests$evaluations$DOMAIN.C@auc}
if(is.null(tests$evaluations$DOMAIN.T)==F) {output.T["DOMAIN",1] <- tests$evaluations$DOMAIN.T@auc}
if(sum(tests$evaluations$TrainData$DOMAIN) > 0) {output.LLC["DOMAIN",1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$DOMAIN)}
if(sum(tests$evaluations$TestData$DOMAIN) > 0) {output.LLT["DOMAIN",1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$DOMAIN)}
if(is.null(tests$evaluations$MAHAL.C)==F) {output.C["MAHAL",1] <- tests$evaluations$MAHAL.C@auc}
if(is.null(tests$evaluations$MAHAL.T)==F) {output.T["MAHAL",1] <- tests$evaluations$MAHAL.T@auc}
if(sum(tests$evaluations$TrainData$MAHAL) > 0) {output.LLC["MAHAL",1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$MAHAL)}
if(sum(tests$evaluations$TestData$MAHAL) > 0) {output.LLT["MAHAL",1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$MAHAL)}
if(is.null(tests$evaluations$MAHAL01.C)==F) {output.C["MAHAL01",1] <- tests$evaluations$MAHAL01.C@auc}
if(is.null(tests$evaluations$MAHAL01.T)==F) {output.T["MAHAL01",1] <- tests$evaluations$MAHAL01.T@auc}
if(sum(tests$evaluations$TrainData$MAHAL01) > 0) {output.LLC["MAHAL01",1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$MAHAL01)}
if(sum(tests$evaluations$TestData$MAHAL01) > 0) {output.LLT["MAHAL01",1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$MAHAL01)}
if(is.null(tests$evaluations$ENSEMBLE.C)==F) {output.C["ENSEMBLE",1] <- tests$evaluations$ENSEMBLE.C@auc}
if(is.null(tests$evaluations$ENSEMBLE.T)==F) {output.T["ENSEMBLE",1] <- tests$evaluations$ENSEMBLE.T@auc}
if(sum(tests$evaluations$TrainData$ENSEMBLE) > 0) {output.LLC["ENSEMBLE",1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$ENSEMBLE)}
if(sum(tests$evaluations$TestData$ENSEMBLE) > 0) {output.LLT["ENSEMBLE",1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$ENSEMBLE)}
# sequentially leave out the focal variable, then fit again
# the data sets are used from the "full" model
var.names2 <- c("pb", var.names)
TrainData1 <- tests$evaluations$TrainData
TrainData1 <- TrainData1[, which(names(TrainData1) %in% var.names2), drop=F]
glm1 <- glm(as.formula("pb~1"), data=TrainData1, family="binomial")
preval.dev.cal <- glm1$deviance
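# preval.dev.cal: deviance of an intercept-only (prevalence-only) model
# fitted to the calibration data, i.e. the null deviance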
# calculate worst possible model (predict absence where present and vice versa)
numpres <- sum(TrainData1[, "pb"])
numabs <- nrow(TrainData1) - numpres
obs1 <- TrainData1[, "pb"]
pred1 <- rep(0.000000001, numpres)
pred2 <- rep(0.999999999, numabs)
pred3 <- c(pred1, pred2)
null.dev.cal <- loglik.calculation(obs=obs1, preds=pred3)
TestData1 <- tests$evaluations$TestData
TestData1 <- TestData1[, which(names(TestData1) %in% var.names2), drop=F]
glm2 <- glm(as.formula("pb~1"), data=TestData1, family="binomial")
preval.dev.test <- glm2$deviance
# calculate worst possible model (predict absence where present and vice versa)
numpres <- sum(TestData1[, "pb"])
numabs <- nrow(TestData1) - numpres
obs1 <- TestData1[, "pb"]
pred1 <- rep(0.000000001, numpres)
pred2 <- rep(0.999999999, numabs)
pred3 <- c(pred1, pred2)
null.dev.test <- loglik.calculation(obs=obs1, preds=pred3)
MAXENT.a <- tests$evaluations$MAXENT.a
for (i in 1:nv) {
var.f <- var.names[i]
cat(paste("\n", "2.", i, ". Leaving out variable: ", var.f, "\n\n", sep = ""))
TrainData2 <- TrainData1[, which(names(TrainData1) != var.f), drop=F]
TestData2 <- TestData1[, which(names(TestData1) != var.f), drop=F]
factors2 <- NULL
if (is.null(factors) == F) {
factors2 <- factors[which(factors != var.f)]
if (length(factors2) == 0) {factors2 <- NULL}
}
dummy.vars2 <- NULL
if (is.null(dummy.vars) == F) {
dummy.vars2 <- dummy.vars[which(dummy.vars != var.f)]
if (length(dummy.vars2) == 0) {dummy.vars2 <- NULL}
}
if (is.null(layer.drops) == T) {
layer.drops2 <- var.f
}else{
layer.drops2 <- c(layer.drops, var.f)
}
x2 <- raster::dropLayer(x, which(names(x) %in% layer.drops2))
x2 <- raster::stack(x2)
if (raster::nlayers(x2) == 0) {
MAXENT <- 0
MAXLIKE <- 0
}
tests <- ensemble.calibrate.models(x=x2,
p=p1, a=a1, an=an, excludep=excludep,
k=k, pt=p2, at=a2, SSB.reduce=FALSE, CIRCLES.d=CIRCLES.d,
TrainData=TrainData2, TestData=TestData2,
PLOTS=FALSE, evaluations.keep=T,
VIF=VIF, COR=COR,
formulae.defaults=T, maxit=maxit,
ENSEMBLE.tune=ENSEMBLE.tune,
ENSEMBLE.best=ENSEMBLE.best, ENSEMBLE.min=ENSEMBLE.min,
ENSEMBLE.exponent=ENSEMBLE.exponent,
input.weights=input.weights,
MAXENT=MAXENT, MAXNET=MAXNET, MAXLIKE=MAXLIKE, GBM=GBM, GBMSTEP=GBMSTEP, RF=RF, CF=CF,
GLM=GLM, GLMSTEP=GLMSTEP, GAM=GAM, GAMSTEP=GAMSTEP, MGCV=MGCV, MGCVFIX=MGCVFIX,
EARTH=EARTH, RPART=RPART, NNET=NNET, FDA=FDA, SVM=SVM, SVME=SVME, GLMNET=GLMNET,
BIOCLIM.O=BIOCLIM.O, BIOCLIM=BIOCLIM, DOMAIN=DOMAIN,
MAHAL=MAHAL, MAHAL01=MAHAL01,
PROBIT=PROBIT,
Yweights=Yweights,
layer.drops=layer.drops2, factors=factors2, dummy.vars=dummy.vars2,
MAXENT.a=MAXENT.a, MAXENT.path=MAXENT.path,
MAXNET.classes=MAXNET.classes, MAXNET.clamp=MAXNET.clamp, MAXNET.type=MAXNET.type,
MAXLIKE.formula=NULL, MAXLIKE.method=MAXLIKE.method,
GBM.formula=NULL, GBM.n.trees=GBM.n.trees,
GBMSTEP.tree.complexity=GBMSTEP.tree.complexity,
GBMSTEP.learning.rate=GBMSTEP.learning.rate, GBMSTEP.bag.fraction=GBMSTEP.bag.fraction,
GBMSTEP.step.size=GBMSTEP.step.size,
RF.formula=NULL, RF.ntree=RF.ntree,
CF.formula=NULL, CF.ntree=CF.ntree,
GLM.formula=NULL, GLM.family=GLM.family,
GLMSTEP.k=GLMSTEP.k, GLMSTEP.steps=GLMSTEP.steps, STEP.formula=NULL, GLMSTEP.scope=NULL,
GAM.formula=NULL, GAM.family=GAM.family,
GAMSTEP.steps=GAMSTEP.steps, GAMSTEP.scope=NULL, GAMSTEP.pos=GAMSTEP.pos,
MGCV.formula=NULL, MGCV.select=MGCV.select,
MGCVFIX.formula=NULL,
EARTH.formula=NULL, EARTH.glm=EARTH.glm,
RPART.formula=NULL, RPART.xval=RPART.xval,
NNET.formula=NULL, NNET.size=NNET.size, NNET.decay=NNET.decay,
FDA.formula=NULL, SVM.formula=NULL, SVME.formula=NULL,
GLMNET.nlambda=GLMNET.nlambda,
BIOCLIM.O.fraction=BIOCLIM.O.fraction,
MAHAL.shape=MAHAL.shape)
if(is.null(tests$evaluations$MAXENT.C)==F) {output.C["MAXENT",i+1] <- tests$evaluations$MAXENT.C@auc}
if(is.null(tests$evaluations$MAXENT.T)==F) {output.T["MAXENT",i+1] <- tests$evaluations$MAXENT.T@auc}
if(sum(tests$evaluations$TrainData$MAXENT) > 0) {output.LLC["MAXENT",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$MAXENT)}
if(sum(tests$evaluations$TestData$MAXENT) > 0) {output.LLT["MAXENT",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$MAXENT)}
if(is.null(tests$evaluations$MAXNET.C)==F) {output.C["MAXNET",i+1] <- tests$evaluations$MAXNET.C@auc}
if(is.null(tests$evaluations$MAXNET.T)==F) {output.T["MAXNET",i+1] <- tests$evaluations$MAXNET.T@auc}
if(sum(tests$evaluations$TrainData$MAXNET) > 0) {output.LLC["MAXNET",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$MAXNET)}
if(sum(tests$evaluations$TestData$MAXNET) > 0) {output.LLT["MAXNET",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$MAXNET)}
if(is.null(tests$evaluations$MAXLIKE.C)==F) {output.C["MAXLIKE",i+1] <- tests$evaluations$MAXLIKE.C@auc}
if(is.null(tests$evaluations$MAXLIKE.T)==F) {output.T["MAXLIKE",i+1] <- tests$evaluations$MAXLIKE.T@auc}
if(sum(tests$evaluations$TrainData$MAXLIKE) > 0) {output.LLC["MAXLIKE",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$MAXLIKE)}
if(sum(tests$evaluations$TestData$MAXLIKE) > 0) {output.LLT["MAXLIKE",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$MAXLIKE)}
if(is.null(tests$evaluations$GBM.C)==F) {output.C["GBM",i+1] <- tests$evaluations$GBM.C@auc}
if(is.null(tests$evaluations$GBM.T)==F) {output.T["GBM",i+1] <- tests$evaluations$GBM.T@auc}
if(sum(tests$evaluations$TrainData$GBM) > 0) {output.LLC["GBM",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$GBM)}
if(sum(tests$evaluations$TestData$GBM) > 0) {output.LLT["GBM",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$GBM)}
if(is.null(tests$evaluations$GBMSTEP.C)==F) {output.C["GBMSTEP",i+1] <- tests$evaluations$GBMSTEP.C@auc}
if(is.null(tests$evaluations$GBMSTEP.T)==F) {output.T["GBMSTEP",i+1] <- tests$evaluations$GBMSTEP.T@auc}
if(sum(tests$evaluations$TrainData$GBMSTEP) > 0) {output.LLC["GBMSTEP",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$GBMSTEP)}
if(sum(tests$evaluations$TestData$GBMSTEP) > 0) {output.LLT["GBMSTEP",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$GBMSTEP)}
if(is.null(tests$evaluations$RF.C)==F) {output.C["RF",i+1] <- tests$evaluations$RF.C@auc}
if(is.null(tests$evaluations$RF.T)==F) {output.T["RF",i+1] <- tests$evaluations$RF.T@auc}
if(sum(tests$evaluations$TrainData$RF) > 0) {output.LLC["RF",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$RF)}
if(sum(tests$evaluations$TestData$RF) > 0) {output.LLT["RF",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$RF)}
if(is.null(tests$evaluations$CF.C)==F) {output.C["CF",i+1] <- tests$evaluations$CF.C@auc}
if(is.null(tests$evaluations$CF.T)==F) {output.T["CF",i+1] <- tests$evaluations$CF.T@auc}
if(sum(tests$evaluations$TrainData$CF) > 0) {output.LLC["CF",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$CF)}
if(sum(tests$evaluations$TestData$CF) > 0) {output.LLT["CF",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$CF)}
if(is.null(tests$evaluations$GLM.C)==F) {output.C["GLM",i+1] <- tests$evaluations$GLM.C@auc}
if(is.null(tests$evaluations$GLM.T)==F) {output.T["GLM",i+1] <- tests$evaluations$GLM.T@auc}
if(sum(tests$evaluations$TrainData$GLM) > 0) {output.LLC["GLM",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$GLM)}
if(sum(tests$evaluations$TestData$GLM) > 0) {output.LLT["GLM",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$GLM)}
if(is.null(tests$evaluations$GLMS.C)==F) {output.C["GLMSTEP",i+1] <- tests$evaluations$GLMS.C@auc}
if(is.null(tests$evaluations$GLMS.T)==F) {output.T["GLMSTEP",i+1] <- tests$evaluations$GLMS.T@auc}
if(sum(tests$evaluations$TrainData$GLMSTEP) > 0) {output.LLC["GLMSTEP",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$GLMSTEP)}
if(sum(tests$evaluations$TestData$GLMSTEP) > 0) {output.LLT["GLMSTEP",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$GLMSTEP)}
if(is.null(tests$evaluations$GAM.C)==F) {output.C["GAM",i+1] <- tests$evaluations$GAM.C@auc}
if(is.null(tests$evaluations$GAM.T)==F) {output.T["GAM",i+1] <- tests$evaluations$GAM.T@auc}
if(sum(tests$evaluations$TrainData$GAM) > 0) {output.LLC["GAM",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$GAM)}
if(sum(tests$evaluations$TestData$GAM) > 0) {output.LLT["GAM",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$GAM)}
if(is.null(tests$evaluations$GAMS.C)==F) {output.C["GAMSTEP",i+1] <- tests$evaluations$GAMS.C@auc}
if(is.null(tests$evaluations$GAMS.T)==F) {output.T["GAMSTEP",i+1] <- tests$evaluations$GAMS.T@auc}
if(sum(tests$evaluations$TrainData$GAMSTEP) > 0) {output.LLC["GAMSTEP",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$GAMSTEP)}
if(sum(tests$evaluations$TestData$GAMSTEP) > 0) {output.LLT["GAMSTEP",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$GAMSTEP)}
if(is.null(tests$evaluations$MGCV.C)==F) {output.C["MGCV",i+1] <- tests$evaluations$MGCV.C@auc}
if(is.null(tests$evaluations$MGCV.T)==F) {output.T["MGCV",i+1] <- tests$evaluations$MGCV.T@auc}
if(sum(tests$evaluations$TrainData$MGCV) > 0) {output.LLC["MGCV",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$MGCV)}
if(sum(tests$evaluations$TestData$MGCV) > 0) {output.LLT["MGCV",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$MGCV)}
if(is.null(tests$evaluations$MGCVF.C)==F) {output.C["MGCVFIX",i+1] <- tests$evaluations$MGCVF.C@auc}
if(is.null(tests$evaluations$MGCVF.T)==F) {output.T["MGCVFIX",i+1] <- tests$evaluations$MGCVF.T@auc}
if(sum(tests$evaluations$TrainData$MGCVFIX) > 0) {output.LLC["MGCVFIX",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$MGCVFIX)}
if(sum(tests$evaluations$TestData$MGCVFIX) > 0) {output.LLT["MGCVFIX",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$MGCVFIX)}
if(is.null(tests$evaluations$EARTH.C)==F) {output.C["EARTH",i+1] <- tests$evaluations$EARTH.C@auc}
if(is.null(tests$evaluations$EARTH.T)==F) {output.T["EARTH",i+1] <- tests$evaluations$EARTH.T@auc}
if(sum(tests$evaluations$TrainData$EARTH) > 0) {output.LLC["EARTH",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$EARTH)}
if(sum(tests$evaluations$TestData$EARTH) > 0) {output.LLT["EARTH",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$EARTH)}
if(is.null(tests$evaluations$RPART.C)==F) {output.C["RPART",i+1] <- tests$evaluations$RPART.C@auc}
if(is.null(tests$evaluations$RPART.T)==F) {output.T["RPART",i+1] <- tests$evaluations$RPART.T@auc}
if(sum(tests$evaluations$TrainData$RPART) > 0) {output.LLC["RPART",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$RPART)}
if(sum(tests$evaluations$TestData$RPART) > 0) {output.LLT["RPART",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$RPART)}
if(is.null(tests$evaluations$NNET.C)==F) {output.C["NNET",i+1] <- tests$evaluations$NNET.C@auc}
if(is.null(tests$evaluations$NNET.T)==F) {output.T["NNET",i+1] <- tests$evaluations$NNET.T@auc}
if(sum(tests$evaluations$TrainData$NNET) > 0) {output.LLC["NNET",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$NNET)}
if(sum(tests$evaluations$TestData$NNET) > 0) {output.LLT["NNET",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$NNET)}
if(is.null(tests$evaluations$FDA.C)==F) {output.C["FDA",i+1] <- tests$evaluations$FDA.C@auc}
if(is.null(tests$evaluations$FDA.T)==F) {output.T["FDA",i+1] <- tests$evaluations$FDA.T@auc}
if(sum(tests$evaluations$TrainData$FDA) > 0) {output.LLC["FDA",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$FDA)}
if(sum(tests$evaluations$TestData$FDA) > 0) {output.LLT["FDA",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$FDA)}
if(is.null(tests$evaluations$SVM.C)==F) {output.C["SVM",i+1] <- tests$evaluations$SVM.C@auc}
if(is.null(tests$evaluations$SVM.T)==F) {output.T["SVM",i+1] <- tests$evaluations$SVM.T@auc}
if(sum(tests$evaluations$TrainData$SVM) > 0) {output.LLC["SVM",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$SVM)}
if(sum(tests$evaluations$TestData$SVM) > 0) {output.LLT["SVM",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$SVM)}
if(is.null(tests$evaluations$SVME.C)==F) {output.C["SVME",i+1] <- tests$evaluations$SVME.C@auc}
if(is.null(tests$evaluations$SVME.T)==F) {output.T["SVME",i+1] <- tests$evaluations$SVME.T@auc}
if(sum(tests$evaluations$TrainData$SVME) > 0) {output.LLC["SVME",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$SVME)}
if(sum(tests$evaluations$TestData$SVME) > 0) {output.LLT["SVME",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$SVME)}
if(is.null(tests$evaluations$GLMNET.C)==F) {output.C["GLMNET",i+1] <- tests$evaluations$GLMNET.C@auc}
if(is.null(tests$evaluations$GLMNET.T)==F) {output.T["GLMNET",i+1] <- tests$evaluations$GLMNET.T@auc}
if(sum(tests$evaluations$TrainData$GLMNET) > 0) {output.LLC["GLMNET",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$GLMNET)}
if(sum(tests$evaluations$TestData$GLMNET) > 0) {output.LLT["GLMNET",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$GLMNET)}
if(is.null(tests$evaluations$BIOCLIM.O.C)==F) {output.C["BIOCLIM.O",i+1] <- tests$evaluations$BIOCLIM.O.C@auc}
if(is.null(tests$evaluations$BIOCLIM.O.T)==F) {output.T["BIOCLIM.O",i+1] <- tests$evaluations$BIOCLIM.O.T@auc}
if(sum(tests$evaluations$TrainData$BIOCLIM.O) > 0) {output.LLC["BIOCLIM.O",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$BIOCLIM.O)}
if(sum(tests$evaluations$TestData$BIOCLIM.O) > 0) {output.LLT["BIOCLIM.O",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$BIOCLIM.O)}
if(is.null(tests$evaluations$BIOCLIM.C)==F) {output.C["BIOCLIM",i+1] <- tests$evaluations$BIOCLIM.C@auc}
if(is.null(tests$evaluations$BIOCLIM.T)==F) {output.T["BIOCLIM",i+1] <- tests$evaluations$BIOCLIM.T@auc}
if(sum(tests$evaluations$TrainData$BIOCLIM) > 0) {output.LLC["BIOCLIM",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$BIOCLIM)}
if(sum(tests$evaluations$TestData$BIOCLIM) > 0) {output.LLT["BIOCLIM",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$BIOCLIM)}
if(is.null(tests$evaluations$DOMAIN.C)==F) {output.C["DOMAIN",i+1] <- tests$evaluations$DOMAIN.C@auc}
if(is.null(tests$evaluations$DOMAIN.T)==F) {output.T["DOMAIN",i+1] <- tests$evaluations$DOMAIN.T@auc}
if(sum(tests$evaluations$TrainData$DOMAIN) > 0) {output.LLC["DOMAIN",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$DOMAIN)}
if(sum(tests$evaluations$TestData$DOMAIN) > 0) {output.LLT["DOMAIN",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$DOMAIN)}
if(is.null(tests$evaluations$MAHAL.C)==F) {output.C["MAHAL",i+1] <- tests$evaluations$MAHAL.C@auc}
if(is.null(tests$evaluations$MAHAL.T)==F) {output.T["MAHAL",i+1] <- tests$evaluations$MAHAL.T@auc}
if(sum(tests$evaluations$TrainData$MAHAL) > 0) {output.LLC["MAHAL",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$MAHAL)}
if(sum(tests$evaluations$TestData$MAHAL) > 0) {output.LLT["MAHAL",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$MAHAL)}
if(is.null(tests$evaluations$MAHAL01.C)==F) {output.C["MAHAL01",i+1] <- tests$evaluations$MAHAL01.C@auc}
if(is.null(tests$evaluations$MAHAL01.T)==F) {output.T["MAHAL01",i+1] <- tests$evaluations$MAHAL01.T@auc}
if(sum(tests$evaluations$TrainData$MAHAL01) > 0) {output.LLC["MAHAL01",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$MAHAL01)}
if(sum(tests$evaluations$TestData$MAHAL01) > 0) {output.LLT["MAHAL01",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$MAHAL01)}
if(is.null(tests$evaluations$ENSEMBLE.C)==F) {output.C["ENSEMBLE",i+1] <- tests$evaluations$ENSEMBLE.C@auc}
if(is.null(tests$evaluations$ENSEMBLE.T)==F) {output.T["ENSEMBLE",i+1] <- tests$evaluations$ENSEMBLE.T@auc}
if(sum(tests$evaluations$TrainData$ENSEMBLE) > 0) {output.LLC["ENSEMBLE",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$ENSEMBLE)}
if(sum(tests$evaluations$TestData$ENSEMBLE) > 0) {output.LLT["ENSEMBLE",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$ENSEMBLE)}
}
if (variables.alone == T) {
## Models only using the focal variable
output1.C <- output.C
output1.T <- output.T
output1.LLC <- output.LLC
output1.LLT <- output.LLT
colnames(output1.C) <- colnames(output1.T) <- colnames(output1.LLC) <- colnames(output1.LLT) <- c("all_vars", paste("only_", var.names, sep=""))
for (i in 1:nv) {
var.f <- var.names[i]
var.f2 <- c("pb", var.f)
cat(paste("\n", "3.", i, ". Only using variable: ", var.f, "\n\n", sep = ""))
TrainData2 <- TrainData1[, which(names(TrainData1) %in% var.f2), drop=F]
TestData2 <- TestData1[, which(names(TestData1) %in% var.f2), drop=F]
factors2 <- NULL
if (is.null(factors) == F) {
factors2 <- factors[which(factors == var.f)]
if (length(factors2) == 0) {factors2 <- NULL}
}
dummy.vars2 <- NULL
if (is.null(dummy.vars) == F) {
dummy.vars2 <- dummy.vars[which(dummy.vars == var.f)]
if (length(dummy.vars2) == 0) {dummy.vars2 <- NULL}
}
var.f3 <- var.names[which(var.names != var.f)]
if (is.null(layer.drops) == T) {
layer.drops3 <- var.f3
}else{
layer.drops3 <- c(layer.drops, var.f3)
}
x3 <- raster::dropLayer(x, which(names(x) %in% layer.drops3))
x3 <- raster::stack(x3)
if (raster::nlayers(x3) == 0) {
MAXENT <- 0
MAXLIKE <- 0
}
if(var.f %in% factors) {
EARTH <- 0
GLMNET <- 0
}
tests <- ensemble.calibrate.models(x=x3,
p=p1, a=a1, an=an, excludep=excludep,
k=k, pt=p2, at=a2, SSB.reduce=SSB.reduce, CIRCLES.d=CIRCLES.d,
TrainData=TrainData2, TestData=TestData2,
PLOTS=FALSE, evaluations.keep=T,
VIF=VIF, COR=COR,
formulae.defaults=T, maxit=maxit,
ENSEMBLE.tune=ENSEMBLE.tune,
ENSEMBLE.best=ENSEMBLE.best, ENSEMBLE.min=ENSEMBLE.min,
ENSEMBLE.exponent=ENSEMBLE.exponent,
input.weights=input.weights,
MAXENT=MAXENT, MAXNET=MAXNET, MAXLIKE=MAXLIKE, GBM=GBM, GBMSTEP=GBMSTEP, RF=RF, CF=CF,
GLM=GLM, GLMSTEP=GLMSTEP, GAM=GAM, GAMSTEP=GAMSTEP, MGCV=MGCV, MGCVFIX=MGCVFIX,
EARTH=EARTH, RPART=RPART, NNET=NNET, FDA=FDA, SVM=SVM, SVME=SVME, GLMNET=GLMNET,
BIOCLIM.O=BIOCLIM.O, BIOCLIM=BIOCLIM, DOMAIN=DOMAIN,
MAHAL=MAHAL, MAHAL01=MAHAL01,
PROBIT=PROBIT,
Yweights=Yweights,
factors=factors2, dummy.vars=dummy.vars2,
MAXENT.a=MAXENT.a, MAXENT.path=MAXENT.path,
MAXNET.classes=MAXNET.classes, MAXNET.clamp=MAXNET.clamp, MAXNET.type=MAXNET.type,
MAXLIKE.formula=NULL, MAXLIKE.method=MAXLIKE.method,
GBM.formula=NULL, GBM.n.trees=GBM.n.trees,
GBMSTEP.tree.complexity=GBMSTEP.tree.complexity,
GBMSTEP.learning.rate=GBMSTEP.learning.rate, GBMSTEP.bag.fraction=GBMSTEP.bag.fraction,
GBMSTEP.step.size=GBMSTEP.step.size,
RF.formula=NULL, RF.ntree=RF.ntree,
CF.formula=NULL, CF.ntree=CF.ntree,
GLM.formula=NULL, GLM.family=GLM.family,
GLMSTEP.k=GLMSTEP.k, GLMSTEP.steps=GLMSTEP.steps, STEP.formula=NULL, GLMSTEP.scope=NULL,
GAM.formula=NULL, GAM.family=GAM.family,
GAMSTEP.steps=GAMSTEP.steps, GAMSTEP.scope=NULL, GAMSTEP.pos=GAMSTEP.pos,
MGCV.formula=NULL, MGCV.select=MGCV.select,
MGCVFIX.formula=NULL,
EARTH.formula=NULL, EARTH.glm=EARTH.glm,
RPART.formula=NULL, RPART.xval=RPART.xval,
NNET.formula=NULL, NNET.size=NNET.size, NNET.decay=NNET.decay,
FDA.formula=NULL, SVM.formula=NULL, SVME.formula=NULL,
GLMNET.nlambda=GLMNET.nlambda,
BIOCLIM.O.fraction=BIOCLIM.O.fraction,
MAHAL.shape=MAHAL.shape)
if(is.null(tests$evaluations$MAXENT.C)==F) {output1.C["MAXENT",i+1] <- tests$evaluations$MAXENT.C@auc}
if(is.null(tests$evaluations$MAXENT.T)==F) {output1.T["MAXENT",i+1] <- tests$evaluations$MAXENT.T@auc}
if(sum(tests$evaluations$TrainData$MAXENT) > 0) {output1.LLC["MAXENT",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$MAXENT)}
if(sum(tests$evaluations$TestData$MAXENT) > 0) {output1.LLT["MAXENT",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$MAXENT)}
if(is.null(tests$evaluations$MAXNET.C)==F) {output1.C["MAXNET",i+1] <- tests$evaluations$MAXNET.C@auc}
if(is.null(tests$evaluations$MAXNET.T)==F) {output1.T["MAXNET",i+1] <- tests$evaluations$MAXNET.T@auc}
if(sum(tests$evaluations$TrainData$MAXNET) > 0) {output1.LLC["MAXNET",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$MAXNET)}
if(sum(tests$evaluations$TestData$MAXNET) > 0) {output1.LLT["MAXNET",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$MAXNET)}
if(is.null(tests$evaluations$MAXLIKE.C)==F) {output1.C["MAXLIKE",i+1] <- tests$evaluations$MAXLIKE.C@auc}
if(is.null(tests$evaluations$MAXLIKE.T)==F) {output1.T["MAXLIKE",i+1] <- tests$evaluations$MAXLIKE.T@auc}
if(sum(tests$evaluations$TrainData$MAXLIKE) > 0) {output1.LLC["MAXLIKE",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$MAXLIKE)}
if(sum(tests$evaluations$TestData$MAXLIKE) > 0) {output1.LLT["MAXLIKE",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$MAXLIKE)}
if(is.null(tests$evaluations$GBM.C)==F) {output1.C["GBM",i+1] <- tests$evaluations$GBM.C@auc}
if(is.null(tests$evaluations$GBM.T)==F) {output1.T["GBM",i+1] <- tests$evaluations$GBM.T@auc}
if(sum(tests$evaluations$TrainData$GBM) > 0) {output1.LLC["GBM",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$GBM)}
if(sum(tests$evaluations$TestData$GBM) > 0) {output1.LLT["GBM",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$GBM)}
if(is.null(tests$evaluations$GBMSTEP.C)==F) {output1.C["GBMSTEP",i+1] <- tests$evaluations$GBMSTEP.C@auc}
if(is.null(tests$evaluations$GBMSTEP.T)==F) {output1.T["GBMSTEP",i+1] <- tests$evaluations$GBMSTEP.T@auc}
if(sum(tests$evaluations$TrainData$GBMSTEP) > 0) {output1.LLC["GBMSTEP",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$GBMSTEP)}
if(sum(tests$evaluations$TestData$GBMSTEP) > 0) {output1.LLT["GBMSTEP",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$GBMSTEP)}
if(is.null(tests$evaluations$RF.C)==F) {output1.C["RF",i+1] <- tests$evaluations$RF.C@auc}
if(is.null(tests$evaluations$RF.T)==F) {output1.T["RF",i+1] <- tests$evaluations$RF.T@auc}
if(sum(tests$evaluations$TrainData$RF) > 0) {output1.LLC["RF",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$RF)}
if(sum(tests$evaluations$TestData$RF) > 0) {output1.LLT["RF",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$RF)}
if(is.null(tests$evaluations$CF.C)==F) {output1.C["CF",i+1] <- tests$evaluations$CF.C@auc}
if(is.null(tests$evaluations$CF.T)==F) {output1.T["CF",i+1] <- tests$evaluations$CF.T@auc}
if(sum(tests$evaluations$TrainData$CF) > 0) {output1.LLC["CF",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$CF)}
if(sum(tests$evaluations$TestData$CF) > 0) {output1.LLT["CF",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$CF)}
if(is.null(tests$evaluations$GLM.C)==F) {output1.C["GLM",i+1] <- tests$evaluations$GLM.C@auc}
if(is.null(tests$evaluations$GLM.T)==F) {output1.T["GLM",i+1] <- tests$evaluations$GLM.T@auc}
if(sum(tests$evaluations$TrainData$GLM) > 0) {output1.LLC["GLM",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$GLM)}
if(sum(tests$evaluations$TestData$GLM) > 0) {output1.LLT["GLM",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$GLM)}
if(is.null(tests$evaluations$GLMS.C)==F) {output1.C["GLMSTEP",i+1] <- tests$evaluations$GLMS.C@auc}
if(is.null(tests$evaluations$GLMS.T)==F) {output1.T["GLMSTEP",i+1] <- tests$evaluations$GLMS.T@auc}
if(sum(tests$evaluations$TrainData$GLMSTEP) > 0) {output1.LLC["GLMSTEP",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$GLMSTEP)}
if(sum(tests$evaluations$TestData$GLMSTEP) > 0) {output1.LLT["GLMSTEP",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$GLMSTEP)}
if(is.null(tests$evaluations$GAM.C)==F) {output1.C["GAM",i+1] <- tests$evaluations$GAM.C@auc}
if(is.null(tests$evaluations$GAM.T)==F) {output1.T["GAM",i+1] <- tests$evaluations$GAM.T@auc}
if(sum(tests$evaluations$TrainData$GAM) > 0) {output1.LLC["GAM",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$GAM)}
if(sum(tests$evaluations$TestData$GAM) > 0) {output1.LLT["GAM",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$GAM)}
if(is.null(tests$evaluations$GAMS.C)==F) {output1.C["GAMSTEP",i+1] <- tests$evaluations$GAMS.C@auc}
if(is.null(tests$evaluations$GAMS.T)==F) {output1.T["GAMSTEP",i+1] <- tests$evaluations$GAMS.T@auc}
if(sum(tests$evaluations$TrainData$GAMSTEP) > 0) {output1.LLC["GAMSTEP",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$GAMSTEP)}
if(sum(tests$evaluations$TestData$GAMSTEP) > 0) {output1.LLT["GAMSTEP",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$GAMSTEP)}
if(is.null(tests$evaluations$MGCV.C)==F) {output1.C["MGCV",i+1] <- tests$evaluations$MGCV.C@auc}
if(is.null(tests$evaluations$MGCV.T)==F) {output1.T["MGCV",i+1] <- tests$evaluations$MGCV.T@auc}
if(sum(tests$evaluations$TrainData$MGCV) > 0) {output1.LLC["MGCV",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$MGCV)}
if(sum(tests$evaluations$TestData$MGCV) > 0) {output1.LLT["MGCV",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$MGCV)}
if(is.null(tests$evaluations$MGCVF.C)==F) {output1.C["MGCVFIX",i+1] <- tests$evaluations$MGCVF.C@auc}
if(is.null(tests$evaluations$MGCVF.T)==F) {output1.T["MGCVFIX",i+1] <- tests$evaluations$MGCVF.T@auc}
if(sum(tests$evaluations$TrainData$MGCVFIX) > 0) {output1.LLC["MGCVFIX",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$MGCVFIX)}
if(sum(tests$evaluations$TestData$MGCVFIX) > 0) {output1.LLT["MGCVFIX",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$MGCVFIX)}
if(is.null(tests$evaluations$EARTH.C)==F) {output1.C["EARTH",i+1] <- tests$evaluations$EARTH.C@auc}
if(is.null(tests$evaluations$EARTH.T)==F) {output1.T["EARTH",i+1] <- tests$evaluations$EARTH.T@auc}
if(sum(tests$evaluations$TrainData$EARTH) > 0) {output1.LLC["EARTH",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$EARTH)}
if(sum(tests$evaluations$TestData$EARTH) > 0) {output1.LLT["EARTH",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$EARTH)}
if(is.null(tests$evaluations$RPART.C)==F) {output1.C["RPART",i+1] <- tests$evaluations$RPART.C@auc}
if(is.null(tests$evaluations$RPART.T)==F) {output1.T["RPART",i+1] <- tests$evaluations$RPART.T@auc}
if(sum(tests$evaluations$TrainData$RPART) > 0) {output1.LLC["RPART",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$RPART)}
if(sum(tests$evaluations$TestData$RPART) > 0) {output1.LLT["RPART",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$RPART)}
if(is.null(tests$evaluations$NNET.C)==F) {output1.C["NNET",i+1] <- tests$evaluations$NNET.C@auc}
if(is.null(tests$evaluations$NNET.T)==F) {output1.T["NNET",i+1] <- tests$evaluations$NNET.T@auc}
if(sum(tests$evaluations$TrainData$NNET) > 0) {output1.LLC["NNET",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$NNET)}
if(sum(tests$evaluations$TestData$NNET) > 0) {output1.LLT["NNET",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$NNET)}
if(is.null(tests$evaluations$FDA.C)==F) {output1.C["FDA",i+1] <- tests$evaluations$FDA.C@auc}
if(is.null(tests$evaluations$FDA.T)==F) {output1.T["FDA",i+1] <- tests$evaluations$FDA.T@auc}
if(sum(tests$evaluations$TrainData$FDA) > 0) {output1.LLC["FDA",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$FDA)}
if(sum(tests$evaluations$TestData$FDA) > 0) {output1.LLT["FDA",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$FDA)}
if(is.null(tests$evaluations$SVM.C)==F) {output1.C["SVM",i+1] <- tests$evaluations$SVM.C@auc}
if(is.null(tests$evaluations$SVM.T)==F) {output1.T["SVM",i+1] <- tests$evaluations$SVM.T@auc}
if(sum(tests$evaluations$TrainData$SVM) > 0) {output1.LLC["SVM",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$SVM)}
if(sum(tests$evaluations$TestData$SVM) > 0) {output1.LLT["SVM",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$SVM)}
if(is.null(tests$evaluations$SVME.C)==F) {output1.C["SVME",i+1] <- tests$evaluations$SVME.C@auc}
if(is.null(tests$evaluations$SVME.T)==F) {output1.T["SVME",i+1] <- tests$evaluations$SVME.T@auc}
if(sum(tests$evaluations$TrainData$SVME) > 0) {output1.LLC["SVME",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$SVME)}
if(sum(tests$evaluations$TestData$SVME) > 0) {output1.LLT["SVME",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$SVME)}
if(is.null(tests$evaluations$GLMNET.C)==F) {output1.C["GLMNET",i+1] <- tests$evaluations$GLMNET.C@auc}
if(is.null(tests$evaluations$GLMNET.T)==F) {output1.T["GLMNET",i+1] <- tests$evaluations$GLMNET.T@auc}
if(sum(tests$evaluations$TrainData$GLMNET) > 0) {output1.LLC["GLMNET",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$GLMNET)}
if(sum(tests$evaluations$TestData$GLMNET) > 0) {output1.LLT["GLMNET",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$GLMNET)}
if(is.null(tests$evaluations$BIOCLIM.O.C)==F) {output1.C["BIOCLIM.O",i+1] <- tests$evaluations$BIOCLIM.O.C@auc}
if(is.null(tests$evaluations$BIOCLIM.O.T)==F) {output1.T["BIOCLIM.O",i+1] <- tests$evaluations$BIOCLIM.O.T@auc}
if(sum(tests$evaluations$TrainData$BIOCLIM.O) > 0) {output1.LLC["BIOCLIM.O",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$BIOCLIM.O)}
if(sum(tests$evaluations$TestData$BIOCLIM.O) > 0) {output1.LLT["BIOCLIM.O",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$BIOCLIM.O)}
if(is.null(tests$evaluations$BIOCLIM.C)==F) {output1.C["BIOCLIM",i+1] <- tests$evaluations$BIOCLIM.C@auc}
if(is.null(tests$evaluations$BIOCLIM.T)==F) {output1.T["BIOCLIM",i+1] <- tests$evaluations$BIOCLIM.T@auc}
if(sum(tests$evaluations$TrainData$BIOCLIM) > 0) {output1.LLC["BIOCLIM",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$BIOCLIM)}
if(sum(tests$evaluations$TestData$BIOCLIM) > 0) {output1.LLT["BIOCLIM",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$BIOCLIM)}
if(is.null(tests$evaluations$DOMAIN.C)==F) {output1.C["DOMAIN",i+1] <- tests$evaluations$DOMAIN.C@auc}
if(is.null(tests$evaluations$DOMAIN.T)==F) {output1.T["DOMAIN",i+1] <- tests$evaluations$DOMAIN.T@auc}
if(sum(tests$evaluations$TrainData$DOMAIN) > 0) {output1.LLC["DOMAIN",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$DOMAIN)}
if(sum(tests$evaluations$TestData$DOMAIN) > 0) {output1.LLT["DOMAIN",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$DOMAIN)}
if(is.null(tests$evaluations$MAHAL.C)==F) {output1.C["MAHAL",i+1] <- tests$evaluations$MAHAL.C@auc}
if(is.null(tests$evaluations$MAHAL.T)==F) {output1.T["MAHAL",i+1] <- tests$evaluations$MAHAL.T@auc}
if(sum(tests$evaluations$TrainData$MAHAL) > 0) {output1.LLC["MAHAL",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$MAHAL)}
if(sum(tests$evaluations$TestData$MAHAL) > 0) {output1.LLT["MAHAL",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$MAHAL)}
if(is.null(tests$evaluations$MAHAL01.C)==F) {output1.C["MAHAL01",i+1] <- tests$evaluations$MAHAL01.C@auc}
if(is.null(tests$evaluations$MAHAL01.T)==F) {output1.T["MAHAL01",i+1] <- tests$evaluations$MAHAL01.T@auc}
if(sum(tests$evaluations$TrainData$MAHAL01) > 0) {output1.LLC["MAHAL01",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$MAHAL01)}
if(sum(tests$evaluations$TestData$MAHAL01) > 0) {output1.LLT["MAHAL01",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$MAHAL01)}
if(is.null(tests$evaluations$ENSEMBLE.C)==F) {output1.C["ENSEMBLE",i+1] <- tests$evaluations$ENSEMBLE.C@auc}
if(is.null(tests$evaluations$ENSEMBLE.T)==F) {output1.T["ENSEMBLE",i+1] <- tests$evaluations$ENSEMBLE.T@auc}
if(sum(tests$evaluations$TrainData$ENSEMBLE) > 0) {output1.LLC["ENSEMBLE",i+1] <- loglik.calculation(obs=tests$evaluations$TrainData$pb, preds=tests$evaluations$TrainData$ENSEMBLE)}
if(sum(tests$evaluations$TestData$ENSEMBLE) > 0) {output1.LLT["ENSEMBLE",i+1] <- loglik.calculation(obs=tests$evaluations$TestData$pb, preds=tests$evaluations$TestData$ENSEMBLE)}
}
#
# end of variables alone loop
}
## Arrange the tables
output.C <- 100*output.C
if (difference == T) {
for (i in 1:nv) {
output.C[,i+1] <- output.C[,i+1] - output.C[,1]
}
}
output.C <- output.C[order(output.C[,"all_vars"], decreasing=T),]
cat(paste("\n", "AUC for calibration data (as percentage)", "\n\n", sep = ""))
if (difference == T) {
cat(paste("\n", "Results for variables show change from full models", sep = ""))
cat(paste("\n", "Note that positive differences indicate that the model without the variable", sep = ""))
cat(paste("\n", "has higher AUC than the model with all the variables", "\n\n", sep = ""))
}
print (output.C)
output.T <- 100*output.T
if (difference == T) {
for (i in 1:nv) {
output.T[,i+1] <- output.T[,i+1] - output.T[,1]
}
}
output.T <- output.T[order(output.T[,"all_vars"], decreasing=T),]
cat(paste("\n", "AUC for testing data (as percentage)", "\n\n", sep = ""))
if (difference == T) {
cat(paste("\n", "Results for variables show change from full models", sep = ""))
cat(paste("\n", "Note that positive differences indicate that the model without the variable", sep = ""))
cat(paste("\n", "has higher AUC than the model with all the variables", "\n\n", sep = ""))
}
print (output.T)
#
# base null model on predictions of prevalence
cat(paste("\n", "Null deviance for calibration data: ", preval.dev.cal, sep=""))
cat(paste("\n", "Null deviance for worst possible predictions of calibration data: ", null.dev.cal, sep=""))
cat(paste("\n", "Residual deviance for calibration data", "\n", sep = ""))
percentage.LLC <- output.LLC
if (difference == T) {
for (i in 1:nv) {
output.LLC[,i+1] <- output.LLC[,1] - output.LLC[,i+1]
}
cat(paste("\n", "Results for variables show change from full models", sep = ""))
}
output.LLC <- output.LLC[order(output.LLC[,"all_vars"], decreasing=F),]
cat(paste("\n\n"))
print (output.LLC)
cat(paste("\n", "Null deviance for testing data: ", preval.dev.test, sep=""))
cat(paste("\n", "Null deviance for worst possible predictions of testing data: ", null.dev.test, sep=""))
cat(paste("\n", "Residual deviance for testing data", "\n", sep = ""))
percentage.LLT <- output.LLT
if (difference == T) {
for (i in 1:nv) {
output.LLT[,i+1] <- output.LLT[,1] - output.LLT[,i+1]
}
cat(paste("\n", "Results for variables show change from full models", sep = ""))
}
output.LLT <- output.LLT[order(output.LLT[,"all_vars"], decreasing=F),]
cat(paste("\n\n"))
print (output.LLT)
for (i in 1:(1+nv)) {
percentage.LLC[, i] <- percentage.LLC[, i] - as.numeric(null.dev.cal)
percentage.LLC[, i] <- -100 * percentage.LLC[, i]
percentage.LLC[, i] <- percentage.LLC[, i] / as.numeric(null.dev.cal)
percentage.LLC[, i] <- round(percentage.LLC[, i], 2)
percentage.LLT[, i] <- percentage.LLT[, i] - as.numeric(null.dev.test)
percentage.LLT[, i] <- -100 * percentage.LLT[, i]
percentage.LLT[, i] <- percentage.LLT[, i] / as.numeric(null.dev.test)
percentage.LLT[, i] <- round(percentage.LLT[, i], 2)
}
if (difference == T) {
for (i in 1:nv) {
percentage.LLC[,i+1] <- percentage.LLC[,1] - percentage.LLC[,i+1]
percentage.LLT[,i+1] <- percentage.LLT[,1] - percentage.LLT[,i+1]
}
cat(paste("\n", "Results for variables show change from full models", sep = ""))
}
cat(paste("\n", "Percentage explained for calibration data", "\n\n", sep = ""))
percentage.LLC <- percentage.LLC[order(percentage.LLC[,"all_vars"], decreasing=F),]
print(percentage.LLC)
cat(paste("\n", "Percentage explained for testing data", "\n\n", sep = ""))
percentage.LLT <- percentage.LLT[order(percentage.LLT[,"all_vars"], decreasing=F),]
print (percentage.LLT)
## Models with one variable only
if (variables.alone == T) {
output1.C <- 100*output1.C
if (difference == T) {
for (i in 1:nv) {
output1.C[,i+1] <- output1.C[,i+1] - output1.C[,1]
}
}
output1.C <- output1.C[order(output1.C[,"all_vars"], decreasing=T),]
cat(paste("\n", "AUC for calibration data (as percentage)", "\n\n", sep = ""))
if (difference == T) {
cat(paste("\n", "Results for variables show change from full models", sep = ""))
cat(paste("\n", "Note that positive differences indicate that the model only with the variable", sep = ""))
cat(paste("\n", "has higher AUC than the model with all the variables", "\n\n", sep = ""))
}
print (output1.C)
output1.T <- 100*output1.T
if (difference == T) {
for (i in 1:nv) {
output1.T[,i+1] <- output1.T[,i+1] - output1.T[,1]
}
}
output1.T <- output1.T[order(output1.T[,"all_vars"], decreasing=T),]  # highest AUC first, consistent with the other AUC tables
cat(paste("\n", "AUC for testing data (as percentage)", "\n\n", sep = ""))
if (difference == T) {
cat(paste("\n", "Results for variables show change from full models", sep = ""))
cat(paste("\n", "Note that positive differences indicate that the model only with the variable", sep = ""))
cat(paste("\n", "has higher AUC than the model with all the variables", "\n\n", sep = ""))
}
print (output1.T)
#
# base null model on predictions of prevalence
cat(paste("\n", "Null deviance for calibration data: ", preval.dev.cal, sep=""))
cat(paste("\n", "Null deviance for worst possible predictions of calibration data: ", null.dev.cal, sep=""))
cat(paste("\n", "Residual deviance for calibration data", "\n", sep = ""))
percentage1.LLC <- output1.LLC
if (difference == T) {
for (i in 1:nv) {
output1.LLC[,i+1] <- output1.LLC[,1] - output1.LLC[,i+1]
}
cat(paste("\n", "Results for variables show change from full models", sep = ""))
}
output1.LLC <- output1.LLC[order(output1.LLC[,"all_vars"], decreasing=F),]
cat(paste("\n\n"))
print (output1.LLC)
cat(paste("\n", "Null deviance for testing data: ", preval.dev.test, sep=""))
cat(paste("\n", "Null deviance for worst possible predictions of testing data: ", null.dev.test, sep=""))
cat(paste("\n", "Residual deviance for testing data", "\n", sep = ""))
percentage1.LLT <- output1.LLT
if (difference == T) {
for (i in 1:nv) {
output1.LLT[,i+1] <- output1.LLT[,1] - output1.LLT[,i+1]
}
cat(paste("\n", "Results for variables show change from full models", sep = ""))
}
output1.LLT <- output1.LLT[order(output1.LLT[,"all_vars"], decreasing=F),]
cat(paste("\n\n"))
print (output1.LLT)
for (i in 1:(1+nv)) {
percentage1.LLC[, i] <- percentage1.LLC[, i] - as.numeric(null.dev.cal)
percentage1.LLC[, i] <- -100 * percentage1.LLC[, i]
percentage1.LLC[, i] <- percentage1.LLC[, i] / as.numeric(null.dev.cal)
percentage1.LLC[, i] <- round(percentage1.LLC[, i], 2)
percentage1.LLT[, i] <- percentage1.LLT[, i] - as.numeric(null.dev.test)
percentage1.LLT[, i] <- -100 * percentage1.LLT[, i]
percentage1.LLT[, i] <- percentage1.LLT[, i] / as.numeric(null.dev.test)
percentage1.LLT[, i] <- round(percentage1.LLT[, i], 2)
}
if (difference == T) {
for (i in 1:nv) {
percentage1.LLC[,i+1] <- percentage1.LLC[,1] - percentage1.LLC[,i+1]
percentage1.LLT[,i+1] <- percentage1.LLT[,1] - percentage1.LLT[,i+1]
}
cat(paste("\n", "Results for variables show change from full models", sep = ""))
}
cat(paste("\n", "Percentage explained for calibration data", "\n\n", sep = ""))
percentage1.LLC <- percentage1.LLC[order(percentage1.LLC[,"all_vars"], decreasing=F),]
print(percentage1.LLC)
cat(paste("\n", "Percentage explained for testing data", "\n\n", sep = ""))
percentage1.LLT <- percentage1.LLT[order(percentage1.LLT[,"all_vars"], decreasing=F),]
print (percentage1.LLT)
#
# end of variables alone loop
}
#
if (SINK==T && OLD.SINK==F) {sink(file=NULL, append=T)}
#
if (variables.alone == F) {
return(list(AUC.calibration=output.C, AUC.testing=output.T,
residual.deviance.calibration=output.LLC, residual.deviance.testing=output.LLT,
percentage.deviance.calibration=percentage.LLC, percentage.deviance.testing=percentage.LLT,
call=match.call() ))
}else{
return(list(AUC.calibration=output.C, AUC.testing=output.T,
residual.deviance.calibration=output.LLC, residual.deviance.testing=output.LLT,
percentage.deviance.calibration=percentage.LLC, percentage.deviance.testing=percentage.LLT,
AUC.single.calibration=output1.C, AUC.single.testing=output1.T,
residual.single.deviance.calibration=output1.LLC, residual.single.deviance.testing=output1.LLT,
percentage.single.deviance.calibration=percentage1.LLC, percentage.single.deviance.testing=percentage1.LLT,
call=match.call() ))
}
}
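# Illustration (not run): how the 'Percentage explained' tables above are derived
# from the residual deviance tables. A minimal arithmetic sketch with invented
# numbers, not output of ensemble.drop1 itself.
# null.dev <- 1200      # deviance of the worst possible predictions
# resid.dev <- 700      # residual deviance of a fitted model
# round(-100 * (resid.dev - null.dev) / null.dev, 2)
# # 41.67: percentage of the null deviance removed by the model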
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/ensemble.drop1.R
|
`ensemble.dummy.variables` <- function(
xcat=NULL, freq.min=50, most.frequent=5,
new.levels=NULL, overwrite=TRUE, ...
)
{
# if (! require(dismo)) {stop("Please install the dismo package")}
if(inherits(xcat,"RasterLayer") == F) {stop("parameter xcat is expected to be a RasterLayer")}
# get all categories of the layer
freqs <- raster::freq(xcat)
freqs <- freqs[is.na(freqs[, 1])==F, ]
all.categories <- freqs[, 1]
replace.frame <- data.frame(id=all.categories, v=rep(0, length(all.categories)))
colnames(replace.frame)[2] <- paste(names(xcat), "dummy", sep="")
freqs <- freqs[order(freqs[, 2], decreasing=T),]
cat(paste("\n", "Frequency of the categories", "\n", sep = ""))
print(freqs)
max.level <- max(freqs[ ,1])
# freq.min should at minimum exclude the least frequent category
least.freq <- min(freqs[, 2])
if (least.freq > freq.min) {freq.min <- least.freq}
# get only variables with higher frequencies
freqs <- freqs[freqs[, 2] > freq.min, ]
if (most.frequent < 1) {most.frequent <- length(freqs[, 1])}
if (most.frequent < length(freqs[, 1])) {
freqs <- freqs[1:most.frequent, , drop=F]
}
new.categories <- freqs[, 1]
cat(paste("\n", "dummy layers will be created for the following factor levels", "\n", sep = ""))
print(new.categories)
# filename of original layer
# if (! require(tools)) {stop("tools package not available")}
filename1 <- raster::filename(xcat)
extension1 <- paste(".", tools::file_ext(filename1),sep="")
# create the new layers
for (i in 1:length(new.categories)) {
extension2 <- paste("_", new.categories[i], ".", tools::file_ext(filename1), sep="")
filename2 <- gsub(pattern=extension1, replacement=extension2, x=filename1)
# use working file to be able to change names
extension3 <- paste("_working.", tools::file_ext(filename1), sep="")
filename3 <- gsub(pattern=extension1, replacement=extension3, x=filename1)
replace.frame1 <- replace.frame
replace.frame1[replace.frame1[,1]==new.categories[i], 2] <- 1
new.name <- paste(names(xcat)[1], "_", new.categories[i], sep="")
names(replace.frame1)[2] <- new.name
new.x <- raster::subs(xcat, replace.frame1, by="id", which=new.name, subsWithNA=TRUE, filename=filename3, overwrite=overwrite, ...)
names(new.x) <- new.name
raster::writeRaster(new.x, filename=filename2, overwrite=overwrite, ...)
}
# make layers for the new categories
if (is.null(new.levels) == F) {
cat(paste("\n", "dummy layers will be created for the following new factor levels", "\n", sep = ""))
print(new.levels)
for (i in 1:length(new.levels)) {
new.x <- xcat > max.level
extension2 <- paste("_", new.levels[i], ".", tools::file_ext(filename1), sep="")
filename2 <- gsub(pattern=extension1, replacement=extension2, x=filename1)
new.name <- paste(names(xcat)[1], "_", new.levels[i], sep="")
names(new.x) <- new.name
raster::writeRaster(new.x, filename=filename2, overwrite=overwrite, ...)
}
}
# remove the working file
cat(paste("\n", "Removing temporary file: ", filename3, "\n", sep = ""))
file.remove(filename3)
}
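# Minimal usage sketch (not run; object names are hypothetical). Creates 0/1 dummy
# layers for the most frequent categories of a categorical RasterLayer and writes
# them next to the original file:
# land.use <- raster::raster("land_use.tif")
# ensemble.dummy.variables(xcat=land.use, freq.min=100, most.frequent=3,
#     new.levels=NULL, overwrite=TRUE)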
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/ensemble.dummy.variables.R
|
`ensemble.ecocrop.object` <- function(
temp.thresholds, rain.thresholds, name="crop01",
temp.multiply=1, annual.temps=TRUE, transform=1
)
{
temps <- as.numeric(temp.thresholds[order(temp.thresholds)])
temps <- temps*temp.multiply
names(temps) <- c("tminabs", "tminopt", "tmaxopt", "tmaxabs")
rains <- rain.thresholds[order(rain.thresholds)]
names(rains) <- c("pminabs", "pminopt", "pmaxopt", "pmaxabs")
ecocrop.object <- list(name=name, temp.thresholds=temps, rain.thresholds=rains, annual.temps=annual.temps, transform=transform)
return(ecocrop.object)
}
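# Minimal sketch (not run; threshold values are invented). Thresholds are sorted
# internally, so they may be supplied in any order; temp.multiply can be used to
# match the units of the bioclimatic temperature layers:
# crop.object <- ensemble.ecocrop.object(temp.thresholds=c(15, 20, 30, 35),
#     rain.thresholds=c(300, 600, 1200, 2000), name="crop01",
#     temp.multiply=10, annual.temps=FALSE, transform=1)
# crop.object$temp.thresholds   # named tminabs, tminopt, tmaxopt, tmaxabs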
`ensemble.ecocrop` <- function(
x=NULL, ecocrop.object=NULL,
RASTER.object.name=ecocrop.object$name,
RASTER.stack.name = "xTitle", RASTER.format = "GTiff",
RASTER.datatype = "INT2S", RASTER.NAflag = -32767,
CATCH.OFF = FALSE
)
{
.BiodiversityR <- new.env()
if (is.null(x) == T) {
stop("value for parameter x is missing (RasterStack or SpatRaster object)")
}
if (inherits(x, "RasterStack") == F && inherits(x, "SpatRaster") == FALSE) {
stop("x is not a RasterStack or SpatRaster object")
}
names(x)[which(names(x) == "bio01")] <- "bio1"
names(x)[which(names(x) == "bio05")] <- "bio5"
names(x)[which(names(x) == "bio06")] <- "bio6"
if (is.null(ecocrop.object) == TRUE) {
stop("value for parameter ecocrop.object is missing (hint: use the ensemble.ecocrop.object function)")
}
vars <- names(x)
if (any(vars == "bio12") == F) {
stop("Bioclimatic variable 'bio12' not provided with data")
}
if (ecocrop.object$annual.temps == F) {
if (any(vars == "bio5") == F) {
stop("Bioclimatic variable 'bio5' not provided with data")
}
if (any(vars == "bio6") == F) {
stop("Bioclimatic variable 'bio6' not provided with data")
}
}
else {
if (any(vars == "bio1") == F) {
stop("Bioclimatic variable 'bio1' not provided with data")
}
}
predict.ecocrop <- function(object = ecocrop.object, newdata = newdata) {
tminopt <- object$temp.thresholds["tminopt"]
tminabs <- object$temp.thresholds["tminabs"]
tmaxopt <- object$temp.thresholds["tmaxopt"]
tmaxabs <- object$temp.thresholds["tmaxabs"]
pminopt <- object$rain.thresholds["pminopt"]
pminabs <- object$rain.thresholds["pminabs"]
pmaxopt <- object$rain.thresholds["pmaxopt"]
pmaxabs <- object$rain.thresholds["pmaxabs"]
annual.temps <- object$annual.temps
z <- object$transform
# modified from array to numeric
result <- numeric(nrow(newdata))
Pall <- as.numeric(newdata[, "bio12"])
if (annual.temps == F) {
TMIall <- as.numeric(newdata[, "bio6"])
TMXall <- as.numeric(newdata[, "bio5"])
}else{
TMIall <- as.numeric(newdata[, "bio1"])
TMXall <- TMIall
}
for (i in 1:length(Pall)) {
# datai <- newdata[i, , drop = F]
P <- Pall[i]
TMI <- TMIall[i]
TMX <- TMXall[i]
if (is.na(P)==FALSE && is.na(TMI)==FALSE && is.na(TMX)==FALSE) {
PS1 <- PS2 <- PS3 <- 0
if (P > pminabs && P < pminopt) {
PS1 <- (P - pminabs)/(pminopt - pminabs)
}
if (P >= pminopt && P <= pmaxopt) {
PS2 <- 1
}
if (P > pmaxopt && P < pmaxabs) {
PS3 <- (pmaxabs - P)/(pmaxabs - pmaxopt)
}
PS <- max(c(PS1, PS2, PS3))
TMI1 <- TMI2 <- 0
if (TMI >= tminopt) {
TMI1 <- 1
}
if (TMI > tminabs && TMI < tminopt) {
TMI2 <- (TMI - tminabs)/(tminopt - tminabs)
}
TMIS <- max(c(TMI1, TMI2))
TMX1 <- TMX2 <- 0
if (TMX <= tmaxopt) {
TMX1 <- 1
}
if (TMX > tmaxopt && TMX < tmaxabs) {
TMX2 <- (tmaxabs - TMX)/(tmaxabs - tmaxopt)
}
TMXS <- max(c(TMX1, TMX2))
SFINAL <- min(PS, TMIS, TMXS)
result[i] <- SFINAL
}else{
result[i] <- NA
}
}
# p <- as.numeric(result)
result <- result^z
result <- trunc(1000*result)
return(result)
}
dir.create("ensembles", showWarnings = F)
dir.create("ensembles/ecocrop", showWarnings = F)
stack.title <- RASTER.stack.name
rasterfull <- paste("ensembles/ecocrop/", RASTER.object.name,
"_", stack.title, sep = "")
if (inherits(x, "RasterStack") == TRUE) {
if (CATCH.OFF == F) {
tryCatch(ecocrop.raster <- raster::predict(object = x,
model = ecocrop.object, fun = predict.ecocrop, na.rm = TRUE,
filename = rasterfull, progress = "text", overwrite = TRUE,
format = RASTER.format), error = function(err) {
print(paste("ecocrop prediction failed"))
}, silent = F)
}else{
ecocrop.raster <- raster::predict(object = x, model = ecocrop.object,
fun = predict.ecocrop, na.rm = TRUE, filename = rasterfull,
progress = "text", overwrite = TRUE, format = RASTER.format)
}
} # RasterStack
if (inherits(x, "SpatRaster") == TRUE) {
rasterfull <- paste0(rasterfull, ".tif")
if (CATCH.OFF == F) {
tryCatch(ecocrop.raster <- terra::predict(object = x,
model = ecocrop.object, fun = predict.ecocrop, na.rm = TRUE,
filename = rasterfull,
overwrite = TRUE), error = function(err) {
print(paste("ecocrop prediction failed"))
}, silent = F)
}else{
ecocrop.raster <- terra::predict(object = x, model = ecocrop.object,
fun = predict.ecocrop, na.rm = TRUE, filename = rasterfull,
overwrite = TRUE)
}
} # SpatRaster
# ecocrop.raster <- trunc(1000 * ecocrop.raster)
cat(paste("\n", "raster layer created (probabilities multiplied by 1000)",
"\n", sep = ""))
# raster::setMinMax(ecocrop.raster)
print(ecocrop.raster)
# raster::writeRaster(ecocrop.raster, filename = "working.grd",
# overwrite = T)
# working.raster <- raster::raster("working.grd")
# names(working.raster) <- paste(RASTER.object.name, "_",
# stack.title, "_ecocrop", sep = "")
# raster::writeRaster(ecocrop.raster, filename = rasterfull,
# progress = "text", overwrite = TRUE, format = RASTER.format,
# datatype = RASTER.datatype, NAflag = RASTER.NAflag)
cat(paste("\n", "ecocrop raster provided in folder: ",
getwd(), "//ensembles//ecocrop", "\n", sep = ""))
return(ecocrop.raster)
}
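# Illustration (not run) of the trapezoidal response used in predict.ecocrop above:
# suitability increases linearly between the absolute and optimal minima, equals 1
# within the optimal range, and decreases linearly towards the absolute maximum.
# Numbers are invented for illustration.
# pminabs <- 300; pminopt <- 600; pmaxopt <- 1200; pmaxabs <- 2000
# P <- 450
# PS1 <- PS2 <- PS3 <- 0
# if (P > pminabs && P < pminopt) {PS1 <- (P - pminabs)/(pminopt - pminabs)}
# if (P >= pminopt && P <= pmaxopt) {PS2 <- 1}
# if (P > pmaxopt && P < pmaxabs) {PS3 <- (pmaxabs - P)/(pmaxabs - pmaxopt)}
# max(c(PS1, PS2, PS3))   # 0.5 for P = 450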
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/ensemble.ecocrop.R
|
`ensemble.envirem.masterstack` <- function(
x,
precipstack,
tmaxstack, tminstack,
tmeanstack=NULL,
envirem3=TRUE
)
{
if (inherits(precipstack, "RasterBrick")) {precipstack <- raster::stack(precipstack)}
if (inherits(precipstack, "RasterStack")) {
precip.data <- data.frame(raster::extract(precipstack, y=x))
}else if (inherits(precipstack, "SpatRaster")){
precip.data <- data.frame(terra::extract(precipstack, y=x))
}else{
stop("precipstack is not a RasterStack or SpatRaster object")
}
names(precip.data) <- paste0("precip_", 1:ncol(precip.data))
if (inherits(tmaxstack, "RasterBrick")) {tmaxstack <- raster::stack(tmaxstack)}
if (inherits(tmaxstack, "RasterStack")) {
tmax.data <- data.frame(raster::extract(tmaxstack, y=x))
}else if (inherits(tmaxstack, "SpatRaster")){
tmax.data <- data.frame(terra::extract(tmaxstack, y=x))
}else{
stop("tmaxstack is not a RasterStack or SpatRaster object")
}
names(tmax.data) <- paste0("tmax_", 1:ncol(tmax.data))
if (inherits(tminstack, "RasterBrick")) {tminstack <- raster::stack(tminstack)}
if (inherits(tminstack, "RasterStack")) {
tmin.data <- data.frame(raster::extract(tminstack, y=x))
}else if (inherits(tminstack, "SpatRaster")){
tmin.data <- data.frame(terra::extract(tminstack, y=x))
}else{
stop("tminstack is not a RasterStack or SpatRaster object")
}
names(tmin.data) <- paste0("tmin_", 1:ncol(tmin.data))
if (is.null(tmeanstack) == FALSE) {
if (inherits(tmeanstack, "RasterBrick")) {tmeanstack <- raster::stack(tmeanstack)}
if (inherits(tmeanstack, "RasterStack")) {
tmean.data <- data.frame(raster::extract(tmeanstack, y=x))
}else if (inherits(tmeanstack, "SpatRaster")){
tmean.data <- data.frame(terra::extract(tmeanstack, y=x))
}else{
stop("tmeanstack is not a RasterStack or SpatRaster object")
}
names(tmean.data) <- paste0("tmean_", 1:ncol(tmean.data))
input.data <- cbind(precip.data, tmax.data, tmin.data, tmean.data)
}else{
input.data <- cbind(precip.data, tmax.data, tmin.data)
}
if (envirem3 == FALSE ) {
for (i in 1:ncol(input.data)) {
rasteri <- raster::raster(matrix(input.data[, i]))
if (i == 1) {
masterstack <- raster::stack(rasteri)
}else{
masterstack <- raster::stack(c(masterstack, rasteri))
}
}
}else{
for (i in 1:ncol(input.data)) {
rasteri <- terra::rast(matrix(input.data[, i]))
if (i == 1) {
masterstack <- rasteri
}else{
masterstack <- c(masterstack, rasteri)
}
}
}
names(masterstack) <- names(input.data)
return(masterstack)
}
`ensemble.envirem.solradstack` <- function(
x, solrad,
envirem3=TRUE
)
{
if (inherits(solrad, "RasterBrick")) {solrad <- raster::stack(solrad)}
if (inherits(solrad, "RasterStack")) {
input.data <- data.frame(raster::extract(solrad, y=x))
}else if (inherits(solrad, "SpatRaster")){
input.data <- data.frame(terra::extract(solrad, y=x))
}else{
stop("solrad is not a RasterStack or SpatRaster object")
}
names(input.data) <- paste0("et_solrad_", 1:ncol(input.data))
if (envirem3 == FALSE ) {
for (i in 1:ncol(input.data)) {
rasteri <- raster::raster(matrix(input.data[, i]))
if (i == 1) {
solradout <- raster::stack(rasteri)
}else{
solradout <- raster::stack(c(solradout, rasteri))
}
}
}else{
for (i in 1:ncol(input.data)) {
rasteri <- terra::rast(matrix(input.data[, i]))
if (i == 1) {
solradout <- rasteri
}else{
solradout <- c(solradout, rasteri)
}
}
}
names(solradout) <- names(input.data)
return(solradout)
}
`ensemble.envirem.run` <- function(
masterstack, solradstack,
var="all", ...
)
{
# Modified Sep-2023 due to change from envirem::layerCreation
envirem.out <- envirem::generateEnvirem(masterstack=masterstack,
solradstack=solradstack,
var=var, ...)
# Modified May 2023 with optional argument
return(as.data.frame(envirem.out, optional=FALSE))
}
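# Minimal workflow sketch (not run; all object names are hypothetical). The three
# functions above are designed to be combined: monthly climate values extracted at
# point locations are turned into single-column rasters and passed on to envirem:
# master <- ensemble.envirem.masterstack(x=locations, precipstack=prec.stack,
#     tmaxstack=tmax.stack, tminstack=tmin.stack)
# solrad <- ensemble.envirem.solradstack(x=locations, solrad=solrad.stack)
# envirem.data <- ensemble.envirem.run(masterstack=master, solradstack=solrad, var="all")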
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/ensemble.envirem.R
|
`ensemble.environmentalThin` <- function(
x, predictors.stack=NULL, extracted.data=NULL,
thin.n=50, runs=100, pca.var=0.95,
silent=FALSE, verbose=FALSE,
return.notRetained=FALSE
)
{
.BiodiversityR <- new.env()
# distEnviro.thin operates on stacked distances (do not recalculate distance each run)
'distEnviro.thin' <- function(x, x2, thin.dist=0.1, thin.n=50) {
# Algorithm modified in MAR-2022 to always reach the minimum distance, but
# in case fewer locations were retained, use a previous step from the removal process
x2.c <- x2
while(min(x2.c[, 3]) < thin.dist && nrow(x2.c) > 1) {
retained.c <- unique(c(x2.c[, 1], x2.c[, 2]))
if (length(retained.c) >= thin.n) {x2.prev <- x2.c}
p <- nrow(x2.c)
x2.c <- x2.c[sample(p), ]
first.min <- which(x2.c[, 3] < thin.dist)
first.min <- first.min[1]
random.col <- as.numeric(runif(1) > 0.5)+1
selected <- x2.c[first.min, random.col]
rows1 <- x2.c[, 1] != selected
x2.c <- x2.c[rows1, , drop=F]
rows2 <- x2.c[, 2] != selected
x2.c <- x2.c[rows2, , drop=F]
}
retained.c <- unique(c(x2.c[, 1], x2.c[, 2]))
if (length(retained.c) >= thin.n) {
x2 <- x2.c
}else{
x2 <- x2.prev
}
retained <- unique(c(x2[, 1], x2[, 2]))
x3 <- x[retained, ]
# special case where the remaining 2 locations are closer than minimum distance
if (nrow(x3)==2 && x2[1, 3] < thin.dist) {
retained <- sample(retained, size=1)
x3 <- x[retained, , drop=F]
}
# Modified MAR-2022!
# In case a larger number of locations was retained, use the first algorithm again
retained <- unique(c(x2[, 1], x2[, 2]))
retained.n <- length(unique(c(x2[, 1], x2[, 2])))
while(retained.n > thin.n) {
first.min <- which.min(x2[, 3])
first.min <- first.min[1]
random.col <- as.numeric(runif(1) > 0.5)+1
selected <- x2[first.min, random.col]
rows1 <- x2[, 1] != selected
x2 <- x2[rows1, , drop=F]
rows2 <- x2[, 2] != selected
x2 <- x2[rows2, , drop=F]
retained <- unique(c(x2[, 1], x2[, 2]))
retained.n <- length(retained)
}
x3 <- x[retained, ]
dist.min <- min(x2[, 3])
return(list(x3=x3, dist.min=dist.min, retained=retained))
}
if (verbose == TRUE) {silent <- FALSE}
if(thin.n >= nrow(x)) {
if (silent == F) {
cat(paste("WARNING: thinning parameter larger or equal to number of available locations", "\n"))
cat(paste("therefore all locations selected", "\n\n"))
}
if (return.notRetained == TRUE) {
return(list(retained=x, not.retained=NULL))
}else{
return(x)
}
}
# create background data
if (is.null(extracted.data) == TRUE) {
background.data <- raster::extract(predictors.stack, x)
background.data <- data.frame(background.data)
}else{
if (nrow(x) != nrow(extracted.data)) {stop("WARNING: different row numbers of coordinates and extracted.data")}
background.data <- extracted.data
}
TrainValid <- complete.cases(background.data)
x <- x[TrainValid,]
background.data <- background.data[TrainValid,]
# PCA of scaled variables
rda.result <- vegan::rda(X=background.data, scale=TRUE)
# select number of axes
ax <- 1
while ( (sum(vegan::eigenvals(rda.result)[c(1:ax)])/sum(vegan::eigenvals(rda.result))) < pca.var ) {ax <- ax+1}
if (silent == FALSE) {cat(paste("\n", "Percentage of variance of the selected axes (1 to ", ax, ") of principal components analysis: ", 100*sum(vegan::eigenvals(rda.result)[c(1:ax)])/sum(vegan::eigenvals(rda.result)), "\n", sep = ""))}
rda.scores <- vegan::scores(rda.result, display="sites", scaling=1, choices=c(1:ax))
rda.dist <- as.matrix(vegan::vegdist(rda.scores, method="euc"))
rda.dist <- signif(rda.dist, digits=6)
#
# stack
n <- nrow(x)
pairs <- utils::combn(n, 2)
p <- ncol(pairs)
pairs <- cbind(t(pairs), numeric(p))
for (i in 1:p) {
pairs[i, 3] <- rda.dist[pairs[i, 1], pairs[i, 2]]
}
#
runs <- max(runs, 1)
dist.all <- 0
#
# algorithm 1 iteratively removes one location from the pair separated by the smallest environmental distance
x2 <- pairs
retained.n <- length(unique(c(x2[, 1], x2[, 2])))
while(retained.n > thin.n) {
first.min <- which.min(x2[, 3])
first.min <- first.min[1]
random.col <- as.numeric(runif(1) > 0.5)+1
selected <- x2[first.min, random.col]
rows1 <- x2[, 1] != selected
x2 <- x2[rows1, , drop=F]
rows2 <- x2[, 2] != selected
x2 <- x2[rows2, , drop=F]
retained <- unique(c(x2[, 1], x2[, 2]))
retained.n <- length(retained)
}
x3 <- x[retained, ]
dist.min1 <- min(x2[, 3])
if (silent == FALSE) {
cat(paste("Environmentally thinned point location data set obtained with first algorithm", "\n", sep=""))
cat(paste("number of locations: ", nrow(x3), "\n"))
cat(paste("minimum distance: ", dist.min1, "\n"))
}
#
# algorithm 2 uses minimum distance of previous algorithm
# now algorithm attempts to maximize the number of retained locations, similar to ensemble.spatialThin
#
if (silent == F) {cat(paste("\n", "Environmentally thinned point location data set obtained with second algorithm", "\n", sep=""))}
dist.all <- 0
dist.n.all <- 0
for (i in 1:runs) {
dist1 <- distEnviro.thin(x, x2=pairs, thin.dist=dist.min1, thin.n=thin.n)
dist.min2 <- dist1$dist.min
dist.n <- nrow(dist1$x3)
if (verbose == T) {
if (dist.min2 > dist.all) {cat(paste("run ", i, " (", dist.n, " locations with minimum distance: ", dist.min2, " > ", dist.all, " [previous minimum distance])", "\n", sep=""))}
if (dist.min2 == dist.all) {cat(paste("run ", i, " (", dist.n, " locations with minimum distance: ", dist.min2, " = ", dist.all, " [previous minimum distance])", "\n", sep=""))}
if (dist.min2 < dist.all) {cat(paste("run ", i, " (", dist.n, " locations with minimum distance: ", dist.min2, ")", "\n", sep=""))}
}
if (dist.min2 > dist.all) {
dist.all <- dist.min2
loc.out <- dist1$x3
dist.n.all <- dist.n
retained <- dist1$retained
}
if (dist.min2 == dist.all && dist.n > dist.n.all) {
dist.all <- dist.min2
loc.out <- dist1$x3
dist.n.all <- dist.n
retained <- dist1$retained
}
}
if (verbose == TRUE) {cat(paste("\n"))}
if (silent == FALSE) {
cat(paste("number of locations: ", nrow(loc.out), "\n"))
cat(paste("minimum distance: ", dist.all, "\n"))
}
if (return.notRetained == TRUE) {
x.not <- x[(c(1:nrow(x)) %in% retained) == FALSE, ]
return(list(retained=loc.out, not.retained=x.not))
}else{
return(loc.out)
}
}
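# Minimal usage sketch (not run; object names are hypothetical). Thins presence
# locations in the environmental (PCA) space derived from the predictor layers:
# thinned <- ensemble.environmentalThin(x=presence.locations,
#     predictors.stack=predictor.stack, thin.n=50, runs=100,
#     return.notRetained=TRUE)
# thinned$retained       # locations kept for model calibration
# thinned$not.retained   # locations removed by the thinning algorithm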
`ensemble.environmentalThin.clara` <- function(
x, predictors.stack=NULL, thin.n=20, runs=100, pca.var=0.95,
silent=FALSE, verbose=FALSE,
clara.k=100
)
{
#
# create background data
background.data <- raster::extract(predictors.stack, x)
background.data <- data.frame(background.data)
TrainValid <- complete.cases(background.data)
x <- x[TrainValid,]
background.data <- background.data[TrainValid,]
# PCA of scaled variables
rda.result <- vegan::rda(X=background.data, scale=T)
# select number of axes
ax <- 1
while ( (sum(vegan::eigenvals(rda.result)[c(1:ax)])/sum(vegan::eigenvals(rda.result))) < pca.var ) {ax <- ax+1}
if (silent == F) {cat(paste("\n", "Percentage of variance of the selected axes (1 to ", ax, ") of principal components analysis: ", 100*sum(vegan::eigenvals(rda.result)[c(1:ax)])/sum(vegan::eigenvals(rda.result)), "\n", sep = ""))}
rda.scores <- vegan::scores(rda.result, display="sites", scaling=1, choices=c(1:ax))
#
# cluster and thin if more locations in each cluster
clara.result <- cluster::clara(rda.scores, k=clara.k, metric="euclidean", medoids.x=F)$clustering
#
loc.first <- TRUE
for (lo in 1:clara.k) {
x.bin <- x[clara.result == lo, , drop=F]
x.env <- x.bin
if (nrow(as.data.frame(x.bin)) > thin.n) {
x.env <- ensemble.environmentalThin(x=x.bin, predictors.stack=predictors.stack, thin.n=thin.n, runs=runs, pca.var=pca.var, verbose=verbose, silent=silent)
}
if (loc.first == T) {
x.out <- x.env
loc.first <- FALSE
}else{
x.out <- rbind(x.out, x.env)
}
}
return(x.out)
}
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/ensemble.environmentalThin.R
|
`ensemble.SEDI` <- function(
TPR, FPR, small=1e-9)
{
# Equation 2 in Ferro, C.A. and D.B. Stephenson (2011) Extremal Dependence Indices: Improved Verification Measures for Deterministic Forecasts of Rare
# Binary Events. Wea. Forecasting, 26, 699-713, https://doi.org/10.1175/WAF-D-10-05030.1
# Equation 7 in Wunderlich RF, Lin Y-P, Anthony J, Petway JR (2019) Two alternative evaluation metrics to replace the true skill statistic in the assessment
# of species distribution models. Nature Conservation 35: 97-116. https://doi.org/10.3897/natureconservation.35.33918
# Zeroes are substituted by small number (1e-9) as discussed by Wunderlich et al. and as implemented in https://github.com/RWunderlich/SEDI/blob/master/R/sedi.R
TPR[TPR==0] <- small
FPR[FPR==0] <- small
TNR <- 1-FPR
FNR <- 1-TPR
TNR[TNR==0] <- small
FNR[FNR==0] <- small
s <- (log(FPR) - log(TPR) - log(TNR) + log(FNR)) / (log(FPR) + log(TPR) + log(TNR) + log(FNR))
return(s)
}
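# Worked example (not run) with invented rates: a true positive rate of 0.8 and a
# false positive rate of 0.2 give
# ensemble.SEDI(TPR=0.8, FPR=0.2)
# # = (2*log(0.2) - 2*log(0.8)) / (2*log(0.2) + 2*log(0.8)), approximately 0.76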
# Inspired by:
# https://github.com/kerickson22/SDMs_for_rare_species_modeling/blob/main/code/00b_Constants.R
#computeTjurR2 = function(Y, predY) {
#
# R2 = mean(predY[which(Y == 1)]) - mean(predY[which(Y == 0)])
# return(R2)
#}
`ensemble.Tjur` <- function(
eval
)
{
result <- mean(eval@presence) - mean(eval@absence)
return(result)
}
`ensemble.evaluate` <- function(
eval, fixed.threshold=NULL, eval.train=NULL)
{
if (inherits(eval, "ModelEvaluation") == FALSE) {stop(paste("Please provide a ModelEvaluation object", "\n", sep = ""))}
if (is.null(fixed.threshold) == T) {
if (is.null(eval.train) == T) {
stop(paste("Please provide a ModelEvaluation object for the training data", "\n", sep = ""))
}else{
fixed.threshold <- eval.train@t[which.max(eval.train@TPR + eval.train@TNR)]
cat(paste("Calculated fixed threshold of ", fixed.threshold, " corresponding to highest sum of sensitivity and specificity", "\n", sep = ""))
}
}
result <- as.numeric(rep(NA, 9))
names(result) <- c("AUC", "TSS", "SEDI", "TSS.fixed", "SEDI.fixed", "FNR.fixed", "MCR.fixed", "AUCdiff", "Tjur")
result["AUC"] <- eval@auc
tss <- eval@TPR - eval@FPR
result["TSS"] <- max(tss)
sedi <- ensemble.SEDI(eval@TPR, eval@FPR)
result["SEDI"] <- max(sedi)
result["TSS.fixed"] <- tss[which(eval@t >= fixed.threshold)][1]
result["SEDI.fixed"] <- sedi[which(eval@t >= fixed.threshold)][1]
result["FNR.fixed"] <- eval@FNR[which(eval@t >= fixed.threshold)][1]
result["MCR.fixed"] <- eval@MCR[which(eval@t >= fixed.threshold)][1]
if (is.null(eval.train) == T) {
cat(paste("Please provide a ModelEvaluation object for calculating AUCdiff", "\n", sep = ""))
}else{
result["AUCdiff"] <- eval.train@auc - eval@auc
}
result["Tjur"] <- ensemble.Tjur(eval)
return(result)
}
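# Minimal usage sketch (not run; objects are hypothetical). 'eval.cal' and 'eval.test'
# would be dismo::evaluate() results for the calibration and test data of one model:
# eval.cal <- dismo::evaluate(p=pres.cal, a=abs.cal)
# eval.test <- dismo::evaluate(p=pres.test, a=abs.test)
# ensemble.evaluate(eval=eval.test, eval.train=eval.cal)
# # returns AUC, TSS, SEDI, their fixed-threshold variants, FNR, MCR, AUCdiff and Tjur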
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/ensemble.evaluate.R
|
`ensemble.formulae` <- function(
x, layer.drops=NULL, factors=NULL, dummy.vars=NULL, weights=NULL
)
{
results <- list(MAXLIKE.formula=NULL, GBM.formula=NULL, RF.formula=NULL, CF.formula=NULL,
GLM.formula=NULL, STEP.formula=NULL, GLMSTEP.scope=NULL,
GAM.formula=NULL, GAMSTEP.scope=NULL, MGCV.formula=NULL, MGCVFIX.formula=NULL,
EARTH.formula=NULL, RPART.formula=NULL, NNET.formula=NULL,
FDA.formula=NULL, SVM.formula=NULL, SVME.formula=NULL)
# in older version of raster used layerNames instead of names
vars <- names(x)
# drop variables
if (is.null(layer.drops) == F) {
layer.drops <- as.character(layer.drops)
factors <- as.character(factors)
dummy.vars <- as.character(dummy.vars)
nd <- length(layer.drops)
for (i in 1:nd) {
vars <- vars[which(vars != layer.drops[i])]
factors <- factors[which(factors != layer.drops[i])]
dummy.vars <- dummy.vars[which(dummy.vars != layer.drops[i])]
}
if (length(factors) == 0) {factors <- NULL}
if (length(dummy.vars) == 0) {dummy.vars <- NULL}
}
# exclude column for pb for data.frames
vars <- vars[which(vars != "pb")]
if (length(vars) == 0) {
cat(paste("\n", "WARNING: no variables available", "\n\n", sep = ""))
return(results)
}
gamscope <- as.list(vars)
names(gamscope) <- vars
nv <- length(vars)
nf <- length(factors)
nd <- length(dummy.vars)
if (is.null(factors) == F) {
factors <- as.character(factors)
for (i in 1:nf) {
if (any(vars==factors[i])==FALSE) {
cat(paste("\n", "WARNING: categorical variable '", factors[i], "' not among grid layers", "\n\n", sep = ""))
}
}
}
if (is.null(dummy.vars) == F) {
dummy.vars <- as.character(dummy.vars)
for (i in 1:nd) {
if (any(vars==dummy.vars[i])==FALSE) {
cat(paste("\n", "WARNING: dummy variable '", dummy.vars[i], "' not among grid layers", "\n", "\n", sep = ""))
}
}
}
numpb <- paste("pb ~ ")
catpb <- paste("as.factor(pb) ~ ")
stepvars <- paste(vars[1])
allvars <- paste0("allvars", c(1:nv))
for (i in 1:nv) {allvars[i] <- paste(vars[i])}
glmvars <- gamvars <- mgcvvars <- mgcvfixvars <- explicitcatvars <- allvars
for (i in 1:nv) {
if (any(vars[i]==factors) == T) {
explicitcatvars[i] <- paste("as.factor(", vars[i], ")", sep="")
gamvars[i] <- paste("as.factor(", vars[i], ")", sep="")
gamscope[[as.name(vars[i])]] <- as.formula(paste("~1 + as.factor(", vars[i], ")", sep=""))
}else{
if (any(vars[i]==dummy.vars) == T) {
gamscope[[as.name(vars[i])]] <- as.formula(paste("~1 +", vars[i], sep=""))
}else{
glmvars[i] <- paste(vars[i], "+ I(", vars[i], "^2) + I(", vars[i], "^3)", sep="")
gamvars[i] <- paste("gam::s(", vars[i], ", 4)", sep="")
mgcvvars[i] <- paste("s(", vars[i], ", k=4)", sep="")
mgcvfixvars[i] <- paste("s(", vars[i], ", k=4, fx=T)", sep="")
gamscope[[as.name(vars[i])]] <- as.formula(paste("~1 + ", vars[i], "+ gam::s(", vars[i], ", 4)", sep=""))
}
}
}
ne <- nv-nf
earthvars <- NULL
if (all(vars %in% factors) == T) {
earthvars <- NULL
}else{
if (ne > 0) {
earthvars <- paste0("earthvars", c(1:ne))
j <- 0
for (i in 1:nv) {
if (any(vars[i]==factors) == F) {
j <- j+1
earthvars[j] <- paste(vars[i])
}
}
}
}
results$GBM.formula <- as.formula(paste(numpb, paste(allvars, sep="", collapse="+"), sep="", collapse="+"))
results$RF.formula <- as.formula(paste(numpb, paste(allvars, sep="", collapse="+"), sep="", collapse="+"))
results$CF.formula <- as.formula(paste(catpb, paste(allvars, sep="", collapse="+"), sep="", collapse="+"))
results$GLM.formula <- as.formula(paste(numpb, paste(glmvars, sep="", collapse="+"), sep="", collapse="+"))
results$STEP.formula <- as.formula(paste(numpb, stepvars, sep="", collapse="+"))
results$GLMSTEP.scope <- list(upper=as.formula(paste("~", paste(glmvars, sep="", collapse="+"), sep="", collapse="+")), lower=as.formula(paste("~1")))
results$GAM.formula <- as.formula(paste(numpb, paste(gamvars, sep="", collapse="+"), sep="", collapse="+"))
results$GAMSTEP.scope <- gamscope
results$MGCV.formula <- as.formula(paste(numpb, paste(mgcvvars, sep="", collapse="+"), sep="", collapse="+"))
results$MGCVFIX.formula <- as.formula(paste(numpb, paste(mgcvfixvars, sep="", collapse="+"), sep="", collapse="+"))
# no categorical variables for maxlike and earth
if(is.null(earthvars) == F) {results$EARTH.formula <- as.formula(paste(catpb, paste(earthvars, sep="", collapse="+"), sep="", collapse="+"))}
if(is.null(earthvars) == F) {results$MAXLIKE.formula <- as.formula(paste("~", paste(earthvars, sep="", collapse="+"), sep="", collapse="+"))}
results$RPART.formula <- as.formula(paste(catpb, paste(allvars, sep="", collapse="+"), sep="", collapse="+"))
results$NNET.formula <- as.formula(paste(catpb, paste(allvars, sep="", collapse="+"), sep="", collapse="+"))
results$FDA.formula <- as.formula(paste(numpb, paste(allvars, sep="", collapse="+"), sep="", collapse="+"))
results$SVM.formula <- as.formula(paste(numpb, paste(allvars, sep="", collapse="+"), sep="", collapse="+"))
results$SVME.formula <- as.formula(paste(catpb, paste(allvars, sep="", collapse="+"), sep="", collapse="+"))
if (is.null(factors) == F) {
if (is.null(weights) == F) {
if (weights["MAXLIKE"] > 0 && is.null(earthvars) == F) {cat(paste("\n", "Note that categorical variables were not included by ensemble.formulae for MAXLIKE", sep = ""))}
if (weights["MAXLIKE"] > 0 && is.null(earthvars) == T) {cat(paste("\n", "Note that there were no variables available for MAXLIKE to make a formula", sep = ""))}
if (weights["EARTH"] > 0 && is.null(earthvars) == F) {cat(paste("\n", "Note that categorical variables were not included by ensemble.formulae for EARTH", sep = ""))}
if (weights["EARTH"] > 0 && is.null(earthvars) == T) {cat(paste("\n", "Note that there were no variables available for MAXLIKE to make a formula", sep = ""))}
cat(paste("\n\n"))
}
}
return(results)
}
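# Minimal usage sketch (not run; layer names are hypothetical). Formulae are normally
# generated from the predictor stack before calibrating the individual models:
# formulae <- ensemble.formulae(predictor.stack, factors=c("land_use"),
#     dummy.vars=c("land_use_1"), layer.drops=NULL)
# formulae$GAM.formula   # e.g. pb ~ gam::s(bio05, 4) + ... + as.factor(land_use)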
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/ensemble.formulae.R
|
`ensemble.habitat.change` <- function(
base.map=file.choose(),
other.maps=utils::choose.files(),
change.folder="ensembles/change",
RASTER.names="changes",
RASTER.format="GTiff", RASTER.datatype="INT1U", RASTER.NAflag=255
# KML.out=FALSE, KML.folder="kml/change",
# KML.maxpixels=100000, KML.blur=10
)
{
#
change.function <- function(x, y) {return(10*x + y)}
# output resembles binary output based on current 1/0 + predicted 1/0
# 11 = suitable in current and predicted habitat
# 10 = suitable in current but not suitable in predicted habitat
# 01 = 1 = not suitable in current but suitable in predicted habitat
# 00 = 0 = not suitable in current and predicted habitat
# check whether information likely to be count data
base.raster <- raster::raster(base.map)
if(length(base.map[grepl("presence", base.map)]) < 1) {
cat(paste("Warning: base.map is not in a subfolder 'presence'\n", sep=""))
}
if(length(other.maps[grepl("presence", other.maps)]) < 1) {
cat(paste("Warning: other.maps are not in a subfolder 'presence'\n", sep=""))
}
#
dir.create(change.folder, showWarnings = F)
#
base.raster <- raster::raster(base.map)
base.name <- names(base.raster)
raster::setMinMax(base.raster)
#
# if (KML.out==T && raster::isLonLat(base.raster)==F) {
# cat(paste("\n", "NOTE: not possible to generate KML files as Coordinate Reference System (CRS) of baseline raster is not longitude and latitude", "\n", sep = ""))
# KML.out <- FALSE
# }
#
# if(KML.out==T && KML.folder == "kml/change") {
# dir.create("kml", showWarnings = F)
# dir.create("kml/change", showWarnings = F)
# }
# if(KML.out == T && KML.folder != "kml/change") {dir.create(KML.folder, showWarnings = F)}
#
if (raster::maxValue(base.raster) > 1) {
cat(paste("Warning: base.raster has values larger than 1, hence does not provide presence-absence", sep=""))
}
frequencies <- data.frame(array(dim=c(length(other.maps)+1,4)))
rownames(frequencies)[1] <- paste(base.name, " (base.map)", sep="")
names(frequencies) <- c("never suitable (00=0)", "always suitable (11)", "no longer suitable (10)", "new habitat (01=1)")
freq1 <- raster::freq(base.raster)
freq1 <- freq1[is.na(freq1[,1])==F,]
if(length(freq1[freq1[,1]==0, 2]) > 0) {frequencies[1, 1] <- freq1[freq1[,1]==0, 2]}
if(length(freq1[freq1[,1]==1, 2]) > 0) {frequencies[1, 2] <- freq1[freq1[,1]==1, 2]}
for (i in 1:length(other.maps)) {
other.raster <- raster::raster(other.maps[i])
# raster.name <- names(other.raster)
raster.name <- RASTER.names[i]
raster::setMinMax(other.raster)
if (raster::maxValue(other.raster) > 1) {
cat(paste("Warning: other.maps [", i, "] has values larger than 1, hence does not provide presence-absence", sep=""))
}
changes.raster <- other.raster
filename1 <- paste(change.folder, "/", raster.name, sep="")
changes.raster <- raster::overlay(x=base.raster, y=other.raster, fun = change.function, na.rm = FALSE)
names(changes.raster) <- raster.name
raster::writeRaster(changes.raster, filename=filename1, progress='text', format=RASTER.format, overwrite=TRUE, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
# avoid possible problems with saving of names of the raster layers
# no longer used with default GTiff format since DEC-2022
# raster::writeRaster(changes.raster, filename="working.grd", overwrite=T)
# working.raster <- raster::raster("working.grd")
# names(working.raster) <- raster.name
# raster::writeRaster(working.raster, filename=filename1, progress='text', format=RASTER.format, overwrite=TRUE, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
#
rownames(frequencies)[i+1] <- raster.name
freq1 <- raster::freq(changes.raster)
freq1 <- freq1[is.na(freq1[,1])==F,]
if(length(freq1[freq1[,1]==0, 2]) > 0) {frequencies[i+1, 1] <- freq1[freq1[,1]==0, 2]}
if(length(freq1[freq1[,1]==11, 2]) > 0) {frequencies[i+1, 2] <- freq1[freq1[,1]==11, 2]}
if(length(freq1[freq1[,1]==10, 2]) > 0) {frequencies[i+1, 3] <- freq1[freq1[,1]==10, 2]}
if(length(freq1[freq1[,1]==1, 2]) > 0) {frequencies[i+1, 4] <- freq1[freq1[,1]==1, 2]}
filename2 <- paste(change.folder, "/", raster.name, sep="")
# if (KML.out == T) {
# filename2 <- paste(KML.folder, "/", raster.name, sep="")
# raster::KML(changes.raster, filename=filename2, col=c("black","blue","red","green"), breaks=c(-1, 0, 1, 10, 11),
# colNA=0, blur=KML.blur, maxpixels=KML.maxpixels, overwrite=TRUE)
# }
}
cat(paste("\n", "Codes used in newly created rasters", sep=""))
cat(paste("\n\t", "Code = 11 indicates that the cell is suitable for the base and the other map", sep=""))
cat(paste("\n\t", "Code = 00 = 0 indicates that the cell is not suitable for the base and the other map", sep=""))
cat(paste("\n\t", "Code = 10 indicates lost habitat (suitable for the base map, not suitable for the other map)", sep=""))
cat(paste("\n\t", "Code = 01 = 1 indicates new habitat (not suitable for the base map, suitable for the other map)", sep=""))
cat(paste("\n\n", "Frequencies (number of cells) of habitat changes", sep=""))
cat(paste("\n", "(first row documents habitat for base map, i.e. the 'no change' scenario)", "\n\n", sep=""))
print(frequencies)
# if (KML.out == T) {
# cat(paste("\n", "Colour coding in KML layer", sep=""))
# cat(paste("\n\t", "Colour = green indicates that the cell is suitable for the base and the other map (Code = 11)", sep=""))
# cat(paste("\n\t", "Colour = black indicates that the cell is not suitable for the base and the other map (Code = 0)", sep=""))
# cat(paste("\n\t", "Colour = red indicates lost habitat (Code = 10; suitable for the base map, not suitable for the other map)", sep=""))
# cat(paste("\n\t", "Colour = blue indicates new habitat (Code = 1; not suitable for the base map, suitable for the other map)", "\n", sep=""))
# }
return(frequencies)
}
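# Illustration (not run) of the change coding applied above: the base (current) map
# is multiplied by 10 and the other (future) map is added, so the first digit refers
# to the base map and the second digit to the other map.
# base.value <- c(0, 0, 1, 1)     # not suitable / suitable in the base map
# other.value <- c(0, 1, 0, 1)    # not suitable / suitable in the other map
# 10*base.value + other.value     # 0 (never), 1 (new), 10 (lost), 11 (always suitable)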
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/ensemble.habitat.change.R
|
`ensemble.mean` <- function(
RASTER.species.name="Species001", RASTER.stack.name="base",
positive.filters=c("tif", "_ENSEMBLE_"), negative.filters=c("xml"),
RASTER.format="GTiff", RASTER.datatype="INT2S", RASTER.NAflag=-32767,
# KML.out=FALSE, KML.maxpixels=100000, KML.blur=10,
abs.breaks=6, pres.breaks=6, sd.breaks=9,
p=NULL, a=NULL,
pt=NULL, at=NULL,
threshold=-1,
threshold.method="spec_sens", threshold.sensitivity=0.9, threshold.PresenceAbsence=FALSE
)
{
.BiodiversityR <- new.env()
# if (! require(dismo)) {stop("Please install the dismo package")}
if (threshold < 0) {
if (is.null(p)==T || is.null(a)==T) {stop(paste("Please provide locations p and a to calculate thresholds", "\n", sep = ""))}
}
retest <- F
if (is.null(pt)==F && is.null(at)==F) {
if(identical(pt, p) == F || identical(at, a) == F) {retest <- T}
}
#
if(is.null(p) == F) {names(p) <- c("x", "y")}
if(is.null(a) == F) {names(a) <- c("x", "y")}
if(is.null(pt) == F) {names(pt) <- c("x", "y")}
if(is.null(at) == F) {names(at) <- c("x", "y")}
#
# avoid problems with non-existing directories
dir.create("ensembles/consensussuitability", showWarnings = F)
dir.create("ensembles/consensuscount", showWarnings = F)
dir.create("ensembles/consensuspresence", showWarnings = F)
dir.create("ensembles/consensussd", showWarnings = F)
# if(KML.out == T) {
# dir.create("kml", showWarnings = F)
# dir.create("kml/consensussuitability", showWarnings = F)
# dir.create("kml/consensuscount", showWarnings = F)
# dir.create("kml/consensuspresence", showWarnings = F)
# dir.create("kml/consensussd", showWarnings = F)
# }
#
# get ensemble input files
species_focus <- RASTER.species.name
if (gsub(".", "_", RASTER.species.name, fixed=T) != RASTER.species.name) {cat(paste("\n", "WARNING: species name (", RASTER.species.name, ") contains '.'", "\n\n", sep = ""))}
ensemble.files <- list.files(path=paste(getwd(), "//ensembles//suitability", sep=""), pattern=species_focus, full.names=TRUE)
if (length(ensemble.files) < 1) {
cat(paste("\n", "NOTE: not meaningful to provide means as there are no raster files for this species:", RASTER.species.name, "\n", sep = ""))
return(NULL)
}
RASTER.stack.name2 <- RASTER.stack.name
if (gsub(".", "_", RASTER.stack.name, fixed=T) != RASTER.stack.name) {cat(paste("\n", "WARNING: title of stack (", RASTER.stack.name, ") contains '.'", "\n\n", sep = ""))}
if (RASTER.stack.name != "") {
ensemble.files <- ensemble.files[grepl(pattern=RASTER.stack.name, x=ensemble.files)]
filename0 <- paste(species_focus, "_", RASTER.stack.name, sep="")
if (length(ensemble.files) < 1) {
cat(paste("\n", "NOTE: not meaningful to provide means as there are no raster files for this stack:", RASTER.stack.name, "\n", sep = ""))
return(NULL)
}
}else{
filename0 <- species_focus
}
for (i in 1:length(positive.filters)) {
ensemble.files <- ensemble.files[grepl(pattern=positive.filters[i], x=ensemble.files)]
}
for (i in 1:length(negative.filters)) {
ensemble.files <- ensemble.files[grepl(pattern=negative.filters[i], x=ensemble.files) == FALSE]
}
if (length(ensemble.files) < 2) {
cat(paste("\n", "NOTE: not meaningful to provide means as there are fewer than 2 ensemble files", "\n", sep = ""))
return(NULL)
}
cat(paste("\n", "Files used to create mean ensemble", "\n\n", sep = ""))
print(ensemble.files)
ensemble.stack <- raster::stack(ensemble.files)
cat(paste("\n", "RasterStack used to create mean ensemble", "\n\n", sep = ""))
print(ensemble.stack)
#
ensemble.mean <- raster::mean(ensemble.stack)
ensemble.mean <- trunc(1.0 * ensemble.mean)
# raster::setMinMax(ensemble.mean)
names(ensemble.mean) <- filename0
cat(paste("\n", "consensus mean suitability (truncated)", "\n\n", sep = ""))
print(ensemble.mean)
filename1 <- paste(getwd(), "//ensembles//consensussuitability//", filename0, sep="")
raster::writeRaster(x=ensemble.mean, filename=filename1, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
#
# avoid possible problems with saving of names of the raster layers
# no longer used with default GTiff format since DEC-2022
# raster::writeRaster(ensemble.mean, filename="working.grd", overwrite=T)
# working.raster <- raster::raster("working.grd")
# names(working.raster) <- filename0
# raster::writeRaster(working.raster, filename=filename1, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
#
# nicheOverlap
nicheOverlaps <- numeric(length(names(ensemble.stack)))
names(nicheOverlaps) <- names(ensemble.stack)
for (i in 1:length(names(ensemble.stack))) {
nicheOverlaps[i] <- dismo::nicheOverlap((ensemble.stack[[i]]/1000), (ensemble.mean/1000), stat="I", checkNegatives=F)
}
cat(paste("\n", "niche overlap (dismo::nicheOverlap with stat='I') between mean suitability and input suitability files", "\n\n", sep = ""))
print(as.data.frame(nicheOverlaps))
#
# standard deviation
ensemble.sd <- raster::calc(ensemble.stack, fun=sd)
ensemble.sd <- trunc(ensemble.sd)
# raster::setMinMax(ensemble.mean)
names(ensemble.sd) <- filename0
cat(paste("\n", "consensus standard deviation (truncated)", "\n\n", sep = ""))
print(ensemble.sd)
filename1 <- paste(getwd(), "//ensembles//consensussd//", filename0, sep="")
raster::writeRaster(x=ensemble.sd, filename=filename1, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
#
# avoid possible problems with saving of names of the raster layers
# no longer used with default GTiff format since DEC-2022
# raster::writeRaster(ensemble.sd, filename="working.grd", overwrite=T)
# working.raster <- raster::raster("working.grd")
# names(working.raster) <- filename0
# raster::writeRaster(working.raster, filename=filename1, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
#
#
threshold.mean <- threshold
if (threshold.mean < 0) {
eval1 <- NULL
cat(paste("\n", "Evaluation of created mean ensemble raster layer at locations p and a", "\n", sep = ""))
if (ncol(p) == 3) {p <- p[p[,1]==species_focus, c(2:3)]}
if (ncol(a) == 3) {a <- a[a[,1]==species_focus, c(2:3)]}
pres_consensus <- raster::extract(ensemble.mean, p)/1000
pres_consensus <- pres_consensus[is.na(pres_consensus)==F]
abs_consensus <- raster::extract(ensemble.mean, a)/1000
abs_consensus <- abs_consensus[is.na(abs_consensus)==F]
eval1 <- dismo::evaluate(p=pres_consensus, a=abs_consensus)
print(eval1)
threshold.mean <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity, threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres_consensus, Abs=abs_consensus)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(threshold.mean))
if(retest == T) {
eval1 <- NULL
cat(paste("\n", "Evaluation of created mean ensemble raster layer at locations pt and at", "\n\n", sep = ""))
if (ncol(pt) == 3) {pt <- pt[pt[,1]==species_focus, c(2:3)]}
if (ncol(at) == 3) {at <- at[at[,1]==species_focus, c(2:3)]}
pres_consensus <- raster::extract(ensemble.mean, pt)/1000
pres_consensus <- pres_consensus[is.na(pres_consensus)==F]
abs_consensus <- raster::extract(ensemble.mean, at)/1000
abs_consensus <- abs_consensus[is.na(abs_consensus)==F]
eval1 <- dismo::evaluate(p=pres_consensus, a=abs_consensus)
print(eval1)
}
}
#
#
# if (KML.out==T && raster::isLonLat(ensemble.mean)==F) {
# cat(paste("\n", "NOTE: not possible to generate KML files as Coordinate Reference System (CRS) is not longitude and latitude", "\n", sep = ""))
# KML.out <- FALSE
# }
# if (KML.out == T) {
# raster.min <- raster::minValue(ensemble.mean)
# raster.max <- raster::maxValue(ensemble.mean)
# seq1 <- round(seq(from=raster.min, to=threshold.mean, length.out=abs.breaks), 4)
# seq1 <- seq1[1:(abs.breaks-1)]
# seq1[-abs.breaks]
# seq1 <- unique(seq1)
# seq2 <- round(seq(from = threshold.mean, to = raster.max, length.out=pres.breaks), 4)
# seq2 <- unique(seq2)
# filename2 <- paste(getwd(), "//kml//consensussuitability//", filename0, sep="")
# raster::KML(ensemble.mean, filename=filename2, breaks = c(seq1, seq2), col = c(grDevices::rainbow(n=length(seq1), start=0, end =1/6), grDevices::rainbow(n=length(seq2)-1, start=3/6, end=4/6)), colNA = 0,
# blur=KML.blur, maxpixels=KML.maxpixels, overwrite=TRUE)
#
# sd.max <- raster::cellStats(ensemble.sd, stat='max')
# seq1 <- seq(from = 0, to = sd.max, length.out = sd.breaks)
# filename2b <- paste(getwd(), "//kml//consensussd//", filename0, sep="")
# raster::KML(ensemble.sd, filename=filename2b, col=grDevices::rainbow(n = length(seq1)-1, start = 1/6, end = 4/6), colNA = 0,
# blur=KML.blur, maxpixels=KML.maxpixels, overwrite=TRUE, breaks = seq1)
# }
#
# presence-absence maps based on the mean maps
enspresence <- ensemble.mean >= 1000 * threshold.mean
raster::setMinMax(enspresence)
names(enspresence) <- filename0
filename3 <- paste(getwd(), "//ensembles//consensuspresence//", filename0, sep="")
raster::writeRaster(x=enspresence, filename=filename3, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
#
# avoid possible problems with saving of names of the raster layers
# no longer used with default GTiff format since DEC-2022
# raster::writeRaster(enspresence, filename="working.grd", overwrite=T)
# working.raster <- raster::raster("working.grd")
# names(working.raster) <- filename0
# raster::writeRaster(working.raster, filename=filename3, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
#
# if (KML.out == T) {
# filename4 <- paste(getwd(), "//kml//consensuspresence//", filename0, sep="")
# raster::KML(enspresence, filename=filename4, col=c("grey", "green"),
# colNA=0, blur=KML.blur, maxpixels=KML.maxpixels, overwrite=TRUE)
# }
#
# count maps: counting the number of ensembles predicting presence
presence.files <- list.files(path=paste(getwd(), "//ensembles//presence", sep=""), pattern=species_focus, full.names=TRUE)
if (RASTER.stack.name != "") {
presence.files <- presence.files[grepl(pattern=RASTER.stack.name, x=presence.files)]
}
for (i in 1:length(positive.filters)) {
presence.files <- presence.files[grepl(pattern=positive.filters[i], x=presence.files)]
}
for (i in 1:length(negative.filters)) {
presence.files <- presence.files[grepl(pattern=negative.filters[i], x=presence.files) == FALSE]
}
ensemble.stack <- raster::stack(presence.files)
cat(paste("\n", "RasterStack (presence-absence) used to create consensus ensemble (count of ensembles)", "\n\n", sep = ""))
print(ensemble.stack)
ensemble.count <- sum(ensemble.stack)
raster::setMinMax(ensemble.count)
names(ensemble.count) <- filename0
filename5 <- paste(getwd(), "//ensembles//consensuscount//", filename0, sep="")
raster::writeRaster(x=ensemble.count, filename=filename5, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
#
# avoid possible problems with saving of names of the raster layers
# no longer used with default GTiff format since DEC-2022
# raster::writeRaster(ensemble.count, filename="working.grd", overwrite=T)
# working.raster <- raster::raster("working.grd")
# names(working.raster) <- filename0
# raster::writeRaster(working.raster, filename=filename5, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
#
# if (KML.out == T) {
# filename6 <- paste(getwd(), "//kml//consensuscount//", filename0, sep="")
# nmax <- length(presence.files)
# if (nmax > 3) {
# raster::KML(ensemble.count, filename=filename6, col=c("grey", "black", grDevices::rainbow(n=(nmax-2), start=0, end=1/3), "blue"),
# colNA=0, blur=10, overwrite=TRUE, breaks=seq(from=-1, to=nmax, by=1))
# }else{
# raster::KML(ensemble.count, filename=filename6, col=c("grey", grDevices::rainbow(n=nmax, start=0, end=1/3)),
# colNA=0, blur=10, overwrite=TRUE, breaks=seq(from=-1, to=nmax, by=1))
# }
# }
return(list(threshold=threshold.mean, nicheOverlaps=nicheOverlaps, call=match.call() ))
}
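#
# Hedged usage sketch (kept as comments so the package file is unaffected): how ensemble.mean
# is typically called after ensemble.batch has written suitability and presence layers to the
# 'ensembles' subfolders of the working directory. Argument names follow those used in the
# function body above; 'pres.locations' and 'abs.locations' (two-column data.frames of
# coordinates) are hypothetical placeholders.
#
# consensus1 <- ensemble.mean(RASTER.species.name="Species001", RASTER.stack.name="base",
#     p=pres.locations, a=abs.locations,
#     threshold=-1, threshold.method="spec_sens")
# consensus1$threshold   # threshold calculated for the mean consensus suitability layer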
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/ensemble.mean.R
|
`ensemble.novel.object` <- function(
x=NULL, name="reference1", mask.raster=NULL,
quantiles=FALSE, probs=c(0.05, 0.95),
factors=NULL
)
{
vars <- names(x)
if (length(factors) > 0) {for (i in 1:length(factors)) {vars <- vars[which(names(x) != factors[i])]}}
nv <- length(vars)
minima <- maxima <- numeric(nv)
names(minima) <- names(maxima) <- vars
if(inherits(x, "RasterStack") == T) {
for (i in c(1:nv)) {
vari <- vars[i]
raster.focus <- x[[which(names(x) == vars[i])]]
if (is.null(mask.raster) == F) {raster.focus <- raster::mask(x[[which(names(x) == vars[i])]], mask=mask.raster)}
raster::setMinMax(raster.focus)
print(raster.focus)
if (quantiles == F) {
minV <- raster::minValue(raster.focus)
maxV <- raster::maxValue(raster.focus)
}else{
minV <- as.numeric(raster::quantile(raster.focus, probs=probs[1], na.rm=T))
maxV <- as.numeric(raster::quantile(raster.focus, probs=probs[2], na.rm=T))
}
minima[which(names(minima) == vari)] <- minV
maxima[which(names(maxima) == vari)] <- maxV
}
}
if(inherits(x, "data.frame") == T) {
for (i in 1:nv) {
vari <- vars[i]
xdata <- x[, which(names(x)==vari), drop=F]
if (quantiles == F) {
minV <- min(xdata)
maxV <- max(xdata)
}else{
minV <- as.numeric(stats::quantile(xdata, probs[1], na.rm=T))
maxV <- as.numeric(stats::quantile(xdata, probs[2], na.rm=T))
}
minima[which(names(minima) == vari)] <- minV
maxima[which(names(maxima) == vari)] <- maxV
}
}
novel.object <- list(minima=minima, maxima=maxima, name=name)
return(novel.object)
}
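#
# Minimal illustration (comments only) of what ensemble.novel.object returns: for each
# numeric variable it records the range (or the chosen quantiles). 'ref.data' below is a
# hypothetical data.frame, not part of the package.
#
# ref.data <- data.frame(bio5=c(210, 250, 280), bio12=c(600, 900, 1500))
# novel.ref <- ensemble.novel.object(x=ref.data, name="reference1", quantiles=FALSE)
# novel.ref$minima   # bio5 = 210, bio12 = 600
# novel.ref$maxima   # bio5 = 280, bio12 = 1500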
`ensemble.novel` <- function(
x=NULL, novel.object=NULL,
RASTER.object.name=novel.object$name, RASTER.stack.name = x@title,
RASTER.format="GTiff", RASTER.datatype="INT2S", RASTER.NAflag=-32767,
# KML.out=FALSE, KML.maxpixels=100000, KML.blur=10,
CATCH.OFF=FALSE
)
{
.BiodiversityR <- new.env()
# if (! require(dismo)) {stop("Please install the dismo package")}
if(is.null(x) == T) {stop("value for parameter x is missing (RasterStack object)")}
if(inherits(x, "RasterStack") == F) {stop("x is not a RasterStack object")}
if (is.null(novel.object) == T) {stop("value for parameter novel.object is missing (hint: use the ensemble.novel.object function)")}
if (all.equal(names(novel.object$minima), names(novel.object$maxima)) == F) {{stop("different variable names for maxima and minima")}}
#
#
# if (KML.out==T && raster::isLonLat(x)==F) {
# cat(paste("\n", "NOTE: not possible to generate KML files as Coordinate Reference System (CRS) of stack ", x@title , " is not longitude and latitude", "\n", sep = ""))
# KML.out <- FALSE
# }
#
predict.novel <- function(object=novel.object, newdata=newdata) {
minima <- object$minima
maxima <- object$maxima
newdata <- newdata[, which(names(newdata) %in% names(minima)), drop=F]
result <- as.numeric(rep(NA, nrow(newdata)))
varnames <- names(newdata)
nvars <- ncol(newdata)
# for (i in 1:nrow(newdata)) {
# datai <- newdata[i,,drop=F]
# if (any(is.na(datai)) == F) {
# datai1 <- rbind(minima, datai)
# mins <- sum(as.numeric(apply(datai1, 2, which.min)))
# datai2 <- rbind(maxima, datai)
# maxs <- sum(as.numeric(apply(datai2, 2, which.max)))
# if ((mins+maxs) == 2*nv) {
# result[i] <- 0
# }else{
# result[i] <- 1
# }
# }
# }
for (i in 1:nrow(newdata)) {
datai <- newdata[i,,drop=F]
resulti <- 0
j <- 0
while (resulti < 1 && j <= (nvars-1)) {
j <- j+1
focal.var <- varnames[j]
minj <- minima[which(names(minima) == focal.var)]
if (datai[, j] < minj) {resulti <- 1}
maxj <- maxima[which(names(maxima) == focal.var)]
if (datai[, j] > maxj) {resulti <- 1}
}
result[i] <- resulti
}
p <- as.numeric(result)
p <- trunc(p)
return(p)
}
# check if all variables are present
vars <- names(novel.object$minima)
vars.x <- names(x)
nv <- length(vars)
for (i in 1:nv) {
if (any(vars.x==vars[i]) == F) {stop(paste("explanatory variable '", vars[i], "' not among grid layers of RasterStack x", "\n", sep = ""))}
}
nv <- length(vars.x)
for (i in 1:nv) {
if (any(vars==vars.x[i]) == F) {
cat(paste("\n", "NOTE: RasterStack layer '", vars.x[i], "' was not documented in the novel object data set and will be ignored", "\n", sep = ""))
x <- raster::dropLayer(x, which(names(x) %in% c(vars.x[i]) ))
x <- raster::stack(x)
}
}
# same order of variables in stack as in novel object
minima <- novel.object$minima
minima <- minima[as.numeric(na.omit(match(names(x), names(minima))))]
novel.object$minima <- minima
maxima <- novel.object$maxima
maxima <- maxima[as.numeric(na.omit(match(names(x), names(maxima))))]
novel.object$maxima <- maxima
# avoid problems with non-existing directories and prepare for output
dir.create("ensembles", showWarnings = F)
dir.create("ensembles/novel", showWarnings = F)
# if (KML.out == T) {
# dir.create("kml", showWarnings = F)
# dir.create("kml/novel", showWarnings = F)
# }
if(length(x@title) == 0) {x@title <- "stack1"}
stack.title <- RASTER.stack.name
if (gsub(".", "_", stack.title, fixed=T) != stack.title) {cat(paste("\n", "WARNING: title of stack (", stack.title, ") contains '.'", "\n\n", sep = ""))}
rasterfull <- paste("ensembles/novel/", RASTER.object.name, "_", stack.title , "_novel", sep="")
# kmlfull <- paste("kml/novel/", RASTER.object.name, "_", stack.title , "_novel", sep="")
#
# predict
if (CATCH.OFF == F) {
tryCatch(novel.raster <- raster::predict(object=x, model=novel.object, fun=predict.novel, na.rm=TRUE,
filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format),
error= function(err) {print(paste("prediction of novel zones failed"))},
silent=F)
}else{
novel.raster <- raster::predict(object=x, model=novel.object, fun=predict.novel, na.rm=TRUE,
filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format)
}
# save as integer
# novel.raster <- trunc(novel.raster)
raster::setMinMax(novel.raster)
cat(paste("\n", "raster layer with novel areas (1) created (0 indicates not novel)", "\n", sep = ""))
print(novel.raster)
print(raster::freq(novel.raster))
#
# avoid possible problems with saving of names of the raster layers
# no longer used with default GTiff format since DEC-2022
# raster::writeRaster(novel.raster, filename="working.grd", overwrite=T)
# working.raster <- raster::raster("working.grd")
# names(working.raster) <- paste(RASTER.object.name, "_", stack.title , "_novel", sep="")
# raster::writeRaster(working.raster, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
#
# if (KML.out == T) {
# raster::KML(working.raster, filename=kmlfull, col = c("grey", "green"), colNA = 0,
# blur=KML.blur, maxpixels=KML.maxpixels, overwrite=TRUE, breaks = c(-1, 0, 1))
# }
cat(paste("\n", "novel climate raster provided in folder: ", getwd(), "//ensembles//novel", "\n", sep=""))
# novel.raster <- raster::raster(rasterfull)
return(novel.raster)
}
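#
# Hedged usage sketch (comments only): ensemble.novel flags every cell where at least one
# variable falls outside the reference ranges recorded by ensemble.novel.object
# (1 = novel, 0 = not novel). 'calibration.stack' and 'future.stack' are hypothetical
# RasterStack objects with matching layer names.
#
# novel.ref <- ensemble.novel.object(x=calibration.stack, name="reference1", quantiles=TRUE)
# novel.map <- ensemble.novel(x=future.stack, novel.object=novel.ref)
# raster::freq(novel.map)   # counts of novel (1) versus non-novel (0) cells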
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/ensemble.novel.R
|
`ensemble.outlierThin` <- function(
x, predictors.stack=NULL, k=10, quant=0.95, pca.var=0.95,
return.outliers=FALSE
)
{
.BiodiversityR <- new.env()
#
# create background data
background.data <- raster::extract(predictors.stack, x)
background.data <- data.frame(background.data)
TrainValid <- complete.cases(background.data)
x <- x[TrainValid,]
background.data <- background.data[TrainValid,]
# PCA of scaled variables
rda.result <- vegan::rda(X=background.data, scale=T)
# select number of axes
ax <- 2
while ( (sum(vegan::eigenvals(rda.result)[c(1:ax)])/sum(vegan::eigenvals(rda.result))) < pca.var ) {ax <- ax+1}
cat(paste("\n", "Percentage of variance of the selected axes (1 to ", ax, ") of principal components analysis: ", 100*sum(vegan::eigenvals(rda.result)[c(1:ax)])/sum(vegan::eigenvals(rda.result)), "\n", sep = ""))
rda.scores <- vegan::scores(rda.result, display="sites", scaling=1, choices=c(1:ax))
#
lof.result <- Rlof::lof(rda.scores, k=k, method="euclidean")
outliers.limit <- quantile(lof.result, probs=quant)
#
inliers <- x[lof.result < outliers.limit, ]
outliers <- x[lof.result >= outliers.limit, ]
cat(paste(quant, " quantile limit for local outliers: ", outliers.limit, "\n", sep=""))
#
if (return.outliers == T) {
return(list(inliers=inliers, outliers=outliers))
}else{
return(inliers)
}
}
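#
# Hedged usage sketch (comments only): ensemble.outlierThin removes presence locations with
# large local outlier factors in the PCA-reduced environmental space. 'pres.locations'
# (a two-column data.frame of coordinates) and 'calibration.stack' are hypothetical objects.
#
# thinned <- ensemble.outlierThin(x=pres.locations, predictors.stack=calibration.stack,
#     k=10, quant=0.95, pca.var=0.95, return.outliers=TRUE)
# nrow(thinned$inliers)    # locations retained
# nrow(thinned$outliers)   # locations flagged as local outliers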
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/ensemble.outlierThin.R
|
`ensemble.pairs` <- function(
x=NULL, a=NULL, an=10000
)
{
# application of the graphics::pairs function including auxiliary functions shown in the documentation (graphics 3.4.3)
#
# raster package has a similar function
## obtained from graphics::pairs example
## put histograms on the diagonal
panel.hist <- function(x, ...){
usr <- graphics::par("usr"); on.exit(graphics::par(usr))
graphics::par(usr = c(usr[1:2], 0, 1.5) )
h <- graphics::hist(x, plot = FALSE)
breaks <- h$breaks; nB <- length(breaks)
y <- h$counts; y <- y/max(y)
graphics::rect(breaks[-nB], 0, breaks[-1], y, col = "green", ...)
}
## obtained from graphics::pairs example
## modified to limit reduction of size to minimum 0.5 and show sign of correlation
## put (absolute) correlations on the upper panels,
## with size proportional to the correlations.
panel.cor <- function(x, y, digits = 2, prefix = "", cex.cor, ...){
usr <- graphics::par("usr"); on.exit(graphics::par(usr))
graphics::par(usr = c(0, 1, 0, 1))
r <- abs(cor(x, y))
txt <- format(c(r, 0.123456789), digits = digits)[1]
## modified next 3 lines
r1 <- max(0.5, abs(cor(x, y)))
r2 <- cor(x, y)
txt <- format(c(r2, 0.123456789), digits = digits)[1]
##
txt <- paste0(prefix, txt)
if(missing(cex.cor)) cex.cor <- 0.8/graphics::strwidth(txt)
## modified final line
## text(0.5, 0.5, txt, cex = cex.cor * r)
graphics::text(0.5, 0.5, txt, cex = cex.cor * r1)
}
if(is.null(x) == T) {stop("value for parameter x is missing (RasterStack object)")}
if(inherits(x,"RasterStack") == F) {stop("x is not a RasterStack object")}
if(is.null(a) == F) {names(a) <- c("x", "y")}
if (is.null(a) == T) {a <- dismo::randomPoints(x[[1]], n=an, p=NULL, excludep=F)}
x.data <- raster::extract(x, y=a)
x <- as.data.frame(x.data)
x <- na.omit(x)
graphics::pairs(x, lower.panel=graphics::panel.smooth, diag.panel=panel.hist, upper.panel=panel.cor)
}
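#
# Hedged usage sketch (comments only): ensemble.pairs samples background points from the
# first layer, extracts all layer values and shows pairwise scatterplots with histograms on
# the diagonal and correlation coefficients (text size proportional to |r|, minimum 0.5) in
# the upper panels. 'calibration.stack' is a hypothetical RasterStack object.
#
# ensemble.pairs(x=calibration.stack, an=10000)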
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/ensemble.pairs.R
|
`ensemble.plot` <- function(
RASTER.species.name="Species001", RASTER.stack.name="base",
plot.method=c("suitability", "presence", "count",
"consensussuitability", "consensuspresence", "consensuscount", "consensussd"),
dev.new.width=7, dev.new.height=7,
main=paste(RASTER.species.name, " ", plot.method, " for ", RASTER.stack.name, sep=""),
positive.filters=c("tif"), negative.filters=c("xml"),
p=NULL, a=NULL,
threshold=-1,
threshold.method="spec_sens", threshold.sensitivity=0.9, threshold.PresenceAbsence=FALSE,
abs.breaks=6, abs.col=NULL,
pres.breaks=6, pres.col=NULL,
sd.breaks=9, sd.col=NULL,
absencePresence.col=NULL,
count.col=NULL, ...
)
{
plot.method <- match.arg(plot.method)
.BiodiversityR <- new.env()
# if (! require(dismo)) {stop("Please install the dismo package")}
if (threshold < 0 && plot.method=="suitability") {
if (is.null(p)==T || is.null(a)==T) {stop(paste("Please provide locations p and a to calculate thresholds", "\n", sep = ""))}
}
if(is.null(p) == F) {names(p) <- c("x", "y")}
if(is.null(a) == F) {names(a) <- c("x", "y")}
#
# get raster files
# basic assumption is that different ensemble files are named as species_ENSEMBLE_1, species_ENSEMBLE_2, ... i.e. output from ensemble.batch
species_focus <- RASTER.species.name
if (gsub(".", "_", RASTER.species.name, fixed=T) != RASTER.species.name) {cat(paste("\n", "WARNING: species name (", RASTER.species.name, ") contains '.'", "\n\n", sep = ""))}
#
if (plot.method == "suitability") {
ensemble.files <- list.files(path=paste(getwd(), "//ensembles//suitability", sep=""), pattern=species_focus, full.names=TRUE)
if (length(ensemble.files) < 1) {
cat(paste("\n", "NOTE: not meaningful to plot suitability as there are no files available for species: ", RASTER.species.name, "\n", sep = ""))
return(NULL)
}
}
if (plot.method == "presence") {
ensemble.files <- list.files(path=paste(getwd(), "//ensembles//presence", sep=""), pattern=species_focus, full.names=TRUE)
if (length(ensemble.files) < 1) {
cat(paste("\n", "NOTE: not meaningful to plot presence as there are no files available for species: ", RASTER.species.name, "\n", sep = ""))
return(NULL)
}
}
if (plot.method == "count") {
ensemble.files <- list.files(path=paste(getwd(), "//ensembles//count", sep=""), pattern=species_focus, full.names=TRUE)
if (length(ensemble.files) < 1) {
cat(paste("\n", "NOTE: not meaningful to plot counts as there are no files available for this species: ", RASTER.species.name, "\n", sep = ""))
return(NULL)
}
}
if (plot.method == "consensussuitability") {
ensemble.files <- list.files(path=paste(getwd(), "//ensembles//consensussuitability", sep=""), pattern=species_focus, full.names=TRUE)
if (length(ensemble.files) < 1) {
cat(paste("\n", "NOTE: not meaningful to plot consensus suitability as there are no files available for species: ", RASTER.species.name, "\n", sep = ""))
return(NULL)
}
}
if (plot.method == "consensuspresence") {
ensemble.files <- list.files(path=paste(getwd(), "//ensembles//consensuspresence", sep=""), pattern=species_focus, full.names=TRUE)
if (length(ensemble.files) < 1) {
cat(paste("\n", "NOTE: not meaningful to plot consensus presence as there are no files available for species: ", RASTER.species.name, "\n", sep = ""))
return(NULL)
}
}
if (plot.method == "consensuscount") {
ensemble.files <- list.files(path=paste(getwd(), "//ensembles//consensuscount", sep=""), pattern=species_focus, full.names=TRUE)
if (length(ensemble.files) < 1) {
cat(paste("\n", "NOTE: not meaningful to plot consensus counts as there are no files available for this species: ", RASTER.species.name, "\n", sep = ""))
return(NULL)
}
}
if (plot.method == "consensussd") {
ensemble.files <- list.files(path=paste(getwd(), "//ensembles//consensussd", sep=""), pattern=species_focus, full.names=TRUE)
if (length(ensemble.files) < 1) {
cat(paste("\n", "NOTE: not meaningful to plot consensus standard deviations as there are no files available for this species: ", RASTER.species.name, "\n", sep = ""))
return(NULL)
}
}
#
RASTER.stack.name2 <- RASTER.stack.name
if (gsub(".", "_", RASTER.stack.name, fixed=T) != RASTER.stack.name) {cat(paste("\n", "WARNING: title of stack (", RASTER.stack.name, ") contains '.'", "\n\n", sep = ""))}
if (RASTER.stack.name != "") {
ensemble.files <- ensemble.files[grepl(pattern=RASTER.stack.name, x=ensemble.files)]
RASTER.stack.name2 <- paste("_", RASTER.stack.name, sep="")
if (length(ensemble.files) < 1) {
cat(paste("\n", "NOTE: not meaningful to plot as there are no raster files for this stack: ", RASTER.stack.name, "\n", sep = ""))
return(NULL)
}
}
for (i in 1:length(positive.filters)) {
ensemble.files <- ensemble.files[grepl(pattern=positive.filters[i], x=ensemble.files)]
}
for (i in 1:length(negative.filters)) {
ensemble.files <- ensemble.files[grepl(pattern=negative.filters[i], x=ensemble.files) == FALSE]
}
if (length(ensemble.files) < 1) {
cat(paste("\n", "NOTE: not meaningful to plot as there are no raster files available for the specified filters", "\n", sep = ""))
return(NULL)
}
#
cat(paste("\n", "Files used to create plots", "\n\n", sep = ""))
print(ensemble.files)
subtitle <- NULL
threshold.mean <- threshold
for (i in 1:length(ensemble.files)) {
raster.focus <- raster::raster(ensemble.files[i])
if (length(ensemble.files) > 1) {subtitle <- ensemble.files[i]}
if (plot.method %in% c("suitability", "consensussuitability")) {
# thresholds apply to probabilities, also plot for probabilities
raster.focus <- raster.focus / 1000
raster.min <- raster::minValue(raster.focus)
raster.max <- raster::maxValue(raster.focus)
if (threshold.mean < 0) {
eval1 <- NULL
if (ncol(p) == 3) {p <- p[p[,1]==species_focus, c(2:3)]}
if (ncol(a) == 3) {a <- a[a[,1]==species_focus, c(2:3)]}
cat(paste("\n", "Evaluation of suitability raster layer at locations p and a", "\n", sep = ""))
cat(paste("Note that threshold is only meaningful for calibration stack suitabilities", "\n\n", sep = ""))
pres_consensus <- raster::extract(raster.focus, p)
pres_consensus <- pres_consensus[is.na(pres_consensus)==F]
abs_consensus <- raster::extract(raster.focus, a)
abs_consensus <- abs_consensus[is.na(abs_consensus)==F]
eval1 <- dismo::evaluate(p=pres_consensus, a=abs_consensus)
print(eval1)
threshold.mean <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity, threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres_consensus, Abs=abs_consensus)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(threshold.mean))
}
if (abs.breaks > 0) {
seq1 <- round(seq(from=raster.min, to=threshold.mean, length.out=abs.breaks), 4)
seq1 <- seq1[1:(abs.breaks-1)]
seq1 <- unique(seq1)
seq2 <- round(seq(from = threshold.mean, to = raster.max, length.out=pres.breaks), 4)
seq2 <- unique(seq2)
if (dev.new.width > 0 && dev.new.height > 0) {grDevices::dev.new(width=dev.new.width, height=dev.new.height)}
if (is.null(abs.col) == T) {abs.col <- grDevices::rainbow(n=length(seq1), start=0, end =1/6)}
if (is.null(pres.col) == T) {pres.col <- grDevices::rainbow(n=length(seq2)-1, start=3/6, end=4/6)}
raster::plot(raster.focus, breaks = c(seq1, seq2), col = c(abs.col, pres.col), colNA = NA,
legend.shrink=0.8, cex.axis=0.8, main=main, sub=subtitle, ...)
}else{
seq1 <- NULL
abs.col <- NULL
seq2 <- round(seq(from = threshold.mean, to = raster.max, length.out=pres.breaks), 4)
seq2 <- unique(seq2)
if (is.null(pres.col) == T) {pres.col <- grDevices::rainbow(n=length(seq2)-1, start=3/6, end=4/6)}
if (dev.new.width > 0 && dev.new.height > 0) {grDevices::dev.new(width=dev.new.width, height=dev.new.height)}
raster::plot(raster.focus, breaks = seq2, col = pres.col, colNA = NA,
lab.breaks=seq2, legend.shrink=0.8, cex.axis=0.8, main=main, sub=subtitle, ...)
}
}
if (plot.method %in% c("presence", "consensuspresence")) {
if (dev.new.width > 0 && dev.new.height > 0) {grDevices::dev.new(width=dev.new.width, height=dev.new.height)}
if (is.null(absencePresence.col) == T) {absencePresence.col <- c("grey", "green")}
if (length(absencePresence.col) == 2) {
raster::plot(raster.focus, breaks=c(0, 0.5, 1), col = absencePresence.col, colNA = NA,
legend.shrink=0.6, cex.axis=0.8, lab.breaks=c("", "a", "p"), main=main, sub=subtitle, ...)
}
if (length(absencePresence.col) == 1) {
raster::plot(raster.focus, breaks=c(0.5, 1), col = absencePresence.col, colNA = NA,
legend.shrink=0.6, cex.axis=0.8, lab.breaks=c("a", "p"), main=main, sub=subtitle, ...)
}
}
if (plot.method %in% c("count", "consensuscount")) {
nmax <- raster::maxValue(raster.focus)
if (dev.new.width > 0 && dev.new.height > 0) {grDevices::dev.new(width=dev.new.width, height=dev.new.height)}
if (is.null(count.col) == T) {
if (nmax > 3) {
count.col <- c("grey", "black", grDevices::rainbow(n=(nmax-2), start=0, end=1/3), "blue")
}else{
count.col <- c("grey", grDevices::rainbow(n=nmax, start=0, end=1/3))
}
}
seq1 <- seq(from=-1, to=nmax, by=1)
if (length(count.col) == (length(seq1)-1)) {
raster::plot(raster.focus, breaks=seq(from=-1, to=nmax, by=1), col=count.col,
legend.shrink=0.8, cex.axis=0.8, main=main, sub=subtitle, ...)
}else{
raster::plot(raster.focus, breaks=c(0.1, seq(from=1, to=nmax, by=1)), col=count.col,
lab.breaks=seq(from=0, to=nmax, by=1), legend.shrink=0.8, cex.axis=0.8, main=main, sub=subtitle, ...)
}
}
if (plot.method == "consensussd") {
sd.max <- raster::maxValue(raster.focus)
seq1 <- seq(from = 0, to = sd.max, length.out = sd.breaks)
if (is.null(sd.col) == T) {sd.col <- grDevices::rainbow(n = length(seq1)-1, start = 1/6, end = 4/6)}
raster::plot(raster.focus, breaks=seq1, col=sd.col,
legend.shrink=0.8, cex.axis=0.8, main=main, sub=subtitle, ...)
}
# if (maptools.boundaries == T) {
# utils::data("wrld_simpl", package="maptools", envir=.BiodiversityR)
# maptools.wrld_simpl <- eval(as.name("wrld_simpl"), envir=.BiodiversityR)
# raster::plot(maptools.wrld_simpl, add=T, border=maptools.col)
# }
}
if (length(ensemble.files)==1 && plot.method %in% c("suitability", "consensussuitability")) {return(list(threshold=threshold.mean, breaks=c(seq1, seq2),
col=c(grDevices::rainbow(n=length(seq1), start = 0, end = 1/6), grDevices::rainbow(n=length(seq2)-1, start=3/6, end=4/6))))}
}
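#
# Hedged usage sketch (comments only): ensemble.plot maps one of the raster outputs written
# earlier to the 'ensembles' subfolders of the working directory. The hypothetical
# 'pres.locations' and 'abs.locations' data.frames are only needed when a threshold must be
# calculated for suitability maps (threshold < 0).
#
# ensemble.plot(RASTER.species.name="Species001", RASTER.stack.name="base",
#     plot.method="consensussuitability",
#     p=pres.locations, a=abs.locations, threshold=-1,
#     abs.breaks=6, pres.breaks=6)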
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/ensemble.plot.R
|
`ensemble.raster` <- function(
xn=NULL,
models.list=NULL,
input.weights=models.list$output.weights,
thresholds=models.list$thresholds,
RASTER.species.name=models.list$species.name,
RASTER.stack.name=xn@title,
RASTER.format="GTiff", RASTER.datatype="INT2S", RASTER.NAflag=-32767,
RASTER.models.overwrite=TRUE,
# KML.out=FALSE, KML.maxpixels=100000, KML.blur=10,
evaluate=FALSE, SINK=FALSE,
p=models.list$p, a=models.list$a,
pt=models.list$pt, at=models.list$at,
CATCH.OFF=FALSE
)
{
.BiodiversityR <- new.env()
# if (! require(dismo)) {stop("Please install the dismo package")}
if (is.null(xn) == T) {stop("value for parameter xn is missing (RasterStack object)")}
if(inherits(xn, "RasterStack") == F) {stop("xn is not a RasterStack object")}
if (is.null(models.list) == T) {stop("provide 'models.list' as models will not be recalibrated and retested")}
if (is.null(input.weights) == T) {input.weights <- models.list$output.weights}
if (is.null(thresholds) == T) {stop("provide 'thresholds' as models will not be recalibrated and retested")}
thresholds.raster <- thresholds
if(is.null(p) == F) {
p <- data.frame(p)
names(p) <- c("x", "y")
}
if(is.null(a) == F) {
a <- data.frame(a)
names(a) <- c("x", "y")
}
if(is.null(pt) == F) {
pt <- data.frame(pt)
names(pt) <- c("x", "y")
}
if(is.null(at) == F) {
at <- data.frame(at)
names(at) <- c("x", "y")
}
#
# if (KML.out==T && raster::isLonLat(xn)==F) {
# cat(paste("\n", "NOTE: not possible to generate KML files as Coordinate Reference System (CRS) of stack ", xn@title , " is not longitude and latitude", "\n", sep = ""))
# KML.out <- FALSE
# }
#
retest <- FALSE
if (evaluate == T) {
if (is.null(p)==T || is.null(a)==T) {
cat(paste("\n", "NOTE: not possible to evaluate the models since locations p and a are not provided", "\n", sep = ""))
evaluate <- FALSE
}else{
threshold.method <- models.list$threshold.method
threshold.sensitivity <- models.list$threshold.sensitivity
threshold.PresenceAbsence <- models.list$threshold.PresenceAbsence <- FALSE
}
if (is.null(pt)==F && is.null(at)==F) {
if(identical(pt, p) == F || identical(at, a) == F) {retest <- TRUE}
}
}
#
# create output file
dir.create("outputs", showWarnings = F)
paste.file <- paste(getwd(), "/outputs/", RASTER.species.name, "_output.txt", sep="")
OLD.SINK <- TRUE
if (sink.number(type="output") == 0) {OLD.SINK <- F}
if (SINK==T && OLD.SINK==F) {
if (file.exists(paste.file) == F) {
cat(paste("\n", "NOTE: results captured in file: ", paste.file, "\n", sep = ""))
}else{
cat(paste("\n", "NOTE: results appended in file: ", paste.file, "\n", sep = ""))
}
cat(paste("\n\n", "RESULTS (ensemble.raster function)", "\n\n", sep=""), file=paste.file, append=T)
sink(file=paste.file, append=T)
cat(paste(date(), "\n", sep=""))
print(match.call())
}
#
# check if all variables are present
vars <- models.list$vars
vars.xn <- names(xn)
nv <- length(vars)
for (i in 1:nv) {
if (any(vars.xn==vars[i]) == F) {stop(paste("explanatory variable '", vars[i], "' not among grid layers of RasterStack xn", "\n", sep = ""))}
}
for (i in 1:length(vars.xn) ) {
if (any(vars==vars.xn[i]) == F) {
cat(paste("\n", "NOTE: RasterStack layer '", vars.xn[i], "' was not calibrated as explanatory variable", "\n", sep = ""))
xn <- raster::dropLayer(xn, which(names(xn) == vars.xn[i]))
xn <- raster::stack(xn)
}
}
#
# set minimum and maximum values for xn
for (i in 1:raster::nlayers(xn)) {
xn[[i]] <- raster::setMinMax(xn[[i]])
}
# declare categorical layers for xn
factors <- models.list$factors
if(is.null(factors) == F) {
for (i in 1:length(factors)) {
j <- which(names(xn) == factors[i])
xn[[j]] <- raster::as.factor(xn[[j]])
}
}
if(length(factors) == 0) {factors <- NULL}
factlevels <- models.list$factlevels
dummy.vars <- models.list$dummy.vars
dummy.vars.noDOMAIN <- models.list$dummy.vars.noDOMAIN
#
# KML.blur <- trunc(KML.blur)
# if (KML.blur < 1) {KML.blur <- 1}
#
if (is.null(input.weights) == F) {
MAXENT <- max(c(input.weights["MAXENT"], -1), na.rm=T)
MAXNET <- max(c(input.weights["MAXNET"], -1), na.rm=T)
MAXLIKE <- max(c(input.weights["MAXLIKE"], -1), na.rm=T)
GBM <- max(c(input.weights["GBM"], -1), na.rm=T)
GBMSTEP <- max(c(input.weights["GBMSTEP"], -1), na.rm=T)
RF <- max(c(input.weights["RF"], -1), na.rm=T)
CF <- max(c(input.weights["CF"], -1), na.rm=T)
GLM <- max(c(input.weights["GLM"], -1), na.rm=T)
GLMSTEP <- max(c(input.weights["GLMSTEP"], -1), na.rm=T)
GAM <- max(c(input.weights["GAM"], -1), na.rm=T)
GAMSTEP <- max(c(input.weights["GAMSTEP"], -1), na.rm=T)
MGCV <- max(c(input.weights["MGCV"], -1), na.rm=T)
MGCVFIX <- max(c(input.weights["MGCVFIX"], -1), na.rm=T)
EARTH <- max(c(input.weights["EARTH"], -1), na.rm=T)
RPART <- max(c(input.weights["RPART"], -1), na.rm=T)
NNET <- max(c(input.weights["NNET"], -1), na.rm=T)
FDA <- max(c(input.weights["FDA"], -1), na.rm=T)
SVM <- max(c(input.weights["SVM"], -1), na.rm=T)
SVME <- max(c(input.weights["SVME"], -1), na.rm=T)
GLMNET <- max(c(input.weights["GLMNET"], -1), na.rm=T)
BIOCLIM.O <- max(c(input.weights["BIOCLIM.O"], -1), na.rm=T)
BIOCLIM <- max(c(input.weights["BIOCLIM"], -1), na.rm=T)
DOMAIN <- max(c(input.weights["DOMAIN"], -1), na.rm=T)
MAHAL <- max(c(input.weights["MAHAL"], -1), na.rm=T)
MAHAL01 <- max(c(input.weights["MAHAL01"], -1), na.rm=T)
}
#
MAXENT.OLD <- MAXNET.OLD <- MAXLIKE.OLD <- GBM.OLD <- GBMSTEP.OLD <- RF.OLD <- CF.OLD <- GLM.OLD <- GLMSTEP.OLD <- GAM.OLD <- GAMSTEP.OLD <- MGCV.OLD <- NULL
MGCVFIX.OLD <- EARTH.OLD <- RPART.OLD <- NNET.OLD <- FDA.OLD <- SVM.OLD <- SVME.OLD <- GLMNET.OLD <- BIOCLIM.O.OLD <- BIOCLIM.OLD <- DOMAIN.OLD <- MAHAL.OLD <- MAHAL01.OLD <- NULL
# probit models, NULL if no probit model fitted
MAXENT.PROBIT.OLD <- MAXNET.PROBIT.OLD <- MAXLIKE.PROBIT.OLD <- GBM.PROBIT.OLD <- GBMSTEP.PROBIT.OLD <- RF.PROBIT.OLD <- CF.PROBIT.OLD <- GLM.PROBIT.OLD <- GLMSTEP.PROBIT.OLD <- GAM.PROBIT.OLD <- GAMSTEP.PROBIT.OLD <- MGCV.PROBIT.OLD <- NULL
MGCVFIX.PROBIT.OLD <- EARTH.PROBIT.OLD <- RPART.PROBIT.OLD <- NNET.PROBIT.OLD <- FDA.PROBIT.OLD <- SVM.PROBIT.OLD <- SVME.PROBIT.OLD <- GLMNET.PROBIT.OLD <- BIOCLIM.O.PROBIT.OLD <- BIOCLIM.PROBIT.OLD <- DOMAIN.PROBIT.OLD <- MAHAL.PROBIT.OLD <- MAHAL01.PROBIT.OLD <- NULL
if (is.null(models.list) == F) {
if (is.null(models.list$MAXENT) == F) {MAXENT.OLD <- models.list$MAXENT}
if (is.null(models.list$MAXNET) == F) {
MAXNET.OLD <- models.list$MAXNET
MAXNET.clamp <- models.list$formulae$MAXNET.clamp
MAXNET.type <- models.list$formulae$MAXNET.type
}
if (is.null(models.list$MAXLIKE) == F) {
MAXLIKE.OLD <- models.list$MAXLIKE
MAXLIKE.formula <- models.list$formulae$MAXLIKE.formula
}
if (is.null(models.list$GBM) == F) {GBM.OLD <- models.list$GBM}
if (is.null(models.list$GBMSTEP) == F) {GBMSTEP.OLD <- models.list$GBMSTEP}
if (is.null(models.list$RF) == F) {RF.OLD <- models.list$RF}
if (is.null(models.list$CF) == F) {CF.OLD <- models.list$CF}
if (is.null(models.list$GLM) == F) {GLM.OLD <- models.list$GLM}
if (is.null(models.list$GLMSTEP) == F) {GLMSTEP.OLD <- models.list$GLMSTEP}
if (is.null(models.list$GAM) == F) {GAM.OLD <- models.list$GAM}
if (is.null(models.list$GAMSTEP) == F) {GAMSTEP.OLD <- models.list$GAMSTEP}
if (is.null(models.list$MGCV) == F) {MGCV.OLD <- models.list$MGCV}
if (is.null(models.list$MGCVFIX) == F) {MGCVFIX.OLD <- models.list$MGCVFIX}
if (is.null(models.list$EARTH) == F) {EARTH.OLD <- models.list$EARTH}
if (is.null(models.list$RPART) == F) {RPART.OLD <- models.list$RPART}
if (is.null(models.list$NNET) == F) {NNET.OLD <- models.list$NNET}
if (is.null(models.list$FDA) == F) {FDA.OLD <- models.list$FDA}
if (is.null(models.list$SVM) == F) {SVM.OLD <- models.list$SVM}
if (is.null(models.list$SVME) == F) {SVME.OLD <- models.list$SVME}
if (is.null(models.list$GLMNET) == F) {
GLMNET.OLD <- models.list$GLMNET
GLMNET.class <- models.list$formulae$GLMNET.class
}
if (is.null(models.list$BIOCLIM.O) == F) {BIOCLIM.O.OLD <- models.list$BIOCLIM.O}
if (is.null(models.list$BIOCLIM) == F) {BIOCLIM.OLD <- models.list$BIOCLIM}
if (is.null(models.list$DOMAIN) == F) {DOMAIN.OLD <- models.list$DOMAIN}
if (is.null(models.list$MAHAL) == F) {MAHAL.OLD <- models.list$MAHAL}
if (is.null(models.list$MAHAL01) == F) {
MAHAL01.OLD <- models.list$MAHAL01
MAHAL.shape <- models.list$formulae$MAHAL.shape
}
# probit models
if (is.null(models.list$MAXENT.PROBIT) == F) {MAXENT.PROBIT.OLD <- models.list$MAXENT.PROBIT}
if (is.null(models.list$MAXNET.PROBIT) == F) {MAXNET.PROBIT.OLD <- models.list$MAXNET.PROBIT}
if (is.null(models.list$MAXLIKE.PROBIT) == F) {MAXLIKE.PROBIT.OLD <- models.list$MAXLIKE.PROBIT}
if (is.null(models.list$GBM.PROBIT) == F) {GBM.PROBIT.OLD <- models.list$GBM.PROBIT}
if (is.null(models.list$GBMSTEP.PROBIT) == F) {GBMSTEP.PROBIT.OLD <- models.list$GBMSTEP.PROBIT}
if (is.null(models.list$RF.PROBIT) == F) {RF.PROBIT.OLD <- models.list$RF.PROBIT}
if (is.null(models.list$CF.PROBIT) == F) {CF.PROBIT.OLD <- models.list$CF.PROBIT}
if (is.null(models.list$GLM.PROBIT) == F) {GLM.PROBIT.OLD <- models.list$GLM.PROBIT}
if (is.null(models.list$GLMSTEP.PROBIT) == F) {GLMSTEP.PROBIT.OLD <- models.list$GLMSTEP.PROBIT}
if (is.null(models.list$GAM.PROBIT) == F) {GAM.PROBIT.OLD <- models.list$GAM.PROBIT}
if (is.null(models.list$GAMSTEP.PROBIT) == F) {GAMSTEP.PROBIT.OLD <- models.list$GAMSTEP.PROBIT}
if (is.null(models.list$MGCV.PROBIT) == F) {MGCV.PROBIT.OLD <- models.list$MGCV.PROBIT}
if (is.null(models.list$MGCVFIX.PROBIT) == F) {MGCVFIX.PROBIT.OLD <- models.list$MGCVFIX.PROBIT}
if (is.null(models.list$EARTH.PROBIT) == F) {EARTH.PROBIT.OLD <- models.list$EARTH.PROBIT}
if (is.null(models.list$RPART.PROBIT) == F) {RPART.PROBIT.OLD <- models.list$RPART.PROBIT}
if (is.null(models.list$NNET.PROBIT) == F) {NNET.PROBIT.OLD <- models.list$NNET.PROBIT}
if (is.null(models.list$FDA.PROBIT) == F) {FDA.PROBIT.OLD <- models.list$FDA.PROBIT}
if (is.null(models.list$SVM.PROBIT) == F) {SVM.PROBIT.OLD <- models.list$SVM.PROBIT}
if (is.null(models.list$SVME.PROBIT) == F) {SVME.PROBIT.OLD <- models.list$SVME.PROBIT}
if (is.null(models.list$GLMNET.PROBIT) == F) {GLMNET.PROBIT.OLD <- models.list$GLMNET.PROBIT}
if (is.null(models.list$BIOCLIM.O.PROBIT) == F) {BIOCLIM.O.PROBIT.OLD <- models.list$BIOCLIM.O.PROBIT}
if (is.null(models.list$BIOCLIM.PROBIT) == F) {BIOCLIM.PROBIT.OLD <- models.list$BIOCLIM.PROBIT}
if (is.null(models.list$DOMAIN.PROBIT) == F) {DOMAIN.PROBIT.OLD <- models.list$DOMAIN.PROBIT}
if (is.null(models.list$MAHAL.PROBIT) == F) {MAHAL.PROBIT.OLD <- models.list$MAHAL.PROBIT}
if (is.null(models.list$MAHAL01.PROBIT) == F) {MAHAL01.PROBIT.OLD <- models.list$MAHAL01.PROBIT}
}
if (MAXENT > 0) {
jar <- paste(system.file(package="dismo"), "/java/maxent.jar", sep='')
if (!file.exists(jar)) {stop('maxent program is missing: ', jar, '\nPlease download it here: http://www.cs.princeton.edu/~schapire/maxent/')}
}
if (MAXNET > 0) {
if (! requireNamespace("maxnet")) {stop("Please install the maxnet package")}
predict.maxnet2 <- function(object, newdata, clamp=F, type=c("cloglog")) {
p <- predict(object=object, newdata=newdata, clamp=clamp, type=type)
return(as.numeric(p))
}
}
if (MAXLIKE > 0) {
if (! requireNamespace("maxlike")) {stop("Please install the maxlike package")}
# MAXLIKE.formula <- ensemble.formulae(xn, factors=factors)$MAXLIKE.formula
# environment(MAXLIKE.formula) <- .BiodiversityR
}
if (GBM > 0) {
if (! requireNamespace("gbm")) {stop("Please install the gbm package")}
requireNamespace("splines")
}
if (RF > 0) {
# get the probabilities from RF
predict.RF <- function(object, newdata) {
p <- predict(object=object, newdata=newdata, type="response")
return(as.numeric(p))
}
}
if (CF > 0) {
# get the probabilities from CF (party::cforest)
# ensure that cases with missing values are removed
if (! requireNamespace("party")) {stop("Please install the party package")}
predict.CF <- function(object, newdata) {
# avoid problems with single variables, especially with raster::predict
for (i in 1:ncol(newdata)) {
if (is.integer(newdata[, i])) {newdata[, i] <- as.numeric(newdata[, i])}
}
p1 <- predict(object=object, newdata=newdata, type="prob")
p <- numeric(length(p1))
for (i in 1:length(p1)) {p[i] <- p1[[i]][2]}
return(as.numeric(p))
}
}
if (MGCV > 0 || MGCVFIX > 0) {
# get the probabilities from MGCV
predict.MGCV <- function(object, newdata, type="response") {
p <- mgcv::predict.gam(object=object, newdata=newdata, type=type)
return(as.numeric(p))
}
}
if (EARTH > 0) {
# get the probabilities from earth
predict.EARTH <- function(object, newdata, type="response") {
p <- predict(object=object, newdata=newdata, type=type)
return(as.numeric(p))
}
}
if (NNET > 0) {
# get the probabilities from nnet
predict.NNET <- function(object, newdata, type="raw") {
p <- predict(object=object, newdata=newdata, type=type)
return(as.numeric(p))
}
}
if (SVME > 0) {
# get the probabilities from svm
predict.SVME <- function(model, newdata) {
p <- predict(model, newdata, probability=T)
return(attr(p, "probabilities")[,1])
}
}
if (GLMNET > 0) {
if (! requireNamespace("glmnet")) {stop("Please install the glmnet package")}
# get the mean probabilities from glmnet
predict.GLMNET <- function(model, newdata, GLMNET.class=FALSE) {
newdata <- as.matrix(newdata)
if (GLMNET.class == TRUE) {
p <- predict(model, newx=newdata, type="class", exact=T)
n.obs <- nrow(p)
nv <- ncol(p)
result <- numeric(n.obs)
for (i in 1:n.obs) {
for (j in 1:nv) {
if(p[i, j] == 1) {result[i] <- result[i] + 1}
}
}
result <- result/nv
return(result)
}else{
p <- predict(model, newx=newdata, type="response", exact=T)
n.obs <- nrow(p)
nv <- ncol(p)
result <- numeric(n.obs)
for (i in 1:n.obs) {
for (j in 1:nv) {
result[i] <- result[i] + p[i, j]
}
}
result <- result/nv
return(result)
}
}
}
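# Note on the GLMNET helper above: predict() for a glmnet object with newx and
# type="response" returns one column per lambda value along the regularisation path,
# so the helper averages across those columns (or, for type="class", takes the
# proportion of presence classifications) to obtain a single suitability per cell.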
if (BIOCLIM.O > 0) {
# get the probabilities for original BIOCLIM
predict.BIOCLIM.O <- function(object, newdata) {
lower.limits <- object$lower.limits
upper.limits <- object$upper.limits
minima <- object$minima
maxima <- object$maxima
#
newdata <- newdata[, which(names(newdata) %in% names(lower.limits)), drop=F]
result <- as.numeric(rep(NA, nrow(newdata)))
varnames <- names(newdata)
nvars <- ncol(newdata)
#
for (i in 1:nrow(newdata)) {
datai <- newdata[i,,drop=F]
resulti <- 1
j <- 0
while (resulti > 0 && j <= (nvars-1)) {
j <- j+1
focal.var <- varnames[j]
if (resulti == 1) {
lowerj <- lower.limits[which(names(lower.limits) == focal.var)]
if (datai[, j] < lowerj) {resulti <- 0.5}
upperj <- upper.limits[which(names(upper.limits) == focal.var)]
if (datai[, j] > upperj) {resulti <- 0.5}
}
minj <- minima[which(names(minima) == focal.var)]
if (datai[, j] < minj) {resulti <- 0}
maxj <- maxima[which(names(maxima) == focal.var)]
if (datai[, j] > maxj) {resulti <- 0}
}
result[i] <- resulti
}
p <- as.numeric(result)
return(p)
}
}
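# Illustration of the BIOCLIM.O scoring above (comments only): each cell starts at 1,
# is lowered to 0.5 when any variable falls outside the calibrated percentile limits
# (lower.limits/upper.limits) but within the absolute minima/maxima, and to 0 as soon
# as any variable falls outside the absolute range. For one variable with
# lower.limit=10, upper.limit=20, minimum=5 and maximum=25:
# value 15 -> 1, value 22 -> 0.5, value 30 -> 0.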
if (MAHAL > 0) {
# get the probabilities from mahal
predict.MAHAL <- function(model, newdata, PROBIT) {
p <- dismo::predict(object=model, x=newdata)
if (PROBIT == F) {
p[p<0] <- 0
p[p>1] <- 1
}
return(as.numeric(p))
}
}
if (MAHAL01 > 0) {
# get the probabilities from transformed mahal
predict.MAHAL01 <- function(model, newdata, MAHAL.shape) {
p <- dismo::predict(object=model, x=newdata)
p <- p - 1 - MAHAL.shape
p <- abs(p)
p <- MAHAL.shape / p
return(p)
}
}
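# Illustration of the MAHAL01 transformation above (comments only): the raw Mahalanobis
# prediction p (1 at the centroid, decreasing and possibly strongly negative away from
# it) is rescaled to (0, 1] as MAHAL.shape / abs(p - 1 - MAHAL.shape). With
# MAHAL.shape = 1: p = 1 -> 1 / abs(-1) = 1; p = -9 -> 1 / abs(-11) = 1/11.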
#
output.weights <- input.weights
prediction.failures <- FALSE
#
# avoid problems with non-existing directories
dir.create("models", showWarnings = F)
dir.create("ensembles", showWarnings = F)
dir.create("ensembles/suitability", showWarnings = F)
dir.create("ensembles/count", showWarnings = F)
dir.create("ensembles/presence", showWarnings = F)
# if(KML.out == T) {
# dir.create("kml", showWarnings = F)
# dir.create("kml/suitability", showWarnings = F)
# dir.create("kml/count", showWarnings = F)
# dir.create("kml/presence", showWarnings = F)
# }
#
stack.title <- RASTER.stack.name
if (gsub(".", "_", stack.title, fixed=T) != stack.title) {cat(paste("\n", "WARNING: title of stack (", stack.title, ") contains '.'", "\n\n", sep = ""))}
#
raster.title <- paste(RASTER.species.name, "_", stack.title , sep="")
rasterfull <- paste("ensembles//suitability//", raster.title , sep="")
kmlfull <- paste("kml//suitability//", raster.title , sep="")
rastercount <- paste("ensembles//count//", raster.title , sep="")
kmlcount <- paste("kml//count//", raster.title, sep="")
rasterpresence <- paste("ensembles//presence//", raster.title, sep="")
kmlpresence <- paste("kml//presence//", raster.title, sep="")
#
RASTER.species.orig <- RASTER.species.name
if (RASTER.models.overwrite==T) {
RASTER.species.name <- "working"
}else{
RASTER.species.name <- paste(RASTER.species.name, "_", stack.title, sep="")
}
#
cat(paste("\n", "Start of predictions for organism: ", RASTER.species.orig, "\n", sep = ""))
cat(paste("Predictions for RasterStack: ", stack.title, "\n", sep = ""))
ensemble.statistics <- NULL
cat(paste("ensemble raster layers will be saved in folder ", getwd(), "//ensembles", "\n\n", sep = ""))
statistics.names <- c("n.models", "ensemble.threshold", "ensemble.min", "ensemble.max", "count.min", "count.max")
ensemble.statistics <- numeric(6)
names(ensemble.statistics) <- statistics.names
#
# sometimes still error warnings for minimum and maximum values of the layers
# set minimum and maximum values for xn
for (i in 1:raster::nlayers(xn)) {
xn[[i]] <- raster::setMinMax(xn[[i]])
}
# count models
mc <- 0
#
# start raster layer creations
if (output.weights["MAXENT"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Maximum entropy algorithm (package: dismo)\n", sep=""))
# Put the file 'maxent.jar' in the 'java' folder of dismo
# the file 'maxent.jar' can be obtained from http://www.cs.princeton.edu/~schapire/maxent/.
jar <- paste(system.file(package="dismo"), "/java/maxent.jar", sep='')
results <- MAXENT.OLD
pmaxent <- NULL
fullname <- paste("models/", RASTER.species.name, "_MAXENT", sep="")
if (CATCH.OFF == F) {
tryCatch(pmaxent <- raster::predict(object=results, x=xn, na.rm=TRUE,
filename=fullname, progress='text', overwrite=TRUE),
error= function(err) {print(paste("MAXENT prediction failed"))},
silent=F)
}else{
pmaxent <- raster::predict(object=results, x=xn, na.rm=TRUE,
filename=fullname, progress='text', overwrite=TRUE)
}
if (is.null(pmaxent) == F) {
results2 <- MAXENT.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
fullname2 <- paste(fullname, "_step1", sep="")
raster::writeRaster(x=pmaxent, filename=fullname2, progress='text', overwrite=TRUE)
explan.stack <- raster::stack(fullname2)
names(explan.stack) <- "MAXENT"
pmaxent <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, progress='text', overwrite=TRUE)
}
pmaxent <- trunc(1000*pmaxent)
raster::writeRaster(x=pmaxent, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- raster::extract(pmaxent, p)/1000
abs1 <- raster::extract(pmaxent, a)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["MAXENT"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["MAXENT"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- raster::extract(pmaxent, pt)/1000
abs1 <- raster::extract(pmaxent, at)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: MAXENT prediction failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["MAXENT"] <- -1
}
}
if (output.weights["MAXNET"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Maximum entropy algorithm (package: maxnet)\n", sep=""))
results <- MAXNET.OLD
pmaxnet <- NULL
fullname <- paste("models/", RASTER.species.name, "_MAXNET", sep="")
if (CATCH.OFF == F) {
tryCatch(pmaxnet <- raster::predict(object=xn, model=results, fun=predict.maxnet2, na.rm=TRUE, clamp=MAXNET.clamp, type=MAXNET.type,
filename=fullname, progress='text', overwrite=TRUE),
error= function(err) {print(paste("MAXNET prediction failed"))},
silent=F)
}else{
pmaxnet <- raster::predict(object=xn, model=results, fun=predict.maxnet2, na.rm=TRUE, clamp=MAXNET.clamp, type=MAXNET.type,
filename=fullname, progress='text', overwrite=TRUE)
}
if (is.null(pmaxnet) == F) {
results2 <- MAXNET.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
fullname2 <- paste(fullname, "_step1", sep="")
raster::writeRaster(x=pmaxnet, filename=fullname2, progress='text', overwrite=TRUE)
explan.stack <- raster::stack(fullname2)
names(explan.stack) <- "MAXNET"
pmaxnet <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, progress='text', overwrite=TRUE)
}
pmaxnet <- trunc(1000*pmaxnet)
raster::writeRaster(x=pmaxnet, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- raster::extract(pmaxnet, p)/1000
abs1 <- raster::extract(pmaxnet, a)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["MAXNET"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["MAXNET"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- raster::extract(pmaxnet, pt)/1000
abs1 <- raster::extract(pmaxnet, at)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: MAXNET prediction failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["MAXNET"] <- -1
}
}
if (output.weights["MAXLIKE"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Maxlike algorithm (package: maxlike)\n", sep=""))
results <- MAXLIKE.OLD
pmaxlike <- NULL
fullname <- paste("models/", RASTER.species.name, "_MAXLIKE", sep="")
xn.num <- raster::subset(xn, subset=models.list$num.vars)
if (CATCH.OFF == F) {
tryCatch(pmaxlike <- raster::predict(object=xn.num, model=results, na.rm=TRUE,
filename=fullname, progress='text', overwrite=TRUE),
error= function(err) {print(paste("MAXLIKE prediction failed"))},
silent=F)
}else{
pmaxlike <- raster::predict(object=xn.num, model=results, na.rm=TRUE,
filename=fullname, progress='text', overwrite=TRUE)
}
if (is.null(pmaxlike) == F) {
results2 <- MAXLIKE.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
# fullname2 <- paste("models//", "MAXLIKE_step1", sep="")
fullname2 <- paste(fullname, "_step1", sep="")
raster::writeRaster(x=pmaxlike, filename=fullname2, progress='text', overwrite=TRUE)
explan.stack <- raster::stack(fullname2)
names(explan.stack) <- "MAXLIKE"
pmaxlike <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, progress='text', overwrite=TRUE)
}
pmaxlike <- trunc(1000*pmaxlike)
raster::writeRaster(x=pmaxlike, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- raster::extract(pmaxlike, p)/1000
abs1 <- raster::extract(pmaxlike, a)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["MAXLIKE"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["MAXLIKE"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- raster::extract(pmaxlike, pt)/1000
abs1 <- raster::extract(pmaxlike, at)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: MAXLIKE prediction failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["MAXLIKE"] <- -1
}
}
if (output.weights["GBM"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Generalized boosted regression modeling (package: gbm) \n", sep=""))
if (!is.null(factors)) {
cat(paste("\n", "WARNING: not certain whether the correct factor levels will be used", "\n", sep=""))
}
results <- GBM.OLD
pgbm <- NULL
fullname <- paste("models/", RASTER.species.name, "_GBM", sep="")
if (CATCH.OFF == F) {
tryCatch(pgbm <- raster::predict(object=xn, model=results, na.rm=TRUE, factors=factlevels,
n.trees=results$n.trees, type="response", filename=fullname, progress='text', overwrite=TRUE),
error= function(err) {print(paste("GBM prediction failed"))},
silent=F)
}else{
pgbm <- raster::predict(object=xn, model=results, na.rm=TRUE, factors=factlevels,
n.trees=results$n.trees, type="response", filename=fullname, progress='text', overwrite=TRUE)
}
if (is.null(pgbm) == F) {
results2 <- GBM.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
fullname2 <- paste(fullname, "_step1", sep="")
raster::writeRaster(x=pgbm, filename=fullname2, progress='text', overwrite=TRUE)
explan.stack <- raster::stack(fullname2)
names(explan.stack) <- "GBM"
pgbm <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, progress='text', overwrite=TRUE)
}
pgbm <- trunc(1000*pgbm)
raster::writeRaster(x=pgbm, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- raster::extract(pgbm, p)/1000
abs1 <- raster::extract(pgbm, a)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["GBM"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["GBM"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- raster::extract(pgbm, pt)/1000
abs1 <- raster::extract(pgbm, at)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: GBM prediction failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["GBM"] <- -1
}
}
if (output.weights["GBMSTEP"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". gbm step algorithm (package: dismo)\n", sep=""))
if (!is.null(factors)) {
cat(paste("\n", "WARNING: not certain whether the correct factor levels will be used", "\n", sep=""))
}
results <- GBMSTEP.OLD
pgbms <- NULL
fullname <- paste("models/", RASTER.species.name, "_GBMSTEP", sep="")
if (CATCH.OFF == F) {
tryCatch(pgbms <- raster::predict(object=xn, model=results, fun=gbm::predict.gbm, na.rm=TRUE, factors=factlevels,
n.trees=results$n.trees, type="response", filename=fullname, progress='text', overwrite=TRUE),
error= function(err) {print(paste("stepwise GBM prediction failed"))},
silent=F)
}else{
pgbms <- raster::predict(object=xn, model=results, fun=gbm::predict.gbm, na.rm=TRUE, factors=factlevels,
n.trees=results$n.trees, type="response", filename=fullname, progress='text', overwrite=TRUE)
}
if (is.null(pgbms) == F) {
results2 <- GBMSTEP.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
fullname2 <- paste(fullname, "_step1", sep="")
raster::writeRaster(x=pgbms, filename=fullname2, progress='text', overwrite=TRUE)
explan.stack <- raster::stack(fullname2)
names(explan.stack) <- "GBMSTEP"
pgbms <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, progress='text', overwrite=TRUE)
}
pgbms <- trunc(1000*pgbms)
# corrected writing in new format (August 2020)
raster::writeRaster(x=pgbms, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- raster::extract(pgbms, p)/1000
abs1 <- raster::extract(pgbms, a)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["GBMSTEP"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["GBMSTEP"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- raster::extract(pgbms, pt)/1000
abs1 <- raster::extract(pgbms, at)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: stepwise GBM prediction failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["GBMSTEP"] <- -1
}
}
if (output.weights["RF"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Random forest algorithm (package: randomForest)\n", sep=""))
results <- RF.OLD
prf <- NULL
fullname <- paste("models/", RASTER.species.name, "_RF", sep="")
if (CATCH.OFF == F) {
tryCatch(prf <- raster::predict(object=xn, model=results, fun=predict.RF, na.rm=TRUE, factors=factlevels,
filename=fullname, progress='text', overwrite=TRUE),
error= function(err) {print(paste("random forest prediction failed"))},
silent=F)
}else{
prf <- raster::predict(object=xn, model=results, fun=predict.RF, na.rm=TRUE, factors=factlevels,
filename=fullname, progress='text', overwrite=TRUE)
}
if (is.null(prf) == F) {
results2 <- RF.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
fullname2 <- paste(fullname, "_step1", sep="")
raster::writeRaster(x=prf, filename=fullname2, progress='text', overwrite=TRUE)
explan.stack <- raster::stack(fullname2)
names(explan.stack) <- "RF"
prf <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, progress='text', overwrite=TRUE)
}
prf <- trunc(1000*prf)
raster::writeRaster(x=prf, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- raster::extract(prf, p)/1000
abs1 <- raster::extract(prf, a)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["RF"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["RF"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- raster::extract(prf, pt)/1000
abs1 <- raster::extract(prf, at)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: random forest prediction failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["RF"] <- -1
}
}
if (output.weights["CF"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Random forest algorithm (package: party)\n", sep=""))
results <- CF.OLD
pcf <- NULL
fullname <- paste("models/", RASTER.species.name, "_CF", sep="")
if (CATCH.OFF == F) {
tryCatch(pcf <- raster::predict(object=xn, model=results, fun=predict.CF, na.rm=TRUE, factors=factlevels,
filename=fullname, progress='text', overwrite=TRUE),
error= function(err) {print(paste("random forest prediction failed"))},
silent=F)
}else{
pcf <- raster::predict(object=xn, model=results, fun=predict.CF, na.rm=TRUE, factors=factlevels,
filename=fullname, progress='text', overwrite=TRUE)
}
if (is.null(pcf) == F) {
results2 <- CF.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
fullname2 <- paste(fullname, "_step1", sep="")
raster::writeRaster(x=pcf, filename=fullname2, progress='text', overwrite=TRUE)
explan.stack <- raster::stack(fullname2)
names(explan.stack) <- "CF"
pcf <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, progress='text', overwrite=TRUE)
}
pcf <- trunc(1000*pcf)
raster::writeRaster(x=pcf, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- raster::extract(pcf, p)/1000
abs1 <- raster::extract(pcf, a)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["CF"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["CF"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- raster::extract(pcf, pt)/1000
abs1 <- raster::extract(pcf, at)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: random forest prediction failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["CF"] <- -1
}
}
if (output.weights["GLM"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Generalized Linear Model \n", sep=""))
results <- GLM.OLD
pglm <- NULL
fullname <- paste("models/", RASTER.species.name, "_GLM", sep="")
if (CATCH.OFF == F) {
tryCatch(pglm <- raster::predict(object=xn, model=results, na.rm=TRUE, type="response", factors=factlevels,
filename=fullname, progress='text', overwrite=TRUE),
error= function(err) {print(paste("GLM prediction failed"))},
silent=F)
}else{
pglm <- raster::predict(object=xn, model=results, na.rm=TRUE, type="response", factors=factlevels,
filename=fullname, progress='text', overwrite=TRUE)
}
if (is.null(pglm) == F) {
results2 <- GLM.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
fullname2 <- paste(fullname, "_step1", sep="")
raster::writeRaster(x=pglm, filename=fullname2, progress='text', overwrite=TRUE)
explan.stack <- raster::stack(fullname2)
names(explan.stack) <- "GLM"
pglm <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, progress='text', overwrite=TRUE)
}
pglm <- trunc(1000*pglm)
raster::writeRaster(x=pglm, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- raster::extract(pglm, p)/1000
abs1 <- raster::extract(pglm, a)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["GLM"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["GLM"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- raster::extract(pglm, pt)/1000
abs1 <- raster::extract(pglm, at)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: GLM prediction failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["GLM"] <- -1
}
}
if (output.weights["GLMSTEP"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Stepwise Generalized Linear Model \n", sep=""))
results <- GLMSTEP.OLD
pglms <- NULL
fullname <- paste("models/", RASTER.species.name, "_GLMSTEP", sep="")
if (CATCH.OFF == F) {
tryCatch(pglms <- raster::predict(object=xn, model=results, na.rm=TRUE, type="response", factors=factlevels,
filename=fullname, progress='text', overwrite=TRUE),
error= function(err) {print(paste("stepwise GLM prediction failed"))},
silent=F)
}else{
pglms <- raster::predict(object=xn, model=results, na.rm=TRUE, type="response", factors=factlevels,
filename=fullname, progress='text', overwrite=TRUE)
}
if (is.null(pglms) == F) {
results2 <- GLMSTEP.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
fullname2 <- paste(fullname, "_step1", sep="")
raster::writeRaster(x=pglms, filename=fullname2, progress='text', overwrite=TRUE)
explan.stack <- raster::stack(fullname2)
names(explan.stack) <- "GLMSTEP"
pglms <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, progress='text', overwrite=TRUE)
}
pglms <- trunc(1000*pglms)
raster::writeRaster(x=pglms, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- raster::extract(pglms, p)/1000
abs1 <- raster::extract(pglms, a)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["GLMSTEP"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["GLMSTEP"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- raster::extract(pglms, pt)/1000
abs1 <- raster::extract(pglms, at)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: stepwise GLM prediction failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["GLMSTEP"] <- -1
}
}
if (output.weights["GAM"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Generalized Additive Model (package: gam)\n", sep=""))
results <- GAM.OLD
pgam <- NULL
fullname <- paste("models/", RASTER.species.name, "_GAM", sep="")
if (CATCH.OFF == F) {
tryCatch(pgam <- raster::predict(object=xn, model=results, fun=gam::predict.Gam, na.rm=TRUE, type="response", factors=factlevels,
filename=fullname, progress='text', overwrite=TRUE),
error= function(err) {print(paste("GAM (package: gam) prediction failed"))},
silent=F)
}else{
pgam <- raster::predict(object=xn, model=results, fun=gam::predict.Gam, na.rm=TRUE, type="response", factors=factlevels,
filename=fullname, progress='text', overwrite=TRUE)
}
if (is.null(pgam) == F) {
results2 <- GAM.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
fullname2 <- paste(fullname, "_step1", sep="")
raster::writeRaster(x=pgam, filename=fullname2, progress='text', overwrite=TRUE)
explan.stack <- raster::stack(fullname2)
names(explan.stack) <- "GAM"
pgam <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, progress='text', overwrite=TRUE)
}
pgam <- trunc(1000*pgam)
raster::writeRaster(x=pgam, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- raster::extract(pgam, p)/1000
abs1 <- raster::extract(pgam, a)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["GAM"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["GAM"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- raster::extract(pgam, pt)/1000
abs1 <- raster::extract(pgam, at)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: GAM prediction (gam package) failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["GAM"] <- -1
}
}
if (output.weights["GAMSTEP"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Stepwise Generalized Additive Model (package: gam)\n", sep=""))
results <- GAMSTEP.OLD
pgams <- NULL
fullname <- paste("models/", RASTER.species.name, "_GAMSTEP", sep="")
if (CATCH.OFF == F) {
tryCatch(pgams <- raster::predict(object=xn, model=results, fun=gam::predict.Gam, type="response", na.rm=TRUE, factors=factlevels,
filename=fullname, progress='text', overwrite=TRUE),
error= function(err) {print(paste("stepwise GAM (package: gam) prediction failed"))},
silent=F)
}else{
pgams <- raster::predict(object=xn, model=results, fun=gam::predict.Gam, type="response", na.rm=TRUE, factors=factlevels,
filename=fullname, progress='text', overwrite=TRUE)
}
if (is.null(pgams) == F) {
results2 <- GAMSTEP.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
fullname2 <- paste(fullname, "_step1", sep="")
raster::writeRaster(x=pgams, filename=fullname2, progress='text', overwrite=TRUE)
explan.stack <- raster::stack(fullname2)
names(explan.stack) <- "GAMSTEP"
pgams <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, progress='text', overwrite=TRUE)
}
pgams <- trunc(1000*pgams)
raster::writeRaster(x=pgams, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- raster::extract(pgams, p)/1000
abs1 <- raster::extract(pgams, a)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["GAMSTEP"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["GAMSTEP"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- raster::extract(pgams, pt)/1000
abs1 <- raster::extract(pgams, at)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: stepwise GAM prediction (gam package) failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["GAMSTEP"] <- -1
}
}
if (output.weights["MGCV"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Generalized Additive Model (package: mgcv)\n", sep=""))
if (!is.null(factors)) {
cat(paste("\n", "WARRNING: not certain whether the correct factor levels will be used", "\n", sep=""))
}
results <- MGCV.OLD
pmgcv <- NULL
fullname <- paste("models/", RASTER.species.name, "_MGCV", sep="")
if (CATCH.OFF == F) {
tryCatch(pmgcv <- raster::predict(object=xn, model=results, fun=predict.MGCV, na.rm=TRUE, type="response", factors=factlevels,
filename=fullname, progress='text', overwrite=TRUE),
error= function(err) {print(paste("GAM (package: mgcv) prediction failed"))},
silent=F)
}else{
pmgcv <- raster::predict(object=xn, model=results, fun=predict.MGCV, na.rm=TRUE, type="response", factors=factlevels,
filename=fullname, progress='text', overwrite=TRUE)
}
if (is.null(pmgcv) == F) {
results2 <- MGCV.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
fullname2 <- paste(fullname, "_step1", sep="")
raster::writeRaster(x=pmgcv, filename=fullname2, progress='text', overwrite=TRUE)
explan.stack <- raster::stack(fullname2)
names(explan.stack) <- "MGCV"
pmgcv <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, progress='text', overwrite=TRUE)
}
pmgcv <- trunc(1000*pmgcv)
raster::writeRaster(x=pmgcv, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- raster::extract(pmgcv, p)/1000
abs1 <- raster::extract(pmgcv, a)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["MGCV"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["MGCV"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- raster::extract(pmgcv, pt)/1000
abs1 <- raster::extract(pmgcv, at)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: GAM prediction (mgcv package) failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["MGCV"] <- -1
}
}
if (output.weights["MGCVFIX"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". GAM with fixed d.f. regression splines (package: mgcv)\n", sep=""))
if (!is.null(factors)) {
cat(paste("\n", "WARRNING: not certain whether the correct factor levels will be used", "\n", sep=""))
}
results <- MGCVFIX.OLD
pmgcvf <- NULL
fullname <- paste("models/", RASTER.species.name, "_MGCVFIX", sep="")
if (CATCH.OFF == F) {
tryCatch(pmgcvf <- raster::predict(object=xn, model=results, fun=predict.MGCV, na.rm=TRUE, type="response", factors=factlevels,
filename=fullname, progress='text', overwrite=TRUE),
error= function(err) {print(paste("GAM with fixed d.f. regression splines (package: mgcv) prediction failed"))},
silent=F)
}else{
pmgcvf <- raster::predict(object=xn, model=results, fun=predict.MGCV, na.rm=TRUE, type="response", factors=factlevels,
filename=fullname, progress='text', overwrite=TRUE)
}
if (is.null(pmgcvf) == F) {
results2 <- MGCVFIX.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
fullname2 <- paste(fullname, "_step1", sep="")
raster::writeRaster(x=pmgcvf, filename=fullname2, progress='text', overwrite=TRUE)
explan.stack <- raster::stack(fullname2)
names(explan.stack) <- "MGCVFIX"
pmgcvf <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, progress='text', overwrite=TRUE)
}
pmgcvf <- trunc(1000*pmgcvf)
raster::writeRaster(x=pmgcvf, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- raster::extract(pmgcvf, p)/1000
abs1 <- raster::extract(pmgcvf, a)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["MGCVFIX"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["MGCVFIX"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- raster::extract(pmgcvf, pt)/1000
abs1 <- raster::extract(pmgcvf, at)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: GAM prediction (mgcv package) failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["MGCVFIX"] <- -1
}
}
if (output.weights["EARTH"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Multivariate Adaptive Regression Splines (package: earth)\n", sep=""))
if (!is.null(factors)) {
cat(paste("\n", "NOTE: MARS (earth package) with factors may require explicit dummy variables", "\n", sep=""))
}
results <- EARTH.OLD
pearth <- NULL
fullname <- paste("models/", RASTER.species.name, "_EARTH", sep="")
if (CATCH.OFF == F) {
tryCatch(pearth <- raster::predict(object=xn, model=results, fun=predict.EARTH, na.rm=TRUE, type="response", factors=factlevels,
filename=fullname, progress='text', overwrite=TRUE),
error= function(err) {print(paste("MARS (package: earth) prediction failed"))},
silent=F)
}else{
pearth <- raster::predict(object=xn, model=results, fun=predict.EARTH, na.rm=TRUE, type="response", factors=factlevels,
filename=fullname, progress='text', overwrite=TRUE)
}
if (is.null(pearth) == F) {
results2 <- EARTH.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
fullname2 <- paste(fullname, "_step1", sep="")
raster::writeRaster(x=pearth, filename=fullname2, progress='text', overwrite=TRUE)
explan.stack <- raster::stack(fullname2)
names(explan.stack) <- "EARTH"
pearth <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, progress='text', overwrite=TRUE)
}
pearth <- trunc(1000*pearth)
raster::writeRaster(x=pearth, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- raster::extract(pearth, p)/1000
abs1 <- raster::extract(pearth, a)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["EARTH"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["EARTH"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- raster::extract(pearth, pt)/1000
abs1 <- raster::extract(pearth, at)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: MARS prediction (earth package) failed", "\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["EARTH"] <- -1
}
}
if (output.weights["RPART"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Recursive Partitioning And Regression Trees (package: rpart)\n", sep=""))
if (!is.null(factors)) {
cat(paste("\n", "WARRNING: not certain whether the correct factor levels will be used", "\n", sep=""))
}
results <- RPART.OLD
prpart <- NULL
fullname <- paste("models/", RASTER.species.name, "_RPART", sep="")
if (CATCH.OFF == F) {
tryCatch(prpart <- raster::predict(object=xn, model=results, na.rm=TRUE, type="prob", index=2, factors=factlevels,
filename=fullname, progress='text', overwrite=TRUE),
error= function(err) {print(paste("RPART prediction failed"))},
silent=F)
}else{
prpart <- raster::predict(object=xn, model=results, na.rm=TRUE, type="prob", index=2, factors=factlevels,
filename=fullname, progress='text', overwrite=TRUE)
}
if (is.null(prpart) == F) {
results2 <- RPART.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
fullname2 <- paste(fullname, "_step1", sep="")
raster::writeRaster(x=prpart, filename=fullname2, progress='text', overwrite=TRUE)
explan.stack <- raster::stack(fullname2)
names(explan.stack) <- "RPART"
prpart <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, progress='text', overwrite=TRUE)
}
prpart <- trunc(1000*prpart)
raster::writeRaster(x=prpart, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- raster::extract(prpart, p)/1000
abs1 <- raster::extract(prpart, a)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["RPART"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["RPART"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- raster::extract(prpart, pt)/1000
abs1 <- raster::extract(prpart, at)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: RPART prediction failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["RPART"] <- -1
}
}
if (output.weights["NNET"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Artificial Neural Network (package: nnet)\n", sep=""))
if (!is.null(factors)) {
cat(paste("\n", "WARRNING: not certain whether the correct factor levels will be used", "\n", sep=""))
}
results <- NNET.OLD
pnnet <- NULL
fullname <- paste("models/", RASTER.species.name, "_NNET", sep="")
if (CATCH.OFF == F){
tryCatch(pnnet <- raster::predict(object=xn, model=results, fun=predict.NNET, na.rm=TRUE, factors=factlevels,
filename=fullname, progress='text', overwrite=TRUE),
error= function(err) {print(paste("Artificial Neural Network (package: nnet) prediction failed"))},
silent=F)
}else{
pnnet <- raster::predict(object=xn, model=results, fun=predict.NNET, na.rm=TRUE, factors=factlevels,
filename=fullname, progress='text', overwrite=TRUE)
}
if (is.null(pnnet) == F) {
results2 <- NNET.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
fullname2 <- paste(fullname, "_step1", sep="")
raster::writeRaster(x=pnnet, filename=fullname2, progress='text', overwrite=TRUE)
explan.stack <- raster::stack(fullname2)
names(explan.stack) <- "NNET"
pnnet <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, progress='text', overwrite=TRUE)
}
pnnet <- trunc(1000*pnnet)
raster::writeRaster(x=pnnet, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- raster::extract(pnnet, p)/1000
abs1 <- raster::extract(pnnet, a)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["NNET"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["NNET"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- raster::extract(pnnet, pt)/1000
abs1 <- raster::extract(pnnet, at)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: ANN prediction (nnet package) failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["NNET"] <- -1
}
}
if (output.weights["FDA"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Flexible Discriminant Analysis (package: mda)\n", sep=""))
if (!is.null(factors)) {
cat(paste("\n", "WARRNING: not certain whether the correct factor levels will be used", "\n", sep=""))
}
results <- FDA.OLD
pfda <- NULL
fullname <- paste("models/", RASTER.species.name, "_FDA", sep="")
if (CATCH.OFF == F) {
tryCatch(pfda <- raster::predict(object=xn, model=results, na.rm=TRUE, type="posterior", index=2, factors=factlevels,
filename=fullname, progress='text', overwrite=TRUE),
error= function(err) {print(paste("FDA prediction failed"))},
silent=F)
}else{
pfda <- raster::predict(object=xn, model=results, na.rm=TRUE, type="posterior", index=2, factors=factlevels,
filename=fullname, progress='text', overwrite=TRUE)
}
if (is.null(pfda) == F) {
results2 <- FDA.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
fullname2 <- paste(fullname, "_step1", sep="")
raster::writeRaster(x=pfda, filename=fullname2, progress='text', overwrite=TRUE)
explan.stack <- raster::stack(fullname2)
names(explan.stack) <- "FDA"
pfda <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, progress='text', overwrite=TRUE)
}
pfda <- trunc(1000*pfda)
raster::writeRaster(x=pfda, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- raster::extract(pfda, p)/1000
abs1 <- raster::extract(pfda, a)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["FDA"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["FDA"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- raster::extract(pfda, pt)/1000
abs1 <- raster::extract(pfda, at)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: FDA prediction failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["FDA"] <- -1
}
}
if (output.weights["SVM"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Support Vector Machines (package: kernlab)\n", sep=""))
if (!is.null(factors)) {
cat(paste("\n", "NOTE: SVM model with factors may require explicit dummy variables", "\n", sep=""))
}
results <- SVM.OLD
psvm <- NULL
fullname <- paste("models/", RASTER.species.name, "_SVM", sep="")
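# kernlab's predict is an S4 generic; it is coerced to an ordinary function here, presumably so that
# it can be passed to raster::predict via the fun argument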
predict.svm2 <- as.function(kernlab::predict)
if (CATCH.OFF == F) {
tryCatch(psvm <- raster::predict(object=xn, model=results, fun=predict.svm2, na.rm=TRUE, type="probabilities", index=2, factors=factlevels,
filename=fullname, progress='text', overwrite=TRUE),
error= function(err) {print(paste("Support Vector Machines (package: kernlab) prediction failed"))},
silent=F)
}else{
psvm <- raster::predict(object=xn, model=results, fun=predict.svm2, na.rm=TRUE, type="probabilities", index=2, factors=factlevels,
filename=fullname, progress='text', overwrite=TRUE)
}
if (is.null(psvm) == F) {
results2 <- SVM.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
fullname2 <- paste(fullname, "_step1", sep="")
raster::writeRaster(x=psvm, filename=fullname2, progress='text', overwrite=TRUE)
explan.stack <- raster::stack(fullname2)
names(explan.stack) <- "SVM"
psvm <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, progress='text', overwrite=TRUE)
}
psvm <- trunc(1000*psvm)
raster::writeRaster(x=psvm, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- raster::extract(psvm, p)/1000
abs1 <- raster::extract(psvm, a)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["SVM"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["SVM"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- raster::extract(psvm, pt)/1000
abs1 <- raster::extract(psvm, at)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: SVM prediction (kernlab package) failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["SVM"] <- -1
}
}
if (output.weights["SVME"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Support Vector Machines (package: e1071)\n", sep=""))
results <- SVME.OLD
psvme <- NULL
fullname <- paste("models/", RASTER.species.name, "_SVME", sep="")
if (CATCH.OFF == F) {
tryCatch(psvme <- raster::predict(object=xn, model=results, fun=predict.SVME, na.rm=TRUE, factors=factlevels,
filename=fullname, progress='text', overwrite=TRUE),
error= function(err) {print(paste("SVM prediction (e1071 package) failed"))},
warning= function(war) {print(paste("SVM prediction (e1071 package) failed"))},
silent=F)
}else{
psvme <- raster::predict(object=xn, model=results, fun=predict.SVME, na.rm=TRUE, factors=factlevels,
filename=fullname, progress='text', overwrite=TRUE)
}
if (is.null(psvme) == F) {
results2 <- SVME.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
fullname2 <- paste(fullname, "_step1", sep="")
raster::writeRaster(x=psvme, filename=fullname2, progress='text', overwrite=TRUE)
explan.stack <- raster::stack(fullname2)
names(explan.stack) <- "SVME"
psvme <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, progress='text', overwrite=TRUE)
}
psvme <- trunc(1000*psvme)
raster::writeRaster(x=psvme, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- raster::extract(psvme, p)/1000
abs1 <- raster::extract(psvme, a)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["SVME"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["SVME"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- raster::extract(psvme, pt)/1000
abs1 <- raster::extract(psvme, at)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: SVM prediction (e1071 package) failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["SVME"] <- -1
}
}
if (output.weights["GLMNET"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". GLM with lasso or elasticnet regularization (package: glmnet)\n", sep=""))
if (is.null(factors) == F) {
cat(paste("\n", "NOTE: factors not considered (maybe consider dummy variables)", "\n", sep=""))
}
results <- GLMNET.OLD
pglmnet <- NULL
fullname <- paste("models/", RASTER.species.name, "_GLMNET", sep="")
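# glmnet requires a numeric model matrix, so the raster stack is restricted to the numeric
# explanatory variables (models.list$num.vars)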
xn.num <- raster::subset(xn, subset=models.list$num.vars)
if (CATCH.OFF == F) {
tryCatch(pglmnet <- raster::predict(object=xn.num, model=results, fun=predict.GLMNET, na.rm=TRUE, GLMNET.class=GLMNET.class,
filename=fullname, progress='text', overwrite=TRUE),
error= function(err) {print(paste("GLMNET prediction (glmnet package) failed"))},
warning= function(war) {print(paste("GLMNET prediction (glmnet package) failed"))},
silent=F)
}else{
pglmnet <- raster::predict(object=xn.num, model=results, fun=predict.GLMNET, na.rm=TRUE, GLMNET.class=GLMNET.class,
filename=fullname, progress='text', overwrite=TRUE)
}
if (is.null(pglmnet) == F) {
results2 <- GLMNET.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
fullname2 <- paste(fullname, "_step1", sep="")
raster::writeRaster(x=pglmnet, filename=fullname2, progress='text', overwrite=TRUE)
explan.stack <- raster::stack(fullname2)
names(explan.stack) <- "GLMNET"
pglmnet <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, progress='text', overwrite=TRUE)
}
pglmnet <- trunc(1000*pglmnet)
raster::writeRaster(x=pglmnet, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- raster::extract(pglmnet, p)/1000
abs1 <- raster::extract(pglmnet, a)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["GLMNET"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["GLMNET"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- raster::extract(pglmnet, pt)/1000
abs1 <- raster::extract(pglmnet, at)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: GLMNET prediction (glmnet package) failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["GLMNET"] <- -1
}
}
if (output.weights["BIOCLIM.O"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". original BIOCLIM algorithm (package: BiodiversityR)\n", sep=""))
results <- BIOCLIM.O.OLD
pbioO <- NULL
fullname <- paste("models/", RASTER.species.name, "_BIOCLIMO", sep="")
if (CATCH.OFF == F){
tryCatch(pbioO <- raster::predict(object=xn, model=results, fun=predict.BIOCLIM.O, na.rm=TRUE,
filename=fullname, progress='text', overwrite=TRUE),
error= function(err) {print(paste("original BIOCLIM prediction failed"))},
silent=F)
}else{
pbioO <- raster::predict(object=xn, model=results, fun=predict.BIOCLIM.O, na.rm=TRUE,
filename=fullname, progress='text', overwrite=TRUE)
}
if (is.null(pbioO) == F) {
results2 <- BIOCLIM.O.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
fullname2 <- paste(fullname, "_step1", sep="")
raster::writeRaster(x=pbioO, filename=fullname2, progress='text', overwrite=TRUE)
explan.stack <- raster::stack(fullname2)
names(explan.stack) <- "BIOCLIM.O"
pbioO <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, progress='text', overwrite=TRUE)
}
pbioO <- trunc(1000*pbioO)
raster::writeRaster(x=pbioO, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- raster::extract(pbioO, p)/1000
abs1 <- raster::extract(pbioO, a)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["BIOCLIM.O"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["BIOCLIM.O"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- raster::extract(pbioO, pt)/1000
abs1 <- raster::extract(pbioO, at)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: original BIOCLIM prediction failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["BIOCLIM.O"] <- -1
}
}
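# BIOCLIM, DOMAIN and the Mahalanobis methods are fitted on numeric variables only,
# so any factor layers are removed from the raster stack first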
if (output.weights["BIOCLIM"] > 0 || output.weights["DOMAIN"] > 0 || output.weights["MAHAL"] > 0 || output.weights["MAHAL01"] > 0) {
if(is.null(factors) == F) {
xn <- raster::dropLayer(xn, which(names(xn) %in% factors))
xn <- raster::stack(xn)
}
}
if (output.weights["BIOCLIM"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". BIOCLIM algorithm (package: dismo)\n", sep=""))
results <- BIOCLIM.OLD
pbio <- NULL
fullname <- paste("models/", RASTER.species.name, "_BIOCLIM", sep="")
if (CATCH.OFF == F) {
tryCatch(pbio <- dismo::predict(object=results, x=xn, na.rm=TRUE,
filename=fullname, progress='text', overwrite=TRUE),
error= function(err) {print(paste("BIOCLIM prediction failed"))},
silent=F)
}else{
pbio <- dismo::predict(object=results, x=xn, na.rm=TRUE,
filename=fullname, progress='text', overwrite=TRUE)
}
if (is.null(pbio) == F) {
results2 <- BIOCLIM.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
fullname2 <- paste(fullname, "_step1", sep="")
raster::writeRaster(x=pbio, filename=fullname2, progress='text', overwrite=TRUE)
explan.stack <- raster::stack(fullname2)
names(explan.stack) <- "BIOCLIM"
pbio <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, progress='text', overwrite=TRUE)
}
pbio <- trunc(1000*pbio)
raster::writeRaster(x=pbio, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- raster::extract(pbio, p)/1000
abs1 <- raster::extract(pbio, a)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["BIOCLIM"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["BIOCLIM"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- raster::extract(pbio, pt)/1000
abs1 <- raster::extract(pbio, at)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: BIOCLIM prediction failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["BIOCLIM"] <- -1
}
}
if (output.weights["DOMAIN"] > 0) {
if(is.null(factors) == F) {
xn <- raster::dropLayer(xn, which(names(xn) %in% dummy.vars.noDOMAIN))
xn <- raster::stack(xn)
}
}
if (output.weights["DOMAIN"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". DOMAIN algorithm (package: dismo)\n", sep=""))
if(is.null(models.list$dummy.vars.noDOMAIN) == F) {
xn <- raster::dropLayer(xn, which(names(xn) %in% models.list$dummy.vars.noDOMAIN))
xn <- raster::stack(xn)
}
results <- DOMAIN.OLD
pdom <- NULL
fullname <- paste("models/", RASTER.species.name, "_DOMAIN", sep="")
if (CATCH.OFF == F) {
tryCatch(pdom <- dismo::predict(object=results, x=xn, na.rm=TRUE,
filename=fullname, progress='text', overwrite=TRUE),
error= function(err) {print(paste("DOMAIN prediction failed"))},
silent=F)
}else{
pdom <- dismo::predict(object=results, x=xn, na.rm=TRUE,
filename=fullname, progress='text', overwrite=TRUE)
}
if (is.null(pdom) == F) {
results2 <- DOMAIN.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
fullname2 <- paste(fullname, "_step1", sep="")
raster::writeRaster(x=pdom, filename=fullname2, progress='text', overwrite=TRUE)
explan.stack <- raster::stack(fullname2)
names(explan.stack) <- "DOMAIN"
pdom <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, progress='text', overwrite=TRUE)
}
pdom <- trunc(1000*pdom)
raster::writeRaster(x=pdom, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- raster::extract(pdom, p)/1000
abs1 <- raster::extract(pdom, a)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["DOMAIN"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["DOMAIN"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- raster::extract(pdom, pt)/1000
abs1 <- raster::extract(pdom, at)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: DOMAIN prediction failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["DOMAIN"] <- -1
}
}
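# the Mahalanobis methods also exclude dummy variables; drop those layers from the stack as well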
if (output.weights["MAHAL"] > 0 || output.weights["MAHAL01"] > 0) {
if(is.null(dummy.vars) == F) {
xn <- raster::dropLayer(xn, which(names(xn) %in% dummy.vars))
xn <- raster::stack(xn)
}
}
if (output.weights["MAHAL"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Mahalanobis algorithm (package: dismo)\n", sep=""))
results <- MAHAL.OLD
pmahal <- NULL
fullname <- paste("models/", RASTER.species.name, "_MAHAL", sep="")
# not possible to use the predict.mahal function as raster::predict automatically reverts to dismo::predict for 'DistModel' objects
results2 <- MAHAL.PROBIT.OLD
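# if a probit calibration model exists, raw predictions are generated first (PROBIT=FALSE) and the
# probit transformation is applied afterwards; otherwise PROBIT=TRUE is passed to predict.MAHAL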
# PROBIT FALSE
if (is.null(results2) == F) {
if (CATCH.OFF == F) {
tryCatch(pmahal <- raster::predict(object=xn, model=results, fun=predict.MAHAL, na.rm=TRUE, PROBIT=FALSE,
filename=fullname, progress='text', overwrite=TRUE),
error= function(err) {print(paste("Mahalanobis prediction failed"))},
silent=F)
}else{
pmahal <- raster::predict(object=xn, model=results, fun=predict.MAHAL, na.rm=TRUE, PROBIT=FALSE,
filename=fullname, progress='text', overwrite=TRUE)
}
# PROBIT TRUE
}else{
if (CATCH.OFF == F) {
tryCatch(pmahal <- raster::predict(object=xn, model=results, fun=predict.MAHAL, na.rm=TRUE, PROBIT=TRUE,
filename=fullname, progress='text', overwrite=TRUE),
error= function(err) {print(paste("Mahalanobis prediction failed"))},
silent=F)
}else{
pmahal <- raster::predict(object=xn, model=results, fun=predict.MAHAL, na.rm=TRUE, PROBIT=TRUE,
filename=fullname, progress='text', overwrite=TRUE)
}
}
if (is.null(pmahal) == F) {
# results2 <- MAHAL.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
fullname2 <- paste(fullname, "_step1", sep="")
raster::writeRaster(x=pmahal, filename=fullname2, progress='text', overwrite=TRUE)
explan.stack <- raster::stack(fullname2)
names(explan.stack) <- "MAHAL"
pmahal <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, progress='text', overwrite=TRUE)
}
pmahal <- trunc(1000*pmahal)
raster::writeRaster(x=pmahal, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- raster::extract(pmahal, p)/1000
abs1 <- raster::extract(pmahal, a)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["MAHAL"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["MAHAL"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- raster::extract(pmahal, pt)/1000
abs1 <- raster::extract(pmahal, at)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: Mahalanobis prediction failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["MAHAL"] <- -1
}
}
if (output.weights["MAHAL01"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Mahalanobis algorithm (transformed within 0 to 1 interval)", "\n", sep=""))
if(is.null(dummy.vars) == F) {
xn <- raster::dropLayer(xn, which(names(xn) %in% dummy.vars))
xn <- raster::stack(xn)
}
results <- MAHAL01.OLD
pmahal01 <- NULL
fullname <- paste("models/", RASTER.species.name, "_MAHAL01", sep="")
# not possible to use the predict.mahal function as raster::predict automatically reverts to dismo::predict for 'DistModel' objects
if (CATCH.OFF == F) {
tryCatch(pmahal01 <- raster::predict(object=xn, model=results, fun=predict.MAHAL01, na.rm=TRUE, MAHAL.shape=MAHAL.shape,
filename=fullname, progress='text', overwrite=TRUE),
error= function(err) {print(paste("transformed Mahalanobis prediction failed"))},
silent=F)
}else{
pmahal01 <- raster::predict(object=xn, model=results, fun=predict.MAHAL01, na.rm=TRUE, MAHAL.shape=MAHAL.shape,
filename=fullname, progress='text', overwrite=TRUE)
}
if (is.null(pmahal01) == F) {
# pmahal <- pmahal - 1 - MAHAL.shape
# pmahal <- abs(pmahal)
# pmahal <- MAHAL.shape / pmahal
results2 <- MAHAL01.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
fullname2 <- paste(fullname, "_step1", sep="")
raster::writeRaster(x=pmahal01, filename=fullname2, progress='text', overwrite=TRUE)
explan.stack <- raster::stack(fullname2)
names(explan.stack) <- "MAHAL01"
pmahal01 <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, progress='text', overwrite=TRUE)
}
pmahal01 <- trunc(1000*pmahal01)
raster::writeRaster(x=pmahal01, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- raster::extract(pmahal01, p)/1000
abs1 <- raster::extract(pmahal01, a)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["MAHAL01"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["MAHAL01"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- raster::extract(pmahal01, pt)/1000
abs1 <- raster::extract(pmahal01, at)/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: transformed Mahalanobis prediction failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["MAHAL01"] <- -1
}
}
#
if (prediction.failures == T) {
cat(paste("\n", "WARNING: some predictions failed", sep = ""))
cat(paste("\n", "actual weights that were used were (-1 indicates failed predictions):", "\n", sep = ""))
print(output.weights)
cat(paste("\n", "Because ensemble suitability would therefore underestimated", sep = ""))
cat(paste("\n", "the ensemble will not be calibrated", "\n", sep = ""))
}
#
# create ensembles if no prediction failures
if (prediction.failures == F) {
cat(paste("\n", "submodel thresholds for absence-presence used: ", "\n", sep = ""))
if (evaluate == T) {
cat(paste("(thresholds recalculated from raster layers)", "\n", sep = ""))
thresholds <- thresholds.raster
}
print(thresholds)
#
mc <- mc+1
cat(paste("\n\n", mc, ". Ensemble algorithm\n", sep=""))
ensemble.statistics["n.models"] <- sum(as.numeric(output.weights > 0))
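# create an (effectively) all-zero raster with the same extent and NA mask as the explanatory stack:
# comparing non-missing cells of the first layer with its NA flag evaluates to FALSE (0)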
ensemble <- xn[[1]] == raster::NAvalue(xn[[1]])
raster::setMinMax(ensemble)
names(ensemble) <- raster.title
raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
enscount <- ensemble
raster::setMinMax(enscount)
names(enscount) <- paste(raster.title, "_count", sep="")
raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
enspresence <- ensemble
raster::setMinMax(enspresence)
names(enspresence) <- paste(raster.title, "_presence", sep="")
raster::writeRaster(x=enspresence, filename=rasterpresence, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
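# for each contributing submodel: add its weighted suitability (still on the 0-1000 integer scale) to
# the ensemble layer, then threshold it at 1000 * its presence threshold and add the resulting 0/1
# layer to the count raster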
if (output.weights["MAXENT"] > 0) {
ensemble <- ensemble + output.weights["MAXENT"] * pmaxent
# raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pmaxent <- pmaxent >= 1000 * thresholds["MAXENT"]
enscount <- enscount + pmaxent
# raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (output.weights["MAXNET"] > 0) {
ensemble <- ensemble + output.weights["MAXNET"] * pmaxnet
# raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pmaxnet <- pmaxnet >= 1000 * thresholds["MAXNET"]
enscount <- enscount + pmaxnet
# raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (output.weights["MAXLIKE"] > 0) {
ensemble <- ensemble + output.weights["MAXLIKE"] * pmaxlike
# raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pmaxlike <- pmaxlike >= 1000 * thresholds["MAXLIKE"]
enscount <- enscount + pmaxlike
# raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (output.weights["GBM"] > 0) {
ensemble <- ensemble + output.weights["GBM"] * pgbm
# raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pgbm <- pgbm >= 1000 * thresholds["GBM"]
enscount <- enscount + pgbm
# raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (output.weights["GBMSTEP"] > 0) {
ensemble <- ensemble + output.weights["GBMSTEP"] * pgbms
# raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pgbms <- pgbms >= 1000 * thresholds["GBMSTEP"]
enscount <- enscount + pgbms
# raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (output.weights["RF"] > 0) {
ensemble <- ensemble + output.weights["RF"] * prf
# raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
prf <- prf >= 1000 * thresholds["RF"]
enscount <- enscount + prf
# raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (output.weights["CF"] > 0) {
ensemble <- ensemble + output.weights["CF"] * pcf
# raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pcf <- pcf >= 1000 * thresholds["CF"]
enscount <- enscount + pcf
# raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (output.weights["GLM"] > 0) {
ensemble <- ensemble + output.weights["GLM"] * pglm
# raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pglm <- pglm >= 1000 * thresholds["GLM"]
enscount <- enscount + pglm
# raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (output.weights["GLMSTEP"] > 0) {
ensemble <- ensemble + output.weights["GLMSTEP"] * pglms
# raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pglms <- pglms >= 1000 * thresholds["GLMSTEP"]
enscount <- enscount + pglms
# raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (output.weights["GAM"] > 0) {
ensemble <- ensemble + output.weights["GAM"] * pgam
# raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pgam <- pgam >= 1000 * thresholds["GAM"]
enscount <- enscount + pgam
# raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (output.weights["GAMSTEP"] > 0) {
ensemble <- ensemble + output.weights["GAMSTEP"] * pgams
# raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pgams <- pgams >= 1000 * thresholds["GAMSTEP"]
enscount <- enscount + pgams
# raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (output.weights["MGCV"] > 0) {
ensemble <- ensemble + output.weights["MGCV"] * pmgcv
# raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pmgcv <- pmgcv >= 1000 * thresholds["MGCV"]
enscount <- enscount + pmgcv
# raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (output.weights["MGCVFIX"] > 0) {
ensemble <- ensemble + output.weights["MGCVFIX"] * pmgcvf
# raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pmgcvf <- pmgcvf >= 1000 * thresholds["MGCVFIX"]
enscount <- enscount + pmgcvf
# raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (output.weights["EARTH"] > 0) {
ensemble <- ensemble + output.weights["EARTH"] * pearth
# raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pearth <- pearth >= 1000 * thresholds["EARTH"]
enscount <- enscount + pearth
# raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (output.weights["RPART"] > 0) {
ensemble <- ensemble + output.weights["RPART"] * prpart
# raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
prpart <- prpart >= 1000 * thresholds["RPART"]
enscount <- enscount + prpart
# raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (output.weights["NNET"] > 0) {
ensemble <- ensemble + output.weights["NNET"] * pnnet
# raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pnnet <- pnnet >= 1000 * thresholds["NNET"]
enscount <- enscount + pnnet
# raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (output.weights["FDA"] > 0) {
ensemble <- ensemble + output.weights["FDA"] * pfda
# raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pfda <- pfda >= 1000 * thresholds["FDA"]
enscount <- enscount + pfda
# raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (output.weights["SVM"] > 0) {
ensemble <- ensemble + output.weights["SVM"] * psvm
# raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
psvm <- psvm >= 1000 * thresholds["SVM"]
enscount <- enscount + psvm
# raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (output.weights["SVME"] > 0) {
ensemble <- ensemble + output.weights["SVME"] * psvme
# raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
psvme <- psvme >= 1000 * thresholds["SVME"]
enscount <- enscount + psvme
# raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (output.weights["GLMNET"] > 0) {
ensemble <- ensemble + output.weights["GLMNET"] * pglmnet
# raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pglmnet <- pglmnet >= 1000 * thresholds["GLMNET"]
enscount <- enscount + pglmnet
# raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (output.weights["BIOCLIM.O"] > 0) {
ensemble <- ensemble + output.weights["BIOCLIM.O"] * pbioO
# raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pbioO <- pbioO >= 1000 * thresholds["BIOCLIM.O"]
enscount <- enscount + pbioO
# raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (output.weights["BIOCLIM"] > 0) {
ensemble <- ensemble + output.weights["BIOCLIM"] * pbio
# raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pbio <- pbio >= 1000 * thresholds["BIOCLIM"]
enscount <- enscount + pbio
# raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (output.weights["DOMAIN"] > 0) {
ensemble <- ensemble + output.weights["DOMAIN"] * pdom
# raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pdom <- pdom >= 1000 * thresholds["DOMAIN"]
enscount <- enscount + pdom
# raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (output.weights["MAHAL"] > 0) {
ensemble <- ensemble + output.weights["MAHAL"] * pmahal
# raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pmahal <- pmahal >= 1000 * thresholds["MAHAL"]
enscount <- enscount + pmahal
# raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (output.weights["MAHAL01"] > 0) {
ensemble <- ensemble + output.weights["MAHAL01"] * pmahal01
# raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pmahal01 <- pmahal01 >= 1000 * thresholds["MAHAL01"]
enscount <- enscount + pmahal01
# raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
#
# note that submodels had already been multiplied by 1000
ensemble <- trunc(ensemble)
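    # Worked example (hypothetical weights and cell values): with output.weights of
    # c(RF=0.5, GLM=0.3, SVM=0.2) and scaled submodel values of 800, 600 and 400 for one cell,
    # the weighted ensemble value is 0.5*800 + 0.3*600 + 0.2*400 = 660 before truncation.
    # Similarly, with thresholds["RF"]=0.65 (hypothetical), a cell only adds to enscount
    # for the RF submodel when its scaled RF value is >= 650.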
raster::setMinMax(ensemble)
raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
ensemble.statistics["ensemble.min"] <- raster::minValue(ensemble)
ensemble.statistics["ensemble.max"] <- raster::maxValue(ensemble)
# names(ensemble) <- raster.title
# raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
# avoid possible problems with saving of names of the raster layers
# no longer used with default GTiff format since DEC-2022
# raster::writeRaster(ensemble, filename="working.grd", overwrite=T)
# working.raster <- raster::raster("working.grd")
# names(working.raster) <- raster.title
# raster::writeRaster(working.raster, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
#
raster::setMinMax(enscount)
raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
ensemble.statistics["count.min"] <- raster::minValue(enscount)
ensemble.statistics["count.max"] <- raster::maxValue(enscount)
# names(enscount) <- paste(raster.title, "_count", sep="")
# raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
# avoid possible problems with saving of names of the raster layers
# no longer used with default GTiff format since DEC-2022
# raster::writeRaster(enscount, filename="working.grd", overwrite=T)
# working.raster <- raster::raster("working.grd")
# names(working.raster) <- paste(raster.title, "_count", sep="")
# raster::writeRaster(working.raster, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
#
# if (KML.out == T) {
# raster::writeRaster(enscount, filename="KMLworking.grd", overwrite=T)
# KMLworking.raster <- raster::raster("KMLworking.grd")
# names(KMLworking.raster) <- paste(raster.title, "_count", sep="")
# nmax <- sum(as.numeric(output.weights > 0))
# if (nmax > 3) {
# raster::KML(KMLworking.raster, filename=kmlcount, col=c("grey", "black", grDevices::rainbow(n=(nmax-2), start=0, end=1/3), "blue"),
# colNA=0, blur=10, overwrite=T, breaks=seq(from=-1, to=nmax, by=1))
# }else{
# raster::KML(KMLworking.raster, filename=kmlcount, col=c("grey", grDevices::rainbow(n=nmax, start=0, end=1/3)),
# colNA=0, blur=10, overwrite=TRUE, breaks=seq(from=-1, to=nmax, by=1))
# }
# }
#
if(evaluate == T) {
eval1 <- NULL
cat(paste("\n", "Evaluation of created ensemble raster layer (", rasterfull, ") at locations p and a", "\n\n", sep = ""))
pres_consensus <- raster::extract(ensemble, p)/1000
abs_consensus <- raster::extract(ensemble, a)/1000
eval1 <- dismo::evaluate(p=pres_consensus, a=abs_consensus)
print(eval1)
thresholds["ENSEMBLE"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres_consensus, Abs=abs_consensus)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds["ENSEMBLE"]))
}
if(retest == T) {
eval1 <- NULL
cat(paste("\n", "Evaluation of created ensemble raster layer (", rasterfull, ") at locations pt and at", "\n\n", sep = ""))
pres_consensus <- raster::extract(ensemble, pt)/1000
abs_consensus <- raster::extract(ensemble, at)/1000
eval1 <- dismo::evaluate(p=pres_consensus, a=abs_consensus)
print(eval1)
}
ensemble.statistics["ensemble.threshold"] <- thresholds["ENSEMBLE"]
#
# if (KML.out == T) {
# raster::writeRaster(ensemble, filename="KMLworking.grd", overwrite=T)
# KMLworking.raster <- raster::raster("KMLworking.grd")
# raster::setMinMax(KMLworking.raster)
# names(KMLworking.raster) <- raster.title
# raster::writeRaster(KMLworking.raster, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
# thresholdx <- 1000 * as.numeric(thresholds["ENSEMBLE"])
# raster.min <- raster::minValue(KMLworking.raster)
# raster.max <- raster::maxValue(KMLworking.raster)
# abs.breaks <- 8
# pres.breaks <- 8
# seq1 <- round(seq(from=raster.min, to=thresholdx, length.out=abs.breaks), 4)
# seq1 <- seq1[1:(abs.breaks-1)]
# seq1[-abs.breaks]
# seq1 <- unique(seq1)
# seq2 <- round(seq(from = thresholdx, to = raster.max, length.out=pres.breaks), 4)
# seq2 <- unique(seq2)
# raster::KML(KMLworking.raster, filename=kmlfull, breaks = c(seq1, seq2), col = c(grDevices::rainbow(n=length(seq1), start=0, end =1/6), grDevices::rainbow(n=length(seq2)-1, start=3/6, end=4/6)), colNA = 0,
# blur=KML.blur, maxpixels=KML.maxpixels, overwrite=TRUE)
# }
enspresence <- ensemble >= 1000 * thresholds["ENSEMBLE"]
raster::setMinMax(enspresence)
raster::writeRaster(enspresence, filename=rasterpresence, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
# names(enspresence) <- paste(raster.title, "_presence", sep="")
# raster::writeRaster(x=enspresence, filename=rasterpresence, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
# avoid possible problems with saving of names of the raster layers
# no longer used with default GTiff format since DEC-2022
# raster::writeRaster(enspresence, filename="working.grd", overwrite=T)
# working.raster <- raster::raster("working.grd")
# names(working.raster) <- paste(raster.title, "_presence", sep="")
# raster::writeRaster(working.raster, filename=rasterpresence, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
#
# if (KML.out == T) {
# raster::writeRaster(enspresence, filename="KMLworking.grd", overwrite=T)
# KMLworking.raster <- raster::raster("KMLworking.grd")
# names(KMLworking.raster) <- paste(raster.title, "_presence", sep="")
# raster::KML(KMLworking.raster, filename=kmlpresence, col=c("grey", "green"),
# colNA=0, blur=KML.blur, maxpixels=KML.maxpixels, overwrite=T)
# }
#
cat(paste("\n", "End of modelling for organism: ", RASTER.species.orig, "\n\n", sep = ""))
cat(paste("Predictions were made for RasterStack: ", stack.title, "\n\n", sep = ""))
out <- list(ensemble.statistics=ensemble.statistics, output.weights=output.weights, thresholds=thresholds, call=match.call() )
if (SINK==T && OLD.SINK==F) {sink(file=NULL, append=T)}
return(out)
# end of prediction failures loop
}else{
out <- list(warning="prediction failure for some algorithms", output.weights=output.weights, call=match.call() )
if (SINK==T && OLD.SINK==F) {sink(file=NULL, append=T)}
return(out)
}
}
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/ensemble.raster.R
|
`ensemble.red` <- function(
x=NULL
)
{
# Function inspired from red::map.sdm function
# However, AOO and EOO calculated for different thresholds of 'count' suitability maps
# AOO and EOO thresholds from http://www.iucnredlist.org/static/categories_criteria_3_1 (April 2018)
#
# if (! require(dismo)) {stop("Please install the dismo package")}
if (! requireNamespace("red")) {stop("Please install the red package")}
if (is.null(x) == T) {stop("value for parameter x is missing (RasterLayer object)")}
if (inherits(x, "RasterLayer") == F) {stop("x is not a RasterLayer object")}
#
cat(paste("Calculation of Area of Occupancy (AOO, km2) and Extent of Occurrence (EOO, km2) (package: red)", "\n\n", sep=""))
#
freqs <- raster::freq(x)
freqs <- freqs[complete.cases(freqs),]
freqs <- freqs[freqs[, 1] > 0, ]
    if (nrow(freqs) > 1) {
        for (i in (nrow(freqs)-1):1) {freqs[i, 2] <- freqs[i, 2] + freqs[i+1, 2]}
    }
#
results <- data.frame(array(dim=c(nrow(freqs), 6)))
names(results) <- c("threshold", "cells", "AOO", "AOO.Type", "EOO", "EOO.Type")
results[, c(1:2)] <- freqs
#
reclassify.matrix <- array(dim=c(2, 3))
reclassify.matrix[1, ] <- c(0.0, 1.0, 0)
reclassify.matrix[2, ] <- c(0.0, 1.0, 1)
reclassify.matrix[2, 2] <- results[nrow(results), 1]
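    # reclassification scheme used below: for each count threshold t, cells with values in
    # [0, t-0.1] become 0 (absence) and cells in [t-0.1, maximum count] become 1 (presence),
    # so AOO and EOO are recalculated for increasingly strict consensus levels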
#
for (i in 1:nrow(results)) {
AOO.Type <- EOO.Type <- c("")
reclassify.matrix[1, 2] <- reclassify.matrix[2, 1] <- results[i, 1]-0.1
pres.count <- raster::reclassify(x, reclassify.matrix)
AOO <- red::aoo(pres.count)
results[i, "AOO"] <- AOO
if (AOO < 2000) {AOO.Type <- "possibly Vulnerable (VU)"}
if (AOO < 500) {AOO.Type <- "possibly Endangered (EN)"}
if (AOO < 10) {AOO.Type <- "possibly Critically Endangered (CR)"}
results[i, "AOO.Type"] <- AOO.Type
EOO <- red::eoo(pres.count)
results[i, "EOO"] <- EOO
        if (EOO < 20000) {EOO.Type <- "possibly Vulnerable (VU)"}
if (EOO < 5000) {EOO.Type <- "possibly Endangered (EN)"}
if (EOO < 100) {EOO.Type <- "possibly Critically Endangered (CR)"}
results[i, "EOO.Type"] <- EOO.Type
}
return(results)
}
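# Minimal usage sketch (hypothetical file and object names): 'count.raster' is assumed to be a
# consensus count layer, for example one written by ensemble.raster() in the ensembles/count folder.
# count.raster <- raster::raster("ensembles/count/Species_xnTitle.tif")
# red.summary <- ensemble.red(x=count.raster)
# print(red.summary)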
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/ensemble.red.R
|
`ensemble.simplified.categories` <- function(
xcat=NULL, p=NULL,
filename=NULL, overwrite=TRUE, ...
)
{
# if (! require(dismo)) {stop("Please install the dismo package")}
if(inherits(xcat,"RasterLayer") == F) {stop("parameter xcat is expected to be a RasterLayer")}
if(is.null(p) == T) {stop("presence locations are missing (parameter p)")}
if(is.null(filename) == T) {
cat(paste("\n", "No new filename was provided", sep = ""))
# if (! require(tools)) {stop("tools package not available")}
        filename1 <- raster::filename(xcat)
extension1 <- paste(".", tools::file_ext(filename1),sep="")
extension2 <- paste("_new.", tools::file_ext(filename1),sep="")
filename <- gsub(pattern=extension1, replacement=extension2, x=filename1)
cat(paste("\n", "New raster will be saved as:", sep = ""))
cat(paste("\n", filename, "\n", sep=""))
}
# get categories of presence points
a <- dismo::randomPoints(xcat, n=10)
double.stack <- raster::stack(xcat, xcat)
TrainData <- dismo::prepareData(double.stack, p, b=a, factors=names(xcat), xy=FALSE)
TrainData <- TrainData[, -3]
names(TrainData)[2] <- names(xcat)
TrainData <- TrainData[TrainData[,"pb"]==1, , drop=F]
presence.categories <- levels(droplevels(factor(TrainData[,names(xcat)])))
presence.categories <- as.numeric(presence.categories)
cat(paste("\n", "categories with presence points", "\n", sep = ""))
print(presence.categories)
# get all categories of the layer
all.categories <- raster::freq(xcat)[,1]
all.categories <- all.categories[is.na(all.categories) == F]
# categories without presence points
new.categories <- all.categories[is.na(match(all.categories, presence.categories) )]
cat(paste("\n", "categories without presence points", "\n", sep = ""))
print(new.categories)
# outside category
out.cat <- max(new.categories)
cat(paste("\n", "categories without presence points will all be classified as: ", out.cat, "\n\n", sep = ""))
replace.frame <- data.frame(id=new.categories, v=rep(out.cat, length(new.categories)))
colnames(replace.frame)[2] <- names(xcat)
new.x <- raster::subs(xcat, replace.frame, by=1, which=2, subsWithNA=FALSE, filename=filename, overwrite=overwrite, ...)
return(filename)
}
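# Minimal usage sketch (hypothetical file and object names):
# land.cover <- raster::raster("landcover.tif")   # categorical RasterLayer
# pres.xy <- read.csv("presences.csv")[, c("lon", "lat")]
# new.file <- ensemble.simplified.categories(xcat=land.cover, p=pres.xy,
#     filename="landcover_simplified.tif", overwrite=TRUE)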
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/ensemble.simplified.categories.R
|
`ensemble.spatialThin` <- function(
x, thin.km=0.1, runs=100, silent=FALSE, verbose=FALSE,
return.notRetained=FALSE
)
{
'distGeo.stack' <- function(x) {
n <- nrow(x)
pairs <- utils::combn(n, 2)
p <- ncol(pairs)
pairs <- cbind(t(pairs), numeric(p))
for (i in 1:p) {
pairs[i, 3] <- geosphere::distGeo(x[pairs[i, 1], ], x[pairs[i, 2], ]) / 1000
pairs[i, 3] <- round(pairs[i, 3], 2)
}
return(pairs)
}
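    # distGeo.stack returns one row per pair of locations with columns
    # (index of first point, index of second point, geodesic distance in km rounded to 2 decimals),
    # e.g. for 3 points the rows are (1, 2, d12), (1, 3, d13) and (2, 3, d23)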
# distGeo.thin operates on stacked distances (do not recalculate distance each run)
'distGeo.thin' <- function(x, x2, thin.km=0.1) {
while(min(x2[, 3]) < thin.km && nrow(x2) > 1) {
p <- nrow(x2)
x2 <- x2[sample(p), ]
first.min <- which(x2[, 3] < thin.km)
first.min <- first.min[1]
random.col <- as.numeric(runif(1) > 0.5)+1
selected <- x2[first.min, random.col]
rows1 <- x2[, 1] != selected
x2 <- x2[rows1, , drop=F]
rows2 <- x2[, 2] != selected
x2 <- x2[rows2, , drop=F]
}
retained <- unique(c(x2[, 1], x2[, 2]))
x3 <- x[retained, , drop=F]
# special case where the remaining 2 locations are closer than minimum distance
if (nrow(x3)==2 && distGeo.stack(x3)[3] < thin.km) {
retained <- sample(retained, size=1)
x3 <- x[retained, , drop=F]
}
return(list(x3=x3, retained=retained))
}
#
if (verbose == T) {silent <- FALSE}
if (raster::couldBeLonLat(sp::SpatialPoints(x)) == F) {
cat(paste("WARNING: locations not in longitude - latitude format", "\n"))
cat(paste("therefore spatial thinning not executed", "\n\n"))
return(x)
}
if(nrow(x) == 1) {
if (silent == F) {cat(paste("NOTE: only one location was provided", "\n"))}
return(x)
}
x2 <- distGeo.stack(x)
if(max(x2[, 3]) <= thin.km) {
if (silent == F) {
cat(paste("WARNING: thinning parameter larger or equal to maximum distance among locations", "\n"))
cat(paste("therefore only one location randomly selected", "\n\n"))
}
p <- nrow(x)
x3 <- x[sample(p, 1), ]
return(x3)
}
if(min(x2[, 3]) >= thin.km) {
if (silent == F) {
cat(paste("WARNING: thinning parameter smaller or equal to minimum distance among locations", "\n"))
cat(paste("therefore all locations selected", "\n\n"))
}
return(x)
}
runs <- max(runs, 1)
locs <- 0
for (i in 1:runs) {
loc.l1 <- distGeo.thin(x, x2, thin.km=thin.km)
loc1 <- loc.l1$x3
if (verbose == T) {
if (nrow(loc1) > locs) {cat(paste("run ", i, " (locations: ", nrow(loc1), " > ", locs, " [previous maximum number of locations])", "\n", sep=""))}
if (nrow(loc1) == locs) {cat(paste("run ", i, " (locations: ", nrow(loc1), " = ", locs, " [previous maximum number of locations])", "\n", sep=""))}
if (nrow(loc1) < locs) {cat(paste("run ", i, " (locations: ", nrow(loc1), ")", "\n", sep=""))}
}
if (nrow(loc1) > locs) {
locs <- nrow(loc1)
loc.out <- loc1
retained.final <- loc.l1$retained
}
}
if (verbose == T) {cat(paste("\n"))}
loc.matrix <- geosphere::distm(loc.out)
diag(loc.matrix) <- NA
if (silent == F) {
cat(paste("Spatially thinned point location data set obtained for target minimum distance of ", thin.km, "\n", sep=""))
cat(paste("number of locations: ", nrow(loc.out), "\n"))
cat(paste("minimum distance: ", min(loc.matrix, na.rm=T), "\n"))
}
if (return.notRetained == T) {
x.not <- x[(c(1:nrow(x)) %in% retained.final) == F, ]
return(list(loc.out=loc.out, not.retained=x.not))
}
if (return.notRetained == F) {return(loc.out)}
}
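# Minimal usage sketch (hypothetical data): thin presence locations (longitude-latitude columns)
# to a minimum inter-point distance of 1 km, keeping the largest retained set over 50 runs.
# locations <- data.frame(lon=c(10.01, 10.02, 10.50), lat=c(5.01, 5.01, 5.30))
# thinned <- ensemble.spatialThin(x=locations, thin.km=1, runs=50)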
`ensemble.spatialThin.quant` <- function(
x, thin.km=0.1, runs=100, silent=FALSE, verbose=FALSE,
LON.length=21, LAT.length=21
)
{
LON.bins <- quantile(x[, 1], probs=seq(from=0, to=1, length=LON.length))
LAT.bins <- quantile(x[, 2], probs=seq(from=0, to=1, length=LAT.length))
#
LON.bins[length(LON.bins)] <- +Inf
LAT.bins[length(LAT.bins)] <- +Inf
#
loc.first <- TRUE
for (lo in 1:(length(LON.bins)-1)) {
x.bin <- x[(x[, 1] >= LON.bins[lo] & x[, 1] < LON.bins[lo+1]), ]
for (la in 1:(length(LAT.bins)-1)) {
x.bin2 <- x.bin[(x.bin[, 2] >= LAT.bins[la] & x.bin[, 2] < LAT.bins[la+1]), ]
if (nrow(as.data.frame(x.bin2)) > 0) {
x.spat <- ensemble.spatialThin(x=x.bin2, thin.km=thin.km, runs=runs, verbose=verbose, silent=silent)
if (loc.first == T) {
x.out <- x.spat
loc.first <- FALSE
}else{
x.out <- rbind(x.out, x.spat)
}
}
}
}
return(x.out)
}
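# The quantile variant first splits the locations into (LON.length-1) x (LAT.length-1) bins with
# roughly equal point counts and then thins each bin separately, which keeps the pairwise distance
# matrices small. A hypothetical call such as
# ensemble.spatialThin.quant(x=locations, thin.km=1, LON.length=11, LAT.length=11)
# would therefore thin within a 10 x 10 grid of quantile bins.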
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/ensemble.spatialThin.R
|
`ensemble.strategy` <- function(
TrainData=NULL, TestData=NULL,
verbose=FALSE,
ENSEMBLE.best=c(4:10), ENSEMBLE.min=c(0.7),
ENSEMBLE.exponent=c(1, 2, 3)
)
{
# if (! require(dismo)) {stop("Please install the dismo package")}
# input AUC
modelnames <- c("MAXENT", "MAXNET", "MAXLIKE", "GBM", "GBMSTEP", "RF", "CF",
"GLM", "GLMSTEP", "GAM", "GAMSTEP", "MGCV", "MGCVFIX",
"EARTH", "RPART", "NNET", "FDA", "SVM", "SVME", "GLMNET",
"BIOCLIM.O", "BIOCLIM", "DOMAIN", "MAHAL", "MAHAL01")
weights <- numeric(length=length(modelnames))
final.weights <- weights
names(weights) <- modelnames
bests <- length(ENSEMBLE.best)
exponents <- length(ENSEMBLE.exponent)
mins <- length(ENSEMBLE.min)
#
# output for each cross-validation run
output <- data.frame(array(dim=c(bests*exponents*mins, 7), NA))
if (nrow(output) == 1) {cat(paste("\n", "NOTE: no alternatives available for choosing best strategy", "\n", sep=""))}
names(output) <- c("ENSEMBLE.best", "ENSEMBLE.exponent", "ENSEMBLE.min", "model.C", "AUC.C", "model.T", "AUC.T")
all.combinations <- expand.grid(ENSEMBLE.best, ENSEMBLE.exponent, ENSEMBLE.min)
output[,c(1:3)] <- all.combinations
#
# recalculate AUC values
weights.cal <- c(0, weights)
weights.eval <- c(0, weights)
names(weights.cal) <- c("ENSEMBLE", modelnames)
names(weights.eval) <- c("ENSEMBLE", modelnames)
for (i in 1:length(weights)) {
TrainPres <- TrainData[TrainData[,"pb"]==1, modelnames[i]]
TrainAbs <- TrainData[TrainData[,"pb"]==0, modelnames[i]]
if (sum(TrainPres, TrainAbs, na.rm=T) != 0) {
eval1 <- dismo::evaluate(p=TrainPres, a=TrainAbs)
weights.cal[i+1] <- eval1@auc
}
TestPres <- TestData[TestData[,"pb"]==1, modelnames[i]]
TestAbs <- TestData[TestData[,"pb"]==0, modelnames[i]]
if (sum(TestPres, TestAbs, na.rm=T) != 0) {
eval2 <- dismo::evaluate(p=TestPres, a=TestAbs)
weights.eval[i+1] <- eval2@auc
}
}
input.weights.c <- weights.cal[names(weights.cal) != "ENSEMBLE"]
input.weights.e <- weights.eval[names(weights.eval) != "ENSEMBLE"]
auc.target <- -1.0
for (r in 1:nrow(output)) {
if (verbose == T) {
cat(paste("\n", "run ", r, ": best=", output[r, "ENSEMBLE.best"], ", exponent=", output[r, "ENSEMBLE.exponent"], ", min=", output[r, "ENSEMBLE.min"] ,"\n", sep=""))
}
#
# strategy based on evaluations
ws <- ensemble.weights(input.weights.e, exponent=output[r, "ENSEMBLE.exponent"], best=output[r, "ENSEMBLE.best"],
min.weight=output[r, "ENSEMBLE.min"])
if (verbose == T) {print(ws)}
TrainData[,"ENSEMBLE"] <- ws["MAXENT"]*TrainData[,"MAXENT"] + ws["MAXNET"]*TrainData[,"MAXNET"] + ws["MAXLIKE"]*TrainData[,"MAXLIKE"] + ws["GBM"]*TrainData[,"GBM"] +
ws["GBMSTEP"]*TrainData[,"GBMSTEP"] + ws["RF"]*TrainData[,"RF"] + ws["CF"]*TrainData[,"CF"] + ws["GLM"]*TrainData[,"GLM"] +
ws["GLMSTEP"]*TrainData[,"GLMSTEP"] + ws["GAM"]*TrainData[,"GAM"] + ws["GAMSTEP"]*TrainData[,"GAMSTEP"] +
ws["MGCV"]*TrainData[,"MGCV"] + ws["MGCVFIX"]*TrainData[,"MGCVFIX"] + ws["EARTH"]*TrainData[,"EARTH"] +
ws["RPART"]*TrainData[,"RPART"] + ws["NNET"]*TrainData[,"NNET"] + ws["FDA"]*TrainData[,"FDA"] +
ws["SVM"]*TrainData[,"SVM"] + ws["SVME"]*TrainData[,"SVME"] + ws["GLMNET"]*TrainData[,"GLMNET"] +
ws["BIOCLIM.O"]*TrainData[,"BIOCLIM.O"] + ws["BIOCLIM"]*TrainData[,"BIOCLIM"] +
ws["DOMAIN"]*TrainData[,"DOMAIN"] + ws["MAHAL"]*TrainData[,"MAHAL"] + ws["MAHAL01"]*TrainData[,"MAHAL01"]
# TrainData[,"ENSEMBLE"] <- trunc(TrainData[,"ENSEMBLE"])
TrainPres <- TrainData[TrainData[,"pb"]==1,"ENSEMBLE"]
TrainAbs <- TrainData[TrainData[,"pb"]==0,"ENSEMBLE"]
eval1 <- NULL
if (sum(TrainPres, TrainAbs, na.rm=T) != 0) {
eval1 <- dismo::evaluate(p=TrainPres, a=TrainAbs)
if (verbose == T) {
cat(paste("\n", "evaluation with train data", "\n", sep=""))
print(eval1)
}
weights.cal["ENSEMBLE"] <- eval1@auc
weights.cal <- weights.cal[order(weights.cal, decreasing=T)]
output[r, "model.C"] <- names(weights.cal)[1]
output[r, "AUC.C"] <- weights.cal[1]
}
TestData[,"ENSEMBLE"] <- ws["MAXENT"]*TestData[,"MAXENT"] + ws["MAXNET"]*TestData[,"MAXNET"] + ws["MAXLIKE"]*TestData[,"MAXLIKE"] + ws["GBM"]*TestData[,"GBM"] +
ws["GBMSTEP"]*TestData[,"GBMSTEP"] + ws["RF"]*TestData[,"RF"] + ws["CF"]*TestData[,"CF"] + ws["GLM"]*TestData[,"GLM"] +
ws["GLMSTEP"]*TestData[,"GLMSTEP"] + ws["GAM"]*TestData[,"GAM"] + ws["GAMSTEP"]*TestData[,"GAMSTEP"] +
ws["MGCV"]*TestData[,"MGCV"] + ws["MGCVFIX"]*TestData[,"MGCVFIX"] + ws["EARTH"]*TestData[,"EARTH"] +
ws["RPART"]*TestData[,"RPART"] + ws["NNET"]*TestData[,"NNET"] + ws["FDA"]*TestData[,"FDA"] +
ws["SVM"]*TestData[,"SVM"] + ws["SVME"]*TestData[,"SVME"] + ws["GLMNET"]*TestData[,"GLMNET"] +
ws["BIOCLIM.O"]*TestData[,"BIOCLIM.O"] + ws["BIOCLIM"]*TestData[,"BIOCLIM"] +
ws["DOMAIN"]*TestData[,"DOMAIN"] + ws["MAHAL"]*TestData[,"MAHAL"] + ws["MAHAL01"]*TestData[,"MAHAL01"]
# TestData[,"ENSEMBLE"] <- trunc(TestData[,"ENSEMBLE"])
TestPres <- TestData[TestData[,"pb"]==1,"ENSEMBLE"]
TestAbs <- TestData[TestData[,"pb"]==0,"ENSEMBLE"]
eval2 <- NULL
if (sum(TestPres, TestAbs, na.rm=T) != 0) {
eval2 <- dismo::evaluate(p=TestPres, a=TestAbs)
if (verbose == T) {
cat(paste("\n", "evaluation with test data", "\n", sep=""))
print(eval2)
}
weights.eval["ENSEMBLE"] <- eval2@auc
weights.eval <- weights.eval[order(weights.eval, decreasing=T)]
output[r, "model.T"] <- names(weights.eval)[1]
output[r, "AUC.T"] <- weights.eval[1]
}
if (weights.eval[1] > auc.target) {
auc.target <- weights.eval[1]
weights.out <- ws
}
}
output <- output[order(output[,"AUC.T"], decreasing=T), ]
cat(paste("\n", "Ensemble tuning result: best=", output[1, "ENSEMBLE.best"], ", exponent=", output[1, "ENSEMBLE.exponent"], ", min=", output[1, "ENSEMBLE.min"] ,"\n", sep=""))
cat(paste("\n", "Weights corresponding to best strategy", "\n", sep = ""))
print(weights.out)
return(list(weights=weights.out, output=output))
}
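# Minimal usage sketch (hypothetical objects): TrainData and TestData are assumed to be data.frames
# with a 'pb' column (1 = presence, 0 = absence) and one column of suitability scores for each
# algorithm listed in 'modelnames' above.
# strategy <- ensemble.strategy(TrainData=TrainData, TestData=TestData,
#     ENSEMBLE.best=c(4, 6, 8), ENSEMBLE.exponent=c(1, 2), ENSEMBLE.min=0.7)
# strategy$weights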
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/ensemble.strategy.R
|
`ensemble.terra` <- function(
xn=NULL,
models.list=NULL,
input.weights=models.list$output.weights,
thresholds=models.list$thresholds,
RASTER.species.name=models.list$species.name,
RASTER.stack.name="xnTitle",
RASTER.filetype="GTiff", RASTER.datatype="INT2S", RASTER.NAflag=-32767,
RASTER.models.overwrite=TRUE,
evaluate=FALSE, SINK=FALSE,
p=models.list$p, a=models.list$a,
pt=models.list$pt, at=models.list$at,
CATCH.OFF=FALSE
)
{
.BiodiversityR <- new.env()
# if (! require(dismo)) {stop("Please install the dismo package")}
if (is.null(xn) == T) {stop("value for parameter xn is missing (SpatRaster object)")}
if(inherits(xn, "SpatRaster") == F) {stop("xn is not a SpatRaster object")}
if (is.null(models.list) == T) {stop("provide 'models.list' as models will not be recalibrated and retested")}
if (is.null(input.weights) == T) {input.weights <- models.list$output.weights}
if (is.null(thresholds) == T) {stop("provide 'thresholds' as models will not be recalibrated and retested")}
thresholds.raster <- thresholds
if(is.null(p) == F) {
p <- data.frame(p)
names(p) <- c("x", "y")
}
if(is.null(a) == F) {
a <- data.frame(a)
names(a) <- c("x", "y")
}
if(is.null(pt) == F) {
pt <- data.frame(pt)
names(pt) <- c("x", "y")
}
if(is.null(at) == F) {
at <- data.frame(at)
names(at) <- c("x", "y")
}
#
retest <- FALSE
if (evaluate == T) {
if (is.null(p)==T || is.null(a)==T) {
cat(paste("\n", "NOTE: not possible to evaluate the models since locations p and a are not provided", "\n", sep = ""))
evaluate <- FALSE
}else{
threshold.method <- models.list$threshold.method
threshold.sensitivity <- models.list$threshold.sensitivity
            threshold.PresenceAbsence <- models.list$threshold.PresenceAbsence
}
if (is.null(pt)==F && is.null(at)==F) {
if(identical(pt, p) == F || identical(at, a) == F) {retest <- TRUE}
}
}
#
# create output file
dir.create("outputs", showWarnings = F)
paste.file <- paste(getwd(), "/outputs/", RASTER.species.name, "_output.txt", sep="")
OLD.SINK <- TRUE
if (sink.number(type="output") == 0) {OLD.SINK <- F}
if (SINK==T && OLD.SINK==F) {
if (file.exists(paste.file) == F) {
cat(paste("\n", "NOTE: results captured in file: ", paste.file, "\n", sep = ""))
}else{
cat(paste("\n", "NOTE: results appended in file: ", paste.file, "\n", sep = ""))
}
cat(paste("\n\n", "RESULTS (ensemble.raster function)", "\n\n", sep=""), file=paste.file, append=T)
sink(file=paste.file, append=T)
cat(paste(date(), "\n", sep=""))
print(match.call())
}
#
# check if all variables are present
vars <- models.list$vars
vars.xn <- names(xn)
nv <- length(vars)
for (i in 1:nv) {
        if (any(vars.xn==vars[i]) == F) {stop(paste("explanatory variable '", vars[i], "' not among grid layers of SpatRaster xn", sep = ""))}
}
for (i in 1:length(vars.xn) ) {
if (any(vars==vars.xn[i]) == F) {
cat(paste("\n", "NOTE: RasterStack layer '", vars.xn[i], "' was not calibrated as explanatory variable", "\n", sep = ""))
xn <- terra::subset(xn, which(names(xn) == vars.xn[i]))
xn <- terra::rast(xn)
}
}
#
# declare categorical layers for xn
factors <- models.list$factors
if(is.null(factors) == F) {
for (i in 1:length(factors)) {
j <- which(names(xn) == factors[i])
xn[[j]] <- terra::as.factor(xn[[j]])
}
}
if(length(factors) == 0) {factors <- NULL}
factlevels <- models.list$factlevels
dummy.vars <- models.list$dummy.vars
dummy.vars.noDOMAIN <- models.list$dummy.vars.noDOMAIN
#
if (is.null(input.weights) == F) {
MAXENT <- max(c(input.weights["MAXENT"], -1), na.rm=T)
MAXNET <- max(c(input.weights["MAXNET"], -1), na.rm=T)
MAXLIKE <- max(c(input.weights["MAXLIKE"], -1), na.rm=T)
GBM <- max(c(input.weights["GBM"], -1), na.rm=T)
GBMSTEP <- max(c(input.weights["GBMSTEP"], -1), na.rm=T)
RF <- max(c(input.weights["RF"], -1), na.rm=T)
CF <- max(c(input.weights["CF"], -1), na.rm=T)
GLM <- max(c(input.weights["GLM"], -1), na.rm=T)
GLMSTEP <- max(c(input.weights["GLMSTEP"], -1), na.rm=T)
GAM <- max(c(input.weights["GAM"], -1), na.rm=T)
GAMSTEP <- max(c(input.weights["GAMSTEP"], -1), na.rm=T)
MGCV <- max(c(input.weights["MGCV"], -1), na.rm=T)
MGCVFIX <- max(c(input.weights["MGCVFIX"], -1), na.rm=T)
EARTH <- max(c(input.weights["EARTH"], -1), na.rm=T)
RPART <- max(c(input.weights["RPART"], -1), na.rm=T)
NNET <- max(c(input.weights["NNET"], -1), na.rm=T)
FDA <- max(c(input.weights["FDA"], -1), na.rm=T)
SVM <- max(c(input.weights["SVM"], -1), na.rm=T)
SVME <- max(c(input.weights["SVME"], -1), na.rm=T)
GLMNET <- max(c(input.weights["GLMNET"], -1), na.rm=T)
BIOCLIM.O <- max(c(input.weights["BIOCLIM.O"], -1), na.rm=T)
BIOCLIM <- max(c(input.weights["BIOCLIM"], -1), na.rm=T)
DOMAIN <- max(c(input.weights["DOMAIN"], -1), na.rm=T)
MAHAL <- max(c(input.weights["MAHAL"], -1), na.rm=T)
MAHAL01 <- max(c(input.weights["MAHAL01"], -1), na.rm=T)
}
#
MAXENT.OLD <- MAXNET.OLD <- MAXLIKE.OLD <- GBM.OLD <- GBMSTEP.OLD <- RF.OLD <- CF.OLD <- GLM.OLD <- GLMSTEP.OLD <- GAM.OLD <- GAMSTEP.OLD <- MGCV.OLD <- NULL
MGCVFIX.OLD <- EARTH.OLD <- RPART.OLD <- NNET.OLD <- FDA.OLD <- SVM.OLD <- SVME.OLD <- GLMNET.OLD <- BIOCLIM.O.OLD <- BIOCLIM.OLD <- DOMAIN.OLD <- MAHAL.OLD <- MAHAL01.OLD <- NULL
# probit models, NULL if no probit model fitted
MAXENT.PROBIT.OLD <- MAXNET.PROBIT.OLD <- MAXLIKE.PROBIT.OLD <- GBM.PROBIT.OLD <- GBMSTEP.PROBIT.OLD <- RF.PROBIT.OLD <- CF.PROBIT.OLD <- GLM.PROBIT.OLD <- GLMSTEP.PROBIT.OLD <- GAM.PROBIT.OLD <- GAMSTEP.PROBIT.OLD <- MGCV.PROBIT.OLD <- NULL
MGCVFIX.PROBIT.OLD <- EARTH.PROBIT.OLD <- RPART.PROBIT.OLD <- NNET.PROBIT.OLD <- FDA.PROBIT.OLD <- SVM.PROBIT.OLD <- SVME.PROBIT.OLD <- GLMNET.PROBIT.OLD <- BIOCLIM.O.PROBIT.OLD <- BIOCLIM.PROBIT.OLD <- DOMAIN.PROBIT.OLD <- MAHAL.PROBIT.OLD <- MAHAL01.PROBIT.OLD <- NULL
if (is.null(models.list) == F) {
if (is.null(models.list$MAXENT) == F) {MAXENT.OLD <- models.list$MAXENT}
if (is.null(models.list$MAXNET) == F) {
MAXNET.OLD <- models.list$MAXNET
MAXNET.clamp <- models.list$formulae$MAXNET.clamp
MAXNET.type <- models.list$formulae$MAXNET.type
}
if (is.null(models.list$MAXLIKE) == F) {
MAXLIKE.OLD <- models.list$MAXLIKE
MAXLIKE.formula <- models.list$formulae$MAXLIKE.formula
}
if (is.null(models.list$GBM) == F) {GBM.OLD <- models.list$GBM}
if (is.null(models.list$GBMSTEP) == F) {GBMSTEP.OLD <- models.list$GBMSTEP}
if (is.null(models.list$RF) == F) {RF.OLD <- models.list$RF}
if (is.null(models.list$CF) == F) {CF.OLD <- models.list$CF}
if (is.null(models.list$GLM) == F) {GLM.OLD <- models.list$GLM}
if (is.null(models.list$GLMSTEP) == F) {GLMSTEP.OLD <- models.list$GLMSTEP}
if (is.null(models.list$GAM) == F) {GAM.OLD <- models.list$GAM}
if (is.null(models.list$GAMSTEP) == F) {GAMSTEP.OLD <- models.list$GAMSTEP}
if (is.null(models.list$MGCV) == F) {MGCV.OLD <- models.list$MGCV}
if (is.null(models.list$MGCVFIX) == F) {MGCVFIX.OLD <- models.list$MGCVFIX}
if (is.null(models.list$EARTH) == F) {EARTH.OLD <- models.list$EARTH}
if (is.null(models.list$RPART) == F) {RPART.OLD <- models.list$RPART}
if (is.null(models.list$NNET) == F) {NNET.OLD <- models.list$NNET}
if (is.null(models.list$FDA) == F) {FDA.OLD <- models.list$FDA}
if (is.null(models.list$SVM) == F) {SVM.OLD <- models.list$SVM}
if (is.null(models.list$SVME) == F) {SVME.OLD <- models.list$SVME}
if (is.null(models.list$GLMNET) == F) {
GLMNET.OLD <- models.list$GLMNET
GLMNET.class <- models.list$formulae$GLMNET.class
}
if (is.null(models.list$BIOCLIM.O) == F) {BIOCLIM.O.OLD <- models.list$BIOCLIM.O}
if (is.null(models.list$BIOCLIM) == F) {BIOCLIM.OLD <- models.list$BIOCLIM}
if (is.null(models.list$DOMAIN) == F) {DOMAIN.OLD <- models.list$DOMAIN}
if (is.null(models.list$MAHAL) == F) {MAHAL.OLD <- models.list$MAHAL}
if (is.null(models.list$MAHAL01) == F) {
MAHAL01.OLD <- models.list$MAHAL01
MAHAL.shape <- models.list$formulae$MAHAL.shape
}
# probit models
if (is.null(models.list$MAXENT.PROBIT) == F) {MAXENT.PROBIT.OLD <- models.list$MAXENT.PROBIT}
if (is.null(models.list$MAXNET.PROBIT) == F) {MAXNET.PROBIT.OLD <- models.list$MAXNET.PROBIT}
if (is.null(models.list$MAXLIKE.PROBIT) == F) {MAXLIKE.PROBIT.OLD <- models.list$MAXLIKE.PROBIT}
if (is.null(models.list$GBM.PROBIT) == F) {GBM.PROBIT.OLD <- models.list$GBM.PROBIT}
if (is.null(models.list$GBMSTEP.PROBIT) == F) {GBMSTEP.PROBIT.OLD <- models.list$GBMSTEP.PROBIT}
if (is.null(models.list$RF.PROBIT) == F) {RF.PROBIT.OLD <- models.list$RF.PROBIT}
if (is.null(models.list$CF.PROBIT) == F) {CF.PROBIT.OLD <- models.list$CF.PROBIT}
if (is.null(models.list$GLM.PROBIT) == F) {GLM.PROBIT.OLD <- models.list$GLM.PROBIT}
if (is.null(models.list$GLMSTEP.PROBIT) == F) {GLMSTEP.PROBIT.OLD <- models.list$GLMSTEP.PROBIT}
if (is.null(models.list$GAM.PROBIT) == F) {GAM.PROBIT.OLD <- models.list$GAM.PROBIT}
if (is.null(models.list$GAMSTEP.PROBIT) == F) {GAMSTEP.PROBIT.OLD <- models.list$GAMSTEP.PROBIT}
if (is.null(models.list$MGCV.PROBIT) == F) {MGCV.PROBIT.OLD <- models.list$MGCV.PROBIT}
if (is.null(models.list$MGCVFIX.PROBIT) == F) {MGCVFIX.PROBIT.OLD <- models.list$MGCVFIX.PROBIT}
if (is.null(models.list$EARTH.PROBIT) == F) {EARTH.PROBIT.OLD <- models.list$EARTH.PROBIT}
if (is.null(models.list$RPART.PROBIT) == F) {RPART.PROBIT.OLD <- models.list$RPART.PROBIT}
if (is.null(models.list$NNET.PROBIT) == F) {NNET.PROBIT.OLD <- models.list$NNET.PROBIT}
if (is.null(models.list$FDA.PROBIT) == F) {FDA.PROBIT.OLD <- models.list$FDA.PROBIT}
if (is.null(models.list$SVM.PROBIT) == F) {SVM.PROBIT.OLD <- models.list$SVM.PROBIT}
if (is.null(models.list$SVME.PROBIT) == F) {SVME.PROBIT.OLD <- models.list$SVME.PROBIT}
if (is.null(models.list$GLMNET.PROBIT) == F) {GLMNET.PROBIT.OLD <- models.list$GLMNET.PROBIT}
if (is.null(models.list$BIOCLIM.O.PROBIT) == F) {BIOCLIM.O.PROBIT.OLD <- models.list$BIOCLIM.O.PROBIT}
if (is.null(models.list$BIOCLIM.PROBIT) == F) {BIOCLIM.PROBIT.OLD <- models.list$BIOCLIM.PROBIT}
if (is.null(models.list$DOMAIN.PROBIT) == F) {DOMAIN.PROBIT.OLD <- models.list$DOMAIN.PROBIT}
if (is.null(models.list$MAHAL.PROBIT) == F) {MAHAL.PROBIT.OLD <- models.list$MAHAL.PROBIT}
if (is.null(models.list$MAHAL01.PROBIT) == F) {MAHAL01.PROBIT.OLD <- models.list$MAHAL01.PROBIT}
}
if (MAXENT > 0) {
jar <- paste(system.file(package="dismo"), "/java/maxent.jar", sep='')
if (!file.exists(jar)) {stop('maxent program is missing: ', jar, '\nPlease download it here: http://www.cs.princeton.edu/~schapire/maxent/')}
}
if (MAXNET > 0) {
if (! requireNamespace("maxnet")) {stop("Please install the maxnet package")}
predict.maxnet2 <- function(object, newdata, clamp=F, type=c("cloglog")) {
p <- predict(object=object, newdata=newdata, clamp=clamp, type=type)
return(as.numeric(p))
}
}
if (MAXLIKE > 0) {
if (! requireNamespace("maxlike")) {stop("Please install the maxlike package")}
# MAXLIKE.formula <- ensemble.formulae(xn, factors=factors)$MAXLIKE.formula
# environment(MAXLIKE.formula) <- .BiodiversityR
}
if (GBM > 0) {
if (! requireNamespace("gbm")) {stop("Please install the gbm package")}
requireNamespace("splines")
}
if (RF > 0) {
# get the probabilities from RF
predict.RF <- function(object, newdata) {
p <- predict(object=object, newdata=newdata, type="response")
return(as.numeric(p))
}
}
if (CF > 0) {
# get the probabilities from RF
# ensure that cases with missing values are removed
if (! requireNamespace("party")) {stop("Please install the party package")}
predict.CF <- function(object, newdata) {
# avoid problems with single variables, especially with terra::predict
for (i in 1:ncol(newdata)) {
if (is.integer(newdata[, i])) {newdata[, i] <- as.numeric(newdata[, i])}
}
p1 <- predict(object=object, newdata=newdata, type="prob")
p <- numeric(length(p1))
for (i in 1:length(p1)) {p[i] <- p1[[i]][2]}
return(as.numeric(p))
}
}
if (MGCV > 0 || MGCVFIX > 0) {
# get the probabilities from MGCV
predict.MGCV <- function(object, newdata, type="response") {
p <- mgcv::predict.gam(object=object, newdata=newdata, type=type)
return(as.numeric(p))
}
}
if (EARTH > 0) {
# get the probabilities from earth
predict.EARTH <- function(object, newdata, type="response") {
p <- predict(object=object, newdata=newdata, type=type)
return(as.numeric(p))
}
}
if (NNET > 0) {
# get the probabilities from nnet
predict.NNET <- function(object, newdata, type="raw") {
p <- predict(object=object, newdata=newdata, type=type)
return(as.numeric(p))
}
}
if (SVME > 0) {
# get the probabilities from svm
predict.SVME <- function(model, newdata) {
p <- predict(model, newdata, probability=T)
return(attr(p, "probabilities")[,1])
}
}
if (GLMNET > 0) {
if (! requireNamespace("glmnet")) {stop("Please install the glmnet package")}
# get the mean probabilities from glmnet
predict.GLMNET <- function(model, newdata, GLMNET.class=FALSE) {
newdata <- as.matrix(newdata)
if (GLMNET.class == TRUE) {
p <- predict(model, newx=newdata, type="class", exact=T)
n.obs <- nrow(p)
nv <- ncol(p)
result <- numeric(n.obs)
for (i in 1:n.obs) {
for (j in 1:nv) {
if(p[i, j] == 1) {result[i] <- result[i] + 1}
}
}
result <- result/nv
return(result)
}else{
p <- predict(model, newx=newdata, type="response", exact=T)
n.obs <- nrow(p)
nv <- ncol(p)
result <- numeric(n.obs)
for (i in 1:n.obs) {
for (j in 1:nv) {
result[i] <- result[i] + p[i, j]
}
}
result <- result/nv
return(result)
}
}
}
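    # note: predict.GLMNET averages across all lambda values of the fitted glmnet path, either the
    # fraction of class-1 predictions (GLMNET.class=TRUE) or the mean response; the double loops are
    # equivalent to rowMeans of the prediction matrix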
if (BIOCLIM.O > 0) {
# get the probabilities for original BIOCLIM
predict.BIOCLIM.O <- function(object, newdata) {
lower.limits <- object$lower.limits
upper.limits <- object$upper.limits
minima <- object$minima
maxima <- object$maxima
#
newdata <- newdata[, which(names(newdata) %in% names(lower.limits)), drop=F]
result <- as.numeric(rep(NA, nrow(newdata)))
varnames <- names(newdata)
nvars <- ncol(newdata)
#
for (i in 1:nrow(newdata)) {
datai <- newdata[i,,drop=F]
resulti <- 1
j <- 0
while (resulti > 0 && j <= (nvars-1)) {
j <- j+1
focal.var <- varnames[j]
if (resulti == 1) {
lowerj <- lower.limits[which(names(lower.limits) == focal.var)]
if (datai[, j] < lowerj) {resulti <- 0.5}
upperj <- upper.limits[which(names(upper.limits) == focal.var)]
if (datai[, j] > upperj) {resulti <- 0.5}
}
minj <- minima[which(names(minima) == focal.var)]
if (datai[, j] < minj) {resulti <- 0}
maxj <- maxima[which(names(maxima) == focal.var)]
if (datai[, j] > maxj) {resulti <- 0}
}
result[i] <- resulti
}
p <- as.numeric(result)
return(p)
}
}
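    # note: predict.BIOCLIM.O returns 1 for cells within the calibrated percentile limits for all
    # variables, 0.5 for cells outside those limits but still within the overall minima and maxima,
    # and 0 for cells outside the calibration range for at least one variable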
if (MAHAL > 0) {
# get the probabilities from mahal
predict.MAHAL <- function(model, newdata, PROBIT) {
p <- dismo::predict(object=model, x=newdata)
if (PROBIT == F) {
p[p<0] <- 0
p[p>1] <- 1
}
return(as.numeric(p))
}
}
if (MAHAL01 > 0) {
# get the probabilities from transformed mahal
predict.MAHAL01 <- function(model, newdata, MAHAL.shape) {
p <- dismo::predict(object=model, x=newdata)
p <- p - 1 - MAHAL.shape
p <- abs(p)
p <- MAHAL.shape / p
return(p)
}
}
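    # Worked example of the MAHAL01 transformation with MAHAL.shape = 1 (hypothetical raw values):
    # a raw Mahalanobis prediction of 1 (at the optimum) maps to 1 / abs(1 - 1 - 1) = 1,
    # a raw value of 0 maps to 1 / abs(0 - 1 - 1) = 0.5, and a raw value of -9 maps to
    # 1 / abs(-9 - 1 - 1) = 1/11, so the transformed output always lies within (0, 1]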
#
output.weights <- input.weights
prediction.failures <- FALSE
#
# avoid problems with non-existing directories
dir.create("models", showWarnings = F)
dir.create("ensembles", showWarnings = F)
dir.create("ensembles/suitability", showWarnings = F)
dir.create("ensembles/count", showWarnings = F)
dir.create("ensembles/presence", showWarnings = F)
#
stack.title <- RASTER.stack.name
if (gsub(".", "_", stack.title, fixed=T) != stack.title) {cat(paste("\n", "WARNING: title of stack (", stack.title, ") contains '.'", "\n\n", sep = ""))}
#
raster.title <- paste(RASTER.species.name, "_", stack.title , sep="")
rasterfull <- paste("ensembles//suitability//", raster.title , ".tif", sep="")
rastercount <- paste("ensembles//count//", raster.title , ".tif", sep="")
rasterpresence <- paste("ensembles//presence//", raster.title, ".tif", sep="")
#
RASTER.species.orig <- RASTER.species.name
if (RASTER.models.overwrite==T) {
RASTER.species.name <- "working"
}else{
RASTER.species.name <- paste(RASTER.species.name, "_", stack.title, sep="")
}
#
cat(paste("\n", "Start of predictions for organism: ", RASTER.species.orig, "\n", sep = ""))
cat(paste("Predictions for SpatRaster: ", stack.title, "\n", sep = ""))
ensemble.statistics <- NULL
cat(paste("ensemble raster layers will be saved in folder ", getwd(), "//ensembles", "\n\n", sep = ""))
statistics.names <- c("n.models", "ensemble.threshold", "ensemble.min", "ensemble.max", "count.min", "count.max")
ensemble.statistics <- numeric(6)
names(ensemble.statistics) <- statistics.names
#
# sometimes still error warnings for minimum and maximum values of the layers
# set minimum and maximum values for xn
for (i in 1:terra::nlyr(xn)) {
xn[[i]] <- terra::setMinMax(xn[[i]])
}
# count models
mc <- 0
#
# start raster layer creations
if (output.weights["MAXENT"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Maximum entropy algorithm (package: dismo)\n", sep=""))
# Put the file 'maxent.jar' in the 'java' folder of dismo
        # the file 'maxent.jar' can be obtained from http://www.cs.princeton.edu/~schapire/maxent/.
jar <- paste(system.file(package="dismo"), "/java/maxent.jar", sep='')
results <- MAXENT.OLD
pmaxent <- NULL
fullname <- paste("models/", RASTER.species.name, "_MAXENT.tif", sep="")
fullname2 <- paste("models/", RASTER.species.name, "_MAXENT_step1.tif", sep="")
if (CATCH.OFF == F) {
tryCatch(pmaxent <- terra::predict(object=results, x=xn, na.rm=TRUE,
filename=fullname, overwrite=TRUE),
error= function(err) {print(paste("MAXENT prediction failed"))},
silent=F)
}else{
pmaxent <- terra::predict(object=results, x=xn, na.rm=TRUE,
filename=fullname, overwrite=TRUE)
}
if (is.null(pmaxent) == F) {
results2 <- MAXENT.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
# fullname2 <- paste(fullname, "_step1", sep="")
terra::writeRaster(x=pmaxent, filename=fullname2, overwrite=TRUE)
explan.stack <- terra::rast(fullname2)
names(explan.stack) <- "MAXENT"
pmaxent <- terra::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, overwrite=TRUE)
}
pmaxent <- trunc(1000*pmaxent)
terra::writeRaster(x=pmaxent, filename=fullname, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- terra::extract(pmaxent, y=p)[,2]/1000
abs1 <- terra::extract(pmaxent, y=a)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["MAXENT"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["MAXENT"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- terra::extract(pmaxent, pt)[,2]/1000
abs1 <- terra::extract(pmaxent, at)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: MAXENT prediction failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["MAXENT"] <- -1
}
}
if (output.weights["MAXNET"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Maximum entropy algorithm (package: maxnet)\n", sep=""))
results <- MAXNET.OLD
pmaxnet <- NULL
fullname <- paste("models/", RASTER.species.name, "_MAXNET.tif", sep="")
fullname2 <- paste("models/", RASTER.species.name, "_MAXNET_step1.tif", sep="")
if (CATCH.OFF == F) {
tryCatch(pmaxnet <- terra::predict(object=xn, model=results, fun=predict.maxnet2, na.rm=TRUE, clamp=MAXNET.clamp, type=MAXNET.type,
filename=fullname, overwrite=TRUE),
error= function(err) {print(paste("MAXNET prediction failed"))},
silent=F)
}else{
pmaxnet <- terra::predict(object=xn, model=results, fun=predict.maxnet2, na.rm=TRUE, clamp=MAXNET.clamp, type=MAXNET.type,
filename=fullname, overwrite=TRUE)
}
if (is.null(pmaxnet) == F) {
results2 <- MAXNET.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
# fullname2 <- paste(fullname, "_step1.tif", sep="")
terra::writeRaster(x=pmaxnet, filename=fullname2, overwrite=TRUE)
explan.stack <- terra::rast(fullname2)
names(explan.stack) <- "MAXNET"
pmaxnet <- terra::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, overwrite=TRUE)
}
pmaxnet <- trunc(1000*pmaxnet)
terra::writeRaster(x=pmaxnet, filename=fullname, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- terra::extract(pmaxnet, y=p)[, 2]/1000
abs1 <- terra::extract(pmaxnet, y=a)[, 2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["MAXNET"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["MAXNET"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- (terra::extract(pmaxnet, pt)[,2])/1000
abs1 <- (terra::extract(pmaxnet, at)[,2])/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: MAXNET prediction failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["MAXNET"] <- -1
}
}
if (output.weights["MAXLIKE"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Maxlike algorithm (package: maxlike)\n", sep=""))
results <- MAXLIKE.OLD
pmaxlike <- NULL
fullname <- paste("models/", RASTER.species.name, "_MAXLIKE.tif", sep="")
fullname2 <- paste("models/", RASTER.species.name, "_MAXLIKE_step1.tif", sep="")
xn.num <- terra::subset(xn, subset=models.list$num.vars)
if (CATCH.OFF == F) {
tryCatch(pmaxlike <- terra::predict(object=xn.num, model=results, na.rm=TRUE,
filename=fullname, overwrite=TRUE),
error= function(err) {print(paste("MAXLIKE prediction failed"))},
silent=F)
}else{
pmaxlike <- terra::predict(object=xn.num, model=results, na.rm=TRUE,
filename=fullname, overwrite=TRUE)
}
if (is.null(pmaxlike) == F) {
results2 <- MAXLIKE.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
# fullname2 <- paste("models//", "MAXLIKE_step1", sep="")
# fullname2 <- paste(fullname, "_step1.tif", sep="")
terra::writeRaster(x=pmaxlike, filename=fullname2, overwrite=TRUE)
explan.stack <- terra::rast(fullname2)
names(explan.stack) <- "MAXLIKE"
pmaxlike <- terra::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, overwrite=TRUE)
}
pmaxlike <- trunc(1000*pmaxlike)
terra::writeRaster(x=pmaxlike, filename=fullname, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- terra::extract(pmaxlike, p)[,2]/1000
abs1 <- terra::extract(pmaxlike, a)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["MAXLIKE"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["MAXLIKE"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- terra::extract(pmaxlike, pt)[,2]/1000
abs1 <- terra::extract(pmaxlike, at)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: MAXLIKE prediction failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["MAXLIKE"] <- -1
}
}
if (output.weights["GBM"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Generalized boosted regression modeling (package: gbm) \n", sep=""))
if (!is.null(factors)) {
cat(paste("\n", "WARNING: not certain whether the correct factor levels will be used", "\n", sep=""))
}
results <- GBM.OLD
pgbm <- NULL
fullname <- paste("models/", RASTER.species.name, "_GBM.tif", sep="")
fullname2 <- paste("models/", RASTER.species.name, "_GBM_step1.tif", sep="")
if (CATCH.OFF == F) {
tryCatch(pgbm <- terra::predict(object=xn, model=results, na.rm=TRUE, factors=factlevels,
n.trees=results$n.trees, type="response", filename=fullname, overwrite=TRUE),
error= function(err) {print(paste("GBM prediction failed"))},
silent=F)
}else{
pgbm <- terra::predict(object=xn, model=results, na.rm=TRUE, factors=factlevels,
n.trees=results$n.trees, type="response", filename=fullname, overwrite=TRUE)
}
if (is.null(pgbm) == F) {
results2 <- GBM.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
# fullname2 <- paste(fullname, "_step1.tif", sep="")
terra::writeRaster(x=pgbm, filename=fullname2, overwrite=TRUE)
explan.stack <- terra::rast(fullname2)
names(explan.stack) <- "GBM"
pgbm <- terra::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, overwrite=TRUE)
}
pgbm <- trunc(1000*pgbm)
terra::writeRaster(x=pgbm, filename=fullname, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- terra::extract(pgbm, p)[,2]/1000
abs1 <- terra::extract(pgbm, a)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["GBM"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["GBM"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- terra::extract(pgbm, pt)[,2]/1000
abs1 <- terra::extract(pgbm, at)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: GBM prediction failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["GBM"] <- -1
}
}
if (output.weights["GBMSTEP"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". gbm step algorithm (package: dismo)\n", sep=""))
if (!is.null(factors)) {
cat(paste("\n", "WARRNING: not certain whether the correct factor levels will be used", "\n", sep=""))
}
results <- GBMSTEP.OLD
pgbms <- NULL
fullname <- paste("models/", RASTER.species.name, "_GBMSTEP.tif", sep="")
fullname2 <- paste("models/", RASTER.species.name, "_GBMSTEP_step1.tif", sep="")
if (CATCH.OFF == F) {
tryCatch(pgbms <- terra::predict(object=xn, model=results, fun=gbm::predict.gbm, na.rm=TRUE, factors=factlevels,
n.trees=results$n.trees, type="response", filename=fullname, overwrite=TRUE),
error= function(err) {print(paste("stepwise GBM prediction failed"))},
silent=F)
}else{
pgbms <- terra::predict(object=xn, model=results, fun=gbm::predict.gbm, na.rm=TRUE, factors=factlevels,
n.trees=results$n.trees, type="response", filename=fullname, overwrite=TRUE)
}
if (is.null(pgbms) == F) {
results2 <- GBMSTEP.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
# fullname2 <- paste(fullname, "_step1", sep="")
terra::writeRaster(x=pgbms, filename=fullname2, overwrite=TRUE)
explan.stack <- terra::rast(fullname2)
names(explan.stack) <- "GBMSTEP"
pgbms <- terra::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, overwrite=TRUE)
}
pgbms <- trunc(1000*pgbms)
# corrected writing in new format (August 2020)
terra::writeRaster(x=pgbms, filename=fullname, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- terra::extract(pgbms, p)[,2]/1000
abs1 <- terra::extract(pgbms, a)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["GBMSTEP"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["GBMSTEP"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- terra::extract(pgbms, pt)[,2]/1000
abs1 <- terra::extract(pgbms, at)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: stepwise GBM prediction failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["GBMSTEP"] <- -1
}
}
if (output.weights["RF"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Random forest algorithm (package: randomForest)\n", sep=""))
results <- RF.OLD
prf <- NULL
fullname <- paste("models/", RASTER.species.name, "_RF.tif", sep="")
fullname2 <- paste("models/", RASTER.species.name, "_RF_step1.tif", sep="")
if (CATCH.OFF == F) {
tryCatch(prf <- terra::predict(object=xn, model=results, fun=predict.RF, na.rm=TRUE, factors=factlevels,
filename=fullname, overwrite=TRUE),
error= function(err) {print(paste("random forest prediction failed"))},
silent=F)
}else{
prf <- terra::predict(object=xn, model=results, fun=predict.RF, na.rm=TRUE, factors=factlevels,
filename=fullname, overwrite=TRUE)
}
if (is.null(prf) == F) {
results2 <- RF.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
# fullname2 <- paste(fullname, "_step1", sep="")
terra::writeRaster(x=prf, filename=fullname2, overwrite=TRUE)
explan.stack <- terra::rast(fullname2)
names(explan.stack) <- "RF"
prf <- terra::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, overwrite=TRUE)
}
prf <- trunc(1000*prf)
terra::writeRaster(x=prf, filename=fullname, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- terra::extract(prf, p)[,2]/1000
abs1 <- terra::extract(prf, a)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["RF"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["RF"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- terra::extract(prf, pt)[,2]/1000
abs1 <- terra::extract(prf, at)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: random forest prediction failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["RF"] <- -1
}
}
if (output.weights["CF"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Random forest algorithm (package: party)\n", sep=""))
results <- CF.OLD
pcf <- NULL
fullname <- paste("models/", RASTER.species.name, "_CF.tif", sep="")
fullname2 <- paste("models/", RASTER.species.name, "_CF_step1.tif", sep="")
if (CATCH.OFF == F) {
tryCatch(pcf <- terra::predict(object=xn, model=results, fun=predict.CF, na.rm=TRUE, factors=factlevels,
filename=fullname, overwrite=TRUE),
error= function(err) {print(paste("random forest prediction failed"))},
silent=F)
}else{
pcf <- terra::predict(object=xn, model=results, fun=predict.CF, na.rm=TRUE, factors=factlevels,
filename=fullname, overwrite=TRUE)
}
if (is.null(pcf) == F) {
results2 <- CF.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
# fullname2 <- paste(fullname, "_step1", sep="")
terra::writeRaster(x=pcf, filename=fullname2, overwrite=TRUE)
explan.stack <- terra::rast(fullname2)
names(explan.stack) <- "CF"
pcf <- terra::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, overwrite=TRUE)
}
pcf <- trunc(1000*pcf)
terra::writeRaster(x=pcf, filename=fullname, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- terra::extract(pcf, p)[,2]/1000
abs1 <- terra::extract(pcf, a)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["CF"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["CF"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- terra::extract(pcf, pt)[,2]/1000
abs1 <- terra::extract(pcf, at)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: random forest prediction failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["CF"] <- -1
}
}
if (output.weights["GLM"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Generalized Linear Model \n", sep=""))
results <- GLM.OLD
pglm <- NULL
fullname <- paste("models/", RASTER.species.name, "_GLM.tif", sep="")
fullname2 <- paste("models/", RASTER.species.name, "_GLM_step1.tif", sep="")
        if (CATCH.OFF == F) {
tryCatch(pglm <- terra::predict(object=xn, model=results, na.rm=TRUE, type="response", factors=factlevels,
filename=fullname, overwrite=TRUE),
error= function(err) {print(paste("GLM prediction failed"))},
silent=F)
}else{
pglm <- terra::predict(object=xn, model=results, na.rm=TRUE, type="response", factors=factlevels,
filename=fullname, overwrite=TRUE)
}
if (is.null(pglm) == F) {
results2 <- GLM.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
# fullname2 <- paste(fullname, "_step1", sep="")
terra::writeRaster(x=pglm, filename=fullname2, overwrite=TRUE)
explan.stack <- terra::rast(fullname2)
names(explan.stack) <- "GLM"
pglm <- terra::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, overwrite=TRUE)
}
pglm <- trunc(1000*pglm)
terra::writeRaster(x=pglm, filename=fullname, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- terra::extract(pglm, p)[,2]/1000
abs1 <- terra::extract(pglm, a)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["GLM"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["GLM"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- terra::extract(pglm, pt)[,2]/1000
abs1 <- terra::extract(pglm, at)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: GLM prediction failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["GLM"] <- -1
}
}
if (output.weights["GLMSTEP"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Stepwise Generalized Linear Model \n", sep=""))
results <- GLMSTEP.OLD
pglms <- NULL
fullname <- paste("models/", RASTER.species.name, "_GLMSTEP.tif", sep="")
fullname2 <- paste("models/", RASTER.species.name, "_GLMSTEP_step1.tif", sep="")
if (CATCH.OFF == F) {
tryCatch(pglms <- terra::predict(object=xn, model=results, na.rm=TRUE, type="response", factors=factlevels,
filename=fullname, overwrite=TRUE),
error= function(err) {print(paste("stepwise GLM prediction failed"))},
silent=F)
}else{
pglms <- terra::predict(object=xn, model=results, na.rm=TRUE, type="response", factors=factlevels,
filename=fullname, overwrite=TRUE)
}
if (is.null(pglms) == F) {
results2 <- GLMSTEP.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
# fullname2 <- paste(fullname, "_step1", sep="")
terra::writeRaster(x=pglms, filename=fullname2, overwrite=TRUE)
explan.stack <- terra::rast(fullname2)
names(explan.stack) <- "GLMSTEP"
pglms <- terra::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, overwrite=TRUE)
}
pglms <- trunc(1000*pglms)
terra::writeRaster(x=pglms, filename=fullname, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- terra::extract(pglms, p)[,2]/1000
abs1 <- terra::extract(pglms, a)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["GLMSTEP"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["GLMSTEP"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- terra::extract(pglms, pt)[,2]/1000
abs1 <- terra::extract(pglms, at)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: stepwise GLM prediction failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["GLMSTEP"] <- -1
}
}
if (output.weights["GAM"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Generalized Additive Model (package: gam)\n", sep=""))
results <- GAM.OLD
pgam <- NULL
fullname <- paste("models/", RASTER.species.name, "_GAM.tif", sep="")
fullname2 <- paste("models/", RASTER.species.name, "_GAM_step1.tif", sep="")
if (CATCH.OFF == F) {
tryCatch(pgam <- terra::predict(object=xn, model=results, fun=gam::predict.Gam, na.rm=TRUE, type="response", factors=factlevels,
filename=fullname, overwrite=TRUE),
error= function(err) {print(paste("GAM (package: gam) prediction failed"))},
silent=F)
}else{
pgam <- terra::predict(object=xn, model=results, fun=gam::predict.Gam, na.rm=TRUE, type="response", factors=factlevels,
filename=fullname, overwrite=TRUE)
}
if (is.null(pgam) == F) {
results2 <- GAM.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
# fullname2 <- paste(fullname, "_step1", sep="")
terra::writeRaster(x=pgam, filename=fullname2, overwrite=TRUE)
explan.stack <- terra::rast(fullname2)
names(explan.stack) <- "GAM"
pgam <- terra::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, overwrite=TRUE)
}
pgam <- trunc(1000*pgam)
terra::writeRaster(x=pgam, filename=fullname, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- terra::extract(pgam, p)[,2]/1000
abs1 <- terra::extract(pgam, a)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["GAM"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["GAM"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- terra::extract(pgam, pt)[,2]/1000
abs1 <- terra::extract(pgam, at)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: GAM prediction (gam package) failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["GAM"] <- -1
}
}
if (output.weights["GAMSTEP"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Stepwise Generalized Additive Model (package: gam)\n", sep=""))
results <- GAMSTEP.OLD
pgams <- NULL
fullname <- paste("models/", RASTER.species.name, "_GAMSTEP.tif", sep="")
fullname2 <- paste("models/", RASTER.species.name, "_GAMSTEP_step1.tif", sep="")
if (CATCH.OFF == F) {
tryCatch(pgams <- terra::predict(object=xn, model=results, fun=gam::predict.Gam, type="response", na.rm=TRUE, factors=factlevels,
filename=fullname, overwrite=TRUE),
error= function(err) {print(paste("stepwise GAM (package: gam) prediction failed"))},
silent=F)
}else{
pgams <- terra::predict(object=xn, model=results, fun=gam::predict.Gam, type="response", na.rm=TRUE, factors=factlevels,
filename=fullname, overwrite=TRUE)
}
if (is.null(pgams) == F) {
results2 <- GAMSTEP.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
# fullname2 <- paste(fullname, "_step1", sep="")
terra::writeRaster(x=pgams, filename=fullname2, overwrite=TRUE)
explan.stack <- terra::rast(fullname2)
names(explan.stack) <- "GAMSTEP"
pgams <- terra::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, overwrite=TRUE)
}
pgams <- trunc(1000*pgams)
terra::writeRaster(x=pgams, filename=fullname, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- terra::extract(pgams, p)[,2]/1000
abs1 <- terra::extract(pgams, a)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["GAMSTEP"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["GAMSTEP"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- terra::extract(pgams, pt)[,2]/1000
abs1 <- terra::extract(pgams, at)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: stepwise GAM prediction (gam package) failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["GAMSTEP"] <- -1
}
}
if (output.weights["MGCV"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Generalized Additive Model (package: mgcv)\n", sep=""))
if (!is.null(factors)) {
cat(paste("\n", "WARRNING: not certain whether the correct factor levels will be used", "\n", sep=""))
}
results <- MGCV.OLD
pmgcv <- NULL
fullname <- paste("models/", RASTER.species.name, "_MGCV.tif", sep="")
fullname2 <- paste("models/", RASTER.species.name, "_MGCV_step1.tif", sep="")
if (CATCH.OFF == F) {
tryCatch(pmgcv <- terra::predict(object=xn, model=results, fun=predict.MGCV, na.rm=TRUE, type="response", factors=factlevels,
filename=fullname, overwrite=TRUE),
error= function(err) {print(paste("GAM (package: mgcv) prediction failed"))},
silent=F)
}else{
pmgcv <- terra::predict(object=xn, model=results, fun=predict.MGCV, na.rm=TRUE, type="response", factors=factlevels,
filename=fullname, overwrite=TRUE)
}
if (is.null(pmgcv) == F) {
results2 <- MGCV.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
# fullname2 <- paste(fullname, "_step1", sep="")
terra::writeRaster(x=pmgcv, filename=fullname2, overwrite=TRUE)
explan.stack <- terra::rast(fullname2)
names(explan.stack) <- "MGCV"
pmgcv <- terra::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, overwrite=TRUE)
}
pmgcv <- trunc(1000*pmgcv)
terra::writeRaster(x=pmgcv, filename=fullname, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- terra::extract(pmgcv, p)[,2]/1000
abs1 <- terra::extract(pmgcv, a)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["MGCV"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["MGCV"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- terra::extract(pmgcv, pt)[,2]/1000
abs1 <- terra::extract(pmgcv, at)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: GAM prediction (mgcv package) failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["MGCV"] <- -1
}
}
if (output.weights["MGCVFIX"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". GAM with fixed d.f. regression splines (package: mgcv)\n", sep=""))
if (!is.null(factors)) {
cat(paste("\n", "WARRNING: not certain whether the correct factor levels will be used", "\n", sep=""))
}
results <- MGCVFIX.OLD
pmgcvf <- NULL
fullname <- paste("models/", RASTER.species.name, "_MGCVFIX.tif", sep="")
fullname2 <- paste("models/", RASTER.species.name, "_MGCVFIX_step1.tif", sep="")
if (CATCH.OFF == F) {
tryCatch(pmgcvf <- terra::predict(object=xn, model=results, fun=predict.MGCV, na.rm=TRUE, type="response", factors=factlevels,
filename=fullname, overwrite=TRUE),
error= function(err) {print(paste("GAM with fixed d.f. regression splines (package: mgcv) prediction failed"))},
silent=F)
}else{
pmgcvf <- terra::predict(object=xn, model=results, fun=predict.MGCV, na.rm=TRUE, type="response", factors=factlevels,
filename=fullname, overwrite=TRUE)
}
if (is.null(pmgcvf) == F) {
results2 <- MGCVFIX.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
# fullname2 <- paste(fullname, "_step1", sep="")
terra::writeRaster(x=pmgcvf, filename=fullname2, overwrite=TRUE)
explan.stack <- terra::rast(fullname2)
names(explan.stack) <- "MGCVFIX"
pmgcvf <- terra::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, overwrite=TRUE)
}
pmgcvf <- trunc(1000*pmgcvf)
terra::writeRaster(x=pmgcvf, filename=fullname, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- terra::extract(pmgcvf, p)[,2]/1000
abs1 <- terra::extract(pmgcvf, a)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["MGCVFIX"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["MGCVFIX"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- terra::extract(pmgcvf, pt)[,2]/1000
abs1 <- terra::extract(pmgcvf, at)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: GAM prediction (mgcv package) failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["MGCVFIX"] <- -1
}
}
if (output.weights["EARTH"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Multivariate Adaptive Regression Splines (package: earth)\n", sep=""))
if (!is.null(factors)) {
cat(paste("\n", "NOTE: MARS (earth package) with factors may require explicit dummy variables", "\n", sep=""))
}
results <- EARTH.OLD
pearth <- NULL
fullname <- paste("models/", RASTER.species.name, "_EARTH.tif", sep="")
fullname2 <- paste("models/", RASTER.species.name, "_EARTH_step1.tif", sep="")
if (CATCH.OFF == F) {
tryCatch(pearth <- terra::predict(object=xn, model=results, fun=predict.EARTH, na.rm=TRUE, type="response", factors=factlevels,
filename=fullname, overwrite=TRUE),
error= function(err) {print(paste("MARS (package: earth) prediction failed"))},
silent=F)
}else{
pearth <- terra::predict(object=xn, model=results, fun=predict.EARTH, na.rm=TRUE, type="response", factors=factlevels,
filename=fullname, overwrite=TRUE)
}
if (is.null(pearth) == F) {
results2 <- EARTH.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
# fullname2 <- paste(fullname, "_step1", sep="")
terra::writeRaster(x=pearth, filename=fullname2, overwrite=TRUE)
explan.stack <- terra::rast(fullname2)
names(explan.stack) <- "EARTH"
pearth <- terra::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, overwrite=TRUE)
}
pearth <- trunc(1000*pearth)
terra::writeRaster(x=pearth, filename=fullname, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- terra::extract(pearth, p)[,2]/1000
abs1 <- terra::extract(pearth, a)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["EARTH"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["EARTH"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- terra::extract(pearth, pt)[,2]/1000
abs1 <- terra::extract(pearth, at)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: MARS prediction (earth package) failed", "\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["EARTH"] <- -1
}
}
if (output.weights["RPART"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Recursive Partitioning And Regression Trees (package: rpart)\n", sep=""))
if (!is.null(factors)) {
cat(paste("\n", "WARRNING: not certain whether the correct factor levels will be used", "\n", sep=""))
}
results <- RPART.OLD
prpart <- NULL
fullname <- paste("models/", RASTER.species.name, "_RPART.tif", sep="")
fullname2 <- paste("models/", RASTER.species.name, "_RPART_step1.tif", sep="")
if (CATCH.OFF == F) {
tryCatch(prpart <- terra::predict(object=xn, model=results, na.rm=TRUE, type="prob", index=2, factors=factlevels,
filename=fullname, overwrite=TRUE),
error= function(err) {print(paste("RPART prediction failed"))},
silent=F)
}else{
prpart <- terra::predict(object=xn, model=results, na.rm=TRUE, type="prob", index=2, factors=factlevels,
filename=fullname, overwrite=TRUE)
}
if (is.null(prpart) == F) {
results2 <- RPART.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
# fullname2 <- paste(fullname, "_step1", sep="")
terra::writeRaster(x=prpart, filename=fullname2, overwrite=TRUE)
explan.stack <- terra::rast(fullname2)
names(explan.stack) <- "RPART"
prpart <- terra::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, overwrite=TRUE)
}
prpart <- trunc(1000*prpart)
terra::writeRaster(x=prpart, filename=fullname, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- terra::extract(prpart, p)[,2]/1000
abs1 <- terra::extract(prpart, a)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["RPART"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["RPART"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- terra::extract(prpart, pt)[,2]/1000
abs1 <- terra::extract(prpart, at)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: RPART prediction failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["RPART"] <- -1
}
}
if (output.weights["NNET"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Artificial Neural Network (package: nnet)\n", sep=""))
if (!is.null(factors)) {
cat(paste("\n", "WARRNING: not certain whether the correct factor levels will be used", "\n", sep=""))
}
results <- NNET.OLD
pnnet <- NULL
fullname <- paste("models/", RASTER.species.name, "_NNET.tif", sep="")
fullname2 <- paste("models/", RASTER.species.name, "_NNET_step1.tif", sep="")
if (CATCH.OFF == F){
tryCatch(pnnet <- terra::predict(object=xn, model=results, fun=predict.NNET, na.rm=TRUE, factors=factlevels,
filename=fullname, overwrite=TRUE),
error= function(err) {print(paste("Artificial Neural Network (package: nnet) prediction failed"))},
silent=F)
}else{
pnnet <- terra::predict(object=xn, model=results, fun=predict.NNET, na.rm=TRUE, factors=factlevels,
filename=fullname, overwrite=TRUE)
}
if (is.null(pnnet) == F) {
results2 <- NNET.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
# fullname2 <- paste(fullname, "_step1", sep="")
terra::writeRaster(x=pnnet, filename=fullname2, overwrite=TRUE)
explan.stack <- terra::rast(fullname2)
names(explan.stack) <- "NNET"
pnnet <- terra::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, overwrite=TRUE)
}
pnnet <- trunc(1000*pnnet)
terra::writeRaster(x=pnnet, filename=fullname, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- terra::extract(pnnet, p)[,2]/1000
abs1 <- terra::extract(pnnet, a)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["NNET"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["NNET"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- terra::extract(pnnet, pt)[,2]/1000
abs1 <- terra::extract(pnnet, at)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: ANN prediction (nnet package) failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["NNET"] <- -1
}
}
if (output.weights["FDA"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Flexible Discriminant Analysis (package: mda)\n", sep=""))
if (!is.null(factors)) {
cat(paste("\n", "WARRNING: not certain whether the correct factor levels will be used", "\n", sep=""))
}
results <- FDA.OLD
pfda <- NULL
fullname <- paste("models/", RASTER.species.name, "_FDA.tif", sep="")
fullname2 <- paste("models/", RASTER.species.name, "_FDA_step1.tif", sep="")
if (CATCH.OFF == F) {
tryCatch(pfda <- terra::predict(object=xn, model=results, na.rm=TRUE, type="posterior", index=2, factors=factlevels,
filename=fullname, overwrite=TRUE),
error= function(err) {print(paste("FDA prediction failed"))},
silent=F)
}else{
pfda <- terra::predict(object=xn, model=results, na.rm=TRUE, type="posterior", index=2, factors=factlevels,
filename=fullname, overwrite=TRUE)
}
if (is.null(pfda) == F) {
results2 <- FDA.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
# fullname2 <- paste(fullname, "_step1", sep="")
terra::writeRaster(x=pfda, filename=fullname2, overwrite=TRUE)
explan.stack <- terra::rast(fullname2)
names(explan.stack) <- "FDA"
pfda <- terra::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, overwrite=TRUE)
}
pfda <- trunc(1000*pfda)
terra::writeRaster(x=pfda, filename=fullname, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- terra::extract(pfda, p)[,2]/1000
abs1 <- terra::extract(pfda, a)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["FDA"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["FDA"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- terra::extract(pfda, pt)[,2]/1000
abs1 <- terra::extract(pfda, at)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: FDA prediction failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["FDA"] <- -1
}
}
if (output.weights["SVM"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Support Vector Machines (package: kernlab)\n", sep=""))
if (!is.null(factors)) {
cat(paste("\n", "NOTE: SVM model with factors may require explicit dummy variables", "\n", sep=""))
}
results <- SVM.OLD
psvm <- NULL
fullname <- paste("models/", RASTER.species.name, "_SVM.tif", sep="")
fullname2 <- paste("models/", RASTER.species.name, "_SVM_step1.tif", sep="")
predict.svm2 <- as.function(kernlab::predict)
if (CATCH.OFF == F) {
tryCatch(psvm <- terra::predict(object=xn, model=results, fun=predict.svm2, na.rm=TRUE, type="probabilities", index=2, factors=factlevels,
filename=fullname, overwrite=TRUE),
error= function(err) {print(paste("Support Vector Machines (package: kernlab) prediction failed"))},
silent=F)
}else{
psvm <- terra::predict(object=xn, model=results, fun=predict.svm2, na.rm=TRUE, type="probabilities", index=2, factors=factlevels,
filename=fullname, overwrite=TRUE)
}
if (is.null(psvm) == F) {
results2 <- SVM.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
# fullname2 <- paste(fullname, "_step1", sep="")
terra::writeRaster(x=psvm, filename=fullname2, overwrite=TRUE)
explan.stack <- terra::rast(fullname2)
names(explan.stack) <- "SVM"
psvm <- terra::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, overwrite=TRUE)
}
psvm <- trunc(1000*psvm)
terra::writeRaster(x=psvm, filename=fullname, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- terra::extract(psvm, p)[,2]/1000
abs1 <- terra::extract(psvm, a)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["SVM"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["SVM"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- terra::extract(psvm, pt)[,2]/1000
abs1 <- terra::extract(psvm, at)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: SVM prediction (kernlab package) failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["SVM"] <- -1
}
}
if (output.weights["SVME"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Support Vector Machines (package: e1071)\n", sep=""))
results <- SVME.OLD
psvme <- NULL
fullname <- paste("models/", RASTER.species.name, "_SVME.tif", sep="")
fullname2 <- paste("models/", RASTER.species.name, "_SVME_step1.tif", sep="")
if (CATCH.OFF == F) {
tryCatch(psvme <- terra::predict(object=xn, model=results, fun=predict.SVME, na.rm=TRUE, factors=factlevels,
filename=fullname, overwrite=TRUE),
error= function(err) {print(paste("SVM prediction (e1071 package) failed"))},
warning= function(war) {print(paste("SVM prediction (e1071 package) failed"))},
silent=F)
}else{
psvme <- terra::predict(object=xn, model=results, fun=predict.SVME, na.rm=TRUE, factors=factlevels,
filename=fullname, overwrite=TRUE)
}
if (is.null(psvme) == F) {
results2 <- SVME.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
# fullname2 <- paste(fullname, "_step1", sep="")
terra::writeRaster(x=psvme, filename=fullname2, overwrite=TRUE)
explan.stack <- terra::rast(fullname2)
names(explan.stack) <- "SVME"
psvme <- terra::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, overwrite=TRUE)
}
psvme <- trunc(1000*psvme)
terra::writeRaster(x=psvme, filename=fullname, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- terra::extract(psvme, p)[,2]/1000
abs1 <- terra::extract(psvme, a)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["SVME"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["SVME"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- terra::extract(psvme, pt)[,2]/1000
abs1 <- terra::extract(psvme, at)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: SVM prediction (e1071 package) failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["SVME"] <- -1
}
}
if (output.weights["GLMNET"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". GLM with lasso or elasticnet regularization (package: glmnet)\n", sep=""))
if (is.null(factors) == F) {
cat(paste("\n", "NOTE: factors not considered (maybe consider dummy variables)", "\n", sep=""))
}
results <- GLMNET.OLD
pglmnet <- NULL
fullname <- paste("models/", RASTER.species.name, "_GLMNET.tif", sep="")
fullname2 <- paste("models/", RASTER.species.name, "_GLMNET_step1.tif", sep="")
xn.num <- terra::subset(xn, subset=models.list$num.vars)
if (CATCH.OFF == F) {
tryCatch(pglmnet <- terra::predict(object=xn.num, model=results, fun=predict.GLMNET, na.rm=TRUE, GLMNET.class=GLMNET.class,
filename=fullname, overwrite=TRUE),
error= function(err) {print(paste("GLMNET prediction (glmnet package) failed"))},
warning= function(war) {print(paste("GLMNET prediction (glmnet package) failed"))},
silent=F)
}else{
pglmnet <- terra::predict(object=xn.num, model=results, fun=predict.GLMNET, na.rm=TRUE, GLMNET.class=GLMNET.class,
filename=fullname, overwrite=TRUE)
}
if (is.null(pglmnet) == F) {
results2 <- GLMNET.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
# fullname2 <- paste(fullname, "_step1", sep="")
terra::writeRaster(x=pglmnet, filename=fullname2, overwrite=TRUE)
explan.stack <- terra::rast(fullname2)
names(explan.stack) <- "GLMNET"
pglmnet <- terra::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, overwrite=TRUE)
}
pglmnet <- trunc(1000*pglmnet)
terra::writeRaster(x=pglmnet, filename=fullname, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- terra::extract(pglmnet, p)[,2]/1000
abs1 <- terra::extract(pglmnet, a)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["GLMNET"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["GLMNET"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- terra::extract(pglmnet, pt)[,2]/1000
abs1 <- terra::extract(pglmnet, at)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: GLMNET prediction (glmnet package) failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["GLMNET"] <- -1
}
}
if (output.weights["BIOCLIM.O"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". original BIOCLIM algorithm (package: BiodiversityR)\n", sep=""))
results <- BIOCLIM.O.OLD
pbioO <- NULL
fullname <- paste("models/", RASTER.species.name, "_BIOCLIMO.tif", sep="")
fullname2 <- paste("models/", RASTER.species.name, "_BIOCLIMO_step1.tif", sep="")
if (CATCH.OFF == F){
tryCatch(pbioO <- terra::predict(object=xn, model=results, fun=predict.BIOCLIM.O, na.rm=TRUE,
filename=fullname, overwrite=TRUE),
error= function(err) {print(paste("original BIOCLIM prediction failed"))},
silent=F)
}else{
pbioO <- terra::predict(object=xn, model=results, fun=predict.BIOCLIM.O, na.rm=TRUE,
filename=fullname, overwrite=TRUE)
}
if (is.null(pbioO) == F) {
results2 <- BIOCLIM.O.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
# fullname2 <- paste(fullname, "_step1", sep="")
terra::writeRaster(x=pbioO, filename=fullname2, overwrite=TRUE)
explan.stack <- terra::rast(fullname2)
names(explan.stack) <- "BIOCLIM.O"
pbioO <- terra::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, overwrite=TRUE)
}
pbioO <- trunc(1000*pbioO)
terra::writeRaster(x=pbioO, filename=fullname, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- terra::extract(pbioO, p)[,2]/1000
abs1 <- terra::extract(pbioO, a)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["BIOCLIM.O"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["BIOCLIM.O"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- terra::extract(pbioO, pt)[,2]/1000
abs1 <- terra::extract(pbioO, at)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: original BIOCLIM prediction failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["BIOCLIM.O"] <- -1
}
}
if (output.weights["BIOCLIM"] > 0 || output.weights["DOMAIN"] > 0 || output.weights["MAHAL"] > 0 || output.weights["MAHAL01"] > 0) {
if(is.null(factors) == F) {
xn <- terra::subset(xn, which((names(xn) %in% factors) == FALSE))
# xn <- terra::rast(xn)
}
}
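# The envelope and distance-based methods below (BIOCLIM, DOMAIN, Mahalanobis) expect
# numeric predictors, so factor layers are dropped from xn before predicting.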
if (output.weights["BIOCLIM"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". BIOCLIM algorithm (package: dismo)\n", sep=""))
results <- BIOCLIM.OLD
pbio <- NULL
fullname <- paste("models/", RASTER.species.name, "_BIOCLIM.tif", sep="")
fullname2 <- paste("models/", RASTER.species.name, "_BIOCLIM_step1.tif", sep="")
if (CATCH.OFF == F) {
# tryCatch(pbio <- dismo::predict(object=results, x=xn, na.rm=TRUE,
tryCatch(pbio <- terra::predict(object=xn.num, model=results, na.rm=TRUE,
filename=fullname, overwrite=TRUE),
error= function(err) {print(paste("BIOCLIM prediction failed"))},
silent=F)
}else{
# pbio <- dismo::predict(object=results, x=xn, na.rm=TRUE,
# filename=fullname, overwrite=TRUE)
pbio <- terra::predict(object=xn.num, model=results, na.rm=TRUE,
filename=fullname, overwrite=TRUE)
}
if (is.null(pbio) == F) {
results2 <- BIOCLIM.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
# fullname2 <- paste(fullname, "_step1", sep="")
terra::writeRaster(x=pbio, filename=fullname2, overwrite=TRUE)
explan.stack <- terra::rast(fullname2)
names(explan.stack) <- "BIOCLIM"
pbio <- terra::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, overwrite=TRUE)
}
pbio <- trunc(1000*pbio)
terra::writeRaster(x=pbio, filename=fullname, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- terra::extract(pbio, p)[,2]/1000
abs1 <- terra::extract(pbio, a)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["BIOCLIM"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["BIOCLIM"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- terra::extract(pbio, pt)[,2]/1000
abs1 <- terra::extract(pbio, at)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: BIOCLIM prediction failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["BIOCLIM"] <- -1
}
}
if (output.weights["DOMAIN"] > 0) {
if(is.null(factors) == F) {
xn <- terra::subset(xn, which((names(xn) %in% dummy.vars.noDOMAIN) == FALSE))
# xn <- terra::rast(xn)
}
}
if (output.weights["DOMAIN"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". DOMAIN algorithm (package: dismo)\n", sep=""))
if(is.null(models.list$dummy.vars.noDOMAIN) == F) {
xn <- terra::subset(xn, which((names(xn) %in% models.list$dummy.vars.noDOMAIN) == FALSE))
# xn <- terra::rast(xn)
}
results <- DOMAIN.OLD
pdom <- NULL
fullname <- paste("models/", RASTER.species.name, "_DOMAIN.tif", sep="")
fullname2 <- paste("models/", RASTER.species.name, "_DOMAIN_step1.tif", sep="")
if (CATCH.OFF == F) {
# tryCatch(pdom <- dismo::predict(object=results, x=xn, na.rm=TRUE,
tryCatch(pdom <- terra::predict(object=xn, model=results, na.rm=TRUE,
filename=fullname, overwrite=TRUE),
error= function(err) {print(paste("DOMAIN prediction failed"))},
silent=F)
}else{
# pdom <- dismo::predict(object=results, x=xn, na.rm=TRUE,
# filename=fullname, overwrite=TRUE)
pdom <- terra::predict(object=xn, model=results, na.rm=TRUE,
filename=fullname, overwrite=TRUE)
}
if (is.null(pdom) == F) {
results2 <- DOMAIN.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
# fullname2 <- paste(fullname, "_step1", sep="")
terra::writeRaster(x=pdom, filename=fullname2, overwrite=TRUE)
explan.stack <- terra::rast(fullname2)
names(explan.stack) <- "DOMAIN"
pdom <- terra::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, overwrite=TRUE)
}
pdom <- trunc(1000*pdom)
terra::writeRaster(x=pdom, filename=fullname, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- terra::extract(pdom, p)[,2]/1000
abs1 <- terra::extract(pdom, a)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["DOMAIN"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["DOMAIN"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- terra::extract(pdom, pt)[,2]/1000
abs1 <- terra::extract(pdom, at)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: DOMAIN prediction failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["DOMAIN"] <- -1
}
}
if (output.weights["MAHAL"] > 0 || output.weights["MAHAL01"] > 0) {
if(is.null(dummy.vars) == F) {
xn <- terra::subset(xn, which((names(xn) %in% dummy.vars) == FALSE))
# xn <- terra::rast(xn)
}
}
if (output.weights["MAHAL"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Mahalanobis algorithm (package: dismo)\n", sep=""))
results <- MAHAL.OLD
pmahal <- NULL
fullname <- paste("models/", RASTER.species.name, "_MAHAL.tif", sep="")
fullname2 <- paste("models/", RASTER.species.name, "_MAHAL_step1.tif", sep="")
# not possible to use the predict.mahal function as terra::predict automatically reverts to dismo::predict for 'DistModel' objects
results2 <- MAHAL.PROBIT.OLD
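# the PROBIT argument of predict.MAHAL presumably keeps the raw Mahalanobis output when
# a probit recalibration follows, and returns rescaled output otherwise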
# PROBIT FALSE
if (is.null(results2) == F) {
if (CATCH.OFF == F) {
tryCatch(pmahal <- terra::predict(object=xn, model=results, fun=predict.MAHAL, na.rm=TRUE, PROBIT=FALSE,
filename=fullname, overwrite=TRUE),
error= function(err) {print(paste("Mahalanobis prediction failed"))},
silent=F)
}else{
pmahal <- terra::predict(object=xn, model=results, fun=predict.MAHAL, na.rm=TRUE, PROBIT=FALSE,
filename=fullname, overwrite=TRUE)
}
# PROBIT TRUE
}else{
if (CATCH.OFF == F) {
tryCatch(pmahal <- terra::predict(object=xn, model=results, fun=predict.MAHAL, na.rm=TRUE, PROBIT=TRUE,
filename=fullname, overwrite=TRUE),
error= function(err) {print(paste("Mahalanobis prediction failed"))},
silent=F)
}else{
pmahal <- terra::predict(object=xn, model=results, fun=predict.MAHAL, na.rm=TRUE, PROBIT=TRUE,
filename=fullname, overwrite=TRUE)
}
}
if (is.null(pmahal) == F) {
# results2 <- MAHAL.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
# fullname2 <- paste(fullname, "_step1", sep="")
terra::writeRaster(x=pmahal, filename=fullname2, overwrite=TRUE)
explan.stack <- terra::rast(fullname2)
names(explan.stack) <- "MAHAL"
pmahal <- terra::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, overwrite=TRUE)
}
pmahal <- trunc(1000*pmahal)
terra::writeRaster(x=pmahal, filename=fullname, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- terra::extract(pmahal, p)[,2]/1000
abs1 <- terra::extract(pmahal, a)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["MAHAL"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["MAHAL"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- terra::extract(pmahal, pt)[,2]/1000
abs1 <- terra::extract(pmahal, at)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: Mahalanobis prediction failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["MAHAL"] <- -1
}
}
if (output.weights["MAHAL01"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Mahalanobis algorithm (transformed within 0 to 1 interval)", "\n", sep=""))
if(is.null(dummy.vars) == F) {
xn <- terra::subset(xn, which((names(xn) %in% dummy.vars) == FALSE))
# xn <- terra::rast(xn)
}
results <- MAHAL01.OLD
# pmahal <- NULL
fullname <- paste("models/", RASTER.species.name, "_MAHAL01.tif", sep="")
fullname2 <- paste("models/", RASTER.species.name, "_MAHAL01_step1.tif", sep="")
# not possible to use the predict.mahal function as terra::predict automatically reverts to dismo::predict for 'DistModel' objects
if (CATCH.OFF == F) {
tryCatch(pmahal01 <- terra::predict(object=xn, model=results, fun=predict.MAHAL01, na.rm=TRUE, MAHAL.shape=MAHAL.shape,
filename=fullname, overwrite=TRUE),
error= function(err) {print(paste("transformed Mahalanobis prediction failed"))},
silent=F)
}else{
pmahal01 <- terra::predict(object=xn, model=results, fun=predict.MAHAL01, na.rm=TRUE, MAHAL.shape=MAHAL.shape,
filename=fullname, overwrite=TRUE)
}
if (is.null(pmahal01) == F) {
# pmahal <- pmahal - 1 - MAHAL.shape
# pmahal <- abs(pmahal)
# pmahal <- MAHAL.shape / pmahal
results2 <- MAHAL01.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
# fullname2 <- paste(fullname, "_step1", sep="")
terra::writeRaster(x=pmahal01, filename=fullname2, overwrite=TRUE)
explan.stack <- terra::rast(fullname2)
names(explan.stack) <- "MAHAL01"
pmahal01 <- terra::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, overwrite=TRUE)
}
pmahal01 <- trunc(1000*pmahal01)
terra::writeRaster(x=pmahal01, filename=fullname, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- terra::extract(pmahal01, p)[,2]/1000
abs1 <- terra::extract(pmahal01, a)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
thresholds.raster["MAHAL01"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres1, Abs=abs1)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds.raster["MAHAL01"]))
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- terra::extract(pmahal01, pt)[,2]/1000
abs1 <- terra::extract(pmahal01, at)[,2]/1000
eval1 <- dismo::evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: transformed Mahalanobis prediction failed","\n\n", sep = ""))
prediction.failures <- TRUE
output.weights["MAHAL01"] <- -1
}
}
#
if (prediction.failures == T) {
cat(paste("\n", "WARNING: some predictions failed", sep = ""))
cat(paste("\n", "actual weights that were used were (-1 indicates failed predictions):", "\n", sep = ""))
print(output.weights)
cat(paste("\n", "Because ensemble suitability would therefore underestimated", sep = ""))
cat(paste("\n", "the ensemble will not be calibrated", "\n", sep = ""))
}
#
# create ensembles if no prediction failures
if (prediction.failures == F) {
cat(paste("\n", "submodel thresholds for absence-presence used: ", "\n", sep = ""))
if (evaluate == T) {
cat(paste("(thresholds recalculated from raster layers)", "\n", sep = ""))
thresholds <- thresholds.raster
}
print(thresholds)
print(thresholds)
#
mc <- mc+1
cat(paste("\n\n", mc, ". Ensemble algorithm\n", sep=""))
ensemble.statistics["n.models"] <- sum(as.numeric(output.weights > 0))
# ensemble <- xn[[1]] == terra::NAflag(xn[[1]])
# avoid problems with SRS not matching
ensemble <- terra::rast(fullname)
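# comparing with Inf gives an all-zero (FALSE) layer with the same geometry as the
# submodel rasters, used as an empty template for the ensemble layers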
ensemble <- ensemble > Inf
terra::setMinMax(ensemble)
names(ensemble) <- raster.title
terra::writeRaster(x=ensemble, filename=rasterfull, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
enscount <- ensemble
terra::setMinMax(enscount)
names(enscount) <- paste(raster.title, "_count", sep="")
terra::writeRaster(x=enscount, filename=rastercount, overwrite=TRUE, filetype=RASTER.filetype, datatype="INT1U", NAflag=255)
enspresence <- ensemble
terra::setMinMax(enspresence)
names(enspresence) <- paste(raster.title, "_presence", sep="")
terra::writeRaster(x=enspresence, filename=rasterpresence, overwrite=TRUE, filetype=RASTER.filetype, datatype="INT1U", NAflag=255)
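# From here on, ensemble accumulates the weighted sum of the submodel predictions
# (already scaled 0-1000), while enscount counts how many submodels predict presence,
# i.e. exceed 1000 times their respective threshold.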
if (output.weights["MAXENT"] > 0) {
ensemble <- ensemble + output.weights["MAXENT"] * pmaxent
# terra::writeRaster(x=ensemble, filename=rasterfull, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pmaxent <- pmaxent >= 1000 * thresholds["MAXENT"]
enscount <- enscount + pmaxent
# terra::writeRaster(x=enscount, filename=rastercount, overwrite=TRUE, filetype=RASTER.filetype, datatype="INT1U", NAflag=255)
}
if (output.weights["MAXNET"] > 0) {
ensemble <- ensemble + output.weights["MAXNET"] * pmaxnet
# terra::writeRaster(x=ensemble, filename=rasterfull, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pmaxnet <- pmaxnet >= 1000 * thresholds["MAXNET"]
enscount <- enscount + pmaxnet
# terra::writeRaster(x=enscount, filename=rastercount, overwrite=TRUE, filetype=RASTER.filetype, datatype="INT1U", NAflag=255)
}
if (output.weights["MAXLIKE"] > 0) {
ensemble <- ensemble + output.weights["MAXLIKE"] * pmaxlike
# terra::writeRaster(x=ensemble, filename=rasterfull, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pmaxlike <- pmaxlike >= 1000 * thresholds["MAXLIKE"]
enscount <- enscount + pmaxlike
# terra::writeRaster(x=enscount, filename=rastercount, overwrite=TRUE, filetype=RASTER.filetype, datatype="INT1U", NAflag=255)
}
if (output.weights["GBM"] > 0) {
ensemble <- ensemble + output.weights["GBM"] * pgbm
# terra::writeRaster(x=ensemble, filename=rasterfull, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pgbm <- pgbm >= 1000 * thresholds["GBM"]
enscount <- enscount + pgbm
# terra::writeRaster(x=enscount, filename=rastercount, overwrite=TRUE, filetype=RASTER.filetype, datatype="INT1U", NAflag=255)
}
if (output.weights["GBMSTEP"] > 0) {
ensemble <- ensemble + output.weights["GBMSTEP"] * pgbms
# terra::writeRaster(x=ensemble, filename=rasterfull, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pgbms <- pgbms >= 1000 * thresholds["GBMSTEP"]
enscount <- enscount + pgbms
# terra::writeRaster(x=enscount, filename=rastercount, overwrite=TRUE, filetype=RASTER.filetype, datatype="INT1U", NAflag=255)
}
if (output.weights["RF"] > 0) {
ensemble <- ensemble + output.weights["RF"] * prf
# terra::writeRaster(x=ensemble, filename=rasterfull, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
prf <- prf >= 1000 * thresholds["RF"]
enscount <- enscount + prf
# terra::writeRaster(x=enscount, filename=rastercount, overwrite=TRUE, filetype=RASTER.filetype, datatype="INT1U", NAflag=255)
}
if (output.weights["CF"] > 0) {
ensemble <- ensemble + output.weights["CF"] * pcf
# terra::writeRaster(x=ensemble, filename=rasterfull, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pcf <- pcf >= 1000 * thresholds["CF"]
enscount <- enscount + pcf
# terra::writeRaster(x=enscount, filename=rastercount, overwrite=TRUE, filetype=RASTER.filetype, datatype="INT1U", NAflag=255)
}
if (output.weights["GLM"] > 0) {
ensemble <- ensemble + output.weights["GLM"] * pglm
# terra::writeRaster(x=ensemble, filename=rasterfull, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pglm <- pglm >= 1000 * thresholds["GLM"]
enscount <- enscount + pglm
# terra::writeRaster(x=enscount, filename=rastercount, overwrite=TRUE, filetype=RASTER.filetype, datatype="INT1U", NAflag=255)
}
if (output.weights["GLMSTEP"] > 0) {
ensemble <- ensemble + output.weights["GLMSTEP"] * pglms
# terra::writeRaster(x=ensemble, filename=rasterfull, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pglms <- pglms >= 1000 * thresholds["GLMSTEP"]
enscount <- enscount + pglms
# terra::writeRaster(x=enscount, filename=rastercount, overwrite=TRUE, filetype=RASTER.filetype, datatype="INT1U", NAflag=255)
}
if (output.weights["GAM"] > 0) {
ensemble <- ensemble + output.weights["GAM"] * pgam
# terra::writeRaster(x=ensemble, filename=rasterfull, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pgam <- pgam >= 1000 * thresholds["GAM"]
enscount <- enscount + pgam
# terra::writeRaster(x=enscount, filename=rastercount, overwrite=TRUE, filetype=RASTER.filetype, datatype="INT1U", NAflag=255)
}
if (output.weights["GAMSTEP"] > 0) {
ensemble <- ensemble + output.weights["GAMSTEP"] * pgams
# terra::writeRaster(x=ensemble, filename=rasterfull, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pgams <- pgams >= 1000 * thresholds["GAMSTEP"]
enscount <- enscount + pgams
# terra::writeRaster(x=enscount, filename=rastercount, overwrite=TRUE, filetype=RASTER.filetype, datatype="INT1U", NAflag=255)
}
if (output.weights["MGCV"] > 0) {
ensemble <- ensemble + output.weights["MGCV"] * pmgcv
# terra::writeRaster(x=ensemble, filename=rasterfull, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pmgcv <- pmgcv >= 1000 * thresholds["MGCV"]
enscount <- enscount + pmgcv
# terra::writeRaster(x=enscount, filename=rastercount, overwrite=TRUE, filetype=RASTER.filetype, datatype="INT1U", NAflag=255)
}
if (output.weights["MGCVFIX"] > 0) {
ensemble <- ensemble + output.weights["MGCVFIX"] * pmgcvf
# terra::writeRaster(x=ensemble, filename=rasterfull, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pmgcvf <- pmgcvf >= 1000 * thresholds["MGCVFIX"]
enscount <- enscount + pmgcvf
# terra::writeRaster(x=enscount, filename=rastercount, overwrite=TRUE, filetype=RASTER.filetype, datatype="INT1U", NAflag=255)
}
if (output.weights["EARTH"] > 0) {
ensemble <- ensemble + output.weights["EARTH"] * pearth
# terra::writeRaster(x=ensemble, filename=rasterfull, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pearth <- pearth >= 1000 * thresholds["EARTH"]
enscount <- enscount + pearth
# terra::writeRaster(x=enscount, filename=rastercount, overwrite=TRUE, filetype=RASTER.filetype, datatype="INT1U", NAflag=255)
}
if (output.weights["RPART"] > 0) {
ensemble <- ensemble + output.weights["RPART"] * prpart
# terra::writeRaster(x=ensemble, filename=rasterfull, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
prpart <- prpart >= 1000 * thresholds["RPART"]
enscount <- enscount + prpart
# terra::writeRaster(x=enscount, filename=rastercount, overwrite=TRUE, filetype=RASTER.filetype, datatype="INT1U", NAflag=255)
}
if (output.weights["NNET"] > 0) {
ensemble <- ensemble + output.weights["NNET"] * pnnet
# terra::writeRaster(x=ensemble, filename=rasterfull, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pnnet <- pnnet >= 1000 * thresholds["NNET"]
enscount <- enscount + pnnet
# terra::writeRaster(x=enscount, filename=rastercount, overwrite=TRUE, filetype=RASTER.filetype, datatype="INT1U", NAflag=255)
}
if (output.weights["FDA"] > 0) {
ensemble <- ensemble + output.weights["FDA"] * pfda
# terra::writeRaster(x=ensemble, filename=rasterfull, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pfda <- pfda >= 1000 * thresholds["FDA"]
enscount <- enscount + pfda
# terra::writeRaster(x=enscount, filename=rastercount, overwrite=TRUE, filetype=RASTER.filetype, datatype="INT1U", NAflag=255)
}
if (output.weights["SVM"] > 0) {
ensemble <- ensemble + output.weights["SVM"] * psvm
# terra::writeRaster(x=ensemble, filename=rasterfull, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
psvm <- psvm >= 1000 * thresholds["SVM"]
enscount <- enscount + psvm
# terra::writeRaster(x=enscount, filename=rastercount, overwrite=TRUE, filetype=RASTER.filetype, datatype="INT1U", NAflag=255)
}
if (output.weights["SVME"] > 0) {
ensemble <- ensemble + output.weights["SVME"] * psvme
# terra::writeRaster(x=ensemble, filename=rasterfull, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
psvme <- psvme >= 1000 * thresholds["SVME"]
enscount <- enscount + psvme
# terra::writeRaster(x=enscount, filename=rastercount, overwrite=TRUE, filetype=RASTER.filetype, datatype="INT1U", NAflag=255)
}
if (output.weights["GLMNET"] > 0) {
ensemble <- ensemble + output.weights["GLMNET"] * pglmnet
# terra::writeRaster(x=ensemble, filename=rasterfull, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pglmnet <- pglmnet >= 1000 * thresholds["GLMNET"]
enscount <- enscount + pglmnet
# terra::writeRaster(x=enscount, filename=rastercount, overwrite=TRUE, filetype=RASTER.filetype, datatype="INT1U", NAflag=255)
}
if (output.weights["BIOCLIM.O"] > 0) {
ensemble <- ensemble + output.weights["BIOCLIM.O"] * pbioO
# terra::writeRaster(x=ensemble, filename=rasterfull, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pbioO <- pbioO >= 1000 * thresholds["BIOCLIM.O"]
enscount <- enscount + pbioO
# terra::writeRaster(x=enscount, filename=rastercount, overwrite=TRUE, filetype=RASTER.filetype, datatype="INT1U", NAflag=255)
}
if (output.weights["BIOCLIM"] > 0) {
ensemble <- ensemble + output.weights["BIOCLIM"] * pbio
# terra::writeRaster(x=ensemble, filename=rasterfull, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pbio <- pbio >= 1000 * thresholds["BIOCLIM"]
enscount <- enscount + pbio
# terra::writeRaster(x=enscount, filename=rastercount, overwrite=TRUE, filetype=RASTER.filetype, datatype="INT1U", NAflag=255)
}
if (output.weights["DOMAIN"] > 0) {
ensemble <- ensemble + output.weights["DOMAIN"] * pdom
# terra::writeRaster(x=ensemble, filename=rasterfull, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pdom <- pdom >= 1000 * thresholds["DOMAIN"]
enscount <- enscount + pdom
# terra::writeRaster(x=enscount, filename=rastercount, overwrite=TRUE, filetype=RASTER.filetype, datatype="INT1U", NAflag=255)
}
if (output.weights["MAHAL"] > 0) {
ensemble <- ensemble + output.weights["MAHAL"] * pmahal
# terra::writeRaster(x=ensemble, filename=rasterfull, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pmahal <- pmahal >= 1000 * thresholds["MAHAL"]
enscount <- enscount + pmahal
# terra::writeRaster(x=enscount, filename=rastercount, overwrite=TRUE, filetype=RASTER.filetype, datatype="INT1U", NAflag=255)
}
if (output.weights["MAHAL01"] > 0) {
ensemble <- ensemble + output.weights["MAHAL01"] * pmahal01
# terra::writeRaster(x=ensemble, filename=rasterfull, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pmahal01 <- pmahal01 >= 1000 * thresholds["MAHAL01"]
enscount <- enscount + pmahal01
# terra::writeRaster(x=enscount, filename=rastercount, overwrite=TRUE, filetype=RASTER.filetype, datatype="INT1U", NAflag=255)
}
#
terra::writeRaster(x=ensemble, filename=rasterfull, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
terra::writeRaster(x=enscount, filename=rastercount, overwrite=TRUE, filetype=RASTER.filetype, datatype="INT1U", NAflag=255)
# note that submodels had already been multiplied by 1000
ensemble <- trunc(ensemble)
terra::setMinMax(ensemble)
ensemble.statistics[c("ensemble.min", "ensemble.max")] <- as.numeric(terra::minmax(ensemble))
# names(ensemble) <- raster.title
# terra::writeRaster(x=ensemble, filename=rasterfull, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
# avoid possible problems with saving of names of the raster layers
terra::writeRaster(ensemble, filename="working.tif", overwrite=T)
working.raster <- terra::rast("working.tif")
names(working.raster) <- raster.title
terra::writeRaster(working.raster, filename=rasterfull, overwrite=TRUE, filetype=RASTER.filetype, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
#
terra::setMinMax(enscount)
ensemble.statistics[c("count.min", "count.max")] <- as.numeric(terra::minmax(enscount))
# names(enscount) <- paste(raster.title, "_count", sep="")
# terra::writeRaster(x=enscount, filename=rastercount, overwrite=TRUE, filetype=RASTER.filetype, datatype="INT1U", NAflag=255)
# avoid possible problems with saving of names of the raster layers
terra::writeRaster(enscount, filename="working.tif", overwrite=T)
working.raster <- terra::rast("working.tif")
names(working.raster) <- paste(raster.title, "_count", sep="")
terra::writeRaster(working.raster, filename=rastercount, overwrite=TRUE, filetype=RASTER.filetype, datatype="INT1U", NAflag=255)
#
if(evaluate == T) {
eval1 <- NULL
cat(paste("\n", "Evaluation of created ensemble raster layer (", rasterfull, ") at locations p and a", "\n\n", sep = ""))
pres_consensus <- terra::extract(ensemble, p)[,2]/1000
abs_consensus <- terra::extract(ensemble, a)[,2]/1000
eval1 <- dismo::evaluate(p=pres_consensus, a=abs_consensus)
print(eval1)
thresholds["ENSEMBLE"] <- ensemble.threshold(eval1, threshold.method=threshold.method, threshold.sensitivity=threshold.sensitivity,
threshold.PresenceAbsence=threshold.PresenceAbsence, Pres=pres_consensus, Abs=abs_consensus)
cat(paste("\n", "Threshold (method: ", threshold.method, ") \n", sep = ""))
print(as.numeric(thresholds["ENSEMBLE"]))
}
if(retest == T) {
eval1 <- NULL
cat(paste("\n", "Evaluation of created ensemble raster layer (", rasterfull, ") at locations pt and at", "\n\n", sep = ""))
pres_consensus <- terra::extract(ensemble, pt)[,2]/1000
abs_consensus <- terra::extract(ensemble, at)[,2]/1000
eval1 <- dismo::evaluate(p=pres_consensus, a=abs_consensus)
print(eval1)
}
ensemble.statistics["ensemble.threshold"] <- thresholds["ENSEMBLE"]
#
enspresence <- ensemble >= 1000 * thresholds["ENSEMBLE"]
terra::setMinMax(enspresence)
# names(enspresence) <- paste(raster.title, "_presence", sep="")
# terra::writeRaster(x=enspresence, filename=rasterpresence, overwrite=TRUE, filetype=RASTER.filetype, datatype="INT1U", NAflag=255)
# avoid possible problems with saving of names of the raster layers
terra::writeRaster(enspresence, filename="working.tif", overwrite=T)
working.raster <- terra::rast("working.tif")
names(working.raster) <- paste(raster.title, "_presence", sep="")
terra::writeRaster(working.raster, filename=rasterpresence, overwrite=TRUE, filetype=RASTER.filetype, datatype="INT1U", NAflag=255)
#
cat(paste("\n", "End of modelling for organism: ", RASTER.species.orig, "\n\n", sep = ""))
cat(paste("Predictions were made for RasterStack: ", stack.title, "\n\n", sep = ""))
out <- list(ensemble.statistics=ensemble.statistics, output.weights=output.weights, thresholds=thresholds, call=match.call() )
if (SINK==T && OLD.SINK==F) {sink(file=NULL, append=T)}
return(out)
# end of prediction failures loop
}else{
out <- list(warning="prediction failure for some algorithms", output.weights=output.weights, call=match.call() )
if (SINK==T && OLD.SINK==F) {sink(file=NULL, append=T)}
return(out)
}
}
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/ensemble.terra.R
|
`ensemble.threshold` <- function(
eval, threshold.method="spec_sens", threshold.sensitivity=0.9,
threshold.PresenceAbsence=FALSE, Pres, Abs)
{
if (threshold.method == "threshold.min") {threshold.method <- "threshold2013.min"}
if (threshold.method == "threshold.mean") {threshold.method <- "threshold2013.mean"}
if (threshold.method %in% c("threshold2013.min", "threshold2013.mean", "threshold2005.min", "threshold2005.mean")) {threshold.PresenceAbsence <- TRUE}
if (threshold.PresenceAbsence == T) {
Pres2 <- cbind(rep(1, length(Pres)), Pres)
Abs2 <- cbind(rep(0, length(Abs)), Abs)
data1 <- rbind(Pres2, Abs2)
        data2 <- cbind(1:nrow(data1), data1)
auc.value <- PresenceAbsence::auc(data2, st.dev=F)
cat(paste("\n", "AUC from PresenceAbsence package (also used to calculate threshold): ", auc.value, "\n", sep = ""))
if (threshold.method=="kappa") {threshold.method <- "MaxKappa"}
if (threshold.method=="spec_sens") {threshold.method <- "MaxSens+Spec"}
if (threshold.method=="prevalence") {threshold.method <- "ObsPrev"}
if (threshold.method=="equal_sens_spec") {threshold.method <- "Sens=Spec"}
if (threshold.method=="sensitivity") {threshold.method <- "ReqSens"}
req.sens <- threshold.sensitivity
if (threshold.method=="no_omission") {
threshold.method <- "ReqSens"
req.sens <- 1.0
}
result <- PresenceAbsence::optimal.thresholds(data2, threshold=seq(from=0, to=1, by=0.0001), req.sens=req.sens)
result2 <- as.numeric(result[, 2])
names(result2) <- result[, 1]
# threshold2005.min and threshold2005.mean build on results from Liu et al. 2005. Ecography 28: 385-393
        # this study on selecting the best thresholds recommended the following approaches
# 4. prevalence (ObsPrev)
# 5. average probability (MeanProb)
# 7. sensitivity-specificity maximization (MaxSens+Spec)
# 8. sensitivity-specificity equality (Sens=Spec)
# 9. ROC-based (MinROCdist)
if (threshold.method %in% c("threshold2005.min", "threshold2005.mean")) {
t1 <- result2[["MaxSens+Spec"]]
cat(paste("\n", "Threshold (method: MaxSens+Spec): ", sep = ""))
print(t1)
t2 <- result2[["Sens=Spec"]]
cat(paste("Threshold (method: Sens=Spec): ", sep = ""))
print(t2)
t3 <- result2[["ObsPrev"]]
cat(paste("Threshold (method: ObsPrev): ", sep = ""))
print(t3)
t4 <- result2[["MeanProb"]]
cat(paste("Threshold (method: MeanProb): ", sep = ""))
print(t4)
t5 <- result2[["MinROCdist"]]
cat(paste("Threshold (method: MinROCdist): ", sep = ""))
print(t5)
thresholds <- as.numeric(c(t1, t2, t3, t4, t5))
thresholds <- thresholds[thresholds > 0]
if (threshold.method == "threshold2005.min") {return(min(thresholds))}
if (threshold.method == "threshold2005.mean") {return(mean(thresholds))}
}
# threshold2013.min and threshold2013.mean build on results from Liu et al. 2013. Journal of Biogeography 40: 778-789
        # this study on selecting the best thresholds recommended the following approaches
# 7. maximizing the sum of sensitivity and specificity, max SSS (MaxSens+Spec)
# 1. training data prevalence, trainPrev (ObsPrev)
# 2. mean predicted value for a set of random points over the whole study area, meanPred (MeanProb)
# when species prevalence is high, however, max SSS is the superior method (page 786)
if (threshold.method %in% c("threshold2013.min", "threshold2013.mean")) {
t1 <- result2[["MaxSens+Spec"]]
cat(paste("\n", "Threshold (method: MaxSens+Spec): ", sep = ""))
print(as.numeric(t1))
t3 <- result2[["ObsPrev"]]
cat(paste("Threshold (method: ObsPrev): ", sep = ""))
print(as.numeric(t3))
t4 <- result2[["MeanProb"]]
cat(paste("Threshold (method: MeanProb): ", sep = ""))
print(as.numeric(t4))
thresholds <- as.numeric(c(t1, t3, t4))
thresholds <- thresholds[thresholds > 0]
if (threshold.method == "threshold2013.min") {return(min(thresholds))}
if (threshold.method == "threshold2013.mean") {return(mean(thresholds))}
}
return(as.numeric(result2[[threshold.method]]))
}
if (threshold.PresenceAbsence == F) {
result <- dismo::threshold(eval, sensitivity=threshold.sensitivity)
return(result[[threshold.method]])
}
}
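# Hedged usage sketch (assumed data, not part of the package code): given numeric
# suitability scores at presence and absence locations, a threshold can be derived
# either via dismo::threshold() (default route) or via the PresenceAbsence package
# (the threshold2005.* / threshold2013.* methods switch to that route internally).
if (FALSE) {
    Pres <- c(0.92, 0.81, 0.76, 0.64, 0.55)   # assumed suitabilities at presence locations
    Abs <- c(0.41, 0.37, 0.28, 0.22, 0.10)    # assumed suitabilities at absence locations
    eval1 <- dismo::evaluate(p=Pres, a=Abs)
    ensemble.threshold(eval1, threshold.method="spec_sens")
    ensemble.threshold(eval1, threshold.method="threshold2013.mean", Pres=Pres, Abs=Abs)
}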
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/ensemble.threshold.R
|
`ensemble.weights` <- function(
weights=c(0.9, 0.8, 0.7, 0.5),
best=0, min.weight=0, exponent=1.0,
digits=6
)
{
names.weights <- names(weights)
weights <- as.numeric(weights)
# also allow weights to be larger than 1, since they will be forced to sum to 1
# if(any(weights > 1.0)) {stop("Input weights are expected to be ranged between 0 and 1")}
names(weights) <- names.weights
# weights should not be negative
if (min.weight < 0) {min.weight <- 0}
weights[weights < min.weight] <- 0
weights[is.na(weights)] <- 0
#
# special case if all weights are zero
if (sum(weights) == 0) {return(weights)}
#
# select best weights
# ties are handled correctly
lw <- length(weights)
lp <- sum(as.numeric(weights > 0))
if (best < 1) {best <- lp}
if (lp < best) {best <- lp}
weights.sorted <- sort(weights, decreasing=T)
min.best <- weights.sorted[best]
weights[weights < min.best] <- 0
#
# apply exponents
weights <- weights^exponent
#
# scaling to 1
tot <- sum(weights)
weights <- weights/tot
weights <- round(weights, digits=digits)
#
names(weights) <- names.weights
return(weights)
}
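# Hedged usage sketch (assumed weights, not part of the package code): with best=3
# only the three largest input weights are retained, the exponent is applied and the
# result is rescaled so that the retained weights sum to 1.
if (FALSE) {
    w <- c(MAXENT=0.9, GBM=0.8, RF=0.7, GLM=0.5)
    ensemble.weights(weights=w, best=3, exponent=2)
    # expected pattern: GLM becomes 0; the squared MAXENT, GBM and RF weights sum to 1
}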
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/ensemble.weights.R
|
`ensemble.zones` <- function(
presence.raster=NULL, centroid.object=NULL, x=NULL, ext=NULL,
RASTER.species.name=centroid.object$name, RASTER.stack.name = x@title,
RASTER.format="GTiff", RASTER.datatype="INT2S", RASTER.NAflag=-32767,
# KML.out=FALSE, KML.maxpixels=100000, KML.blur=10,
CATCH.OFF=FALSE
)
{
.BiodiversityR <- new.env()
# if (! require(dismo)) {stop("Please install the dismo package")}
if (is.null(presence.raster) == T) {stop("value for parameter presence.raster is missing (RasterLayer object)")}
    if(inherits(presence.raster, "RasterLayer") == F) {stop("presence.raster is not a RasterLayer object")}
if(is.null(x) == T) {stop("value for parameter x is missing (RasterStack object)")}
if(inherits(x, "RasterStack") == F) {stop("x is not a RasterStack object")}
if (is.null(centroid.object) == T) {stop("value for parameter centroid.object is missing (hint: use the ensemble.centroids function)")}
#
#
# if (KML.out==T && raster::isLonLat(presence.raster)==F) {
# cat(paste("\n", "NOTE: not possible to generate KML files as Coordinate Reference System (CRS) of presence.raster is not longitude and latitude", "\n", sep = ""))
# KML.out <- FALSE
# }
#
predict.zone <- function(object=centroid.object, newdata=newdata) {
centroids <- object$centroids
cov.mahal <- object$cov.mahal
nc <- nrow(centroids)
result <- data.frame(array(0, dim=c(nrow(newdata), nc)))
for (i in 1:nc) {
result[,i] <- mahalanobis(newdata, center=as.numeric(centroids[i,]), cov=cov.mahal)
}
p <- apply(result[, 1:nc], 1, which.min)
p <- as.numeric(p)
return(p)
}
#
# check if all variables are present
vars <- names(centroid.object$centroids)
vars.x <- names(x)
nv <- length(vars)
for (i in 1:nv) {
if (any(vars.x==vars[i]) == F) {stop("explanatory variable '", vars[i], "' not among grid layers of RasterStack x \n", sep = "")}
}
nv <- length(vars.x)
for (i in 1:nv) {
if (any(vars==vars.x[i]) == F) {
cat(paste("\n", "NOTE: RasterStack layer '", vars.x[i], "' was not documented in the centroids data set", "\n", sep = ""))
x <- raster::dropLayer(x, which(names(x) %in% c(vars.x[i]) ))
x <- raster::stack(x)
}
}
# same extent for predictors and presence map
if (is.null(ext) == F) {
if(length(x@title) == 0) {x@title <- "stack1"}
title.old <- x@title
x <- raster::crop(x, y=ext, snap="in")
x@title <- title.old
x <- raster::stack(x)
presence.raster <- raster::crop(presence.raster, y=ext, snap="in")
}
# avoid problems with non-existing directories and prepare for output
dir.create("ensembles", showWarnings = F)
dir.create("ensembles/zones", showWarnings = F)
# if(KML.out == T) {
# dir.create("kml", showWarnings = F)
# dir.create("kml/zones", showWarnings = F)
# }
stack.title <- RASTER.stack.name
# if (gsub(".", "_", stack.title, fixed=T) != stack.title) {cat(paste("\n", "WARNING: title of stack (", stack.title, ") contains '.'", "\n\n", sep = ""))}
rasterfull <- paste("ensembles/zones/", RASTER.species.name, "_", stack.title , sep="")
kmlfull <- paste("kml/zones/", RASTER.species.name, "_", stack.title , sep="")
#
# predict
if (CATCH.OFF == F) {
tryCatch(zones.raster <- raster::predict(object=x, model=centroid.object, fun=predict.zone, na.rm=TRUE,
filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format),
error= function(err) {print(paste("prediction of zones failed"))},
silent=F)
}else{
zones.raster <- raster::predict(object=x, model=centroid.object, fun=predict.zone, na.rm=TRUE,
filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format)
}
# mask the presence area, including areas that are NA in presence raster
zones.raster <- raster::mask(zones.raster, presence.raster, inverse=T, maskvalue=1)
zones.raster <- raster::mask(zones.raster, presence.raster, inverse=F)
cat(paste("\n", "raster layer with zones created", "\n", sep = ""))
print(raster::freq(zones.raster))
#
# avoid possible problems with saving of names of the raster layers
# no longer used with default format of GTiff since DEC-2022
raster::writeRaster(zones.raster, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
# raster::writeRaster(zones.raster, filename="working.grd", overwrite=T)
# working.raster <- raster::raster("working.grd")
# names(working.raster) <- paste(RASTER.species.name, "_", stack.title , "_zones", sep="")
# raster::writeRaster(working.raster, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
#
# if (KML.out == T) {
# nc <- nrow(centroid.object$centroids)
# raster::KML(working.raster, filename=kmlfull, col = grDevices::rainbow(n = nc, start = 0.2, end = 0.8), colNA = 0,
# blur=KML.blur, maxpixels=KML.maxpixels, overwrite=TRUE, breaks = c(0:nc))
# }
cat(paste("\n", "zones provided in folder: ", getwd(), "//ensembles//zones", "\n", sep=""))
# zones.raster <- raster::raster(rasterfull)
return(zones.raster)
}
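# Hedged usage sketch (assumed objects, not part of the package code): 'centroids1'
# is assumed to be the output of ensemble.centroids(), 'presence1' a presence
# RasterLayer from an earlier ensemble step, and 'predictors1' a RasterStack with
# the same explanatory variables that were used to define the centroids.
if (FALSE) {
    zones1 <- ensemble.zones(presence.raster=presence1, centroid.object=centroids1,
        x=predictors1, RASTER.species.name="Species1")
    plot(zones1)
}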
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/ensemble.zones.R
|
`evaluation.strip.data` <- function(
xn=NULL, ext=NULL,
models.list=NULL,
input.weights=models.list$output.weights,
steps=200, CATCH.OFF=FALSE
)
{
.BiodiversityR <- new.env()
# if (! require(dismo)) {stop("Please install the dismo package")}
if (is.null(xn) == T) {stop("value for parameter xn is missing (RasterStack object)")}
if (is.null(models.list) == T) {stop("provide 'models.list' as models will not be recalibrated and retested")}
if (is.null(input.weights) == T) {input.weights <- models.list$output.weights}
if (is.null(ext) == F) {
if(length(xn@title) == 0) {xn@title <- "stack1"}
title.old <- xn@title
xn <- raster::crop(xn, y=ext, snap="in")
xn <- raster::stack(xn)
xn@title <- title.old
}
#
# check if all variables are present
vars <- models.list$vars
vars.xn <- names(xn)
nv <- length(vars)
for (i in 1:nv) {
if (any(vars.xn==vars[i]) == F) {stop("explanatory variable '", vars[i], "' not among grid layers of RasterStack xn", "\n", sep = "")}
}
nv <- length(vars.xn)
for (i in 1:nv) {
if (any(vars==vars.xn[i]) == F) {
cat(paste("\n", "NOTE: RasterStack layer '", vars.xn[i], "' was not calibrated as explanatory variable", "\n", sep = ""))
xn <- raster::dropLayer(xn, which(names(xn) %in% c(vars.xn[i]) ))
xn <- raster::stack(xn)
}
}
factors <- models.list$factors
dummy.vars <- models.list$dummy.vars
dummy.vars.noDOMAIN <- models.list$dummy.vars.noDOMAIN
#
# set minimum and maximum values for xn
for (i in 1:raster::nlayers(xn)) {
xn[[i]] <- raster::setMinMax(xn[[i]])
}
# declare categorical layers for xn
# factors <- models.list$factors
if(is.null(factors) == F) {
for (i in 1:length(factors)) {
j <- which(names(xn) == factors[i])
xn[[j]] <- raster::as.factor(xn[[j]])
}
}
#
if (is.null(input.weights) == F) {
MAXENT <- max(c(input.weights["MAXENT"], -1), na.rm=T)
MAXNET <- max(c(input.weights["MAXNET"], -1), na.rm=T)
MAXLIKE <- max(c(input.weights["MAXLIKE"], -1), na.rm=T)
GBM <- max(c(input.weights["GBM"], -1), na.rm=T)
GBMSTEP <- max(c(input.weights["GBMSTEP"], -1), na.rm=T)
RF <- max(c(input.weights["RF"], -1), na.rm=T)
CF <- max(c(input.weights["CF"], -1), na.rm=T)
GLM <- max(c(input.weights["GLM"], -1), na.rm=T)
GLMSTEP <- max(c(input.weights["GLMSTEP"], -1), na.rm=T)
GAM <- max(c(input.weights["GAM"], -1), na.rm=T)
GAMSTEP <- max(c(input.weights["GAMSTEP"], -1), na.rm=T)
MGCV <- max(c(input.weights["MGCV"], -1), na.rm=T)
MGCVFIX <- max(c(input.weights["MGCVFIX"], -1), na.rm=T)
EARTH <- max(c(input.weights["EARTH"], -1), na.rm=T)
RPART <- max(c(input.weights["RPART"], -1), na.rm=T)
NNET <- max(c(input.weights["NNET"], -1), na.rm=T)
FDA <- max(c(input.weights["FDA"], -1), na.rm=T)
SVM <- max(c(input.weights["SVM"], -1), na.rm=T)
SVME <- max(c(input.weights["SVME"], -1), na.rm=T)
GLMNET <- max(c(input.weights["GLMNET"], -1), na.rm=T)
BIOCLIM.O <- max(c(input.weights["BIOCLIM.O"], -1), na.rm=T)
BIOCLIM <- max(c(input.weights["BIOCLIM"], -1), na.rm=T)
DOMAIN <- max(c(input.weights["DOMAIN"], -1), na.rm=T)
MAHAL <- max(c(input.weights["MAHAL"], -1), na.rm=T)
MAHAL01 <- max(c(input.weights["MAHAL01"], -1), na.rm=T)
}
#
MAXENT.OLD <- MAXNET.OLD <- MAXLIKE.OLD <- GBM.OLD <- GBMSTEP.OLD <- RF.OLD <- CF.OLD <- GLM.OLD <- GLMSTEP.OLD <- GAM.OLD <- GAMSTEP.OLD <- MGCV.OLD <- NULL
MGCVFIX.OLD <- EARTH.OLD <- RPART.OLD <- NNET.OLD <- FDA.OLD <- SVM.OLD <- SVME.OLD <- GLMNET.OLD <- BIOCLIM.O.OLD <- BIOCLIM.OLD <- DOMAIN.OLD <- MAHAL.OLD <- MAHAL01.OLD <- NULL
# probit models, NULL if no probit model fitted
MAXENT.PROBIT.OLD <- MAXNET.PROBIT.OLD <- MAXLIKE.PROBIT.OLD <- GBM.PROBIT.OLD <- GBMSTEP.PROBIT.OLD <- RF.PROBIT.OLD <- CF.PROBIT.OLD <- GLM.PROBIT.OLD <- GLMSTEP.PROBIT.OLD <- GAM.PROBIT.OLD <- GAMSTEP.PROBIT.OLD <- MGCV.PROBIT.OLD <- NULL
MGCVFIX.PROBIT.OLD <- EARTH.PROBIT.OLD <- RPART.PROBIT.OLD <- NNET.PROBIT.OLD <- FDA.PROBIT.OLD <- SVM.PROBIT.OLD <- SVME.PROBIT.OLD <- GLMNET.PROBIT.OLD <- BIOCLIM.O.PROBIT.OLD <- BIOCLIM.PROBIT.OLD <- DOMAIN.PROBIT.OLD <- MAHAL.PROBIT.OLD <- MAHAL01.PROBIT.OLD <- NULL
if (is.null(models.list) == F) {
if (is.null(models.list$MAXENT) == F) {MAXENT.OLD <- models.list$MAXENT}
if (is.null(models.list$MAXNET) == F) {
MAXNET.OLD <- models.list$MAXNET
MAXNET.clamp <- models.list$formulae$MAXNET.clamp
MAXNET.type <- models.list$formulae$MAXNET.type
}
if (is.null(models.list$MAXLIKE) == F) {MAXLIKE.OLD <- models.list$MAXLIKE}
if (is.null(models.list$GBM) == F) {GBM.OLD <- models.list$GBM}
if (is.null(models.list$GBMSTEP) == F) {GBMSTEP.OLD <- models.list$GBMSTEP}
if (is.null(models.list$RF) == F) {RF.OLD <- models.list$RF}
if (is.null(models.list$CF) == F) {CF.OLD <- models.list$CF}
if (is.null(models.list$GLM) == F) {GLM.OLD <- models.list$GLM}
if (is.null(models.list$GLMSTEP) == F) {GLMSTEP.OLD <- models.list$GLMSTEP}
if (is.null(models.list$GAM) == F) {GAM.OLD <- models.list$GAM}
if (is.null(models.list$GAMSTEP) == F) {GAMSTEP.OLD <- models.list$GAMSTEP}
if (is.null(models.list$MGCV) == F) {MGCV.OLD <- models.list$MGCV}
if (is.null(models.list$MGCVFIX) == F) {MGCVFIX.OLD <- models.list$MGCVFIX}
if (is.null(models.list$EARTH) == F) {EARTH.OLD <- models.list$EARTH}
if (is.null(models.list$RPART) == F) {RPART.OLD <- models.list$RPART}
if (is.null(models.list$NNET) == F) {NNET.OLD <- models.list$NNET}
if (is.null(models.list$FDA) == F) {FDA.OLD <- models.list$FDA}
if (is.null(models.list$SVM) == F) {SVM.OLD <- models.list$SVM}
if (is.null(models.list$SVME) == F) {SVME.OLD <- models.list$SVME}
if (is.null(models.list$GLMNET) == F) {
GLMNET.OLD <- models.list$GLMNET
GLMNET.class <- models.list$formulae$GLMNET.class
}
if (is.null(models.list$BIOCLIM.O) == F) {BIOCLIM.O.OLD <- models.list$BIOCLIM.O}
if (is.null(models.list$BIOCLIM) == F) {BIOCLIM.OLD <- models.list$BIOCLIM}
if (is.null(models.list$DOMAIN) == F) {DOMAIN.OLD <- models.list$DOMAIN}
if (is.null(models.list$MAHAL) == F) {MAHAL.OLD <- models.list$MAHAL}
if (is.null(models.list$MAHAL01) == F) {
MAHAL01.OLD <- models.list$MAHAL01
MAHAL.shape <- models.list$formulae$MAHAL.shape
}
#
# probit models
if (is.null(models.list$MAXENT.PROBIT) == F) {MAXENT.PROBIT.OLD <- models.list$MAXENT.PROBIT}
if (is.null(models.list$MAXNET.PROBIT) == F) {MAXNET.PROBIT.OLD <- models.list$MAXNET.PROBIT}
if (is.null(models.list$MAXLIKE.PROBIT) == F) {MAXLIKE.PROBIT.OLD <- models.list$MAXLIKE.PROBIT}
if (is.null(models.list$GBM.PROBIT) == F) {GBM.PROBIT.OLD <- models.list$GBM.PROBIT}
if (is.null(models.list$GBMSTEP.PROBIT) == F) {GBMSTEP.PROBIT.OLD <- models.list$GBMSTEP.PROBIT}
if (is.null(models.list$RF.PROBIT) == F) {RF.PROBIT.OLD <- models.list$RF.PROBIT}
if (is.null(models.list$CF.PROBIT) == F) {CF.PROBIT.OLD <- models.list$CF.PROBIT}
if (is.null(models.list$GLM.PROBIT) == F) {GLM.PROBIT.OLD <- models.list$GLM.PROBIT}
if (is.null(models.list$GLMSTEP.PROBIT) == F) {GLMSTEP.PROBIT.OLD <- models.list$GLMSTEP.PROBIT}
if (is.null(models.list$GAM.PROBIT) == F) {GAM.PROBIT.OLD <- models.list$GAM.PROBIT}
if (is.null(models.list$GAMSTEP.PROBIT) == F) {GAMSTEP.PROBIT.OLD <- models.list$GAMSTEP.PROBIT}
if (is.null(models.list$MGCV.PROBIT) == F) {MGCV.PROBIT.OLD <- models.list$MGCV.PROBIT}
if (is.null(models.list$MGCVFIX.PROBIT) == F) {MGCVFIX.PROBIT.OLD <- models.list$MGCVFIX.PROBIT}
if (is.null(models.list$EARTH.PROBIT) == F) {EARTH.PROBIT.OLD <- models.list$EARTH.PROBIT}
if (is.null(models.list$RPART.PROBIT) == F) {RPART.PROBIT.OLD <- models.list$RPART.PROBIT}
if (is.null(models.list$NNET.PROBIT) == F) {NNET.PROBIT.OLD <- models.list$NNET.PROBIT}
if (is.null(models.list$FDA.PROBIT) == F) {FDA.PROBIT.OLD <- models.list$FDA.PROBIT}
if (is.null(models.list$SVM.PROBIT) == F) {SVM.PROBIT.OLD <- models.list$SVM.PROBIT}
if (is.null(models.list$SVME.PROBIT) == F) {SVME.PROBIT.OLD <- models.list$SVME.PROBIT}
if (is.null(models.list$GLMNET.PROBIT) == F) {GLMNET.PROBIT.OLD <- models.list$GLMNET.PROBIT}
if (is.null(models.list$BIOCLIM.O.PROBIT) == F) {BIOCLIM.O.PROBIT.OLD <- models.list$BIOCLIM.O.PROBIT}
if (is.null(models.list$BIOCLIM.PROBIT) == F) {BIOCLIM.PROBIT.OLD <- models.list$BIOCLIM.PROBIT}
if (is.null(models.list$DOMAIN.PROBIT) == F) {DOMAIN.PROBIT.OLD <- models.list$DOMAIN.PROBIT}
if (is.null(models.list$MAHAL.PROBIT) == F) {MAHAL.PROBIT.OLD <- models.list$MAHAL.PROBIT}
if (is.null(models.list$MAHAL01.PROBIT) == F) {MAHAL01.PROBIT.OLD <- models.list$MAHAL01.PROBIT}
}
if (MAXENT > 0) {
jar <- paste(system.file(package="dismo"), "/java/maxent.jar", sep='')
if (!file.exists(jar)) {stop('maxent program is missing: ', jar, '\nPlease download it here: http://www.cs.princeton.edu/~schapire/maxent/')}
}
if (MAXNET > 0) {
if (! requireNamespace("maxnet")) {stop("Please install the maxnet package")}
predict.maxnet2 <- function(object, newdata, clamp=F, type=c("cloglog")) {
p <- predict(object=object, newdata=newdata, clamp=clamp, type=type)
return(as.numeric(p))
}
}
if (MAXLIKE > 0) {
if (! requireNamespace("maxlike")) {stop("Please install the maxlike package")}
}
if (GBM > 0) {
if (! requireNamespace("gbm")) {stop("Please install the gbm package")}
requireNamespace("splines")
}
if (RF > 0) {
# get the probabilities from RF
predict.RF <- function(object, newdata) {
p <- predict(object=object, newdata=newdata, type="response")
return(as.numeric(p))
}
}
if (CF > 0) {
# get the probabilities from cforest
if (! requireNamespace("party")) {stop("Please install the party package")}
predict.CF <- function(object, newdata) {
# avoid problems with single variables, especially with raster::predict
for (i in 1:ncol(newdata)) {
if (is.integer(newdata[, i])) {newdata[, i] <- as.numeric(newdata[, i])}
}
p1 <- predict(object=object, newdata=newdata, type="prob")
p <- numeric(length(p1))
for (i in 1:length(p1)) {p[i] <- p1[[i]][2]}
return(as.numeric(p))
}
}
if (MGCV > 0 || MGCVFIX > 0) {
# get the probabilities from MGCV
predict.MGCV <- function(object, newdata, type="response") {
p <- mgcv::predict.gam(object=object, newdata=newdata, type=type)
return(as.numeric(p))
}
}
if (EARTH > 0) {
# get the probabilities from earth
predict.EARTH <- function(object, newdata, type="response") {
p <- predict(object=object, newdata=newdata, type=type)
return(as.numeric(p))
}
}
if (NNET > 0) {
# get the probabilities from nnet
predict.NNET <- function(object, newdata, type="raw") {
p <- predict(object=object, newdata=newdata, type=type)
return(as.numeric(p))
}
}
if (SVME > 0) {
# get the probabilities from svm
predict.SVME <- function(model, newdata) {
p <- predict(model, newdata, probability=T)
return(attr(p, "probabilities")[,1])
}
}
if (FDA > 0) {
if (! requireNamespace("mda")) {stop("Please install the mda package")}
}
if (GLMNET > 0) {
if (! requireNamespace("glmnet")) {stop("Please install the glmnet package")}
# get the mean probabilities from glmnet
predict.GLMNET <- function(model, newdata, GLMNET.class=FALSE) {
newdata <- as.matrix(newdata)
if (GLMNET.class == TRUE) {
p <- predict(model, newx=newdata, type="class", exact=T)
n.obs <- nrow(p)
nv <- ncol(p)
result <- numeric(n.obs)
for (i in 1:n.obs) {
for (j in 1:nv) {
if(p[i, j] == 1) {result[i] <- result[i] + 1}
}
}
result <- result/nv
return(result)
}else{
p <- predict(model, newx=newdata, type="response", exact=T)
n.obs <- nrow(p)
nv <- ncol(p)
result <- numeric(n.obs)
for (i in 1:n.obs) {
for (j in 1:nv) {
result[i] <- result[i] + p[i, j]
}
}
result <- result/nv
return(result)
}
}
}
if (BIOCLIM.O > 0) {
# get the probabilities for original BIOCLIM
predict.BIOCLIM.O <- function(object, newdata) {
lower.limits <- object$lower.limits
upper.limits <- object$upper.limits
minima <- object$minima
maxima <- object$maxima
#
newdata <- newdata[, which(names(newdata) %in% names(lower.limits)), drop=F]
result <- as.numeric(rep(NA, nrow(newdata)))
varnames <- names(newdata)
nvars <- ncol(newdata)
#
for (i in 1:nrow(newdata)) {
datai <- newdata[i,,drop=F]
resulti <- 1
j <- 0
while (resulti > 0 && j <= (nvars-1)) {
j <- j+1
focal.var <- varnames[j]
if (resulti == 1) {
lowerj <- lower.limits[which(names(lower.limits) == focal.var)]
if (datai[, j] < lowerj) {resulti <- 0.5}
upperj <- upper.limits[which(names(upper.limits) == focal.var)]
if (datai[, j] > upperj) {resulti <- 0.5}
}
minj <- minima[which(names(minima) == focal.var)]
if (datai[, j] < minj) {resulti <- 0}
maxj <- maxima[which(names(maxima) == focal.var)]
if (datai[, j] > maxj) {resulti <- 0}
}
result[i] <- resulti
}
p <- as.numeric(result)
return(p)
}
}
if (MAHAL > 0) {
# get the probabilities from mahal
predict.MAHAL <- function(model, newdata, PROBIT) {
p <- dismo::predict(object=model, x=newdata)
if (PROBIT == F) {
p[p<0] <- 0
p[p>1] <- 1
}
return(as.numeric(p))
}
}
if (MAHAL01 > 0) {
# get the probabilities from transformed mahal
predict.MAHAL01 <- function(model, newdata, MAHAL.shape) {
p <- dismo::predict(object=model, x=newdata)
p <- p - 1 - MAHAL.shape
p <- abs(p)
p <- MAHAL.shape / p
return(p)
}
}
#
ws <- input.weights
#
# prepare data set
vars.xn <- names(xn)
nvars <- length(vars)
nnum <- nvars - length(factors)
nrows <- nnum * steps
plot.data <- array(dim=c(nnum*steps, nvars+2), NA)
dimnames(plot.data)[[2]] <- c("focal.var", "categorical", vars)
# for categorical variables first
fixedlevel <- array(dim=c(nvars))
for (i in 1:nvars) {
if(any(vars[i] == factors) == T) {
il <- which(vars.xn == vars[i])
tabulation <- data.frame(raster::freq(xn[[il]]))
NA.index <- !is.na(tabulation[,"value"])
tabulation <- tabulation[NA.index,]
plot.data2 <- array(dim=c(nrow(tabulation), nvars+2), NA)
plot.data2[, 1] <- rep(i, nrow(tabulation))
plot.data2[, 2] <- rep(1, nrow(tabulation))
plot.data2[, i+2] <- tabulation[,1]
plot.data <- rbind(plot.data, plot.data2)
fixedlevel[i] <- tabulation[which.max(tabulation[,2]),1]
}
}
for (i in 1:nvars) {
if(any(vars[i] == factors) == T) {
index <- is.na(plot.data[,i+2])
plot.data[index,i+2] <- fixedlevel[i]
}
}
nrows <- nrow(plot.data)
# for numerical variables next
for (i in 1:nvars) {
if(any(vars[i] == factors) == F) {
il <- which(vars.xn == vars[i])
plot.data[,i+2] <- rep(raster::cellStats(xn[[il]], stat="mean"), nrows)
}
}
j <- 0
for (i in 1:nvars) {
if(any(vars[i] == factors) == F) {
il <- which(vars.xn == vars[i])
j <- j+1
startpos <- (j-1)*steps+1
endpos <- (j-1)*steps+steps
plot.data[startpos:endpos,1] <- rep(i, steps)
plot.data[startpos:endpos,2] <- rep(0, steps)
minv <- raster::minValue(xn[[il]])
maxv <- raster::maxValue(xn[[il]])
plot.data[startpos:endpos,i+2] <- seq(from=minv,to=maxv, length.out=steps)
}
}
# declare factor variables
plot.data.vars <- data.frame(plot.data)
plot.data.vars <- plot.data.vars[, which(names(plot.data.vars) %in% vars), drop=F]
plot.data.numvars <- plot.data.vars
for (i in 1:nvars) {
if(any(vars[i] == factors) == T) {
# corrected NOV 2019, error reported by Viviana Ceccarelli
column.i <- which(names(plot.data.vars)==vars[i])
plot.data.vars[, column.i] <- factor(plot.data.vars[, column.i], levels=models.list$categories[[vars[i]]])
plot.data.numvars <- plot.data.numvars[, which(names(plot.data.numvars) != vars[i]), drop=F]
}
}
plot.data.domain <- plot.data.numvars
for (i in 1:nvars) {
if(any(vars[i] == dummy.vars) == T) {
            plot.data.domain <- plot.data.domain[, which(names(plot.data.domain) != vars[i]), drop=F]
}
}
plot.data.mahal <- plot.data.numvars
for (i in 1:nvars) {
if(any(vars[i] == dummy.vars) == T) {
            plot.data.mahal <- plot.data.mahal[, which(names(plot.data.mahal) != vars[i]), drop=F]
}
}
assign("plot.data.vars", plot.data.vars, envir=.BiodiversityR)
assign("plot.data.numvars", plot.data.numvars, envir=.BiodiversityR)
assign("plot.data.domain", plot.data.domain, envir=.BiodiversityR)
assign("plot.data.mahal", plot.data.mahal, envir=.BiodiversityR)
modelnames <- c("MAXENT", "MAXNET", "MAXLIKE", "GBM", "GBMSTEP", "RF", "CF",
"GLM", "GLMSTEP", "GAM", "GAMSTEP", "MGCV", "MGCVFIX",
"EARTH", "RPART", "NNET", "FDA", "SVM", "SVME", "GLMNET",
"BIOCLIM.O", "BIOCLIM", "DOMAIN", "MAHAL", "MAHAL01", "ENSEMBLE")
nmodels <- length(modelnames)
modelout <- array(dim=c(nrows, nmodels), 0.0)
dimnames(modelout)[[2]] <- modelnames
plot.data <- cbind(plot.data, modelout)
plot.data <- data.frame(plot.data)
for (i in 1:nvars) {
if(any(vars[i] == factors) == T) {
plot.data[,i+2] <- factor(plot.data[,i+2], levels=models.list$categories[[vars[i]]])
}
}
#
    # warnings about missing minimum and maximum values of the layers can still occur
# set minimum and maximum values for xn
for (i in 1:raster::nlayers(xn)) {
xn[[i]] <- raster::setMinMax(xn[[i]])
}
#
# Different modelling algorithms
#
if (MAXENT > 0) {
results <- MAXENT.OLD
if (CATCH.OFF == F) {
tryCatch(plot.data[,"MAXENT"] <- dismo::predict(object=results, x=plot.data.vars),
error= function(err) {print(paste("MAXENT prediction failed"))},
silent=F)
}else{
plot.data[,"MAXENT"] <- dismo::predict(object=results, x=plot.data.vars)
}
results2 <- MAXENT.PROBIT.OLD
if (is.null(results2) == F) {
plot.data[,"MAXENT.step1"] <- plot.data[, "MAXENT"]
plot.data[,"MAXENT"] <- predict.glm(object=results2, newdata=plot.data, type="response")
}
}
if (MAXNET > 0) {
results <- MAXNET.OLD
if (CATCH.OFF == F) {
tryCatch(plot.data[,"MAXNET"] <- predict.maxnet2(object=results, newdata=plot.data.vars, clamp=MAXNET.clamp, type=MAXNET.type),
error= function(err) {print(paste("MAXNET prediction failed"))},
silent=F)
}else{
plot.data[,"MAXNET"] <- predict.maxnet2(object=results, newdata=plot.data.vars, clamp=MAXNET.clamp, type=MAXNET.type)
}
results2 <- MAXNET.PROBIT.OLD
if (is.null(results2) == F) {
plot.data[,"MAXNET.step1"] <- plot.data[, "MAXNET"]
plot.data[,"MAXNET"] <- predict.glm(object=results2, newdata=plot.data, type="response")
}
}
if (MAXLIKE > 0) {
results <- MAXLIKE.OLD
# corrected NOV 2019, error reported by Viviana Ceccarelli - error caused by MAXLIKE excluding categorical variables
if (CATCH.OFF == F) {
tryCatch(plot.data[,"MAXLIKE"] <- predict(object=results, newdata=plot.data.numvars),
error= function(err) {print(paste("MAXLIKE prediction failed"))},
silent=F)
}else{
plot.data[,"MAXLIKE"] <- predict(object=results, newdata=plot.data.numvars)
}
results2 <- MAXLIKE.PROBIT.OLD
if (is.null(results2) == F) {
plot.data[, "MAXLIKE.step1"] <- plot.data[, "MAXLIKE"]
plot.data[,"MAXLIKE"] <- predict.glm(object=results2, newdata=plot.data, type="response")
}
}
if (GBM > 0) {
results <- GBM.OLD
if (CATCH.OFF == F) {
tryCatch(plot.data[,"GBM"] <- gbm::predict.gbm(object=results, newdata=plot.data.vars, n.trees=results$n.trees, type="response"),
error= function(err) {print(paste("GBM prediction failed"))},
silent=F)
}else{
plot.data[,"GBM"] <- gbm::predict.gbm(object=results, newdata=plot.data.vars, n.trees=results$n.trees, type="response")
}
results2 <- GBM.PROBIT.OLD
if (is.null(results2) == F) {
plot.data[, "GBM.step1"] <- plot.data[, "GBM"]
plot.data[,"GBM"] <- predict.glm(object=results2, newdata=plot.data, type="response")
}
}
if (GBMSTEP > 0) {
results <- GBMSTEP.OLD
if (CATCH.OFF == F) {
tryCatch(plot.data[,"GBMSTEP"] <- gbm::predict.gbm(object=results, newdata=plot.data.vars, n.trees=results$n.trees, type="response"),
error= function(err) {print(paste("stepwise GBM prediction failed"))},
silent=F)
}else{
plot.data[,"GBMSTEP"] <- gbm::predict.gbm(object=results, newdata=plot.data.vars, n.trees=results$n.trees, type="response")
}
results2 <- GBMSTEP.PROBIT.OLD
if (is.null(results2) == F) {
plot.data[,"GBMSTEP.step1"] <- plot.data[, "GBMSTEP"]
plot.data[,"GBMSTEP"] <- predict.glm(object=results2, newdata=plot.data, type="response")
}
}
if (RF > 0) {
results <- RF.OLD
if (CATCH.OFF == F) {
tryCatch(plot.data[,"RF"] <- predict.RF(object=results, newdata=plot.data.vars),
error= function(err) {print(paste("RF prediction failed"))},
silent=F)
}else{
plot.data[,"RF"] <- predict.RF(object=results, newdata=plot.data.vars)
}
results2 <- RF.PROBIT.OLD
if (is.null(results2) == F) {
plot.data[,"RF.step1"] <- plot.data[,"RF"]
plot.data[,"RF"] <- predict.glm(object=results2, newdata=plot.data, type="response")
}
}
if (CF > 0) {
results <- CF.OLD
if (CATCH.OFF == F) {
tryCatch(plot.data[,"CF"] <- predict.CF(object=results, newdata=plot.data.vars),
error= function(err) {print(paste("CF prediction failed"))},
silent=F)
}else{
plot.data[,"CF"] <- predict.CF(object=results, newdata=plot.data.vars)
}
results2 <- CF.PROBIT.OLD
if (is.null(results2) == F) {
plot.data[,"CF.step1"] <- plot.data[,"CF"]
plot.data[,"CF"] <- predict.glm(object=results2, newdata=plot.data, type="response")
}
}
if (GLM > 0) {
results <- GLM.OLD
if (CATCH.OFF == F) {
tryCatch(plot.data[,"GLM"] <- predict.glm(object=results, newdata=plot.data.vars, type="response"),
error= function(err) {print(paste("GLM prediction failed"))},
silent=F)
}else{
plot.data[,"GLM"] <- predict.glm(object=results, newdata=plot.data.vars, type="response")
}
results2 <- GLM.PROBIT.OLD
if (is.null(results2) == F) {
plot.data[,"GLM.step1"] <- plot.data[, "GLM"]
plot.data[,"GLM"] <- predict.glm(object=results2, newdata=plot.data, type="response")
}
}
if (GLMSTEP > 0) {
results <- GLMSTEP.OLD
if (CATCH.OFF == F) {
tryCatch(plot.data[,"GLMSTEP"] <- predict.glm(object=results, newdata=plot.data.vars, type="response"),
error= function(err) {print(paste("stepwise GLM prediction failed"))},
silent=F)
}else{
plot.data[,"GLMSTEP"] <- predict.glm(object=results, newdata=plot.data.vars, type="response")
}
results2 <- GLMSTEP.PROBIT.OLD
if (is.null(results2) == F) {
plot.data[,"GLMSTEP.step1"] <- plot.data[, "GLMSTEP"]
plot.data[,"GLMSTEP"] <- predict.glm(object=results2, newdata=plot.data, type="response")
}
}
if (GAM > 0) {
results <- GAM.OLD
if (CATCH.OFF == F) {
tryCatch(plot.data[,"GAM"] <- gam::predict.Gam(object=results, newdata=plot.data.vars, type="response"),
error= function(err) {print(paste("GAM (package: gam) prediction failed"))},
silent=F)
}else{
plot.data[,"GAM"] <- gam::predict.Gam(object=results, newdata=plot.data.vars, type="response")
}
results2 <- GAM.PROBIT.OLD
if (is.null(results2) == F) {
plot.data[,"GAM.step1"] <- plot.data[, "GAM"]
plot.data[,"GAM"] <- predict.glm(object=results2, newdata=plot.data, type="response")
}
}
if (GAMSTEP > 0) {
results <- GAMSTEP.OLD
if (CATCH.OFF == F) {
tryCatch(plot.data[,"GAMSTEP"] <- gam::predict.Gam(object=results, newdata=plot.data.vars, type="response"),
error= function(err) {print(paste("stepwise GAM prediction (gam package) failed"))},
silent=F)
}else{
plot.data[,"GAMSTEP"] <- gam::predict.Gam(object=results, newdata=plot.data.vars, type="response")
}
results2 <- GAMSTEP.PROBIT.OLD
if (is.null(results2) == F) {
plot.data[, "GAMSTEP.step1"] <- plot.data[, "GAMSTEP"]
plot.data[,"GAMSTEP"] <- predict.glm(object=results2, newdata=plot.data, type="response")
}
}
if (MGCV > 0) {
results <- MGCV.OLD
if (CATCH.OFF == F) {
tryCatch(plot.data[,"MGCV"] <- predict.MGCV(object=results, newdata=plot.data.vars),
error= function(err) {print(paste("GAM prediction (mgcv package) failed"))},
silent=F)
}else{
plot.data[,"MGCV"] <- predict.MGCV(object=results, newdata=plot.data.vars)
}
results2 <- MGCV.PROBIT.OLD
if (is.null(results2) == F) {
plot.data[,"MGCV.step1"] <- plot.data[,"MGCV"]
plot.data[,"MGCV"] <- predict.glm(object=results2, newdata=plot.data, type="response")
}
}
if (MGCVFIX > 0) {
results <- MGCVFIX.OLD
if (CATCH.OFF == F) {
tryCatch(plot.data[,"MGCVFIX"] <- predict.MGCV(object=results, newdata=plot.data.vars),
error= function(err) {print(paste("MGCVFIX prediction (mgcv package) failed"))},
silent=F)
}else{
plot.data[,"MGCVFIX"] <- predict.MGCV(object=results, newdata=plot.data.vars)
}
results2 <- MGCVFIX.PROBIT.OLD
        if (is.null(results2) == F) {
            plot.data[,"MGCVFIX.step1"] <- plot.data[,"MGCVFIX"]
            plot.data[,"MGCVFIX"] <- predict.glm(object=results2, newdata=plot.data, type="response")
        }
}
if (EARTH > 0) {
results <- EARTH.OLD
if (CATCH.OFF == F) {
tryCatch(plot.data[,"EARTH"] <- predict.EARTH(object=results, newdata=plot.data.numvars),
error= function(err) {print(paste("MARS prediction (earth package) failed"))},
silent=F)
}else{
plot.data[,"EARTH"] <- predict.EARTH(object=results, newdata=plot.data.numvars)
}
results2 <- EARTH.PROBIT.OLD
if (is.null(results2) == F) {
plot.data[,"EARTH.step1"] <- plot.data[,"EARTH"]
plot.data[,"EARTH"] <- predict.glm(object=results2, newdata=plot.data, type="response")
}
}
if (RPART > 0) {
results <- RPART.OLD
if (CATCH.OFF == F) {
tryCatch(plot.data[,"RPART"] <- predict(object=results, newdata=plot.data.vars, type="prob")[,2],
error= function(err) {print(paste("RPART prediction failed"))},
silent=F)
}else{
plot.data[,"RPART"] <- predict(object=results, newdata=plot.data.vars, type="prob")[,2]
}
results2 <- RPART.PROBIT.OLD
if (is.null(results2) == F) {
plot.data[,"RPART.step1"] <- plot.data[,"RPART"]
plot.data[,"RPART"] <- predict.glm(object=results2, newdata=plot.data, type="response")
}
}
if (NNET > 0) {
results <- NNET.OLD
if (CATCH.OFF == F) {
tryCatch(plot.data[,"NNET"] <- predict.NNET(object=results, newdata=plot.data.vars),
error= function(err) {print(paste("ANN prediction (nnet package) failed"))},
silent=F)
}else{
plot.data[,"NNET"] <- predict.NNET(object=results, newdata=plot.data.vars)
}
results2 <- NNET.PROBIT.OLD
if (is.null(results2) == F) {
plot.data[,"NNET.step1"] <- plot.data[,"NNET"]
plot.data[,"NNET"] <- predict.glm(object=results2, newdata=plot.data, type="response")
}
}
if (FDA > 0) {
results <- FDA.OLD
if (CATCH.OFF == F) {
tryCatch(plot.data[,"FDA"] <- predict(object=results, newdata=plot.data.vars, type="posterior")[,2],
error= function(err) {print(paste("FDA prediction failed"))},
silent=F)
}else{
plot.data[,"FDA"] <- predict(object=results, newdata=plot.data.vars, type="posterior")[,2]
}
results2 <- FDA.PROBIT.OLD
if (is.null(results2) == F) {
plot.data[,"FDA.step1"] <- plot.data[,"FDA"]
plot.data[,"FDA"] <- predict.glm(object=results2, newdata=plot.data, type="response")
}
}
if (SVM > 0) {
results <- SVM.OLD
if (CATCH.OFF == F) {
tryCatch(plot.data[,"SVM"] <- kernlab::predict(object=results, newdata=plot.data.vars, type="probabilities")[,2],
error= function(err) {print(paste("SVM prediction (kernlab package) failed"))},
silent=F)
}else{
plot.data[,"SVM"] <- kernlab::predict(object=results, newdata=plot.data.vars, type="probabilities")[,2]
}
results2 <- SVM.PROBIT.OLD
if (is.null(results2) == F) {
plot.data[,"SVM.step1"] <- plot.data[,"SVM"]
plot.data[,"SVM"] <- predict.glm(object=results2, newdata=plot.data, type="response")
}
}
if (SVME > 0) {
results <- SVME.OLD
if (CATCH.OFF == F) {
tryCatch(plot.data[,"SVME"] <- predict.SVME(model=results, newdata=plot.data.vars),
error= function(err) {print(paste("SVM prediction (e1071 package) failed"))},
warning= function(war) {print(paste("SVM prediction (e1071 package) failed"))},
silent=F)
}else{
plot.data[,"SVME"] <- predict.SVME(model=results, newdata=plot.data.vars)
}
results2 <- SVME.PROBIT.OLD
if (is.null(results2) == F) {
plot.data[,"SVME.step1"] <- plot.data[,"SVME"]
plot.data[,"SVME"] <- predict.glm(object=results2, newdata=plot.data, type="response")
}
}
if (GLMNET > 0) {
results <- GLMNET.OLD
if (CATCH.OFF == F) {
tryCatch(plot.data[,"GLMNET"] <- predict.GLMNET(model=results, newdata=plot.data.numvars, GLMNET.class=GLMNET.class),
error= function(err) {print(paste("GLMNET prediction (glmnet package) failed"))},
warning= function(war) {print(paste("GLMNET prediction (glmnet package) failed"))},
silent=F)
}else{
plot.data[,"GLMNET"] <- predict.GLMNET(model=results, newdata=plot.data.numvars, GLMNET.class=GLMNET.class)
}
results2 <- GLMNET.PROBIT.OLD
if (is.null(results2) == F) {
plot.data[,"GLMNET.step1"] <- plot.data[,"GLMNET"]
plot.data[,"GLMNET"] <- predict.glm(object=results2, newdata=plot.data, type="response")
}
}
if (BIOCLIM.O > 0) {
results <- BIOCLIM.O.OLD
if (CATCH.OFF == F) {
tryCatch(plot.data[,"BIOCLIM.O"] <- predict.BIOCLIM.O(object=results, newdata=plot.data.vars),
error= function(err) {print(paste("original BIOCLIM prediction failed"))},
silent=F)
}else{
plot.data[,"BIOCLIM.O"] <- predict.BIOCLIM.O(object=results, newdata=plot.data.vars)
}
results2 <- BIOCLIM.O.PROBIT.OLD
if (is.null(results2) == F) {
plot.data[,"BIOCLIM.O.step1"] <- plot.data[,"BIOCLIM.O"]
plot.data[,"BIOCLIM.O"] <- predict.glm(object=results2, newdata=plot.data, type="response")
}
}
if (BIOCLIM > 0 || DOMAIN > 0 || MAHAL > 0) {
if(is.null(factors)==F) {
for (i in 1:length(factors)) {
plot.data.vars <- plot.data.vars[, which(names(plot.data.vars) != factors[i]), drop=F]
}
}
}
if (BIOCLIM > 0) {
results <- BIOCLIM.OLD
if (CATCH.OFF == F) {
tryCatch(plot.data[,"BIOCLIM"] <- dismo::predict(object=results, x=plot.data.vars),
error= function(err) {print(paste("BIOCLIM prediction failed"))},
silent=F)
}else{
plot.data[,"BIOCLIM"] <- dismo::predict(object=results, x=plot.data.vars)
}
results2 <- BIOCLIM.PROBIT.OLD
if (is.null(results2) == F) {
plot.data[,"BIOCLIM.step1"] <- plot.data[,"BIOCLIM"]
plot.data[,"BIOCLIM"] <- predict.glm(object=results2, newdata=plot.data, type="response")
}
}
    if (DOMAIN > 0) {
        results <- DOMAIN.OLD
        if (CATCH.OFF == F) {
tryCatch(plot.data[,"DOMAIN"] <- dismo::predict(object=results, x=plot.data.domain),
error= function(err) {print(paste("DOMAIN prediction failed"))},
silent=F)
}else{
plot.data[,"DOMAIN"] <- dismo::predict(object=results, x=plot.data.domain)
}
results2 <- DOMAIN.PROBIT.OLD
if (is.null(results2) == F) {
plot.data[,"DOMAIN.step1"] <- plot.data[,"DOMAIN"]
plot.data[,"DOMAIN"] <- predict.glm(object=results2, newdata=plot.data, type="response")
}
}
if (MAHAL > 0) {
results <- MAHAL.OLD
results2 <- MAHAL.PROBIT.OLD
if (is.null(results2) == T) {
if (CATCH.OFF == F) {
tryCatch(plot.data[,"MAHAL"] <- predict.MAHAL(model=results, newdata=plot.data.mahal, PROBIT=FALSE),
error= function(err) {print(paste("Mahalanobis prediction failed"))},
silent=F)
}else{
plot.data[,"MAHAL"] <- predict.MAHAL(model=results, newdata=plot.data.mahal, PROBIT=FALSE)
}
}else{
if (CATCH.OFF == F) {
tryCatch(plot.data[,"MAHAL"] <- predict.MAHAL(model=results, newdata=plot.data.mahal, PROBIT=TRUE),
error= function(err) {print(paste("Mahalanobis prediction failed"))},
silent=F)
}else{
plot.data[,"MAHAL"] <- predict.MAHAL(model=results, newdata=plot.data.mahal, PROBIT=TRUE)
}
plot.data[,"MAHAL.step1"] <- plot.data[,"MAHAL"]
plot.data[,"MAHAL"] <- predict.glm(object=results2, newdata=plot.data, type="response")
}
}
if (MAHAL01 > 0) {
results <- MAHAL01.OLD
if (CATCH.OFF == F) {
tryCatch(plot.data[,"MAHAL01"] <- predict.MAHAL01(model=results, newdata=plot.data.mahal, MAHAL.shape=MAHAL.shape),
error= function(err) {print(paste("transformed Mahalanobis prediction failed"))},
silent=F)
}else{
plot.data[,"MAHAL01"] <- predict.MAHAL01(model=results, newdata=plot.data.mahal, MAHAL.shape=MAHAL.shape)
}
        results2 <- MAHAL01.PROBIT.OLD
if (is.null(results2) == F) {
plot.data[,"MAHAL01.step1"] <- plot.data[,"MAHAL01"]
plot.data[,"MAHAL01"] <- predict.glm(object=results2, newdata=plot.data, type="response")
}
}
#
plot.data[,"ENSEMBLE"] <- ws["MAXENT"]*plot.data[,"MAXENT"] + ws["MAXNET"]*plot.data[,"MAXNET"] + ws["GBM"]*plot.data[,"GBM"] +
ws["GBMSTEP"]*plot.data[,"GBMSTEP"] + ws["RF"]*plot.data[,"RF"] + ws["CF"]*plot.data[,"CF"]
+ ws["GLM"]*plot.data[,"GLM"] +
ws["GLMSTEP"]*plot.data[,"GLMSTEP"] + ws["GAM"]*plot.data[,"GAM"] + ws["GAMSTEP"]*plot.data[,"GAMSTEP"] +
ws["MGCV"]*plot.data[,"MGCV"] + ws["MGCVFIX"]*plot.data[,"MGCVFIX"] + ws["EARTH"]*plot.data[,"EARTH"] +
ws["RPART"]*plot.data[,"RPART"] + ws["NNET"]*plot.data[,"NNET"] + ws["FDA"]*plot.data[,"FDA"] +
ws["SVM"]*plot.data[,"SVM"] + ws["SVME"]*plot.data[,"SVME"] + ws["GLMNET"]*plot.data[,"GLMNET"]
ws["BIOCLIM.O"]*plot.data[,"BIOCLIM.O"] + ws["BIOCLIM"]*plot.data[,"BIOCLIM"] +
ws["DOMAIN"]*plot.data[,"DOMAIN"] + ws["MAHAL"]*plot.data[,"MAHAL"] + ws["MAHAL01"]*plot.data[,"MAHAL01"]
#
for (i in 1:length(modelnames)) {
if (sum(plot.data[, which(names(plot.data) == modelnames[i])]) == 0) {plot.data <- plot.data[, which(names(plot.data) != modelnames[i]), drop=F]}
}
out <- list(plot.data=plot.data, TrainData=models.list$TrainData)
remove(plot.data.vars, envir=.BiodiversityR)
remove(plot.data.numvars, envir=.BiodiversityR)
remove(plot.data.domain, envir=.BiodiversityR)
remove(plot.data.mahal, envir=.BiodiversityR)
return(out)
}
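# Hedged usage sketch (assumed objects, not part of the package code): 'predictors1'
# is a RasterStack of explanatory variables and 'models1' an earlier ensemble
# calibration result in which the fitted models were kept (passed as models.list).
if (FALSE) {
    strip1 <- evaluation.strip.data(xn=predictors1, models.list=models1, steps=200)
    head(strip1$plot.data)
}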
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/evaluation.strip.data.R
|
`evaluation.strip.plot` <- function(
data, TrainData=NULL,
variable.focal=NULL, model.focal=NULL, ylim=c(0, 1.25),
dev.new.width=7, dev.new.height=7, ...
)
{
if (is.null(TrainData) == F) {
TrainData <- TrainData[TrainData[, "pb"]==1, ]
TrainData[, "pb"] <- as.factor(TrainData[, "pb"])
}
modelnames <- c("MAXENT", "MAXNET", "MAXLIKE", "GBM", "GBMSTEP", "RF", "CF", "GLM", "GLMSTEP", "GAM", "GAMSTEP", "MGCV",
"MGCVFIX", "EARTH", "RPART", "NNET", "FDA", "SVM", "SVME", "GLMNET",
"BIOCLIM.O", "BIOCLIM", "DOMAIN", "MAHAL", "MAHAL01", "ENSEMBLE")
modelnames <- names(data)[which(names(data) %in% modelnames)]
if(is.null(variable.focal)==F) {
v <- which(names(data) == variable.focal)
v <- v-2
f <- data[,1]==v
vars <- max(data[,1])
# plot for all model.focals
if (is.null(model.focal) == T) {
n.models <- length(modelnames)
# model.focals with data
dim1 <- max(1, ceiling(sqrt(n.models)))
dim2 <- max(1, ceiling(n.models/dim1))
par.old <- graphics::par(no.readonly=T)
if (dev.new.width > 0 && dev.new.height > 0) {grDevices::dev.new(width=dev.new.width, height=dev.new.height)}
graphics::par(mfrow=c(dim1,dim2))
for (j in 1:n.models) {
if (is.null(TrainData)==T || is.factor(TrainData[, which(names(TrainData) == variable.focal)])==T) {
graphics::plot(data[f,v+2], data[f, 2+vars+j], main=variable.focal, xlab="", ylab=names(data)[2+vars+j], ylim=ylim, ...)
}else{
graphics::plot(data[f,v+2], data[f, 2+vars+j], main=variable.focal, xlab="", ylab=names(data)[2+vars+j], ylim=ylim, ...)
graphics::boxplot(TrainData[, which(names(TrainData) == variable.focal)] ~ TrainData[,"pb"], add=T, horizontal=T)
}
}
graphics::par(par.old)
}else{
m <- which(names(data) == model.focal)
if (dev.new.width > 0 && dev.new.height > 0) {grDevices::dev.new(width=dev.new.width, height=dev.new.height)}
if (is.null(TrainData)==T || is.factor(TrainData[, which(names(TrainData) == variable.focal)])==T) {
graphics::plot(data[f,v+2], data[f, m], main=variable.focal, xlab="", ylab=model.focal, ylim=ylim, ...)
}else{
graphics::plot(data[f,v+2], data[f, m], main=variable.focal, xlab="", ylab=model.focal, ylim=ylim, ...)
graphics::boxplot(TrainData[, which(names(TrainData) == variable.focal)] ~ TrainData[,"pb"], add=T, horizontal=T)
}
}
}
if(is.null(model.focal)==F && is.null(variable.focal)==T) {
m <- which(names(data) == model.focal)
# model.focals with data
vars <- max(data[,1])
dim1 <- max(1, ceiling(sqrt(vars)))
dim2 <- max(1, ceiling(vars/dim1))
if (dev.new.width > 0 && dev.new.height > 0) {grDevices::dev.new(width=dev.new.width, height=dev.new.height)}
par.old <- graphics::par(no.readonly=T)
graphics::par(mfrow=c(dim1,dim2))
for (i in 1:vars) {
f <- which(data[,1]==i)
if (is.null(TrainData)==T || is.factor(TrainData[, which(names(TrainData) == names(data)[i+2])])==T) {
graphics::plot(data[f,i+2], data[f, m], main=names(data)[i+2], xlab="", ylab=model.focal, ylim=ylim, ...)
}else{
graphics::plot(data[f,i+2], data[f, m], main=names(data)[i+2], xlab="", ylab=model.focal, ylim=ylim, ...)
varfocal <- names(data)[i+2]
graphics::boxplot(TrainData[, which(names(TrainData) == varfocal)] ~ TrainData[,"pb"], add=T, horizontal=T)
}
}
graphics::par(par.old)
}
}
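# Hedged usage sketch (assumed objects, not part of the package code): 'strip1' is
# assumed to be the output of evaluation.strip.data(); "bio5" and "ENSEMBLE" are
# example names for a focal explanatory variable and a focal model column.
if (FALSE) {
    evaluation.strip.plot(data=strip1$plot.data, TrainData=strip1$TrainData,
        variable.focal="bio5", model.focal="ENSEMBLE")
}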
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/evaluation.strip.plot.R
|
if(.Platform$OS.type == "windows") {
`import.from.Access` <-
function(file=file.choose(), data.type="community", table=NULL, sitenames="sites", column="species", value="abundance", factor="", level="", cepnames=FALSE) {
# if (!require(RODBC)) {stop("Requires package RODBC")}
dataplace <- RODBC::odbcConnectAccess(file)
if (is.null(data.type) == TRUE) {data.type <- table}
TYPES <- c("community", "environmental", "stacked")
data.type <- match.arg(data.type, TYPES)
if (is.null(table) == TRUE) {table <- data.type}
if (data.type == "stacked") {
stackeddata <- RODBC::sqlFetch(dataplace, table)
result <- makecommunitydataset(stackeddata, row=sitenames, column=column, value=value, factor=factor, level=level)
}else{
result <- RODBC::sqlFetch(dataplace, table, rownames=sitenames)
}
close(dataplace)
rownames(result) <- make.names(rownames(result), unique=T)
if (cepnames == TRUE) {
colnames(result) <- make.cepnames(colnames(result))
}else{
colnames(result) <- make.names(colnames(result), unique=T)
}
return(result)
}
`import.from.Access2007` <-
function(file=file.choose(), data.type="community", table=NULL, sitenames="sites", column="species", value="abundance", factor="", level="", cepnames=FALSE) {
# if (!require(RODBC)) {stop("Requires package RODBC")}
dataplace <- RODBC::odbcConnectAccess2007(file)
if (is.null(data.type) == TRUE) {data.type <- table}
TYPES <- c("community", "environmental", "stacked")
data.type <- match.arg(data.type, TYPES)
if (is.null(table) == TRUE) {table <- data.type}
if (data.type == "stacked") {
stackeddata <- RODBC::sqlFetch(dataplace, table)
result <- makecommunitydataset(stackeddata, row=sitenames, column=column, value=value, factor=factor, level=level)
}else{
result <- RODBC::sqlFetch(dataplace, table, rownames=sitenames)
}
close(dataplace)
rownames(result) <- make.names(rownames(result), unique=T)
if (cepnames == TRUE) {
colnames(result) <- make.cepnames(colnames(result))
}else{
colnames(result) <- make.names(colnames(result), unique=T)
}
return(result)
}
}
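# Hedged usage sketch (assumed file and table names, not part of the package code;
# Windows only, requires the RODBC package and an Access ODBC driver).
if (FALSE) {
    community1 <- import.from.Access2007(file="survey.accdb", data.type="community",
        table="community", sitenames="sites")
}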
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/import.from.Access.R
|
if(.Platform$OS.type == "windows") {
`import.from.Excel` <-
function(file=file.choose(), data.type="community", sheet=NULL, sitenames="sites",
column="species", value="abundance", factor="", level="", cepnames=FALSE,
write.csv=FALSE, csv.file=paste(data.type, ".csv", sep="") )
{
# if (!require(RODBC)) {stop("Requires package RODBC")}
dataplace <- RODBC::odbcConnectExcel(file)
if (is.null(data.type) == TRUE) {data.type <- sheet}
TYPES <- c("community", "environmental", "stacked")
data.type <- match.arg(data.type, TYPES)
if (is.null(sheet) == TRUE) {sheet <- data.type}
if (data.type == "stacked") {
stackeddata <- RODBC::sqlFetch(dataplace,sheet)
result <- makecommunitydataset(stackeddata,row=sitenames,column=column,value=value,factor=factor,level=level)
data.type <- "community"
}else{
result <- RODBC::sqlFetch(dataplace,sheet,rownames=sitenames)
}
close(dataplace)
rownames(result) <- make.names(rownames(result),unique=T)
if (cepnames == TRUE && data.type == "community") {
colnames(result) <- make.cepnames(colnames(result))
}else{
colnames(result) <- make.names(colnames(result),unique=T)
}
if (write.csv == TRUE) {utils::write.table(x=result, file=csv.file, row.names=T, col.names=T, sep=',')}
return(result)
}
`import.from.Excel2007` <-
function(file=file.choose(), data.type="community", sheet=NULL, sitenames="sites",
column="species", value="abundance", factor="", level="", cepnames=FALSE,
write.csv=FALSE, csv.file=paste(data.type, ".csv", sep="") )
{
# if (!require(RODBC)) {stop("Requires package RODBC")}
dataplace <- RODBC::odbcConnectExcel2007(file)
if (is.null(data.type) == TRUE) {data.type <- sheet}
TYPES <- c("community", "environmental", "stacked")
data.type <- match.arg(data.type, TYPES)
if (is.null(sheet) == TRUE) {sheet <- data.type}
if (data.type == "stacked") {
stackeddata <- RODBC::sqlFetch(dataplace, sheet)
result <- makecommunitydataset(stackeddata, row=sitenames, column=column, value=value, factor=factor, level=level)
}else{
result <- RODBC::sqlFetch(dataplace,sheet,rownames=sitenames)
}
close(dataplace)
rownames(result) <- make.names(rownames(result), unique=T)
if (cepnames == TRUE && data.type == "community") {
colnames(result) <- make.cepnames(colnames(result))
}else{
colnames(result) <- make.names(colnames(result), unique=T)
}
if (write.csv == TRUE) {utils::write.table(x=result, file=csv.file, row.names=T, col.names=T, sep=',')}
return(result)
}
}
`import.with.readxl` <-
function(file=file.choose(), data.type="community", sheet=NULL, sitenames="sites",
column="species", value="abundance", factor="", level="", cepnames=FALSE,
write.csv=FALSE, csv.file=paste(data.type, ".csv", sep="") )
{
if (is.null(data.type) == TRUE) {data.type <- sheet}
TYPES <- c("community", "environmental", "stacked")
data.type <- match.arg(data.type, TYPES)
if (is.null(sheet) == TRUE) {sheet <- data.type}
if (data.type == "stacked") {
stackeddata <- readxl::read_excel(file, sheet=sheet)
stackeddata <- as.data.frame(stackeddata)
result <- makecommunitydataset(stackeddata, row=sitenames, column=column, value=value, factor=factor, level=level)
}else{
result <- readxl::read_excel(file, sheet=sheet)
result <- as.data.frame(result)
rownames(result) <- result[, sitenames]
result <- result[, which(names(result) != sitenames)]
}
rownames(result) <- make.names(rownames(result), unique=T)
if (cepnames == TRUE && data.type == "community") {
colnames(result) <- make.cepnames(colnames(result))
}else{
colnames(result) <- make.names(colnames(result), unique=T)
}
if (data.type == "environmental") {
for (i in 1:ncol(result)) {
if (is.character(result[, i])) {result[, i] <- as.factor(result[, i])}
}
}
if (write.csv == TRUE) {utils::write.table(x=result, file=csv.file, row.names=T, col.names=T, sep=',')}
return(result)
}
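# Hedged usage sketch (assumed file and sheet names, not part of the package code):
# a 'stacked' sheet with columns sites, species and abundance is read with readxl
# and cross-tabulated into a community matrix via makecommunitydataset().
if (FALSE) {
    community1 <- import.with.readxl(file="survey.xlsx", data.type="stacked",
        sheet="stacked", sitenames="sites", column="species", value="abundance")
}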
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/import.from.Excel.R
|
`importancevalue` <-
function(x, site="plotID", species="species",
count="count", basal="basal",
factor="forest", level="")
{
if (any(names(x) == site) == F) {stop ("site variable not defined")}
if (any(names(x) == species) == F) {stop ("species variable not defined")}
if (any(names(x) == count) == F) {stop ("count variable not defined")}
if (any(names(x) == basal) == F) {stop ("basal area variable not defined")}
if (factor != "") {
if (any(levels(droplevels(factor(x[, factor]))) == level) == F) {stop ("specified level not among factor levels")}
subs <- x[, factor]==level
x <- x[subs, ,drop=F]
}
species.names <- levels(droplevels(factor(x[, species])))
p <- length(species.names)
result <- array(dim=c(p, 7))
colnames(result) <- c("frequency", "density", "dominance", "frequency.percent", "density.percent", "dominance.percent", "importance.value")
rownames(result) <- species.names
total.plots <- sum(table(x[, site]) > 0)
for (j in 1:p) {
subs <- x[, species] == rownames(result)[j]
spec.data <- x[subs, , drop=F]
spec.data <- data.frame(spec.data)
result[j, "frequency"] <- sum(table(spec.data[, site]) > 0) / total.plots
result[j, "density"] <- sum(spec.data[, count])
result[j, "dominance"] <- sum(spec.data[, basal])
}
total.freq <- sum(result[, "frequency"])
total.density <- sum(x[, count])
total.dominance <- sum(x[, basal])
for (j in 1:p) {
result[j, "frequency.percent"] <- result[j, "frequency"] / total.freq * 100
result[j, "density.percent"] <- result[j, "density"] / total.density * 100
result[j, "dominance.percent"] <- result[j, "dominance"] / total.dominance * 100
result[j, "importance.value"] <- sum(result[j, c("frequency.percent", "density.percent", "dominance.percent")])
}
result <- result[order(result[, "importance.value"], decreasing=T),]
return(result)
}
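# Example (illustrative only): importance values per species for one forest type,
# assuming a stacked tree inventory with columns 'forest', 'plotID', 'species',
# 'count' and 'basal' (as in the ifri data distributed with BiodiversityR).
# data(ifri)
# importancevalue(ifri, site="plotID", species="species", count="count",
#     basal="basal", factor="forest", level=levels(factor(ifri$forest))[1])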
`importancevalue.comp` <-
function(x, site="plotID", species="species",
count="count", basal="basal",
factor="forest")
{
groups <- table(x[, factor])
m <- length(groups)
levels <- names(groups)
result <- list(values=levels)
for (i in 1:m) {
resultx <- importancevalue(x=x, site=site, species=species, count=count, basal=basal,
factor=factor, level=levels[i])
result[[levels[i]]] <- resultx
}
return(result)
}
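# Example (illustrative only), with the same assumed ifri data as above, giving
# one importance value table per level of 'forest':
# importancevalue.comp(ifri, site="plotID", species="species", count="count",
#     basal="basal", factor="forest")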
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/importancevalue.R
|
`loaded.citations` <-
function() {
loaded.packages <- (.packages())
cat("Loaded packages: \n")
cat(loaded.packages, "\n")
cat("\n Citations: \n")
standard.packages <- c("datasets", "grDevices", "graphics", "grid", "methods", "splines",
"stats", "stats4", "tcltk", "tools", "utils")
    non.standard <- !(loaded.packages %in% standard.packages)
loaded.packages <- loaded.packages[non.standard]
for (i in 1:length(loaded.packages)) {print(utils::citation(loaded.packages[i]))}
}
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/loaded.citations.R
|
`makecommunitydataset` <-
function(x, row, column, value, factor="", level="", drop=F)
{
x[, row] <- as.factor(x[, row])
x[, column] <- as.factor(x[, column])
if(factor != "") {
subs <- x[, factor]==level
for (q in 1:length(subs)) {
if(is.na(subs[q])) {subs[q]<-F}
}
x <- x[subs, , drop=F]
}
x[, row] <- x[, row][drop=drop]
x[, column] <- x[, column][drop=T]
result <- table(x[, row], x[, column])
rows <- rownames(result)
r <- length(rows)
cols <- colnames(result)
c <- length(cols)
result2 <- array(dim=c(r, c))
for (i in 1:r){
sub1 <- x[,row]==rows[i]
subset <- x[sub1, ]
for (j in 1:c) {
sub2 <- subset[, column]==cols[j]
subset2 <- subset[sub2, ]
result2[i,j] <- sum(subset2[, value])
}
}
rownames(result2) <- make.names(rownames(result), unique=T)
colnames(result2) <- make.names(colnames(result), unique=T)
result2 <- as.data.frame(result2)
return(result2)
}
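# Example (illustrative only): cross-tabulate a small, hypothetical stacked data
# set into a sites x species community matrix with summed abundance values.
# stacked <- data.frame(sites=c("A", "A", "B", "B", "B"),
#     species=c("sp1", "sp2", "sp1", "sp1", "sp3"),
#     abundance=c(2, 1, 3, 1, 5))
# makecommunitydataset(stacked, row="sites", column="species", value="abundance")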
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/makecommunitydataset.R
|
`multiconstrained` <-
function (method ="capscale", formula, data, distance = "bray", comm = NULL, add = FALSE, multicomp="", contrast=0,...) {
METHODS <- c("rda", "cca", "capscale", "dbrda")
method <- match.arg(method, METHODS)
commun <- eval(as.name((all.vars(formula)[1])))
if (inherits(commun, "dist")) {
if (method=="rda" || method=="cca") {stop("analysis not possible for distance matrix")}
wasdist <- T
commun <- data.frame(as.matrix(commun))
}else{
wasdist <- F
}
if(multicomp=="") {multicomp <- all.vars(formula)[2]}
levels <- names(table(data[,multicomp]))
l <- length(levels)
pairs <- utils::combn(l,2)
p <- ncol(pairs)
df <- chi <- Fval <- nperm <- Pval <- numeric(p)
result <- data.frame(df,chi,Fval,Pval)
for (i in 1:p) {
level1 <- levels[pairs[1,i]]
level2 <- levels[pairs[2,i]]
subs <- (data[, multicomp] == level1) | (data[,multicomp] == level2)
for (q in 1:length(subs)) {
if (is.na(subs[q])) {
subs[q] <- F
}
}
if (wasdist == F) {
comm1 <- commun[subs,,drop=F]
}else{
comm1 <- commun[subs,subs,drop=F]
}
data1 <- data[subs,,drop=F]
for (j in 1:ncol(data1)) {
if (is.factor(data1[,j])) {data1[,j] <- factor(data1[,j][drop=T])}
}
if (wasdist == F) {
freq <- apply(comm1, 2, sum)
subs <- freq > 0
comm1 <- comm1[, subs, drop = F]
}else{
comm1 <- as.dist(comm1)
}
newenvdata <- data1
newcommunity <- comm1
.BiodiversityR <- new.env()
assign("newenvdata",data1,envir=.BiodiversityR)
assign("newcommunity",comm1,envir=.BiodiversityR)
formula1 <- update(formula, newcommunity ~ .)
environment(formula1) <- .BiodiversityR
if (method == "rda") {ordinationresult <- rda(formula1, data=newenvdata)}
if (method == "cca") {ordinationresult <- cca(formula1, data=newenvdata)}
if (method == "capscale") {ordinationresult <- capscale(formula1, data=newenvdata, distance=distance, add=add)}
if (method == "dbrda") {ordinationresult <- dbrda(formula1, data=newenvdata, distance=distance, add=add)}
if (contrast==i) {
comm1 <- data.frame(as.matrix(comm1))
cat("Multiple comparisons for", method, "for", multicomp, "\n")
if (method=="capscale" || method=="dbrda") {
if (wasdist == T) {cat("Analysis done with distance matrix and add=", add, "\n")
}else{cat("Analysis done with", distance, "distance and add=", add, "\n")}
}
cat("Contrast: ", level1, "vs. ", level2, "\n")
return(ordinationresult)
}
anovaresult <- anova.cca(ordinationresult, ...)
result[i,] <- anovaresult[1,]
rownames(result)[i] <- paste(level1, "vs.", level2)
}
remove("newenvdata",envir=.BiodiversityR)
remove("newcommunity",envir=.BiodiversityR)
colnames(result) <- c("Df", "SumOfSqs", "F", "Pr(>F)")
head <- paste("Multiple comparisons for", method, "for all contrasts of", multicomp, "\n")
mod <- paste("Model: ", c(match.call()), "\n")
structure(result, heading = c(head,mod), Random.seed = NULL,
class = c("anova.cca", "anova", "data.frame"))
}
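# Example (illustrative only): pairwise constrained ordinations for all contrasts
# of Management, using the dune data sets from vegan.
# library(vegan)
# data(dune)
# data(dune.env)
# multiconstrained(method="capscale", formula=dune ~ Management, data=dune.env,
#     distance="bray", add=TRUE)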
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/multiconstrained.R
|
`nested.anova.dbrda` <-
function (formula, data, method = "euc", add = FALSE,
permutations = 100,
warnings = FALSE)
{
randomize = function(data, toplev, lowlev) {
newdata <- data
orig.levs <- levels(droplevels(data[, lowlev]))
nl <- length(orig.levs)
new.levs <- orig.levs[sample(nl)]
for (i in 1:nl) {
subs1 <- data[, lowlev] == orig.levs[i]
subs2 <- data[, lowlev] == new.levs[i]
newtops <- data[subs2, toplev]
newtops <- newtops[1]
newtops <- rep(newtops, sum(subs1))
newdata[subs1, toplev] <- newtops
}
return(newdata)
}
randomize2 = function(data, strata) {
newdata <- data
orig.levs <- levels(droplevels(data[, strata]))
nl <- length(orig.levs)
for (i in 1:nl) {
subs <- data[, strata] == orig.levs[i]
nsub <- sum(subs == T)
subdata <- data[subs, ]
newdata[subs, ] <- subdata[sample(nsub), ]
}
return(newdata)
}
ow <- options("warn")
if (warnings == FALSE) {
        options(warn = -1)
}
formula <- as.formula(formula)
if (length(all.vars(formula)) > 3)
stop(paste("function only works with one main and one nested factor"))
x <- eval(as.name((all.vars(formula)[1])))
if (inherits(x, "dist")) {
distmatrix <- as.matrix(x)
}
else {
distmatrix <- as.matrix(vegdist(x, method = method))
}
SStot <- sum(distmatrix^2)/(2 * nrow(distmatrix))
cat("Total sum of squares of distance matrix:", SStot, "\n")
resp <- all.vars(formula)[1]
toplev <- all.vars(formula)[2]
lowlev <- all.vars(formula)[3]
.BiodiversityR <- new.env()
environment(formula) <- .BiodiversityR
    data1 <- data
    assign("data1", data1, envir=.BiodiversityR)
METHODS <- c("manhattan", "euclidean", "canberra", "bray",
"kulczynski", "gower", "morisita", "horn", "mountford",
"jaccard", "raup", "binomial", "chao")
methodid <- pmatch(method, METHODS)
method <- METHODS[methodid]
model <- capscale(formula, data1, distance=method, add=add)
# remember the data
model$call$data <- data1
#
    anovares <- anova(model, permutations = 2, by="terms")
anovadat <- data.frame(anovares)
adjust <- nrow(model$CCA$u) - 1
if (pmatch("mean", model$inertia, nomatch = -1) > 0) {
anovadat[, 2] <- anovadat[, 2] * adjust
model$tot.chi <- model$tot.chi * adjust
}else{
anovadat[, 2] <- anovadat[, 2] / adjust
model$tot.chi <- model$tot.chi / adjust
}
df1 <- anovadat[1, 1]
df2 <- anovadat[2, 1]
anovadat[3, 1] <- df3 <- nrow(distmatrix) - df1 - df2 - 1
anovadat[2, 3] <- anovadat[2, 2]/df3
formula1 <- as.formula(paste(resp, "~", lowlev, "+Condition(",
toplev, ")"))
environment(formula1) <- .BiodiversityR
model1 <- capscale(formula1, data = data1, distance = method,
add = add)
Ftop <- (model1$pCCA$tot.chi/df1)/(model1$CCA$tot.chi/df2)
anovadat[1, 3] <- Ftop
counter <- 1
for (i in 1:permutations) {
data2 <- randomize(data, toplev, lowlev)
assign("data2", data2, envir=.BiodiversityR)
Ordinationperm <- capscale(formula1, data = data2,
distance = method, add = add)
randomF <- (Ordinationperm$pCCA$tot.chi/df1)/(Ordinationperm$CCA$tot.chi/df2)
if (randomF >= Ftop) {
counter <- counter + 1
}
}
signi <- counter/(permutations + 1)
anovadat[1, 4] <- anovadat[2, 4] <- permutations
anovadat[1, 5] <- signi
Flow <- (model1$CCA$tot.chi/df2)/(model1$CA$tot.chi/df3)
anovadat[2, 3] <- Flow
counter <- 1
for (i in 1:permutations) {
data2 <- randomize2(data, toplev)
assign("data2", data2, envir=.BiodiversityR)
Ordinationperm <- capscale(formula1, data = data2,
distance = method, add = add)
randomF <- (Ordinationperm$CCA$tot.chi/df2)/(Ordinationperm$CA$tot.chi/df3)
if (randomF >= Flow) {
counter <- counter + 1
}
}
remove("data1", envir=.BiodiversityR)
remove("data2", envir=.BiodiversityR)
signi <- counter/(permutations + 1)
anovadat[2, 5] <- signi
colnames(anovadat) <- c("Df", "SumsOfSquares", "F", "N.Perm", "Pr(>F)")
mod <- paste("Nested anova for", lowlev, "nested within",
toplev, "\n")
head <- paste("Total sum of squares of distance-based redundancy analysis:",
c(model$tot.chi), "\n")
options(ow)
structure(anovadat, heading = c(head, mod), Random.seed = NA,
class = c("anova.cca", "anova", "data.frame"))
}
# test
# model.test <- nested.anova.dbrda(warcom~rift.valley+popshort, data=warenv, method="jac", permutations=5)
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/nested.anova.dbrda.R
|
`nested.npmanova` <-
function(formula, data, method="euc", permutations=100,
warnings=FALSE)
{
randomize=function(data,toplev,lowlev){
newdata <- data
orig.levs <- levels(droplevels(data[,lowlev]))
nl <- length(orig.levs)
new.levs <- orig.levs[sample(nl)]
for (i in 1:nl) {
subs1 <- data[, lowlev] == orig.levs[i]
subs2 <- data[, lowlev] == new.levs[i]
newtops <- data[subs2,toplev]
newtops <- newtops[1]
newtops <- rep(newtops, sum(subs1))
newdata[subs1,toplev] <- newtops
}
return(newdata)
}
randomize2=function(data,strata){
newdata <- data
orig.levs <- levels(droplevels(data[,strata]))
nl <- length(orig.levs)
for (i in 1:nl) {
subs <- data[, strata] == orig.levs[i]
nsub <- sum(subs==T)
subdata <- data[subs,]
newdata[subs,] <- subdata[sample(nsub),]
}
return(newdata)
}
ow <- options("warn")
    if (warnings==FALSE) {options(warn=-1)}
formula <- as.formula(formula)
.BiodiversityR <- new.env()
environment(formula) <- .BiodiversityR
if (length(all.vars(formula)) > 3)
stop(paste("function only works with one main and one nested factor"))
x <- eval(as.name((all.vars(formula)[1])))
if (inherits(x, "dist")) {
distmatrix <- as.matrix(x)
}else{
distmatrix <- as.matrix(vegdist(x, method = method))
}
SStot <- sum(distmatrix^2)/(2*nrow(distmatrix))
cat("Total sum of squares of distance matrix:", SStot, "\n")
resp <- all.vars(formula)[1]
toplev <- all.vars(formula)[2]
lowlev <- all.vars(formula)[3]
data1 <- data
assign("data1", data1, envir=.BiodiversityR)
# modified August 2022
adonis1 <- adonis2(formula, data1, permutations=2, method=method)
# modified August 2022 to work with adonis2
# adonis1 <- data.frame(adonis1$aov.tab)
adonis1 <- data.frame(adonis1)
# modified August 2022, column with R2 was removed from original results
# anovadat <- adonis1[1:3, -5]
anovadat <- adonis1[1:3, ]
df1 <- anovadat[1, 1]
df2 <- anovadat[2, 1]
df3 <- nrow(distmatrix)-df1-df2-1
sstop <- anovadat[1, 2]
sslow <- anovadat[2, 2]
ssres <- anovadat[3, 2]
vartot <- adonis1[4, 2]
# new F calculations in column 3
Ftop <- anovadat[1, 3] <- (sstop/df1)/(sslow/df2)
Flow <- anovadat[2, 3] <- (sslow/df2)/(ssres/df3)
anovadat[3, 3] <- NA
counter <- 1
for (i in 1:permutations) {
data2 <- randomize(data, toplev, lowlev)
assign("data2", data2, envir=.BiodiversityR)
# modified August 2022
# adonis2r <- adonis(formula, data=data2, method=method, permutations=2)
# adonis2r <- data.frame(adonis2r$aov.tab)
adonis2r <- adonis2(formula, data=data2, method=method, permutations=2)
adonis2r <- data.frame(adonis2r)
Frand <- (adonis2r[1,2]/df1)/(adonis2r[2,2]/df2)
if (Frand >= Ftop) {counter <- counter+1}
}
signi <- counter/(permutations+1)
# new permutations in column 4
anovadat[1,4] <- anovadat[2,4] <- permutations
# significance in column 5
anovadat[1,5] <- signi
counter <- 1
for (i in 1:permutations) {
data2 <- randomize2(data, toplev)
assign("data2", data2, envir=.BiodiversityR)
# modified August 2022
# adonis2r <- adonis(formula, data=data2, method=method, permutations=2)
# adonis2r <- data.frame(adonis2r$aov.tab)
adonis2r <- adonis2(formula, data=data2, method=method, permutations=2)
adonis2r <- data.frame(adonis2r)
Frand <- (adonis2r[2,2]/df2)/(adonis2r[3,2]/df3)
if (Frand >= Flow) {counter <- counter+1}
}
remove("data1", envir=.BiodiversityR)
remove("data2", envir=.BiodiversityR)
signi <- counter/(permutations+1)
anovadat[2,5] <- signi
colnames(anovadat) <- c("Df", "SumsofSquares", "F", "N.Perm", "Pr(>F)")
mod <- paste("Nested anova for", lowlev, "nested within", toplev, "\n")
head <- paste("Total sum of squares for non-parametric manova:", vartot, "\n")
options(ow)
structure(anovadat, heading = c(head, mod), Random.seed = NA,
class = c("anova.cca", "anova", "data.frame"))
}
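# Example (illustrative only), mirroring the nested.anova.dbrda test above with
# the warcom and warenv data sets distributed with BiodiversityR:
# data(warcom)
# data(warenv)
# nested.npmanova(warcom ~ rift.valley + popshort, data=warenv, method="jac",
#     permutations=5)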
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/nested.npmanova.R
|
`nnetrandom` <-
function(formula,data,tries=10,leave.one.out=F,...){
nnet.1 <- function(formula,data,tries,...) {
optimal <- Inf
values <- numeric(length=tries)
for (i in 1:tries) {
            temp.result <- nnet::nnet(formula=formula, data=data, ...)
values[i] <- temp.result$value
if (temp.result$value < optimal) {
final.result <- temp.result
optimal <- temp.result$value
}
}
final.result$range <- summary(values)
return(final.result)
}
result <- nnet.1(formula=formula,data=data,tries=tries,...)
result$tries <- tries
if (leave.one.out==T) {
predictresult <- character(length=nrow(data))
respvar <- all.vars(formula)[1]
for (i in 1:nrow(data)) {
data1 <- data[-i,]
result1 <- nnet.1(formula=formula,data=data1,tries=tries,...)
            predictresult[i] <- predict(result1, newdata=data, type="class")[i]
}
result$CV <- predictresult
result$successful <- predictresult==data[,respvar]
}
return(result)
}
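# Example (illustrative only): repeated nnet fits, keeping the fit with the
# lowest criterion value; 'size' is passed on to nnet::nnet via '...'.
# Uses the iris data from base R.
# nnetrandom(Species ~ ., data=iris, size=2, tries=10, leave.one.out=FALSE)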
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/nnetrandom.R
|
`ordibubble` <-
function(ordiplot,var,...) {
y2 <- abs(var)
ordiscores <- scores(ordiplot, display="sites")
for (i in 1:length(var)) {
if (var[i] < 0) {
var[i] <- NA
}else{
y2[i] <- NA
}
}
if (sum(var,na.rm=T) > 0) {graphics::symbols(ordiscores[,1], ordiscores[,2], circles=var, add=T,...)}
if (sum(y2,na.rm=T) > 0) {graphics::symbols(ordiscores[,1], ordiscores[,2], squares=y2, add=T,...)}
}
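# Example (illustrative only): bubble plot of a continuous variable on an
# ordination diagram, using the dune data sets from vegan.
# library(vegan)
# data(dune)
# data(dune.env)
# ordination.model <- metaMDS(dune)
# plot1 <- ordiplot(ordination.model, choices=c(1, 2))
# ordibubble(plot1, dune.env$A1)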
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/ordibubble.R
|
`ordicluster2` <-
function(ordiplot,cluster,mingroups=1,maxgroups=nrow(ordiplot$sites),...){
mingroups <- max(1,mingroups)
maxgroups <- min(maxgroups,nrow(ordiplot$sites))
n <- nrow(ordiplot$sites)
for (i in mingroups:maxgroups) {
groups <- cutree(cluster,k=i)
ordihull(ordiplot,groups,...)
}
}
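# Example (illustrative only): add convex hulls for successive numbers of
# clusters from a hierarchical classification onto an ordination diagram.
# library(vegan)
# data(dune)
# clust1 <- hclust(vegdist(dune, method="bray"), method="average")
# ordination.model <- metaMDS(dune)
# plot1 <- ordiplot(ordination.model)
# ordicluster2(plot1, clust1, mingroups=2, maxgroups=4)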
|
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/ordicluster2.R
|