#' Wave data
#'
#' Environmental measurements taken around Bordeaux, combining records from the CANDHIS database (the French national in situ wave measurement database) with data from the InfoClimat website over the same period.
#' The CANDHIS measurements were carried out within the framework of a collaboration between the Grand Port Maritime de Nantes St-Nazaire, the École Centrale de Nantes and CEREMA.
#'
#' @usage data(wave)
#'
#' @name wave
#' @docType data
#'
#' @format A data.frame with 453 observations (rows) and 25 variables with explicit names. The first variables are:
#' \describe{
#' \item{date}{date of measurement}
#' \item{temperature}{temperature}
#' \item{pression}{pressure}
#' \item{humidite_relative}{relative humidity}
#' \item{point2rose}{dew point temperature}
#' \item{visibilite_horiz}{horizontal visibility}
#' \item{vent_cite_moy}{average wind speed}
#' \item{vent_vit_rafale}{maximum wind speed (gust)}
#' \item{precipitation_cum}{cumulative daily rainfall}
#' }
#'
#'
#' @references InfoClimat (https://www.infoclimat.fr/climatologie/stations_principales.php?)
#'
#' @examples
#' data(wave)
NULL
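# Illustrative usage sketch (assumes the BeQut package is installed so that `wave`
# is available); column names follow the @format block above.
if (FALSE) {
  data(wave)
  dim(wave)    # 453 observations of 25 variables
  head(wave[, c("date", "temperature", "pression", "humidite_relative")])
  summary(wave$precipitation_cum)   # cumulative daily rainfall
}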
|
/scratch/gouwar.j/cran-all/cranData/BeQut/R/wave.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
bess_lm <- function(X, y, T0, max_steps, beta0) {
.Call(`_BeSS_bess_lm`, X, y, T0, max_steps, beta0)
}
get_A <- function(X, y, beta, coef0, T0, B, weights) {
.Call(`_BeSS_get_A`, X, y, beta, coef0, T0, B, weights)
}
getcox_A <- function(X, beta, T0, B, status, weights) {
.Call(`_BeSS_getcox_A`, X, beta, T0, B, status, weights)
}
EigenR <- function(X) {
.Call(`_BeSS_EigenR`, X)
}
gbess_lm <- function(X, y, G, index, PhiG, invPhiG, T0, max_steps, beta0, n, p, N) {
.Call(`_BeSS_gbess_lm`, X, y, G, index, PhiG, invPhiG, T0, max_steps, beta0, n, p, N)
}
gget_A <- function(X, y, G, index, T0, beta0, coef0, n, p, N, weights, B00) {
.Call(`_BeSS_gget_A`, X, y, G, index, T0, beta0, coef0, n, p, N, weights, B00)
}
ggetcox_A <- function(X, G, index, T0, beta0, n, p, N, status, weights, B00) {
.Call(`_BeSS_ggetcox_A`, X, G, index, T0, beta0, n, p, N, status, weights, B00)
}
# Register entry points for exported C++ functions
methods::setLoadAction(function(ns) {
.Call(`_BeSS_RcppExport_registerCCallable`)
})
|
/scratch/gouwar.j/cran-all/cranData/BeSS/R/RcppExports.R
|
aic=function(object,...){
AIC=object$AIC
return(AIC)
}
bic=function(object,...){
BIC=object$BIC
return(BIC)
}
ebic=function(object,...){
EBIC=object$EBIC
return(EBIC)
}
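# Illustrative usage sketch (assumes the BeSS functions gen.data() and bess() defined
# elsewhere in this package are available); the extractors above simply return the
# stored $AIC, $BIC and $EBIC vectors of a fitted 'bess' object.
if (FALSE) {
  dat <- gen.data(n = 200, p = 20, family = "gaussian", K = 5)
  fit <- bess(dat$x, dat$y, family = "gaussian", method = "sequential", s.list = 1:10)
  aic(fit)    # AIC along the solution path
  bic(fit)    # BIC along the solution path
  ebic(fit)   # EBIC along the solution path
}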
|
/scratch/gouwar.j/cran-all/cranData/BeSS/R/aic.R
|
bess = function(x, y, family = c("gaussian", "binomial", "cox"),
method = "gsection", s.min = 1,
s.max,
s.list,
K.max = 20,
max.steps = 15,
glm.max = 1e6,
cox.max = 20,
factor = NULL,
epsilon = 1e-4,
weights=rep(1,nrow(x)))
{
family <- match.arg(family)
if(ncol(x)==1|is.vector(x)) stop("x should have at least two columns!")
if(missing(family)) stop("Please input family!")
if(!is.null(factor)) method = "sequential"
if(family=="binomial")
{
if(is.factor(y)){
y = as.character(y)
}
if(length(unique(y))!=2) stop("Please input binary variable!")else
if(setequal(y_names<-unique(y),c(0,1))==FALSE)
{
y[which(y==unique(y)[1])]=0
y[which(y==unique(y)[2])]=1
y=as.numeric(y)
}
}
if(family=="cox")
{
if(!is.matrix(y)) y=as.matrix(y)
if(ncol(y)!=2) stop("Please input y with two columns!")
}
if(is.vector(y))
{
if(nrow(x)!=length(y)) stop("Rows of x must be the same as length of y!")
}else{
if(nrow(x)!=nrow(y)) stop("Rows of x must be the same as rows of y!")
}
if(missing(s.max)) s.max=min(ncol(x),round(nrow(x)/log(nrow(x))))
weights = weights/mean(weights)
beta0=rep(0,ncol(x))
if(!is.null(factor)){
if(is.null(colnames(x))) colnames(x) = paste0("X",1:ncol(x),"g")
if(!is.data.frame(x)) x = as.data.frame(x)
x[,factor] = apply(x[,factor,drop=FALSE], 2, function(x){
x = as.factor(x)
})
group = rep(1, ncol(x))
names(group) = colnames(x)
group[factor] = apply(x[,factor,drop=FALSE], 2,function(x) {length(unique(x))})-1
Gi = rep(1:ncol(x), times = group)
beta0 = rep(beta0, times = group)
x = model.matrix(~., data = x)[,-1]
}
#normalize x
x=as.matrix(x)
xs=x
nm = dim(x)
n = nm[1]
p = nm[2]
one = rep(1, n)
vn = dimnames(x)[[2]]
meanx = drop(weights %*% x)/n
x = scale(x, meanx, FALSE)
normx = sqrt(drop(weights %*% (x^2)))
nosignal = normx/sqrt(n) < .Machine$double.eps
if (any(nosignal)) normx[nosignal] = (.Machine$double.eps) * sqrt(n)
names(normx) = vn
x = sqrt(n)*scale(x, FALSE, normx)
if(method=="sequential"&missing(s.list)) s.list=1:min(ncol(x),round(nrow(x)/log(nrow(x))))
if(family=="gaussian")
{
ys = y
mu = mean(y*weights)
y = drop(y - mu)
#initial beta
gc()
if(method=="gsection")
{
k = 1
sL=s.min
sR=s.max
beta0R=beta0
beta0L=beta0
fit_L1=bess.lm(x,y,
beta0=beta0L,
s=sL,
weights=weights,
max.steps=max.steps)
nullmse=fit_L1$nullmse
fit_L=fit_L1
fit_R=bess.lm(x,y,
beta0=beta0R,
s=sR,
weights=weights,
max.steps=max.steps)
beta.fit=cbind(fit_L$beta,fit_R$beta)
mse=c(fit_L$mse,fit_R$mse)
lambda=c(fit_L$lambda,fit_R$lambda)
aic=c(fit_L$AIC,fit_R$AIC)
bic=c(fit_L$BIC,fit_R$BIC)
ebic=c(fit_L$EBIC,fit_R$EBIC)
beta0M=fit_R$beta
s.list=c(sL,sR)
while(k<=K.max)
{
sM <- round(sL + (sR-sL)*0.618)
s.list=c(s.list,sM)
fit_M=bess.lm(x,y,
beta0=beta0M,
s=sM,
weights=weights,
max.steps=max.steps)
cat(k,"-th iteration s.left:",sL," s.split:",sM," s.right:",sR,"\n",sep="")
beta0M=fit_M$beta
beta.fit=cbind(beta.fit,beta0M)
mse=c(mse, fit_M$mse)
lambda=c(lambda, fit_M$lambda)
aic=c(aic, fit_M$AIC)
bic=c(bic, fit_M$BIC)
ebic=c(ebic, fit_M$EBIC)
if(abs(fit_L$mse-fit_M$mse)/abs(nullmse*(sM-sL)) > epsilon &
abs(fit_R$mse-fit_M$mse)/abs(nullmse*(sM-sR)) < epsilon)
{
sR <- sM
fit_R=fit_M
}else if(abs(fit_L$mse-fit_M$mse)/abs(nullmse) > epsilon &
abs(fit_R$mse-fit_M$mse)/abs(nullmse) > epsilon)
{
sL <- sM
fit_L=fit_M
}else
{
sR=sM
fit_R=fit_M
sL=s.min
fit_L=fit_L1
}
if(sR-sL==1) break
fit_ML=bess.lm(x,y,
beta0=beta0M,
s=sM,
weights=weights,
max.steps=max.steps)
fit_MR=bess.lm(x,y,
beta0=beta0M,
s=sM,
weights=weights,
max.steps=max.steps)
if(abs(fit_ML$mse-fit_M$mse)/abs(fit_M$mse) > epsilon &
abs(fit_MR$mse-fit_M$mse)/abs(fit_M$mse) < epsilon)
{break}
k=k+1
}
}
if(method=="sequential")
{
nullmse=sum(weights*y^2)/n
#cat(nullmse,"\\n")
mse=vector()
lambda=vector()
aic=vector()
bic=vector()
ebic=vector()
for(k in 1:length(s.list))
{
#cat("select",s.list[k],"variables","\\n")
if(is.null(factor)){
if(k == 1){
fit=bess.lm(x,y,s=s.list[k],weights=weights,
max.steps=max.steps,beta0=beta0)
beta.fit = matrix(fit$beta)
}else{
fit=bess.lm(x,y,s=s.list[k],weights=weights,
max.steps=max.steps,beta0=beta.fit[,k-1,drop=TRUE])
beta.fit=cbind(beta.fit,fit$beta)
}
}else{
if(k == 1){
fit=gbess.lm(x,y,s=s.list[k],weights=weights,Gi=Gi,
max.steps=max.steps,beta0=beta0)
beta.fit = matrix(fit$beta)
s.list[k] = fit$gr_size
}else{
fit=gbess.lm(x,y,s=s.list[k],weights=weights,Gi=Gi,
max.steps=max.steps,beta0=beta.fit[,k-1,drop=TRUE])
beta.fit=cbind(beta.fit,fit$beta)
s.list[k] = fit$gr_size
}
}
#mse
mse[k]=fit$mse
#lambda
lambda[k]=fit$lambda
aic[k]=fit$AIC
bic[k]=fit$BIC
ebic[k]=fit$EBIC
}
}
beta.fit=sqrt(n)*(beta.fit)/normx
colnames(beta.fit) = s.list
rownames(beta.fit) = vn
coef0=mu-drop(t(beta.fit)%*%meanx)
names(coef0)=s.list
xbest=xs[,which(beta.fit[,ncol(beta.fit),drop=TRUE]!=0)]
bestmodel=lm(ys~xbest, weights=weights)
out=list(family="bess_gaussian",method=method,beta=beta.fit,coef0=coef0,
s.list=s.list,meanx=meanx,normx=normx,meany=mu,nsample=n,bestmodel=bestmodel,
mse=mse,nullmse=nullmse,AIC=aic,BIC=bic,EBIC=ebic,lambda=lambda,max.steps=max.steps,
factor=factor)
class(out)="bess"
return(out)
}
if(family=="binomial")
{
beta0=rep(0,ncol(x))
intercept=0
gc()
if(method=="gsection")
{
k = 1
sL=s.min
sR=s.max
beta0R=beta0
beta0L=beta0
coef0L=intercept
coef0R=intercept
fit_L1=bess.glm(x=x,y=y,
beta0=beta0L,
intercept=coef0L,
s=sL,
glm.max=glm.max,
max.steps=max.steps,
weights=weights)
nulldev=fit_L1$nulldeviance
fit_L=fit_L1
fit_R=bess.glm( x=x,y=y,
beta0=beta0R,
intercept=coef0R,
s=sR,
glm.max=glm.max,
max.steps=max.steps,
weights=weights)
beta.fit=cbind(fit_L$beta,fit_R$beta)
coef0.fit=c(fit_L$coef0,fit_R$coef0)
dev=c(fit_L$deviance,fit_R$deviance)
lambda=c(fit_L$lambda,fit_R$lambda)
beta0M=fit_R$beta
coef0M=fit_R$coef0
s.list=c(sL,sR)
aic=c(fit_L$AIC,fit_R$AIC)
bic=c(fit_L$BIC,fit_R$BIC)
ebic=c(fit_L$EBIC,fit_R$EBIC)
while(k<=K.max)
{
sM <- round(sL + (sR-sL)*0.618)
s.list=c(s.list,sM)
fit_M=bess.glm(x=x,y=y,
beta0=beta0M,
intercept=coef0M,
s=sM,
glm.max=glm.max,
max.steps=max.steps,
weights=weights)
cat(k,"-th iteration s.left:",sL," s.split:",sM," s.right:",sR,"\\n",sep="")
beta0M=fit_M$beta
beta.fit=cbind(beta.fit,beta0M)
coef0M=fit_M$coef0
coef0.fit=c(coef0.fit,coef0M)
dev=c(dev,fit_M$deviance)
lambda=c(lambda,fit_M$lambda)
aic=c(aic, fit_M$AIC)
bic=c(bic, fit_M$BIC)
ebic=c(ebic, fit_M$EBIC)
if(abs(fit_L$deviance-fit_M$deviance)/abs(nulldev*(sM-sL)) > epsilon &
abs(fit_R$deviance-fit_M$deviance)/abs(nulldev*(sM-sR)) < epsilon)
{
sR <- sM
fit_R=fit_M
}else if(abs(fit_L$deviance-fit_M$deviance)/abs(nulldev*(sM-sL)) > epsilon &
abs(fit_R$deviance-fit_M$deviance)/abs(nulldev*(sM-sR)) > epsilon)
{
sL <- sM
fit_L=fit_M
}else
{
sR=sM
fit_R=fit_M
sL=s.min
fit_L=fit_L1
}
if(sR-sL==1) break
fit_ML=bess.glm(x=x,y=y,
beta0=beta0M,
intercept=coef0M,
s=sM,
glm.max=glm.max,
max.steps=max.steps,
weights=weights)
fit_MR=bess.glm(x=x,y=y,
beta0=beta0M,
intercept=coef0M,
s=sM,
glm.max=glm.max,
max.steps=max.steps,
weights=weights)
#if(abs(fit_ML$deviance-fit_M$deviance)/abs(fit_M$deviance) > epsilon &
# abs(fit_MR$deviance-fit_M$deviance)/abs(fit_M$deviance) < epsilon)
# {break}
if(abs(fit_ML$deviance-fit_M$deviance)/abs(nulldev) > epsilon &
abs(fit_MR$deviance-fit_M$deviance)/abs(nulldev) < epsilon)
{break}
k=k+1
}
}
if(method=="sequential")
{
nulldev=-2*sum(weights*(y*log(0.5) + (1-y)*log(0.5)))
# if(abs(dev_L/nulldev)>0.5) dev_L=0
dev=vector()
lambda=vector()
aic=vector()
bic=vector()
ebic=vector()
for(k in 1:length(s.list))
{
#cat("select",s.list[k],"variables","\\n")
if(is.null(factor)){
if(k == 1){
fit=bess.glm(x=x,y=y,beta0=beta0,intercept=intercept,
s=s.list[k],max.steps=max.steps,glm.max=glm.max,
weights=weights)
beta.fit = matrix(fit$beta)
coef0.fit = fit$coef0
}else{
fit=bess.glm(x=x,y=y,beta0=beta.fit[,k-1,drop=TRUE],intercept=coef0.fit[k-1],
s=s.list[k],max.steps=max.steps,glm.max=glm.max,
weights=weights)
beta.fit = cbind(beta.fit,fit$beta)
coef0.fit = c(coef0.fit,fit$coef0)
}
}else{
if(k == 1){
fit=gbess.glm(x=x,y=y,Gi=Gi,beta0=beta0,intercept=intercept,
s=s.list[k],max.steps=max.steps,glm.max=glm.max,
weights=weights)
beta.fit = matrix(fit$beta)
coef0.fit = fit$coef0
s.list[k] = fit$gr_size
}else{
fit=gbess.glm(x,y,Gi=Gi,beta0=beta.fit[,k-1,drop=TRUE],intercept=coef0.fit[k-1],
s=s.list[k],max.steps=max.steps,glm.max=glm.max,
weights=weights)
beta.fit = cbind(beta.fit,fit$beta)
coef0.fit = c(coef0.fit,fit$coef0)
s.list[k] = fit$gr_size
}
}
dev[k]=fit$deviance
lambda[k]=fit$lambda
aic[k]=fit$AIC
bic[k]=fit$BIC
ebic[k]=fit$EBIC
}
}
beta.fit=sqrt(n)*(beta.fit)/normx
colnames(beta.fit) = s.list
rownames(beta.fit) = vn
coef0.fit = coef0.fit-drop(t(beta.fit)%*%meanx)
names(coef0.fit) = s.list
xbest=xs[,which(beta.fit[,ncol(beta.fit),drop=TRUE]!=0)]
bestmodel=glm(y~xbest, family=binomial, weights=weights)
if(!setequal(y_names,c(0,1)))
{
out=list(family="bess_binomial",method=method,beta=beta.fit,coef0=coef0.fit,s.list=s.list,
meanx=meanx,normx=normx,nsample=n,bestmodel=bestmodel,
deviance=dev,nulldeviance=nulldev,AIC=aic,BIC=bic,EBIC=ebic,
lambda=lambda,y_names=y_names,max.steps=max.steps,factor=factor)
class(out)="bess"
return(out)
}else
{
out=list(family="bess_binomial",method=method,beta=beta.fit,coef0=coef0.fit,s.list=s.list,
meanx=meanx,normx=normx,nsample=n,bestmodel=bestmodel,
deviance=dev,nulldeviance=nulldev,AIC=aic,BIC=bic,EBIC=ebic,
lambda=lambda,max.steps=max.steps,factor=factor)
class(out)="bess"
return(out)
}
}
if(family=="cox")
{
#normalize
mark=order(y[,1],decreasing = FALSE)
y=y[mark,]
weights=weights[mark]
x=x[mark,]
beta0=rep(0,p)
gc()
if(method=="gsection")
{
k = 1
sL=s.min
sR=s.max
beta0R=beta0
beta0L=beta0
fit_L1=bess.cox(x,y,
beta0=beta0L,
s=sL,
cox.max=cox.max,
max.steps=max.steps,
weights=weights)
nulldev=fit_L1$nulldeviance
fit_L=fit_L1
fit_R=bess.cox(x,y,
beta0=beta0R,
s=sR,
cox.max=cox.max,
max.steps=max.steps,
weights=weights)
beta.fit=cbind(fit_L$beta,fit_R$beta)
dev=c(fit_L$deviance,fit_R$deviance)
lambda=c(fit_L$lambda,fit_R$lambda)
beta0M=fit_R$beta
s.list=c(sL,sR)
aic=c(fit_L$AIC,fit_R$AIC)
bic=c(fit_L$BIC,fit_R$BIC)
ebic=c(fit_L$EBIC,fit_R$EBIC)
while(k<=K.max)
{
sM <- round(sL + (sR-sL)*0.618)
s.list=c(s.list,sM)
fit_M=bess.cox(x,y,
beta0=beta0M,
s=sM,
cox.max=cox.max,
max.steps=max.steps,
weights=weights)
cat(k,"-th iteration s.left:",sL," s.split:",sM," s.right:",sR,"\\n",sep="")
beta0M=fit_M$beta
beta.fit=cbind(beta.fit,beta0M)
dev=c(dev,fit_M$deviance)
lambda=c(lambda,fit_M$lambda)
aic=c(aic, fit_M$AIC)
bic=c(bic, fit_M$BIC)
ebic=c(ebic, fit_M$EBIC)
if(abs(fit_L$deviance-fit_M$deviance)/abs(nulldev*(sM-sL)) > epsilon &
abs(fit_R$deviance-fit_M$deviance)/abs(nulldev*(sM-sR)) < epsilon)
{
sR <- sM
fit_R=fit_M
}else if(abs(fit_L$deviance-fit_M$deviance)/abs(nulldev) > epsilon &
abs(fit_R$deviance-fit_M$deviance)/abs(nulldev) > epsilon)
{
sL <- sM
fit_L=fit_M
}else
{
sR=sM
fit_R=fit_M
sL=s.min
fit_L=fit_L1
}
if(sR-sL==1) break
fit_ML=bess.cox(x,y,
beta0=beta0M,
s=sM,
cox.max=cox.max,
max.steps=max.steps,
weights=weights)
fit_MR=bess.cox(x,y,
beta0=beta0M,
s=sM,
cox.max=cox.max,
max.steps=max.steps,
weights=weights)
if(abs(fit_ML$deviance-fit_M$deviance)/abs(fit_M$deviance) > epsilon &
abs(fit_MR$deviance-fit_M$deviance)/abs(fit_M$deviance) < epsilon)
{break}
k=k+1
}
}
if(method=="sequential")
{
dev=vector()
lambda=vector()
aic=vector()
bic=vector()
ebic=vector()
for(k in 1:length(s.list))
{
#cat("select",s.list[k],"variables","\\n")
if(is.null(factor)){
if(k == 1){
fit=bess.cox(x=x,y=y,beta0=beta0,
s=s.list[k],cox.max=cox.max,
max.steps=max.steps,weights=weights)
beta.fit = matrix(fit$beta)
}else{
fit=bess.cox(x=x,y=y,beta0=beta.fit[,k-1,drop=TRUE],
s=s.list[k],cox.max=cox.max,
max.steps=max.steps,weights=weights)
beta.fit = cbind(beta.fit,fit$beta)
}
}else{
if(k == 1){
fit=gbess.cox(x=x,y=y,Gi=Gi,beta0=beta0,
s=s.list[k],cox.max=cox.max,
max.steps=max.steps,weights=weights)
beta.fit = matrix(fit$beta)
s.list[k] = fit$gr_size
}else{
fit=gbess.cox(x,y,Gi=Gi,beta0=beta.fit[,k-1,drop=TRUE],
s=s.list[k],cox.max=cox.max,
max.steps=max.steps,weights=weights)
beta.fit = cbind(beta.fit,fit$beta)
s.list[k] = fit$gr_size
}
}
dev[k]=fit$deviance
lambda[k]=fit$lambda
aic[k]=fit$AIC
bic[k]=fit$BIC
ebic[k]=fit$EBIC
}
}
beta.fit=sqrt(n)*(beta.fit)/normx
colnames(beta.fit) = s.list
rownames(beta.fit) = vn
xbest=xs[,which(beta.fit[,ncol(beta.fit),drop=TRUE]!=0)]
bestmodel=coxph(Surv(y[,1],y[,2])~xbest, iter.max=cox.max, weights=weights)
nulldev = bestmodel$loglik[1]
out=list(family="bess_cox",method=method,beta=beta.fit,s.list=s.list,meanx=meanx,
normx=normx,nsample=n,bestmodel=bestmodel,
deviance=dev,nulldeviance=nulldev,AIC=aic,BIC=bic,EBIC=ebic,
lambda=lambda,max.steps=max.steps,factor=factor)
class(out)="bess"
return(out)
}
}
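# Illustrative usage sketch (assumes gen.data() from this package): the two tuning
# strategies implemented above, a golden-section search over the model size and a
# sequential sweep over a user-supplied s.list.
if (FALSE) {
  dat <- gen.data(n = 200, p = 50, family = "gaussian", K = 5)
  # Golden-section search for the model size between s.min and s.max
  fit_gs <- bess(dat$x, dat$y, family = "gaussian", method = "gsection",
                 s.min = 1, s.max = 20)
  # Sequential search over a list of candidate model sizes
  fit_seq <- bess(dat$x, dat$y, family = "gaussian", method = "sequential",
                  s.list = 1:10)
  print(fit_seq)
}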
|
/scratch/gouwar.j/cran-all/cranData/BeSS/R/bess.R
|
bess.cox=function(x, y, beta0, s, cox.max=20, max.steps=20, factor=NULL,
weights=rep(1,nrow(x)), normalize=FALSE)
{
if(missing(beta0)) beta0=rep(0,ncol(x))
if(!is.matrix(y)) y=as.matrix(y)
if(ncol(y)!=2) stop("Please input y with two columns!")
if(s>length(beta0))
{stop("s is too large")}
if(is.null(colnames(x))) colnames(x) = paste0("X",1:ncol(x))
if(!is.null(factor)){
if(!is.data.frame(x)) x = as.data.frame(x)
x[,factor] = apply(x[,factor,drop=FALSE], 2, function(x){
return(as.factor(x))
})
group = rep(1, ncol(x))
names(group) = colnames(x)
group[factor] = apply(x[,factor,drop=FALSE], 2,function(x) {length(unique(x))})-1
Gi = rep(1:ncol(x), times = group)
beta0 = rep(beta0, times = group)
x = model.matrix(~., data = x)[,-1]
fit = gbess.cox(x, y, Gi, beta0=beta0, s = s,
max.steps = max.steps, cox.max = cox.max,
weights = weights, normalize = normalize)
fit$factor = factor
return(fit)
}else{
x = as.matrix(x)
n = dim(x)[1]
p = dim(x)[2]
vn = dimnames(x)[[2]]
one = rep(1,n)
beta = beta0
names(beta) = vn
xs = x
weights = weights/mean(weights)
if(normalize)
{
mark=order(y[,1],decreasing = FALSE)
y=y[mark,]
x=x[mark,]
weights=weights[mark]
one = rep(1, n)
#center
meanx = drop(weights %*% x)/n
x = scale(x, meanx, FALSE)
#normalize
normx = sqrt(drop(weights %*% (x^2)))
nosignal = normx/sqrt(n) < .Machine$double.eps
if (any(nosignal)) normx[nosignal] = (.Machine$double.eps) * sqrt(n)
names(normx) = NULL
x = sqrt(n)*scale(x, FALSE, normx)
}
ind=which(y[,2]==0)
setA=getcox_A(x,beta,s,rep(0,p),status=ind,weights=weights)
l=1
A=list()
I=list()
A[[1]]=0
I[[1]]=seq(p)
A[[l+1]] = setA$A
I[[l+1]] = setA$I
while ((l <= max.steps))
{
beta[I[[l+1]]] = 0
cox=coxph(Surv(y[,1],y[,2])~x[,A[[l+1]]],weights=weights,eps=1e-8,iter.max=cox.max)
beta[A[[l+1]]]=cox$coefficients
setA=getcox_A(x,beta,s,A[[l+1]],status=ind,weights=weights)
A[[l+2]] = setA$A
I[[l+2]] = setA$I
if(setequal(A[[l+2]],A[[l]])|setequal(A[[l+2]],A[[l+1]])) {break}
else{l=l+1
gc()}
}
names(beta) = vn
xbest=xs[,which(beta!=0)]
bestmodel=coxph(Surv(y[,1],y[,2])~xbest, weights=weights, iter.max=cox.max)
dev=-2*cox$loglik[2]
nulldev=-2*cox$loglik[1]
aic=dev+2*s
bic=dev+log(n)*s
ebic=dev+(log(n)+2*log(p))*s
if(normalize)
{
beta=sqrt(n)*beta/normx
}
return(list(family="bess_cox",beta=beta,nsample=n,deviance=dev,bestmodel=bestmodel,
nulldeviance=nulldev,lambda=setA$max_T^2/2,AIC=aic,BIC=bic,EBIC=ebic,max.steps=max.steps))
}
}
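# Illustrative usage sketch (assumes gen.data() from this package): bess.cox() expects
# y as a two-column matrix of (time, status), as returned by gen.data(family = "cox").
if (FALSE) {
  dat <- gen.data(n = 200, p = 20, family = "cox", K = 5, scal = 10)
  fit <- bess.cox(dat$x, dat$y, s = 5, cox.max = 20, max.steps = 20)
  fit$beta[fit$beta != 0]   # coefficients of the selected predictors
}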
|
/scratch/gouwar.j/cran-all/cranData/BeSS/R/bess.cox.R
|
bess.glm=function(x, y, beta0, intercept=0, s, max.steps=20,
glm.max=1e6, factor = NULL,
weights=rep(1,nrow(x)), normalize=FALSE)
{
if(length(unique(y))!=2) stop("Please input binary variable!")
if(missing(beta0)) beta0=rep(0,ncol(x))
if(s>length(beta0))
{stop("s is too large")}
if(is.null(colnames(x))) colnames(x) = paste0("X",1:ncol(x),"g")
if(!is.null(factor)){
if(!is.data.frame(x)) x = as.data.frame(x)
x[,factor] = apply(x[,factor,drop=FALSE], 2, function(x){
return(as.factor(x))
})
group = rep(1, ncol(x))
names(group) = colnames(x)
group[factor] = apply(x[,factor,drop=FALSE], 2,function(x) {length(unique(x))})-1
Gi = rep(1:ncol(x), times = group)
beta0 = rep(beta0, times = group)
x = model.matrix(~., data = x)[,-1]
fit = gbess.glm(x, y, Gi, beta0=beta0, intercept=intercept, s = s,
max.steps = max.steps, glm.max=glm.max,
weights = weights, normalize = normalize)
fit$factor = factor
return(fit)
}else{
x = as.matrix(x)
n = dim(x)[1]
p = dim(x)[2]
vn = dimnames(x)[[2]]
one = rep(1,n)
beta = beta0
names(beta) = vn
xs = x
weights = weights/mean(weights)
if(normalize)
{
meanx = drop(weights %*% x)/n
x = scale(x, meanx, FALSE)
normx = sqrt(drop(weights %*% (x^2)))
nosignal = normx/sqrt(n) < .Machine$double.eps
if (any(nosignal)) normx[nosignal] = (.Machine$double.eps) * sqrt(n)
names(normx) = NULL
x = sqrt(n)*scale(x, FALSE, normx)
}
setA=get_A(x,y,beta,intercept,s,rep(0,p),weights)
pr=setA$p
l=1
A=list()
I=list()
A[[1]]=0
I[[1]]=seq(p)
A[[l+1]] = setA$A
I[[l+1]] = setA$I
S=1:nrow(x)
while (l <= max.steps)
{
beta[I[[l+1]]] = 0
if(s>=2)
{
logit=glmnet(x[,A[[l+1]]],y,family="binomial",lambda = 0,maxit=glm.max,weights=weights)
beta[A[[l+1]]]=logit$beta
coef0=logit$a0
}else{
logit=glm(y~x[,A[[l+1]]],family="binomial",weights=weights)
beta[A[[l+1]]]=logit$coefficients[-1]
coef0=logit$coefficients[1]
}
setA=get_A(x,y,beta,coef0,s,A[[l+1]],weights)
pr=setA$p
A[[l+2]] = setA$A
I[[l+2]] = setA$I
if(setequal(A[[l+2]],A[[l]])|setequal(A[[l+2]],A[[l+1]])) {break}
else{l=l+1
gc()}
}
#dev=logit$deviance
names(beta) = vn
xbest=xs[,which(beta!=0)]
bestmodel=glm(y~xbest, family=binomial, weights=weights)
dev=-2*sum((weights*((y*log(pr) + (1-y)*log(1-pr))))[which(pr>1e-20&pr<1-1e-20)])
nulldev=-2*sum(weights*(y*log(0.5) + (1-y)*log(0.5)))
aic=dev+2*s
bic=dev+log(n)*s
ebic=dev+(log(n)+2*log(p))*s
if(normalize)
{
beta=sqrt(n)*beta/normx
coef0=coef0-sum(beta*meanx)
}
return(list(family="bess_binomial",beta=beta,coef0=coef0,nsample=n,bestmodel=bestmodel,
deviance=dev,nulldeviance=nulldev,
lambda=setA$max_T^2/2,p=pr,AIC=aic,BIC=bic,EBIC=ebic,max.steps=max.steps))
}
}
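# Illustrative usage sketch (assumes gen.data() from this package): bess.glm() fits a
# best-subset logistic model of size s to a binary (0/1) response.
if (FALSE) {
  dat <- gen.data(n = 300, p = 20, family = "binomial", K = 5)
  fit <- bess.glm(dat$x, dat$y, s = 5, max.steps = 20, glm.max = 1e6)
  fit$coef0              # fitted intercept
  which(fit$beta != 0)   # indices of the selected predictors
}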
|
/scratch/gouwar.j/cran-all/cranData/BeSS/R/bess.glm.R
|
bess.lm=function(x, y, beta0, s, max.steps=20, factor = NULL,
weights=rep(1,nrow(x)), normalize=FALSE)
{
if(missing(beta0)) beta0=rep(0,ncol(x))
if(s>length(beta0))
{stop("s is too large")}
if(is.null(colnames(x))) colnames(x) = paste0("X",1:ncol(x),"g")
if(!is.null(factor)){
if(!is.data.frame(x)) x = as.data.frame(x)
x[,factor] = apply(x[,factor,drop=FALSE], 2, function(x){
x = as.factor(x)
})
group = rep(1, ncol(x))
names(group) = colnames(x)
group[factor] = apply(x[,factor,drop=FALSE], 2,function(x) {length(unique(x))})-1
Gi = rep(1:ncol(x), times = group)
beta0 = rep(beta0, times = group)
x = model.matrix(~., data = x)[,-1]
fit = gbess.lm(x, y, Gi, beta0, s = s, max.steps = max.steps,
weights = weights, normalize = normalize)
fit$factor = factor
return(fit)
}else{
x = as.matrix(x)
n = dim(x)[1]
p = dim(x)[2]
vn = dimnames(x)[[2]]
one = rep(1,n)
beta=beta0
names(beta) = vn
xs=x
ys=y
weights = weights/mean(weights)
if(normalize)
{
#center
meanx = drop(weights %*% x)/n
x = scale(x, meanx, FALSE)
mu = mean(y*weights)
y = drop(y - mu)
#normalize
normx = sqrt(drop(weights %*% (x^2)))
nosignal = normx/sqrt(n) < (.Machine$double.eps)
if (any(nosignal)) normx[nosignal] = (.Machine$double.eps) * sqrt(n)
names(normx) = NULL
x = sqrt(n)*scale(x, FALSE, normx)
}
fit=bess_lm(x*sqrt(weights),y*sqrt(weights),s,max.steps,beta0)
beta=fit$beta
names(beta) = vn
xbest=xs[,which(beta!=0)]
bestmodel=lm(ys~xbest, weights = weights)
lambda=fit$max_T^2/2
mse=mean(weights*(y-x%*%beta)^2)
nullmse=mean(weights*(y^2))
aic=n*log(mse)+2*s
bic=n*log(mse)+log(n)*s
ebic=n*log(mse)+(log(n)+2*log(p))*s
if(normalize)
{
beta=sqrt(n)*beta/normx
coef0=mu-sum(beta*meanx)
return(list(family="bess_gaussian",beta=beta,coef0=coef0,nsample=n,bestmodel=bestmodel,
lambda=lambda,mse=mse,nullmse=nullmse,AIC=aic,BIC=bic,EBIC=ebic,max.steps=max.steps))
}else return(list(family="bess_gaussian",beta=beta,coef0=0,nsample=n,bestmodel=bestmodel,
lambda=lambda,mse=mse,nullmse=nullmse,AIC=aic,BIC=bic,EBIC=ebic,max.steps=max.steps))
}
}
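# Illustrative usage sketch (assumes gen.data() from this package): a single
# best-subset linear fit of size s via the bess_lm() primal-dual active set routine.
if (FALSE) {
  dat <- gen.data(n = 200, p = 20, family = "gaussian", K = 5)
  fit <- bess.lm(dat$x, dat$y, s = 5, normalize = TRUE)
  c(mse = fit$mse, AIC = fit$AIC, BIC = fit$BIC)
}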
|
/scratch/gouwar.j/cran-all/cranData/BeSS/R/bess.lm.R
|
bess.one = function(x, y, family = c("gaussian", "binomial", "cox"),
s = 1,
max.steps = 15,
glm.max = 1e6,
cox.max = 20,
factor = NULL,
weights = rep(1,nrow(x)),
normalize = TRUE)
{
family <- match.arg(family)
if(ncol(x)==1|is.vector(x)) stop("x should have at least two columns!")
if(missing(family)) stop("Please input family!")
if(family=="binomial")
{
if(is.factor(y)){
y = as.character(y)
}
if(length(unique(y))!=2) stop("Please input binary variable!")else
if(setequal(y_names<-unique(y),c(0,1))==FALSE)
{
y[which(y==unique(y)[1])]=0
y[which(y==unique(y)[2])]=1
y=as.numeric(y)
}
}
if(family=="cox")
{
if(!is.matrix(y)) y=as.matrix(y)
if(ncol(y)!=2) stop("Please input y with two columns!")
}
if(is.vector(y))
{
if(nrow(x)!=length(y)) stop("Rows of x must be the same as length of y!")
}else{
if(nrow(x)!=nrow(y)) stop("Rows of x must be the same as rows of y!")
}
beta0=rep(0,ncol(x))
if(s>length(beta0))
{stop("s is too large")}
if(family=="gaussian")
{
out=bess.lm(x=x,y=y,beta0=beta0,s=s,max.steps=max.steps,factor=factor,
weights=weights,normalize=normalize)
class(out)="bess.one"
return(out)
}
if(family=="binomial")
{
fit = bess.glm(x=x,y=y,beta0=beta0,
intercept=0,s=s,
max.steps=max.steps,
glm.max=glm.max,
factor=factor,
weights=weights,
normalize=normalize)
if(!setequal(y_names,c(0,1)))
{
fit$y_names = y_names
class(fit)="bess.one"
return(fit)
}else
{
class(fit)="bess.one"
return(fit)
}
}
if(family=="cox")
{
out=bess.cox(x=x,y=y,beta0=beta0,
s=s,
cox.max=cox.max,
max.steps=max.steps,
factor=factor,
weights=weights,
normalize=normalize)
class(out)="bess.one"
return(out)
}
}
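# Illustrative usage sketch (assumes gen.data() from this package): bess.one() is the
# single-model-size wrapper that dispatches to bess.lm(), bess.glm() or bess.cox()
# according to 'family'.
if (FALSE) {
  dat <- gen.data(n = 200, p = 20, family = "binomial", K = 5)
  fit <- bess.one(dat$x, dat$y, family = "binomial", s = 5)
  summary(fit)
}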
|
/scratch/gouwar.j/cran-all/cranData/BeSS/R/bess.one.R
|
coef.bess=function(object, sparse=TRUE, type = c("ALL", "AIC", "BIC", "EBIC"),...)
{
type <- match.arg(type)
if(!is.null(object$coef0)){
beta=rbind(intercept=object$coef0, object$beta)
rownames(beta)[1] = "(intercept)"
} else beta=object$beta
if(sparse==TRUE)
{
beta=Matrix(beta,sparse = TRUE)
if(type == "ALL") {
return(beta)
}else return(Matrix(beta[,which.min(object[[type]])], sparse = TRUE))
}else {
if(type == "ALL") {
return(beta)
}else return(beta[,which.min(object[[type]])])
}
}
coef.bess.one=function(object,sparse=TRUE, ...)
{
if(!is.null(object$coef0)){
beta=c(intercept=object$coef0, object$beta)
names(beta)[1] = "(intercept)"
} else beta=object$beta
if(sparse==TRUE)
{
beta=matrix(beta,byrow =TRUE)
beta=Matrix(beta,sparse = TRUE)
return(beta)
}else return(beta)
}
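# Illustrative usage sketch (assumes gen.data() and bess() from this package):
# coefficients can be returned for the whole path ("ALL") or for the model that
# minimises a chosen information criterion.
if (FALSE) {
  dat <- gen.data(n = 200, p = 20, family = "gaussian", K = 5)
  fit <- bess(dat$x, dat$y, family = "gaussian", method = "sequential", s.list = 1:10)
  coef(fit, sparse = TRUE, type = "ALL")   # sparse matrix, one column per model size
  coef(fit, type = "BIC")                  # coefficients of the BIC-optimal model
}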
|
/scratch/gouwar.j/cran-all/cranData/BeSS/R/coef.bess.R
|
deviance.bess=function(object,...){
n=object$nsample
if(object$family!="bess_gaussian"){
deviance=object$deviance
nulldeviance=object$nulldeviance
out=c(nulldeviance, deviance)
}else{
deviance=n*log(object$mse)
nulldeviance=n*log(object$nullmse)
out=c(nulldeviance, deviance)
}
names(out)=c('nulldeviance',colnames(object$beta))
return(out)
}
deviance.bess.one=function(object,...)
{
n=object$nsample
if(object$family!="bess_gaussian"){
deviance=object$deviance
nulldeviance=object$nulldeviance
out=c(nulldeviance, deviance)
}else{
deviance=n*log(object$mse)
nulldeviance=n*log(object$nullmse)
out=c(nulldeviance, deviance)
}
names(out)=c('nulldeviance','deviance')
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/BeSS/R/deviance.bess.R
|
gbess.cox = function(x, y, Gi, beta0, s, cox.max=20, max.steps=10,
weights=rep(1,nrow(x)), normalize=FALSE)
{
if(missing(beta0)) beta0=rep(0,ncol(x))
if(!is.matrix(y)) y=as.matrix(y)
if(ncol(y)!=2) stop("Please input y with two columns!")
if(missing(beta0)) beta0=rep(0,ncol(x))
if(s>length(beta0))
{stop("s is too large")}
# initial
n = dim(x)[1]
p = dim(x)[2]
vn = dimnames(x)[[2]]
one = rep(1,n)
names(beta0) = vn
xs = x
weights = weights/mean(weights)
orderGi = order(Gi)
x = x[,orderGi]
Gi = Gi[orderGi]
gi = unique(Gi)
gi_index = match(gi, Gi)
N = length(gi)
if(normalize)
{
mark=order(y[,1],decreasing = FALSE)
y=y[mark,]
x=x[mark,]
weights=weights[mark]
one = rep(1, n)
#center
meanx = drop(weights %*% x)/n
x = scale(x, meanx, FALSE)
#normalize
normx = sqrt(drop(weights %*% (x^2)))
nosignal = normx/sqrt(n) < .Machine$double.eps
if (any(nosignal)) normx[nosignal] = (.Machine$double.eps) * sqrt(n)
names(normx) = NULL
x = sqrt(n)*scale(x, FALSE, normx)
}
beta = beta0
A0 = NULL
B = rep(0,p+1)
for(k in 1:max.steps){
setA = ggetcox_A(x, Gi, gi_index, s, beta, n, p, N, y[,2], weights, B)
A = setA$A+1
B = setA$B+1
beta = rep(0,p)
gr_size = setA$gr_size
cox=coxph(Surv(y[,1],y[,2])~x[,B],weights=weights,eps=1e-8,iter.max=cox.max)
beta[B]=cox$coefficients
if(setequal(A,A0)==TRUE){
break;
}
A0 <- A
}
if(normalize)
{
beta=sqrt(n)*beta/normx
}
beta[orderGi] = beta
names(beta) = vn
A = orderGi[A]
B = orderGi[B]
s=length(B)
xbest=xs[,which(beta!=0)]
bestmodel=coxph(Surv(y[,1],y[,2])~xbest, weights=weights, iter.max=cox.max)
dev=-2*cox$loglik[2]
nulldev=-2*cox$loglik[1]
aic=dev+2*s
bic=dev+log(n)*s
ebic=dev+(log(n)+2*log(p))*s
return(list(family="bess_cox",beta=beta,nsample=n,bestmodel=bestmodel,
deviance=dev,nulldeviance=nulldev,
lambda=setA$max_T^2/2,AIC=aic,BIC=bic,EBIC=ebic,max.steps=max.steps,
gr_size=gr_size))
}
|
/scratch/gouwar.j/cran-all/cranData/BeSS/R/gbess.cox.R
|
gbess.glm = function(x, y, Gi, beta0, intercept=0, s, max.steps = 10, glm.max=1e6,
weights=rep(1,nrow(x)), normalize=FALSE)
{
if(length(unique(y))!=2) stop("Please input binary variable!")
if(missing(beta0)) beta0=rep(0,ncol(x))
if(s>length(beta0))
{stop("s is too large")}
# initial
n = dim(x)[1]
p = dim(x)[2]
vn = dimnames(x)[[2]]
one = rep(1,n)
names(beta0) = vn
xs = x
weights = weights/mean(weights)
orderGi = order(Gi)
x = x[,orderGi]
Gi = Gi[orderGi]
gi = unique(Gi)
gi_index = match(gi, Gi)
N = length(gi)
if(normalize)
{
meanx = drop(weights %*% x)/n
x = scale(x, meanx, FALSE)
normx = sqrt(drop(weights %*% (x^2)))
nosignal = normx/sqrt(n) < .Machine$double.eps
if (any(nosignal)) normx[nosignal] = (.Machine$double.eps) * sqrt(n)
names(normx) = NULL
x = sqrt(n)*scale(x, FALSE, normx)
}
beta = beta0
coef0 = intercept
A0 = NULL
B = rep(0,p+1)
for(k in 1:max.steps){
setA = gget_A(x, y, Gi, gi_index, s, beta, coef0, n, p, N, weights, B)
A = setA$A+1
B = setA$B+1
beta = rep(0,p)
gr_size = setA$gr_size
if(length(B)>=2)
{
logit=glmnet(x[,B],y,family="binomial",lambda = 0,maxit=glm.max, weights = weights)
beta[B]=logit$beta
coef0=logit$a0
}else{
logit=glm(y~x[,B],family="binomial", weights = weights)
beta[B]=logit$coefficients[-1]
coef0=logit$coefficients[1]
}
if(setequal(A,A0)==TRUE){
break;
}
A0 <- A
}
if(normalize)
{
beta=sqrt(n)*beta/normx
coef0=coef0-sum(beta*meanx)
}
beta[orderGi] = beta
names(beta) = vn
A = orderGi[A]
B = orderGi[B]
s=length(B)
eta = x%*%beta
pr = exp(eta)/(1+exp(eta))
xbest=xs[,which(beta!=0)]
bestmodel=glm(y~xbest, family="binomial", weights = weights)
dev=-2*sum((weights*((y*log(pr) + (1-y)*log(1-pr))))[which(pr>1e-20&pr<1-1e-20)])
nulldev=-2*sum(weights*(y*log(0.5) + (1-y)*log(0.5)))
aic=dev+2*s
bic=dev+log(n)*s
ebic=dev+(log(n)+2*log(p))*s
return(list(family="bess_binomial",beta=beta,coef0=coef0,nsample=n,bestmodel=bestmodel,
deviance=dev,nulldeviance=nulldev,
lambda=setA$max_T^2/2,p=p,AIC=aic,BIC=bic,EBIC=ebic,max.steps=max.steps,
gr_size=gr_size))
}
|
/scratch/gouwar.j/cran-all/cranData/BeSS/R/gbess.glm.R
|
gbess.lm = function(x, y, Gi, beta0, s, max.steps = 20,
weights=rep(1,nrow(x)), normalize=FALSE)
{
if(missing(beta0)) beta0=rep(0,ncol(x))
if(s>length(beta0))
{stop("s is too large")}
# initial
n = dim(x)[1]
p = dim(x)[2]
vn = dimnames(x)[[2]]
beta=beta0
names(beta) = vn
xs=x
ys=y
weights = weights/mean(weights)
orderGi = order(Gi)
x = x[,orderGi]
Gi = Gi[orderGi]
gi = unique(Gi)
gi_index = match(gi, Gi)
N = length(gi)
if(normalize)
{
#center
meanx = drop(weights %*% x)/n
x = scale(x, meanx, FALSE)
mu = mean(y*weights)
y = drop(y - mu)
#normalize
normx = sqrt(drop(weights %*% (x^2)))
nosignal = normx/sqrt(n) < (.Machine$double.eps)
if (any(nosignal)) normx[nosignal] = (.Machine$double.eps) * sqrt(n)
names(normx) = NULL
x = sqrt(n)*scale(x, FALSE, normx)
}
x = x*sqrt(weights)
y = y*sqrt(weights)
PhiG = lapply(1:N, function(i){
idx <- which(Gi==i)
if(length(idx) == 1)
return(-sqrt(t(x[,idx])%*%x[,idx])) else{
return(-EigenR(t(x[,idx])%*%x[,idx]))
}
})
invPhiG = lapply(PhiG, solve)
fit = gbess_lm(X=x, y=y, G=Gi, index=gi_index, PhiG=PhiG, invPhiG=invPhiG,
T0=s, max_steps = max.steps, beta0 = beta0,
n=n, p=p, N=N)
if(normalize)
{
beta=sqrt(n)*beta/normx
coef0=mu-sum(beta*meanx)
}
beta = fit$beta
beta[orderGi] = beta
names(beta) = vn
A = fit$A+1
A = orderGi[A]
B = fit$B+1
B = orderGi[B]
xbest=xs[,which(beta!=0)]
bestmodel=lm(ys~xbest, weights = weights)
lambda=fit$max_T^2/2
mse=mean(weights*(y-x%*%beta)^2)
nullmse=mean(weights*(y^2))
aic=n*log(mse)+2*s
bic=n*log(mse)+log(n)*s
ebic=n*log(mse)+(log(n)+2*log(p))*s
if(normalize)
{
beta=sqrt(n)*beta/normx
coef0=mu-sum(beta*meanx)
return(list(family="bess_gaussian",beta=beta,coef0=coef0,nsample=n,bestmodel=bestmodel,
lambda=lambda,mse=mse,nullmse=nullmse,AIC=aic,BIC=bic,EBIC=ebic,max.steps=max.steps,
gr_size=fit$gr_size))
}else return(list(family="bess_gaussian",beta=beta,coef0=0,nsample=n,bestmodel=bestmodel,
lambda=lambda,mse=mse,nullmse=nullmse,AIC=aic,BIC=bic,EBIC=ebic,max.steps=max.steps,
gr_size=fit$gr_size))
}
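# Illustrative usage sketch (assumes gen.data() from this package; the grouping below
# is hypothetical): gbess.lm() selects whole groups of columns, with group membership
# passed through Gi (one integer group id per column).
if (FALSE) {
  dat <- gen.data(n = 200, p = 20, family = "gaussian", K = 4)
  Gi <- rep(1:10, each = 2)   # 10 groups of 2 adjacent columns
  fit <- gbess.lm(dat$x, dat$y, Gi = Gi, s = 3, normalize = TRUE)
  fit$gr_size                 # group sizes reported by the solver
}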
|
/scratch/gouwar.j/cran-all/cranData/BeSS/R/gbess.lm.R
|
gen.data=function(n, p, family, K, rho = 0, sigma = 1, beta = NULL,
censoring = TRUE, c = 1, scal)
{
one=rep(1,n)
zero=rep(0,n)
X=rnorm(n*p)
X=matrix(X,n,p)
X = scale(X, TRUE, FALSE)
normX = sqrt(drop(one %*% (X^2)))
X = sqrt(n)*scale(X, FALSE, normX)
gc()
x=X+rho*(cbind(zero,X[,1:(p-2)],zero)+cbind(zero,X[,3:p],zero))
colnames(x)=paste0('X',1:ncol(x))
rm(X)
gc()
nonzero=sample(1:p,K)
Tbeta=rep(0,p)
if(family=="gaussian")
{
m=5*sqrt(2*log(p)/n)
M=100*m
if(is.null(beta)) Tbeta[nonzero]=runif(K,m,M) else Tbeta=beta
y=drop(x %*% Tbeta+rnorm(n,0,sigma^2))
return(list(x=x,y=y,Tbeta=Tbeta))
}
if(family=="binomial")
{
m=5*sigma*sqrt(2*log(p)/n)
if(is.null(beta)) Tbeta[nonzero]=runif(K,2*m,10*m) else Tbeta=beta
ex=exp(drop(x %*% Tbeta))
logit=ex/(1+ex)
y=rbinom(n=n,size=1,prob=logit)
return(list(x=x,y=y,Tbeta=Tbeta))
}
if(family=="cox")
{
m=5*sigma*sqrt(2*log(p)/n)
if(is.null(beta)) Tbeta[nonzero]=runif(K,2*m,10*m) else Tbeta=beta
time = (-log(runif(n))/drop(exp(x%*%Tbeta)))^(1/scal)
if (censoring) {
ctime = c*runif(n)
status = (time < ctime) * 1
censoringrate = 1 - sum(status)/n
cat("censoring rate:", censoringrate, "\n")
time = pmin(time, ctime)
}else {
status = rep(1, times = n)
cat("no censoring", "\n")
}
return(list(x=x,y=cbind(time,status),Tbeta=Tbeta))
}
}
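# Illustrative usage sketch: simulated designs for each family; note that the Cox
# generator requires the 'scal' argument (no default).
if (FALSE) {
  d1 <- gen.data(n = 200, p = 50, family = "gaussian", K = 5, rho = 0.2)
  d2 <- gen.data(n = 200, p = 50, family = "binomial", K = 5)
  d3 <- gen.data(n = 200, p = 50, family = "cox", K = 5, c = 10, scal = 10)
  which(d1$Tbeta != 0)   # positions of the non-zero true coefficients
}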
|
/scratch/gouwar.j/cran-all/cranData/BeSS/R/gen.data.R
|
logLik.bess=function(object,...){
n=object$nsample
if(object$family!="bess_gaussian"){
deviance=-object$deviance/2
nulldeviance=-object$nulldeviance/2
out=c(nulldeviance, deviance)
}else{
deviance=-n*log(object$mse)/2
nulldeviance=-n*log(object$nullmse)/2
out=c(nulldeviance, deviance)
}
names(out)=c('nullLoglik',colnames(object$beta))
return(out)
}
logLik.bess.one=function(object,...){
n=object$nsample
if(object$family!="bess_gaussian"){
deviance=-object$deviance/2
nulldeviance=-object$nulldeviance/2
out=c(nulldeviance, deviance)
}else{
deviance=-n*log(object$mse)/2
nulldeviance=-n*log(object$nullmse)/2
out=c(nulldeviance, deviance)
}
names(out)=c('nullLoglik','Loglik')
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/BeSS/R/logLik.bess.R
|
plot.bess=function(x,type=c("loss","coefficients","both"),breaks=TRUE,K=NULL, ...)
{
object=x
type <- match.arg(type)
s.list=object$s.list
if(is.null(K)) K=s.list[length(s.list)]
if(object$family=="bess_gaussian") dev=object$mse else dev=object$deviance
beta=object$beta
s_order=order(s.list)
s.list=s.list[s_order]
dev=dev[s_order]
beta=beta[,s_order]
beta=cbind(rep(0,nrow(object$beta)),beta)
if(type=="loss")
{
plot_loss(dev,s.list,K,breaks, mar = c(3,4,3,4))
}
if(type=="coefficients")
{
plot_solution(beta,c(0, s.list),K,breaks, mar = c(3,4,3,4))
}
if(type=="both")
{
layout(matrix(c(1,2),2,1,byrow=TRUE),heights=c(0.45,0.55), widths=1)
oldpar <- par(las=1, mar=c(2,4,2,4), oma=c(2.5,0.5,1.5,0.5))
plot_loss(dev,s.list,K,breaks,show_x = FALSE)
plot_solution(beta, c(0, s.list), K,breaks)
par(oldpar)
par(mfrow=c(1,1))
}
}
plot_loss <- function(loss,df,K,breaks=TRUE,show_x=TRUE, mar = c(0,4,2,4)){
plot.new() # empty plot
plot.window(range(df), range(loss), xaxs="i")
oldpar <- par(mar = mar, # no bottom spacing
lend="square") # square line ends
par(new=TRUE) # add to the plot
if(show_x)
{
plot(df, loss, type = "b", ylab=expression(L(beta)),
xlim=c(0,max(df)))
}else
{
plot(df, loss, type = "b", ylab=expression(L(beta)),
xlim=c(0,max(df)), xaxt='n')
}
title(xlab='Model size', line = 2)
if(breaks)abline(v=K, col="orange", lwd=1.5, lty=2) ## add a vertical line
grid()
axis(2)
#axis(4, pos=par("usr")[1], line=0.5) # this would plot them 'inside'
# box() # outer box
par(oldpar)
}
plot_solution <- function(beta, df, K, breaks = TRUE, mar = c(3,4,0,4)){
p <- nrow(beta)
plot.new() # empty plot
plot.window(range(df), range(beta), xaxs="i")
oldpar <- par(mar=mar, # no top spacing
lend="square") # square line ends
par(new=TRUE) # add to the plot
plot(df, beta[1,], type="l",col=1, xlim=c(0,max(df)),xlab="",
ylim=range(beta),ylab=expression(beta))
title(xlab='Model size', line = 2)
for(i in 2:p){
lines(df, beta[i,], col=i,xlim=c(0,p+1))
}
if(breaks) abline(v=K, col="orange", lwd=1.5, lty=2) ## add a vertical line
#matplot(df, t(beta), lty = 1, ylab="", xaxs="i",type = "l",xlim=c(0,p+1))
nnz = p
xpos = max(df)-0.8
pos = 4
xpos = rep(xpos, nnz)
ypos = beta[, ncol(beta)]
text(xpos, ypos, 1:p, cex = 0.8, pos = pos)
grid()
axis(2)
box() # outer box
par(oldpar)
}
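# Illustrative usage sketch (assumes gen.data() and bess() from this package): plots
# the loss and coefficient paths against model size, with an optional vertical break
# at a chosen size K.
if (FALSE) {
  dat <- gen.data(n = 200, p = 20, family = "gaussian", K = 5)
  fit <- bess(dat$x, dat$y, family = "gaussian", method = "sequential", s.list = 1:10)
  plot(fit, type = "both", breaks = TRUE, K = 5)
}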
|
/scratch/gouwar.j/cran-all/cranData/BeSS/R/plot.bess.R
|
predict.bess=function(object, newdata, type = c("ALL", "opt", "AIC", "BIC", "EBIC"),...)
{
type <- match.arg(type)
method = object$method
if(method == "gsection"&(type%in%c("AIC","BIC","EBIC"))) stop("method gsection shouldn't match type AIC, BIC or EBIC!")
if(method == "sequential"&type=="opt") stop("method sequential shouldn't match type opt!")
if(!is.null(object$factor)){
factor = c(object$factor)
if(!is.data.frame(newdata)) newdata = as.data.frame(newdata)
newdata[,factor] = apply(newdata[,factor,drop=FALSE], 2, function(x){
return(as.factor(x))
})
newdata = model.matrix(~., data = newdata)[,-1]
}
if(is.null(colnames(newdata))) {
newx = as.matrix(newdata)
}else{
vn = rownames(object$beta)
if(any(is.na(match(vn, colnames(newdata))))) stop("names of newdata don't match training data!")
newx = as.matrix(newdata[,vn])
}
if(object$family == "bess_gaussian")
{
betas = object$beta
coef0 = object$coef0
y = t(newx%*%betas)+coef0
if(type == "ALL"){
return(y)
}
if(type == "opt"){
return(y[nrow(y),,drop = TRUE])
}
return(y[which.min(object[[type]]),,drop = TRUE])
}
if(object$family == "bess_binomial")
{
betas = object$beta
coef = object$coef0
class = matrix(0,ncol(betas),nrow(newx))
for(i in 1:ncol(betas))
{
class[i,] = as.numeric(exp(newx%*%betas[,i]+coef[i])/(1+exp(newx%*%betas[,i]+coef[i]))>0.5)
class[i,][is.na(class[i,])] = 1
if(!is.null(object$y_names))
{
class[which(class == 0,arr.ind = T)] = object$y_names[1]
class[which(class == 1,arr.ind = T)] = object$y_names[2]
}
}
if(type == "ALL"){
return(class)
}
if(type == "opt"){
return(class[nrow(class),,drop = TRUE])
}
return(class[which.min(object[[type]]),,drop = TRUE])
}
if(object$family=="bess_cox")
{
betas = object$beta
betax = newx%*%betas
if(type == "ALL"){
return(t(betax))
}
if(type == "opt"){
return(t(betax)[ncol(betax),,drop = TRUE])
}
return(betax[,which.min(object[[type]]),drop = TRUE])
}
}
predict.bess.one=function(object,newdata, ...)
{
if(!is.null(object$factor)){
factor = c(object$factor)
if(!is.data.frame(newdata)) newdata = as.data.frame(newdata)
newdata[,factor] = apply(newdata[,factor,drop=FALSE], 2, function(x){
return(as.factor(x))
})
newdata = model.matrix(~., data = newdata)[,-1]
}
if(is.null(colnames(newdata))) {
newx = as.matrix(newdata)
}else{
vn = names(object$beta)
if(any(is.na(match(vn, colnames(newdata))))) stop("names of newdata don't match training data!")
newx = as.matrix(newdata[,vn])
}
if(object$family=="bess_gaussian")
{
betas = object$beta
coef0 = object$coef0
y = drop(newx %*% betas)+coef0
return(y)
}
if(object$family == "bess_binomial")
{
betas = object$beta
coef = object$coef0
class = as.numeric(exp(newx%*%betas+coef)/(1+exp(newx%*%betas+coef))>0.5)
class[is.na(class)] = 1
if(!is.null(object$y_names))
{
class[which(class == 0,arr.ind = T)] = object$y_names[1]
class[which(class == 1,arr.ind = T)] = object$y_names[2]
}
return(class)
}
if(object$family == "bess_cox")
{
betas = object$beta
betax = newx%*%betas;
return(drop(betax))
}
}
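# Illustrative usage sketch (assumes gen.data() and bess() from this package): with
# method = "sequential" predictions can be taken at the AIC/BIC/EBIC optimum, while
# method = "gsection" fits use type = "opt".
if (FALSE) {
  dat <- gen.data(n = 200, p = 20, family = "gaussian", K = 5)
  fit <- bess(dat$x, dat$y, family = "gaussian", method = "sequential", s.list = 1:10)
  pred_bic <- predict(fit, newdata = dat$x, type = "BIC")
  mean((pred_bic - dat$y)^2)   # in-sample mean squared error
}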
|
/scratch/gouwar.j/cran-all/cranData/BeSS/R/predict.bess.R
|
print.bess=function(x, ...)
{
if(x$family=="bess_gaussian") print(cbind(Df=x$s.list,MSE=x$mse,AIC=x$AIC,BIC=x$BIC,EBIC=x$EBIC))else
print(cbind(Df=x$s.list,Dev=x$deviance,AIC=x$AIC,BIC=x$BIC,EBIC=x$EBIC))
}
|
/scratch/gouwar.j/cran-all/cranData/BeSS/R/print.bess.R
|
print.bess.one=function(x, ...)
{
if(x$family=="bess_gaussian") print(c(Df=sum(x$beta!=0),MSE=x$mse,AIC=x$AIC,BIC=x$BIC,EBIC=x$EBIC))else
print(c(Df=sum(x$beta!=0),Dev=x$deviance,AIC=x$AIC,BIC=x$BIC,EBIC=x$EBIC))
}
|
/scratch/gouwar.j/cran-all/cranData/BeSS/R/print.bess.one.R
|
summary.bess=function(object, ...){
beta = object$beta
if(object$method == "sequential"){
K.opt.aic = which.min(object$AIC)
K.opt.bic = which.min(object$BIC)
K.opt.ebic = which.min(object$EBIC)
predictors.aic = beta[,K.opt.aic]
predictors.bic = beta[,K.opt.bic]
predictors.ebic = beta[,K.opt.ebic]
if(sum(predictors.aic!=0)>1) predictor.a = "predictors" else predictor.a = "predictor"
if(sum(predictors.bic!=0)>1) predictor.b = "predictors" else predictor.b = "predictor"
if(sum(predictors.ebic!=0)>1) predictor.e = "predictors" else predictor.e = "predictor"
cat("-------------------------------------------------------------------------------\n")
cat(" Primal-dual active algorithm with tuning parameter determined by sequential method", "\n\n")
cat(" Best model determined by AIC includes" , sum(predictors.aic!=0), predictor.a, "with AIC =",
object$AIC[K.opt.aic], "\n\n")
cat(" Best model determined by BIC includes" , sum(predictors.bic!=0), predictor.b, "with BIC =",
object$BIC[K.opt.bic], "\n\n")
cat(" Best model determined by EBIC includes" , sum(predictors.ebic!=0), predictor.e, "with EBIC =",
object$EBIC[K.opt.ebic], "\n")
cat("-------------------------------------------------------------------------------\n")
} else {
cat("------------------------------------------------------------------------------\n")
cat(" Primal-dual active algorithm with tuning parameter determined by gsection method", "\n\n")
if(sum(beta[,ncol(beta)]!=0)>1) predictor = "predictors" else predictor = "predictor"
cat(" Best model includes", sum(beta[,ncol(beta)]!=0), predictor, "with", "\n\n")
if(logLik(object)[length(logLik(object))]>=0)
cat(" log-likelihood: ", logLik(object)[length(logLik(object))],"\n") else cat(" log-likelihood: ", logLik(object)[length(logLik(object))],"\n")
if(deviance(object)[length(deviance(object))]>=0)
cat(" deviance: ", deviance(object)[length(deviance(object))],"\n") else cat(" deviance: ", deviance(object)[length(deviance(object))],"\n")
if(object$AIC[length(object$AIC)]>=0)
cat(" AIC: ", object$AIC[length(object$AIC)],"\n") else cat(" AIC: ", object$AIC[length(object$AIC)],"\n")
if(object$BIC[length(object$BIC)]>=0)
cat(" BIC: ", object$BIC[length(object$BIC)],"\n") else cat(" BIC: ", object$BIC[length(object$BIC)],"\n")
if(object$EBIC[length(object$EBIC)]>=0)
cat(" EBIC: ", object$EBIC[length(object$EBIC)],"\n") else cat(" EBIC: ", object$EBIC[length(object$EBIC)],"\n")
cat("------------------------------------------------------------------------------\n")
}
}
|
/scratch/gouwar.j/cran-all/cranData/BeSS/R/summary.bess.R
|
summary.bess.one=function(object, ...){
max.steps = object$max.steps
df = sum(object$beta!=0)
predictors = names(which(object$beta!=0))
a=rbind(predictors, object$beta[predictors])
cat("----------------------------------------------------------------------\n")
cat(" Primal-dual active algorithm with maximum iteration being", max.steps, "\n\n")
cat(" Best model with k =", df, "includes predictors:", "\n\n")
print(object$beta[predictors])
cat("\n")
if(logLik(object)[2]>=0)
cat(" log-likelihood: ", logLik(object)[2],"\n") else cat(" log-likelihood: ", logLik(object)[2],"\n")
if(deviance(object)[2]>=0)
cat(" deviance: ", deviance(object)[2],"\n") else cat(" deviance: ", deviance(object)[2],"\n")
if(object$AIC>=0)
cat(" AIC: ", object$AIC,"\n") else cat(" AIC: ", object$AIC,"\n")
if(object$BIC>=0)
cat(" BIC: ", object$BIC,"\n") else cat(" BIC: ", object$BIC,"\n")
if(object$EBIC>=0)
cat(" EBIC: ", object$EBIC,"\n") else cat(" EBIC: ", object$EBIC,"\n")
cat("----------------------------------------------------------------------\n")
}
|
/scratch/gouwar.j/cran-all/cranData/BeSS/R/summary.bess.one.R
|
#' BeastJar
#'
#' Convenient packaging of the Bayesian Evolutionary Analysis Sampling Trees (BEAST) software package
#' to facilitate Markov chain Monte Carlo sampling techniques including Hamiltonian Monte Carlo,
#' bouncy particle sampling and zig-zag sampling.
#'
#' @docType package
#' @name BeastJar
#' @import rJava
#' @examples
#' # Example MCMC simulation using BEAST
#' #
#' # This function generates a Markov chain to sample from a simple normal distribution.
#' # It uses a random walk Metropolis kernel that is auto-tuning.
#'
#' if (supportsJava8()) {
#' # Set seed
#' seed <- 123
#' rJava::J("dr.math.MathUtils")$setSeed(rJava::.jlong(seed));
#'
#' # Set up simple model - Normal(mean = 1, sd = 2)
#' mean <- 1; sd <- 2
#' distribution <- rJava::.jnew("dr.math.distributions.NormalDistribution",
#' as.numeric(mean), as.numeric(sd))
#' model <- rJava::.jnew("dr.inference.distribution.DistributionLikelihood",
#' rJava::.jcast(distribution, "dr.math.distributions.Distribution"))
#' parameter <- rJava::.jnew("dr.inference.model.Parameter$Default", "p", 1.0,
#' as.numeric(-1.0 / 0.0), as.numeric(1.0 / 0.0))
#' model$addData(parameter)
#'
#' # Construct posterior
#' dummy <- rJava::.jnew("dr.inference.model.DefaultModel",
#' rJava::.jcast(parameter, "dr.inference.model.Parameter"))
#'
#' joint <- rJava::.jnew("java.util.ArrayList")
#' joint$add(rJava::.jcast(model, "dr.inference.model.Likelihood"))
#' joint$add(rJava::.jcast(dummy, "dr.inference.model.Likelihood"))
#'
#' joint <- rJava::new(rJava::J("dr.inference.model.CompoundLikelihood"), joint)
#'
#' # Specify auto-adapting random-walk Metropolis-Hastings transition kernel
#' operator <- rJava::.jnew("dr.inference.operators.RandomWalkOperator",
#' rJava::.jcast(parameter, "dr.inference.model.Parameter"),
#' 0.75,
#' rJava::J(
#' "dr.inference.operators.RandomWalkOperator"
#' )$BoundaryCondition$reflecting,
#' 1.0,
#' rJava::J("dr.inference.operators.AdaptationMode")$DEFAULT
#' )
#'
#' schedule <- rJava::.jnew("dr.inference.operators.SimpleOperatorSchedule",
#' as.integer(1000), as.numeric(0.0))
#'
#' schedule$addOperator(operator)
#'
#' # Set up what features of posterior to log
#' subSampleFrequency <- 100
#' memoryFormatter <- rJava::.jnew("dr.inference.loggers.ArrayLogFormatter", FALSE)
#' memoryLogger <-
#' rJava::.jnew("dr.inference.loggers.MCLogger",
#' rJava::.jcast(memoryFormatter, "dr.inference.loggers.LogFormatter"),
#' rJava::.jlong(subSampleFrequency), FALSE)
#' memoryLogger$add(parameter)
#'
#' # Execute MCMC
#' mcmc <- rJava::.jnew("dr.inference.mcmc.MCMC", "mcmc1")
#' mcmc$setShowOperatorAnalysis(FALSE)
#'
#' chainLength <- 100000
#'
#' mcmcOptions <- rJava::.jnew("dr.inference.mcmc.MCMCOptions",
#' rJava::.jlong(chainLength),
#' rJava::.jlong(10),
#' as.integer(1),
#' as.numeric(0.1),
#' TRUE,
#' rJava::.jlong(chainLength/100),
#' as.numeric(0.234),
#' FALSE,
#' as.numeric(1.0))
#'
#' mcmc$init(mcmcOptions,
#' joint,
#' schedule,
#' rJava::.jarray(memoryLogger, contents.class = "dr.inference.loggers.Logger"))
#'
#' mcmc$run()
#'
#' # Summarize logged posterior quantities
#' traces <- memoryFormatter$getTraces()
#' trace <- traces$get(as.integer(1))
#'
#' obj <- trace$getValues(as.integer(0),
#' as.integer(trace$getValueCount()))
#'
#' sample <- rJava::J("dr.inference.trace.Trace")$toArray(obj)
#'
#' outputStream <- rJava::.jnew("java.io.ByteArrayOutputStream")
#' printStream <- rJava::.jnew("java.io.PrintStream",
#' rJava::.jcast(outputStream, "java.io.OutputStream"))
#'
#' rJava::J("dr.inference.operators.OperatorAnalysisPrinter")$showOperatorAnalysis(
#' printStream, schedule, TRUE)
#'
#' operatorAnalysisString <- outputStream$toString("UTF8")
#'
#' # Report auto-optimization information
#' cat(operatorAnalysisString)
#'
#' # Report posterior quantities
#' c(mean(sample), sd(sample))
#' }
NULL
.onLoad <- function(libname, pkgname) {
rJava::.jpackage(pkgname, lib.loc = libname)
}
|
/scratch/gouwar.j/cran-all/cranData/BeastJar/R/BeastJar.R
|
#' Determine if the Java virtual machine supports Java 8
#'
#' Tests the Java virtual machine (JVM) java.version system property to check if the version is >= 8.
#'
#' @return
#' Returns TRUE if JVM supports Java >= 8.
#'
#' @examples
#' supportsJava8()
#'
#' @export
supportsJava8 <- function() {
javaVersionText <-
rJava::.jcall("java/lang/System", "S", "getProperty", "java.version")
majorVersion <- as.integer(regmatches(
javaVersionText,
regexpr(pattern = "^\\d+", text = javaVersionText)
))
if (majorVersion == 1) {
twoDigitVersion <- regmatches(javaVersionText,
regexpr(pattern = "^\\d+\\.\\d+", text = javaVersionText))
majorVersion <- as.integer(regmatches(twoDigitVersion,
regexpr("\\d+$", text = twoDigitVersion)))
}
support <- majorVersion >= 8
message(paste0("Using JVM version ",
javaVersionText,
" (>= 8? ", support, ")"))
return (support)
}
|
/scratch/gouwar.j/cran-all/cranData/BeastJar/R/Utilities.R
|
#' Query the bee taxonomy and country checklist
#'
#' A simple function to return information about a particular species, including name validity and
#' country occurrences.
#'
#' @param beeName Character or character vector. A single or several bee species names to search for
#' in the beesTaxonomy and beesChecklist tables.
#' @param searchChecklist Logical. If TRUE (default), search the country checklist for each species.
#' @param printAllSynonyms Logical. If TRUE, all synonyms will be printed out for each entered name.
#' Default = FALSE.
#' @param beesChecklist A tibble. The bee checklist file for BeeBDC. If is NULL then
#' [BeeBDC::beesChecklist()] will be called internally to download the file. Default = NULL.
#' @param beesTaxonomy A tibble. The bee taxonomy file for BeeBDC. If is NULL then
#' [BeeBDC::beesTaxonomy()] will be called internally to download the file. Default = NULL.
#'
#' @return Returns a list with the elements 'taxonomyReport', 'SynonymReport', and 'failedReport'. If
#' searchChecklist is TRUE, then 'checklistReport' will also be returned.
#' @export
#'
#' @importFrom dplyr %>%
#'
#' @examples
#' # For the sake of these examples, we will use the example taxonomy and checklist
#' system.file("extdata", "testTaxonomy.rda", package="BeeBDC") |> load()
#' system.file("extdata", "testChecklist.rda", package="BeeBDC") |> load()
#'
#' # Single entry example
#' testQuery <- BeeBDCQuery(
#' beeName = "Lasioglossum bicingulatum",
#' searchChecklist = TRUE,
#' printAllSynonyms = TRUE,
#' beesTaxonomy = testTaxonomy,
#' beesChecklist = testChecklist)
#'
#' # Multiple entry example
#' testQuery <- BeeBDCQuery(
#' beeName = c("Lasioglossum bicingulatum", "Nomada flavopicta",
#' "Lasioglossum fijiense (Perkins and Cheesman, 1928)"),
#' searchChecklist = TRUE,
#' printAllSynonyms = TRUE,
#' beesTaxonomy = testTaxonomy,
#' beesChecklist = testChecklist)
#'
#' # Example way to examine a report from the output list
#' testQuery$checklistReport
#'
#'
#'
BeeBDCQuery <- function(
beeName = NULL,
searchChecklist = TRUE,
printAllSynonyms = FALSE,
beesChecklist = NULL,
beesTaxonomy = NULL){
# locally bind variables to the function
data <- validName <- accid <- inputName <- id <-
rowMatched <- . <- NULL
#### 0.0 Prep ####
##### 0.1 Errors ####
###### a. FATAL errors ####
if(is.null(beeName)){
stop(" - Please provide an argument for beeName. I'm a program not a magician.")
}
#### 1.0 Data preparation ####
# Download the datasets
if(searchChecklist == TRUE & is.null(beesChecklist)){
beesChecklist <- BeeBDC::beesChecklist()}
if(is.null(beesTaxonomy)){
beesTaxonomy <- BeeBDC::beesTaxonomy()}
# Change beeName to be matched exactly
beeNameExact <- paste0("^", beeName, "$")
#### 2.0 Taxonomy check ####
message("Starting taxonomy report...")
##### 2.1 Taxonomy report ####
# Get a report of the queried names and their matched rows in beesTaxonomy
report_beesTaxonomy <-
# Make a tibble with the input name(s)
dplyr::tibble(inputName = beeName) %>%
dplyr::left_join(beesTaxonomy, relationship = "many-to-many", keep = TRUE,
by = c("inputName" = "validName")) %>%
# Do the same and bind using canonical
dplyr::bind_rows(
dplyr::tibble(inputName = beeName) %>%
dplyr::left_join(beesTaxonomy, relationship = "many-to-many", keep = TRUE,
by = c("inputName" = "canonical"))
) %>%
# Do the same and bind using canonical_withFlags
dplyr::bind_rows(
dplyr::tibble(inputName = beeName) %>%
dplyr::left_join(beesTaxonomy, relationship = "many-to-many", keep = TRUE,
by = c("inputName" = "canonical_withFlags"))) %>%
# Drop rows that did not match
tidyr::drop_na(validName) %>%
# Keep only distinct rows
dplyr::distinct()
# Extract accepted names
acceptedNames <- report_beesTaxonomy %>%
dplyr::filter(accid == 0) %>%
dplyr::mutate(inputID = 0, .after = inputName)
# Extract accepted names for synonyms provided
synonyms <- report_beesTaxonomy %>%
dplyr::filter(accid != 0) %>%
# Filter to the inputName and inputID
dplyr::select(tidyselect::all_of(c("inputName", "accid"))) %>%
dplyr::rename("inputID" = "accid") %>%
# Rejoin with the beesTaxonomy, but ONLY have valid names
dplyr::left_join(beesTaxonomy, by = c("inputID" = "id"), keep = TRUE) %>%
dplyr::distinct()
# recombine into report_beesTaxonomy
report_beesTaxonomy <- dplyr::bind_rows(acceptedNames, synonyms)
##### 2.2 Taxonomy output ####
for(i in 1:nrow(report_beesTaxonomy)){
writeLines(paste0(
report_beesTaxonomy$inputName[[i]], " is ",
# IF Synonym
if(report_beesTaxonomy$inputID[[i]] > 0){
paste0("a synonym of ",
report_beesTaxonomy$validName[[i]], " with the taxon id number ",
report_beesTaxonomy$id[[i]],
".")
}else{ # IF accepted
paste0("an accpeted name with the taxon id number ", report_beesTaxonomy$id[[i]], ".")
} # END else
))
} # End loop
##### 2.3 printAllSynonyms ####
# Find the synonyms for the entered species
synonymsMatched <- beesTaxonomy %>%
dplyr::filter(accid %in% report_beesTaxonomy$id) %>%
dplyr::left_join(report_beesTaxonomy %>%
dplyr::select(tidyselect::all_of(c("inputName", "id"))),
by = c("accid" = "id"), relationship = "many-to-many") %>%
dplyr::relocate(inputName, .before = "flags") %>%
dplyr::arrange(accid, inputName)
# If printAllSynonyms is TRUE then print the synonyms
if(printAllSynonyms == TRUE){
for(i in 1:length(report_beesTaxonomy$inputName)){
# Get the synonyms for the ith entry
synonymsLoop <- synonymsMatched %>%
dplyr::filter(accid == report_beesTaxonomy$inputID[[i]])
if(nrow(synonymsLoop) > 0){
# Print user output
writeLines(paste0(
" - '", report_beesTaxonomy$inputName[[i]], "'",
" has the synonyms: ", paste0(synonymsLoop$validName, collapse = ", ")
))} # END if > 0
if(nrow(synonymsLoop) == 0){
synonymsLoop <- synonymsMatched %>%
dplyr::filter(accid == report_beesTaxonomy$id[[i]])
# Print user output
writeLines(paste0(
" - '", report_beesTaxonomy$inputName[[i]], "'",
" has the synonyms: ", unique(paste0(synonymsLoop$validName, collapse = ", ") %>%
stringr::str_remove(report_beesTaxonomy$inputName[[i]]))
))} # END if > 0
} # END loop
} # END printAllSynonyms
#### 3.0 Checklist ####
##### 3.1 Checklist user output ####
if(searchChecklist == TRUE){
message("Starting checklist report...")
# Get the relevant rows of accepted names in the beesChecklist
checklistMatched <- beesChecklist %>%
dplyr::filter(validName %in% unique(report_beesTaxonomy$validName)) %>%
dplyr::arrange(validName)
for(i in 1:length(unique(checklistMatched$validName))){
# Select the ith species
loopSpecies <- checklistMatched %>%
dplyr::filter(validName == checklistMatched$validName[[i]])
# User output
writeLines(paste0(
" - ", checklistMatched$validName[[i]],
" is reportedly found in: \n",
paste0(unique(loopSpecies$rNaturalEarth_name), collapse = ", ")
))
}
} # END searchChecklist
#### 4.0 Return reports ####
# Make a report of the species that did not match
failedReport <- beeName %>%
setdiff(., report_beesTaxonomy$inputName) %>%
dplyr::tibble(unmatchedSpecies = .)
# If searchChecklist is requested, then return the output as a list with it included
if(searchChecklist == TRUE){
output <- dplyr::lst(report_beesTaxonomy, synonymsMatched, checklistMatched, failedReport) %>%
stats::setNames(c("taxonomyReport", "SynonymReport", "checklistReport", "failedReport"))
writeLines(paste0(
"The output will be returned as a list with the elements: ",
"'taxonomyReport', 'SynonymReport', and 'checklistReport'. \n", "These can be accessed using",
" 'output'$taxonomyReport, 'output'$SynonymReport, 'output'$checklistReport, or ",
"'output'$failedReport."
))
}else{
output <- dplyr::lst(report_beesTaxonomy, synonymsMatched, failedReport) %>%
stats::setNames(c("taxonomyReport", "SynonymReport", "failedReport"))
writeLines(paste0(
"The output will be returned as a list with the elements: ",
"'taxonomyReport' and 'SynonymReport'. \n", "These can be accessed using",
" 'output'$taxonomyReport, 'output'$SynonymReport, or ",
"'output'$failedReport."
))
}
# Return the matched data
return(output)
} # END BeeBDCQuery
|
/scratch/gouwar.j/cran-all/cranData/BeeBDC/R/BeeBDCQuery.R
|
# This function was written by James Dorey on the 20th of May 2022 in order to define the columns
# used during occurrence record cleaning.
# For help you may email James at jbdorey[at]me.com
#' Sets up column names and types
#'
#' This function uses [readr::cols_only()] to assign a column name and the type of data
#' (e.g., [readr::col_character()],
#' and [readr::col_integer()]). To see the default columns simply run [BeeBDC::ColTypeR()].
#' This is intended for use with [readr::read_csv()]. Columns that are not present will NOT be included
#' in the resulting tibble unless they are specified using [...].
#'
#' @param ... Additional arguments. These can be specified in addition to the ones default to the
#' function. For example:
#' * newCharacterColumn = [readr::col_character()],
#' * newNumericColumn = [readr::col_integer()],
#' * newLogicalColumn = [readr::col_logical()]
#'
#' @importFrom readr col_character col_double col_factor col_integer col_logical col_datetime
#' @importFrom dplyr %>%
#'
#' @return Returns an object of class col_spec.
#' See [readr::as.col_spec()] for additional context and explication.
#' @export
#'
#' @examples
#' # You can simply return the below for default values
#' library(dplyr)
#' BeeBDC::ColTypeR()
#'
#' # To add new columns you can write
#' ColTypeR(newCharacterColumn = readr::col_character(),
#' newNumericColumn = readr::col_integer(),
#' newLogicalColumn = readr::col_logical())
#'
#' # Try reading in one of the test datasets as an example:
#' beesFlagged %>% dplyr::as_tibble(col_types = BeeBDC::ColTypeR())
#' # OR
#' beesRaw %>% dplyr::as_tibble(col_types = BeeBDC::ColTypeR())
#'
#'
ColTypeR <- function(...){
ColTypes <- readr::cols_only(
# Character Strings
# CHR — taxonomy
database_id = readr::col_character(), scientificName = readr::col_character(),
family = readr::col_character(), subfamily = readr::col_character(), genus = readr::col_character(),
subgenus = readr::col_character(), subspecies = readr::col_character(), species = readr::col_character(),
specificEpithet = readr::col_character(), infraspecificEpithet = readr::col_character(),
acceptedNameUsage = readr::col_character(), taxonRank = readr::col_character(),
scientificNameAuthorship = readr::col_character(),
identificationQualifier = readr::col_character(), higherClassification = readr::col_character(),
identificationReferences = readr::col_character(), typeStatus = readr::col_character(),
previousIdentifications = readr::col_character(), verbatimIdentification = readr::col_character(),
identifiedBy = readr::col_character(), dateIdentified = readr::col_character(),
# DBL — Locality info
decimalLatitude = readr::col_double(), decimalLongitude = readr::col_double(),
verbatimLatitude = readr::col_character(), verbatimLongitude = readr::col_character(),
verbatimElevation = readr::col_character(),
# CHR/Factor — Locality info
stateProvince = readr::col_character(), country = readr::col_character(), continent = readr::col_factor(),
locality = readr::col_character(), island = readr::col_character(),
county = readr::col_character(), municipality = readr::col_character(),
# CHR/Factor — Country codes
countryCode = readr::col_factor(), level0Gid = readr::col_factor(), level0Name = readr::col_factor(),
level1Gid = readr::col_factor(), level1Name = readr::col_factor(), license = readr::col_factor(),
issue = readr::col_character(),
# Date/Time — Collection time
eventDate = readr::col_character(),
eventTime = readr::col_character(),
startDayOfYear = readr::col_integer(),
endDayOfYear = readr::col_integer(),
# Int — Collection time
day = readr::col_integer(), month = readr::col_integer(), year = readr::col_integer(),
# Factor — Collection info
basisOfRecord = readr::col_factor(), type = readr::col_factor(), occurrenceStatus = readr::col_factor(),
# CHR — Collection info
recordNumber = readr::col_character(), recordedBy = readr::col_character(), eventID = readr::col_character(),
Location = readr::col_character(), samplingProtocol = readr::col_character(),
samplingEffort = readr::col_character(),
# Int — Collection info
individualCount = readr::col_double(), organismQuantity = readr::col_double(),
# mixed — Information uncertainty
coordinatePrecision = readr::col_double(), coordinateUncertaintyInMeters = readr::col_double(),
spatiallyValid = readr::col_logical(),
# CHR — Database information
catalogNumber = readr::col_character(), gbifID = readr::col_character(), datasetID = readr::col_character(),
institutionCode = readr::col_character(), datasetName = readr::col_character(),
otherCatalogNumbers = readr::col_character(), occurrenceID = readr::col_character(),
taxonKey = readr::col_character(), coreid = readr::col_character(),
recordId = readr::col_character(), collectionID = readr::col_character(),
associatedSequences = readr::col_character(),
# CHR — Verbatim information
verbatimScientificName = readr::col_character(), verbatimEventDate = readr::col_character(),
# CHR/Factor — Aux info
associatedTaxa = readr::col_character(), associatedOrganisms = readr::col_character(),
fieldNotes = readr::col_character(), sex = readr::col_character(),
# CHR — Rights info
rights = readr::col_character(), rightsHolder = readr::col_character(), accessRights = readr::col_character(),
dctermsLicense = readr::col_character(), dctermsType = readr::col_character(),
dctermsAccessRights = readr::col_character(), associatedReferences = readr::col_character(),
bibliographicCitation = readr::col_character(), dctermsBibliographicCitation = readr::col_character(),
references = readr::col_character(),
# Record notes
# CHR
flags = readr::col_character(), informationWithheld = readr::col_character(),
isDuplicateOf = readr::col_character(),
# Logical
hasCoordinate = readr::col_logical(), hasGeospatialIssues = readr::col_logical(),
# Factor
assertions = readr::col_factor(),
# mix — ALA columns
occurrenceYear = readr::col_datetime(), id = readr::col_character(), duplicateStatus = readr::col_factor(),
associatedOccurrences = readr::col_character(),
# CHR — SCAN column
locationRemarks = readr::col_character(),
# CHR — dataset origin column
dataSource = readr::col_character(),
# bdc columns
dataBase_scientificName = readr::col_character(), .rou = readr::col_logical(),
.val = readr::col_logical(), .equ = readr::col_logical(), .zer = readr::col_logical(), .cap = readr::col_logical(),
.cen = readr::col_logical(), .sea = readr::col_logical(), .otl = readr::col_logical(), .gbf = readr::col_logical(),
.inst = readr::col_logical(), .dpl = readr::col_logical(), .summary = readr::col_logical(),
names_clean = readr::col_character(), verbatim_scientificName = readr::col_character(),
.uncer_terms = readr::col_logical(), .eventDate_empty = readr::col_logical(),
.year_outOfRange = readr::col_logical(),
.duplicates = readr::col_logical(), .lonFlag = readr::col_logical(), .latFlag = readr::col_logical(),
.gridSummary = readr::col_logical(), .basisOfRecords_notStandard = readr::col_logical(),
.scientificName_empty = readr::col_logical(), .coordinates_empty = readr::col_logical(),
.coordinates_outOfRange = readr::col_logical(), coordinates_transposed = readr::col_logical(),
country_suggested = readr::col_character(), .countryOutlier = readr::col_logical(),
countryMatch = readr::col_character(), .expertOutlier = readr::col_logical(),
# jbd flags
.occurrenceAbsent = readr::col_logical(), .coordinates_country_inconsistent = readr::col_logical(),
.unLicensed = readr::col_logical(), .invalidName = readr::col_logical(),
.sequential = readr::col_logical(), idContinuity = readr::col_logical(),
.uncertaintyThreshold = readr::col_logical(),
.GBIFflags = readr::col_logical(),
# Paige columns
finalLatitude = readr::col_double(), finalLongitude = readr::col_double(),
Source = readr::col_character(),
# Dynamic dots for extra columns specified by the user
...
) # END ColTypes
return(ColTypes)
} # END ColTypeR
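
# A hedged usage sketch (commented out so nothing is executed at build time). It shows the
# intended pattern of passing ColTypeR() to readr::read_csv() via its col_types argument; the
# file path and the extra "fieldNumber" column below are placeholders, not objects shipped with
# the package.
# occurrences <- readr::read_csv(
#   "path/to/occurrence_file.csv",
#   col_types = BeeBDC::ColTypeR(fieldNumber = readr::col_character()))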
#### End of file: R/ColTypeR.R ####
# This function was made by James Dorey to manage flags in the synonym lists
# For queries, please contact James Dorey at jbdorey[at]me.com
# This function was started on 15th May 2022 and last updated 17th May 2022
#' @importFrom dplyr %>%
FlagManager <- function(InSynList = DLdf,
flagCol = flags){
# locally bind variables to the function
. <- DLdf <- flags <- genus <- validName <- canonical <- NULL
#### Flags option ####
if(flagCol == "flags"){
base::writeLines(" - Using the flags column.")
# If there is no canonical_withFlags, make one with the canonical column, assuming it has flags
suppressWarnings( CWFtest <- InSynList$canonical_withFlags,
classes = "warning")
if(is.null(CWFtest) == TRUE){
canonical_withFlags <- InSynList$canonical
InSynList <- dplyr::mutate(InSynList, canonical_withFlags, .after = "canonical")
}
    # IF all of the canonical_withFlags column is empty, replace it with the canonical values
if(all(CWFtest == "NA") == TRUE){
InSynList$canonical_withFlags <- InSynList$canonical
}
base::writeLines(" - 1. Remove flag from validName column...")
##### START ProgBar 1 ####
# Initializes the progress bar
pb1 <- utils::txtProgressBar(min = 0, # Minimum value of the progress bar
max = nrow(InSynList), # Maximum value of the progress bar
style = 3, # Progress bar style (also available style = 1 and style = 2)
width = NA, # Progress bar width. Defaults to getOption("width")
char = "=") # Character used to create the bar
# Remove flag from validName
for(i in 2:nrow(InSynList)){
if(is.na(InSynList$flags[i]) == FALSE){ # Enter statement only IF there is a flag present
InSynList$validName[i] <- sub(InSynList$flags[i], "", InSynList$validName[i] , fixed = TRUE) %>%
trimws( which = "right" , whitespace = "[\\, ]") # Trim some special characters from the END of the string
} # END IF statement
# Sets the progress bar to the current state
utils::setTxtProgressBar(pb1, i)
} # END loop COLUMNS
#### END progBar 1 ####
close(pb1) # Close the connection
# Clean up double spaces in data frame
InSynList <- as.data.frame(apply(InSynList, 2, FUN = stringr::str_squish ), stringsAsFactors = FALSE)
# Remove empty rows that contain the following strings
InSynList <- subset(InSynList, genus!="NA" & genus!="F" & genus!="V" & genus!="Discover" &
genus!="Updated:" & genus!="Other" & validName!="Kinds of" &
validName!="Scientific name" & canonical != "Kim, NA")
    # Find annotations and add them to the flags column instead
# Which columns to search through
FlagAnnot_cols <- c("genus", "subgenus", "species", "infraspecies", "authorship")
# Which flags to find
FlagsAnnot <- c(
# Name flags
"_homonym[1-4]?","_homonytm","_homony","_sic","_sensu_lato",
"_unavailable","_invalid","_nomen_nudum","_emend",
" var "," subvar "," forma "," form "," race "," auct ","^var ","^subvar ","^forma ",
"^form ","^race ",
# unsure flags
" f ","^f "," m ","^m "," r ","^r "," ab ","^ab ",
# Specific cases
"_Urban_not_Pasteels","_Friese_not_Megerle","_Friese_not_H\\u00fcbner","_Friese_not_Stimpson",
# Author flags
"Auctorum, sensu","sensu auct not","Auct non","Auct, not","auct, not","_auct not_",
"_auct","^auct ") %>%
paste(collapse="|")
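    # A hedged illustration (commented out, not run) of what the pattern above captures, e.g.:
    # stringr::str_extract("terrestris_sic", FlagsAnnot)   # would return "_sic", which is then
    # prefixed with its column name and appended to the flags column below.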
base::writeLines(" - 2. Find and add flags to the 'flags' column...")
#### START ProgBar 2 ####
# Initializes the progress bar
pb2 <- utils::txtProgressBar(min = 0, # Minimum value of the progress bar
max = length(FlagAnnot_cols), # Maximum value of the progress bar
style = 3, # Progress bar style (also available style = 1 and style = 2)
width = NA, # Progress bar width. Defaults to getOption("width")
char = "=") # Character used to create the bar
# Find and add flags to flag column
for(j in 1:length(FlagAnnot_cols)){ # COLUMNS
for(i in 1:nrow(InSynList)){ # ROWS
if(grepl(FlagsAnnot, InSynList[i,FlagAnnot_cols[j]], fixed = FALSE) == TRUE){
# Extract the flag from the string and add the column name
ExtractedFlag <- stringr::str_extract_all(InSynList[i,FlagAnnot_cols[j]],
FlagsAnnot, simplify = TRUE) %>%
paste(FlagAnnot_cols[j],.,sep=" ", collapse = "|") %>%
          gsub("  ", " ", .) # Remove double spaces
if( is.na(InSynList[i,"flags"]) == TRUE){ # If there is NO flag, insert this flag as is
InSynList[i,"flags"] <- ExtractedFlag
}else{ # If there IS a flag, ADD this to the existing flag
InSynList[i,"flags"] <- paste(InSynList[i,"flags"], ExtractedFlag, sep = " | ")
} # END if else statement
} # END find flag IF statement
} # END for loop of flag annotations - ROWS
# Sets the progress bar to the current state
utils::setTxtProgressBar(pb2, j)
} # END loop COLUMNS
#### END progBar 2 ####
close(pb2) # Close the connection
# Trim internal white spaces in flag column
InSynList$flags <- gsub(" \\| | \\||\\| ", "|", InSynList$flags)
# Remove these strings from the relevant columns now that they are saved as flags
TempCols <- as.data.frame(apply(InSynList[c("canonical",FlagAnnot_cols) ], 2,
function(y) gsub(FlagsAnnot, " ", y)))
# Replace existing columns with these new, trimmed, columns
InSynList[c(FlagAnnot_cols)] <- TempCols[, FlagAnnot_cols]
# Add the canonical column after the canonical column with flags - later convert to
# canonical and canonical_withFlags
CanTest <- InSynList$canonical
    # IF there IS already a canonical column, replace that column with the new one...
if(is.null(CanTest) == FALSE){
InSynList$canonical <- TempCols$canonical
}else{
InSynList <- dplyr::mutate(InSynList, TempCols$canonical, .after = "canonical")
}
# Clean up double spaces in data frame
InSynList <- as.data.frame(apply(InSynList, 2, FUN = stringr::str_squish ), stringsAsFactors = FALSE)
} # END IF flags col
#### Notes option ####
if(flagCol == "notes"){
base::writeLines(" - Using the notes column.")
# If there is no canonical_withFlags, make one with the canonical column, assuming it has flags
suppressWarnings( CWFtest <- InSynList$canonical_withFlags,
classes = "warning")
if(is.null(CWFtest) == TRUE){
canonical_withFlags <- InSynList$canonical
InSynList <- dplyr::mutate(InSynList, canonical_withFlags, .after = "canonical")
}
    # IF all of the canonical_withFlags column is empty, replace it with the canonical values
if(all(CWFtest == "NA") == TRUE){
InSynList$canonical_withFlags <- InSynList$canonical
}
base::writeLines(" - 1. Remove flag from validName column...")
##### START ProgBar 1 ####
# Initializes the progress bar
pb1 <- utils::txtProgressBar(min = 0, # Minimum value of the progress bar
max = nrow(InSynList), # Maximum value of the progress bar
style = 3, # Progress bar style (also available style = 1 and style = 2)
width = NA, # Progress bar width. Defaults to getOption("width")
char = "=") # Character used to create the bar
# Remove flag from validName
for(i in 2:nrow(InSynList)){
if(is.na(InSynList$notes[i]) == FALSE){ # Enter statement only IF there is a flag present
InSynList$validName[i] <- sub(InSynList$notes[i], "", InSynList$validName[i] , fixed = TRUE) %>%
trimws( which = "right" , whitespace = "[\\, ]") # Trim some special characters from the END of the string
} # END IF statement
# Sets the progress bar to the current state
utils::setTxtProgressBar(pb1, i)
} # END loop COLUMNS
#### END progBar 1 ####
close(pb1) # Close the connection
# Clean up double spaces in data frame
InSynList <- as.data.frame(apply(InSynList, 2, FUN = stringr::str_squish ), stringsAsFactors = FALSE)
# Remove empty rows that contain the following strings
InSynList <- subset(InSynList, genus!="NA" & genus!="F" & genus!="V" & genus!="Discover" &
genus!="Updated:" & genus!="Other" )
    # Find annotations and add them to the notes column instead
# Which columns to search through
FlagAnnot_cols <- c("genus", "subgenus", "species", "infraspecies", "authorship")
# Which flags to find
FlagsAnnot <- c(
# Name flags
"_homonym[1-4]?","_homonytm","_homony","_sic","_sensu_lato",
"_unavailable","_invalid","_nomen_nudum","_emend",
" var "," subvar "," forma "," form "," race "," auct ","^var ","^subvar ","^forma ",
"^form ","^race ",
# unsure flags
" f ","^f "," m ","^m "," r ","^r "," ab ","^ab ",
# Specific cases
"_Urban_not_Pasteels","_Friese_not_Megerle","_Friese_not_H\\u00fcbner","_Friese_not_Stimpson",
# Author flags
"Auctorum, sensu","sensu auct not","Auct non","Auct, not","auct, not","_auct not_",
"_auct","^auct ") %>%
paste(collapse="|")
base::writeLines(" - 2. Find and add flags to the 'flags' column...")
#### START ProgBar 2 ####
# Initializes the progress bar
pb2 <- utils::txtProgressBar(min = 0, # Minimum value of the progress bar
max = length(FlagAnnot_cols), # Maximum value of the progress bar
style = 3, # Progress bar style (also available style = 1 and style = 2)
width = NA, # Progress bar width. Defaults to getOption("width")
char = "=") # Character used to create the bar
# Find and add flags to notes column
for(j in 1:length(FlagAnnot_cols)){ # COLUMNS
for(i in 1:nrow(InSynList)){ # ROWS
if(grepl(FlagsAnnot, InSynList[i,FlagAnnot_cols[j]], fixed = FALSE) == TRUE){
# Extract the flag from the string and add the column name
ExtractedFlag <- stringr::str_extract_all(InSynList[i,FlagAnnot_cols[j]],
FlagsAnnot, simplify = TRUE) %>%
paste(FlagAnnot_cols[j],.,sep=" ", collapse = "|") %>%
          gsub("  ", " ", .) # Remove double spaces
if( is.na(InSynList[i,"notes"]) == TRUE){ # If there is NO flag, insert this flag as is
InSynList[i,"notes"] <- ExtractedFlag
}else{ # If there IS a flag, ADD this to the existing flag
InSynList[i,"notes"] <- paste(InSynList[i,"notes"], ExtractedFlag, sep = " | ")
} # END if else statement
} # END find flag IF statement
} # END for loop of flag annotations - ROWS
# Sets the progress bar to the current state
utils::setTxtProgressBar(pb2, j)
} # END loop COLUMNS
#### END progBar 2 ####
close(pb2) # Close the connection
# Trim internal white spaces in flag column
InSynList$notes <- gsub(" \\| | \\||\\| ", "|", InSynList$notes)
# Remove these strings from the relevant columns now that they are saved as flags
TempCols <- as.data.frame(apply(InSynList[c("canonical",FlagAnnot_cols) ], 2,
function(y) gsub(FlagsAnnot, " ", y)))
# Replace existing columns with these new, trimmed, columns
InSynList[c(FlagAnnot_cols)] <- TempCols[, FlagAnnot_cols]
# Add the canonical column after the canonical column with flags - later convert to
# canonical and canonical_withFlags
CanTest <- InSynList$canonical
    # IF there IS already a canonical column, replace that column with the new one...
if(is.null(CanTest) == FALSE){
InSynList$canonical <- TempCols$canonical
}else{
InSynList <- dplyr::mutate(InSynList, TempCols$canonical, .after = "canonical")
}
# Clean up double spaces in data frame
InSynList <- as.data.frame(apply(InSynList, 2, FUN = stringr::str_squish ), stringsAsFactors = FALSE)
} # END IF notes col
#### neither ####
if(flagCol != "flags" & flagCol != "notes"){
base::writeLines("!! The flagCol option must equal 'flags' or 'notes' !!")
}
#backupdf <- InSynList
#InSynList <- backupdf
# change column names
#colnames(InSynList) <- c("flags","taxonomic_status","source","accid","id","kingdom","phylum","class","order","family","subfamily","tribe","subtribe","validName","canonical_withFlags","canonical","genus","subgenus","species","infraspecies","authorship")
return(InSynList)
} # END FlagManager
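
# A hedged, illustrative call for this internal helper (commented out; not run). The input is
# assumed to be a Discover Life-style synonym list holding, at minimum, the columns validName,
# canonical, genus, subgenus, species, infraspecies, authorship, and a flags (or notes) column;
# "DLdf" is a placeholder object name.
# cleanedSynList <- FlagManager(InSynList = DLdf,
#                               flagCol = "flags")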
#### End of file: R/FlagManager.R ####
# This function was written by James B Dorey to choose and flag some issues as listed by GBIF.
# This function was written on the 30th of May 2022. For questions, please contact James at
# jbdorey[at]me.com
# Possible flags:
# allDates: RECORDED_DATE_INVALID, RECORDED_DATE_UNLIKELY
# allMetadata: AMBIGUOUS_COLLECTION, COLLECTION_MATCH_NONE, COUNTRY_DERIVED_FROM_COORDINATES,
# INSTITUTION_MATCH_NONE, DIFFERENT_OWNER_INSTITUTION, COUNTRY_INVALID, AMBIGUOUS_INSTITUTION,
# COLLECTION_MATCH_FUZZY, INSTITUTION_COLLECTION_MISMATCH, INSTITUTION_MATCH_FUZZY
# allObservations: OCCURRENCE_STATUS_INFERRED_FROM_INDIVIDUAL_COUNT, BASIS_OF_RECORD_INVALID,
# INDIVIDUAL_COUNT_INVALID
# allSpatial: GEODETIC_DATUM_ASSUMED_WGS84, COORDINATE_ROUNDED, COORDINATE_PRECISION_INVALID
# FOOTPRINT_WKT_INVALID, PRESUMED_NEGATED_LONGITUDE, CONTINENT_INVALID
# PRESUMED_NEGATED_LATITUDE, COORDINATE_INVALID, COUNTRY_COORDINATE_MISMATCH
# COORDINATE_UNCERTAINTY_METERS_INVALID, ZERO_COORDINATE, GEODETIC_DATUM_INVALID
# allTaxo: TAXON_MATCH_HIGHERRANK, TYPE_STATUS_INVALID, TAXON_MATCH_FUZZY
#' Flags records with GBIF issues
#'
#' This function will flag records which are subject to a user-specified vector of GBIF issues.
#'
#' @param data A data frame or tibble. Occurrence records as input.
#' @param issueColumn Character. The column in which to look for GBIF issues. Default = "issue".
#' @param GBIFflags Character vector. The GBIF issues to flag. Users may choose their own vector of issues to flag or
#' use a pre-set vector or vectors, including c("allDates", "allMetadata", "allObservations",
#' "allSpatial", "allTaxo", or "all").
#'
#' Default = c("COORDINATE_INVALID", "PRESUMED_NEGATED_LONGITUDE", "PRESUMED_NEGATED_LATITUDE", "COUNTRY_COORDINATE_MISMATCH", "ZERO_COORDINATE")
#'
#' @return Returns the data with a new column, ".GBIFflags", where FALSE = records with any of the provided
#' GBIFflags.
#' @export
#'
#' @importFrom dplyr %>%
#'
#' @examples
#' # Import the example data
#' data(beesRaw)
#' # Run the function
#' beesRaw_Out <- GBIFissues(data = beesRaw,
#' issueColumn = "issue",
#' GBIFflags = c("COORDINATE_INVALID", "ZERO_COORDINATE"))
#'
#'
GBIFissues <- function (data = NULL,
issueColumn = "issue",
GBIFflags = NULL)
{
.data <- .GBIFflags <- NULL
#### 0.0 Warnings ####
if(is.null(data)){
stop("\n - Please provide an argument for data. I'm a program, not a magician.")
}
if(is.null(GBIFflags)){
warning("\n - GBIFflags not provided. Please provide an argument. I'm a program, not a magician.")
writeLines(paste(
" - Possible options are:\n",
paste("TAXON_MATCH_HIGHERRANK", "TYPE_STATUS_INVALID", "TAXON_MATCH_FUZZY",
"GEODETIC_DATUM_ASSUMED_WGS84", "COORDINATE_ROUNDED", "COORDINATE_PRECISION_INVALID",
"FOOTPRINT_WKT_INVALID", "PRESUMED_NEGATED_LONGITUDE", "CONTINENT_INVALID",
"PRESUMED_NEGATED_LATITUDE", "COORDINATE_INVALID", "COUNTRY_COORDINATE_MISMATCH",
"COORDINATE_UNCERTAINTY_METERS_INVALID", "ZERO_COORDINATE", "GEODETIC_DATUM_INVALID",
"OCCURRENCE_STATUS_INFERRED_FROM_INDIVIDUAL_COUNT", "BASIS_OF_RECORD_INVALID",
"INDIVIDUAL_COUNT_INVALID",
"AMBIGUOUS_COLLECTION", "COLLECTION_MATCH_NONE",
"COUNTRY_DERIVED_FROM_COORDINATES",
"INSTITUTION_MATCH_NONE", "DIFFERENT_OWNER_INSTITUTION", "COUNTRY_INVALID",
"AMBIGUOUS_INSTITUTION", "COLLECTION_MATCH_FUZZY",
"INSTITUTION_COLLECTION_MISMATCH", "INSTITUTION_MATCH_FUZZY",
"RECORDED_DATE_INVALID", "RECORDED_DATE_UNLIKELY", collapse = "", sep = ", "), "\n",
" - Or:\n",
"allDates, allMetadata, allObservations, allTaxo, allSpatial, or all. ",
"We recommend thinking about what is required.", sep = ""
))
message(paste(
"Using default of:\n",
paste("COORDINATE_INVALID", "PRESUMED_NEGATED_LONGITUDE", "PRESUMED_NEGATED_LATITUDE",
"COUNTRY_COORDINATE_MISMATCH", "ZERO_COORDINATE", sep = ", "),
sep = ""))
GBIFflags <- c("COORDINATE_INVALID", "PRESUMED_NEGATED_LONGITUDE", "PRESUMED_NEGATED_LATITUDE",
"COUNTRY_COORDINATE_MISMATCH", "ZERO_COORDINATE")
}
#### 1.0 Flag options ####
# Flag allDates
if (any(GBIFflags == "allDates")) {
GBIFflags <- c("RECORDED_DATE_INVALID", "RECORDED_DATE_UNLIKELY")}
# Flag allMetadata
if (any(GBIFflags == "allMetadata")) {
GBIFflags <- c("AMBIGUOUS_COLLECTION", "COLLECTION_MATCH_NONE",
"COUNTRY_DERIVED_FROM_COORDINATES",
"INSTITUTION_MATCH_NONE", "DIFFERENT_OWNER_INSTITUTION", "COUNTRY_INVALID",
"AMBIGUOUS_INSTITUTION", "COLLECTION_MATCH_FUZZY",
"INSTITUTION_COLLECTION_MISMATCH", "INSTITUTION_MATCH_FUZZY")}
# Flag allObservations
if (any(GBIFflags == "allObservations")) {
GBIFflags <- c("OCCURRENCE_STATUS_INFERRED_FROM_INDIVIDUAL_COUNT", "BASIS_OF_RECORD_INVALID",
"INDIVIDUAL_COUNT_INVALID")}
# Flag allSpatial
if (any(GBIFflags == "allSpatial")) {
GBIFflags <- c("GEODETIC_DATUM_ASSUMED_WGS84", "COORDINATE_ROUNDED", "COORDINATE_PRECISION_INVALID",
"FOOTPRINT_WKT_INVALID", "PRESUMED_NEGATED_LONGITUDE", "CONTINENT_INVALID",
"PRESUMED_NEGATED_LATITUDE", "COORDINATE_INVALID", "COUNTRY_COORDINATE_MISMATCH",
"COORDINATE_UNCERTAINTY_METERS_INVALID", "ZERO_COORDINATE", "GEODETIC_DATUM_INVALID")}
# Flag allTaxo
if (any(GBIFflags == "allTaxo")) {
GBIFflags <- c("TAXON_MATCH_HIGHERRANK", "TYPE_STATUS_INVALID", "TAXON_MATCH_FUZZY")}
# Flag all
if (any(GBIFflags == "all")) {
GBIFflags <- c("TAXON_MATCH_HIGHERRANK", "TYPE_STATUS_INVALID", "TAXON_MATCH_FUZZY",
"GEODETIC_DATUM_ASSUMED_WGS84", "COORDINATE_ROUNDED", "COORDINATE_PRECISION_INVALID",
"FOOTPRINT_WKT_INVALID", "PRESUMED_NEGATED_LONGITUDE", "CONTINENT_INVALID",
"PRESUMED_NEGATED_LATITUDE", "COORDINATE_INVALID", "COUNTRY_COORDINATE_MISMATCH",
"COORDINATE_UNCERTAINTY_METERS_INVALID", "ZERO_COORDINATE", "GEODETIC_DATUM_INVALID",
"OCCURRENCE_STATUS_INFERRED_FROM_INDIVIDUAL_COUNT", "BASIS_OF_RECORD_INVALID",
"INDIVIDUAL_COUNT_INVALID",
"AMBIGUOUS_COLLECTION", "COLLECTION_MATCH_NONE",
"COUNTRY_DERIVED_FROM_COORDINATES",
"INSTITUTION_MATCH_NONE", "DIFFERENT_OWNER_INSTITUTION", "COUNTRY_INVALID",
"AMBIGUOUS_INSTITUTION", "COLLECTION_MATCH_FUZZY",
"INSTITUTION_COLLECTION_MISMATCH", "INSTITUTION_MATCH_FUZZY",
"RECORDED_DATE_INVALID", "RECORDED_DATE_UNLIKELY")}
# Add flag column
data <- data %>% dplyr::mutate(.GBIFflags = !tolower(.data[[issueColumn]]) %in%
tolower(GBIFflags))
# User output
message(paste(" - jbd_GBIFissues:\nFlagged",
                format(sum(data$.GBIFflags == FALSE, na.rm = TRUE), big.mark = ","),
                "records.\n ",
"The .GBIFflags column was added to the database.", "\n",
sep = " "))
# Return the dataset
return(data)
}
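
# A short, hedged follow-up to the roxygen example above (commented out; not run). Because
# .GBIFflags is FALSE for records carrying any of the chosen issues, flagged records can be
# inspected or dropped with a simple filter; "beesRaw_Out" is the object created in that example.
# beesRaw_failed <- beesRaw_Out %>%
#   dplyr::filter(.GBIFflags == FALSE)
# beesRaw_passed <- beesRaw_Out %>%
#   dplyr::filter(.GBIFflags == TRUE)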
#### End of file: R/GBIFissues.R ####
# This function was written by James Dorey to match higher order names to species.genus names
# For queries, please contact James Dorey at jbdorey[at]me.com
# This function was started on 15th May 2022 and last updated 17th May 2022
#' @importFrom dplyr %>%
HigherNamer <- function(HigherNameList = HigherOrders,
InSynList = DLdf){
# locally bind variables to the function
HigherOrders <- DLdf <- taxonomic_status <- validName <- family <- subfamily <- NULL
subfamily <- tribe <- subtribe <- id <- NULL
# Drop the completely NA row at the top
InSynList <- InSynList %>%
tidyr::drop_na(taxonomic_status) %>%
# Drop some left-over Discover life stuff
dplyr::filter(!validName %in% c("Kinds of Apoidea species", "Scientific name",
"Other names")) %>%
dplyr::filter(!stringr::str_detect(validName, "Updated: |Discover Life"))
# Match and copy the Higher Order names across
InSynList$family <- HigherNameList$family[ cbind(match(InSynList$genus, HigherNameList$Genus ) )]
InSynList$subfamily <- HigherNameList$subfamily[ cbind(match(InSynList$genus, HigherNameList$Genus ) )]
InSynList$tribe <- HigherNameList$tribe[ cbind(match(InSynList$genus, HigherNameList$Genus ) )]
InSynList$subtribe <- HigherNameList$subtribe[ cbind(match(InSynList$genus, HigherNameList$Genus ) )]
  # Make sure these are numeric columns...
InSynList$accid <- as.numeric(InSynList$accid)
InSynList$id <- as.numeric(InSynList$id)
# For those synonyms that do not have a higher taxonomy, take it from the accepted name
emptyFamily <- InSynList %>%
dplyr::filter(is.na(family))
# Match to their accepted names
emptyFamily <- emptyFamily %>%
dplyr::select(!c(family, subfamily, tribe, subtribe)) %>%
dplyr::left_join(InSynList %>% dplyr::select(c(family, subfamily, tribe, subtribe, id)),
by = c("accid" = "id"))
# Re-join these data
InSynList <- InSynList %>%
dplyr::filter(!is.na(family)) %>%
dplyr::bind_rows(emptyFamily)
# Return the list
return(InSynList)
} # END function
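
# A hedged, illustrative call for this internal helper (commented out; not run). HigherNameList
# is assumed to be a table with one row per bee genus and the columns Genus, family, subfamily,
# tribe, and subtribe; InSynList is the synonym list whose higher taxonomy should be filled in.
# Both object names below are placeholders.
# synListComplete <- HigherNamer(HigherNameList = HigherOrders,
#                                InSynList = DLdf)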
#### End of file: R/HigherNamer.R ####
# This function was written by James Dorey on the 16th of June 2022 to join
# Paige's cleaned dataset as best as possible.
#' Integrate manually-cleaned data from Paige Chesshire
#'
#' Replaces publicly available data with data that has been manually cleaned and error-corrected for use in
#' the paper Chesshire, P. R., Fischer, E. E., Dowdy, N. J., Griswold, T., Hughes, A. C., Orr, M. J., . . . McCabe, L. M. (In Press). Completeness analysis for over 3000 United States bee species identifies persistent data gaps. Ecography.
#'
#' @param db_standardized A data frame or tibble. Occurrence records as input.
#' @param PaigeNAm A data frame or tibble. The Paige Chesshire dataset.
#' @param columnStrings A list of character vectors. Each vector is a set of columns that will be
#' used to iteratively match the public dataset against the Paige dataset.
#'
#' @importFrom dplyr %>%
#' @importFrom stats complete.cases
#'
#' @return Returns db_standardized (input occurrence records) with the Paige Chesshire data integrated.
#' @export
#'
#'
#' @examples
#' \dontrun{
#' library(dplyr)
#' # set the DataPath to tempdir for this example
#' DataPath <- tempdir()
#' # Integrate Paige Chesshire's cleaned dataset.
#' # Import Paige's cleaned N. American data
#' # If you haven't figured it out by now, don't worry about the column name warning - not all
#' # columns occur here.
#'PaigeNAm <- readr::read_csv(paste(DataPath, "Paige_data", "NorAmer_highQual_only_ALLfamilies.csv",
#' sep = "/"), col_types = ColTypeR()) %>%
#' # Change the column name from Source to dataSource to match the rest of the data.
#' dplyr::rename(dataSource = Source) %>%
#' # add a NEW database_id column
#' dplyr::mutate(
#' database_id = paste0("Paige_data_", 1:nrow(.)),
#' .before = scientificName)
#'
#' # Set up the list of character vectors to iteratively check for matches with public data.
#'columnList <- list(
#' c("decimalLatitude", "decimalLongitude",
#' "recordNumber", "recordedBy", "individualCount", "samplingProtocol",
#' "associatedTaxa", "sex", "catalogNumber", "institutionCode", "otherCatalogNumbers",
#' "recordId", "occurrenceID", "collectionID"), # Iteration 1
#' c("catalogNumber", "institutionCode", "otherCatalogNumbers",
#' "recordId", "occurrenceID", "collectionID"), # Iteration 2
#' c("decimalLatitude", "decimalLongitude",
#' "recordedBy", "genus", "specificEpithet"), # Iteration 3
#' c("id", "decimalLatitude", "decimalLongitude"), # Iteration 4
#' c("recordedBy", "genus", "specificEpithet", "locality"), # Iteration 5
#' c("recordedBy", "institutionCode", "genus",
#' "specificEpithet","locality"),# Iteration 6
#' c("occurrenceID","decimalLatitude", "decimalLongitude"), # Iteration 7
#' c("catalogNumber","decimalLatitude", "decimalLongitude"), # Iteration 8
#' c("catalogNumber", "locality") # Iteration 9
#')
#'
#'# Merge Paige's data with downloaded data
#'db_standardized <- BeeBDC::PaigeIntegrater(
#' db_standardized = db_standardized,
#' PaigeNAm = PaigeNAm,
#' columnStrings = columnList)
#' }
#'
#'
PaigeIntegrater <- function(
db_standardized = NULL,
PaigeNAm = NULL,
columnStrings = NULL){
# locally bind variables to the function
occurrenceID<-database_id<-database_id_p<-database_id_d<-finalLatitude<-finalLongitude<-
Dorey_match<-decimalLatitude<-decimalLongitude<-scientificName<-genus<-specificEpithet<-
infraspecificEpithet<-country<-coordinateUncertaintyInMeters<-decimalLatitude_m<-
database_id_m<-decimalLongitude_m<-scientificName_m<-genus_m<-specificEpithet_m<-
infraspecificEpithet_m<-country_m<-coordinateUncertaintyInMeters_m <- . <- NULL
requireNamespace("dplyr")
#### 1.0 occurrenceID ####
# Make a temporary dataset
tempData <- db_standardized %>%
dplyr::filter(complete.cases(occurrenceID))
# Find the matches for occurrenceID
occMatched <- dplyr::tibble(
Dorey_match = tempData$database_id[cbind(
match(PaigeNAm$occurrenceID, tempData$occurrenceID )
)], # Match by occurrenceID
Paige_match = PaigeNAm$database_id)
# User output
writeLines(paste0(
" - INITIAL match with occurrenceID only ",
format(sum(complete.cases(occMatched$Dorey_match)), big.mark = ","), " of ",
format(nrow(occMatched), big.mark = ","), " Paige occurrences.\n",
"There are ",
format(nrow(occMatched) - sum(complete.cases(occMatched$Dorey_match)), big.mark = ","),
" occurrences remaining to match."))
# Save the number remaining
numMatched <- (nrow(occMatched) - sum(complete.cases(occMatched$Dorey_match)))
# Set matchedPaige to feed into the loop
matchedPaige <- occMatched
#### 2.0 Loop ####
# loop through the number of column strings
for(i in 1:length(columnStrings)){
message(paste0(" - Starting iteration ", i))
# Get the Paige occurrence records that are not matched above
matchedPaige <- matchedPaige %>%
dplyr::filter(complete.cases(matchedPaige$Dorey_match))
unMatchedPaige <- PaigeNAm %>%
# Remove the already-matched records
dplyr::filter(!database_id %in% matchedPaige$Paige_match)
# Select the columns to match by
colOverlap <- unlist(columnStrings[i])
# Get a subset of the db_standardized to feed in below
temp_db <- db_standardized %>%
      # Get distinct data for the above columns
dplyr::distinct(dplyr::across(tidyselect::all_of(colOverlap)),
.keep_all = TRUE) %>%
dplyr::select(c(database_id, tidyselect::all_of(colOverlap))) %>%
# Remove already-matched occurrences
dplyr::filter(!database_id %in% matchedPaige$Dorey_match)
# GET THE MATCHED occurrences
matchedPaige <- unMatchedPaige %>%
# Merge datasets
dplyr::left_join(temp_db, by = c(colOverlap),
suffix = c("_p", "_d")) %>%
# Select the id columns
dplyr::select(database_id_p, database_id_d) %>%
# Keep ONLY the matched columns
dplyr::filter(complete.cases(database_id_p)) %>%
# Rename those columns
dplyr::rename(Dorey_match = database_id_d, Paige_match = database_id_p) %>%
# bind with the last lot of matched names
dplyr::bind_rows(matchedPaige)
# User output
writeLines(paste0(
"Matched ",
format(sum(complete.cases(matchedPaige$Dorey_match)), big.mark = ","), " of ",
format(nrow(matchedPaige), big.mark = ","), " Paige occurrences.\n",
"There are ",
format(nrow(matchedPaige) - sum(complete.cases(matchedPaige$Dorey_match)), big.mark = ","),
" occurrences remaining to match.\n",
"This step has found ",
format(
numMatched - (nrow(matchedPaige) - sum(complete.cases(matchedPaige$Dorey_match))),
big.mark = ","),
" extra occurrences from the last iteration."
))
# Update numMatched for next iteration
numMatched = (nrow(matchedPaige) - sum(complete.cases(matchedPaige$Dorey_match)))
} # END loop
#### 3.0 Append ####
# Update the data from Paige
writeLines(" - Updating Paige datasheet to merge...")
matchedPaige <- PaigeNAm %>%
# Select the matched records.
dplyr::filter(database_id %in% matchedPaige$Paige_match) %>%
# Replace the lat/lon columns
dplyr::mutate(
decimalLatitude = finalLatitude,
decimalLongitude = finalLongitude) %>%
dplyr::select(!c(finalLatitude, finalLongitude)) %>%
# Add on the associated Dorey database_id
dplyr::left_join(matchedPaige, by = c("database_id" = "Paige_match") ) %>%
# Make sure that all Dorey_match's are unique
dplyr::distinct(Dorey_match, .keep_all = TRUE)
writeLines(" - Updating the final datasheet with new information from Paige...")
# Merge the new information
db_standardized <- db_standardized %>%
# Join select fields of the Paige data
dplyr::left_join(matchedPaige %>%
dplyr::select(., c(Dorey_match, decimalLatitude, decimalLongitude,
scientificName, genus, specificEpithet,
infraspecificEpithet, database_id, country,
coordinateUncertaintyInMeters)),
by = c("database_id" = "Dorey_match"), suffix = c("", "_m")) %>%
# Rename those fields to replace existing fields
dplyr::mutate(
decimalLatitude = dplyr::if_else(complete.cases(decimalLatitude_m), decimalLatitude_m,
decimalLatitude),
database_id = dplyr::if_else(complete.cases(database_id_m), database_id_m,
database_id),
decimalLongitude = dplyr::if_else(complete.cases(decimalLongitude_m), decimalLongitude_m,
decimalLongitude),
scientificName = dplyr::if_else(complete.cases(scientificName_m), scientificName_m,
scientificName),
genus = dplyr::if_else(complete.cases(genus_m), genus_m,
genus),
specificEpithet = dplyr::if_else(complete.cases(specificEpithet_m), specificEpithet_m,
specificEpithet),
infraspecificEpithet = dplyr::if_else(complete.cases(infraspecificEpithet_m),
infraspecificEpithet_m,
infraspecificEpithet),
country = dplyr::if_else(complete.cases(country_m), country_m,
country),
coordinateUncertaintyInMeters = dplyr::if_else(complete.cases(coordinateUncertaintyInMeters_m),
coordinateUncertaintyInMeters_m,
coordinateUncertaintyInMeters)) %>%
# Remove the additional columns
dplyr::select(!c(decimalLatitude_m, decimalLongitude_m,
scientificName_m, genus_m, specificEpithet_m,
infraspecificEpithet_m, database_id_m, country_m,
coordinateUncertaintyInMeters_m))
# Return the object
return(db_standardized)
} # END function
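
# A quick, hedged post-merge check (commented out; not run). Records that matched Paige's
# dataset take on her database_id values, which carry the "Paige_data_" prefix created in the
# roxygen example above, so the number of integrated records can be counted afterwards.
# db_standardized %>%
#   dplyr::filter(stringr::str_detect(database_id, "Paige_data_")) %>%
#   nrow()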
#### End of file: R/PaigeIntegrateR.R ####
# This script was written by James Dorey to use Paige Chesshire's taxonomy updates to update the
# Orr and Ascher combined taxonomy
# This function checks if there is a match in the current taxonomy or not
# For queries, please contact James Dorey at jbdorey[at]me.com
# This function was started on 15th May 2022 and last updated 17th May 2022
#' @importFrom dplyr %>%
PaigesSyns <- function(PaigeSheet = Paige_sheet_loc,
SynFile = SynL_AO){
# locally bind variables to the function
. <- Paige_sheet_loc <- SynL_AO <- canonical <- id <- NULL
# Read in the relevant sheets from the excel file
SynChanges <- openxlsx::read.xlsx(PaigeSheet, sheet = "Synonym_changes")
#### 2.0 Synonyms ####
# Set up an empty tibble...
SynAnnotate_df <- dplyr::tibble()
# Run a loop
for(i in 1:nrow(SynChanges)){
# Find the ith name to change
loopName_ori <- SynChanges$OriginalScientificName_DL_s1[i]
loopName_DL <- SynChanges$DLifeName_s2[i]
loopName_Final <- SynChanges$Final_Name[i]
##### 2.1 One name ####
# If there is only one name...
if(loopName_ori == loopName_DL || loopName_DL %in% c("Remove","remove (illogical)")){
# Find that name in the original list
SLAO_row <- SynFile %>%
dplyr::filter(canonical == loopName_ori)
# Now find the accepted row based on the accid
acc_SLAO_row <- SynFile %>%
dplyr::filter(id %in% SLAO_row$accid)
# Check to see if any of the DiscoverLife accepted names match Paige's name
taxaCurrentTest <- any(acc_SLAO_row$canonical %in% loopName_Final)
# . note this gives a TRUE or FALSE to see if the names match the current taxonomy.
if(taxaCurrentTest == TRUE){
SynAnnotate_df <- dplyr::tibble(
          FinalName = loopName_Final, originalName = loopName_ori, DisLifeName = loopName_DL,
PotentialMatches = nrow(SLAO_row), iteration = i,
Correct = TRUE, note = "One name provided, VALID") %>%
dplyr::bind_rows(SynAnnotate_df, .)
}else{
SynAnnotate_df <- dplyr::tibble(
          FinalName = loopName_Final, originalName = loopName_ori, DisLifeName = loopName_DL,
PotentialMatches = nrow(SLAO_row), iteration = i,
Correct = FALSE, note = "One name provided, INVALID")%>%
dplyr::bind_rows(SynAnnotate_df, .)
}
}# End 1. valid name
##### 2.2 Two names ####
# If there is only one name...
if(loopName_ori != loopName_DL && !loopName_DL %in% c("Remove","remove (illogical)")){
# Find that name in the original list
SLAO_row <- SynFile %>%
dplyr::filter(canonical %in% c(loopName_ori, loopName_DL))
# Now find the accepted row based on the accid
acc_SLAO_row <- SynFile %>%
dplyr::filter(id %in% SLAO_row$accid)
# Check to see if any of the DiscoverLife accepted names match Paige's name
taxaCurrentTest <- any(acc_SLAO_row$canonical %in% loopName_Final)
# . note this gives a TRUE or FALSE to see if the names match the current taxonomy.
if(taxaCurrentTest == TRUE){
SynAnnotate_df <- dplyr::tibble(
          FinalName = loopName_Final, originalName = loopName_ori, DisLifeName = loopName_DL,
PotentialMatches = nrow(SLAO_row), iteration = i,
Correct = TRUE, note = "Two names provided, VALID") %>%
dplyr::bind_rows(SynAnnotate_df, .)
}else{
SynAnnotate_df <- dplyr::tibble(
          FinalName = loopName_Final, originalName = loopName_ori, DisLifeName = loopName_DL,
PotentialMatches = nrow(SLAO_row), iteration = i,
Correct = FALSE, note = "Two names provided, INVALID")%>%
dplyr::bind_rows(SynAnnotate_df, .)
} # END else
} # END 2.2 Two names
}# End 2.0 Synonym loop
  # User output
writeLines( paste("The output can be used to manually check names. ",
"There are ", sum(SynAnnotate_df$Correct == FALSE), " names that require checking",
" and ", sum(SynAnnotate_df$Correct == TRUE), " that appear fine.",
sep = ""))
return(SynAnnotate_df)
} # END function
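
# A hedged, illustrative call for this internal helper (commented out; not run). PaigeSheet is
# assumed to be the path to Paige Chesshire's .xlsx file containing a "Synonym_changes" sheet,
# and SynFile the combined synonym list with id, accid, and canonical columns; both object names
# below are placeholders.
# synonymCheck <- PaigesSyns(PaigeSheet = Paige_sheet_loc,
#                            SynFile = SynL_AO)
# # Names that still need manual attention:
# synonymCheck %>% dplyr::filter(Correct == FALSE)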
#### End of file: R/PaigesSyns.R ####
# This Function is designed to check state-level outliers of the t2t project.
# It was written by James B Dorey from the 1st of August 2022.
#' @importFrom dplyr %>%
StateOutlieRs <- function(
checklist = NULL,
checklistColumns = NULL,
countryList = NULL,
occData = NULL,
findHawaii = FALSE
){
  # locally bind variables to the function
. <- SciPost <- neighbourMatch <- nAssignmentCertainty <- exactMatch <- matchType <-
matchCertainty <- stateMatch <- jurisdiction_value<-stateValue<-adm1_code<-iso_3166_2<-name<-
region<-postal<- geometry<-longitude<-latitude<-database_id<-scientificName<-species<-family<-
subfamily<-genus<-specificEpithet<-country<-stateProvince<-eventDate<-institutionCode<-
recordNumber<-catalogNumber<-dataSource<-verbatim_scientificName<-neighbours<-rowNum<-state<-
neighboursText<-Genus<-Species<-assignmentCertainty <- NULL
#### 0.0 Warnings ####
if(is.null(checklist)){
stop("You must provide a checklist of states.")
}
if(is.null(checklistColumns)){
    stop("You must provide a vector of checklistColumns.")
}
if(is.null(countryList)){
    stop("You must provide a countryList vector for rnaturalearth to download states from.")
}
if(is.null(occData)){
stop("You must provide occurrence data (occData). Honestly, what do you think I was gonna do without that?")
}
#### 1.0 Data prep ####
##### 1.1 Ascher ####
# Find Hawaii if true
if(findHawaii == TRUE){
checklist <- checklist %>%
dplyr::mutate(HI =
# IF Hawaii is mentioned in the jurisdiction_value column, then the species is known from there (introduced or not)
dplyr::if_else(stringr::str_detect(
string = jurisdiction_value,
pattern = "Hawaii"),
"HI", ""))
# Add Hawaii to checklistColumns
checklistColumns <- c(checklistColumns, "HI")
}
  # For the above columns, turn into a long-format spreadsheet. Each species in EACH state will have a
# row of data.
CL <- checklist %>%
tidyr::pivot_longer(data = .,
cols = tidyselect::all_of(checklistColumns),
names_to = "state", values_to = "stateValue") %>%
# remove empty cells for the values
tidyr::drop_na(stateValue) %>%
# add a certainty level == "Poor" for states with a "?"
dplyr::mutate(assignmentCertainty =
dplyr::if_else(stringr::str_detect(string = stateValue,
pattern = "\\?"),
"Poor", "Good"))
##### 1.2 rNaturalEarth ####
# Download world map using rnaturalearth packages
stateMap <- rnaturalearth::ne_states(returnclass = "sf", country = countryList) %>%
# Select only a subset of the naturalearthdata columns to extract
dplyr::select(adm1_code, iso_3166_2, name, region, postal, geometry)
  # Don't use spherical geometry
sf::sf_use_s2(FALSE)
# Can examine missing states between the two
  if(length(setdiff(sort(unique(stateMap$postal)), sort(unique(CL$state)))) > 0){
    message(paste0("In the Ascher list, you are missing the following state(s) that occur in the rnaturalearth package:\n",
stringr::str_c(setdiff(sort(unique(stateMap$postal)), sort(unique(CL$state))),
collapse = ", ")
))}
  if(length(setdiff(sort(unique(CL$state)), sort(unique(stateMap$postal)))) > 0){
    message(paste0("In the rnaturalearth list, you are missing the following state(s) that occur in the Ascher list:\n",
stringr::str_c(setdiff( sort(unique(CL$state)), sort(unique(stateMap$postal))),
collapse = ", ")
))}
#### 2.0 Use occ. data ####
##### 2.1 Angela data ####
# Turn occData into a simple point feature
points <- sf::st_as_sf(occData %>%
tidyr::drop_na(longitude, latitude),
coords = c("longitude", "latitude"),
# Assign the CRS from the rnaturalearth map to the point data
crs = sf::st_crs(stateMap)) %>%
# Use a subset of columns
dplyr::select(database_id, scientificName, species, family, subfamily, genus, specificEpithet,
country, stateProvince, eventDate, institutionCode, recordNumber, catalogNumber,
dataSource, verbatim_scientificName, geometry)
##### 2.2 Extraction ####
###### a. exactState ####
writeLines(" - Extracting state data from points...")
#Extract polygon information to points
points_extract <- sf::st_intersection(stateMap,
points)
  ###### b. neighbouringStates ####
# Get a list of states that share borders
statesBordering <- sf::st_intersects(stateMap, stateMap) %>%
paste(., sep = ";")
  # Make a new tibble with this information
neighbouringStates <- dplyr::tibble(
rowNum = 1:nrow(stateMap),
state = stateMap$postal,
neighbours = statesBordering,
# Modify the text in column
neighboursText = mgsub::mgsub(string = neighbours,
pattern = rowNum,
replacement = state) %>%
stringr::str_replace(string = .,
pattern = "c\\(", replacement = "") %>%
stringr::str_replace(string = .,
pattern = "\\)", replacement = "") %>%
stringr::str_replace(string = .,
pattern = ":", replacement = ", "))
# Make a long-format tibble with neighbouring states
neighbouringStates <- neighbouringStates %>%
tidyr::separate_rows(data = ., neighboursText,
sep = ",") %>%
# Remove states matching themselves
dplyr::filter(!(state == neighboursText))
# Remove extra spaces
neighbouringStates$neighboursText <- stringr::str_squish(neighbouringStates$neighboursText)
  # Join the datasets together so that we can make a list of adjacent states to match also
neighbouringStates <- CL %>%
dplyr::left_join(dplyr::select(neighbouringStates, c(state, neighboursText)),
by = "state", multiple = "all", relationship = "many-to-many")
##### 2.3 Compare ####
# Get a smaller subset of the data AND make a new columns with scientific name and state
points_simple <- points_extract %>%
dplyr::select(database_id, postal, genus, specificEpithet, country) %>%
dplyr::mutate(SciPost = stringr::str_c(genus, specificEpithet, postal, sep = "_"))
###### a. exactState ####
# Do the same for the ascher checklist
CL_simple <- CL %>%
# Select subset
dplyr::select(Genus, Species, state, assignmentCertainty) %>%
# Harmonise column names
dplyr::rename(genus = Genus,
species = Species,
postal = state) %>%
# Make the new column to match with
dplyr::mutate(SciPost = stringr::str_c(genus, species, postal, sep = "_"))
# Make a new columns showing if that species is expected in that state.
points_match <- points_simple %>%
dplyr::filter(stringr::str_detect(tolower(country), "united states")) %>%
dplyr::mutate(exactMatch = dplyr::if_else(SciPost %in% CL_simple$SciPost,
TRUE, FALSE)) %>%
# join the assignmentCertainty column if there's a match
dplyr::left_join(dplyr::select(CL_simple, SciPost, assignmentCertainty),
by = "SciPost", multiple = "all", relationship = "many-to-many" )
# Show a quick summary
# table(points_match$exactMatch, useNA = "always")
# table(points_match$assignmentCertainty, useNA = "always")
###### b. neighbouringStates ####
  # Get a smaller subset of the data AND make a new column with scientific name and state
nCL_simple <- neighbouringStates %>%
# Select subset
dplyr::select(Genus, Species, neighboursText, assignmentCertainty) %>%
# Harmonise column names
dplyr::rename(genus = Genus,
species = Species,
postal = neighboursText) %>%
# Make the new column to match with
dplyr::mutate(SciPost = stringr::str_c(genus, species, postal, sep = "_")) %>%
# Get a unique set
dplyr::distinct(SciPost, .keep_all = TRUE)
# Make a new columns showing if that species is expected in that state.
npoints_match <- points_simple %>%
dplyr::filter(stringr::str_detect(tolower(country), "united states")) %>%
dplyr::mutate(neighbourMatch = dplyr::if_else(SciPost %in% nCL_simple$SciPost,
TRUE, FALSE)) %>%
# Assign neighbourMatch for assignmentCertainty where occurrence was neighbour matched.
dplyr::mutate(nAssignmentCertainty = dplyr::if_else(neighbourMatch == TRUE,
"neighbourMatch",""))
# Show a quick summary
# table(npoints_match$neighbourMatch, useNA = "always")
# table(npoints_match$nAssignmentCertainty, useNA = "always")
#### 3.0 Merge ####
writeLines(" - Combining data...")
# Merge both points_match datasets
bpoints_match <- dplyr::tibble(points_match) %>%
    # Join the two datasets together keeping only neighbourMatch and assignmentCertainty from the
# neighbour-joined dataset
dplyr::left_join(dplyr::select(npoints_match, c(database_id, neighbourMatch,
nAssignmentCertainty)),
by = "database_id", multiple = "all", relationship = "many-to-many") %>%
# Remove geometry column
dplyr::select(!tidyselect::starts_with("geometry")) %>%
# Combine exactMatch and neighbourMatch
dplyr::mutate(matchType = dplyr::if_else(exactMatch == TRUE,
"exact", dplyr::if_else(neighbourMatch == TRUE,
"neighbour",
"noMatch"))) %>%
# combine assignmentCertainty and nAssignmentCertainty
dplyr::mutate(matchCertainty = dplyr::if_else(matchType == "exact",
assignmentCertainty,
dplyr::if_else(matchType == "neighbour",
nAssignmentCertainty, "NA")))
#### 4.0 Output ####
##### 4.1 User output ####
writeLines(paste0(
" - Finished. \n",
"We have matched ",
format(sum(bpoints_match$matchType == "exact", na.rm = TRUE), big.mark = ","),
" records to their exact state and ",
format(sum(bpoints_match$matchType == "neighbour", na.rm = TRUE), big.mark = ","),
" to an adjacent state.\n",
"We failed to match ",
format(sum(bpoints_match$matchType == "noMatch", na.rm = TRUE), big.mark = ","),
" occurrences to any 'exact' or 'neighbouring' state.\n",
"Of the 'exact' matches, ",
format(sum(bpoints_match$matchCertainty == "Good", na.rm = TRUE), big.mark = ","),
" are 'good matches' according to John Ascher and ",
    format(sum(bpoints_match$matchCertainty == "Poor", na.rm = TRUE), big.mark = ","),
    " are 'poor matches' that might benefit from checking."
))
bpoints_match <- bpoints_match %>%
# Select the columns to keep
dplyr::select(database_id, postal, matchType, matchCertainty) %>%
dplyr::rename(
stateMatch = matchType,
stateMatch_certainty = matchCertainty) %>%
# Set flag for those that don't pass statematch
dplyr::mutate(
.stateOutlier = dplyr::if_else(stateMatch != "exact" & stateMatch != "neighbour",
FALSE, TRUE))
# Merge with orignal dataset
output <- occData %>%
dplyr::left_join(bpoints_match, by = "database_id", multiple = "all", relationship = "many-to-many") %>%
dplyr::distinct(database_id, .keep_all = TRUE)
# return message
  message(paste("\nStateOutlieRs:\nFlagged",
format(sum(output$.stateOutlier == FALSE, na.rm = TRUE), big.mark = ","),
"records.\nThe column, '.stateOutlier',",
"was added to the database.\n"), sep = "")
# Return file
return(output)
} # END StateOutlieRs
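
# A hedged, illustrative call for this internal function (commented out; not run). The checklist
# is assumed to hold one column per state (named by postal code) plus Genus and Species columns
# (and a jurisdiction_value column if findHawaii = TRUE); occData must contain longitude and
# latitude columns plus the occurrence fields selected above. The object names and the
# state-column subset below are placeholders.
# occData_stateChecked <- StateOutlieRs(
#   checklist = checklistDF,
#   checklistColumns = c("AZ", "CA", "NM"),
#   countryList = c("United States of America"),
#   occData = occurrenceData,
#   findHawaii = FALSE)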
#### End of file: R/StateOutliers.R ####
#### 4. USGS formatter ####
#' Find, import, and format USGS data to Darwin Core
#'
#' The function finds, imports, formats, and creates metadata for the USGS dataset.
#'
#' @param path A character path to a directory that contains the USGS data, which will be found using
#' [BeeBDC::fileFinder()]. The function will look for "USGS_DRO_flat".
#' @param pubDate Character. The publication date of the dataset to update the metadata and citation.
#'
#' @return Returns a list with the occurrence data, "USGS_data", and the EML data, "EML_attributes".
#' @export
#'
#' @importFrom dplyr %>%
#'
#' @examples
#' \dontrun{
#' USGS_data <- USGS_formatter(path = DataPath, pubDate = "19-11-2022")
#' }
#'
USGS_formatter <- function(
path,
pubDate){
# locally bind variables to the function
. <-readr <- lubridate <- problems <- how0 <- how1 <- how2 <- how3 <- how4 <- days <-
field_note <- note <- SpeciesNotes <- SpeciesNotes<-DateEntered<-DateScanned<-ip<-position<-
time1<-time2<- occurrenceID <- NULL
#### require and checks ####
# Load required packages
requireNamespace("lubridate")
requireNamespace("dplyr")
# File name to search for
USGS_fileName <- "USGS_DRO_flat"
# Find the USGS data from the HomePath
USGS_loc <- fileFinder(path, USGS_fileName)
# Define ColsToKeep
ColsToKeep <- ColTypeR()[[1]] %>% names()
# Check if the data are present
if(is.null(USGS_loc) == TRUE){ # If there are no data matching the name...
stop(" - Oh dear, it looks like there are no USGS data in the HomePath. If you want to include such data, make sure that they exist.")
}
  # Check if the date has been entered
  if(missing(pubDate) == TRUE){ # If the publication date has not been provided...
    stop(paste(" - Oh no! It looks like you have not provided pubDate = dd-mm-yyyy. ",
"Please include this as the date that Sam Droege passed on his data in the above format."))
}
#### Find and import ####
if(grepl(pattern = ".txt.gz", USGS_loc) == TRUE){ # If the zipped file is present
writeLines("Only one zip file detected. Unzipping file to be read in.")
# unzip the file
R.utils::gunzip(
# File to unzip
      filename = USGS_loc,
# Where to put the extracted file
destname = stringr::str_remove(USGS_loc, ".gz"),
overwrite = FALSE,
remove = FALSE)
USGS_loc <- stringr::str_remove(USGS_loc, ".gz")
# User output
writeLines(paste(" - Unzipped file to: ", USGS_loc))
}
# If already extracted, use the extracted file
if(stringr::str_detect(pattern = USGS_fileName,
# Select the string that matches only (avoids a warning but works without)
string = USGS_loc)){
writeLines(paste(" - Reading in data file. This should not take too long.","\n",
"There may be some errors upon reading in depending on the state of the data.",
"\n", "One might consider reporting errors to Sam Droege to improve the dataset."))
# Read in the data file using "$" as the delimiter
USGS_data <- readr::read_delim(USGS_loc[[1]],
delim = "$")
# Make a copy of the problems, if they exist
USGS_problems <- readr::problems(USGS_data)
}
#### Metadata building ####
Attributes_USGS <- dplyr::tibble(
dataSource = "USGS_data",
alternateIdentifier = "Not provided",
title = "USGS_DRO database",
pubDate = lubridate::dmy(pubDate),
dateStamp = pubDate,
doi = "Not provided",
downloadLink = "Not provided, contact Sam Droege at [email protected]",
abstract = dplyr::lst(
"Note: If you are getting data from Sam Droege, you are getting one of two file types. Either an Excel spreadsheet with data already extracted for you, or the entire data set which is compressed into a file called USGS_DRO_flat.gz. The file USGS_DRO_flat.gz is how this file is usually shipped when we send the entire database. If you get this file it is compressed and needs to be uncompressed with 7-zip, gzip, or something similar ...after unzipping it becomes a very large txt file which can be imported into a database program for manipulation and searching. Data can also be extracted from BISON and GBIF and we assume the file structure below is reflected in their data also.
Be sure to check for spelling errors...we do global checks only every year or two.
Column Headings for USGS Bee Flat File (or an Excel Sheet if we send a subset)
ID. - Unique 6 digit specimen number with the database identifier (USGS_DRO) in front
name - Scientific name; ('Destroyed' = label destroyed without being used; 'NONBEE' = An insect that is not a bee, but was given a label; Blank = not yet identified, can indicate that specimen is waiting for identification, is lost, or was missed during data entry)
Sex - 'f' = female; 's' = male; 'u' = unknown
DeterminedBy - Who did the identification of the specimen
DeterminedWhen - Roughly when the identification was done
WhoScanned - Who scanned or entered the data
DateScanned - Date data were scanned
SpeciesNotes - Any notes about the species such as changes in identification
DateEntered - Date entered into the new database (done automatically)
COLLECTION.db - Collection event number. Up to 5 digit code preceded by database identifier (USGS_DRO) of the date, time, place of the collection event
gmt - Greenwich Mean Time of I think the time in which the Collection Event Number was created
Latitude - Latitude in decimal degrees
Longitude - Longitude in decimal degrees
accuracy - Degree of association of specimens to the Latitude/Longitude (1 = Specimens from the level of a country/state; 2 = specimens found in area of a county; 3 = specimens found in area of a park or refuge; 4 and 5 = Specimens located very close to coordinants
elevation - Elevation of location (rarely used)
country - Country where collection occurred
state - State where collection occurred
county - County where collection occurred
city - Nearest city or the geographic unit (i.e. Park or refuge) where collected
site - A site name, number, or designation within a city/park
position - similar to \"site\" but used less often
time1 - Date/time collection started or traps put out (format is yyyymmddmmss)
time2 - Date/time collection ended or traps picked up
days - Number of trapping days (used inconsistently)
who - The collector
email - Almost always Sam Droege's OLD email address
how0 - Technique used to capture bees
how1 - Number of traps that remained full of trap fluid when traps picked up
how2 - Bowl/ trap size
how3 - Trap color (rarely used since mostly we use several colors)
how4 - Trap liquid/soap
habitat - Rarely used
field_note - 'field_note' and 'note' used interchangeably to take notes about collection event
note - 'field_note' and 'note' used interchangeably to take notes about collection event"
),
citations = dplyr::lst("Citations not provided"),
downloadCitation = paste("Sam Droege. (",
lubridate::as_date(pubDate, format = "%m-%d-%y"),
"). United States Geological Survey bee data.",
sep = ""),
rights = dplyr::lst("Rights are not provided. Please seek permission for data use from Same Droege.")
) # END metadata
# combine the input eml and the attributes tibble into a list for output from the function
EML_attributes <- dplyr::lst("No_eml_from_USGS", Attributes_USGS)
names(EML_attributes) <- c("source_eml","Source_tibble")
#### Format the data ####
writeLines(" - Formatting the USGS dataset...")
writeLines(" - Formatting the dateTime...")
# Convert time1 and time2 to dateTime format
# Convert time1
USGS_data$time1 <- USGS_data$time1 %>%
lubridate::ymd_hms(., truncated = 5)
# Convert time2
USGS_data$time2 <- USGS_data$time2 %>%
lubridate::ymd_hms(., truncated = 5)
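# A quick sketch of what truncated = 5 allows here (hypothetical raw values):
#   lubridate::ymd_hms("20140608", truncated = 5)
#   #> "2014-06-08 UTC"
# Up to five right-most date-time components may be missing, so date-only
# strings and full date-time strings both parse to POSIXct.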
writeLines(" - Creating samplingProtocol and samplingEffort columns...")
# Create new columns with extra information that doesn't fit the established columns well
# Merge all of the extra collection info
USGS_data <- USGS_data %>% dplyr::mutate(
samplingProtocol = stringr::str_c(
dplyr::if_else(!is.na(how0),
paste0("Technique used: ", how0), ""),
dplyr::if_else(!is.na(how1),
paste0("Bowls full upon collection: ", how1), ""),
dplyr::if_else(!is.na(how2),
paste0("Sampling Bowl/trap size: ", how2), ""),
dplyr::if_else(!is.na(how3),
paste0("Trap colour: ", how3), ""),
dplyr::if_else(!is.na(how4),
paste0("Trap liquid: ", how4), ""),
sep = "|") %>%
# Remove extra bars.
stringr::str_replace_all(pattern = "(\\|){2,5}",
replacement = "\\|") %>%
stringr::str_replace_all(pattern = "(\\|$)+|(^\\|)+",
replacement = "")) %>%
# Add in samplingEffort
dplyr::mutate(
samplingEffort = stringr::str_c(
dplyr::if_else(!is.na(days),
paste0("sampling days: ", days), ""),
sep = "")) %>%
# Add in dataset information
dplyr::mutate(
datasetName = "USGS_DRO database",
datasetID = "USGS_DRO",
institutionCode = "USGS"
)
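# Worked example of the bar-separated concatenation pattern used above and
# throughout this file (hypothetical values; only how0 and how2 populated):
#   stringr::str_c("Technique used: pan trap", "", "Sampling Bowl/trap size: 3.25oz",
#                  "", "", sep = "|")
#   #> "Technique used: pan trap||Sampling Bowl/trap size: 3.25oz||"
# The two str_replace_all() calls then collapse runs of "|" and strip leading or
# trailing bars, leaving:
#   "Technique used: pan trap|Sampling Bowl/trap size: 3.25oz"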
writeLines(" - Creating the fieldNotes and dataSource columns...")
# Enter these extra data into a new column.
USGS_data <- USGS_data %>% dplyr::mutate(
fieldNotes = stringr::str_c(
dplyr::if_else(!is.na(field_note),
paste0("field_note: ", field_note), ""),
dplyr::if_else(!is.na(note),
paste0("note: ", note), ""),
dplyr::if_else(!is.na(SpeciesNotes),
paste0("SpeciesNotes: ", SpeciesNotes), ""),
dplyr::if_else(!is.na(DateEntered),
paste0("DateEntered: ", DateEntered), ""),
dplyr::if_else(!is.na(DateScanned),
paste0("DateScanned: ", DateScanned), ""),
dplyr::if_else(!is.na(ip),
paste0("ipAddress: ", ip), ""),
dplyr::if_else(!is.na(position),
paste0("position: ", position), ""),
dplyr::if_else(!is.na(time1),
paste0("time1: ", time1), ""),
dplyr::if_else(!is.na(time2),
paste0("time2: ", time2), ""),
sep = "|") %>%
# Remove extra bars.
stringr::str_replace_all(pattern = "(\\|){2,9}",
replacement = "\\|") %>%
stringr::str_replace_all(pattern = "(\\|$)+|(^\\|)+",
replacement = ""))
# Trim white spaces
# stringr::str_trim(side = "both")
# Set the dataSource
USGS_data$dataSource <- "USGS_data"
writeLines(" - Renaming and selecting columns...")
# These data must be formatted to match the other datasets created at the top of the R-script.
USGS_data <- USGS_data %>% # The data frame to match with
dplyr::rename("occurrenceID" = "ID.",
"scientificName" = "name",
"sex" = "sex",
"identifiedBy" = "DeterminedBy",
"dateIdentified" = "DeterminedWhen",
"eventID" = "COLLECTION.db",
"eventTime" = "gmt",
"decimalLatitude" = "latitude",
"decimalLongitude" = "longitude",
"coordinateUncertaintyInMeters" = "accuracy",
"stateProvince" = "state",
"municipality" = "city",
"Location" = "site",
"recordedBy" = "who",
"eventDate" = "time1") %>%
dplyr::mutate(
id = occurrenceID,
recordId = occurrenceID,
) %>%
# select columns that match the following string
dplyr::select( dplyr::matches(
# Use the caret "^" to signify the start of the string and the dollar sign "$" to signify the
# end of the string. Effectively, this will only return exact column-name matches.
paste("^",ColsToKeep,"$",sep="") ))
#### Save data ####
# Check for and create outpath if needed
outPath <- outFile_maker(path = path)
# Notify user that the occurrence file is being written
writeLines( paste(" - Writing occurrence data file...", "\n",
"Number of rows (records): ", format(nrow(USGS_data), big.mark=",",scientific=FALSE), "\n",
"Written to file called ", paste("USGS_formatted_", Sys.Date(), ".csv", sep = ""),
" at location ", outPath,
sep = ""))
# Write the occurrence file
write_excel_csv(USGS_data, paste(outPath, "/USGS_formatted_", Sys.Date(), ".csv", sep = ""))
# Notify user that the attributes file is being written
writeLines( paste(" - Writing attributes file...", "\n",
"Written to file called ", paste("USGS_attribute_files", Sys.Date(),".csv", sep="" ),
" at location ", outPath,
sep = ""))
# Format the attributes for exporting lists
EML_attributes$Source_tibble$abstract <- EML_attributes$Source_tibble$abstract %>% unlist()
EML_attributes$Source_tibble$citations <- EML_attributes$Source_tibble$citations %>% unlist()
EML_attributes$Source_tibble$rights <- EML_attributes$Source_tibble$rights %>% unlist()
# Write the attribute file
write_excel_csv(EML_attributes$Source_tibble, file = paste(outPath,
"/USGS_attribute_files",
Sys.Date(),".csv", sep="" ))
# IF there were problems detected, write these to a .csv file and notify the user
if(nrow(USGS_problems) > 0){
writeLines(" - Problems detected with the tibble. Saving to a .csv file...")
write_excel_csv(USGS_problems, file = paste(outPath, "/USGS_problems",
Sys.Date(),".csv", sep="" ))
}
# Return end product and print completion note
writeLines(paste(" - Fin.", sep = "\n"))
return( dplyr::lst(USGS_data, EML_attributes) )
} # END USGS_import
/scratch/gouwar.j/cran-all/cranData/BeeBDC/R/USGS_formatter.R
# These functions were written by James B Dorey beginning on the 17th of June 2022 to read in,
# Format and save various datasets.
# For questions, please email jbdorey[at]me.com
#### x.0 readr_BeeBDC function ####
#' A wrapper for all of the data readr_functions
#'
#' Read in a variety of data files that are specific to certain smaller data providers.
#' There is an internal readr function for each dataset and each one of these functions is called
#' by readr_BeeBDC. While these functions are internal, they are displayed in the documentation of
#' readr_BeeBDC for clarity.
#'
#' This function wraps several internal readr functions. Users may call
#' readr_BeeBDC and select the dataset name to import a certain dataset. These datasets include:
#'
#' Excel (.xlsx) formatted datasets: CAES, MABC, Col, Bal, MEPB, MPUJ, Arm, JoLa, and VicWam.
#'
#' CSV (.csv) formatted datasets: EPEL, ASP, BMin, BMont, Ecd, Gai, KP, EcoS, GeoL, EaCo, FSCA, SMC,
#' Lic, Dor, BBD, STRI, and PALA
#'
#' See Dorey et al. 2023 BeeBDC... for further details.
#'
#' @param dataset Character. The name of the dataset to be read in. For example readr_CAES can
#' be called using "readr_CAES" or "CAES". This is not case-sensitive.
#' @param path A character path. The path to the directory containing the data.
#' @param inFile Character or character path. The name of the file itself (can also be the
#' remainder of a path including the file name).
#' @param outFile Character or character path. The name of the Darwin Core format file to be saved.
#' @param dataLicense Character. The license to accompany each record in the Darwin Core 'license'
#' column.
#' @param sheet A character string. For those datasets read from an .xlsx format, provide the
#' sheet name.
#' NOTE: This will be ignored for .csv readr_ functions and required for .xlsx readr_ functions.
#'
#' @return A data frame that is in Darwin Core format.
#' @export
#'
#' @importFrom readr read_csv write_excel_csv
#' @importFrom dplyr rename mutate select if_else %>%
#' @importFrom lubridate ymd month
#' @importFrom stringr str_c
#'
#'
#' @examples
#' \dontrun{
#' # An example using a .xlsx file
#'Arm_Data <- readr_BeeBDC(
#' dataset = "Arm",
#' path = paste0(tempdir(), "/Additional_Datasets"),
#' inFile = "/InputDatasets/Bee database Armando_Final.xlsx",
#' outFile = "jbd_Arm_Data.csv",
#' sheet = "Sheet1",
#' dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")
#'
#'
#' # An example using a .csv file
#'EPEL_Data <- readr_BeeBDC(
#' dataset = "readr_EPEL",
#' path = paste0(tempdir(), "/Additional_Datasets"),
#' inFile = "/InputDatasets/bee_data_canada.csv",
#' outFile = "jbd_EPEL_data.csv",
#' dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")
#' }
readr_BeeBDC <- function(
dataset = NULL,
path = NULL,
inFile = NULL,
outFile = NULL,
dataLicense = NULL,
sheet = NULL){
#### x.0 Prep ####
##### x.1 Errors ####
###### a. FATAL errors ####
if(is.null(dataset)){
stop(" - Please provide an argument for dataset. This can be from any of the readr_ functions")
}
if(is.null(path)){
stop(" - Please provide an argument for the path to a folder containing your file.")
}
if(is.null(inFile)){
stop(" - Please provide an argument for the inFile name to find.")
}
if(is.null(outFile)){
stop(" - Please provide an argument for outFile to save as.")
}
if(is.null(dataLicense)){
stop(" - Please provide an argument for dataLicense.")
}
##### x.2 Data types ####
# Create the lists of potential data types for .xlsx or .csv inputs
excelTypes <- c("CAES", "MABC", "Col", "Bal", "MEPB", "MPUJ", "Arm", "JoLa", "VicWam")
csvTypes <- c("EPEL", "ASP", "BMin", "BMont", "Ecd", "Gai", "KP", "EcoS", "GeoL",
"EaCo", "FSCA", "SMC", "Lic", "Dor", "BBD", "PALA", "STRI")
# Wrong entry test
if(!tolower(dataset) %in% tolower(c(paste0("readr_",csvTypes), csvTypes,
paste0("readr_",excelTypes), excelTypes)) ){
stop("The dataset does not match a readr_BeeBDC function")
}
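# For example, dataset = "caes", "CAES", or "readr_CAES" would all pass this
# check and dispatch to readr_CAES() below; any other string stops here with
# the error above (a sketch of the case-insensitive matching).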
#### x.0 Choose function ####
##### x.1 Excel functions ####
# EXCEL test
if(tolower(dataset) %in% tolower(c(paste0("readr_",excelTypes), excelTypes)) ){
message("A .xlsx data type was chosen...")
# If no sheet is provided
if(is.null(sheet)){
stop(" - No sheet argument was provided. Please check for the sheet name to import.")
}
###### a. CAES ####
if(tolower(dataset) %in% tolower(c("readr_CAES", "CAES")) ){
dataOut <- readr_CAES(path = path,
inFile = inFile,
outFile = outFile,
dataLicense = dataLicense,
sheet = sheet)}
##### b. MABC ####
if(tolower(dataset) %in% tolower(c("readr_MABC", "MABC")) ){
dataOut <- readr_MABC(path = path,
inFile = inFile,
outFile = outFile,
dataLicense = dataLicense,
sheet = sheet)}
##### c. Col ####
if(tolower(dataset) %in% tolower(c("readr_Col", "Col")) ){
dataOut <- readr_Col(path = path,
inFile = inFile,
outFile = outFile,
dataLicense = dataLicense,
sheet = sheet)}
##### d. Bal ####
if(tolower(dataset) %in% tolower(c("readr_Bal", "Bal")) ){
dataOut <- readr_Bal(path = path,
inFile = inFile,
outFile = outFile,
dataLicense = dataLicense,
sheet = sheet)}
##### e. MEPB ####
if(tolower(dataset) %in% tolower(c("readr_MEPB", "MEPB")) ){
dataOut <- readr_MEPB(path = path,
inFile = inFile,
outFile = outFile,
dataLicense = dataLicense,
sheet = sheet)}
##### f. MPUJ ####
if(tolower(dataset) %in% tolower(c("readr_MPUJ", "MPUJ")) ){
dataOut <- readr_MPUJ(path = path,
inFile = inFile,
outFile = outFile,
dataLicense = dataLicense,
sheet = sheet)}
##### g. Arm ####
if(tolower(dataset) %in% tolower(c("readr_Arm", "Arm")) ){
dataOut <- readr_Arm(path = path,
inFile = inFile,
outFile = outFile,
dataLicense = dataLicense,
sheet = sheet)}
##### h. JoLa ####
if(tolower(dataset) %in% tolower(c("readr_JoLa", "JoLa")) ){
dataOut <- readr_JoLa(path = path,
inFile = inFile,
outFile = outFile,
dataLicense = dataLicense,
sheet = sheet)}
##### i. VicWam ####
if(tolower(dataset) %in% tolower(c("readr_VicWam", "VicWam")) ){
dataOut <- readr_VicWam(path = path,
inFile = inFile,
outFile = outFile,
dataLicense = dataLicense,
sheet = sheet)}
}# END Excel functions
##### x.2 CSV functions ####
# CSV test
if(tolower(dataset) %in% tolower(c(paste0("readr_",csvTypes), csvTypes)) ){
message("A .csv data type was chosen...")
###### a. EPEL ####
if(tolower(dataset) %in% tolower(c("readr_EPEL", "EPEL")) ){
dataOut <- readr_EPEL(path = path,
inFile = inFile,
outFile = outFile,
dataLicense = dataLicense)}
##### b. ASP ####
if(tolower(dataset) %in% tolower(c("readr_ASP", "ASP")) ){
dataOut <- readr_ASP(path = path,
inFile = inFile,
outFile = outFile,
dataLicense = dataLicense)}
##### c. BMin ####
if(tolower(dataset) %in% tolower(c("readr_BMin", "BMin")) ){
dataOut <- readr_BMin(path = path,
inFile = inFile,
outFile = outFile,
dataLicense = dataLicense)}
##### d. BMont ####
if(tolower(dataset) %in% tolower(c("readr_BMont", "BMont")) ){
dataOut <- readr_BMont(path = path,
inFile = inFile,
outFile = outFile,
dataLicense = dataLicense)}
##### e. Ecd ####
if(tolower(dataset) %in% tolower(c("readr_Ecd", "Ecd")) ){
dataOut <- readr_Ecd(path = path,
inFile = inFile,
outFile = outFile,
dataLicense = dataLicense)}
##### f. Gai ####
if(tolower(dataset) %in% tolower(c("readr_Gai", "Gai")) ){
dataOut <- readr_Gai(path = path,
inFile = inFile,
outFile = outFile,
dataLicense = dataLicense)}
##### g. KP ####
if(tolower(dataset) %in% tolower(c("readr_KP", "KP")) ){
dataOut <- readr_KP(path = path,
inFile = inFile,
outFile = outFile,
dataLicense = dataLicense)}
##### h. EcoS ####
if(tolower(dataset) %in% tolower(c("readr_EcoS", "EcoS")) ){
dataOut <- readr_EcoS(path = path,
inFile = inFile,
outFile = outFile,
dataLicense = dataLicense)}
##### i. GeoL ####
if(tolower(dataset) %in% tolower(c("readr_GeoL", "GeoL")) ){
dataOut <- readr_GeoL(path = path,
inFile = inFile,
outFile = outFile,
dataLicense = dataLicense)}
##### j. EaCO ####
if(tolower(dataset) %in% tolower(c("readr_EaCO", "EaCo")) ){
dataOut <- readr_EaCO(path = path,
inFile = inFile,
outFile = outFile,
dataLicense = dataLicense)}
##### k. FSCA ####
if(tolower(dataset) %in% tolower(c("readr_FSCA", "FSCA")) ){
dataOut <- readr_FSCA(path = path,
inFile = inFile,
outFile = outFile,
dataLicense = dataLicense)}
##### l. SMC ####
if(tolower(dataset) %in% tolower(c("readr_SMC", "SMC")) ){
dataOut <- readr_SMC(path = path,
inFile = inFile,
outFile = outFile,
dataLicense = dataLicense)}
##### m. Lic ####
if(tolower(dataset) %in% tolower(c("readr_Lic", "Lic")) ){
dataOut <- readr_Lic(path = path,
inFile = inFile,
outFile = outFile,
dataLicense = dataLicense)}
##### n. Dor ####
if(tolower(dataset) %in% tolower(c("readr_Dor", "Dor")) ){
dataOut <- readr_Dor(path = path,
inFile = inFile,
outFile = outFile,
dataLicense = dataLicense)}
##### o. BBD ####
if(tolower(dataset) %in% tolower(c("readr_BBD", "BBD")) ){
dataOut <- readr_BBD(path = path,
inFile = inFile,
outFile = outFile,
dataLicense = dataLicense)}
##### p. STRI ####
if(tolower(dataset) %in% tolower(c("readr_STRI", "STRI")) ){
dataOut <- readr_STRI(path = path,
inFile = inFile,
outFile = outFile,
dataLicense = dataLicense)}
##### q. PALA ####
if(tolower(dataset) %in% tolower(c("readr_PALA", "PALA")) ){
dataOut <- readr_PALA(path = path,
inFile = inFile,
outFile = outFile,
dataLicense = dataLicense)}
} #END csv
return(dataOut)
} # END readr_BeeBDC
#### 1.0 EPEL ####
#' @describeIn readr_BeeBDC
#'
#' Reads specific data files into Darwin Core format
#'
#'
readr_EPEL <- function(path = NULL,
inFile = NULL,
outFile = NULL,
dataLicense = NULL){
# locally bind variables to the function
catalog_number<-pollinator_family<-pollinator_genus<-pollinator_species<-collection_method<-
day_collected<-month_collected<-year_collected<-location_description<-latitude<-longitude<-
basis_of_record<-genus<-specificEpithet<-year<-day<-eventDate<-collector_number<-
location_name<-habitat<-.<-catalogNumber <- NULL
#### 1.1 Prep ####
requireNamespace("dplyr")
requireNamespace("lubridate")
#### 1.2 Read+ ####
EPEL_Data <- readr::read_csv(paste(path, inFile, sep = "/"),
trim_ws = TRUE) %>%
# Rename some columns to make the consistent with DarwinCore
dplyr::rename(
catalogNumber = catalog_number,
family = pollinator_family,
genus = pollinator_genus,
specificEpithet = pollinator_species,
samplingProtocol = collection_method,
day = day_collected,
month = month_collected,
year = year_collected,
locality = location_description,
decimalLatitude = latitude,
decimalLongitude = longitude,
basisOfRecord = basis_of_record) %>%
# Make new columns
dplyr::mutate(
# Merge to scientific name
scientificName = paste(genus, specificEpithet),
# Add data source
dataSource = "EPEL_Anthophila",
# Add eventDate
eventDate = lubridate::ymd(paste(year, month, day, sep = "-"),
truncated = 2),
month = lubridate::month(eventDate)) %>%
# Add fieldNotes
dplyr::mutate(
fieldNotes = stringr::str_c(
dplyr::if_else(!is.na(collector_number),
paste0("Collector_number: ", collector_number), ""),
dplyr::if_else(!is.na(location_name),
paste0("location_name: ", location_name), ""),
dplyr::if_else(!is.na(habitat),
paste0("habitat: ", habitat), ""),
sep = "|") %>%
# Remove extra bars "|".
stringr::str_replace_all(pattern = "(\\|){2,9}",
replacement = "\\|") %>%
stringr::str_replace_all(pattern = "(\\|$)+|(^\\|)+",
replacement = "")) %>%
# Remove these input columns
dplyr::select(!c(collector_number, location_name, habitat)) %>%
# add the database_id column
dplyr::mutate(
database_id = paste("EPEL_data_", 1:nrow(.), sep = ""),
.before = catalogNumber) %>%
dplyr::mutate(license = dataLicense) %>%
# Add dataset information
dplyr::mutate(
datasetName = "Elle Pollination Ecology Lab",
datasetID = "EPEL",
institutionCode = "SFU"
)
#### 1.3 Out ####
# Save the dataset
readr::write_excel_csv(EPEL_Data, file = paste(path, outFile, sep = "/"))
# Return data
return(EPEL_Data)
} # END readr_EPEL
#### 2.0 ASP ####
#' @describeIn readr_BeeBDC
#'
#' Reads specific data files into Darwin Core format
#'
#'
readr_ASP <- function(path = NULL,
inFile = NULL,
outFile = NULL,
dataLicense = NULL){
# locally bind variables to the function
Tribe <- Morphospecies <- Successional_Stage <- genus <- specificEpithet <- NULL
eventDate <- catalogNumber <- . <- elevation <- NULL
#### 2.1 Prep ####
requireNamespace("dplyr")
requireNamespace("lubridate")
#### 2.2 Read+ ####
ASP_data <- readr::read_csv(paste(path, inFile, sep = "/"),
trim_ws = TRUE) %>%
dplyr::mutate(verbatimElevation = elevation) %>%
dplyr::mutate(
# Add previousIdentifications
previousIdentifications = stringr::str_c(
dplyr::if_else(!is.na(Tribe),
paste0("Tribe: ", Tribe), ""),
dplyr::if_else(!is.na(Morphospecies),
paste0("Morphospecies: ", Morphospecies), ""),
sep = "|") %>%
# Remove extra bars "|".
stringr::str_replace_all(pattern = "(\\|){2,9}",
replacement = "\\|") %>%
stringr::str_replace_all(pattern = "(\\|$)+|(^\\|)+",
replacement = "")) %>%
dplyr::mutate(
# Add locationRemarks
locationRemarks = paste0("Successional Stage:", Successional_Stage),
# Add scientificName
scientificName = paste(genus, specificEpithet),
# Add dataSource
dataSource = "ASP_Anthophila",
# Add eventDate
eventDate = lubridate::dmy(eventDate,
truncated = 2),
# This catalogNumber is not unique on its own, so prefix it with 'ASP_' to make it more unique
catalogNumber = dplyr::if_else(!is.na(catalogNumber),
paste0("ASP_", catalogNumber), "")) %>%
# add the database_id column
dplyr::mutate(
database_id = paste("ASP_data_", 1:nrow(.), sep = ""),
.before = catalogNumber) %>%
dplyr::mutate(license = dataLicense) %>%
# Add dataset information
dplyr::mutate(
datasetName = "Allan Smith-Pardo",
datasetID = "ASP"
)
#### 2.3 Out ####
# Save the dataset
readr::write_excel_csv(ASP_data, file = paste(path, outFile, sep = "/"))
# Return data
return(ASP_data)
} # END readr_ASP
#### 3.0 BMin ####
#' @describeIn readr_BeeBDC
#'
#' Reads specific data files into Darwin Core format
#'
#'
readr_BMin <- function(path = NULL,
inFile = NULL,
outFile = NULL,
dataLicense = NULL){
# locally bind variables to the function
eventDate <- . <- catalogNumber <- NULL
catalog_number <- pollinator_family <- pollinator_genus <- pollinator_species <- NULL
collection_method <- day_collected <- month_collected <- year_collected <- NULL
location_description <- latitude <- longitude <- basis_of_record <- genus <- NULL
specificEpithet <- year <- day <- eventDate <- collector_number <- location_name <- NULL
habitat <- . <- catalogNumber <- NULL
#### 3.1 Prep ####
requireNamespace("dplyr")
requireNamespace("lubridate")
#### 3.2 Read+ ####
BMin_data <- readr::read_csv(paste(path, inFile, sep = "/"),
trim_ws = TRUE) %>%
dplyr::mutate(
# Format eventDate
eventDate = lubridate::dmy(eventDate,
truncated = 2, quiet = TRUE),
dataSource = "BMin_Anthophila") %>%
# add the database_id column
dplyr::mutate(
database_id = paste("BMin_data_", 1:nrow(.), sep = ""),
.before = catalogNumber) %>%
dplyr::mutate(license = dataLicense) %>%
# Add dataset information
dplyr::mutate(
datasetName = "Robert Minckley",
datasetID = "BMin",
institutionCode = "University of Rochester"
)
#### 3.3 Out ####
# Save the dataset
readr::write_excel_csv(BMin_data, file = paste(path, outFile, sep = "/"))
# Return data
return(BMin_data)
} # END readr_BMin
#### 4.0 BMont ####
#' @describeIn readr_BeeBDC
#'
#' Reads specific data files into Darwin Core format
#'
#'
readr_BMont <- function(path = NULL,
inFile = NULL,
outFile = NULL,
dataLicense = NULL){
# locally bind variables to the function
occurence_lsid <- fieldNotes <- GPS_device <- . <- catalogNumber <- eventDate <- dateTest <- NULL
tempDate <- mdy <- VerbatimEventDate <- NULL
#### 4.1 Prep ####
requireNamespace("dplyr")
requireNamespace("lubridate")
#### 4.2 Read+ ####
BMont_data <- readr::read_csv(paste(path, inFile, sep = "/"),
trim_ws = TRUE) %>%
dplyr::rename(
occurrenceID = occurence_lsid,
verbatimEventDate = VerbatimEventDate) %>%
dplyr::mutate(
# Add fieldNotes
fieldNotes = stringr::str_c(
dplyr::if_else(!is.na(fieldNotes),
paste0("fieldNotes: ", fieldNotes), ""),
dplyr::if_else(!is.na(GPS_device),
paste0("GPS_device: ", GPS_device), ""),
sep = "|") %>%
# Remove extra bars "|".
stringr::str_replace_all(pattern = "(\\|){2,9}",
replacement = "\\|") %>%
stringr::str_replace_all(pattern = "(\\|$)+|(^\\|)+",
replacement = "")) %>%
dplyr::mutate(dataSource = "BMont_Anthophila") %>%
# add the database_id column
dplyr::mutate(
database_id = paste("BMont_data_", 1:nrow(.), sep = ""),
.before = catalogNumber) %>%
# Find the date ranges
dplyr::mutate(dateTest = dplyr::if_else(stringr::str_count(eventDate) > 11,
stringr::str_replace(string = eventDate,
pattern = "/", replacement = ","),
"FALSE")) %>%
# Append these to fieldNotes
dplyr::mutate(fieldNotes = dplyr::if_else(dateTest != FALSE & !is.na(dateTest),
paste0(fieldNotes,
"|dateRange: ", eventDate), fieldNotes)) %>%
# Now remove those eventDates with a range and instead insert the start date
dplyr::mutate(tempDate = dplyr::if_else(dateTest != FALSE & !is.na(dateTest),
paste0(stringr::str_replace(eventDate,
pattern = ".*/",
replacement = "")),
eventDate)) %>%
# Pick up dates of different formats and format together.
dplyr::mutate(mdy = lubridate::mdy(tempDate, quiet = TRUE)) %>%
dplyr::mutate(ymd = lubridate::ymd(tempDate, quiet = TRUE)) %>%
tidyr::unite(col = eventDate,
mdy, ymd, sep = "", na.rm = TRUE,
remove = FALSE) %>%
# Remove working columns
dplyr::select(!c(dateTest, tempDate, mdy, ymd)) %>%
dplyr::mutate(license = dataLicense) %>%
# Add dataset information
dplyr::mutate(
datasetName = "Bombus Montana",
datasetID = "BMon"
)
#### 4.3 Out ####
# Save the dataset
readr::write_excel_csv(BMont_data, file = paste(path, outFile, sep = "/"))
# Return data
return(BMont_data)
} # END readr_BMont
#### 5.0 Ecd ####
#' @describeIn readr_BeeBDC
#'
#' Reads specific data files into Darwin Core format
#'
#'
readr_Ecd <- function(path = NULL,
inFile = NULL,
outFile = NULL,
dataLicense = NULL){
# locally bind variables to the function
year <- day <- institutionCode <- id <- . <- catalogNumber <- recordID <- NULL
#### 5.1 Prep ####
requireNamespace("dplyr")
requireNamespace("lubridate")
#### 5.2 Read+ ####
Ecd_data <- readr::read_csv(paste(path, inFile, sep = "/"),
trim_ws = TRUE) %>%
dplyr::rename(recordId = recordID) %>%
dplyr::mutate(
# Format eventDate
eventDate = lubridate::ymd(paste(year, month, day, sep = "-"),
truncated = 2, quiet = TRUE),
dataSource = "Ecd_Anthophila",
# I want to make sure id is unique...
id = paste(institutionCode, id, sep = "_")) %>%
# add the database_id column
dplyr::mutate(
database_id = paste("Ecd_data_", 1:nrow(.), sep = ""),
.before = catalogNumber) %>%
dplyr::mutate(license = dataLicense) %>%
# Add dataset information
dplyr::mutate(
datasetName = "Ecdysis",
datasetID = "Ecd"
)
#### 5.3 Out ####
# Save the dataset
readr::write_excel_csv(Ecd_data, file = paste(path, outFile, sep = "/"))
# Return data
return(Ecd_data)
} # END readr_Ecd
#### 6.0 Gai ####
#' @describeIn readr_BeeBDC
#'
#' Reads specific data files into Darwin Core format
#'
#'
readr_Gai <- function(path = NULL,
inFile = NULL,
outFile = NULL,
dataLicense = NULL){
# locally bind variables to the function
species<-subspecies<-SpecimenLocation<-eventTime<-EndTime<-TempStart<-TempEnd<-WindStart<-
WindEnd<-SkyStart<-SkyEnd<-Site<-siteLocality<-syd<-eventDate<-.<-institutionCode <- NULL
#### 6.1 Prep ####
requireNamespace("dplyr")
requireNamespace("lubridate")
#### 6.2 Read+ ####
Gai_data <- readr::read_csv(paste(path, inFile, sep = "/"),
trim_ws = TRUE) %>%
# Make columns DarwinCore-compatible
dplyr::rename(
collectionCode = 'Collection Code',
otherCatalogNumbers = 'Other Catalog Number',
specificEpithet = species,
infraspecificEpithet = subspecies) %>%
# Add locationRemarks from a bunch of other columns
dplyr::mutate(
locationRemarks = stringr::str_c(
dplyr::if_else(!is.na(SpecimenLocation), paste0("SpecimenLocation: ", SpecimenLocation), ""),
dplyr::if_else(!is.na(eventTime),
paste0("StartTime: ", eventTime), ""),
dplyr::if_else(!is.na(EndTime),
paste0("EndTime: ", EndTime), ""),
dplyr::if_else(!is.na(TempStart),
paste0("TempStart: ", TempStart), ""),
dplyr::if_else(!is.na(TempEnd),
paste0("TempEnd: ", TempEnd), ""),
dplyr::if_else(!is.na(WindStart),
paste0("WindStart: ", WindStart), ""),
dplyr::if_else(!is.na(WindEnd),
paste0("WindEnd: ", WindEnd), ""),
dplyr::if_else(!is.na(SkyStart),
paste0("SkyStart: ", SkyStart), ""),
dplyr::if_else(!is.na(SkyEnd),
paste0("SkyEnd: ", SkyEnd), ""),
dplyr::if_else(!is.na(Site),
paste0("Site: ", Site), ""),
dplyr::if_else(!is.na(siteLocality),
paste0("siteLocality: ", siteLocality), ""),
dplyr::if_else(!is.na(syd),
paste0("syd: ", syd), ""),
sep = "|") %>%
# Remove extra bars "|".
stringr::str_replace_all(pattern = "(\\|){2,9}",
replacement = "\\|") %>%
stringr::str_replace_all(pattern = "(\\|$)+|(^\\|)+",
replacement = "")) %>%
dplyr::mutate(
# Format eventDate
eventDate = lubridate::mdy(eventDate,
truncated = 2, quiet = FALSE),
dataSource = "Gai_Anthophila") %>%
# add the database_id column
dplyr::mutate(
database_id = paste("Gai_data_", 1:nrow(.), sep = ""),
.before = institutionCode) %>%
dplyr::mutate(license = dataLicense) %>%
# Add dataset information
dplyr::mutate(
datasetName = "Gaiarsa et al. 2021",
datasetID = "Gai"
)
#### 6.3 Out ####
# Save the dataset
readr::write_excel_csv(Gai_data, file = paste(path, outFile, sep = "/"))
# Return data
return(Gai_data)
} # END readr_Gai
#### 7.0 CAES ####
#' @describeIn readr_BeeBDC
#'
#' Reads specific data files into Darwin Core format
#'
#'
readr_CAES <- function(path = NULL,
inFile = NULL,
outFile = NULL,
dataLicense = NULL,
sheet = "Sheet1"){
# locally bind variables to the function
Tribe <- Morphospecies <- Successional_Stage <- genus <- specificEpithet <- NULL
eventDate <- catalogNumber <- . <- NULL
.<-PBIUSI<-Family<-Subfamily<-Genus<-species<-Country<-State_Prov<-Sec_Subdiv<-Locality<-Lat<-
Lon<-Start_Date<-Collector<-Sex<-Inst_Code<-Det_By<-Det_Date<-Coll_Method<-Spec_Count<-
Elev_m<-Trip_Code<-Project<-Det_History<-Tribe<-Host_Genus<-Host_Common_Name<-
Host_Relation<-Host_Location<-Loc_Notes<-Lat_Lon_Method<-End_Date<-eventDate<-
Elev_Det<-Macro_Habitat<-Micro_Habitat<-Pres_Method<-Spec_Notes<-genus<-specificEpithet<-
Lat_Lon_Accuracy<-Host_species<-Host_Family<-catalogNumber <- NULL
#### 7.1 Prep ####
# This will load the requireNamespaced packages. These packages may still need to be installed to
# R using install.packages("dplyr")... etc.
requireNamespace("dplyr")
requireNamespace("lubridate")
#### 7.2 Read+ ####
# Reads in the .csv file, trims the white spaces, and formats the columns to the correct type
CAES_data <- openxlsx::read.xlsx(paste(path, inFile, sep = "/"), sheet = sheet) %>%
# Turn spaces into "_" in column names
dplyr::rename_with(., ~ gsub(" ", "_", .x, fixed = TRUE)) %>%
# Make columns DarwinCore-compatible
dplyr::rename(
catalogNumber = PBIUSI,
family = Family,
subfamily = Subfamily,
genus = Genus,
specificEpithet = species,
country = Country,
stateProvince = State_Prov,
county = Sec_Subdiv,
locality = Locality,
decimalLatitude = Lat,
decimalLongitude = Lon,
eventDate = Start_Date,
recordedBy = Collector,
sex = Sex,
institutionCode = Inst_Code,
identifiedBy = Det_By,
dateIdentified = Det_Date,
samplingProtocol = Coll_Method,
individualCount = Spec_Count,
elevation = Elev_m,
otherCatalogNumbers = Trip_Code,
bibliographicCitation = Project,
identificationRemarks = Det_History) %>%
# Add a bunch of columns from other columns
dplyr::mutate(
# Add previousIdentifications
previousIdentifications = paste0(
# Only do this if there is something in the cell: is.na() finds NA values and the "!" negates this, keeping complete cases only.
dplyr::if_else(!is.na(Tribe), paste0("Tribe: ", Tribe),"")),
# Add associatedTaxa
# This will ONLY concatenate columns where they have a value.
associatedTaxa = stringr::str_c(
dplyr::if_else(!is.na(Host_Genus), paste0("Host_id: ", Host_Genus), ""),
dplyr::if_else(!is.na(Host_Common_Name),
paste0("Host_commonName: ", Host_Common_Name), ""),
dplyr::if_else(!is.na(Host_Relation),
paste0("Host_relation: ", Host_Relation), ""),
dplyr::if_else(!is.na(Host_Location),
paste0("Host_location: ", Host_Location), ""),
sep = "|") %>%
# Remove extra bars "|".
stringr::str_replace_all(pattern = "(\\|){2,9}",
replacement = "\\|") %>%
stringr::str_replace_all(pattern = "(\\|$)+|(^\\|)+",
replacement = "")) %>%
# Do the same as the last mutate, but for fieldNotes
dplyr::mutate(
fieldNotes = stringr::str_c(
dplyr::if_else(!is.na(Loc_Notes),
paste0("fieldNotes: ", Loc_Notes), ""),
dplyr::if_else(!is.na(Lat_Lon_Method),
paste0("GPS_device: ", Lat_Lon_Method), ""),
dplyr::if_else(!is.na(End_Date),
paste0("Sampling period: ", eventDate, " to ", End_Date), ""),
dplyr::if_else(!is.na(Elev_Det),
paste0("Elevation_by: ", Elev_Det), ""),
dplyr::if_else(!is.na(Macro_Habitat),
paste0("Macro_Habitat: ", Macro_Habitat), ""),
dplyr::if_else(!is.na(Micro_Habitat),
paste0("Micro_Habitat: ", Micro_Habitat), ""),
dplyr::if_else(!is.na(Pres_Method),
paste0("Preservation_method: ", Pres_Method), ""),
dplyr::if_else(!is.na(Spec_Notes),
paste0("Specimen_notes: ", Spec_Notes), ""),
sep = "|") %>%
# Remove extra bars "|".
stringr::str_replace_all(pattern = "(\\|){2,9}",
replacement = "\\|") %>%
stringr::str_replace_all(pattern = "(\\|$)+|(^\\|)+",
replacement = "")) %>%
# Add scientificName
dplyr::mutate(
scientificName = stringr::str_c(
dplyr::if_else(!is.na(genus),
genus, ""),
dplyr::if_else(!is.na(specificEpithet),
specificEpithet, ""),
sep = " ")) %>%
# Get the coordinateUncertaintyInMeters by taking the UPPER uncertainty limit from the provided
# ranges. I.e., "100-1000" becomes "1000"
dplyr::mutate(
coordinateUncertaintyInMeters = stringr::str_extract(
Lat_Lon_Accuracy, pattern = "-.*|[0-9]+" ),
# Format eventDate
eventDate = lubridate::ymd(
# First convert from silly excel numeric format to real dates...
eventDate %>%
as.numeric() %>%
as.Date(., origin = "1899-12-30"),
truncated = 2, quiet = FALSE), # 215 failed to parse.
dataSource = "CAES_Anthophila") %>%
# Add the scientificNameAuthorship column using the specificEpithet column
# dplyr::mutate(scientificNameAuthorship = stringr::str_replace(
# string = verbatimScientificName,
# pattern = paste0(".*", specificEpithet, " "),
# replacement = ""
# )) %>%
# Remove those now redundant columns
dplyr::select(!c(Tribe, Host_Genus, Host_species, Host_Family, Host_Common_Name,
Host_Relation, Host_Location, Loc_Notes, Lat_Lon_Method,
End_Date, Elev_Det, Macro_Habitat, Micro_Habitat, Pres_Method, Spec_Notes,
Lat_Lon_Accuracy)) %>%
# Remove double white-spaces
apply(., 2, stringr::str_squish) %>% dplyr::as_tibble() %>%
# add the database_id column
dplyr::mutate(
database_id = paste("CAES_data_", 1:nrow(.), sep = ""),
.before = catalogNumber) %>%
dplyr::mutate(license = dataLicense) %>%
# Add dataset information
dplyr::mutate(
datasetName = "Connecticut Agricultural Experiment Station",
datasetID = "CAES"
)
#### 7.3 Out ####
# Save the dataset
readr::write_excel_csv(CAES_data, file = paste(path, outFile, sep = "/"))
# Return the data from the function to the user
return(CAES_data)
} # END readr_CAES
#### 9.0 KP ####
#' @describeIn readr_BeeBDC
#'
#' Reads specific data files into Darwin Core format
#'
#'
readr_KP <- function(path = NULL,
inFile = NULL,
outFile = NULL,
dataLicense = NULL){
# locally bind variables to the function
.<-ID<-Family<-Subfamily<-Genus<-Det<-Number<-Collection_method<-Collector<-Order<-Suborder<-
subgenus<-species<-subspecies<-author<-whole_sci_name<-Country<-State<-County_Parish<-
Location<-Lat<-Long<-decimalLatitude<-decimalLongitude<-Tribe<-sp_group<-Male<-Female<-
genus<-specificEpithet<-infraspecificEpithet<-Collection_date<-catalogNumber <- NULL
#### 9.1 Prep ####
# This will load the requireNamespaced packages. These packages may still need to be installed to
# R using install.packages("dplyr")... etc.
requireNamespace("dplyr")
requireNamespace("lubridate")
#### 9.2 Read+ ####
# Reads in the .xlsx file, trims the white spaces, and formats the columns to the correct type
KP_data <- openxlsx::read.xlsx(paste(path, inFile, sep = "/")) %>%
# Turn spaces into "_" in column names
dplyr::rename_with(., ~ gsub(" ", "_", .x, fixed = TRUE)) %>%
# Make columns DarwinCore-compatible
dplyr::rename(
catalogNumber = ID,
family = Family,
subfamily = Subfamily,
genus = Genus,
identifiedBy = Det,
individualCount = Number,
samplingProtocol = Collection_method,
recordedBy = Collector,
order = Order,
suborder = Suborder,
subgenus = subgenus,
specificEpithet = species,
infraspecificEpithet = subspecies,
scientificNameAuthorship = author,
verbatimScientificName = whole_sci_name,
country = Country,
stateProvince = State,
county = County_Parish,
locality = Location,
decimalLatitude = Lat,
decimalLongitude = Long) %>%
# round the coordinates to six decimal places
dplyr::mutate(
decimalLatitude = decimalLatitude %>% as.numeric() %>% base::round(digits = 6) %>%
suppressWarnings(classes = "warning"),
decimalLongitude = decimalLongitude %>% as.numeric() %>% base::round(digits = 6) %>%
suppressWarnings(classes = "warning")) %>%
# Add a bunch of columns from other columns
dplyr::mutate(
previousIdentifications = stringr::str_c(
dplyr::if_else(!is.na(Tribe),
paste0("Tribe: ", Tribe), ""),
dplyr::if_else(!is.na(sp_group),
paste0("sp_group: ", sp_group), ""),
sep = "|") %>%
# Remove extra bars "|".
stringr::str_replace_all(pattern = "(\\|){2,9}",
replacement = "\\|") %>%
stringr::str_replace_all(pattern = "(\\|$)+|(^\\|)+",
replacement = "")) %>%
# Do the same as the last mutate, but for fieldNotes
dplyr::mutate(
sex = stringr::str_c(
dplyr::if_else(!is.na(Male) & Male != 0,
paste0(Male, " M"), ""),
dplyr::if_else(!is.na(Female) & Female != 0,
paste0(Female, " F"), ""),
sep = "|") %>%
# Remove extra bars "|".
stringr::str_replace_all(pattern = "(\\|){2,9}",
replacement = "\\|") %>%
stringr::str_replace_all(pattern = "(\\|$)+|(^\\|)+",
replacement = "")) %>%
# Format eventDate and add dataSource
dplyr::mutate(
# Create scientificName
scientificName = stringr::str_c(
dplyr::if_else(!is.na(genus),
paste0(genus), ""),
dplyr::if_else(!is.na(specificEpithet),
paste0(specificEpithet), ""),
dplyr::if_else(!is.na(infraspecificEpithet),
paste0(infraspecificEpithet), ""),
sep = " ") %>% stringr::str_squish() %>% stringr::str_trim(side = "both"),
# Format eventDate
eventDate = lubridate::ymd_hms(Collection_date,
truncated = 5, quiet = FALSE, tz = "UTC"), # 215 failed to parse.
dataSource = "KP_Anthophila") %>%
# Remove those now redundant columns
dplyr::select(!c(Male, Female, sp_group, Tribe, Collection_date)) %>%
# Remove double white-spaces
apply(., 2, stringr::str_squish) %>% dplyr::as_tibble() %>%
# add the database_id column
dplyr::mutate(
database_id = paste("KP_data_", 1:nrow(.), sep = ""),
.before = catalogNumber) %>%
dplyr::mutate(license = dataLicense) %>%
# Add dataset information
dplyr::mutate(
datasetName = "USDA ARS Southeastern USA",
datasetID = "KP"
)
#### 9.3 Out ####
# Save the dataset
readr::write_excel_csv(KP_data, file = paste(path, outFile, sep = "/"))
# Return the data from the function to the user
return(KP_data)
} # END readr_KP
#### 11.0 EcoS ####
#' @describeIn readr_BeeBDC
#'
#' Reads specific data files into Darwin Core format
#'
#'
readr_EcoS <- function(path = NULL,
inFile = NULL,
outFile = NULL,
dataLicense = NULL){
# locally bind variables to the function
Species <- . <- scientificName <- Latitude <- Longitude <- Year <- catalogNumber <- NULL
Collection <- ID_project <- NULL
#### 11.1 Prep ####
# This will load the requireNamespaced packages. These packages may still need to be installed to
# R using install.packages("dplyr")... etc.
requireNamespace("dplyr")
requireNamespace("lubridate")
#### 11.2 Read+ ####
# Reads in the .csv file, trims the white spaces, and formats the columns to the correct type
EcoS_data <- readr::read_csv(paste(path, inFile, sep = "/"),
trim_ws = TRUE, guess_max = 33000) %>%
# Add institution information
dplyr::mutate(
institutionCode = Collection,
datasetName = Collection,
catalogNumber = ID_project,
otherCatalogNumbers = stringr::str_c(Collection, ID_project, sep = "_")) %>%
# Add taxonomic information
dplyr::mutate(
scientificName = Species) %>%
# Split genus and species names
tidyr::separate(
data = ., col = scientificName,
into = c("genus", "specificEpithet"),
sep = "_", remove = FALSE) %>%
# Lat Lon
dplyr::mutate(
decimalLatitude = Latitude,
decimalLongitude = Longitude) %>%
# Year
dplyr::mutate(
year = Year) %>%
# Add dataset information
dplyr::mutate(dataSource = "EcoS_Anthophila") %>%
# Remove any double white-spaces
apply(., 2, stringr::str_squish) %>% dplyr::as_tibble() %>%
# add the database_id column
dplyr::mutate(
database_id = paste("EcoS_data_", 1:nrow(.), sep = ""),
.before = catalogNumber) %>%
dplyr::mutate(license = dataLicense) %>%
# keep only valid columns
dplyr::select( tidyselect::any_of(names(ColTypeR()[[1]]))) %>%
# Add dataset information
dplyr::mutate(
datasetName = "EcoSur",
datasetID = "EcoS"
)
#### 11.3 Out ####
# Save the dataset
readr::write_excel_csv(EcoS_data, file = paste(path, outFile, sep = "/"))
# Return the data from the function to the user
return(EcoS_data)
} # END readr_EcoS
#### 12.0 GeoL ####
#' @describeIn readr_BeeBDC
#'
#' Reads specific data files into Darwin Core format
#'
#'
readr_GeoL <- function(path = NULL,
inFile = NULL,
outFile = NULL,
dataLicense = NULL){
# locally bind variables to the function
geolocate_Latitude<-geolocate_Longitude<-geolocate_UncertaintyRadiusMeters<-database_id<-
bels_decimallatitude<-bels_decimallongitude<-bels_coordinateuncertaintyinmeters<-datasource<-
scientificname<-infraspecificepithet<-specificepithet<-acceptednameusage<-taxonrank<-
scientificnameauthorship<-countrycode<-stateprovince<-eventdate<-basisofrecord<-
occurrencestatus<-recordnumber<-recordedby<-eventid<-samplingprotocol<-samplingeffort<-
individualcount<-catalognumber<-rightsholder<-institutioncode<-datasetname<-
othercatalognumbers<-occurrenceid<-coreid<-recordid<-collectionid<-
verbatimscientificname<-verbatimeventdate<-id <- . <- NULL
rightsHolder <- continent <- type <- samplingProtocol <- NULL
island <- municipality <- verbatimEventDate <- catalogNumber <- NULL
#### 12.1 Prep ####
# This will load the requireNamespaced packages. These packages may still need to be installed to
# R using install.packages("dplyr")... etc.
requireNamespace("dplyr")
requireNamespace("lubridate")
#### 12.2 Read+ ####
###### a. GeoL_high ####
# Reads in the .xlsx file, trims the white spaces, and formats the columns to the correct type
GeoL_data <- openxlsx::read.xlsx(paste(path, inFile, sep = "/"),
sheet = "GEOLOCATE HIGH") %>%
# Return spaces in column names to keep the consistent with file before renaming
setNames(., stringr::str_replace_all(colnames(.), "\\.", " ")) %>%
# Convert GeoLocate columns into dwc columns
dplyr::rename(
decimalLatitude = geolocate_Latitude, decimalLongitude = geolocate_Longitude,
coordinateUncertaintyInMeters = geolocate_UncertaintyRadiusMeters) %>%
# keep only valid columns
dplyr::select( tidyselect::any_of(names(ColTypeR()[[1]]))) %>%
# Remove blanks
tidyr::drop_na(database_id) %>%
# Temporarily add an identifier column
dplyr::mutate(
tempSource = "GeoL",
rightsHolder = rightsHolder %>% as.character(),
island = island %>% as.character(),
municipality = municipality %>% as.character(),
verbatimEventDate = verbatimEventDate %>% as.character()
)
# User output
writeLines(paste0(
" - We have read in ",
format(nrow(GeoL_data), big.mark = ","), " occurrence records from the 'GEOLOCATE HIGH' sheet."
))
###### b. BELS_high ####
# Reads in the .xlsx file, trims the white spaces, and formats the columns to the correct type
BELS_data <- openxlsx::read.xlsx(paste(path, inFile, sep = "/"),
sheet = "BELS High") %>%
# Convert GeoLocate columns into dwc columns
dplyr::rename(
decimalLatitude = bels_decimallatitude,
decimalLongitude = bels_decimallongitude,
coordinateUncertaintyInMeters = bels_coordinateuncertaintyinmeters,
dataSource = datasource,
scientificName = scientificname,
infraspecificEpithet = infraspecificepithet,
specificEpithet = "_specificepithet",
species = specificepithet,
acceptedNameUsage = acceptednameusage,
taxonRank = taxonrank,
scientificNameAuthorship = scientificnameauthorship,
countryCode = countrycode,
stateProvince = stateprovince,
eventDate = eventdate,
basisOfRecord = basisofrecord,
occurrenceStatus = occurrencestatus,
recordNumber = recordnumber,
recordedBy = recordedby,
eventID = eventid,
samplingProtocol = samplingprotocol,
samplingEffort = samplingeffort,
individualCount = individualcount,
catalogNumber = catalognumber,
rightsHolder = rightsholder,
institutionCode = institutioncode,
datasetName = datasetname,
otherCatalogNumbers = othercatalognumbers,
occurrenceID = occurrenceid,
coreid = coreid,
recordId = recordid,
collectionID = collectionid,
verbatimScientificName = verbatimscientificname,
verbatimEventDate = verbatimeventdate,
id = id) %>%
# Correct some formatting
dplyr::mutate(continent = continent %>% as.character(),
type = type %>% as.character(),
id = id %>% as.character(),
samplingProtocol = samplingProtocol %>% as.character(),
island = island %>% as.character(),
municipality = municipality %>% as.character(),
verbatimEventDate = verbatimEventDate %>% as.character()) %>%
# keep only valid columns
dplyr::select( tidyselect::any_of(names(ColTypeR()[[1]]))) %>%
# Remove blanks
tidyr::drop_na(database_id) %>%
# Temporarily add an identifier column
dplyr::mutate(
tempSource = "Bels"
)
# User output
writeLines(paste0(
" - We have read in ",
format(nrow(BELS_data), big.mark = ","), " occurrence records from the 'BELS High' sheet."
))
###### c. merge ####
GeoL_data <- GeoL_data %>%
# Remove data that occurs in BELS_data
dplyr::filter(!database_id %in% BELS_data$database_id) %>%
# Combine datasets
dplyr::bind_rows(BELS_data) %>%
dplyr::select(!database_id) %>%
# add the database_id column
dplyr::mutate(
database_id = paste("GEOL_data_", 1:nrow(.), sep = ""),
.before = catalogNumber)
# User output
writeLines(paste0(
" - We have kept ",
format(sum(GeoL_data$tempSource == "GeoL", na.rm = FALSE), big.mark = ","),
" occurrences from GeoLocate, and ",
format(sum(GeoL_data$tempSource == "Bels", na.rm = FALSE), big.mark = ","),
" records from BELS (",
format(nrow(GeoL_data), big.mark = ","),
" in total). BELS was given preference over GeoLocate"
))
#### 12.3 Out ####
# Save the dataset
readr::write_excel_csv(GeoL_data, file = paste(path, outFile, sep = "/"))
# Return the data from the function to the user
return(GeoL_data)
} # END readr_GeoL
#### 13.0 EaCO ####
#' @describeIn readr_BeeBDC
#'
#' Reads specific data files into Darwin Core format
#'
#'
readr_EaCO <- function(path = NULL,
inFile = NULL,
outFile = NULL,
dataLicense = NULL){
# locally bind variables to the function
County<-State<-Genus<-genus<-species<-dateRange<-dateSet<-dateCollected<-treatment<-
trapNumber<-samplingRound<-coordinates<-decimalLatitude<-decimalLongitude<-recordNumber<-
.<-catalogNumber <- NULL
#### 13.1 Prep ####
# This will load the requireNamespaced packages. These packages may still need to be installed to
# R using install.packages("dplyr")... etc.
requireNamespace("dplyr")
requireNamespace("lubridate")
#### 13.2 Read+ ####
# Reads in the .csv file, trims the white spaces, and formats the columns to the correct type
EaCO_data <- openxlsx::read.xlsx(paste(path, inFile, sep = "/")) %>%
# Return spaces in column names to keep the consistent with file before renaming
setNames(., stringr::str_replace_all(colnames(.), "\\.", " ")) %>%
# Rename columns
dplyr::rename(
recordNumber = 'Specimen Number',
county = County,
stateProvince = State,
genus = Genus,
species = 'Species (if available)',
dateRange = 'Date Range for collection',
treatment = 'Treatment type',
trapNumber = 'Trap number',
samplingRound = 'Sampling Round',
coordinates = 'GPS Coordinates of Traps') %>%
# Drop rows without species names or that aren't bees or aren't useable names
dplyr::filter(!is.na(genus), !genus == "",
!is.na(species), !species == "",
!stringr::str_detect(species, pattern = "[0-9]"),
!stringr::str_detect(species, pattern = "/"),
!stringr::str_detect(tolower(species), pattern = "sp.")) %>%
# Split dates
tidyr::separate(
col = dateRange, sep = "-",
into = c("dateSet", "dateCollected")) %>%
# Add year and convert to ymd format
dplyr::mutate(
dateSet = stringr::str_c(dateSet, "/2017") %>% lubridate::mdy(truncated = 2),
dateCollected = stringr::str_c(dateCollected, "/2017") %>% lubridate::mdy(truncated = 2),
year = 2017) %>%
# Add scientificName
dplyr::mutate(scientificName = stringr::str_c(genus, species, sep = " ")) %>%
# Add field notes
dplyr::mutate(
fieldNotes = stringr::str_c(
dplyr::if_else(!is.na(dateSet), paste0("dateSet: ", dateSet), ""),
dplyr::if_else(!is.na(dateCollected),
paste0("dateCollected: ", dateCollected), ""),
dplyr::if_else(!is.na(treatment),
paste0("treatmentType: ", treatment), ""),
dplyr::if_else(!is.na(trapNumber),
paste0("trapNumber: ", trapNumber), ""),
dplyr::if_else(!is.na(samplingRound),
paste0("samplingRound: ", samplingRound), ""),
sep = "|") %>%
# Remove extra bars "|".
stringr::str_replace_all(pattern = "(\\|){2,9}",
replacement = "\\|") %>%
stringr::str_replace_all(pattern = "(\\|$)+|(^\\|)+",
replacement = "")) %>%
# Add samplingEffort
dplyr::mutate(
samplingEffort = dateCollected - dateSet
) %>%
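# Sketch of the date handling above (hypothetical values): a dateRange of
# "6/15-6/22" separates into dateSet "6/15" and dateCollected "6/22"; appending
# "/2017" gives "6/15/2017", which lubridate::mdy() parses to 2017-06-15.
# samplingEffort is then the difference in days between the two dates.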
# rescue coordinates
tidyr::separate(
col = coordinates,
into = c("decimalLatitude", "decimalLongitude"),
sep = ",") %>%
dplyr::mutate(
decimalLatitude = decimalLatitude %>% stringr::str_remove(pattern = "[a-zA-Z]"),
decimalLongitude = decimalLongitude %>% stringr::str_remove(pattern = "[a-zA-Z]")) %>%
dplyr::mutate(
catalogNumber = stringr::str_c("EastColarado_",recordNumber, sep = "")) %>%
# Add dataset information
dplyr::mutate(dataSource = "EaCO_Anthophila") %>%
# Remove any double white-spaces
apply(., 2, stringr::str_squish) %>% dplyr::as_tibble() %>%
# add the database_id column
dplyr::mutate(
database_id = paste("EaCO_data_", 1:nrow(.), sep = ""),
.before = catalogNumber) %>%
dplyr::mutate(license = dataLicense) %>%
# keep only valid columns
dplyr::select( tidyselect::any_of(names(ColTypeR()[[1]]))) %>%
# Add dataset information
dplyr::mutate(
datasetName = "Eastern Colorado (Arathi Seshadri)",
datasetID = "EaCo",
institutionCode = "USDA ARS"
)
#### 13.3 Out ####
# Save the dataset
readr::write_excel_csv(EaCO_data, file = paste(path, outFile, sep = "/"))
# Return the data from the function to the user
return(EaCO_data)
} # END readr_EaCO
#### 14.0 MABC ####
#' @describeIn readr_BeeBDC
#'
#' Reads specific data files into Darwin Core format
#'
#'
readr_MABC <- function(path = NULL,
inFile = NULL,
outFile = NULL,
dataLicense = NULL,
sheet = "Hoja1"){
# locally bind variables to the function
genus <- specificEpithet <- collectionSite <- siteCode <- hour <- tribe <- eventDate <- NULL
. <- catalogNumber <- NULL
#### 14.1 Prep ####
# This will load the requireNamespaced packages. These packages may still need to be installed to
# R using install.packages("dplyr")... etc.
requireNamespace("dplyr")
requireNamespace("lubridate")
#### 14.2 Read+ ####
# Reads in the .csv file, trims the white spaces, and formats the columns to the correct type
MABC_data <- openxlsx::read.xlsx(paste(path, inFile, sep = "/"),
sheet = sheet) %>%
# Return spaces in column names to keep the consistent with file before renaming
setNames(., stringr::str_replace_all(colnames(.), "\\.", " ")) %>%
# Rename columns
dplyr::rename(
catalogNumber = 'Ejemplar',
eventDate = 'Fecha colecta',
country = paste0('Pa\u00eds'),
stateProvince = 'Estado/Provincia',
municipality = 'Municipio',
locality = 'Localidad',
samplingProtocol = 'Metodo colecta',
decimalLatitude = 'Coordenadas Lat',
decimalLongitude = 'Coordenadas Long',
verbatimElevation = 'Altitud',
georeferenceVerificationStatus = paste0('Datos georeferenciaci\u00f3n'),
recordedBy = 'Colector',
identifiedBy = 'Identificador',
family = 'Familia',
subfamily = 'Subfamilia',
genus = 'Genero',
subgenus = 'Subgenero',
specificEpithet = 'Especie',
infraspecificEpithet = 'Subespecie',
species = 'Nombre especie',
taxonID = paste0('C\u00f3digo especie'),
sex = 'Sexo',
# Non-standard fields
tribe = 'Tribu',
collectionSite = 'Sitio Colecta',
siteCode = paste0('C\u00f3digo sitio'),
hour = 'Hora') %>%
# Add scientificName
dplyr::mutate(scientificName = stringr::str_c(genus, specificEpithet, sep = " ")) %>%
# Add field notes
dplyr::mutate(
fieldNotes = stringr::str_c(
dplyr::if_else(!is.na(collectionSite), paste0("collectionSite: ", collectionSite), ""),
dplyr::if_else(!is.na(siteCode),
paste0("siteCode: ", siteCode), ""),
dplyr::if_else(!is.na(hour),
paste0("time: ", hour), ""),
sep = "|") %>%
# Remove extra bars "|".
stringr::str_replace_all(pattern = "(\\|){2,9}",
replacement = "\\|") %>%
stringr::str_replace_all(pattern = "(\\|$)+|(^\\|)+",
replacement = "")) %>%
# Add identificationRemarks
dplyr::mutate(
identificationRemarks = stringr::str_c(
dplyr::if_else(!is.na(tribe), paste0("tribe: ", tribe), ""))) %>%
# Add year and ensure ymd format
dplyr::mutate(eventDate = lubridate::dmy(eventDate)) %>%
# Add dataset information
dplyr::mutate(dataSource = "MABC_Anthophila") %>%
# Remove any double white-spaces
apply(., 2, stringr::str_squish) %>% dplyr::as_tibble() %>%
# add the database_id column
dplyr::mutate(
database_id = paste("MABC_data_", 1:nrow(.), sep = ""),
.before = catalogNumber) %>%
dplyr::mutate(license = dataLicense) %>%
# keep only valid columns
dplyr::select( tidyselect::any_of(names(ColTypeR()[[1]]))) %>%
# Add dataset information
dplyr::mutate(
datasetName = "MABC",
datasetID = "MABC"
)
#### 14.3 Out ####
# Save the dataset
readr::write_excel_csv(MABC_data, file = paste(path, outFile, sep = "/"))
# Return the data from the function to the user
return(MABC_data)
} # END readr_MABC
#### 15.0 Col ####
#' @describeIn readr_BeeBDC
#'
#' Reads specific data files into Darwin Core format
#'
#'
readr_Col <- function(path = NULL,
inFile = NULL,
outFile = NULL,
dataLicense = NULL,
sheet = sheet){
# locally bind variables to the function
day <- year <- eventDateInitial <- eventDate <- month2 <- day2 <- eventDate2 <- NULL
scientificName <- . <- catalogNumber <- NULL
#### 15.1 Prep ####
# This will load the requireNamespaced packages. These packages may still need to be installed to
# R using install.packages("dplyr")... etc.
requireNamespace("dplyr")
requireNamespace("lubridate")
#### 15.2 Read+ ####
###### a. Col_data ####
# Reads in the .csv file, trims the white spaces, and formats the columns to the correct type
Col_data <- openxlsx::read.xlsx(paste(path, inFile, sep = "/"),
sheet = sheet) %>%
# Return spaces in column names to keep the consistent with file before renaming
setNames(., stringr::str_replace_all(colnames(.), "\\.", " ")) %>%
# Fix some special cases that already involve "." in the column names
setNames(., stringr::str_replace_all(colnames(.), " ", ". ")) %>%
setNames(., stringr::str_replace_all(colnames(.), " $", ".")) %>%
# Rename columns
dplyr::rename(
catalogNumber = paste0('C\u00f3digo de Barras'),
recordedBy = 'Colectores [Aggregated]',
recordedByID = 'Colectores asociados',
eventDateInitial = paste0('Fecha colecci\u00f3n inicial'),
order = 'Orden',
family = 'Familia',
genus = paste0('G\u00e9nero'),
specificEpithet = 'Especie',
scientificNameAuthorship = 'Especie Author',
typeStatus = 'Tipo',
identifiedBy = 'Determinador [Formatted]',
dateIdentified = paste0('Fecha determinaci\u00f3n'),
country = paste0('Pa\u00eds'),
stateProvince = 'Departamento',
municipality = 'Municipio',
locationRemarks = 'Corregimiento Departamental',
locality = 'Localidad',
decimalLatitude = 'Latitud georref. dec.',
decimalLongitude = 'Longitud georref. dec.',
scientificName = 'Nombre Completo',
day = "dia",
month = "mes",
year = "Ano"
# Previous column names:
# collectionID = 'Colecci\\u00f3n/Guid',
# collectionCode = 'C\\u00f3digo',
# occurrenceID = 'Collection Object/GUID',
# identificationRemarks = 'Observaciones generales',
# eventID = 'Evento de Recolecci\\u00f3n/Guid',
# recordedByID = 'Numero de colector',
# eventDateFinal = 'Fecha colecci\\u00f3n final',
# class = 'Clase',
# infraspecificEpithet = 'Subespecie',
# namePublishedInID = 'Referencia original',
# identificationID = 'Determinaciones/Guid',
# identificationQualifier = 'Obs. Determinaci\\u00f3n',
# locationID = 'LocalityID',
# geodeticDatum = 'Datum geod\\u00e9sico',
# coordinateUncertaintyInMeters = 'Precisi\\u00f3n coord. georref.',
# minimumElevationInMeters = 'Elevaci\\u00f3n m\\u00ednima',
# maximumElevationInMeters = 'Elevaci\\u00f3n m\\u00e1xima',
# georeferenceProtocol = 'Protocolo de georreferenciaci\\u00f3n',
# verbatimLatitude = 'Informaci\\u00f3n geogr\\u00e1fica/Latitude1 literal',
# verbatimLongitude = 'Longitude1 literal',
# locationRemarks = 'Observaciones ninf Geogr\\u00e1fica',
# lifeStage = 'Estado de desarrollo',
# habitat = 'H\\u00e1bitat',
# continent = 'Continente',
# tribe = 'Tribu',
# subSpeciesAuthor = 'Subespecie Author'
) %>%
# Fix some date issues:
# a. Replace "00" dates with "" so that they are picked up by the truncated argument
dplyr::mutate(
day = dplyr::if_else(day == "0" | day == "00",
"", day) %>%
stringr::str_remove("to.*|^0") %>%
as.numeric(na.rm = TRUE),
month = dplyr::if_else(month == "0" | month == "00",
"", month) %>%
stringr::str_remove("^0") %>%
as.numeric(na.rm = TRUE),
year = year %>% as.numeric()
) %>%
dplyr::mutate(
eventDate = lubridate::ymd(paste(year, month, day, sep = "/"),
truncated = 2),
.after = eventDateInitial) %>%
# b. Fix inverted day-month
dplyr::mutate(
day2 = dplyr::if_else(is.na(eventDate),
month,
1),
month2 = dplyr::if_else(is.na(eventDate),
day,
1),
eventDate2 = dplyr::if_else(is.na(eventDate),
lubridate::ymd(paste(year, month2, day2, sep = "/"),
truncated = 2),
eventDate),
.after = year
) %>%
# Combine into final date values
dplyr::mutate(
# Take eventDate if it's not empty, and eventDate2 if it is empty
eventDate = dplyr::if_else(is.na(eventDate),
eventDate2,
eventDate),
day = dplyr::if_else(is.na(day),
day2,
day),
month = dplyr::if_else(is.na(month),
month2,
month)) %>%
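# Sketch of the fallback above (hypothetical values): a record with year = 2015,
# month = 23, day = 6 fails lubridate::ymd("2015/23/6"), so day2/month2 swap the
# two fields and eventDate2 = lubridate::ymd("2015/6/23") parses; the final
# eventDate then takes eventDate2 wherever the first parse returned NA.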
dplyr::select(!c(eventDate2, day2, month2)) %>%
# Remove extra spaces in subgenus
dplyr::mutate(scientificName = scientificName %>%
stringr::str_replace(pattern = "\\( ",
replacement = "\\(")) %>%
# Remove any double white-spaces
apply(., 2, stringr::str_squish) %>% dplyr::as_tibble() %>%
# add the database_id column
dplyr::mutate(
database_id = paste("Col_data_", 1:nrow(.), sep = ""),
.before = catalogNumber) %>%
# Add dataset information
dplyr::mutate(dataSource = "Col_Anthophila") %>%
dplyr::mutate(license = dataLicense) %>%
# Add dataset information
dplyr::mutate(
datasetName = "Colombia - Diego Alexander Guevara Farias",
datasetID = "Col"
)
#### 15.3 Out ####
# Save the dataset
readr::write_excel_csv(Col_data, file = paste(path, outFile, sep = "/"))
# Return the data from the function to the user
return(Col_data)
} # END readr_Col
#### 16.0 FSCA ####
#' @describeIn readr_BeeBDC
#'
#' Reads specific data files into Darwin Core format
#'
#'
readr_FSCA <- function(path = NULL,
inFile = NULL,
outFile = NULL,
dataLicense = NULL){
# locally bind variables to the function
. <- catalogNumber <- recordID <- NULL
#### 16.1 Prep ####
# This will load the requireNamespaced packages. These packages may still need to be installed to
# R using install.packages("dplyr")... etc.
requireNamespace("dplyr")
requireNamespace("lubridate")
#### 16.2 Read+ ####
# Reads in the .csv file, trims the white spaces, and formats the columns to the correct type
FSCA_data <- readr::read_csv(paste(path, inFile, sep = "/"),
trim_ws = TRUE) %>%
# Add dataset information
dplyr::mutate(dataSource = "FSCA_Anthophila") %>%
# Remove any double white-spaces
apply(., 2, stringr::str_squish) %>% dplyr::as_tibble() %>%
# add the database_id column
dplyr::mutate(
database_id = paste("FSCA_data_", 1:nrow(.), sep = ""),
.before = 1) %>%
dplyr::mutate(license = dataLicense) %>%
# keep only valid columns
dplyr::select( tidyselect::any_of(names(ColTypeR()[[1]]))) %>%
    # Add the datasetName, datasetID, and institutionCode columns
dplyr::mutate(
datasetName = "Florida State Collection of Arthropods",
datasetID = "FSCA",
institutionCode = "FSCA"
)
#### 16.3 Out ####
# Save the dataset
readr::write_excel_csv(FSCA_data, file = paste(path, outFile, sep = "/"))
# Return the data from the function to the user
return(FSCA_data)
} # END readr_FSCA
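# Example usage (a minimal sketch; the path, file names, and license string are hypothetical,
# and the same calling pattern applies to the other readr_* functions in this file):
# FSCA_data <- readr_FSCA(path = "Current_input", inFile = "FSCA_raw.csv",
#                         outFile = "FSCA_darwinCore.csv",
#                         dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")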
#### 17.0 SMC ####
#' @describeIn readr_BeeBDC
#'
#' Reads specific data files into Darwin Core format
#'
#'
readr_SMC <- function(path = NULL,
inFile = NULL,
outFile = NULL,
dataLicense = NULL){
# locally bind variables to the function
collectionMethod <- locale <- latitude <- longitude <- organismName <- scientificName <- NULL
observationDate <- eventDate <- . <- NULL
#### 17.1 Prep ####
requireNamespace("dplyr")
requireNamespace("lubridate")
#### 17.2 Read+ ####
SMC_Data <- readr::read_csv(paste(path, inFile, sep = "/"),
trim_ws = TRUE, guess_max = 33000) %>%
# Rename some columns to make format consistent with DarwinCore
dplyr::rename(
samplingProtocol = collectionMethod,
locality = locale,
decimalLatitude = latitude,
decimalLongitude = longitude,
scientificName = organismName) %>%
# Make new columns
dplyr::mutate(
# Remove underscore from scientificName strings
scientificName = gsub("_", " ", scientificName),
# Add basis of record
basisOfRecord = "specimen",
# Add eventDate
eventDate = lubridate::mdy(observationDate),
# Parse eventDate into day, month, and year
month = lubridate::month(eventDate),
day = lubridate::day(eventDate),
year = lubridate::year(eventDate)) %>%
# Add dataSource information
dplyr::mutate(dataSource = "SMC_Anthophila") %>%
# add the database_id column
dplyr::mutate(
database_id = paste("SMC_data_", 1:nrow(.), sep = "")) %>%
dplyr::mutate(license = dataLicense) %>%
    # Add the datasetName and datasetID columns
dplyr::mutate(
datasetName = "Texas SMC literature data",
datasetID = "SMC"
)
# Save the dataset
readr::write_excel_csv(SMC_Data, file = paste(path, outFile, sep = "/"))
# Return data
return(SMC_Data)
} # END readr_SMC
#### 18.0 Bal ####
#' @describeIn readr_BeeBDC
#'
#' Reads specific data files into Darwin Core format
#'
#'
readr_Bal <- function(path = NULL,
inFile = NULL,
outFile = NULL,
dataLicense = NULL,
sheet = "animal_data"){
# locally bind variables to the function
siteID<-year<-animalID<-abundance<-samplingMethod<-censusType<-decimalLatitude<-
decimalLongitude<-studyLocation<-siteDescription<-studyID<-locationID<-.<-
samplingIntensity<-eventDate<-catalogNumber <- NULL
#### 18.1 Prep ####
  # Load the required packages via requireNamespace(). These packages may still need to be
  # installed in R using install.packages("dplyr")... etc.
requireNamespace("dplyr")
requireNamespace("lubridate")
#### 18.2 Read+ ####
# Reads in the .xlsx file, trims the white spaces, and formats the columns to the correct type
Bal_data <- openxlsx::read.xlsx(paste(path, inFile, sep = "/"),
sheet = sheet, startRow = 2) %>%
    # Restore spaces in column names to keep them consistent with the file before renaming
setNames(., stringr::str_replace_all(colnames(.), "\\.", " ")) %>%
# Make columns DarwinCore-compatible
dplyr::rename(
locationID = siteID,
year = year,
eventDate = date,
scientificName = animalID,
individualCount = abundance,
samplingProtocol = samplingMethod,
fieldNotes = censusType,
decimalLatitude = decimalLatitude,
decimalLongitude = decimalLongitude,
locality = studyLocation,
locationRemarks = siteDescription) %>%
# Add some columns
dplyr::mutate(
catalogNumber = stringr::str_c(studyID, "_", locationID,"_", 1:nrow(.), sep = ""),
samplingEffort = stringr::str_c(samplingIntensity, " hours")
) %>%
# round the coordinates to six decimal places
dplyr::mutate(
decimalLatitude = decimalLatitude %>% as.numeric() %>% base::round(digits = 6) %>%
suppressWarnings(classes = "warning"),
decimalLongitude = decimalLongitude %>% as.numeric() %>% base::round(digits = 6) %>%
suppressWarnings(classes = "warning")) %>%
# Format eventDate and add dataSource
# Format eventDate
dplyr::mutate(
eventDate = lubridate::dmy(eventDate,
truncated = 2, quiet = FALSE),
# Parse eventDate into day, month, and year
month = lubridate::month(eventDate),
day = lubridate::day(eventDate),
year = lubridate::year(eventDate),
dataSource = "Bal_Anthophila") %>%
# add the database_id column
dplyr::mutate(
database_id = paste("Bal_data_", 1:nrow(.), sep = ""),
.before = catalogNumber) %>%
dplyr::mutate(license = dataLicense) %>%
  # Add the datasetName and datasetID columns
dplyr::mutate(
datasetName = "Ballare et al. 2019",
datasetID = "Bal"
)
#### 18.3 Out ####
# Save the dataset
readr::write_excel_csv(Bal_data, file = paste(path, outFile, sep = "/"))
# Return the data from the function to the user
return(Bal_data)
} # END readr_Bal
#### 19.0 Lic ####
#' @describeIn readr_BeeBDC
#'
#' Reads specific data files into Darwin Core format
#'
#'
readr_Lic <- function(path = NULL,
inFile = NULL,
outFile = NULL,
dataLicense = NULL){
# locally bind variables to the function
Kingdom<-Order<-Family_or_grp<-Genus<-Species<-sex<-Collector<-Determiner<-genus<-species<-
occurrenceID<-eventID<-eventDate<-.<-catalogNumber<-family <- NULL
#### 19.1 Prep ####
  # Load the required packages via requireNamespace(). These packages may still need to be
  # installed in R using install.packages("dplyr")... etc.
requireNamespace("dplyr")
requireNamespace("lubridate")
#### 19.2 Read+ ####
# Reads in the .csv file, trims the white spaces, and formats the columns to the correct type
Lic_data <- readr::read_csv(paste(path, inFile, sep = "/"),
trim_ws = TRUE, guess_max = 33000) %>%
dplyr::rename(
kingdom = Kingdom,
order = Order,
family = Family_or_grp,
genus = Genus,
species = Species,
sex = sex,
recordedBy = Collector,
identifiedBy = Determiner) %>%
# Add taxonomic information
dplyr::mutate(
scientificName = stringr::str_c(
dplyr::if_else(!is.na(genus),
paste0(genus), ""),
dplyr::if_else(!is.na(species),
paste0(species), ""),
sep = " ")) %>%
# Make a catalogue number
dplyr::mutate(
catalogNumber = stringr::str_c(
dplyr::if_else(!is.na(occurrenceID),
paste0(occurrenceID), ""),
dplyr::if_else(!is.na(eventID),
paste0(eventID), ""),
sep = "_")) %>%
# Format eventDate
dplyr::mutate(
eventDate = lubridate::dmy(eventDate,
truncated = 2, quiet = FALSE),
# Parse eventDate into day, month, and year
month = lubridate::month(eventDate),
day = lubridate::day(eventDate),
year = lubridate::year(eventDate)) %>%
# Add dataset information
dplyr::mutate(dataSource = "Lic_Anthophila") %>%
# Remove any double white-spaces
apply(., 2, stringr::str_squish) %>% dplyr::as_tibble() %>%
# add the database_id column
dplyr::mutate(
database_id = paste("Lic_data_", 1:nrow(.), sep = ""),
.before = catalogNumber) %>%
dplyr::mutate(license = dataLicense) %>%
    # Add the datasetName and datasetID columns
dplyr::mutate(
datasetName = "Elinor Lichtenberg Canola Data",
datasetID = "Lic"
)
# filter to bee families only
Lic_data <- Lic_data %>%
dplyr::filter(tolower(family) %in%
tolower(c("Andrenidae","Apidae","Colletidae","Halictidae","Megachilidae",
"Melittidae","Stenotritidae")))
#### 19.3 Out ####
# Save the dataset
readr::write_excel_csv(Lic_data, file = paste(path, outFile, sep = "/"))
# Return the data from the function to the user
return(Lic_data)
} # END readr_Lic
#### 20.0 Arm ####
#' @describeIn readr_BeeBDC
#'
#' Reads specific data files into Darwin Core format
#'
#'
readr_Arm <- function(path = NULL,
inFile = NULL,
outFile = NULL,
dataLicense = NULL,
sheet = "Sheet1"){
# locally bind variables to the function
fam<-genus<-sp<-species<-sex<-locality<-munic<-state<-y<-x<-elev<-specificEpithet<-ecoregion<-
veget<-g<-m<-s<-G<-M<-S<-day<-year<-.<-family <- NULL
#### 20.1 Prep ####
  # Load the required packages via requireNamespace(). These packages may still need to be
  # installed in R using install.packages("dplyr")... etc.
requireNamespace("dplyr")
requireNamespace("lubridate")
#### 20.2 Read+ ####
  # Reads in the .xlsx file, trims the white spaces, and formats the columns to the correct type
Arm_data <- openxlsx::read.xlsx(paste(path, inFile, sep = "/"), sheet = sheet) %>%
# Make columns DarwinCore-compatible
dplyr::rename(
family = fam,
genus = genus,
specificEpithet = sp,
species = species,
sex = sex,
locality = locality,
municipality = munic,
stateProvince = state,
decimalLatitude = y,
decimalLongitude = x,
verbatimElevation = elev) %>%
# Add a bunch of columns from other columns
dplyr::mutate(
# Add scientificName
# This will ONLY concatenate columns where they have a value.
scientificName = stringr::str_c(
dplyr::if_else(!is.na(genus), genus, ""),
dplyr::if_else(!is.na(specificEpithet),
specificEpithet, ""),
sep = " ") ) %>%
# Do the same as the last mutate, but for fieldNotes
dplyr::mutate(
fieldNotes = stringr::str_c(
dplyr::if_else(!is.na(ecoregion),
paste0("ecoregion: ", ecoregion), ""),
dplyr::if_else(!is.na(veget),
paste0("vegetation: ", veget), ""),
sep = "|") %>%
# Remove extra bars "|".
stringr::str_replace_all(pattern = "(\\|){2,9}",
replacement = "\\|") %>%
stringr::str_replace_all(pattern = "(\\|$)+|(^\\|)+",
replacement = "")) %>%
    # Build verbatim coordinate strings
dplyr::mutate(
verbatimLatitude = stringr::str_c(
dplyr::if_else(!is.na(g),
as.character(g), ""),
dplyr::if_else(!is.na(m),
as.character(m), ""),
dplyr::if_else(!is.na(s),
as.character(s), ""),
sep = " "),
verbatimLongitude = stringr::str_c(
dplyr::if_else(!is.na(G),
as.character(G), ""),
dplyr::if_else(!is.na(M),
as.character(M), ""),
dplyr::if_else(!is.na(S),
as.character(S), ""),
sep = " ")) %>%
    # Format the eventDate and add the dataSource information
dplyr::mutate(
# Format eventDate
eventDate = lubridate::dmy(paste(
day, month, year, sep = "-"
)), # 215 failed to parse.
dataSource = "Arm_Anthophila") %>%
# Remove double white-spaces
apply(., 2, stringr::str_squish) %>% dplyr::as_tibble() %>%
# add the database_id column
dplyr::mutate(
database_id = paste("Arm_data_", 1:nrow(.), sep = ""),
.before = family) %>%
dplyr::mutate(license = dataLicense) %>%
# Remove spent columns
dplyr::select(!tidyselect::any_of(c("veget", "ecoregion", "g", "m", "s", "G", "M", "S"))) %>%
    # Add the datasetName and datasetID columns
dplyr::mutate(
datasetName = "Armando Falcon-Brindis",
datasetID = "Arm"
)
#### 20.3 Out ####
# Save the dataset
readr::write_excel_csv(Arm_data, file = paste(path, outFile, sep = "/"))
# Return the data from the function to the user
return(Arm_data)
} # END readr_Arm
#### 21.0 Dorey ####
#' @describeIn readr_BeeBDC
#'
#' Reads specific data files into Darwin Core format
#'
#'
readr_Dor <- function(path = NULL,
inFile = NULL,
outFile = NULL,
dataLicense = NULL){
# locally bind variables to the function
. <- catalogNumber <- eventDate <- stateOrProvince <- NULL
#### 21.1 Prep ####
requireNamespace("dplyr")
requireNamespace("lubridate")
#### 21.2 Read+ ####
Dor_data <- readr::read_csv(paste(path, inFile, sep = "/"),
trim_ws = TRUE) %>%
dplyr::rename(stateProvince = stateOrProvince) %>%
# add the database_id column
dplyr::mutate(
database_id = paste("Dor_data_", 1:nrow(.), sep = ""),
.before = catalogNumber) %>%
# Add dataset information
dplyr::mutate(dataSource = "Dor_Anthophila") %>%
# Format date
dplyr::mutate(eventDate = eventDate %>% lubridate::dmy(., truncated = 2),
year = lubridate::year(eventDate),
month = lubridate::month(eventDate),
day = lubridate::day(eventDate)) %>%
    # Add the license information
dplyr::mutate(license = dataLicense) %>%
    # Add the datasetName and datasetID columns
dplyr::mutate(
datasetName = "James B Dorey Bee Data",
datasetID = "Dorey"
)
#### 21.3 Out ####
# Save the dataset
readr::write_excel_csv(Dor_data, file = paste(path, outFile, sep = "/"))
# Return data
return(Dor_data)
} # END readr_Dor
#### 22.0 MEPB ####
#' @describeIn readr_BeeBDC
#'
#' Reads specific data files into Darwin Core format
#'
#'
readr_MEPB <- function(path = NULL,
inFile = NULL,
outFile = NULL,
dataLicense = NULL,
sheet = NULL){
# locally bind variables to the function
catalog_number<-pollinator_family<-pollinator_genus<-pollinator_species<-collection_method<-
day_collected<-month_collected<-year_collected<-location_description<-latitude<-longitude<-
basis_of_record<-genus<-specificEpithet<-year<-day<-eventDate<-collector_number<-
location_name<-habitat<-.<-catalogNumber <- NULL
#### 22.1 Prep ####
requireNamespace("dplyr")
requireNamespace("lubridate")
#### 22.2 Read+ ####
MEPB_data <- openxlsx::read.xlsx(paste(path, inFile, sep = "/"), sheet = sheet) %>%
    # Restore spaces in column names to keep them consistent with the file before renaming
setNames(., stringr::str_replace_all(colnames(.), "\\.", " ")) %>%
# Fix broken encodings
dplyr:: mutate(
dplyr::across(
.cols = dplyr::everything(),
.fns = ~ stringr::str_replace_all(.,
pattern = c("\\u00c3\\u00b3"="\\u00f3",
"\\u00c3\\u00a9"="\\u00e9",
"\\u00c3\\u00b1"="\\u00f1",
"\\u00c2\\u00b0"="\\u00b0",
"\\u00c3"="\\u00e1")
))) %>%
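    # For reference (an assumed example of the encoding fix above): "\u00c3\u00b3" is the
    # UTF-8 byte pair for "\u00f3" mis-read as Latin-1, so the replacement restores the
    # intended accented character in the affected text fields.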
# add the database_id column
dplyr::mutate(
database_id = paste("MEPB_data_", 1:nrow(.), sep = ""),
.before = catalogNumber) %>%
# Add dataset information
dplyr::mutate(dataSource = "MEPB_Anthophila") %>%
# Format date
dplyr::mutate(eventDate = lubridate::ymd(stringr::str_c(year %>% as.numeric(),
month %>% as.numeric(),
day %>% as.numeric(),
sep = "/"),
truncated = 2)) %>%
    # Add the license information
dplyr::mutate(license = dataLicense) %>%
    # Add the datasetID column
dplyr::mutate(
datasetID = "MEPB"
)
#### 22.3 Out ####
# Save the dataset
readr::write_excel_csv(MEPB_data, file = paste(path, outFile, sep = "/"))
# Return data
return(MEPB_data)
} # END readr_MEPB
#### 23.0 Brazil ####
#' @describeIn readr_BeeBDC
#'
#' Reads specific data files into Darwin Core format
#'
#'
readr_BBD <- function(path = NULL,
inFile = NULL,
outFile = NULL,
dataLicense = NULL){
# locally bind variables to the function
. <- catalogNumber <- year <- day <- dateLastModified <- dateLastModified2 <- NULL
identifiedBy <- Spcslink.identifiedby <- NULL
#### 23.1 Prep ####
requireNamespace("dplyr")
requireNamespace("lubridate")
#### 23.2 Read+ ####
lookupCols <- c(verbatimScientificName = "Scientificname_ORIGINAL",
verbatimLatitude = "Lat_original",
verbatimLongitude = "Long_original")
BBD_data <- readr::read_csv(paste(path, inFile, sep = "/"),
trim_ws = TRUE) %>%
# Rename columns
dplyr::rename(
tidyselect::any_of(lookupCols),
id = "CodeBBdatabase_curated",
scientificName = "Scientific name corrected",
family = "Family",
institutionCode = "institutioncode",
# infraspecificEpithet = "Spcslink.subspecies",
# scientificNameAuthorship = "Spcslink.scientificnameauthor",
day = "Day",
month = "Month",
year = "Year",
country = "Country",
stateProvince = "State",
decimalLatitude = "Latitude_dec.degrees",
decimalLongitude = "Longitude_dec.degrees",
coordinateUncertaintyInMeters = "Precision.of.coord.meters",
verbatimLocality = "Locality.original",
georeferenceRemarks = "NotasLatLong",
county = "Spcslink.county",
recordedBy = "Collector",
collectionCode = "Collection",
references = "Source",
sex = "Sex",
identifiedBy = "Det_By",
catalogNumber = "Codigo",
otherCatalogNumbers = "Spcslink.collectioncode",
dateIdentified = "Spcslink.yearidentified",
dateLastModified = "Spcslink.datelastmodified",
basisOfRecord = "Spcslink.basisofrecord",
# occurrenceID = "Spcslink.collectioncode",
eventRemarks = "NotesOnLocality") %>%
#dplyr::mutate(
# decimalLatitude = decimalLatitude %>% stringr::str_replace("\\,", "\\."),
# decimalLongitude = decimalLongitude %>% stringr::str_replace("\\,", "\\."))
# add the database_id column
dplyr::mutate(
database_id = paste("BBD_data_", 1:nrow(.), sep = ""),
.before = catalogNumber) %>%
# Edit catalogNumber for AMNH
dplyr::mutate(catalogNumber = stringr::str_replace(
catalogNumber, pattern = "AMNHBEE", replacement = "AMNH_BEE"),
catalogNumber = stringr::str_replace(
catalogNumber, pattern = "^0\\.", replacement = ".")) %>%
# Add dataset information
dplyr::mutate(dataSource = "BBD_Anthophila") %>%
# Format date
    dplyr::mutate(eventDate = lubridate::ymd(paste(
      year, month, day, sep = "/"), truncated = 2, quiet = TRUE)) %>%
dplyr::mutate(dateLastModified2 =
lubridate::dmy_hms(dateLastModified, truncated = 5, quiet = TRUE),
.after = dateLastModified) %>%
dplyr::mutate(dateLastModified2 = dplyr::if_else(is.na(dateLastModified2),
lubridate::ymd_hms(dateLastModified, truncated = 5, quiet = TRUE),
dateLastModified2)) %>%
dplyr::mutate(dateLastModified2 = dplyr::if_else(is.na(dateLastModified2),
lubridate::mdy_hms(dateLastModified, truncated = 5, quiet = TRUE),
dateLastModified2)) %>%
dplyr::mutate(dateLastModified2 = dplyr::if_else(is.na(dateLastModified2),
lubridate::ymd(dateLastModified, truncated = 2, quiet = TRUE),
dateLastModified2)) %>%
dplyr::mutate(dateLastModified2 = dplyr::if_else(is.na(dateLastModified2),
as.Date(dateLastModified %>% as.numeric(na.rm = TRUE),
origin = "1899-12-30") %>%
lubridate::ymd_hms(., truncated = 5, quiet = TRUE),
dateLastModified2),
.after = dateLastModified) %>%
dplyr::mutate(dateLastModified = dateLastModified2) %>%
dplyr::select(!dateLastModified2) %>%
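    # For example (hypothetical values): "21/03/2015 14:22:10" is caught by dmy_hms() above,
    # an ISO-style "2015-03-21" is picked up by one of the ymd branches, and a bare Excel
    # serial such as "42084" falls through to the final branch, which converts it via the
    # 1899-12-30 origin (42084 corresponds to 2015-03-21).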
    # Add the license information
dplyr::mutate(license = dataLicense) %>%
    # Add the datasetName and datasetID columns
dplyr::mutate(
datasetName = "BBD_Brazil",
datasetID = "BBD"
) %>%
  # If identifiedBy is not filled where it should be, try the other column provided
dplyr::mutate(identifiedBy = dplyr::if_else(is.na(identifiedBy),
Spcslink.identifiedby,
identifiedBy))
#### 23.3 Out ####
# Save the dataset
readr::write_excel_csv(BBD_data, file = paste(path, outFile, sep = "/"))
# Return data
return(BBD_data)
} # END readr_BBD
#### 24.0 MPUJ ####
#' @describeIn readr_BeeBDC
#'
#' Reads specific data files into Darwin Core format
#'
#'
readr_MPUJ <- function(path = NULL,
inFile = NULL,
outFile = NULL,
dataLicense = NULL,
sheet = sheet){
# locally bind variables to the function
collector1stName <- collectorsLastName <- determined1stName <- determinedLastName <- NULL
day <- year <- eventDate <- endDate <- . <- catalogNumber <- fieldNotes <- NULL
`Start Date (Year)` <- `Start Date (Month)` <- `Start Date (Day)` <- NULL
#### 24.1 Prep ####
  # Load the required packages via requireNamespace(). These packages may still need to be
  # installed in R using install.packages("dplyr")... etc.
requireNamespace("dplyr")
requireNamespace("lubridate")
#### 24.2 Read+ ####
###### a. MPUJ_data ####
  # Reads in the .xlsx file, trims the white spaces, and formats the columns to the correct type
MPUJ_data <- openxlsx::read.xlsx(paste(path, inFile, sep = "/"),
sheet = sheet) %>%
    # Restore spaces in column names to keep them consistent with the file before renaming
setNames(., stringr::str_replace_all(colnames(.), "\\.", " ")) %>%
# Rename columns
dplyr::rename(
catalogNumber = 'Catalog Number',
individualCount = "Count",
otherCatalogNumbers = "Alt Cat Number",
typeStatus = "Type Status",
identificationQualifier = "Qualifier",
sex = "Sex",
samplingProtocol = "Method",
habitat = "Habitat",
continent = "Continent",
country = "Country",
stateProvince = "State",
county = "County",
locality = "Locality Name",
minimumElevationInMeters = "Min Elevation",
maximumElevationInMeters = "Max Elevation",
fieldNotes = "Locality and Habitat Notes",
decimalLongitude = "Latitude1",
decimalLatitude = "Longitude1",
verbatimLatitude = "Lat1text",
verbatimLongitude = "Long1text",
scientificName = "Full Name",
kingdom = "Kingdom",
order = "Order",
family = "Family",
subfamily = "Subfamily",
genus = "Genus",
specificEpithet = "Species",
infraspecificEpithet = "Subspecies",
scientificNameAuthorship = "Species Author",
day = `Start Date (Day)`,
month = `Start Date (Month)`,
year = `Start Date (Year)`,
associatedTaxa = "Associated Taxa",
associatedOccurrences = "Associated Ocurrence",
lifeStage = "Stage",
collector1stName = 'Collectors/First Name',
collectorsLastName = 'Collectors/Last Name',
determined1stName = 'Determiner/First Name',
determinedLastName = 'Determiner/Last Name',
endDate = "End Date",
verbatimEventDate = "Verbatim Date"
) %>%
dplyr::mutate(year = year %>% as.numeric(),
month = month %>% as.numeric(),
day = day %>% as.numeric()) %>%
dplyr::mutate(recordedBy = stringr::str_c(collector1stName, collectorsLastName,
sep = " "),
identifiedBy = stringr::str_c(determined1stName, determinedLastName,
sep = " ")) %>%
dplyr::mutate(eventDate = lubridate::dmy(stringr::str_c(day, month, year, sep = "/"),
truncated = 2)) %>%
dplyr::mutate(
fieldNotes =
stringr::str_c(
dplyr::if_else(!is.na(fieldNotes),
paste0("fieldNotes: ", fieldNotes), ""),
dplyr::if_else(!is.na(eventDate),
paste0("startDate: ", eventDate), ""),
dplyr::if_else(!is.na(endDate),
paste0("endDate: ", endDate), ""),
sep = "|")
) %>%
dplyr::mutate(basisOfRecord = "Preserved specimen") %>%
# Remove any double white-spaces
apply(., 2, stringr::str_squish) %>% dplyr::as_tibble() %>%
# add the database_id column
dplyr::mutate(
database_id = paste("MPUJ_data_", 1:nrow(.), sep = ""),
.before = catalogNumber) %>%
# Add dataset information
dplyr::mutate(dataSource = "MPUJ_Anthophila") %>%
dplyr::mutate(license = dataLicense) %>%
    # Add the datasetName and datasetID columns
dplyr::mutate(
datasetName = "Colombia MPUJ - Diego Alexander Guevara Farias",
datasetID = "MPUJ"
)
#### 24.3 Out ####
# Save the dataset
readr::write_excel_csv(MPUJ_data, file = paste(path, outFile, sep = "/"))
# Return the data from the function to the user
return(MPUJ_data)
} # END readr_MPUJ
#### 25.0 STRI ####
#' @describeIn readr_BeeBDC
#'
#' Reads specific data files into Darwin Core format
#'
#'
#'
readr_STRI <- function(path = NULL,
inFile = NULL,
outFile = NULL,
dataLicense = NULL){
# locally bind variables to the function
fieldNotes <- Catalognumber <- . <- day <- year <- catalogNumber <- recordId <- NULL
#### 25.1 Prep ####
  # Load the required packages via requireNamespace(). These packages may still need to be
  # installed in R using install.packages("dplyr")... etc.
requireNamespace("dplyr")
requireNamespace("lubridate")
#### 25.2 Read+ ####
###### a. STRI_data ####
# Reads in the .csv file, trims the white spaces, and formats the columns to the correct type
STRI_data <- readr::read_csv(paste(path, inFile, sep = "/"),
trim_ws = TRUE) %>%
dplyr::rename(recordId = "recordID") %>%
    # Build the fieldNotes and species columns
dplyr::mutate(
fieldNotes = stringr::str_c(
dplyr::if_else(!is.na(Catalognumber),
paste0("secondary catalog #: ", Catalognumber), ""),
sep = "|"),
species = "scientificName") %>%
# Format dates
dplyr::mutate(
# If day is not recorded, set to first day of month
day = dplyr::if_else(day == 0, 1, day),
eventDate = lubridate::dmy(stringr::str_c(day, month, year, sep = "/"),
truncated = 2)) %>%
# Remove any double white-spaces
apply(., 2, stringr::str_squish) %>% dplyr::as_tibble() %>%
# add the database_id column
dplyr::mutate(
database_id = paste("STRI_data_", 1:nrow(.), sep = ""),
.before = catalogNumber) %>%
# Add dataset information
dplyr::mutate(dataSource = "STRI_Anthophila") %>%
dplyr::mutate(license = dataLicense) %>%
    # Add the datasetName and datasetID columns
dplyr::mutate(
datasetName = "STRI",
datasetID = "STRI"
)
#### 25.3 Out ####
# Save the dataset
readr::write_excel_csv(STRI_data, file = paste(path, outFile, sep = "/"))
# Return the data from the function to the user
return(STRI_data)
} # END readr_STRI
#### 26.0 PALA ####
#' @describeIn readr_BeeBDC
#'
#' Reads specific data files into Darwin Core format
#'
#'
#'
readr_PALA <- function(path = NULL,
inFile = NULL,
outFile = NULL,
dataLicense = NULL){
# locally bind variables to the function
romanNumerals <- numeralConversion <- catalogNumber <- type <- country <- municipality <- NULL
locality <- decimalLatitude <- decimalLongitude <- verbatimElevation <- verbatimEventDate <- NULL
recordedBy <- collectionCode <- otherCatalogNumbers <- associatedTaxa <- taxonRemarks <- NULL
family <- genus <- references <- specificEpithet <- scientificName <- . <- eventDate <- NULL
#### 26.1 Prep ####
  # Load the required packages via requireNamespace(). These packages may still need to be
  # installed in R using install.packages("dplyr")... etc.
requireNamespace("dplyr")
requireNamespace("lubridate")
requireNamespace("mgsub")
#### 26.2 Read+ ####
###### a. month strings ####
# Prepare month strings to convert from roman numerals
romanNumerals <- c("i","ii","iii","iv","v","vi","vii","viii","ix","x","xi","xii",
"I","II","III","IV","V","VI","VII","VIII","IX","X","XI","XII")
numeralConversion <- c("Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct","Nov", "Dec",
"Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct","Nov", "Dec")
###### b. PALA_data ####
# Reads in the .csv file, trims the white spaces, and formats the columns to the correct type
PALA_data <- readr::read_csv(paste(path, inFile, sep = "/"),
trim_ws = TRUE) %>%
# Rename columns
dplyr::rename(
catalogNumber = "catalogNumber",
type = "Type",
country = "Country",
municipality = "Muninciplaity",
locality = "Site",
decimalLatitude = "Latitud",
decimalLongitude = "Longitude",
verbatimElevation = "elevation",
verbatimEventDate = "date",
recordedBy = "recordedby",
collectionCode = "Collection",
otherCatalogNumbers = "othercatalognumber",
associatedTaxa = "AssociatedTaxa",
taxonRemarks = "taxonremarks",
family = "Family",
genus = "Genus",
specificEpithet = "species",
references = "Citation"
) %>%
# Add in sciName
dplyr::mutate(scientificName = stringr::str_c(genus, specificEpithet, sep = " ")) %>%
# Format date
dplyr::mutate(eventDate = verbatimEventDate %>%
mgsub::mgsub(
pattern = paste("[-/ \\.]", romanNumerals, "[ -/\\.]", sep = ""),
replacement = numeralConversion) %>%
lubridate::dmy(truncated = 2, quiet = TRUE),
.after = verbatimEventDate) %>%
# Add day, month, year
dplyr::mutate(
day = lubridate::day(eventDate),
month = lubridate::month(eventDate),
year = lubridate::year(eventDate),
.after = eventDate
) %>%
# Remove any double white-spaces
apply(., 2, stringr::str_squish) %>% dplyr::as_tibble() %>%
# add the database_id column
dplyr::mutate(
database_id = paste("PALA_data_", 1:nrow(.), sep = ""),
.before = catalogNumber) %>%
# Add dataset information
dplyr::mutate(dataSource = "PALA_Anthophila") %>%
dplyr::mutate(license = dataLicense) %>%
    # Add the datasetName and datasetID columns
dplyr::mutate(
datasetName = "PALA",
datasetID = "PALA"
)
#### 26.3 Out ####
# Save the dataset
readr::write_excel_csv(PALA_data, file = paste(path, outFile, sep = "/"))
# Return the data from the function to the user
return(PALA_data)
} # END readr_PALA
#### 27.0 JoLa ####
#' @describeIn readr_BeeBDC
#'
#' Reads specific data files into Darwin Core format
#'
#'
#'
readr_JoLa <- function(path = NULL,
inFile = NULL,
outFile = NULL,
dataLicense = NULL,
sheet = c("pre-1950", "post-1950")){
# locally bind variables to the function
fieldNotes <- Catalognumber <-`Start Date (Year)` <- genus <- specificEpithet <- NULL
year <- . <- NULL
#### 27.1 Prep ####
  # Load the required packages via requireNamespace(). These packages may still need to be
  # installed in R using install.packages("dplyr")... etc.
requireNamespace("dplyr")
requireNamespace("lubridate")
#### 27.2 Read+ ####
###### a. JoLa_data ####
  # Reads in the .xlsx file, trims the white spaces, and formats the columns to the correct type
# Read in both sheets and bind them together
JoLa_data <- openxlsx::read.xlsx(paste(path, inFile, sep = "/"),
sheet = sheet[1]) %>%
dplyr::bind_rows(openxlsx::read.xlsx(paste(path, inFile, sep = "/"),
sheet = sheet[2])) %>%
    # Restore spaces in column names to keep them consistent with the file before renaming
setNames(., stringr::str_replace_all(colnames(.), "\\.", " ")) %>%
# Rename the columns
dplyr::rename(
specificEpithet = "Species",
decimalLatitude = "Latitude1",
decimalLongitude = "Longitude1",
year = `Start Date (Year)`
) %>%
# Add in higher taxonomic information
dplyr::mutate(
genus = "Lasioglossum",
family = "Halictidae",
order = "Hymenoptera",
scientificName = stringr::str_c(genus, specificEpithet, sep = " ")
) %>%
dplyr::mutate(
eventDate = lubridate::ymd(year, truncated = 2)
) %>%
# Remove any double white-spaces
apply(., 2, stringr::str_squish) %>% dplyr::as_tibble() %>%
# add the database_id column
dplyr::mutate(
database_id = paste("JoLa_data_", 1:nrow(.), sep = ""),
.before = 1) %>%
# Add dataset information
dplyr::mutate(dataSource = "JoLa_Anthophila") %>%
dplyr::mutate(license = dataLicense) %>%
    # Add the datasetName and datasetID columns
dplyr::mutate(
datasetName = "JoLa",
datasetID = "JoLa"
)
#### 27.3 Out ####
# Save the dataset
readr::write_excel_csv(JoLa_data, file = paste(path, outFile, sep = "/"))
# Return the data from the function to the user
return(JoLa_data)
} # END readr_JoLa
#### 28.0 VicWam ####
#' @describeIn readr_BeeBDC
#'
#' Reads specific data files into Darwin Core format
#'
#'
#'
readr_VicWam <- function(path = NULL,
inFile = NULL,
outFile = NULL,
dataLicense = NULL,
sheet = "Combined"){
# locally bind variables to the function
fieldNotes <- Catalognumber <- recordNumber <- genus <- specificEpithet <- NULL
year <- . <- VicWam_data <- otherCatalogNumbers <- institutionCode <- class <- order <- NULL
infraspecificEpithet <- NULL
NEAREST <- DISTANCE <- DIST_UNIT <- DIRECTION <- locality <- locality2 <- PLACEACCURACY <-
coordinateUncertaintyInMeters <- DTFR <- day <- month <- year <- eventDate <- eventDate2 <-
DTTO <- dayTO <- monthTO <- yearTO <- eventDateTO <- eventDateTO2 <- LABELFAMILY <-
LABELGENUS <- LABELSPECIES <- associatedTaxa <- stateProvince <- country <- dataSource <-
license <- decimalLatitude <- decimalLongitude <- lengthTest <- decimalLatitude2 <-
decimalLongitude2 <- NULL
#### 28.1 Prep ####
  # Load the required packages via requireNamespace(). These packages may still need to be
  # installed in R using install.packages("dplyr")... etc.
requireNamespace("dplyr")
requireNamespace("lubridate")
#### 28.2 Read+ ####
  ###### a. VicWam_data ####
  # Reads in the .xlsx file, trims the white spaces, and formats the columns to the correct type
VicWam_data <- openxlsx::read.xlsx(paste(path, inFile, sep = "/"),
sheet = sheet[1]) %>%
# Start by renaming columns
dplyr::rename(
recordNumber = "Reg_DoreyExtension",
otherCatalogNumbers = "COLLNUM",
institutionCode = "INSTITUTE",
class = "CLASS",
order = "ORDER",
"superfamily" = "SUPERFAMILY",
"family" = "FAMILY",
"subfamily" = "SUBFAMILY",
"tribe" = "TRIBE",
"genus" = "GENUS",
"subgenus" = "SUBGENUS",
"specificEpithet" = "SPECIES",
"infraspecificEpithet" = "SUBSPECIES",
"identifiedBy" = "DTMNDBY",
"dateIdentified" = "DTMNDDT",
"lifeStage" = "LIFEHISTORY",
"sex" = "SEX",
"identificationQualifier" = "NAMEQUALIFIER",
"typeStatus" = "SPCMTYPE",
"individualCount" = "SPECNUM",
"stateProvince" = "STATE",
"locality" = "SITE",
"decimalLatitude" = "LATDEC",
"decimalLongitude" = "LONGDEC",
"recordedBy" = "COLLTOR",
"basisOfRecord" = "STORAGE",
"verbatimLatitude" = "LATITUDE",
"verbatimLongitude" = "LONGITUDE",
"occurrenceRemarks" = "NOTES"
) %>%
# Create the scientificName
dplyr::mutate(scientificName = stringr::str_c(
dplyr::if_else(!is.na(genus),
paste0(genus), ""),
dplyr::if_else(!is.na(specificEpithet),
paste0(specificEpithet), ""),
dplyr::if_else(!is.na(infraspecificEpithet),
paste0(infraspecificEpithet), ""),
sep = " "
) %>% stringr::str_squish()) %>%
# modify locality
dplyr::mutate(locality2 = stringr::str_c(dplyr::if_else(complete.cases(NEAREST),
NEAREST, ""),
dplyr::if_else(complete.cases(DISTANCE),
as.character(DISTANCE), ""),
dplyr::if_else(complete.cases(DIST_UNIT),
DIST_UNIT, ""),
dplyr::if_else(complete.cases(DIRECTION),
paste0(DIRECTION, "\u00b0"),
""),
sep = " ") %>% as.character() %>%
stringr::str_squish(),
.after = locality) %>%
dplyr::mutate(locality = dplyr::if_else(is.na(locality),
locality2, locality)) %>%
dplyr::select(!locality2) %>%
# Extract coordinateUncertaintyInMeters
dplyr::mutate(
coordinateUncertaintyInMeters = PLACEACCURACY %>% stringr::str_extract(
"[0-9]+\\s[m(km)(mi)]+") %>% stringr::str_replace_all(c("km" = "000",
"m" = "")) %>%
stringr::str_replace(" ","") %>% as.numeric(),
.after = PLACEACCURACY
) %>%
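    # For example (a hypothetical PLACEACCURACY value): a string containing "5 km" is extracted,
    # "km" becomes "000" and the space is dropped, giving a numeric uncertainty of 5000 m.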
# Create eventDate
dplyr::mutate(
day = DTFR %>% stringr::str_extract("^[0-9]+/") %>% stringr::str_remove_all("/") %>%
dplyr::if_else(. == "00", NA_character_, .),
month = DTFR %>% stringr::str_extract("/[0-9]+/") %>% stringr::str_remove_all("/") %>%
dplyr::if_else(. == "00", NA_character_, .),
year = DTFR %>% stringr::str_extract("/[0-9]+$") %>% stringr::str_remove_all("/") %>%
dplyr::if_else(. == "0000", NA_character_, .),
eventDate = lubridate::dmy(stringr::str_c(day, month, year, sep = "/"), truncated = 2),
eventDate2 = dplyr::if_else(!stringr::str_detect(DTFR, "[a-zA-Z]"), # convert from silly excel numeric format to real dates...
as.Date(as.numeric(DTFR), origin = "1899-12-30") %>% as.character(),
DTFR) %>% lubridate::ymd(truncated = 2),
# if eventDate is empty, use eventDate2
eventDate = dplyr::if_else(is.na(eventDate),
eventDate2, eventDate),
.after = DTFR
) %>% dplyr::select(!eventDate2) %>%
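    # For reference (an assumed example of the Excel-serial branch above): a DTFR value of
    # "36526" contains no letters, so it is converted with the 1899-12-30 origin to 2000-01-01.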
# Create the date to
dplyr::mutate(
dayTO = DTTO %>% stringr::str_extract("^[0-9]+/") %>% stringr::str_remove_all("/") %>%
dplyr::if_else(. == "00", NA_character_, .),
monthTO = DTTO %>% stringr::str_extract("/[0-9]+/") %>% stringr::str_remove_all("/") %>%
dplyr::if_else(. == "00", NA_character_, .),
yearTO = DTTO %>% stringr::str_extract("/[0-9]+$") %>% stringr::str_remove_all("/") %>%
dplyr::if_else(. == "0000", NA_character_, .),
eventDateTO = lubridate::dmy(stringr::str_c(dayTO, monthTO, yearTO, sep = "/"), truncated = 2),
eventDateTO2 = dplyr::if_else(!stringr::str_detect(DTTO, "[a-zA-Z]"), # convert from silly excel numeric format to real dates...
as.Date(as.numeric(DTTO), origin = "1899-12-30") %>% as.character(),
DTTO %>% as.character()) %>% lubridate::ymd(truncated = 2),
# if eventDateTO is empty, use eventDateTO2
eventDateTO = dplyr::if_else(is.na(eventDateTO),
eventDateTO2, eventDateTO) %>% as.character(),
eventDateTO = dplyr::if_else(is.na(eventDateTO),
stringr::str_c(
dplyr::if_else(complete.cases(yearTO),
yearTO,""),
dplyr::if_else(complete.cases(monthTO),
monthTO,""),
dplyr::if_else(complete.cases(dayTO),
dayTO,""),
sep = " ") %>% stringr::str_squish() %>%
stringr::str_replace(" ", "-"),
eventDateTO),
.after = DTTO
) %>%
dplyr::select(!tidyselect::any_of(c("eventDateTO2", "dayTO", "monthTO", "yearTO"))) %>%
# Add field notes
dplyr::mutate(
fieldNotes = stringr::str_c(
dplyr::if_else(!is.na(eventDate),
paste0("startDate: ", eventDate), ""),
dplyr::if_else(!is.na(eventDateTO),
paste0("endDate: ", eventDateTO), ""),
dplyr::if_else(!is.na(LABELFAMILY),
paste0("associatedFamily: ", LABELFAMILY), ""),
dplyr::if_else(!is.na(LABELGENUS),
paste0("associatedGenus: ", LABELGENUS), ""),
dplyr::if_else(!is.na(LABELSPECIES),
paste0("associatedSpecies: ", LABELSPECIES), ""),
sep = "|")) %>%
# Add associatedTaxa
dplyr::mutate(associatedTaxa = stringr::str_c(
dplyr::if_else(!is.na(LABELGENUS),
paste0(LABELGENUS), ""),
dplyr::if_else(!is.na(LABELSPECIES),
paste0(LABELSPECIES), ""),
sep = " ") %>% stringr::str_squish()) %>%
# Worst case, use family
dplyr::mutate(associatedTaxa = stringr::str_c(
dplyr::if_else(is.na(associatedTaxa),
paste0(LABELFAMILY), associatedTaxa)
)) %>%
# Format country name
dplyr::mutate(country = stringr::str_to_sentence(country)) %>%
dplyr::mutate(stateProvince = stringr::str_replace_all(stateProvince,
c("^ACT$" = "Australian Capital Territory",
"New South wales" = "New South Wales",
"^NSW" = "New South Wales",
"^NT$" = "Northern Territory",
"^Qld$" = "Queensland",
"^SA$" = "South Australia",
"^Tas$" = "Tasmania",
"^Vic$" = "Victoria",
"^WA$" = "Western Australia",
"Western australia" = "Western Australia"))) %>%
# Remove any double white-spaces
apply(., 2, stringr::str_squish) %>% dplyr::as_tibble() %>%
# add the database_id column
dplyr::mutate(
database_id = paste("VicWam_data_", 1:nrow(.), sep = ""),
.before = 1) %>%
# Add dataset information
dplyr::mutate(dataSource = "VicWam_Anthophila") %>%
dplyr::mutate(license = dataLicense) %>%
    # Add the datasetName and datasetID columns
dplyr::mutate(
datasetName = "VicWam",
datasetID = "VicWam"
) %>%
# Format some lats and lons to decimal degrees
# first, squish out extra spaces
dplyr::mutate(
decimalLatitude = decimalLatitude %>% stringr::str_squish(),
decimalLongitude = decimalLongitude %>% stringr::str_squish(),
) %>%
# make a column to test for the lat/lon length
dplyr::mutate(lengthTest = dplyr::if_else(decimalLatitude %>% stringr::str_detect("S"),
dplyr::if_else(stringr::str_count(decimalLatitude,
"\\s") > 2,
"Long", "Short"),
"NA"),
.before = decimalLatitude) %>%
# Convert to DD
# Long coordinates
dplyr::mutate(decimalLatitude2 = dplyr::if_else(decimalLatitude %>% stringr::str_detect("S") &
lengthTest == "Long",
stringr::str_c(stringr::str_extract(decimalLatitude, "^[0-9]{2}") %>%
as.numeric() +
(stringr::str_extract(decimalLatitude,
"\\s[0-9]{2}\\s") %>%
as.numeric() / 60) +
(stringr::str_extract(decimalLatitude,
"[0-9]{2}\\sS") %>%
stringr::str_remove("S") %>%
as.numeric() / 3600)) %>%
as.character(),
decimalLatitude),
decimalLongitude2 = dplyr::if_else(decimalLongitude %>% stringr::str_detect("E") &
lengthTest == "Long",
stringr::str_c(stringr::str_extract(decimalLongitude, "^[0-9]+") %>%
as.numeric() +
(stringr::str_extract(decimalLongitude,
"\\s[0-9]{2}\\s") %>%
as.numeric() / 60) +
(stringr::str_extract(decimalLongitude,
"\\s[0-9]+\\sE") %>%
stringr::str_remove("E") %>%
as.numeric() / 3600)) %>%
as.character(),
decimalLongitude),
.after = decimalLatitude) %>%
# Short coordinates
dplyr::mutate(decimalLatitude2 = dplyr::if_else(decimalLatitude %>% stringr::str_detect("S") &
lengthTest == "Short",
stringr::str_c(stringr::str_extract(decimalLatitude, "^[0-9]{2}") %>%
as.numeric() +
(stringr::str_extract(decimalLatitude,
"\\s[0-9]{2}\\sS") %>%
stringr::str_remove("S") %>%
as.numeric() / 60) ) %>%
as.character(),
decimalLatitude2) %>% as.numeric(),
decimalLongitude2 = dplyr::if_else(decimalLongitude %>% stringr::str_detect("E") &
lengthTest == "Short",
stringr::str_c(stringr::str_extract(decimalLongitude, "^[0-9]+") %>%
as.numeric() +
(stringr::str_extract(decimalLongitude,
"\\s[0-9]{2}+\\sE") %>%
stringr::str_remove("E") %>%
as.numeric() / 60) ) %>%
as.character(),
decimalLongitude2) %>% as.numeric(),
.after = decimalLatitude) %>%
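    # Worked example (hypothetical coordinates): a "Long" latitude of "32 15 30 S" becomes
    # 32 + 15/60 + 30/3600 = 32.2583 (rounded to two decimals below), while a "Short" value of
    # "32 15 S" becomes 32 + 15/60 = 32.25 (rounded to one decimal); both are later made
    # negative because the records fall in the southern hemisphere.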
# Change lat/lon to correct accuracy
dplyr::mutate(
decimalLatitude2 = dplyr::if_else(lengthTest == "Long",
round(decimalLatitude2, digits = 2),
dplyr::if_else(lengthTest == "Short",
round(decimalLatitude2, digits = 1),
decimalLatitude2)),
decimalLongitude2 = dplyr::if_else(lengthTest == "Long",
round(decimalLongitude2, digits = 2),
dplyr::if_else(lengthTest == "Short",
round(decimalLongitude2, digits = 1),
decimalLongitude2))
) %>%
# Add coordinateUncertaintyInMeters for these as well based on the length of coordinates
dplyr::mutate(
coordinateUncertaintyInMeters = coordinateUncertaintyInMeters %>% as.numeric(),
coordinateUncertaintyInMeters = dplyr::if_else(
is.na(coordinateUncertaintyInMeters),
dplyr::if_else(lengthTest == "Long", 1000, 10000),
coordinateUncertaintyInMeters
)) %>%
# Replace the lat/lon columns with the working columns
dplyr::select(!c("decimalLongitude", "decimalLatitude")) %>%
dplyr::rename(decimalLongitude = "decimalLongitude2",
decimalLatitude = "decimalLatitude2") %>%
# All of Australia is in the southern hemisphere, correct these latitudes to negative
dplyr::mutate(decimalLatitude = dplyr::if_else(
stateProvince %in% c("Australian Capital Territory", "New South Wales",
"Northern Territory", "Queensland", "South Australia",
"Tasmania", "Victoria", "Western Australia", ""),
abs(decimalLatitude) * - 1,
decimalLatitude)) %>%
    # Do the same for longitude, but the entire dataset is in the eastern hemisphere
dplyr::mutate(decimalLongitude = abs(decimalLongitude))
#### 28.3 Out ####
# Save the dataset
readr::write_excel_csv(VicWam_data, file = paste(path, outFile, sep = "/"))
# Return the data from the function to the user
return(VicWam_data)
} # END readr_VicWam
#### End of file: BeeBDC/R/additionalData_readRs.R ####
# Additional inputs were made by mjwestgate to allow continued support with galah V2
##### 1. atlasDownloader ####
#' Download occurrence data from the Atlas of Living Australia (ALA)
#'
#'
#' Downloads ALA data and creates a new file in the path to put those data. This function can also
#' request downloads from other atlases (see: http://galah.ala.org.au/articles/choosing_an_atlas.html).
#' However, it will only send the download to your email and you must do the rest yourself at this point.
#'
#' @param path A character directory. The path to a folder where the download will be stored.
#' @param userEmail A character string. The email address associated with the user's ALA account;
#' the user must make an ALA account to download data.
#' @param ALA_taxon A character string. The taxon to download from ALA. Uses [galah::galah_identify()]
#' @param DL_reason Numeric. The reason for data download according to [galah::galah_config()]
#' @param atlas Character. The atlas to download occurrence data from - see here https://galah.ala.org.au/R/articles/choosing_an_atlas.html for details.
#' Note: the default is "ALA" and is probably the only atlas which will work seamlessly with the rest
#' of the workflow. However, different atlases can still be downloaded and a doi will be sent to
#' your email.
#'
#' @return Completes an ALA data download and saves those data to the path provided.
#'
#' @importFrom dplyr %>%
#' @importFrom utils unzip
#' @export
#'
#' @examples
#' \dontrun{
#' atlasDownloader(path = DataPath,
#' userEmail = "InsertYourEmail",
#' ALA_taxon = "Apiformes",
#' DL_reason = 4)
#' }
atlasDownloader <- function(path, userEmail = NULL, ALA_taxon, DL_reason = 4, atlas = "ALA"){
# locally bind variables to the function
. <- file_name <- NULL
#### Intro checks ####
writeLines(paste("1.","\n",
" - Note: galah has a 50 million record download limit.", "\n",
"You may call atlas_counts() to check.", "\n",
" - Additionally, you must register your email with your ", atlas, " otherwise you will get an ",
"error message.", "\n",
"See here - https://www.ala.org.au - or your relevant atlas","\n",
" - Valid donwload reasons include can be found by running show_all_reasons()",
sep = ""))
# Check for a userEmail input present and halt if FALSE
if(exists("userEmail") == FALSE){
stop("You must provide a userEmail for the ",atlas," download.")
}
# Check for a userEmail format and halt if FALSE
if(grepl( ".[^@]+@{1}.+\\..+", userEmail) == FALSE){
stop("The email you entered might be incorrect, please double-check the format.")
}
requireNamespace("galah")
# Define ColsToKeep
ColsToKeep <- BeeBDC::ColTypeR()[[1]] %>% names()
# Create a new working directory for ALA data in the path provided
dir.create(paste0(path, "/", atlas, "_galah_path", sep = ""), showWarnings = FALSE)
atlas_galah_path <- paste0(path, "/", atlas, "_galah_path")
# Set up the ALA download configuration
writeLines(" - Setting galah configuration.")
galah::galah_config(directory = atlas_galah_path,
download_reason_id = DL_reason,
verbose=TRUE,
email = userEmail,
send_email = TRUE,
atlas = atlas)
#### ALA download ####
# Choose ALA columns to download
# Thankfully, ALA has a fantastic r package, galah, that allows easy download of occurrence data.
# Thank you, ALA <3
# DOWNLOAD ALA data here
# Apiformes is an informal name that is helpful to select the bee families out of the superfamily Apoidea.
writeLines(paste("2.","\n",
" - Beginning atlas download via galah.", "\n",
"A progress bar of your download should appear shortly. You will also receive an email ",
"when your download is complete.", sep = ""))
# Use of `Sys.Date()` comes with the risk that consecutive downloads on the same day will
# overwrite each other, even if they are for different queries
  # Note: `file_name` below is chosen for consistency with previous versions of BeeBDC
file_name <- paste0("galah_download_", Sys.Date(), ".zip")
ALA_Occurence_download <- galah::galah_call() %>%
galah::galah_identify(ALA_taxon) %>%
galah::galah_select(tidyselect::any_of(ColsToKeep)) %>%
galah::atlas_occurrences(mint_doi = FALSE, file = file_name)
# get download attributes from file and make it into a dataframe
attrs_ALA_Occurence_download <- attributes(ALA_Occurence_download)
writeLines(paste("3.","\n"," - atlas download is complete.", "\n",
"The script will now unzip all of the data and metadata to ",
atlas_galah_path, ". This may take a short while.",
sep = ""))
# unzip the file
unzip(
# File to unzip
zipfile = paste(atlas_galah_path,
"/galah_download_", Sys.Date(), ".zip",
sep = ""),
# Where to put the extracted file
exdir = paste(atlas_galah_path,
"/galah_download_folder",
sep = ""),
overwrite = TRUE)
#### Save data ####
# Save some download information
dplyr::tibble(
downloaders_email = userEmail,
taxon = ALA_taxon,
doi = attr(ALA_Occurence_download, "doi"),
search_url = attr(ALA_Occurence_download, "search_url"),
# data_type = attrs_ALA_Occurence_download$data_type, # not supported post galah v.2
# data_request = paste(dplyr::lst(attrs_ALA_Occurence_download$data_request)), # not supported post galah v.2
ALA_download_reason = DL_reason,
download_date = Sys.Date()) %>%
      readr::write_excel_csv(file = paste(atlas_galah_path,
"/galah_download_folder/",
"galah_DL_info.csv",
sep = ""))
# Write user instructions
writeLines(paste("4.","\n"," - Fin.",
sep = ""))
}
##### Current end ALA ####
#### End of file: BeeBDC/R/atlasDownloader.R ####
##### 2.2 attr_builder ####
#' @importFrom lubridate as_date
#' @importFrom stringr str_to_sentence
#' @importFrom dplyr %>%
attr_builder <- function(path_i = path_i, occ_input = data_i){
# locally bind variables to the function
lubridate <- stringr <- family <- data_i <- NULL
requireNamespace("lubridate")
requireNamespace("dplyr")
requireNamespace("xml2")
# This function behaves differently depending on the data source, but returns common outputs.
#### ALA START ####
if(grepl("/data.csv", path_i) == "TRUE"){
# Find and take the citations file
citations_i <- gsub("/data.csv", "/citation.csv", path_i) %>%
readr::read_csv(col_types = readr::cols(.default = readr::col_character()))
# Find and take the download information file
galahDL_i <- gsub("/data.csv", "/galah_DL_info.csv", path_i) %>%
readr::read_csv(n_max = 1, col_types = readr::cols(.default = readr::col_character()))
# Read in the closest thing ALA has to an abstract
galahAbstract_i <- gsub("/data.csv", "/README.html", path_i) %>%
rvest::read_html() %>%
rvest::html_text2()
# Combine all of these attributes into a tibble
Attributes_i <- dplyr::tibble(dataSource = paste("ALA_",
galahDL_i$taxon,
sep = ""),
alternateIdentifier = if("search_url" %in% colnames(galahDL_i)){
galahDL_i$search_url}else(NA_character_),
title = "ALA Occurrence Download. ",
pubDate = galahDL_i$download_date %>%
lubridate::as_date(),
dateStamp = galahDL_i$download_date,
doi = galahDL_i$doi,
downloadLink = galahDL_i$download_link,
abstract = dplyr::lst(galahAbstract_i),
citations = dplyr::lst(citations_i),
downloadCitation = paste("ALA.org.au. (",
lubridate::as_date(galahDL_i$download_date) %>%
format("%d %B %Y"),
"). ALA Occurrence Download. ",
galahDL_i$doi,
sep = ""),
rights = dplyr::lst("See occurrence records"),
taxon = galahDL_i$taxon)
# combine the input eml and the attributes tibble into a list for output from the function
EML_attributes <- list("No_eml_from_ALA", Attributes_i)
names(EML_attributes) <- c("source_eml","Source_tibble")
# output this list
return(EML_attributes)
} # ALA END
#### GBIF START ####
if(grepl("occurrence.txt", path_i) == "TRUE"){
# Find and take the metadata file
sourceEML_i <- emld::as_emld(gsub("/occurrence.txt", "/metadata.xml", path_i), from = "xml" )
# Find and take the citations file - convert into a list
citations_i <- gsub("/occurrence.txt", "/citations.txt", path_i) %>%
readr::read_lines() %>%
dplyr::lst()
# Find and take the rights file - convert into a list
rights_i <- gsub("/occurrence.txt", "/rights.txt", path_i) %>%
readr::read_lines() %>%
dplyr::lst()
# Find the download name
fam_name <- tidyr::drop_na(
occ_input, tidyselect::any_of("family")) %>%
dplyr::pull(family) %>%
unique()
    # Combine all of these attributes into a tibble
Attributes_i <- dplyr::tibble(dataSource = paste("GBIF_",
fam_name,
sep = ""),
alternateIdentifier = sourceEML_i$dataset$alternateIdentifier,
title = sourceEML_i$dataset$title,
pubDate = sourceEML_i$dataset$pubDate %>%
stringr::str_match("[0-9]{4}-[0-9]{2}-[0-9]{2}") %>%
lubridate::as_date(),
dateStamp = sourceEML_i$additionalMetadata$metadata$gbif$dateStamp,
doi = paste("https://doi.org/",
sourceEML_i$additionalMetadata$metadata$gbif$citation$identifier, sep = ""),
downloadLink = sourceEML_i$additionalMetadata$metadata$gbif$physical$distribution$online$url$url,
abstract = dplyr::lst(sourceEML_i$dataset$abstract),
citations = dplyr::lst(citations_i),
downloadCitation = paste("GBIF.org. (",
lubridate::as_date(sourceEML_i$dataset$pubDate) %>%
format("%d %B %Y"),
"). GBIF Occurrence Download. ",
paste("https://doi.org/",
sourceEML_i$additionalMetadata$metadata$gbif$citation$identifier,
sep = ""),
sep = ""),
rights = dplyr::lst(rights_i),
taxon = fam_name)
# combine the input eml and the attributes tibble into a list for output from the function
EML_attributes <- list(sourceEML_i, Attributes_i)
names(EML_attributes) <- c("source_eml","Source_tibble")
# output this list
return(EML_attributes)
} # GBIF END
#### iDigBio START ####
if(grepl("occurrence_raw.csv", path_i) == "TRUE"){
# Find and take the citations file - convert into a list
citations_i <- gsub("/occurrence_raw.csv", "/records.citation.txt", path_i) %>%
readr::read_lines() %>%
dplyr::lst()
    # Combine all of these attributes into a tibble
Attributes_i <- dplyr::tibble(dataSource = paste("iDigBio_", citations_i$.[2] %>%
stringr::str_match_all("[A-Za-z]+") %>%
unlist() %>%
dplyr::last(), sep = ""),
# Check for alt identifier and include if there
alternateIdentifier =
if(length(path_i %>%
stringr::str_match_all("[a-zA-Z0-9]+-[a-zA-Z0-9]+-[a-zA-Z0-9]+-[a-zA-Z0-9]+-[a-zA-Z0-9]+") %>%
unlist()) > 0){
# TRUE
path_i %>%
stringr::str_match_all("[a-zA-Z0-9]+-[a-zA-Z0-9]+-[a-zA-Z0-9]+-[a-zA-Z0-9]+-[a-zA-Z0-9]+") %>%
unlist()
}else{
# FALSE
NA_character_},
title = citations_i$.[1],
pubDate = citations_i$.[3] %>%
stringr::str_match("[0-9]{4}-[0-9]{2}-[0-9]{2}") %>%
lubridate::as_date(),
dateStamp = citations_i$.[3] %>%
stringr::str_match("[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}") %>%
as.character(),
doi = "Use downloadLink for iDigBio",
downloadLink = paste("http://s.idigbio.org/idigbio-downloads/",
path_i %>%
stringr::str_match_all("[a-zA-Z0-9]+-[a-zA-Z0-9]+-[a-zA-Z0-9]+-[a-zA-Z0-9]+-[a-zA-Z0-9]+"),
".zip", sep = ""),
abstract = dplyr::lst(citations_i$.[1:4]),
citations = dplyr::lst(citations_i$.[4:length(citations_i$.)]),
downloadCitation = paste("iDigBio.org. (",
citations_i$.[3] %>%
stringr::str_match("[0-9]{4}-[0-9]{2}-[0-9]{2}") %>%
lubridate::as_date() %>%
format("%d %B %Y"),
"). iDigBio Occurrence Download. ",
paste("http://s.idigbio.org/idigbio-downloads/",
path_i %>%
stringr::str_match_all("[a-zA-Z0-9]+-[a-zA-Z0-9]+-[a-zA-Z0-9]+-[a-zA-Z0-9]+-[a-zA-Z0-9]+"),
".zip", sep = ""),
sep = ""),
rights = dplyr::lst("See occurrence records"),
taxon = (citations_i$.[2] %>%
stringr::str_match_all("[A-Za-z]+") %>%
unlist() %>%
dplyr::last())
) # END tibble()
# combine the input eml and the attributes tibble into a list for output from the function
EML_attributes <- list("No_eml_from_iDigBio", Attributes_i)
names(EML_attributes) <- c("source_eml","Source_tibble")
# output this list
return(EML_attributes)
} # iDigBio END
#### SCAN START ####
if(grepl("occurrences.csv", path_i) == "TRUE"){
# Find and take the metadata file
sourceEML_i <- xml2::read_xml(gsub("/occurrences.csv", "/eml.xml", path_i), from = "xml" ) %>%
emld::as_emld()
    # Combine all of these attributes into a tibble
Attributes_i <- dplyr::tibble(dataSource = paste("SCAN_",
unique(stringr::str_to_sentence(occ_input$family)),
sep = ""),
alternateIdentifier = sourceEML_i$additionalMetadata$metadata$symbiota$citation$identifier,
title = sourceEML_i$dataset$title$title,
pubDate = sourceEML_i$additionalMetadata$metadata$symbiota$dateStamp %>%
stringr::str_match("[0-9]{4}-[0-9]{2}-[0-9]{2}") %>%
lubridate::as_date(),
dateStamp = sourceEML_i$additionalMetadata$metadata$symbiota$dateStamp,
doi = "SCAN does not provide a doi. See download link.",
downloadLink = "SCAN does not provide a download link.",
abstract = dplyr::lst("SCAN does not provide a single abstract"),
citations = dplyr::lst(paste("SCAN. ",
(sourceEML_i$additionalMetadata$metadata$symbiota$dateStamp %>%
stringr::str_match("[0-9]{4}")),
". http//:scan-bugs.org/portal/index.php. ",
"accessed on ",
(sourceEML_i$additionalMetadata$metadata$symbiota$dateStamp %>%
stringr::str_match("[0-9]{4}-[0-9]{2}-[0-9]{2}")),
". ", sourceEML_i$additionalMetadata$metadata$symbiota$citation$citation,
sep = "" ) ),
downloadCitation = paste("SCAN. (",
sourceEML_i$additionalMetadata$metadata$symbiota$dateStamp %>%
stringr::str_match("[0-9]{4}-[0-9]{2}-[0-9]{2}") %>%
lubridate::as_date() %>%
format("%d %B %Y"),
"). SCAN-Bugs Occurrence Download ",
"http//:scan-bugs.org/portal/index.php. uuid - ",
sourceEML_i$additionalMetadata$metadata$symbiota$citation$identifier,
sep = ""),
rights = dplyr::lst("See .xml for rights"),
taxon = unique(stringr::str_to_sentence(occ_input$family)))
# combine the input eml and the attributes tibble into a list for output from the function
EML_attributes <- list(sourceEML_i, Attributes_i)
names(EML_attributes) <- c("source_eml","Source_tibble")
# output this list
return(EML_attributes)
} # SCAN END
} # END attr_builder
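# Example dispatch (a minimal sketch; the path and object names are hypothetical): the file name
# inside path_i determines which branch builds the attributes, e.g.
# attrs <- attr_builder(path_i = "downloads/0012345/occurrence.txt", occ_input = occurrences)
# attrs$Source_tibble$dataSource # would be "GBIF_<family>" for a GBIF download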
#### End of file: BeeBDC/R/attr_builder.R ####
#### Aux. functions ####
##### a. dataSaver ####
#' Simple function to save occurrence AND EML data as a list
#'
#' Used at the end of 1.x in the example workflow in order to save the occurrence dataset and its associated eml metadata.
#'
#' @param path Character. The main file path to look for data in.
#' @param save_type Character. The file format in which to save occurrence and EML data.
#' Either "R_file" or "CSV_file"
#' @param occurrences The occurrences to save as a data frame or tibble.
#' @param eml_files A list of the EML files.
#' @param file_prefix Character. A prefix for the resulting output file.
#'
#' @return This function saves both occurrence and EML data as a list when save_type = "R_File" or
#' as individual csv files when save_type = "CSV_file".
#' @export
#'
#' @importFrom dplyr %>%
#'
#' @examples
#' \dontrun{
#' dataSaver(path = tempdir(),# The main path to look for data in
#' save_type = "CSV_file", # "R_file" OR "CSV_file"
#' occurrences = Complete_data$Data_WebDL, # The existing datasheet
#' eml_files = Complete_data$eml_files, # The existing EML files
#' file_prefix = "Fin_") # The prefix for the file name
#' }
#'
dataSaver <- function(path = NULL,
save_type = NULL,
occurrences = NULL,
eml_files = NULL,
file_prefix = NULL){
# locally bind variables to the function
. <- countComplete <- NULL
# Check if the save type is present AND valid
if(exists("save_type") == FALSE){ # If there are no data matching the name...
stop(" - Heck! You must choose a save_type for the data output. Either .rds or .csv files")
} # END Missing save_type type
if(sum(stringr::str_count(pattern = c("R_file","CSV_file"), string = save_type)) == 0 ){
stop(" - Heck! Please choose a valid save_type for the data output. Either .rds (R_file) or .csv files (csv_files)")
} # END invalid save_type
# If there is no file prefix... name it "BeeData_"
if(is.null(file_prefix) == TRUE){
file_prefix <- "BeeData_"
}else{
file_prefix <- paste(file_prefix,"BeeData_", sep = "")
}
# Notify user if an out_file has been created and make one, if required.
outPath <- outFile_maker(path = path)
# Extract the attribute data
occurrences_attributes <- attributes(occurrences)
# Find the empty columns
colTest <- occurrences %>%
    dplyr::summarise(dplyr::across(tidyselect::everything(), ~ sum(complete.cases(.))))
colKeeps <- dplyr::tibble(column = colnames(colTest),
countComplete = t(colTest)[,1]) %>%
dplyr::filter(countComplete > 0)
# Cols to remove:
colRemoves <- dplyr::tibble(column = colnames(colTest),
countComplete = t(colTest)[,1]) %>%
dplyr::filter(countComplete == 0)
# Discard empty columns
occurrences <- occurrences %>%
dplyr::select(tidyselect::all_of(colKeeps$column))
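    # For example (an assumed illustration of the step above): a column that is entirely NA has
    # countComplete == 0, so it appears in colRemoves rather than colKeeps and is dropped here.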
message(paste0(
" - We have removed empty columns. This is standard, but as an FYI, these columns are: ",
paste(colRemoves$column, collapse = ", ")
))
#### R save ####
# Save R data
if(save_type == "R_file"){
writeLines( paste(" - Writing occurrence, attribute, and EML data file in .rds format...", "\n",
"Number of records: ", format(nrow(occurrences), big.mark=",",scientific=FALSE), "\n",
"Number of attribute sources: ", format(nrow(occurrences_attributes$dataSource),
big.mark=",",scientific=FALSE), "\n",
"The ", length(names(eml_files)), " eml sources are ",
paste(names(eml_files), collapse = ", "), "\n",
"Writing to file called ", paste(file_prefix, Sys.Date(), ".rds", sep = ""),
" at location ", outPath,"...",
sep = ""))
# Save all of these data into a .rds format
list(occurrences, eml_files) %>%
saveRDS(., file = paste(outPath, "/", file_prefix, Sys.Date(), ".rds", sep = ""))
}
#### csv save ####
# Save csv files
if(save_type == "CSV_file"){
##### Occ. file ####
writeLines( paste(" - Writing occurrence data file in csv format...", "\n",
"Number of rows (records): ", format(nrow(occurrences), big.mark=",",scientific=FALSE), "\n",
"Writing to file called ", paste(file_prefix, "combined_", Sys.Date(), ".csv", sep = ""),
" at location ", outPath,"...",
sep = ""))
    # Write the occurrence file
readr::write_excel_csv(occurrences, paste(outPath, "/", file_prefix, "combined_", Sys.Date(), ".csv", sep = ""))
#### Attr. file ####
    # Notify the user that attribute data are being written
occurrences_attributes <- attributes(occurrences)
writeLines( paste(" - Writing attribute data file in csv format...", "\n",
"Number of rows (sources): ", format(nrow(occurrences_attributes$dataSource),
big.mark=",",scientific=FALSE), "\n",
"Written to file called ", paste(file_prefix, "attributes_", Sys.Date(), ".csv",
sep = ""),
" at location ", outPath,
sep = ""))
#### DataSource file #####
    # Write the dataSource (attributes) file
readr::write_excel_csv(occurrences_attributes$dataSource, paste(outPath, "/" ,file_prefix, "attributes_",
Sys.Date(), ".csv", sep = ""))
#### All attributes ####
# Update the names of each list element to reflect their source and taxonomic coverage
names(occurrences_attributes$dataSource$citations) <- paste(occurrences_attributes$dataSource$dataSource, "_citations", sep = "")
names(occurrences_attributes$dataSource$rights) <- paste(occurrences_attributes$dataSource$dataSource, "_rights", sep = "")
names(occurrences_attributes$dataSource$abstract) <- paste(occurrences_attributes$dataSource$dataSource, "_abstract", sep = "")
#### EML file ####
if(!is.null(eml_files)){
# Notify user that the .eml file is being written
writeLines( paste(" - Writing eml file in xml format...", "\n",
"The ", length(names(eml_files)), " eml sources are ",
paste(names(eml_files), collapse = ", "), "\n",
"Written to file called ", paste("eml_files", Sys.Date(),".xml", sep="" ),
" at location ", outPath,
sep = ""))
# Write the compounded .eml file as a .rds file
saveRDS(eml_files, file = paste(outPath, "/eml_files", Sys.Date(),".rds", sep="" ))
# Write the .rds file with all attribute information - this file can then be read into R again later
occurrences_attributes %>%
saveRDS(., file = paste(outPath, "/", file_prefix, "completeAttributes_", Sys.Date(), ".rds", sep = ""))
}}
# Print completion note
writeLines(paste(" - dataSaver. Fin.", sep = "\n"))
} # END dataSaver
#### +++++ ####
##### b. Set strings ####
# Set up the bee family list
Bee_Families <- c("Andrenidae","Apidae", "Colletidae","Halictidae","Megachilidae","Melittidae",
"Stenotritidae","andrenidae","apidae", "colletidae","halictidae","megachilidae",
"melittidae","stenotritidae")
##### c. outFile_maker ####
outFile_maker <- function(path = path, file2make = "out_file"){
# Write user output...
writeLines(" - Checking for existing out_file directory...")
# Look for outfile
outFileLoc <- file.info(list.files(path, full.names = T,
pattern = file2make,
recursive = TRUE,
include.dirs = TRUE)
)
  # IF there is no out_file directory, create one.
  if(nrow(outFileLoc) == 0){
    writeLines(paste(" - No existing ", file2make, " directory found. Creating directory...", sep = ""))
dir.create(path = paste(path, file2make, sep = "/"))
} # END create outfile
  # IF there IS an out_file directory, notify the user that data will be saved there.
  if(nrow(outFileLoc) != 0){
    writeLines(paste(" - Existing ", file2make, " directory found. Data will be saved here.", sep = ""))
  } # END existing out_file
return(paste(path, file2make, sep = "/"))
} # END outFile_maker
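# Hypothetical usage sketch: outFile_maker(path = tempdir()) checks for (and, if needed,
# creates) an "out_file" directory under tempdir() and returns its path; changing the second
# argument, e.g. file2make = "figures", does the same for a differently named directory.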
#### ++++++ ####
#### d. fileFinder ####
#' Finds files within a directory
#'
#' A function which can be used to find files within a user-defined directory based on a
#' user-provided character string.
#'
#' @param path A directory as character. The directory to recursively search.
#' @param fileName A character/regex string. The file name to find.
#'
#' @return Returns a directory to the most-recent file that matches the provided fileName.
#'  Using regex can greatly improve specificity.
#' The function will also write into the console the file that it has found - it is worthwhile to
#' check that this is the correct file to avoid complications down the line
#'
#' @importFrom stats complete.cases
#' @importFrom dplyr desc %>%
#'
#' @export
#'
#' @examples
#' \donttest{
#' # load dplyr
#' library(dplyr)
#'
#' # Make the RootPath to the tempdir for this example
#' RootPath <- tempdir()
#'
#' # Load the example data
#' data("beesRaw", package = "BeeBDC")
#'
#' # Save and example dataset to the temp dir
#' readr::write_csv(beesRaw, file = paste0(RootPath, "/beesRaw.csv"))
#'
#' # Now go find it!
#' fileFinder(path = RootPath, fileName = "beesRaw")
#' # more specifically the .csv version
#' fileFinder(path = RootPath, fileName = "beesRaw.csv")
#' }
fileFinder <- function(path, fileName){
# locally bind variables to the function
. <- dates <- NULL
# Find all of the previously-produced data files
locations <- file.info(list.files(path, full.names = T, pattern = fileName,
recursive = TRUE))
# Check if the data are present
if(nrow(locations) == 0){ # If there are no data matching the name...
stop(paste0(" - Bugger it, R can't find the file that you're looking for :(\n",
"Please check that it exists in the provided directory."))
}
##### Date from name ####
# Extract only the file name to find the date from...
FileName_dates <- stringr::str_replace(rownames(locations), ".+/", "") %>%
# Remove additional text
stringr::str_replace(., "[a-zA-Z\\/\\_]+[a-zA-Z\\/\\_]+", "") %>%
# Find those files with dates in their name in d-m-y format
dplyr::tibble(
# Extract the dates from the file path rownames and supress the warning from non-matches
suppressWarnings(lubridate::dmy(.), classes = "warning")) %>%
# Set the column names of this new tibble
stats::setNames(c("Locs","dates"))
# IF there are no dates, look for ymd format
if(sum(complete.cases(FileName_dates$dates)) == 0){
FileName_dates <- stringr::str_replace(rownames(locations), ".+/", "") %>%
# Remove additional text
stringr::str_replace(., "[a-zA-Z\\/\\_]+[a-zA-Z\\/\\_]+", "") %>%
      # Find those files with dates in their name in y-m-d format
dplyr::tibble(
# Extract the dates from the file path rownames and supress the warning from non-matches
suppressWarnings(lubridate::ymd(.), classes = "warning")) %>%
# Set the column names of this new tibble
stats::setNames(c("Locs","dates"))
}
# Insert the full locations in the Locs column
FileName_dates$Locs <- rownames(locations)
# Sort from most- to least-recent files
FileName_dates <- dplyr::arrange(FileName_dates, desc(dates))
##### Date from ctime ####
# IF there are NO dates in the file names, use the file's ctime
if(sum(complete.cases(FileName_dates$dates)) == 0){
writeLines(" - No dates in file name(s). Finding most-recent from file save time...")
# Find the most-recent file
maxTime <- max(locations$ctime)
# Extract the correct rowname (path)
most_recent <- rownames(locations)[stringr::str_which(string = locations$ctime,
pattern = as.character(maxTime))]
}else{
writeLines(" - Dates found in file name(s). Finding most-recent file from file name...")
# Return the strings containing this date
most_recent <- FileName_dates[1,1]
} # END else
# User output text
writeLines(paste(
" - Found the following file(s):", "\n",
most_recent
))
# Return this file location
return(most_recent[[1]])
} # END fileFinder
# ---- End of file: /scratch/gouwar.j/cran-all/cranData/BeeBDC/R/aux_functions.R ----
# Description of the BeeBDC country checklist dataset
# 16th of March 2023
#' Download a country-level checklist of bees from Discover Life
#'
#'
#' Download the table contains taxonomic and country information for the bees of the world based
#' on data collated on Discover Life. The data will be sourced from the BeeBDC article's
#' Figshare.
#'
#' Note that sometimes the download might not work without restarting R. In this case, you could
#' alternatively download the dataset from the URL below and then read it in using
#' `base::readRDS("filePath.Rda")`.
#'
#' See [BeeBDC::beesTaxonomy()] for further context.
#'
#' @param URL A character vector to the FigShare location of the dataset. The default will be to
#' the most-recent version.
#' @param ... Extra variables that can be passed to [utils::download.file()]
#'
#' @return A downloaded beesChecklist.Rda file in the [tempdir()] and the same tibble returned to
#' the environment.
#'
#'
#' **Column details**
#'
#' **validName** The valid scientificName as it should occur in the scientificName column.
#'
#' **DiscoverLife_name** The full country name as it occurs on Discover Life.
#'
#' **rNaturalEarth_name** Country name from rnaturalearth's name_long.
#'
#' **shortName** A short version of the country name.
#'
#' **DiscoverLife_ISO** The ISO country name as it occurs on Discover Life.
#'
#' **Alpha-2** Alpha-2 from rnaturalearth.
#'
#' **Alpha-3** Alpha-3 from rnaturalearth.
#'
#' **official** Official country name = "yes" or only a Discover Life name = "no".
#'
#' **Source** A text string denoting the source or author of the name-country pair.
#'
#' **matchCertainty** Quality of the name's match to the Discover Life checklist.
#'
#' **canonical** The valid species name without scientificNameAuthority.
#'
#' **canonical_withFlags** The validName without the scientificNameAuthority but with Discover Life flags.
#'
#' **family** Bee family.
#'
#' **subfamily** Bee subfamily.
#'
#' **genus** Bee genus.
#'
#' **subgenus** Bee subgenus.
#'
#' **infraspecies** Bee infraSpecificEpithet.
#'
#' **species** Bee specificEpithet.
#'
#' **scientificNameAuthorship** Bee scientificNameAuthorship.
#'
#' **taxon_rank** Rank of the taxon name.
#'
#' **Notes** Discover Life country name notes.
#'
#'
#' @references This dataset was created using the Discover Life checklist and taxonomy.
#' Dataset is from the publication:
#' DOREY, J. B., CHESSHIRE, P. R., BOLAÑOS, A. N., O’REILLY, R. L., BOSSERT, S., COLLINS, S. M., LICHTENBERG, E. M., TUCKER, E., SMITH-PARDO, A., FALCON-BRINDIS, A., GUEVARA, D. A., RIBEIRO, B. R., DE PEDRO, D., FISCHER, E., HUNG, J. K.-L., PARYS, K. A., ROGAN, M. S., MINCKLEY, R. L., VELZCO, S. J. E., GRISWOLD, T., ZARRILLO, T. A., SICA, Y., ORR, M. C., GUZMAN, L. M., ASCHER, J., HUGHES, A. C. & COBB, N. S. In review. A globally synthesised and flagged bee occurrence dataset and cleaning workflow. Scientific Data.
#' The checklist data are mostly compiled from Discover Life data, www.discoverlife.org:
#' ASCHER, J. S. & PICKERING, J. 2020. Discover Life bee species guide and world checklist (Hymenoptera: Apoidea: Anthophila). http://www.discoverlife.org/mp/20q?guide=Apoidea_species.
#'
#' @export
#'
#' @examples
#'\dontrun{
#' beesChecklist <- BeeBDC::beesChecklist()
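#'
#' # If the automated download fails (e.g., on an unstable connection), the .Rda file can be
#' # downloaded manually from the Figshare URL above and read in; the file path below is only
#' # a placeholder, not a real location.
#' # beesChecklist <- base::readRDS("path/to/beesChecklist.Rda")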
#'}
beesChecklist <- function(URL = "https://figshare.com/ndownloader/files/42320598?private_link=bce1f92848c2ced313ee",
...){
destfile <- checklist <- attempt <- nAttempts <- error_funcFile <- error_func <- NULL
# Set the number of attempts
nAttempts = 5
# Set up the error message function
error_func <- function(e){
message(paste("Download attempt failed..."))
}
error_funcFile <- function(e){
message(paste("Could not read download..."))
}
# Check operating system
OS <- dplyr::if_else(.Platform$OS.type == "unix",
"MacLinux",
"Windows")
# Run a code to download the data and deal with potential internet issues
checklist <- NULL
attempt <- 1
suppressWarnings(
while( is.null(checklist) && attempt <= nAttempts) {
# Don't attempt for the last attempt
if(attempt < nAttempts){
# WINDOWS
if(OS != "MacLinux"){
# Download the file
tryCatch(utils::download.file(URL, destfile = normalizePath(paste0(tempdir(),
"/beesChecklist.Rda"))),
error = error_func, warning = error_func)
# Load the file
tryCatch(
checklist <- base::readRDS(normalizePath(paste0(tempdir(), "/beesChecklist.Rda"))),
error = error_funcFile, warning = error_funcFile)
}else{
# MAC OR LINUX
# Download the file
tryCatch(utils::download.file(URL, destfile = paste0(tempdir(), "/beesChecklist.Rda")),
error = error_func, warning = error_func)
# Load the file
tryCatch(
checklist <- base::readRDS(paste0(tempdir(), "/beesChecklist.Rda")),
error = error_funcFile, warning = error_funcFile)
}
} # END if
if(attempt < nAttempts){
# Wait one second before the next request
if(attempt > 1){Sys.sleep(1)
print( paste("Attempt: ", attempt, " of ", nAttempts-1))} # Inform user of number of attempts
} # END IF #2
# Count the next attempt
attempt <- attempt + 1
} # END while
)
if(is.null(checklist)){
message(" - Checklist download failed. Please check your internet connection.")
}
# Return the data to the user
return(checklist)
} # END function
# ---- End of file: /scratch/gouwar.j/cran-all/cranData/BeeBDC/R/beesChecklist.R ----
# Description of the BeeBDC taxonomic dataset
# 16th of March 2023
#' Download a nearly complete taxonomy of bees globally
#'
#'
#' Downloads the taxonomic information for the bees of the world.
#' Source of taxonomy is listed under "source" but are mostly derived from the Discover Life
#' website. The data will be sourced from the BeeBDC article's Figshare.
#'
#' Note that sometimes the download might not work without restarting R. In this case, you could
#' alternatively download the dataset from the URL below and then read it in using
#' `base::readRDS("filePath.Rda")`.
#'
#'
#'
#' **Column details**
#'
#' **flags** Flags or comments about the taxon name.
#'
#' **taxonomic_status** Taxonomic status. Values are "accepted" or "synonym"
#'
#' **source** Source of the name.
#'
#' **accid** The id of the accepted taxon name or "0" if taxonomic_status == accepted.
#'
#' **id** The id number for the taxon name.
#'
#' **kingdom** The biological kingdom the taxon belongs to. For bees, kingdom == Animalia.
#'
#' **phylum** The biological phylum the taxon belongs to. For bees, phylum == Arthropoda.
#'
#' **class** The biological class the taxon belongs to. For bees, class == Insecta.
#'
#' **order** The biological order the taxon belongs to. For bees, order == Hymenoptera.
#'
#'
#' **family** The family of bee which the species belongs to.
#'
#' **subfamily** The subfamily of bee which the species belongs to.
#'
#' **tribe** The tribe of bee which the species belongs to.
#'
#' **subtribe** The subtribe of bee which the species belongs to.
#'
#' **validName** The valid scientific name as it should occur in the “scientificName” column in a Darwin Core file.
#'
#' **canonical** The scientificName without the scientificNameAuthority.
#'
#' **canonical_withFlags** The scientificName without the scientificNameAuthority and with Discover Life taxonomy flags.
#'
#' **genus** The genus the bee species belongs to.
#'
#' **subgenus** The subgenus the bee species belongs to.
#'
#' **species** The specific epithet for the bee species.
#'
#' **infraspecies** The infraspecific epithet for the bee addressed.
#'
#' **authorship** The author who described the bee species.
#'
#' **taxon_rank** Rank for the bee taxon addressed in the entry.
#'
#' **notes** Additional notes about the name/taxon.
#'
#'
#' @param URL A character vector to the FigShare location of the dataset. The default will be to
#' the most-recent version.
#' @param ... Extra variables that can be passed to [utils::download.file()]
#'
#'
#'
#' @return A downloaded beesTaxonomy.Rda file in the [tempdir()] and the same tibble returned to
#' the environment.
#'
#'
#' @references This dataset was created using the Discover Life taxonomy.
#' Dataset is from the publication:
#' DOREY, J. B., CHESSHIRE, P. R., BOLAÑOS, A. N., O’REILLY, R. L., BOSSERT, S., COLLINS, S. M., LICHTENBERG, E. M., TUCKER, E., SMITH-PARDO, A., FALCON-BRINDIS, A., GUEVARA, D. A., RIBEIRO, B. R., DE PEDRO, D., FISCHER, E., HUNG, J. K.-L., PARYS, K. A., ROGAN, M. S., MINCKLEY, R. L., VELZCO, S. J. E., GRISWOLD, T., ZARRILLO, T. A., SICA, Y., ORR, M. C., GUZMAN, L. M., ASCHER, J., HUGHES, A. C. & COBB, N. S. In review. A globally synthesised and flagged bee occurrence dataset and cleaning workflow. Scientific Data.
#' The taxonomy data are mostly compiled from Discover Life data, www.discoverlife.org:
#' ASCHER, J. S. & PICKERING, J. 2020. Discover Life bee species guide and world checklist (Hymenoptera: Apoidea: Anthophila). http://www.discoverlife.org/mp/20q?guide=Apoidea_species.
#'
#' @seealso [BeeBDC::taxadbToBeeBDC()] to download any other taxonomy (of any taxa or of bees)
#' and [BeeBDC::harmoniseR()] for the
#' taxon-cleaning function where these taxonomies are implemented.
#'
#' @export
#'
#' @examples
#'\dontrun{
#' beesTaxonomy <- BeeBDC::beesTaxonomy()
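#'
#' # If the automated download fails, the .Rda file can be downloaded manually from the URL
#' # above and read in; the file path below is only a placeholder, not a real location.
#' # beesTaxonomy <- base::readRDS("path/to/beesTaxonomy.Rda")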
#'}
#'
#'
beesTaxonomy <- function(URL = "https://open.flinders.edu.au/ndownloader/files/43331472",
...){
destfile <- taxonomy <- attempt <- nAttempts <- error_funcFile <- error_func <- NULL
# Set the number of attempts
nAttempts = 5
# Set up the error message function
error_func <- function(e){
message(paste("Download attempt failed..."))
}
error_funcFile <- function(e){
message(paste("Could not read download..."))
}
# Check operating system
OS <- dplyr::if_else(.Platform$OS.type == "unix",
"MacLinux",
"Windows")
# Run a code to download the data and deal with potential internet issues
taxonomy <- NULL
attempt <- 1
suppressWarnings(
while( is.null(taxonomy) && attempt <= nAttempts) {
# Don't attempt for the last attempt
if(attempt < nAttempts){
# WINDOWS
if(OS != "MacLinux"){
        # Download the file to the temporary directory
tryCatch(utils::download.file(URL, destfile = normalizePath(paste0(tempdir(),
"/beesTaxonomy.Rda"))),
error = error_func, warning = error_func)
        # Load the file from the temporary directory
tryCatch(
taxonomy <- base::readRDS(normalizePath(paste0(tempdir(), "/beesTaxonomy.Rda"))),
error = error_funcFile, warning = error_funcFile)
}else{
        # Download the file to the temporary directory
tryCatch(utils::download.file(URL, destfile = paste0(tempdir(), "/beesTaxonomy.Rda")),
error = error_func, warning = error_func)
        # Load the file from the temporary directory
tryCatch(
taxonomy <- base::readRDS(paste0(tempdir(), "/beesTaxonomy.Rda")),
error = error_funcFile, warning = error_funcFile)
}
} # END if
if(attempt < nAttempts){
# Wait one second before the next request
if(attempt > 1){Sys.sleep(1)
print( paste("Attempt: ", attempt, " of ", nAttempts-1))} # Inform user of number of attempts
} # END IF #2
# Count the next attempt
attempt <- attempt + 1
} # END while
)
if(is.null(taxonomy)){
message(" - Taxonomy download failed. Please check your internet connection.")
}
# Return the data to the user
return(taxonomy)
} # END function
# ---- End of file: /scratch/gouwar.j/cran-all/cranData/BeeBDC/R/beesTaxonomy.R ----
# This function was written by James B Dorey on the 29th of September 2022
# Its purpose is to visualise duplicate occurrence data by using a chord diagram
# Please contact jbdorey[at]me.com for help
#' Build a chord diagram of duplicate occurrence links
#'
#' This function outputs a figure which shows the relative size and direction of occurrence points
#' duplicated between data providers, such as, SCAN, GBIF, ALA, etc. This function requires the
#' outputs generated by [BeeBDC::dupeSummary()].
#'
#'
#' @param dupeData A tibble or data frame. The duplicate file produced by [BeeBDC::dupeSummary()].
#' @param outPath Character. The path to a directory (folder) in which the output should be saved.
#' @param fileName Character. The name of the output file, ending in '.pdf'.
#' @param width Numeric. The width of the figure to save (in inches). Default = 7.
#' @param height Numeric. The height of the figure to save (in inches). Default = 6.
#' @param bg The plot's background colour. Default = "white".
#' @param smallGrpThreshold Numeric. The upper threshold of sub-dataSources to be listed as "other".
#' Default = 3.
#' @param title A character string. The figure title. Default = "Duplicated record sources".
#' @param palettes A vector of the palettes to be used. One palette for each major dataSource and "other"
#' using the `paletteer` package. Default = c("cartography::blue.pal", "cartography::green.pal",
#' "cartography::sand.pal", "cartography::orange.pal", "cartography::red.pal",
#' "cartography::purple.pal", "cartography::brown.pal")
#' @param canvas.ylim Canvas limits from [circlize::circos.par()]. Default = c(-1.0,1.0).
#' @param canvas.xlim Canvas limits from [circlize::circos.par()]. Default = c(-0.6, 0.25).
#' @param text.col A character string. Text colour
#' @param legendX The x position of the legends, as measured in current viewport.
#' Passed to ComplexHeatmap::draw(). Default = grid::unit(6, "mm").
#' @param legendY The y position of the legends, as measured in current viewport.
#' Passed to ComplexHeatmap::draw(). Default = grid::unit(18, "mm").
#' @param legendJustify A character vector declaring the justification of the legends.
#' Passed to ComplexHeatmap::draw(). Default = c("left", "bottom").
#' @param niceFacing TRUE/FALSE. The niceFacing option automatically adjusts the text facing
#' according to their positions in the circle. Passed to [circlize::highlight.sector()].
#' @param self.link 1 or 2 (numeric). Passed to [circlize::chordDiagram()]:
#' if there is a self link in one sector, 1 means the link will be degenerated as a 'mountain' and the width corresponds to the value for this connection. 2 means the width of the starting root and the ending root all have the width that corresponds to the value for the connection.
#'
#'
#' @return Saves a figure to the provided file path.
#'
#' @importFrom circlize circos.clear circos.par chordDiagram mm_h circos.trackPlotRegion get.cell.meta.data highlight.sector circos.clear
#' @importFrom stringr str_replace str_c
#' @importFrom dplyr bind_cols full_join mutate select group_by if_else arrange n filter %>%
#' @importFrom paletteer paletteer_dynamic
#' @importFrom grid unit
#'
#'
#'
#' @export
#'
#' @examples
#' \dontrun{
#' # Create a basic example dataset of duplicates to visualise
#' basicData <- dplyr::tribble(
#' ~dataSource, ~dataSource_keep,
#' "GBIF_Halictidae", "USGS_data",
#' "GBIF_Halictidae", "USGS_data",
#' "GBIF_Halictidae", "USGS_data",
#' "GBIF_Halictidae", "USGS_data",
#' "GBIF_Halictidae", "USGS_data",
#' "GBIF_Halictidae", "USGS_data",
#' "SCAN_Halictidae", "GBIF_Halictidae",
#' "iDigBio_halictidae", "GBIF_Halictidae",
#' "iDigBio_halictidae", "SCAN_Halictidae",
#' "iDigBio_halictidae", "SCAN_Halictidae",
#' "SCAN_Halictidae", "GBIF_Halictidae",
#' "iDigBio_apidae", "SCAN_Apidae",
#' "SCAN_Apidae", "Ecd_Anthophila",
#' "iDigBio_apidae", "Ecd_Anthophila",
#' "SCAN_Apidae", "Ecd_Anthophila",
#' "iDigBio_apidae", "Ecd_Anthophila",
#' "SCAN_Megachilidae", "SCAN_Megachilidae",
#' "CAES_Anthophila", "CAES_Anthophila",
#' "CAES_Anthophila", "CAES_Anthophila"
#' )
#'
#'
#' chordDiagramR(
#'   # The duplicate data from the dupeSummary function output
#' dupeData = basicData,
#' outPath = tempdir(),
#' fileName = "ChordDiagram.pdf",
#' # These can be modified to help fit the final pdf that's exported.
#' width = 9,
#' height = 7.5,
#' bg = "white",
#' # How few distinct dataSources should a group have to be listed as "other"
#' smallGrpThreshold = 3,
#' title = "Duplicated record sources",
#' # The default list of colour palettes to choose from using the paleteer package
#' palettes = c("cartography::blue.pal", "cartography::green.pal",
#' "cartography::sand.pal", "cartography::orange.pal", "cartography::red.pal",
#' "cartography::purple.pal", "cartography::brown.pal"),
#' canvas.ylim = c(-1.0,1.0),
#' canvas.xlim = c(-0.6, 0.25),
#' text.col = "black",
#' legendX = grid::unit(6, "mm"),
#' legendY = grid::unit(18, "mm"),
#' legendJustify = c("left", "bottom"),
#' niceFacing = TRUE)}
chordDiagramR <- function(
# The duplicate data from the dupeSummary function output
dupeData = NULL,
outPath = NULL,
fileName = NULL,
width = 7,
height = 6,
bg = "white",
# How few distinct dataSources should a group have to be listed as "other"
smallGrpThreshold = 3,
title = "Duplicated record sources",
# The default list of colour palettes to choose from
palettes = c("cartography::blue.pal", "cartography::green.pal",
"cartography::sand.pal", "cartography::orange.pal", "cartography::red.pal",
"cartography::purple.pal", "cartography::brown.pal"),
canvas.ylim = c(-1.0,1.0),
canvas.xlim = c(-0.6, 0.25),
text.col = "black",
legendX = grid::unit(6, "mm"),
legendY = grid::unit(18, "mm"),
legendJustify = c("left", "bottom"),
niceFacing = TRUE,
self.link = 2){
  # locally bind variables to the function
Frequency <- Frequency_dupe <- sourceName <- . <- sourceCategories <- groupCount <- cur_group_id <-
groupNumber <- groupPalette <- groupColours <- par <- NULL
error_func_BCM <- CHtest <- error_func_CH <- input <- instructions <- NULL
requireNamespace("circlize")
requireNamespace("dplyr")
requireNamespace("paletteer")
requireNamespace("grid")
#### 0.0 Prep ####
##### 0.1 errors ####
###### a. FATAL errors ####
if(is.null(dupeData)){
stop(" - Please provide an argument for dupeData. I'm a program not a magician.")
}
if(is.null(outPath)){
stop(" - Please provide an argument for outPath Seems reckless to let me just guess.")
}
if(is.null(fileName)){
stop(" - Please provide an argument for fileName Seems reckless to let me just guess.")
}
if(nrow(dupeData) == 0){
stop(" - There are no duplicates in the dupeData object. Stopping process.")
}
##### 0.2 maintain par ####
# Make sure to maintain prior par on exit from the function
oldpar <- par(no.readonly = TRUE)
  on.exit(par(oldpar))
##### 0.3 BcM + ComplexHeatmap ####
###### a. test ####
# Check if BiocManager is installed
# TRUE if BiocManager is found
suppressWarnings(
BcMtest <- system.file(package='BiocManager') %>%
stringr::str_count() > 0
)
# Check if ComplexHeatmap is installed
# TRUE if ComplexHeatmap is found
suppressWarnings(
CHtest <- system.file(package='ComplexHeatmap') %>%
stringr::str_count() > 0
)
###### b. BiocManager ####
if(CHtest == FALSE){
if(BcMtest == FALSE){
# Set up instructions for download on fail
instructions <- paste(" Please try installing the package for yourself",
"using the following command: \n",
" install.packages(\"BiocManager\")")
# Set up fail function for tryCatch
error_func_BCM <- function(e){
stop(paste("Failed to install the BiocManager package.\n",
instructions))
}
# Begin interactive input
input <- 1
if (interactive()){
input <- utils::menu(c("Yes", "No"),
title = paste0("Install the BiocManager package? \n",
"NOTE: if you need to install BiocManager, you may need to restart R",
" before installing ComplexHeatmap."))
}
if(input == 1){
# Check for BiocManager
if( suppressWarnings(system.file(package='BiocManager')) %>% stringr::str_count() == 0){
message("Installing the BiocManager package.")
tryCatch(
utils::install.packages("BiocManager"),
error = error_func_BCM, warning = error_func_BCM)
}# END BiocManager check
else{
stop(writeLines(paste("The ComplexHeatmap package is necessary for BeeBDC::chordDiagramR.\n",
instructions)))
} # END else
} # END input == 1
}# END BcMtest == FALSE
} # END CHtest == FALSE
###### c. ComplexHeatmap ####
if(CHtest == FALSE){
# Set up instructions for download on fail
instructions <- paste(" Please try installing the package for yourself",
"using the following command: \n",
"BiocManager::install(\"ComplexHeatmap\")")
# Set up fail function for tryCatch
error_func_CH <- function(e){
stop(paste("Failed to install the ComplexHeatmap package.\n",
instructions))
}
# Begin interactive input
input <- 1
if (interactive()){
input <- utils::menu(c("Yes", "No"),
title = paste0("Install the ComplexHeatmap package? \n"))
}
if(input == 1){
# Start ComplexHeatmap install
message("Installing the ComplexHeatmap package.")
tryCatch(
BiocManager::install("ComplexHeatmap"),
error = error_func_CH, warning = error_func_CH)
} # END input == 1
else{
stop(writeLines(paste("The ComplexHeatmap package is necessary for BeeBDC::chordDiagramR.\n",
instructions)))
} # END else
} # END CHtest == FALSE
#### 1.0 Data prep ####
# Create a table to go into chord diagram
suppressMessages(
chordData <- table(dplyr::bind_cols(dupeData$dataSource, dupeData$dataSource_keep)),
classes = "message")
# Create tables of the counts of kept source and duplicate source
keptSource <- table(dupeData$dataSource) %>%
as.data.frame() %>% dplyr::tibble() %>%
stats::setNames(c("sourceName", "Frequency"))
dupeSource <- table(dupeData$dataSource_keep) %>%
as.data.frame() %>% dplyr::tibble() %>%
stats::setNames(c("sourceName", "Frequency_dupe"))
# Merge the sources and get their sum (for a total frequency count to order by)
colourTable <- dplyr::full_join(keptSource, dupeSource, by = "sourceName") %>%
dplyr::mutate(Frequency = (Frequency + Frequency_dupe)) %>%
# Drop the Frequency_dupe column
dplyr::select(!Frequency_dupe) %>%
# Get broad source (before first underscore)
dplyr::mutate( sourceCategories = (sourceName %>%
stringr::str_replace(
string = .,
pattern = "_.*",
replacement = ""
))) %>%
dplyr::group_by(sourceCategories) %>%
dplyr::mutate( # Count group number
groupCount = dplyr::n(),
# Combine small groups (< smallGrpThreshold)
sourceCategories = dplyr::if_else(
groupCount < smallGrpThreshold, "Other", sourceCategories)) %>%
dplyr::arrange(sourceName, .by_group = TRUE) %>%
# Re-group
dplyr::group_by(sourceCategories) %>%
dplyr::mutate(groupNumber = dplyr::cur_group_id(),
# Re-count
groupCount = dplyr::n(),
groupPalette = palettes[groupNumber]) %>%
# assign colours
dplyr::mutate(groupColours =
paletteer::paletteer_dynamic(
palette = groupPalette[[1]],
n = groupCount[[1]]) %>% list(),
colour = unlist(groupColours)[dplyr::row_number()])
#### 2.0 Build plot ####
circlize::circos.clear()
circlize::circos.par(canvas.ylim = canvas.ylim, canvas.xlim = canvas.xlim)
  # Create the chord diagram
circlize::chordDiagram(
x = chordData,
order = colourTable$sourceName,
directional = 1,
direction.type = c("arrows"),
link.arr.type = "big.arrow",
reduce = 0,
# self links fold directly back onto themselves instead of going to far side
self.link = self.link,
grid.col = colourTable$colour,
keep.diagonal = TRUE,
# name, grid, axis
annotationTrack = c("grid"),
preAllocateTracks = list(
track.height = circlize::mm_h(4),
track.margin = c(circlize::mm_h(1), 0)),
scale = FALSE
)
circlize::circos.trackPlotRegion(track.index = 1, panel.fun = function(x, y) {
xlim = circlize::get.cell.meta.data("xlim")
ylim = circlize::get.cell.meta.data("ylim")
sector.name = circlize::get.cell.meta.data("sector.index")
}, bg.border = NA)
# Highlight inputs
for(i in 1:length(unique(colourTable$sourceCategories))){
loopCat <- colourTable %>%
dplyr::filter(sourceCategories == unique(colourTable$sourceCategories)[i])
circlize::highlight.sector(stringr::str_c(loopCat$sourceName),
track.index = 1, col = loopCat$colour[[1]],
text = unique(loopCat$sourceCategories), cex = 0.8,
text.col = text.col, niceFacing = niceFacing)
}
legendList <- c()
# Make legends by creating a list of legends for each sourceCategory
for(i in 1:length(unique(colourTable$sourceCategories))){
loopCat <- colourTable %>%
dplyr::filter(sourceCategories == unique(colourTable$sourceCategories)[i])
legendList[[i]] <- ComplexHeatmap::Legend(labels = stringr::str_c(loopCat$sourceName),
title = unique(stringr::str_c(loopCat$sourceCategories)),
legend_gp = grid::gpar(fill = c(loopCat$colour)))
} # END legend loop
lgd_list <- ComplexHeatmap::packLegend(list = legendList)
ComplexHeatmap::draw(lgd_list, x = legendX,
y = legendY, just = legendJustify)
circlize::circos.clear()
title(title)
grDevices::dev.copy2pdf(file = paste(outPath, "/", fileName, sep = ""),
height = height, width = width, bg = bg)
#dev.off()
} # END function
# ---- End of file: /scratch/gouwar.j/cran-all/cranData/BeeBDC/R/chordDiagramR.R ----
# This function is a modification of a bdc function and flags occurrences as FALSE when they have a
# coordinate uncertainty over a user-set threshold.
# This function was written on the 2nd of August 2022 by James Dorey. Email James at
# jbdorey[at]me.com for help.
#' Flag occurrences with an uncertainty threshold
#'
#' To use this function, the user must choose a column, probably "coordinateUncertaintyInMeters"
#' and a threshold above which occurrences will be flagged for geographic uncertainty.
#'
#' @param data A data frame or tibble. Occurrence records as input.
#' @param uncerColumn Character. The column to flag uncertainty in.
#' @param threshold Numeric. The uncertainty threshold. Values greater than this
#' threshold will be flagged.
#'
#' @return The input data with a new column, .uncertaintyThreshold.
#' @export
#' @importFrom dplyr %>%
#'
#' @examples
#' # Run the function
#' beesRaw_out <- coordUncerFlagR(data = beesRaw,
#' uncerColumn = "coordinateUncertaintyInMeters",
#' threshold = 1000)
#' # View the output
#' table(beesRaw_out$.uncertaintyThreshold, useNA = "always")
coordUncerFlagR <-
function(data = NULL,
uncerColumn = "coordinateUncertaintyInMeters",
threshold = NULL) {
.data <- .occurrenceAbsent <- NULL
requireNamespace("dplyr")
    # Make a new column called .uncertaintyThreshold that is FALSE where the uncertainty exceeds the threshold and TRUE otherwise
data <-
data %>%
dplyr::mutate(
.uncertaintyThreshold =
!.data[[uncerColumn]] > threshold)
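    # Note: records with an NA value in the uncertainty column receive NA here (neither passed
    # nor flagged), because !(NA > threshold) evaluates to NA.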
# Return user output
message(
paste(
"\\coordUncerFlagR:\n",
"Flagged",
format(sum(data$.uncertaintyThreshold == FALSE, na.rm = TRUE), big.mark = ","),
"geographically uncertain records:\n",
"The column '.uncertaintyThreshold' was added to the database.\n"
)
)
return(data)
}
# ---- End of file: /scratch/gouwar.j/cran-all/cranData/BeeBDC/R/coordUncerFlagR.R ----
# This function was written by James B Dorey on the 5th of August 2022 to fix up some
# Country names in occurrence records and extract country names from ISO2 codes
#' Fix country name issues using a user-input list
#'
#' This function is basic for a user to manually fix some country name inconsistencies.
#'
#' @param data A data frame or tibble. Occurrence records as input.
#' @param ISO2_table A data frame or tibble with the columns ISO2 and long names for country names. Default
#' is a static version from Wikipedia.
#' @param commonProblems A data frame or tibble. It must have two columns:
#' one containing the user-identified problem and one with a user-defined fix
#'
#' @return Returns the input data, but with countries occurring in the user-supplied problem
#' column ("commonProblems") replaced with those in the user-supplied fix column
#' @export
#'
#' @importFrom dplyr %>%
#' @importFrom stats complete.cases
#'
#' @examples
#' beesFlagged_out <- countryNameCleanR(
#' data = BeeBDC::beesFlagged,
#' commonProblems = dplyr::tibble(problem = c('U.S.A.', 'US','USA','usa','UNITED STATES',
#' 'United States','U.S.A','MX','CA','Bras.','Braz.',
#' 'Brasil','CNMI','USA TERRITORY: PUERTO RICO'),
#' fix = c('United States of America','United States of America',
#' 'United States of America','United States of America',
#' 'United States of America','United States of America',
#' 'United States of America','Mexico','Canada','Brazil',
#' 'Brazil','Brazil','Northern Mariana Islands','PUERTO.RICO')))
countryNameCleanR <- function(
data = NULL,
ISO2_table = NULL,
commonProblems = NULL){
# locally bind variables to the function
database_id <- decimalLatitude <- decimalLongitude <- country <- countryCode <- scientificName <-
dataSource <- FullName <- . <- fix <- NULL
#### 0.0 Prep ####
##### 0.1 warnings ####
if(is.null(data)){
stop(" - Please provide input data.")
}
if(!"countryCode" %in% colnames(data)){
stop(" - No countryCode column in data.")
}
##### 0.2 Data defaults ####
if(is.null(ISO2_table)){
writeLines(" - Using default country names and codes from https:en.wikipedia.org/wiki/ISO_3166-1_alpha-2 - static version from July 2022.")
# Use countryCode to extract country name using the Wikipedia table - https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2
# Make a tibble with
ISO2_table <- dplyr::tibble(
ISO2 = c("AD","AE","AF","AG","AI","AL","AM","AO","AQ","AR","AS","AT","AU","AW","AX","AZ","BA","BB","BD","BE","BF","BG","BH","BI","BJ","BL","BM","BN","BO","BQ","BR","BS","BT","BV","BW","BY","BZ","CA","CC","CD","CF","CG","CH","CI","CK","CL","CM","CN","CO","CR","CU","CV","CW","CX","CY","CZ","DE","DJ","DK","DM","DO","DZ","EC","EE","EG","EH","ER","ES","ET","FI","FJ","FK","FM","FO","FR","GA","GB","GD","GE","GF","GG","GH","GI","GL","GM","GN","GP","GQ","GR","GS","GT","GU","GW","GY","HK","HM","HN","HR","HT","HU","ID","IE","IL","IM","IN","IO","IQ","IR","IS","IT","JE","JM","JO","JP","KE","KG","KH","KI","KM","KN","KP","KR","KW","KY","KZ","LA","LB","LC","LI","LK","LR","LS","LT","LU","LV","LY","MA","MC","MD","ME","MF","MG","MH","MK","ML","MM","MN","MO","MP","MQ","MR","MS","MT","MU","MV","MW","MX","MY","MZ","NA","NC","NE","NF","NG","NI","NL","NO","NP","NR","NU","NZ","OM","PA","PE","PF","PG","PH","PK","PL","PM","PN","PR","PS","PT","PW","PY","QA","RE","RO","RS","RU","RW","SA","SB","SC","SD","SE","SG","SH","SI","SJ","SK","SL","SM","SN","SO","SR","SS","ST","SV","SX","SY","SZ","TC","TD","TF","TG","TH","TJ","TK","TL","TM","TN","TO","TR","TT","TV","TW","TZ","UA","UG","UM","US","UY","UZ","VA","VC","VE","VG","VI","VN","VU","WF","WS","YE","YT","ZA","ZM","ZW"),
FullName = c("Andorra","United Arab Emirates","Afghanistan","Antigua and Barbuda","Anguilla","Albania","Armenia","Angola","Antarctica","Argentina","American Samoa","Austria","Australia","Aruba","\\u00c5land Islands","Azerbaijan","Bosnia and Herzegovina","Barbados","Bangladesh","Belgium","Burkina Faso","Bulgaria","Bahrain","Burundi","Benin","Saint Barth\\u00e9lemy","Bermuda","Brunei Darussalam","Bolivia (Plurinational State of)","Bonaire, Saint Eustatius and Saba","Brazil","Bahamas","Bhutan","Bouvet Island","Botswana","Belarus","Belize","Canada","Cocos (Keeling) Islands","Congo, Democratic Republic of the","Central African Republic","Congo","Switzerland","C\\u00f4te d'Ivoire","Cook Islands","Chile","Cameroon","China","Colombia","Costa Rica","Cuba","Cabo Verde","Cura\\u00e7ao","Christmas Island","Cyprus","Czechia","Germany","Djibouti","Denmark","Dominica","Dominican Republic","Algeria","Ecuador","Estonia","Egypt","Western Sahara","Eritrea","Spain","Ethiopia","Finland","Fiji","Falkland Islands (Malvinas)","Micronesia (Federated States of)","Faroe Islands","France","Gabon","United Kingdom of Great Britain and Northern Ireland","Grenada","Georgia","French Guiana","Guernsey","Ghana","Gibraltar","Greenland","Gambia","Guinea","Guadeloupe","Equatorial Guinea","Greece","South Georgia and the South Sandwich Islands","Guatemala","Guam","Guinea-Bissau","Guyana","Hong Kong","Heard Island and McDonald Islands","Honduras","Croatia","Haiti","Hungary","Indonesia","Ireland","Israel","Isle of Man","India","British Indian Ocean Territory","Iraq","Iran (Islamic Republic of)","Iceland","Italy","Jersey","Jamaica","Jordan","Japan","Kenya","Kyrgyzstan","Cambodia","Kiribati","Comoros","Saint Kitts and Nevis","Korea (Democratic People's Republic of)","Korea, Republic of","Kuwait","Cayman Islands","Kazakhstan","Lao People's Democratic Republic","Lebanon","Saint Lucia","Liechtenstein","Sri Lanka","Liberia","Lesotho","Lithuania","Luxembourg","Latvia","Libya","Morocco","Monaco","Moldova, Republic of","Montenegro","Saint Martin (French part)","Madagascar","Marshall Islands","North Macedonia","Mali","Myanmar","Mongolia","Macao","Northern Mariana Islands","Martinique","Mauritania","Montserrat","Malta","Mauritius","Maldives","Malawi","Mexico","Malaysia","Mozambique","Namibia","New Caledonia","Niger","Norfolk Island","Nigeria","Nicaragua","Netherlands","Norway","Nepal","Nauru","Niue","New Zealand","Oman","Panama","Peru","French Polynesia","Papua New Guinea","Philippines","Pakistan","Poland","Saint Pierre and Miquelon","Pitcairn","Puerto Rico","Palestine, State of","Portugal","Palau","Paraguay","Qatar","R\\u00e9union","Romania","Serbia","Russian Federation","Rwanda","Saudi Arabia","Solomon Islands","Seychelles","Sudan","Sweden","Singapore","Saint Helena, Ascension and Tristan da Cunha","Slovenia","Svalbard and Jan Mayen","Slovakia","Sierra Leone","San Marino","Senegal","Somalia","Suriname","South Sudan","Sao Tome and Principe","El Salvador","Sint Maarten (Dutch part)","Syrian Arab Republic","Eswatini","Turks and Caicos Islands","Chad","French Southern Territories","Togo","Thailand","Tajikistan","Tokelau","Timor-Leste","Turkmenistan","Tunisia","Tonga","Turkey","Trinidad and Tobago","Tuvalu","Taiwan, Province of China","Tanzania, United Republic of","Ukraine","Uganda","United States Minor Outlying Islands","United States of America","Uruguay","Uzbekistan","Holy See","Saint Vincent and the Grenadines","Venezuela (Bolivarian Republic of)","Virgin Islands (British)","Virgin Islands (U.S.)","Viet Nam","Vanuatu","Wallis and 
Futuna","Samoa","Yemen","Mayotte","South Africa","Zambia","Zimbabwe"))
}
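  # With the default table, for example, a record with countryCode == "US" will have its
  # country column filled with "United States of America" by the joins below (illustrative only).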
###### a. prep data ####
# Remove NA for lat and long
data_noNa <- data %>%
# Drop na in lat and long
tidyr::drop_na(c("decimalLatitude", "decimalLongitude")) %>%
# select a subset of columns to save RAM
dplyr::select(c(database_id, decimalLatitude, decimalLongitude, country, countryCode,
scientificName, dataSource))
#### 1.0 GBIF ####
# Now, because GBIF doesn't have a country column (weird, right?!),
# Create country from countryCode
GBIF_occs <- data_noNa %>%
dplyr::filter(stringr::str_detect(dataSource, pattern = "GBIF"))
# # Remove NA values from data_noNa
# GBIF_occs$countryCode <- GBIF_occs$countryCode %>% as.character() %>%
# stringr::str_replace(pattern = "NA", replacement = "")
# Join the ISO2_table names with the countryCode df
GBIF_occs <- GBIF_occs %>%
# If there is NO countryCode, but there IS a country, add this to countryCode in case
dplyr::mutate(countryCode = dplyr::if_else(countryCode == "" & complete.cases(as.character(country)),
country, as.character(countryCode))) %>%
left_join(ISO2_table, by = c("countryCode" = "ISO2")) %>%
# Insert the country information to the correct spot
dplyr::mutate(country = FullName) %>%
dplyr::select(!FullName)
# Re-join datasets
data_noNa <- data_noNa %>%
    # Remove GBIF database_ids
dplyr::filter(!database_id %in% GBIF_occs$database_id) %>%
# Re-bind those occs
dplyr::bind_rows(GBIF_occs)
rm(GBIF_occs)
#### 2.0 All ####
# Join the ISO2_table names with the countryCode df
data_all <- data_noNa %>%
# If there is NO countryCode, but there IS a country, add this to countryCode in case
dplyr::mutate(countryCode = dplyr::if_else(countryCode == "" | is.na(countryCode)
& complete.cases(country) &
tolower(country) != "namibia",
country, countryCode)) %>%
left_join(ISO2_table, by = c("countryCode" = "ISO2")) %>%
# Insert the country information to the correct spot
dplyr::mutate(country = FullName) %>%
dplyr::select(!FullName)
# Re-join datasets
data_all <- data_all %>%
    # Remove already-processed database_ids
dplyr::filter(!database_id %in% data_noNa$database_id) %>%
# Re-bind those occs
dplyr::bind_rows(data_noNa)
rm(data_noNa)
# Remove illegal characters
data_all$country <- data_all$country %>%
stringr::str_replace(., pattern = paste("\\[", "\\]", "\\?",
sep= "|"), replacement = "")
# Replace the problems as they occur
data_all <- data_all %>%
dplyr::left_join(commonProblems, by = c("country" = "problem")) %>%
dplyr::mutate(country =
dplyr::if_else(country %in% as.character(commonProblems$problem),
fix, country))
return(data_all)
}
# ---- End of file: /scratch/gouwar.j/cran-all/cranData/BeeBDC/R/countryNameCleanR.R ----
# This Function is designed to check country-level outliers using the Discover Life checklist.
# It was written by James B Dorey from the 8th of November 2022.
#' Flag country-level outliers with a provided checklist.
#'
#'This function flags country-level outliers using the checklist provided with this package.
#'For additional context and column names, see [BeeBDC::beesChecklist()].
#'
#' @param checklist A data frame or tibble. The formatted checklist which was built based on the Discover Life website.
#' @param data A data frame or tibble. A Darwin Core occurrence dataset.
#' @param keepAdjacentCountry Logical. If TRUE, occurrences in countries that are adjacent to checklist countries will be
#' kept. If FALSE, they will be flagged.
#' @param pointBuffer Numeric. A buffer around points to help them align with a country or coastline.
#' This provides a good way to retain points that occur right along the coast or borders of the
#' maps in rnaturalearth
#' @param scale Numeric. The value fed into the map scale parameter for
#' [rnaturalearth::ne_countries()]'s scale parameter:
#' Scale of map to return, one of 110, 50, 10 or 'small', 'medium', 'large', where smaller numbers
#' are higher resolution. WARNING: This function is tested on 110 and 50.
#' @param stepSize Numeric. The number of occurrences to process in each chunk. Default = 1000000.
#' @param mc.cores Numeric. If > 1, the function will run in parallel
#' using mclapply using the number of cores specified. If = 1 then it will be run using a serial
#' loop. NOTE: Windows machines must use a value of 1 (see ?parallel::mclapply). Additionally,
#' be aware that each thread can use large chunks of memory. If the cores throw issues, consider
#' setting mc.cores to 1.
#' Default = 1.
#'
#' @return The input data with two new columns, .countryOutlier and .sea. There are three possible
#' values for
#' the new column: TRUE == passed, FALSE == failed (not in country or in the ocean),
#' NA == did not overlap with rnaturalearth map.
#'
#' @export
#' @importFrom dplyr %>%
#'
#' @examples
#' library(magrittr)
#' # Load in the test dataset
#' beesRaw <- BeeBDC::beesRaw
#' # For the sake of this example, use the testChecklist
#' system.file("extdata", "testChecklist.rda", package="BeeBDC") |> load()
#' # For real examples, you might download the beesChecklist from FigShare using
#' # [BeeBDC::beesChecklist()]
#'
#' beesRaw_out <- countryOutlieRs(checklist = testChecklist,
#' data = beesRaw %>%
#' dplyr::filter(dplyr::row_number() %in% 1:50),
#' keepAdjacentCountry = TRUE,
#' pointBuffer = 1,
#' scale = 50,
#' stepSize = 1000000,
#' mc.cores = 1)
#' table(beesRaw_out$.countryOutlier, useNA = "always")
countryOutlieRs <- function(
checklist = NULL,
data = NULL,
keepAdjacentCountry = TRUE,
pointBuffer = NULL,
scale = 50,
stepSize = 1000000,
mc.cores = 1
){
# locally bind variables to the function
  iso_a2 <- iso_a3_eh <- name <- name_long <- continent <- geometry <- countryOutlieRs <-
    decimalLongitude <- decimalLatitude <- database_id <- scientificName <- species <-
    family <- subfamily <- genus <- specificEpithet <- scientificNameAuthorship <-
    country <- stateProvince <- eventDate <- institutionCode <- recordNumber <-
    catalogNumber <- dataSource <- verbatim_scientificName <- . <- neighbours <- rowNum <-
    neighboursText <- SciCountry <- validName <- SciCountry_noYear <- neighbourMatch_noYear <-
    exactMatch_noYear <- matchType <- countryMatch <- .countryOutlier <- BeeBDC_order <-
    BeeBDC_group <- points <- inData <- indexMatch <- NULL
# REMOVE - TEST thinning
# data <- data %>%
# filter(row_number() %% 100 == 1)
startTime <- Sys.time()
#### 0.0 Warnings ####
if(is.null(checklist)){
stop("You must provide a checklist of countries")
}
if(is.null(data)){
stop("You must provide occurrence data (data). Honestly, what do you think I was gonna do without that?")
}
#### 1.0 Data prep ####
##### 1.1 data ####
# Drop .countryOutlier if its already present
data <- data %>%
dplyr::select(!tidyselect::any_of(".countryOutlier")) %>%
# Remove other columns made by this function
dplyr::select(!tidyselect::starts_with(c("iso_a3_eh", "countryMatch")))
##### 1.2 rNaturalEarth ####
# Download world map using rnaturalearth packages
suppressWarnings({
countryMap <- rnaturalearth::ne_countries(returnclass = "sf", country = NULL,
type = "countries", scale = scale) %>%
      # make the geometry valid to avoid potential issues with polygon intersection
sf::st_make_valid() %>%
# Select only a subset of the naturalearthdata columns to extract
dplyr::select(iso_a2, iso_a3_eh, name, name_long, continent, geometry) %>%
# Replace some country codes to match the checklist
dplyr::mutate(iso_a3_eh = iso_a3_eh %>% stringr::str_replace_all(
c("ALA" = "FIN", # Aland Islands == Finland
"ASM" = "WSM", # American Samoa == Samoa
"HKG" = "CHN", # Hong Kong == China
"MAF" = "SXM"))) # Saint-Martin == Sint Maarten
})
# Simplify the world map ONCE to be used later
simplePoly <- countryMap %>% sf::st_drop_geometry() %>%
dplyr::mutate(indexMatch = dplyr::row_number())
  # Don't use spherical geometry
sf::sf_use_s2(FALSE)
#### 2.0 Use occ. data ####
##### 2.1 Create functions ####
###### a. simple intersect ####
# Hijack st_intersection to allow it to be run in parallel
jbd_intersection <- function(inData){
inData <- inData %>% tidyr::drop_na(decimalLongitude, decimalLatitude)
suppressWarnings({ suppressMessages({
# Turn inData into a simple point feature
points <- sf::st_as_sf(inData,
coords = c("decimalLongitude", "decimalLatitude"),
na.fail = TRUE,
# Assign the CRS from the rnaturalearth map to the point inData
crs = sf::st_crs(countryMap)) %>%
# Use a subset of columns
dplyr::select(database_id, scientificName, species, family, subfamily, genus, specificEpithet,
scientificNameAuthorship,
country, stateProvince, eventDate, institutionCode, recordNumber, catalogNumber,
dataSource, verbatim_scientificName, geometry)
#Extract polygon information to points
points_extract <- sf::st_intersects(points, countryMap) %>%
# return a tibble with the index of each match or NA where there was no match
dplyr::tibble(indexMatch = .) %>%
# Convert to numeric
dplyr::mutate(indexMatch = indexMatch %>% as.character() %>%
# deal with problems — Take the first number where two are provided
stringr::str_extract("[0-9]+") %>%
# Remove zero to NA
stringr::str_replace("^[0]$", NA_character_) %>% as.numeric()) %>%
dplyr::left_join(simplePoly,
by = "indexMatch") %>%
# Add in the database_id
dplyr::bind_cols(inData %>% sf::st_drop_geometry() %>% dplyr::select(!continent))
}) })
# Return the points
return(points_extract)
} # END jbd_intersection
###### b. buffered intersect ####
# Hijack st_intersection to allow it to be run in parallel
jbd_bufferedIntersection <- function(inData){
suppressWarnings({ suppressMessages({
inData <- inData %>%
# Use only complete lat and lon data
tidyr::drop_na(decimalLongitude, decimalLatitude) %>%
# Remove the previous column names from jbd_intersection
dplyr::select(!tidyselect::any_of(c("iso_a2","iso_a3_eh","name","name_long","continent",
"indexMatch")))
# Turn inData into a simple point feature
points <- sf::st_as_sf(inData,
coords = c("decimalLongitude", "decimalLatitude"),
na.fail = TRUE,
# Assign the CRS from the rnaturalearth map to the point inData
crs = sf::st_crs(countryMap)) %>%
# Use a subset of columns
dplyr::select(database_id, scientificName, species, family, subfamily, genus, specificEpithet,
scientificNameAuthorship,
country, stateProvince, eventDate, institutionCode, recordNumber, catalogNumber,
dataSource, verbatim_scientificName, geometry) %>%
# Buffer the points by the pointBuffer
sf::st_buffer(dist = pointBuffer)
#Extract polygon information to points
points_extract <- sf::st_intersects(points, countryMap) %>%
# return a tibble with the index of each match or NA where there was no match
dplyr::tibble(indexMatch = . ) %>%
# Convert to numeric
dplyr::mutate(indexMatch = indexMatch %>% as.character() %>%
# deal with problems — Take the first number where two are provided
stringr::str_extract("[0-9]+") %>%
# Remove zero to NA
stringr::str_replace("^[0]$", NA_character_) %>% as.numeric()) %>%
dplyr::left_join(simplePoly,
by = "indexMatch") %>%
# Add in the database_id
dplyr::bind_cols(inData %>% sf::st_drop_geometry() %>%
dplyr::select(!tidyselect::any_of("continent")))
}) })
# Return the points
return(points_extract)
} # END jbd_intersection
##### 2.2 Extraction ####
###### a. exactCountry ####
writeLines(" - Extracting country data from points...")
points_extract <- data %>%
# remove the existing iso_a3_eh column
dplyr::select(!tidyselect::any_of("iso_a3_eh")) %>%
# Make a new column with the ordering of rows
dplyr::mutate(BeeBDC_order = dplyr::row_number()) %>%
# Group by the row number and step size
dplyr::group_by(BeeBDC_group = ceiling(BeeBDC_order/stepSize)) %>%
# Split the dataset up into a list by group
dplyr::group_split(.keep = TRUE) %>%
# Run the actual function
parallel::mclapply(., jbd_intersection,
mc.cores = mc.cores
) %>%
# Combine the lists of tibbles
dplyr::bind_rows() %>%
# Drop those occurrences that did not intersect with a country
tidyr::drop_na(iso_a3_eh)
if(!is.null(pointBuffer)){
# Failed extractions
points_failed <- data %>%
dplyr::filter(!database_id %in% points_extract$database_id)
writeLines(" - Buffering failed points by pointBuffer...")
points_failed <- points_failed %>%
# Make a new column with the ordering of rows
dplyr::mutate(BeeBDC_order = dplyr::row_number()) %>%
# Group by the row number and step size
dplyr::group_by(BeeBDC_group = ceiling(BeeBDC_order/stepSize)) %>%
# Split the dataset up into a list by group
dplyr::group_split(.keep = TRUE) %>%
# Run the actual function
parallel::mclapply(., jbd_bufferedIntersection,
mc.cores = mc.cores
) %>%
# Combine the lists of tibbles
dplyr::bind_rows() %>%
# Drop those occurrences that did not intersect with a country
tidyr::drop_na(iso_a3_eh)
if(nrow(points_failed) > 0){
# Re-merge good with failed
points_extract <- points_extract %>%
sf::st_drop_geometry() %>%
# remove buffer-matched occurrences
dplyr::filter(!database_id %in% points_failed$database_id) %>%
# replace these, but now matched
dplyr::bind_rows(points_failed %>% sf::st_drop_geometry())
}
} # End if pointBuffer
writeLines(" - Prepare the neighbouring country dataset...")
###### b. neighbouringCountries ####
# Get a list of countries that share borders
countriesBordering <- sf::st_intersects(countryMap, countryMap) %>%
paste(., sep = ";")
# Make a new tibble with these information
neighbouringCountries <- dplyr::tibble(
rowNum = 1:nrow(countryMap),
country = countryMap$iso_a3_eh,
neighbours = countriesBordering,
# Modify the text in column
neighboursText = mgsub::mgsub(string = neighbours,
pattern = rowNum,
replacement = country) %>%
stringr::str_replace(string = .,
pattern = "c\\(", replacement = "") %>%
stringr::str_replace(string = .,
pattern = "\\)", replacement = "") %>%
stringr::str_replace(string = .,
pattern = ":", replacement = ", "))
# Make a long-format tibble with neighbouring countries
neighbouringCountries <- neighbouringCountries %>%
tidyr::separate_rows(data = ., neighboursText,
sep = ",") %>%
# Remove country matching themselves
dplyr::filter(!(country == neighboursText))
# Remove extra spaces
neighbouringCountries$neighboursText <- stringr::str_squish(neighbouringCountries$neighboursText)
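  # At this point each row of neighbouringCountries pairs an ISO alpha-3 country with one
  # adjacent country; for example (illustrative only), a row with country == "FRA" and
  # neighboursText == "DEU".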
# Join the datasets together so that we can make a list of adjacent countries to match also
neighbouringCountries <- checklist %>%
dplyr::left_join(neighbouringCountries %>%
dplyr::select(tidyselect::any_of(c("country", "neighboursText"))),
by = c("Alpha-3" = "country"),
multiple = "all", relationship = "many-to-many")
###### c. Sea points ####
# Find the points that did not overlap with countries but that had coordinates
seaPoints <- sf::st_as_sf(data %>% tidyr::drop_na(decimalLongitude, decimalLatitude),
coords = c("decimalLongitude", "decimalLatitude"),
na.fail = TRUE,
# Assign the CRS from the rnaturalearth map to the point data
crs = sf::st_crs(countryMap)) %>%
# Use a subset of columns
dplyr::select(database_id, scientificName, species, family, subfamily, genus, specificEpithet,
scientificNameAuthorship,
country, stateProvince, eventDate, institutionCode, recordNumber, catalogNumber,
dataSource, verbatim_scientificName, geometry) %>%
sf::st_drop_geometry() %>%
dplyr::filter(!database_id %in% points_extract$database_id) %>%
dplyr::select(database_id)
##### 2.3 Compare ####
writeLines(" - Compare points with the checklist...")
# Get a smaller subset of the columns AND make a new columns with scientific name and country
points_simple <- points_extract %>%
dplyr::select(database_id, iso_a3_eh, scientificName, country) %>%
dplyr::mutate(SciCountry = stringr::str_c(scientificName,
iso_a3_eh, sep = "_")) %>%
# Remove grammar and caps from SciCountry
dplyr::mutate(SciCountry = tolower(SciCountry) %>%
# Replace punctuation
stringr::str_replace_all("[\\(\\)\\,\\.\\-]", "") %>%
# Replace white spaces with underscores
stringr::str_replace_all(" ", "_")) %>%
# Make the new column to match with full species name (NO AUTHORSHIP YEAR) and country
# Remove grammar and caps from SciCountry
dplyr::mutate(SciCountry_noYear = tolower(SciCountry) %>%
# Replace numbers
stringr::str_replace_all("[0-9]", "")%>%
# replace double __
stringr::str_replace_all("__", "_"))
###### a. exactCountry ####
# Do the same for the ascher checklist
checklist_simple <- checklist %>%
# Select subset
dplyr::select(validName, 'Alpha-3') %>%
# Harmonise column names
dplyr::rename(iso_a3_eh = 'Alpha-3') %>%
# Make the new column to match with full species name, authorship, and country
dplyr::mutate(SciCountry = stringr::str_c(validName, iso_a3_eh, sep = "_"))%>%
# Remove grammar and caps from SciCountry
dplyr::mutate(SciCountry = tolower(SciCountry) %>%
# Replace punctuation
stringr::str_replace_all("[\\(\\)\\,\\.\\-]", "") %>%
# Replace white spaces with underscores
stringr::str_replace_all(" ", "_")) %>%
# Make the new column to match with full species name (NO AUTHORSHIP YEAR) and country
# Remove grammar and caps from SciCountry
dplyr::mutate(SciCountry_noYear = tolower(SciCountry) %>%
# Replace numbers
stringr::str_replace_all("[0-9]", "") %>%
# replace double __
stringr::str_replace_all("__", "_"))
# Make a new column showing if that species is expected in that country
points_match <- points_simple %>%
#dplyr::filter(country == "United states") %>%
#dplyr::mutate(exactMatch = dplyr::if_else(SciCountry %in% checklist_simple$SciCountry,
# TRUE, FALSE)) %>%
dplyr::mutate(exactMatch_noYear = dplyr::if_else(SciCountry_noYear %in% checklist_simple$SciCountry_noYear,
TRUE, FALSE)) %>%
dplyr::left_join(dplyr::select(checklist_simple, SciCountry_noYear),
by = "SciCountry_noYear",
multiple = "all", relationship = "many-to-many")
###### b. neighbouringCountries ####
# Get a smaller subset of the data AND make a new column with scientific name and country
nchecklist_simple <- neighbouringCountries %>%
# Select subset
dplyr::select(validName, neighboursText) %>%
# Make the new column to match with full species name and neighbouring country
dplyr::mutate(SciCountry = stringr::str_c(validName, neighboursText, sep = "_")) %>%
# Remove grammar and caps from SciCountry
dplyr::mutate(SciCountry = tolower(SciCountry) %>%
# Replace punctuation
stringr::str_replace_all("[\\(\\)\\,\\.\\-]", "") %>%
# Replace white spaces with underscores
stringr::str_replace_all(" ", "_")) %>%
# Make the new column to match with full species name (NO AUTHORSHIP YEAR) and country
# Remove grammar and caps from SciCountry
dplyr::mutate(SciCountry_noYear = tolower(SciCountry) %>%
# Replace numbers
stringr::str_replace_all("[0-9]", "") %>%
# replace double __
stringr::str_replace_all("__", "_")) %>%
# Get a unique set
dplyr::distinct(SciCountry, .keep_all = TRUE)
# Make a new column showing if that species is expected in that country
npoints_match <- points_simple %>%
# dplyr::filter(country == "United states") %>%
#dplyr::mutate(neighbourMatch = dplyr::if_else(SciCountry %in% nchecklist_simple$SciCountry,
# TRUE, FALSE)) %>%
dplyr::mutate(neighbourMatch_noYear = dplyr::if_else(SciCountry_noYear %in% nchecklist_simple$SciCountry_noYear,
TRUE, FALSE))
#### 3.0 Merge ####
writeLines(" - Combining data...")
# Merge both points_match datasets
bpoints_match <- dplyr::tibble(points_match) %>%
# Join the two datasets together, keeping only database_id and neighbourMatch_noYear from
# the neighbour-joined dataset
dplyr::left_join(dplyr::select(npoints_match, c(database_id, neighbourMatch_noYear)),
by = "database_id",
multiple = "all", relationship = "many-to-many") %>%
# Remove geometry column
dplyr::select(!tidyselect::starts_with("geometry")) %>%
# Combine exactMatch_noYear and neighbourMatch_noYear
dplyr::mutate(matchType = dplyr::if_else(exactMatch_noYear == TRUE,
"exact", dplyr::if_else(neighbourMatch_noYear == TRUE,
"neighbour",
"noMatch")))
#### 4.0 Output ####
##### 4.1 User output ####
bpoints_match <- bpoints_match %>%
# Select the columns to keep
dplyr::select(database_id, iso_a3_eh, matchType) %>%
dplyr::rename(
countryMatch = matchType)
# Set flag for those that don't pass countryMatch
###### a. keepAC == TRUE ####
if(keepAdjacentCountry == TRUE){
bpoints_match <- bpoints_match %>%
dplyr::mutate(
.countryOutlier = dplyr::if_else(countryMatch != "exact" & countryMatch != "neighbour",
FALSE, TRUE)
) # END mutate
} # END TRUE
###### b. keepAC == FALSE ####
if(keepAdjacentCountry == FALSE){
bpoints_match <- bpoints_match %>%
dplyr::mutate(
.countryOutlier = dplyr::if_else(countryMatch != "exact",
FALSE, TRUE)
) # END mutate
}# END FALSE
# Keep only entirely unique records
bpoints_match <- bpoints_match %>%
dplyr::distinct()
###### c. distinct buffer ####
# For those buffered records that might have overlapped with >1 country, select the unfiltered one, if it exists.
if(!is.null(pointBuffer)){
writeLines(" - Sorting and removing potentially duplicated buffered points...")
bpoints_match <- bpoints_match %>%
dplyr::group_by(database_id) %>%
dplyr::arrange(desc(.countryOutlier), .by_group = TRUE) %>%
dplyr::distinct(database_id, .keep_all = TRUE)
}
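# Worked example (comments only, hypothetical rows): if a buffered point "id_1" overlapped two
# countries and so received both .countryOutlier = TRUE and .countryOutlier = FALSE, then
# arrange(desc(.countryOutlier)) sorts the TRUE (passing) row first within its database_id
# group, and distinct(database_id, .keep_all = TRUE) keeps only that first, passing, row.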
# Merge with original dataset
output <- data %>%
dplyr::left_join(bpoints_match, by = "database_id",
multiple = "all", relationship = "many-to-many") %>%
# Add in .sea using the seaPoints
dplyr::mutate(.sea = dplyr::if_else(database_id %in% seaPoints$database_id,
FALSE, TRUE))
writeLines(paste0(
" - Finished. \n",
"We have matched ",
format(sum(bpoints_match$countryMatch == "exact", na.rm = TRUE), big.mark = ","),
" records to their exact country and ",
format(sum(bpoints_match$countryMatch == "neighbour", na.rm = TRUE), big.mark = ","),
" to an adjacent country\n",
"We failed to match ",
format(sum(bpoints_match$countryMatch == "noMatch", na.rm = TRUE), big.mark = ","),
" occurrences to any 'exact' or 'neighbouring' country.\n",
"There are ",
format(sum(is.na(output$.countryOutlier)), big.mark = ","),
" 'NA' occurrences for the .countryOutlier column.\n"
))
# return message
message(paste("countryOutlieRs:\nFlagged",
format(sum(output$.countryOutlier == FALSE, na.rm = TRUE), big.mark = ","),
" for country outlier and flagged ",
format(sum(output$.sea == FALSE, na.rm = TRUE), big.mark = ","),
" for in the .sea",
"records.\nThree columns were added to the database:\n 1. ",
"The '.countryOutlier' column was added which is a filtering column. \n 2. ",
"The 'countryMatch' columns indicates exact, neighbour, or noMatch. \n",
"3. The '.sea' column was added as a filtering column for points in the ocean.",
" The '.sea' column includes the user input buffer in its calculation."),
sep = "")
# rm(countryMap, points, points_extract, countriesBordering, neighbouringCountries, points_simple,
# checklist_simple, points_match, nchecklist_simple, npoints_match, bpoints_match)
# Return file
endTime <- Sys.time()
# Time output
message(paste(
" - Completed in ",
round(difftime(endTime, startTime), digits = 2 )," ",
units(round(endTime - startTime, digits = 2)),
sep = ""))
return(output)
} # END countryOutlieRs
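# A minimal, hypothetical usage sketch for the function above (not run). The argument names are
# taken from parameters referenced in the function body; other inputs (e.g. the checklist) are
# omitted here and the values shown are assumptions, not documented defaults:
# flaggedData <- countryOutlieRs(data = occurrenceData,
# keepAdjacentCountry = TRUE,
# pointBuffer = 0.05,
# stepSize = 1000000,
# mc.cores = 1)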
# /scratch/gouwar.j/cran-all/cranData/BeeBDC/R/countryOutliers.R
# This Function is designed to produce a summary table of data sources.
# It was written by James B Dorey from the 20th of January 2023.
#' Build a table of data providers for bee occurrence records
#'
#' This function will attempt to find and build a table of data providers that have contributed
#' to the input data, especially using the 'institutionCode' column. It will also look for a
#' variety of other columns to find data providers using an internally set sequence of if-else
#' statements. Hence, this function is quite specific for bee data, but should work for other
#' taxa in similar institutions.
#'
#'
#' @param data A data frame or tibble. Occurrence records as input.
#' @param runBeeDataChecks Logical. If TRUE, will search in other columns for specific clues to
#' determine the institution.
#' @param outPath A character path. The path to the directory in which the table will be saved.
#' Default = OutPath_Report.
#' @param fileName Character. The name of the file to be saved, ending in ".csv".
#'
#'
#' @return Returns a table with the data providers, a specimen count, and a species count.
#' @export
#'
#' @importFrom dplyr %>%
#'
#' @examples
#'
#' # import data
#' data(beesFlagged)
#'
#' testOut <- dataProvTables(
#' data = beesFlagged,
#' runBeeDataChecks = TRUE,
#' outPath = tempdir(),
#' fileName = "testFile.csv")
#'
dataProvTables <- function(
data = NULL,
runBeeDataChecks = FALSE,
outPath = OutPath_Report,
fileName = NULL
){
requireNamespace("dplyr")
# locally bind variables to the function
OutPath_Report <- occurrenceCount <- NULL
#### 0.0 Warnings ####
if(is.null(data)){
stop("You must provide an input dataset.")
}
if(is.null(fileName)){
stop("You must provide a fileName.")
}
# locally bind variables to the function
dataSource<-institutionCode<-recordNumber<-institutionCodeNew<-recordedBy<-occurrenceID<-
catalogNumber<-bibliographicCitation<-datasetName<-otherCatalogNumbers<-rightsHolder<-
references<-eventID<-datasetID<-database_id<-id<-scientificName<-DataPath <- NULL
#### 1.0 Data prep ####
if(runBeeDataChecks == TRUE){
##### 1.1 Find codes ####
# Find institutionCodes using other information
temp <- data %>%
# Select ALA as data source and only those missing institutionCode
dplyr::filter(stringr::str_detect(dataSource, "ALA|GBIF|iDigBio") &
is.na(institutionCode)) %>%
# Find clues to institutionCode and add it
dplyr::mutate(
# WAM
institutionCodeNew = ifelse(
stringr::str_detect(recordNumber, "^WAM "), "WAM", NA),
# HYM
institutionCodeNew = dplyr::if_else(stringr::str_detect(recordNumber, "^HYM ") &
is.na(institutionCodeNew), "NMV", institutionCodeNew),
institutionCodeNew = dplyr::if_else(stringr::str_detect(recordedBy, "Assorted Museum of Victoria") &
is.na(institutionCodeNew), "NMV", institutionCodeNew),
# bowerbird
institutionCodeNew = dplyr::if_else(stringr::str_detect(recordNumber, "bowerbird") &
is.na(institutionCodeNew), "bowerbird", institutionCodeNew),
# PaDIL
institutionCodeNew = dplyr::if_else(stringr::str_detect(tolower(recordNumber), "^bee|^rlj ") &
is.na(institutionCodeNew), "PaDIL", institutionCodeNew),
# Flickr
institutionCodeNew = dplyr::if_else( stringr::str_detect(tolower(occurrenceID), "flickr") &
is.na(institutionCodeNew), "Flickr", institutionCodeNew),
# questagame
institutionCodeNew = dplyr::if_else( stringr::str_detect(tolower(recordedBy), "questagame") &
is.na(institutionCodeNew), "QuestaGame", institutionCodeNew),
# WINC
institutionCodeNew = dplyr::if_else( stringr::str_detect(catalogNumber, "WINC") &
is.na(institutionCodeNew), "WINC", institutionCodeNew),
# biocollectALA
institutionCodeNew = dplyr::if_else( stringr::str_detect(recordNumber, "biocollect") &
is.na(institutionCodeNew), "Wildlife Watch NSC", institutionCodeNew),
# Aus Museum
institutionCodeNew = dplyr::if_else( stringr::str_detect(recordNumber, "^K |AM ") &
is.na(institutionCodeNew), "AM", institutionCodeNew),
# WildNet
institutionCodeNew = dplyr::if_else( stringr::str_detect(occurrenceID, "WildNet") &
is.na(institutionCodeNew), "WildNet", institutionCodeNew),
## OTHER
institutionCodeNew = dplyr::if_else( stringr::str_detect(catalogNumber, "^RSKM_ENT") &
is.na(institutionCodeNew), "RSKM", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(occurrenceID, "^UVM") &
is.na(institutionCodeNew), "UVM", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(catalogNumber, "^BIOUG") &
is.na(institutionCodeNew), "BIOUG", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(catalogNumber, "^ZMUO") &
is.na(institutionCodeNew), "ZMUO", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(occurrenceID, "data\\.biodiversitydata\\.nl") &
is.na(institutionCodeNew), "Naturalis Biodiversity Center", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(bibliographicCitation, "FinBIF") &
is.na(institutionCodeNew), "FinBIF", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(datasetName, "Swiss National Apoidea Databank") &
is.na(institutionCodeNew), "CSCF", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(otherCatalogNumbers, "VTST_ENT") &
is.na(institutionCodeNew), "VTST", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(otherCatalogNumbers, "ECNRUFC") &
is.na(institutionCodeNew), "ECNRUFC", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(recordedBy, "SPIPOLL") &
is.na(institutionCodeNew), "SPIPOLL", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(datasetName, "Riiklik keskkonnaseire programm") &
is.na(institutionCodeNew), "Riiklik keskkonnaseire programm", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(rightsHolder, "Suffolk Biodiversity Information Service") &
is.na(institutionCodeNew), "Suffolk Biodiversity Information Service", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(rightsHolder, "Staffordshire Ecological Record") &
is.na(institutionCodeNew), "Staffordshire Ecological Record", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(rightsHolder, "NatureSpot") &
is.na(institutionCodeNew), "NatureSpot", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(rightsHolder, "Biological Records Centre") &
is.na(institutionCodeNew), "Biological Records Centre", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(references, "www\\.ebi\\.ac\\.uk/ena") &
is.na(institutionCodeNew), "European Nucleotide Archive", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(occurrenceID, "observation\\.org") &
is.na(institutionCodeNew), "Observation.org", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(eventID, "plutof.ut.ee") &
is.na(institutionCodeNew), "Plotuf", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(catalogNumber, "USNM ENT") &
is.na(institutionCodeNew), "USNM", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(occurrenceID, "boldsystems\\.org") &
is.na(institutionCodeNew), "BOLD", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(datasetName, "Chronicle of Nature") &
is.na(institutionCodeNew), "Chronicle of Nature", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(rightsHolder, "Isle of Wight Local Records Centre") &
is.na(institutionCodeNew), "Isle of Wight Local Records Centre", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(rightsHolder, "Gloucestershire Centre for Environmental Records") &
is.na(institutionCodeNew), "Gloucestershire Centre for Environmental Records", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(datasetName, "Bumble bees collected in a large-scale field experiment in power line clearings, southeast Norway") &
is.na(institutionCodeNew), "Norwegian University of Life Sciences (NMBU)", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(eventID, "WestAfricaBees") &
is.na(institutionCodeNew), "Station d'Ecologie de Lamto", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(eventID, "TaborW") &
is.na(institutionCodeNew), "Vermont Center for Ecostudies", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(datasetName, "Abeilles fichier Nico Schneider") &
is.na(institutionCodeNew), "National Museum of Natural History, Luxembourg", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(occurrenceID, "BSRU") &
is.na(institutionCodeNew), "Chulalongkorn University Natural History Museum (CUNHM)", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(occurrenceID, "MFV:VT|USFWS-RCN|VCE:|USfWS-RCN|MNWR:bombus|^Frey21-") &
is.na(institutionCodeNew), "Vermont Center for Ecostudies", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(catalogNumber, "21BAM") &
is.na(institutionCodeNew), "Vermont Center for Ecostudies", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(occurrenceID, "Par\\.V\\.|Viter\\.|Vit\\.S|^UNCG|Rufford\\.UA|Parkh\\.faun|Medobory\\.data|Ins\\.Khar") &
is.na(institutionCodeNew), "Ukrainian Nature Conservation Group (UNCG)", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(occurrenceID, "Calabuig:Ringsted") &
is.na(institutionCodeNew), "Natural History Museum of Denmark", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(rightsHolder, "Cambridgeshire & Peterborough Environmental Records Centre") &
is.na(institutionCodeNew), "Cambridgeshire & Peterborough Environmental Records Centre", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(references, "natureshare\\.org\\.au") &
is.na(institutionCodeNew), "NatureShare", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(rightsHolder, "Manx Wildlife Trust") &
is.na(institutionCodeNew), "Manx Wildlife Trust", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(rightsHolder, "Ministry of Justice") &
is.na(institutionCodeNew), "Ministry of Justice, UK", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(rightsHolder, "Natural England") &
is.na(institutionCodeNew), "Natural England", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(rightsHolder, "Queensland Government") &
is.na(institutionCodeNew), "Queensland Government", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(rightsHolder, "Royal Horticultural Society") &
is.na(institutionCodeNew), "Royal Horticultural Society", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(rightsHolder, "Sheffield and Rotherham Wildlife Trust") &
is.na(institutionCodeNew), "Sheffield and Rotherham Wildlife Trust", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(rightsHolder, "Prioksko-Terrasnyi Biosphere Reserve") &
is.na(institutionCodeNew), "Prioksko-Terrasnyi Biosphere Reserve", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(occurrenceID, "mohonk:") &
is.na(institutionCodeNew), "Mohonk Preserve", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(occurrenceID, "^LUXNATFUND|^DSS00") &
is.na(institutionCodeNew), "National Museum of Natural History, Luxembourg", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(datasetName, "Collection hymenopteres MNHNL") &
is.na(institutionCodeNew), "National Museum of Natural History, Luxembourg", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(occurrenceID, "IIA-ENT") &
is.na(institutionCodeNew), "INSTITUTO DE INVESTIGACaO AGRONOMICA - IIA", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(occurrenceID, "indiabiodiversity\\.org") &
is.na(institutionCodeNew), "IndiaBiodiversity.org", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(catalogNumber, "^OPI") &
is.na(institutionCodeNew), "South Australia, Department for Environment and Water", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(catalogNumber, "^WFM[0-9]+") &
is.na(institutionCodeNew), "Western Australia, Department of Biodiversity, Conservation and Attractions", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(recordedBy, "LIMOUSIN,BUREAU D'ETUDE DGE|PNR PL|Agniele Touret-Alby, Quentin Rome|AGASSE-YVER Florence, MARLE Micka|AUBERT Matthieu|BESSIERE A\\., BLEOMELEN Alwin \\(PNM\\)|Bottinelli Julien|PERCHE NATURE|BRUGEROLLES Yvan|CENSE Thierry,CENSE Colette|CESARI Lily \\(Naturoptere\\)|CHOREIN Adrien \\(CEN Centre\\)|Cocquempot C\\.|DAMOISEAU Sebastien \\(CERCOPE\\)|GENOUD David|Gosselin M\\.|Grumdi|Jean-Pierre Viallard|Jean-Pierre Carreras|Jean-Loup d'Hondt|Jean-Loup d'Hondt|Jean-Louis PRATZ|Jean-Jacques Laffitte|Jean-Laurent HENTZ|Jean-Francois Campion|Jean-Sebastien Carteron|LE DIVELEC Romain|MAILLIER Sebastien|MARTHA Benoit|PLATEAUX Luc|PRATZ Jean-Louis|Sebastien LAGUET|Sebbbounet|Thierry ROBERT|\\(ECOSPHERE\\)|\\(CEN AQUITAINE\\)") &
is.na(institutionCodeNew), "OFB-CNRS-MNHN", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(datasetID, "E053-2614A8C02B7C|B2C9849D2ACF|2614A8C021C1|2614A8C0FB88|2614A8C0E6FC|2614A8C0CF63|2614A8C05B99|2614A8C00BBE|2614A8C0C021|2614A8C0FC45|2614A8C067D6|2614A8C0753D|2614A8C057EA|5014A8C02001|5014A8C04A0C|2614A8C00722|-E053-|-e053-|F0DFF9845389|8EA38E099656|26033513335E|C9074EB78761|9AC2D5DAB4DF|DF2C4D61871E|E06C2F2DB641|A83B5C8B393F|3D6B50E67F30|6DB84CF2329A|EEE933FAF77E|9776F5D05D87|A174FB52126E|26E5D9FC07CC|1CF99798EDF1|4E1ECB87BC1F|B6A9BC006CB2|6943DF45F77E|E17388AB56E2|B65CCE479F2E|C02F4B1A3FE8") &
is.na(institutionCodeNew), "OFB-CNRS-MNHN", institutionCodeNew),
institutionCodeNew = dplyr::if_else( stringr::str_detect(bibliographicCitation, "Wild bees of Belgium") &
is.na(institutionCodeNew), "Belgian Biodiversity Platform", institutionCodeNew),
# COMBINE with institutionCode
institutionCode = dplyr::if_else(is.na(institutionCode),
institutionCodeNew,
institutionCode)
) %>%
dplyr::select(!institutionCodeNew)
# Combine
data <- data %>%
# Remove the occs from temp and then add them in again
dplyr::filter(!database_id %in% temp$database_id) %>%
dplyr::bind_rows(temp)
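# Illustrative sketch (comments only; `mainTable` and `updatedRows` are hypothetical names) of
# the row-update pattern used above: the re-worked rows in `temp` replace their originals by
# first being dropped from the main table on database_id and then appended again:
# mainTable %>%
# dplyr::filter(!database_id %in% updatedRows$database_id) %>%
# dplyr::bind_rows(updatedRows)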
#### 1.2 Make edits ####
data <- data %>%
# Make sure USGS is included
dplyr::mutate(institutionCode = dplyr::if_else(
is.na(institutionCode) & stringr::str_detect(id, "USGS_DRO"),
"USGS", institutionCode))
data <- data %>%
# Make some corrections for consistency
dplyr::mutate(institutionCode = dplyr::if_else(
stringr::str_detect(institutionCode, "C\\.A\\. Triplehorn Insect Collection, Ohio State University"),
"C.A. Triplehorn Insect Collection, Ohio State University, Columbus, OH (OSUC)", institutionCode)) %>%
dplyr::mutate(institutionCode = dplyr::if_else(
stringr::str_detect(institutionCode, "Caledonian Conservation"),
"Caledonian Conservation", institutionCode)) %>%
dplyr::mutate(institutionCode = dplyr::if_else(
stringr::str_detect(institutionCode, "^CardObs$"),
"CardObs", institutionCode)) %>%
dplyr::mutate(institutionCode = dplyr::if_else(
stringr::str_detect(institutionCode, "Instituto Nacional de Pesquisas da Amaz"),
"Instituto Nacional de Pesquisas da Amazonia (INPA)", institutionCode)) %>%
dplyr::mutate(institutionCode = dplyr::if_else(
stringr::str_detect(institutionCode, "\\(JBB\\)"),
"Jardin Botanico de Bogota Jose Celestino Mutis (JBB)", institutionCode)) %>%
dplyr::mutate(institutionCode = dplyr::if_else(
stringr::str_detect(institutionCode, "^SDA$"),
"SDA - Secretaria Distrital de Ambiente", institutionCode)) %>%
dplyr::mutate(institutionCode = dplyr::if_else(
stringr::str_detect(institutionCode, "Universidad del Magdalena"),
"Universidad del Magdalena (UniMagdalena)", institutionCode)) %>%
dplyr::mutate(institutionCode = dplyr::if_else(
stringr::str_detect(institutionCode, "Universidad Nacional de Colombia"),
"Universidad Nacional de Colombia (UNAL)", institutionCode)) %>%
dplyr::mutate(institutionCode = dplyr::if_else(
stringr::str_detect(institutionCode, "University of Guelph"),
"University of Guelph", institutionCode)) %>%
dplyr::mutate(institutionCode = dplyr::if_else(
stringr::str_detect(institutionCode, "USDA[ -]ARS"),
"USDA-ARS", institutionCode))
}
#### 2.0 Transformations table ####
# Define the transformations table, which has been manually constructed to link the input
# institutionCodes with those that hopefully match the intended institution code and name
instTable <- dplyr::tribble(
~institutionCode_in, ~Code_out, ~Name_out,
"SMNH", "SMNH", "Saitama Museum of Natural History or Schmidt Museum of Natural History, Emporia State University or Saskatchewan Museum of Natural History",
"ABS", "ABS", "Aberystwyth University",
"MNA", "MNA", "Museo Nazionale dell'Antartide",
"FMNH", "FMNH", "Finnish Museum of Natural History",
"BISON", "BISON", "Biodiversity Information Serving Our Nation",
"AUMNH", "AUMNH", "Auburn University Museum of Natural History",
"C.A. Triplehorn Insect Collection, Ohio State University, Columbus, OH (OSUC)", "OSUC", "Ohio State University",
"NTSRV", "NTSRV", "NatureServe Network",
"UI", "UI", "Bureau of Land Management, U.S. Department of the Interior",
"MSM", "MSM", "Arizona Museum of Natural History or University of Puerto Rico",
"AM", "AM", "Australian Museum",
"AMNH", "AMNH", "American Museum of Natural History",
"AMNHTC", "AMNH", "American Museum of Natural History",
"ANSP", "ANSP", "Academy of Natural Sciences",
"ASU", "ASU", "Arizona State University Hasbrouck Insect Collection",
"AUM", "AUM", "Auburn University",
"BBSL", "BBSL", "Bee Biology and Systematics Laboratory, Utah",
"BIOUG", "BIOUG", "Centre for Biodiversity Genomics",
"BLM", "BLM", "Bureau of Land Management",
"BRFC", "BRFC", "Black Rock Forest Collection",
"BYU", "BYU", "Brigham Young University",
"CAES", "CAES", "Connecticut Agricultural Experiment Station",
"CASC", "CAS", "California Academy of Sciences",
"CAS", "CAS", "California Academy of Sciences",
"CHAS", "CHAS", "Chicago Academy of Sciences",
"Cleveland Museum of Natural History, OH (CLEV)", "CLEV", "Cleveland Museum of Natural History, OH",
"CNC", "CNC", "Canadian National Collection of Insects, Arachnids, and Nematodes",
"CSCA", "CSCA", "California State Collection of Arthropods",
"CSIRO", "CSIRO", "Australian National Insect Collection, CSIRO",
"CSU", "CSU", "Colorado State University or University of Central Oklahoma",
"CUIC", "CUIC", "Cornell University Insect Collection",
"DBG", "DBG", "Denver Botanic Gardens",
"DCH", "DCH", "Davidson College",
"DUGWAY", "DUGWAY", "Dugway Proving Ground Natural History",
"ECOSUR", "ECOSUR", "El Colegio de la Frontera Sur",
"EMEC", "EMEC", "Essig Museum, University of California, Berkeley",
"Glacier National Park Collections, West Glacier, MT (GLNP)", "GLNP", "Glacier National Park Collections, West Glacier, MT (GLNP)",
"IBUNAM", "IBUNAM", "Instituto de BiIolog\\u00eda, Universidad Nacional Aut\\u00f3noma de M\\u00e9xico",
"iNaturalist", "iNaturalist", "iNaturalist",
"INBio", "INBio", "Instituo Nacional de Biodiversidad",
"INHS", "INHS", "Illinois Natural History Survey",
"Instituto Nacional de Pesquisas da Amaz\\u00f4nia (INPA)", "INPA", "Instituto Nacional de Pesquisas da Amaz\\u00f4nia",
"KNWR", "KNWR", "DOI/FWS, Kenai National Wildlife Refuge",
"KU", "KU", "The University of Kansas",
"KY", "KY", "University of Kentucky",
"LACM", "LACM", "Los Angeles County Museum of Natural History",
"LCDI", "LCDI", "Luther Entomological Research Collection",
"MCZ", "MCZ", "Harvard University, Museum of Comparative Zoology",
"MEL", "MEL", "Museo Entomologico de Leon",
"MHNG", "MHNG", "Mus\\u00e9um d'histoire naturelle de la Ville de Gen\\u00e8ve",
"MHNN", "MHNN", "Mus\\u00e9um d'histoire naturelle de Neuch\\u00e2tel",
"MISSA", "MISSA", "Mississippi State University",
"MIZA", "MIZA", "Museo del Instituto de Zoologia Agricola Francisco Fernandez Yepez",
"MNHN", "MNHN", "MNHN - Museum national d'Histoire naturelle",
"MSU", "MSU", "Michigan State University Museum",
"Montana State University, Bozeman, MT (MTEC)", "MTEC", "Montana State University, Bozeman, MT",
"MZH", "MZH", "Finnish Museum of Natural History",
"NAU", "NAU", "Northern Arizona University",
"NAUF", "NAUF", "Northern Arizona University, Flagstaff",
"NCSU", "NCSU", "North Carolina State University Insect Collection",
"NHMO", "NHMO", "University of Oslo, Zoological Museum",
"NHMUK", "NHMUK", "Natural History Museum, London",
"NMSU", "NMSU", "New Mexico State University",
"NYBG", "NYBG", "New York Botanical Garden",
"NYSM", "NYSM", "The New York State Museum",
"New Zealand Arthropod Collection", "NZAC", "New Zealand Arthropod Collection",
"NZCS", "NZCS", "University, National Zoological Collection of Suriname",
"OMNH", "OMNH", "Oklahoma Museum of Natural History",
"OSAC", "OSAC", "Oregon State Arthropod Collection",
"PPRI", "PPRI", "ARC-Plant Protection Research Institute, National Collection of Fungi: Culture Collection",
"PSUC", "PSUC", "Frost Entomological Museum, Penn State University",
"PU", "PU", "The Purdue Entomological Research Collection",
"PUCV", "PUCV", "Pontifical Catholic University of Valpara\\u00edso",
"QCAZ", "QCAZ", "Museo de Zoologia, Pontificia Universidad Catolica del Ecuador",
"QM", "QM", "Queensland Museum",
"QVMAG", "QVMAG", "Queen Victoria Museum and Art Gallery",
"RBCM", "RBCM", "Royal British Columbia Museum",
"RLMC", "RLMC", "RL Minckley Insect and Plant Collection",
"RMBL", "RMBL", "Rocky Mountain Biological Laboratory",
"RSKM", "RSKM", "Royal Saskatchewan Museum",
"Royal Saskatchewan Museum", "RSKM", "Royal Saskatchewan Museum",
"RUAC", "RUAC", "Rutgers University Entomological Museum",
"SAM", "SAM", "South Australian Museum",
"SBMN", "SBMNH", "Santa Barbara Museum of Natural History",
"SBMNH", "SBMNH", "Santa Barbara Museum of Natural History Entomology Collection",
"SDMC", "SDMC", "San Diego Natural History Museum",
"SDNHM", "SDNHM", "San Diego Natural History Museum, theNAT",
"SDSU", "SDSU", "San Diego State University Museum of Biodiversity",
"SFU", "SFU", "Simon Fraser University",
"SWRS", "SWRS", "Southwestern Research Station",
"TAMU", "TAMUIC", "Texas A&M University Insect Collection",
"TTU", "TTU", "Texas Tech University",
"UA", "UA", "University of Arizona Insect Collection",
"University of Alberta Museums (UAM)", "UAM", "University of Alberta Museums",
"UAM", "UAM", "University of Alberta Museums or University of Arkansas at Monticello or University of Alaska Museum",
"University of British Columbia", "UBC", "University of British Columbia",
"UCD", "UCD", "University of California, Davis",
"University of Central Florida Collection of Arthopods (UCFC)", "UCFC", "University of Central Florida Collection of Arthopods",
"UCM", "UCM", "University of Colorado Museum",
"UCSB", "UCSB", "University of California Santa Barbara",
"UCSC", "UCSC", "University of California Santa Cruz",
"UD", "UD", "University of Delaware Insect Research Collection",
"UHIM", "UHIM", "The University of Hawaii Insect Museum",
"UMCE", "UMCE", "Universidad Metropolitana de Ciencias de la Educacion",
"UMN EXT", "UMN EXT", "Minnesota Bee Atlas",
"UMNH", "UMNH", "Natural History Museum of Utah",
"Universidad Nacional de Colombia (UNAL)", "UNAL", "Universidad Nacional de Colombia",
"UNHP", "UNHP", "University of New Hampshire",
"UNM", "UNM", "University of New Mexico, Division of Arthropods Museum of Southwestern Biology",
"USDA-ARS", "USDA-ARS", "U.S. National Insects Collection Agricultural Research Service",
"NPA-ARS-USDA", "USDA-ARS", "U.S. National Insects Collection Agricultural Research Service",
"USGS", "USGS", "United States Geological Survey",
"USNM", "USNM", "Smithsonian Institution, National Museum of Natural History",
"UTEP", "UTEP", "University of Texas, El Paso",
"UVM", "UVM", "Zadock Thompson Natural History Collection, University of Vermont",
"UVM Zadock Thompson Zoological Collections", "UVM", "Zadock Thompson Natural History Collection, University of Vermont",
"UWYMED", "UWYMED", "University of Wyoming Dillon Lab Insect Collection",
"Vermont Center for Ecostudies", "VCE", "Vermont Center for Ecostudies",
"VCE", "VCE", "Vermont Center for Ecostudies",
"VCU", "VCU", "Virginia Commonwealth University",
"VPI", "VPI", "Virginia Polytechnic Institute and State University",
"University of Idaho, W.F. Barr Entomological Collection, Moscow, ID (WFBM)", "WFBM", "W.F. Barr Entomological Collection",
"M.T. James Museum, Washington State University, Pullman, WA (WSU)", "WSU", "Washington State University",
"WVW", "WVW", "West Virginia Wesleyan College",
"YPM", "YPM", "Yale Peabody Museum",
"ZFMK", "ZFMK", "Zoologisches Forschungsmuseum Alexander Koenig",
"ZMUC", "ZMUC", "University of Copenhagen, Zoological Museum",
"ZSM", "ZSM", "Zoologische Museum Staatssammlung",
"GMNH", "GMNH", "Georgia Museum of Natural History",
"ABReC", "ABReC", "Argyll Biological Records Centre",
"ACA", "ACA", "Alberta Conservation Association",
"ADT", "ADT", "Antarctic Division",
"AK Entomologie NABU Sachsen", "AK Entomologie", "Arbeitskreis Entomologie im NABU Sachsen",
"AMU", "AMU", "Adam Mickiewicz University in Pozna\\u0144",
"ARC", "ARC", "Agricultural Research Council, National Collection of Insects, South Africa",
"Artenfinder Rheinland-Pfalz", "ARPf", "Artenfinder Rheinland-Pfalz",
"Alberta Environment and Parks", "BDUC", "Alberta Environment and Parks",
"BioDiversity4All", "BioDiversity4All", "BioDiversity4All",
"BioFokus", "BioFokus", "BioFokus",
"Borror Laboratory of Bioacoustics, Ohio State University, Columbus, OH (BLB)", "BLB", "Borror Laboratory of Bioacoustics, Ohio State University, Columbus, OH",
"BNM", "BNM", "Belau National Museum",
"LI", "LI", "Biology Centre of the Upper Austrian State Museum",
"Biodiversity Institute of Ontario", "OAC", "Biodiversity Institute of Ontario",
"UFPB", "UFPB", "Departamento de Sistematica e Ecologia",
"Manx Wildlife Trust", "MWT", "Manx Wildlife Trust",
"AMDC", "AMDC", "AMDC",
"Universidad de los Andes (UniAndes)", "ANDES", "Universidad de los Andes",
"AU", "AU", "AU",
"Bumblebee Conservation Trust", "BCT", "Bumblebee Conservation Trust",
"BIS", "BIS", "BIS",
"BJS", "BJS", "BJS",
"BRERC", "BRERC", "Bristol Regional Environmental Records Centre",
"BROW", "BROW", "The Broward College Insect Collection",
"Buglife", "Buglife", "The Invertebrate Conservation Trust",
"BURKLE", "BURKLE", "BURKLE",
"Caledonian Conservation", "Caledonian Conservation", "Caledonian Conservation",
"Calluna AB", "Calluna AB", "Calluna AB",
"CAWM", "CAWM", "The College of African Wildlife Management",
"Cumbria Biodiversity Data Centre", "CBDC", "Cumbria Biodiversity Data Centre",
"CBM", "CBM", "Natural History Museum and Institute, Chiba or Working Collection M. Baehr, M\\u00fcnchen",
"Pierre Fabre", "CBPF", "Conservatoire Botanique Pierre Fabre",
"Corporaci\\u00f3n Aut\\u00f3noma Regional para la Defensa de la Meseta de Bucaramanga", "CDMB", "CDMB - Corporaci\\u00f3n Aut\\u00f3noma Regional Para la Defensa de la Meseta de Bucaramanga",
"CEDaR", "CEDaR", "CEDaR",
"Federaci\\u00f3n Nacional de Cafeteros - Centro Nacional de Investigaciones de Caf\\u00e9 (\\ CENICAFf)", "\\ CENICAFf", "Centro Nacional de Investigaciones de Caf\\u00e9",
"CENID-COMEF-INIFAP", "CENID-COMEF-INIFAP", "Centro Nacional en Investigaci\\u00f3n Disciplinaria en Conservaci\\u00f3n y Mejoramiento de Ecosistemas Forestales, Instituto Nacional de Investigaciones Forestales, Agr\\u00edcolas y Pecuarias",
"COMFENALCO", "CEPB", "Colecci\\u00f3n entomol\\u00f3gica Piedras Blancas (Comfenalco)",
"CfGA", "CfGA", "Caring For God's Acre",
"CIBNOR", "CIBNOR", "Centro de Investigaciones Biologicas del Noroeste SC",
"Tullie House Museum", "CLE", "Tullie House Museum",
"CLO", "CLO", "CLO",
"Cofnod", "Cofnod", "The Local Environmental Records Centre for North Wales",
"Conservation International", "Conservation International", "Conservation International",
"CORN", "CORN", "CORN",
"IMEDEA", "CSIC-IMEDEA", "Instituto Mediterr\\u00e1neo de Estudios Avanzados (CSIC)",
"DART", "DART", "Dartmouth College",
"Derbyshire Biological Records Centre", "DBRC", "Derbyshire Biological Records Centre",
"DEBU", "DEBU", "Ontario Insect Collection, University of Guelph",
"University of Guelph", "DEBU", "University of Guelph Insect Collection",
"Dorset Environmental Records Centre", "DERC", "Dorset Environmental Records Centre",
"DF", "DF", "Douglas Frew",
"Middlebury College", "DMIC", "The Duncan MacDonald Insect Collection, Middlebury College",
"DMP", "DMP", "DMP",
"DMSA", "DMSA", "Durban Museum",
"Derbyshire Wildlife Trust", "DWTrust", "Derbyshire Wildlife Trust",
"EBDA", "BAH", "Empresa Baiana de Desenvolvimento Agr\\u00edcola",
"Ecocom", "EcoCom", "Ecological Complexity",
"European Distributed Institute of Taxonomy (EDIT)", "EDIT", "European Distributed Institute of Taxonomy",
"EMEC-UTB", "EMEC-UTB", "Pollinator interaction flexibility across scales affects patch colonization and occupancy",
"Espace pour la vie", "EPLV", "Espace pour la vie",
"ETHZ", "ETHZ", "Erdgen\\u00e4ssische Technische Hochschule-Zentrum",
"FCB-UANL", "FCB-UANL", "Facultad de Ciencias Biol\\u00f3gicas de la UANL",
"FC-UNAM", "FC-UNAM", "Facultad De Ciencias, Universidad Nacional Aut\\u00f3noma de M\\u00e9xico",
"FiBL", "FiBL", "Forschungsinstitut f\\u00fcr biologischen Landbau Frick | Research Institute of Organic Agriculture Frick",
"Fife Nature Records Centre", "Fife", "Fife Nature Records Centre",
"Fife", "Fife", "Fife Nature Records Centre",
"Instituto de Investigaci\\u00f3n de Recursos Biol\\u00f3gicos Alexander von Humboldt (IAvH)", "FMB", "Instituto de Investigaci\\u00f3n de Recursos Biol\\u00f3gicos Alexander von Humboldt",
"FMIC", "FMIC", "FMIC",
"FMVZ-UADY", "FMVZ-UADY", "Veterinary Parasitology Laboratory at the Campus of Ciencias Biol\\u00f3gicas y Agropecuarias of Universidad Aut\\u00f3noma of Yucat\\u00e1n",
"Friends of the Earth (England, Wales and Northern Ireland)", "FotE", "Friends of the Earth (England, Wales and Northern Ireland)",
"GEO", "GEO", "Emory University",
"GMNHJ", "GMNHJ", "Gunma Museum of Natural History",
"Greensway AB", "Greensway AB", "Greensway AB",
"GSC", "GSC", "Geological Survey of Canada or Glenville State College",
"G\\u00e4teborgs Stad", "G\\u00e4teborgs Stad", "G\\u00e4teborgs Stad",
"HBRG", "HBRG", "Highland Biological Recording Group",
"HNS", "HNS", "Mus\\u00e9um national d'Histoire naturelle",
"HOC", "HOC", "HOC",
"H\\u00e4rryda kommun", "H\\u00e4rryda kommun", "H\\u00e4rryda kommun",
"HYO", "HYO", "Museum of Nature and Human Activities, Hyogo",
"HZIC", "HZIC", "Herbert Zettel Collection, Vienna, Austria",
"IFIT-CP", "IFIT-CP", "IFIT-CP",
"IGL-UNAM", "IGL-UNAM", "Instituto de Geolog\\u00eda, Universidad Nacional Aut\\u00f3noma de M\\u00e9xico",
"IIZD-UASLP", "IIZD-UASLP", "Instituto de Investigaci\\u00f3n de Zonas Des\\u00e9rticas, Universidad Aut\\u00f3noma de San Luis Potos\\u00ed",
"Instituto para la Investigaci\\u00f3n y la Preservaci\\u00f3n del Patrimonio Cultural y Natural del Valle del Cauca - INCIVA", "INCIVA", "Instituto para la Investigaci\\u00f3n y la Preservaci\\u00f3n del Patrimonio Cultural y Natural del Valle del Cauca - INCIVA",
"INM", "INM", "Ibaraki Nature Museum",
"INTA", "INTA", "Instituto National Tecnolog\\u00eda Agropecuaria, Buenos Aires, Argentina",
"IPMM", "IPMM", "Iwate Prefectural Museum",
"IRNAD", "IRNAD", "INSTITUTO DE INVESTIGACIONES EN RECURSOS NATURALES, AGROECOLOGIA Y DESARROLLO RURAL IRNAD",
"Isagen S.A. E.S.P.", "Isagen S.A. E.S.P.", "Isagen S.A. E.S.P.",
"Jard\\u00edn Bot\\u00e1nico de Bogot\\u00e1 Jos\\u00e9 Celestino Mutis (JBB)", "JBB", "Jard\\u00edn Bot\\u00e1nico Jos\\u00e9 Celestino Mutis",
"JML", "JML", "JML",
"John Muir Trust", "JMTrust", "John Muir Trust",
"JSA", "JSA", "John S Ascher",
"JSANUS", "JSANUS", "JSANUS",
"JSAOBS", "JSAOBS", "John S Ascher",
"J\\u00e4rf\\u00e4lla Kommun", "J\\u00e4rf\\u00e4lla Kommun", "J\\u00e4rf\\u00e4lla Kommun",
"KAP", "KAP", "Kagoshima Prefectural Museum",
"KMM", "KMM", "Collection of Marine Microorganisms",
"Kevin M. O'Neill Private Collection (KMOC)", "KMOC", "Kevin M. O'Neill Private Collection (KMOC)",
"KNPS", "KNPS", "Kootenay Native Plant Society",
"Kristianstads Vattenrike", "Kristianstads Vattenrike", "Kristianstads Vattenrike",
"ELKU", "KYUM", "Kyushu University Museum",
"Lancashire Environment Record Network", "LERN", "Lancashire Environment Record Network",
"LRERC", "LRERC", "Rutland Environmental Records Centre",
"Laboratory of forest Sciences (LSF/UAC)", "LSF", "Laboratory of forest Sciences",
"L\\u00e4nsstyrelsen \\u00d6sterg\\u00e4tland", "L\\u00e4nsstyrelsen \\u00d6sterg\\u00e4tland", "L\\u00e4nsstyrelsen \\u00d6sterg\\u00e4tland",
"L\\u00e4nsstyrelsen \\u00d6rebro", "L\\u00e4nsstyrelsen \\u00d6rebro", "L\\u00e4nsstyrelsen \\u00d6rebro",
"L\\u00e4nsstyrelsen Blekinge", "L\\u00e4nsstyrelsen Blekinge", "L\\u00e4nsstyrelsen Blekinge",
"L\\u00e4nsstyrelsen Dalarna", "L\\u00e4nsstyrelsen Dalarna", "L\\u00e4nsstyrelsen Dalarna",
"L\\u00e4nsstyrelsen Gotland", "L\\u00e4nsstyrelsen Gotland", "L\\u00e4nsstyrelsen Gotland",
"L\\u00e4nsstyrelsen Halland", "L\\u00e4nsstyrelsen Halland", "L\\u00e4nsstyrelsen Halland",
"L\\u00e4nsstyrelsen Kalmar", "L\\u00e4nsstyrelsen Kalmar", "L\\u00e4nsstyrelsen Kalmar",
"L\\u00e4nsstyrelsen Kronoberg", "L\\u00e4nsstyrelsen Kronoberg", "L\\u00e4nsstyrelsen Kronoberg",
"L\\u00e4nsstyrelsen Norrbotten", "L\\u00e4nsstyrelsen Norrbotten", "L\\u00e4nsstyrelsen Norrbotten",
"L\\u00e4nsstyrelsen S\\u00e4dermanland", "L\\u00e4nsstyrelsen S\\u00e4dermanland", "L\\u00e4nsstyrelsen S\\u00e4dermanland",
"L\\u00e4nsstyrelsen Uppsala", "L\\u00e4nsstyrelsen Uppsala", "L\\u00e4nsstyrelsen Uppsala",
"L\\u00e4nsstyrelsen V\\u00e4rmland", "L\\u00e4nsstyrelsen V\\u00e4rmland", "L\\u00e4nsstyrelsen V\\u00e4rmland",
"L\\u00e4nsstyrelsen V\\u00e4sterbotten", "L\\u00e4nsstyrelsen V\\u00e4sterbotten", "L\\u00e4nsstyrelsen V\\u00e4sterbotten",
"L\\u00e4nsstyrelsen V\\u00e4sternorrland", "L\\u00e4nsstyrelsen V\\u00e4sternorrland", "L\\u00e4nsstyrelsen V\\u00e4sternorrland",
"L\\u00e4nsstyrelsen V\\u00e4stmanland", "L\\u00e4nsstyrelsen V\\u00e4stmanland", "L\\u00e4nsstyrelsen V\\u00e4stmanland",
"L\\u00e4nsstyrelsen V\\u00e4stra G\\u00e4taland", "L\\u00e4nsstyrelsen V\\u00e4stra G\\u00e4taland", "L\\u00e4nsstyrelsen V\\u00e4stra G\\u00e4taland",
"LUL", "LUL", "LUL",
"MA", "MA", "MA",
"MAGNT", "MAGNT", "Museum and Art Gallery of the Northern Territory",
"MBB", "MBB", "MBB",
"Missoula Butterfly House and Insectarium (MBHI)", "MBHI", "Missoula Butterfly House and Insectarium (MBHI)",
"MBM-UACAM", "MBM-UACAM", "Museo de Biodiversidad Maya, Universidad Aut\\u00f3noma de Campeche",
"MBRP", "MBRP", "MBRP",
"MCSN", "MCSN", "Museo Civico di Storia Naturale",
"MDEIE", "MDEIE", "Darder Natural History Museum of Banyoles",
"MEM", "MEM", "University of Memphis",
"MfN", "MfN", "Museum f\\u00fcr Naturkunde",
"MHNCM", "MHNCM", "Natural History Museum of the City of Mexico",
"MHNF", "MHNF", "Mus\\u00e9e d'histoire naturelle Fribourg",
"MJIM", "MJIM", "MJIM",
"MNHM", "MNHM", "Naturhistorisches Museum Mainz/Landessammlung f\\u00fcr Naturkunde Rheinland-Pfalz or John May Museum of Natural History",
"MNHNC", "MNHNC", "Museo Nacional de Historia Natural, Cuba",
"MNHNL", "MNHNL", "National Museum of Natural History, Luxembourg",
"MNHW", "MNHW", "MNHW",
"MNVS", "MNVS", "Mus\\u00e9e de la nature du Valais",
"MPEG", "MPEG", "Museu Paraense Emilio Goeldi",
"MST-and-NHMD", "MST-and-NHMD", "Natural History Museum of Denmark",
"MVY", "MVY", "MVY",
"MZL", "MZL", "Mus\\u00e9e cantonal de zoologie, Lausanne",
"North Ayrshire Countryside Ranger Service", "NACRS", "North Ayrshire Countryside Ranger Service",
"Natagriwal asbl", "Natagriwal asbl", "Natagriwal asbl",
"Natural England", "Natural England", "Natural England",
"naturgucker", "naturgucker", "naturgucker",
"Natuurpunt", "Natuurpunt", "Natuurpunt",
"NBDC", "NBDC", "The National Biodiversity Data Centre, Ireland",
"NBIS", "NBIS", "Norfolk Biodiversity Information Service",
"NDBC", "NDBC", "National Biodiversity Data Centre",
"NE", "NE", "University of New England",
"NEMU", "NEMU", "Newark Museum",
"NHMD", "NHMD", "Natural History Museum of Denmark",
"NIEK", "NIEK", "NIEK",
"NINA", "NINA", "Norwegian Institute for Nature Research",
"NMB", "NMB", "Naturhistorisches Museum Basel",
"NMBE", "NMBE", "Naturhistorisches Museum der Burgergemeinde Bern",
"NMBU:MINA", "NMBU:MINA", "Norwegian University of Life Sciences and Museo Naturalistico Francesco Min\\u00e0 Palumbo",
"NMDG", "NMDG", "Nathaniel Green Research Collection",
"NMLU", "NMLU", "Natur-Museum Luzern",
"NMMNHS", "NMMNHS", "New Mexico Museum of Natural History and Science",
"NMOK", "NMOK", "Naturkundemuseum im Ottoneum of Kassel",
"NMOL", "NMOL", "Naturmuseum Olten",
"NMR", "NMR", "Herbarium - Semyung University",
"NMSA", "NMSA", "KwaZulu-Natal Museum",
"NMSG", "NMSG", "Naturmuseum St. Gallen",
"NMSH", "NMSH", "Museum zu Allerheiligen Schaffhausen",
"NMSO", "NMSO", "Naturmuseum Solothurn",
"NMTG", "NMTG", "Naturmuseum Thurgau",
"NMV", "NMV", "Museum of Victoria",
"NMWIN", "NMWIN", "Naturmuseum Winterthur",
"NRM", "NRM", "Swedish Museum of Natural History - Zoological Collections",
"NRW", "NRW", "NRW Regional Data: North Wales",
"Natural Resources Wales", "NRWales", "Natural Resources Wales",
"NSBC", "NSBC", "North Sea Bird Club",
"NSMK", "NSMK", "National Science Museum",
"NSMT", "NSMT", "National Museum of Nature and Science",
"NSUR", "NSUR", "Naturkundliche Sammlung Uri",
"NSW Dept of Planning, Industry and Environment", "NSW DPIE", "NSW Dept of Planning, Industry and Environment",
"NT", "NT", "Northern Territory Department of Land Resources Management or Department of Environment, Parks and Water Security",
"NTNU-VM", "NTNU-VM", "NTNU Museum of Natural History and Archaeology",
"NTS", "NTS", "Nevada Operations Office, U.S. Department of Energy",
"Nybro kommun", "Nybro kommun", "Nybro kommun",
"OHBR", "OHBR", "Ontario Hydro",
"PCYU", "PCYU", "The Packer Collection at York University",
"PMNH", "PMNH", "Pakistan Museum of Natural History",
"Pro Natura", "Pro Natura", "Pro Natura",
"PRUN", "PRUN", "PRUN",
"PUCRS", "PUCRS", "Pontif\\u00edcia Universidade Cat\\u00f3lica do Rio Grande do Sul",
"RBRC", "RBRC", "USDA Regional Biomass Research Centers",
"Record, the Biodiversity Information System for Cheshire, Halton, Warrington and the Wirral", "RECORD", "The Biodiversity Information System for Cheshire, Halton, Warrington and the Wirral",
"RESEARCH", "RESEARCH", "RESEARCH",
"RHS", "RHS", "Plant Pathology, The Royal Horticultural Society",
"RSPB", "RSPB", "The Royal Society for the Protection of Birds",
"SAMA", "SAMA", "South Australian Museum",
"SDA - Secretar\\u00eda Distrital de Ambiente", "SDA", "Secretar\\u00eda Distrital de Ambiente",
"SEDN", "SEDN", "Shropshire Ecological Data Network",
"SEHU", "SEHU", "Laboratory of Systematic Entomology, The Hokkaido University Museum, Hokkaido University, Sapporo, Japan",
"SEWBReC", "SEWBReC", "South East Wales Biodiversity Records Centre",
"Shire Group of Internal Drainage Boards", "SGIDB", "Shire Group of Internal Drainage Boards",
"SLIC", "SLIC", "SLIC",
"SLU Artdatabanken", "SLU Artdatabanken", "SLU Artdatabanken",
"Scottish Natural Heritage", "SNH", "Scottish Natural Heritage",
"Staffordshire Ecological Record", "StaffER", "Staffordshire Ecological Record",
"SWT", "SWT", "Scottish Wildlife Trust",
"SxBRC", "SxBRC", "Sussex Biodiversity Record Centre",
"TAAM", "TAAM", "Institute of Agricultural and Environmental Sciences of the Estonian University of Life Sciences",
"TAM", "TAM", "Estonian Museum of Natural History",
"TCNS", "TCNS", "Toyota city nature sanctuary",
"TMAG", "TMAG", "The Tasmanian Museum and Art Gallery",
"TOYA", "TOYA", "Toyama Science Museum",
"Trafikverket", "Trafikverket", "Trafikverket",
"Royal Ontario Museum", "TRTC", "Royal Ontario Museum",
"UTSC", "TRTS", "Scarborough College, University of Toronto",
"Tohoku university and Yamagata university", "TUSG", "Tohoku University and Yamagata university",
"TWIC", "TWIC", "Twickenham Girls' School",
"UAAM", "UAAM", "The Arthropod Museum, University of Arkansas",
"UACJ", "UACJ", "Universidad Aut\\u00f3noma de Ciudad Ju\\u00e1rez",
"UBCZ", "UBCZ", "University of British Columbia - Spencer Entomological Collection",
"UCH", "UCH", "Universidad Aut\\u00f3noma de Chiriqu\\u00ed",
"University of Colorado Museum of Natural History, Boulder, CO (UCMC)", "UCMC", "University of Colorado Museum of Natural History, Boulder, CO (UCMC)",
"UCMS", "UCMS", "University of Connecticut Biodiversity Research Collections",
"UCRC", "UCRC", "University of California, Riverside",
"UCSD", "UCSD", "University of California San Diego",
"UDLAP", "UDLAP", "The Universidad de las Americas-Puebla",
"UFCG", "UFCG", "Universidade Federal de Campina Grande",
"UFMS", "UFMS", "Universidade Federal de Mato Grosso do Sul",
"UFPR", "UFPR", "Universidade Federal do Paran\\u00e1",
"Ugent", "Ugent", "Ghent University",
"UIC", "UIC", "University of Illinois Chicago",
"nef", "UiO", "University of Oslo",
"UiT", "UiT", "UiT The Arctic University of Norway",
"ULB", "ULB", "Universit\\u00e9 Libre de Bruxelles",
"UMH", "UMH", "Universidad Miguel Hern\\u00e1ndez",
"UMIC", "UMIC", "University of Mississippi",
"UMMZ", "UMMZ", "University of Michigan, Ann Arbor",
"UMS PatriNat", "UMS PatriNat", "UMS PatriNat",
"UNHC", "UNHC", "UNHC",
"University of Northern Iowa", "UNI", "University of Northern Iowa",
"Universidad de la Amazonia (Uniamazonia)", "Uniamazonia", "Universidad de la Amazonia",
"UOG", "UOG", "University of Guelph",
"UPRM", "UPRM", "University of Puerto Rico at Mayagueez, Rhizobium Culture Collection",
"UQUIND\\u00e5O", "UQUIND\\u00e5O", "UQUIND\\u00e5O",
"University of Rochester", "UR", "University of Rochester",
"USP", "USP", "Universidad San Pablo-CEU",
"USP-RP", "USP-RP", "University of S\\u00e3o Paulo Ribeir\\u00e3o Preto",
"UTE", "UTE", "University of Tartu Natural History Museum",
"UTIC", "UTIC", "University of Texas, Biodiversity Center, Entomology Collection",
"Ville de Trois-Rivi\\u00e8res", "VdTR", "Ville de Trois-Rivi\\u00e8res",
"VTST", "VTST", "VTST",
"WASH", "WASH", "Washburn University",
"WCS Colombia", "WCSC", "Wildlife Conservation Society (WCS Colombia)",
"Wildlife Conservation Society (WCS Colombia)", "WCSC", "Wildlife Conservation Society (WCS Colombia)",
"The Wildlife Information Centre", "WildIC", "The Wildlife Information Centre",
"Woodmeadow Trust", "WoodT", "Woodmeadow Trust",
"WPU", "WPU", "William Paterson University",
"WSDA", "WSDA", "Washington State Department of Agriculture",
"WSL", "WSL", "Swiss Federal Institute for Forest, Snow and Landscape Research",
"WUR-Alterra", "WUR", "Wageningen Universiy and Research",
"WWBIC", "WWBIC", "West Wales Biodiversity Information Centre",
"YPYM", "YPYM", "Yamaguchi Prefectural Museum",
"YorkU", "YUTO", "York University",
"YUTO", "YUTO", "York University",
"YWT", "YWT", "Yorkshire Wildlife Trust",
"ZMBN", "ZMBN", "Zoological Museum, University of Bergen, Norway",
"ZMZ", "ZMZ", "Zoologisches Museum Z\\u00fcrich",
"ZS", "ZS", "ZS",
"USFQ", "ZSFQ", "Museo de Zoolog\\u00eda, Universidad San Francisco de Quito",
"MMWVU", "MMWVU", "Mathew McKinney\\u00eds Private Collection",
"891.780.111-8", "UNIMAG", "Universidad del Magdalena",
NA, NA, "UNKNOWN",
"Melica", "Melica", "Melica",
"JMSC", "JMSC", "John M. Snider",
"UMA", "UMA", "University of Massachusetts, Museum of Zoology",
"PGC", "PGC", "The Petersburg Garden Club",
"TAMUIC", "TAMUIC", "Texas A&M University Insect Collection",
"WSU", "WSU", "Washington State University",
"Atlas of Living Australia", "ALA", "Atlas of Living Australia",
"North Sydney Council", "NSydC", "North Sydney Council",
"BDRS", "BDRS", "BDRS",
"ANIC", "ANIC", "Australian National Insect Collection",
"UFMG", "UFMG", "Federal University of Minas Gerais",
"FSCA", "FSCA", "Florida State Collection of Arthropods, The Museum of Entomology",
"DAR", "DAR", "Orange Agricultural Institute",
"TKIPM", "TKIPM", "TKIPM",
"Pontificia Universidad Javeriana (PUJ)", "PUJ", "Pontificia Universidad Javeriana",
"ICMI", "Insect collection of Itami City Museum of Insects", "ICMI",
"Sweco", "Sweco", "Sweco",
"CTALA_LB", "CTALA_LB", "Ministerio del Medio Ambiente de Chile",
"L\\u00e4nsstyrelsen J\\u00e4nk\\u00e4ping", "L\\u00e4nsstyrelsen J\\u00e4nk\\u00e4ping", "L\\u00e4nsstyrelsen J\\u00e4nk\\u00e4ping",
"Svenska Botaniska F\\u00e4reningen (SBF)", "SBF", "Svenska Botaniska F\\u00e4reningen",
"Falk\\u00e4pings kommun", "Falk\\u00e4pings kommun", "Falk\\u00e4pings kommun",
"Link\\u00e4pings kommun", "Link\\u00e4pings kommun", "Link\\u00e4pings kommun",
"L\\u00e4nsstyrelsen Stockholm", "L\\u00e4nsstyrelsen Stockholm", "L\\u00e4nsstyrelsen Stockholm",
"G\\u00e4teborgs naturhistoriska museum (GNM)", "GNM", "G\\u00e4teborgs naturhistoriska museum",
"IAS (invasivaarter.nu)", "IAS.nu", "invasivaarter.nu",
"L\\u00e4nsstyrelsen J\\u00e4mtland", "L\\u00e4nsstyrelsen J\\u00e4mtland", "L\\u00e4nsstyrelsen J\\u00e4mtland",
"MCNB", "MCNB", "Museu de Ci\\u00e8ncies Naturals de Barcelona",
"East Ayrshire Leisure Countryside Ranger Service", "EALCRS", "East Ayrshire Leisure Countryside Ranger Service",
"CBG", "CANB", "Australian National Herbarium",
"University of Mons", "UMons", "University of Mons",
"BDBCV", "BDBCV", "Biodiversity Data Bank of the Valencian Community",
"MSB", "MSB", "Museum of Southwestern Biology",
"Universidad del Magdalena (UniMagdalena)", "UNIMAG", "Universidad del Magdalena",
"Agropecuaria Aliar S.A.", "Aliar", "Agropecuaria Aliar S.A.",
"MnhnL", "MNHNL", "National Museum of Natural History, Luxembourg",
"IB FRC Komi SC UB RAS", "IB FRC Komi SC UB RAS", "Institute of Biology of Komi Science Centre of the Ural Branch of the Russian Academy of Sciences",
"RCM", "RCM", "RCM",
"NHMW", "NHMW", "Naturhistorisches Museum Wien",
"UAc", "UAC", "University of Calgary",
"Universidad Pedag\\u00f3gica y Tecnol\\u00f3gica de Colombia (UPTC)", "UPTC", "Universidad Pedog\\u00f3gica y Tecnol\\u00f3gica de Colombia",
"Corporaci\\u00f3n Colombiana de Investigaci\\u00f3n Agropecuaria - AGROSAVIA (Agrosavia)", "Agrosavia", "Corporaci\\u00f3n Colombiana de Investigaci\\u00f3n Agropecuaria - AGROSAVIA",
"Aguas de Bogot\\u00e1 S.A. E.S.P. (AB)", "SAESP", "Aguas de Bogot\\u00e1 SA ESP",
"UNICAL", "UNICAL", "Universit\\u00e0 Della Calabria",
"MoJ", "MoJ", "MoJ",
"AKPM", "AKPM", "Herbarium - Akita Prefectural Museum",
"KCMN", "KCMN", "Kaizuka City Museum of Natural History",
"Dalarnas Botaniska S\\u00e4llskap (DABS)", "DABS", "Dalarnas Botaniska S\\u00e4llskap",
"SBP", "SBP", "SBP",
"DRIB", "DRIB", "DRIB",
"Museo Javeriano de Historia Natural, Pontificia Universidad Javeriana", "MPUJ", "Museo Javeriano de Historia Natural, Pontificia Universidad Javeriana",
"ULL", "ULL", "ULL",
"ARM", "ARM", "County Museum",
"KCMH", "KCMH", "Kushiro City Museum",
"FACT(TFRI)", "TFRI", "Taiwan Forestry Research Institute",
"CardObs", "CardObs", "CardObs",
"GNM", "GNM", "Gothenburg Museum of Natural History (Goteborgs Naturhistoriska Museum)",
"UIB", "UIB", "University of Bergen",
"NCMG", "NCMG", "Nottingham City Museums & Galleries",
"The Conservation Volunteers Scotland", "CVS", "The Conservation Volunteers Scotland",
"KOM", "KOM", "Komatsu City Museum",
"ODDB-0NG/Direction de la Biodiversit\\u00e9", "ODDB ONG", "Organisation pour le D\\u00e9veloppement Durable et la Biodiversit\\u00e9",
"NMNL", "NMNL", "Museum De Bastei",
"CELP", "CELP", "Catalina Environmental Leadership Program",
"UACH", "UACH", "Universidad Autonoma Chapingo",
"SAG", "SAG", "Sammlung von Algenkulturen at Universitat Gottingen",
"Universidad de La Salle (La Salle)", "BOG", "Universidad de La Salle",
"InDRE-SSA", "INDRE", "Institute for Epidemiological Diagnosis and References",
"BGBM", "BGBM", "Botanic Garden and Botanical Museum Berlin",
"Anymals.org", "Anymals.org", "Anymals.org",
"CRQ - Corporaci\\u00f3n Aut\\u00f3noma Regional del Quind\\u00edo (CRQ)", "CRQ", "Corporaci\\u00f3n Aut\\u00f3noma Regional del Quind\\u00edo",
"Sk\\u00e4vde kommun", "Sk\\u00e4vde kommun", "Sk\\u00e4vde kommun",
"Universidade Federal do Paran\\u00e1 (UFPR)", "UPCB", "Universidade Federal do Paran\\u00e1",
"Universidad Tecnol\\u00f3gica del Choc\\u00f3 (UTCH)", "CHOCO", "Universidad Tecnol\\u00f3gica del Choc\\u00f3",
"DIBA", "DIBA", "DIBA",
"GUAM", "GUAM", "University of Guam, UOG Station",
"MBA", "MBA", "Environmental Protection Agency",
"CardObs - UMS PatriNat", "UMS PatriNat", "UMS PatriNat",
"ZMUO", "University of Oulu Zoological Museum", "ZMUO",
"DFFW2018", "DFFW2018", "DFFW2018",
"L\\u00e4nsstyrelsen G\\u00e4vleborg", "L\\u00e4nsstyrelsen G\\u00e4vleborg", "L\\u00e4nsstyrelsen G\\u00e4vleborg",
"UCLA", "UCLA", "University of California Los Angeles",
"Finsp\\u00e5ngs kommun", "Finsp\\u00e5ngs kommun", "Finsp\\u00e5ngs kommun",
"L\\u00e4nsstyrelsen Sk\\u00e5ne", "L\\u00e4nsstyrelsen Sk\\u00e5ne", "L\\u00e4nsstyrelsen Sk\\u00e5ne",
"BMEC", "UCLA", "University of California Los Angeles",
"PNMH", "PNMH", "Palestine Museum of Natural History",
"NIBIO", "NIBIO", "Norwegian Institute of Bioeconomy Research",
"UVG", "UVG", "Universidad del Valle",
"Marks kommun", "Marks kommun", "Marks kommun",
"Florav\\u00e4ktarna", "Florav\\u00e4ktarna", "Florav\\u00e4ktarna",
"Bor\\u00e5s Stad", "Bor\\u00e5s Stad", "Bor\\u00e5s Stad",
"OAFS", "OAFS", "OAFS",
"CU", "UCM", "University of Colorado Museum of Natural History",
"Stockholms Stad", "Stockholms Stad", "Stockholms Stad",
"Svenljunga kommun", "Svenljunga kommun", "Svenljunga kommun",
"PSU", "PSUC", "Frost Entomological Museum, Penn State University",
"WAM", "WAM", "Western Australian Museum",
"PaDIL", "PaDIL", "Pests and Diseases Image Library",
"Chulalongkorn University Natural History Museum (CUNHM)", "CUNHM", "Chulalongkorn University Natural History Museum",
"Flickr", "Flickr", "Flickr",
"QuestaGame", "QuestaGame", "QuestaGame",
"bowerbird", "bowerbird", "bowerbird",
"South Australia, Department for Environment and Water", "DEW_SA", "South Australia, Department for Environment and Water",
"NatureShare", "NatureShare", "NatureShare",
"Wildlife Watch NSC", "WWNSC", "Wildlife Watch NSC",
"European Nucleotide Archive", "ENA", "European Nucleotide Archive",
"BOLD", "BOLD", "Barcode of Life Data Systems",
"Biological Records Centre", "BRC", "Biological Records Centre",
"Observation.org", "Observation.org", "Observation.org",
"FinBIF", "FinBIF", "Finnish Biodiversity Information Facility",
"Ukrainian Nature Conservation Group (UNCG)", "UNCG", "Ukrainian Nature Conservation Group",
"Station d'Ecologie de Lamto", "SEL", "Station d'Ecologie de Lamto",
"OFB-CNRS-MNHN", "UMS PatriNat", "UMS PatriNat",
"Riiklik keskkonnaseire programm", "Riiklik keskkonnaseire programm", "Riiklik keskkonnaseire programm",
"Plotuf", "Plotuf", "Plotuf",
"Suffolk Biodiversity Information Service", "SBIS", "Suffolk Biodiversity Information Service",
"SPIPOLL", "SPIPOLL", "Suivi Photographique des Insectes POLLinisateurs",
"National Museum of Natural History, Luxembourg", "MNHNL", "National Museum of Natural History, Luxembourg",
"CSCF", "CSCF", "Centre Suisse de Cartographie de la Faune",
"Royal Horticultural Society", "RHS", "Royal Horticultural Society",
"Belgian Biodiversity Platform", "BBP", "Belgian Biodiversity Platform",
"NatureSpot", "NatureSpot", "NatureSpot",
"Naturalis Biodiversity Center", "Naturalis", "Naturalis Biodiversity Center",
"Natural History Museum of Denmark", "NHMD", "Natural History Museum of Denmark",
"WildNet", "WildNet", "WildNet",
"Chronicle of Nature", "Chronicle of Nature", "Chronicle of Nature",
"ECNRUFC", "ECNRUFC", "ECNRUFC",
"Queensland Government", "QLDGov", "Queensland Government",
"Prioksko-Terrasnyi Biosphere Reserve", "PTBR", "Prioksko-Terrasnyi Biosphere Reserve",
"Norwegian University of Life Sciences (NMBU)", "NMBU", "Norwegian University of Life Sciences (NMBU)",
"Ministry of Justice, UK", "MoJUK", "Ministry of Justice, UK",
"INSTITUTO DE INVESTIGA\\u00c3\\u00e1\\u00c3fO AGRON\\u00c3\\u201cMICA - IIA", "IIA", "Instituto de Investiga\\u00e7\\u00e3o Agron\\u00f3mica",
"Gloucestershire Centre for Environmental Records", "GCER", "Gloucestershire Centre for Environmental Records",
"IndiaBiodiversity.org", "IndiaBiodiversity.org", "IndiaBiodiversity.org",
"Sheffield and Rotherham Wildlife Trust", "SRWT", "Sheffield and Rotherham Wildlife Trust",
"Isle of Wight Local Records Centre", "IWLRC", "Isle of Wight Local Records Centre",
"WINC", "WINC", "Waite Insect & Nematode Collection",
"Western Australia, Department of Biodiversity, Conservation and Attractions", "WA_DBCA", "Western Australia, Department of Biodiversity, Conservation and Attractions",
"Mohonk Preserve", "DSRC", "Mohonk Preserve",
"Cambridgeshire & Peterborough Environmental Records Centre", "CPERC", "Cambridgeshire & Peterborough Environmental Records Centre"
)
#### 3.0 Table ####
##### 3.1 occCount ####
# Get a count of the number of occurrences per institutionCode
occCount <- data %>%
# Group by institutionCode
dplyr::group_by(institutionCode) %>%
# Get a tally of occurrences for each institutionCode
dplyr::add_tally() %>%
# Select only institutionCode and the tally and then keep one of each
dplyr::select(c(institutionCode, n)) %>%
dplyr::distinct(institutionCode, .keep_all = TRUE) %>%
# Rename the count column
dplyr::rename("occurrenceCount" = n)
##### 3.2 spCount ####
# Get a count of the number of species per institutionCode
spCount <- data %>%
# Keep only distinct institutionCode, scientificName combinations
dplyr::distinct(institutionCode, scientificName, .keep_all = TRUE) %>%
# Group by institutionCode
dplyr::group_by(institutionCode) %>%
# Get a tally of unique species for each institutionCode
dplyr::add_tally() %>%
# Select only institutionCode and the tally and then keep one of each
dplyr::select(c(institutionCode, n)) %>%
dplyr::distinct(institutionCode, .keep_all = TRUE) %>%
# Rename the count column
dplyr::rename("speciesCount" = n)
##### 3.3 Merge ####
counts <- occCount %>%
dplyr::left_join(spCount, by = "institutionCode") %>%
dplyr::arrange(occurrenceCount)
#### 4.0 Save and return ####
# If user provided an outPath then save the file
if(!is.null(outPath)){
readr::write_excel_csv(counts,
paste(outPath, fileName, sep = "/"))
}
return(counts)
} # END dataProvTables
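# A minimal usage sketch for dataProvTables(). Only the arguments visible above
# (data, outPath, fileName) are shown and the object names are hypothetical; the
# exported function may take additional arguments, so treat this as illustrative:
#   provTable <- dataProvTables(data = cleanedOccurrences,
#                               outPath = OutPath_Report,
#                               fileName = "dataProvTable.csv")
# The returned tibble holds one row per institutionCode with its occurrenceCount
# and speciesCount, sorted by occurrenceCount.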
#### End of file: dataProvTables.R ####
##### 2.3 dataReader ####
#' @importFrom dplyr %>%
# Read in occurrence data with the correct format to be merged
dataReader <- function(path_i, home_path){
# locally bind variables to the function
dplyr <- mgsub <- setNames <- . <- family <- day <- NULL
requireNamespace("dplyr", "mgsub")
#Set up bee family list
Bee_Families <- c("Andrenidae","Apidae", "Colletidae","Halictidae","Megachilidae","Melittidae",
"Stenotritidae","andrenidae","apidae", "colletidae","halictidae","megachilidae",
"melittidae","stenotritidae") # Find the paths
occ_paths <- repoFinder(path = path_i)
# Get the column types
ColTypes <- ColTypeR()
# Get the columns to keep
ColsToKeep <- names(ColTypes$cols)
# Make an internal copy of the template for use in the loop as the template tibble
data_template <- matrix(ncol = length(BeeBDC::ColTypeR()[[1]] %>% names()), nrow = 0) %>% as.data.frame() %>%
setNames(BeeBDC::ColTypeR()[[1]] %>% names()) %>% dplyr::tibble() %>%
dplyr::mutate(dplyr::across(dplyr::everything(), as.character))
#### ALA data ####
if(grepl("ALA_data", names(path_i)) == "TRUE"){
# Import these data
data_i <- readr::read_csv(path_i, col_names = TRUE,
# read in all columns as character for now
col_types = readr::cols(.default = readr::col_character()),
name_repair = "minimal") %>%
# Suppress warnings from read_csv
suppressWarnings(., classes = "warning") %>%
# Select only the unique columns
dplyr::select(unique(colnames(.), fromLast = TRUE))
# Change column names to match other datasets
# Remove dcterms: prefixes from some column names
colnames(data_i) <- mgsub::mgsub(colnames(data_i),
c("dcterms:"),
c(""))
# Filter the columns to only those that we want to select, based on the ColsToKeep vector
# that we created at the top of the R-script
if("recordID" %in% colnames(data_i)){
data_i <- data_i %>%
# The dataframe to match with
dplyr::rename("id" = "recordID")
}
data_i <- data_i %>%
# select columns that match the following string
dplyr::select( dplyr::matches(
# Use the carrot "^" to signify start of string and dollar sign "$" to signify end of
# string. Effectivly, this will only return an exact match.
paste("^",ColsToKeep,"$",sep="") ))
# Define the column types to match our standard
data_i <- data_i %>% # readr::type_convert(data_i, col_types = cols(.default = "c")) %>%
# Keep only the columns defined in ColTypeR
dplyr::select(tidyselect::any_of(c(ColsToKeep))) %>%
# Select only Bee families (in case this is needed, e.g. if you downloaded "Apoidea")
dplyr::filter(family %in% c(Bee_Families, "", "NA") )
} # END ALA data IF statement
#### GBIF data ####
if(grepl("GBIF_data", names(path_i)) == "TRUE"){
# Read in each file and then merge together
data_i <- readr::read_tsv(path_i,
quote = "", col_names = TRUE,
col_types = readr::cols(.default = readr::col_character())) %>%
# Suppress warnings from read_tsv
suppressWarnings(., classes = "warning") %>%
# Include all columns from original template file
dplyr::bind_rows(., data_template) %>%
# Keep only the columns defined in ColTypeR
dplyr::select(tidyselect::any_of(c(ColsToKeep)))
} # END GBIF data IF statement
#### iDigBio data ####
if(grepl("iDigBio_data", names(path_i)) == "TRUE"){ # Start iDigBio IF statement
# Import these data
data_i <- readr::read_csv(path_i, col_names = TRUE,
# read in all columns as character for now
col_types = readr::cols(.default = readr::col_character()),
# Do not keep the some columns
col_select = !c("abcd:typifiedName", "aec:associatedTaxa",
"ala:photographer","ala:species","ala:subfamily",
"ala:subspecies","ala:superfamily","chrono:ChronometricAge",
"dc:language","dc:rights","dc:type",
"dcterms:accessRights","dcterms:bibliographicCitation","dcterms:language",
"dcterms:license","dcterms:modified","dcterms:references",
"dcterms:rights","dcterms:rightsHolder","dcterms:source",
"dcterms:type","obis:ExtendedMeasurementOrFact",
"symbiota:recordEnteredBy","symbiota:recordID",
"symbiota:verbatimScientificName",
"taxonRankID","zan:ChronometricDate")) %>%
# Suppress warnings from read_csv
suppressWarnings(., classes = "warning")
# Change column names to match other datasets
# Remove dwc. (darwin core), idigbio. and gbif. prefixes from column names
colnames(data_i) <- mgsub::mgsub(colnames(data_i),
c("dwc.", "idigbio.", "gbif.", "aec:"),
c("", "","",""))
# Filter the columns to only those that we want to select, based on the ColsToKeep vector
# that we created at the top of the R-script
data_i <- data_i %>% # The dataframe to match with
# select columns that match the following string
dplyr::select( dplyr::matches(
# Use the carrot "^" to signify start of string and dollar sign "$" to signify end of
# string. Effectivly, this will only return an exact match.
paste("^",ColsToKeep,"$",sep="") )) %>%
# Remove day ranges that will stop occurrences from being read in.
dplyr::mutate(day = stringr::str_replace(day, pattern = " -.*|/.*|-.*", replacement = "" ))
# Define the column types to match our standard
data_i <- data_i %>%
# Select only Bee families (in case this is needed, e.g. if you downloaded "Apoidea")
dplyr::filter(family %in% c(Bee_Families, "", "NA", NA) ) %>%
# Keep only the columns defined in ColTypeR
dplyr::select(tidyselect::any_of(c(ColsToKeep))) # %>%
# Define the column types to match our standard
# readr::type_convert(col_types = ColTypes)
} # End iDigBio IF statement
#### SCAN data ####
if(grepl("SCAN_data", names(path_i)) == "TRUE"){ # Start SCAN IF statement
data_i <- readr::read_csv(path_i, col_names = TRUE,
# read in all columns as character for now
col_types = readr::cols(.default = readr::col_character())) %>%
# Suppress warnings from read_csv
suppressWarnings(., classes = "warning") %>%
# Filter the columns to only those that we want to select, based on the ColsToKeep vector
# that we created at the top of the R-script
dplyr::rename("fieldNotes" = "occurrenceRemarks") %>%
# select columns that match the following string
dplyr::select( tidyselect::any_of(ColsToKeep)) %>%
# Select only Bee families (in case this is needed, e.g. if you downloaded "Apoidea")
dplyr::filter(family %in% c(Bee_Families, "", "NA") ) %>%
# Keep only the columns defined in ColTypeR
dplyr::select(tidyselect::any_of(c(ColsToKeep))) # %>%
# Define the column types to match our standard
# readr::type_convert(col_types = ColTypes,
# guess_integer = TRUE)
} # END SCAN IF statement
return(data_i)
}
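# A minimal, hypothetical usage sketch for this internal helper. In the package
# workflow, path_i is a named file path supplied by the package's own merging
# routines, so the file path and name below are assumptions for illustration only:
#   occ_SCAN <- dataReader(
#     path_i = c(SCAN_data = "Data_acquisition_workflow/SCAN_data/occurrences.csv"),
#     home_path = "Data_acquisition_workflow")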
#### End of file: dataReader.R ####
# This function was created by James Dorey on the 26th of May 2022. It will attempt to find dates
# that do not occur in the eventDate column and restore them to avoid losing those occurrences in
# filtering.
# For questions, ask James Dorey at jbdorey[at]me.com
#' Find dates in other columns
#'
#' A function made to search other columns for dates and add them to the eventDate column.
#' The function searches the columns locality, fieldNotes, locationRemarks, and verbatimEventDate
#' for the relevant information.
#'
#' @param data A data frame or tibble. Occurrence records as input.
#' @param maxYear Numeric. The maximum year considered reasonable to find.
#' Default = lubridate::year(Sys.Date()).
#' @param minYear Numeric. The minimum year considered reasonable to find. Default = 1700.
#'
#' @importFrom stats complete.cases setNames
#'
#' @return The function returns the input occurrence data but with updated eventDate, year,
#' month, and day columns for occurrences where these data were a) missing and b) located in one of the
#' searched columns.
#'
#' @export
#'
#' @importFrom dplyr %>%
#'
#' @examples
#' # Using the example dataset, you may not find that any missing eventDates are rescued (depending
#' # on which version of the example dataset the user inputs).
#' beesRaw_out <- dateFindR(data = beesRaw,
#' # Years above this are removed (from the recovered dates only)
#' maxYear = lubridate::year(Sys.Date()),
#' # Years below this are removed (from the recovered dates only)
#' minYear = 1700)
dateFindR <- function(data = NULL,
maxYear = lubridate::year(Sys.Date()),
minYear = 1700) {
# locally bind variables to the function
eventDate<-database_id<-.<-verbatimEventDate<-fieldNotes<-locationRemarks<-ymd_vEV<-
ymd_fieldNotes<-ymd_locationRemarks<-locality<-dmy_vEV<-dmy_locality<-dmy_fieldNotes<-
dmy_locationRemarks<-mdy_vEV<-mdy_locality<-mdy_fieldNotes<-mdy_locationRemarks<-my_vEV<-
my_locality<-my_fieldNotes<-my_locationRemarks<-amb_vEV<-amb_locality<-amb_fieldNotes<-
amb_locationRemarks<-year <- endTime <- startTime <- originalDateCount <-
eventDate_in <- day <- NULL
# load required packages
requireNamespace("dplyr")
requireNamespace("lubridate")
requireNamespace("mgsub")
timeStart <- Sys.time()
#### 0.0 prep ####
writeLines(" - Preparing data...")
# Get a count of how many eventDate rows are full
originalDateCount <- sum(complete.cases(data$eventDate))
# Create a new running dataset
noDATEa <- data %>%
# Save the original eventDate column
dplyr::mutate(eventDate_in = eventDate,
.before = eventDate) %>%
dplyr::mutate(eventDate = lubridate::parse_date_time(eventDate_in,
orders = c("ymd", "ymdHMS","dmy","mdy"),
truncated = 5,
quiet = TRUE,
tz = "UTC",
locale = Sys.getlocale("LC_TIME"))) %>%
dplyr::mutate(eventDate = dplyr::if_else(is.na(eventDate),
lubridate::ymd_hms(eventDate_in, quiet = TRUE,
truncated = 5),
eventDate)) %>%
dplyr::mutate(dateSuccess = dplyr::if_else(is.na(eventDate),
FALSE,
TRUE))
# Save this dataset to be merged at the end...
ymd_hms_0 <- noDATEa %>%
dplyr::filter(complete.cases(eventDate)) %>%
dplyr::select(database_id, eventDate) %>%
setNames(object = ., c("database_id", "date"))
#### 1.0 easyDates ####
# Retrieve dates that are much easier to recover...
writeLines(" - Extracting dates from year, month, day columns...")
##### 1.1 year month day ####
# Filter down to the records that again have no eventDate
noDATEa <- noDATEa %>%
dplyr::filter(is.na(eventDate))
# Some records have date information in the dmy columns that can easily be retrieved
noDATEa <- noDATEa %>%
dplyr::mutate(eventDate = dplyr::if_else(is.na(as.character(eventDate)),
lubridate::ymd(stringr::str_c(year, month, day,
sep = "-"),
quiet = TRUE, truncated = 2),
eventDate))
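# Note: truncated = 2 above lets lubridate accept partial dates that are missing
# the day and/or month (e.g. "1990-7" parses to 1990-07-01), rather than dropping them.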
# Save this dataset to be merged at the end...
dmy_1 <- noDATEa %>%
dplyr::filter(complete.cases(eventDate)) %>%
dplyr::select(database_id, eventDate) %>%
setNames(object = ., c("database_id", "date"))
# Filter down to the records that again have no eventDate
noDATEa <- noDATEa %>%
dplyr::filter(is.na(eventDate))
if("occurrenceYear" %in% colnames(noDATEa)){
# Copy across the occurrenceYear column into the eventDate column
noDATEa$eventDate <- noDATEa$occurrenceYear}
# Save this dataset to be merged at the end...
occYr_2 <- noDATEa %>%
dplyr::filter(complete.cases(eventDate))%>%
dplyr::select(database_id, eventDate) %>%
setNames(object = ., c("database_id", "date"))
##### 1.2 Sept ####
# Because some people write "Sept" which cannot be read by lubridate, it needs to be
# replaced in these columns
noDATEa$locality <- noDATEa$locality %>%
stringr::str_replace(pattern = "[Ss]ept[\\s-/]",
replacement = "Sep ")
noDATEa$fieldNotes <- noDATEa$fieldNotes %>%
stringr::str_replace(pattern = "[Ss]ept[\\s-/]",
replacement = "Sep ")
noDATEa$locationRemarks <- noDATEa$locationRemarks %>%
stringr::str_replace(pattern = "[Ss]ept[\\s-/]",
replacement = "Sep ")
noDATEa$verbatimEventDate <- noDATEa$verbatimEventDate %>%
stringr::str_replace(pattern = "[Ss]ept[\\s-/]",
replacement = "Sep ")
#### 2.0 unAmb. str. dates ####
writeLines(paste(
" - Extracting dates from fieldNotes, locationRemarks, and verbatimEventDate ",
"columns in unambiguous ymd, dmy, mdy, and my formats...", sep = ""))
# Filter down to the records that again have no eventDate
noDATEa <- noDATEa %>%
dplyr::filter(is.na(eventDate))
monthStrings <- c("Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct","Nov", "Dec",
"jan", "feb", "mar", "apr", "may", "jun",
"jul", "aug", "sep", "oct","nov", "dec",
"January", "February", "March", "April",
"May", "June","July","August",
"September","October","November","December",
"january", "february", "march", "april",
"may", "june","july","august",
"september","october","november","december",
"JAN", "FEB", "MAR", "APR", "MAY", "JUN",
"JUL", "AUG", "SEP", "OCT","NOV", "DEC",
"JANUARY", "FEBRUARY", "MARCH", "APRIL",
"MAY", "JUNE","JULY","AUGUST",
"SEPTEMBER","OCTOBER","NOVEMBER","DECEMBER",
"sept")
romanNumerals <- c("i","ii","iii","iv","v","vi","vii","viii","ix","x","xi","xii",
"I","II","III","IV","V","VI","VII","VIII","IX","X","XI","XII")
numeralConversion <- c("Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct","Nov", "Dec",
"Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct","Nov", "Dec")
##### 2.1 ymd ####
# enter ymd strings...
ymd_strings <- c(
"[0-9]{4}[\\s- /]+[0-9]{2}[\\s- /]+[0-9]{2}",
paste("[0-9]{4}", monthStrings,"[0-9]{2}", collapse = "|", sep = "[\\s-/]+"),
paste("[0-9]{4}", romanNumerals,"[0-9]{2}", collapse = "|", sep = "[\\s-/]+"))
# Extract the matching strings
ymd_unambiguous <- noDATEa %>%
dplyr::mutate(
ymd_vEV = stringr::str_extract(verbatimEventDate,
pattern = paste(ymd_strings,
collapse = "|", sep = "")),
ymd_fieldNotes = stringr::str_extract(fieldNotes,
pattern = paste(ymd_strings,
collapse = "|", sep = "")),
ymd_locationRemarks = stringr::str_extract(locationRemarks,
pattern = paste(ymd_strings,
collapse = "|", sep = ""))
) %>% # END mutate
dplyr::select(database_id,
ymd_vEV,
ymd_fieldNotes,
ymd_locationRemarks)
# FORMAT the ymd_vEV column
ymd_unambiguous$ymd_vEV <- ymd_unambiguous$ymd_vEV %>%
# Remove 00 values to truncate the lubridate
stringr::str_replace(pattern = "/00$|/00/00$", replacement = "") %>%
lubridate::ymd(truncated = 2, quiet = TRUE)
#
# FORMAT the ymd_fieldNotes column
ymd_unambiguous$ymd_fieldNotes <- ymd_unambiguous$ymd_fieldNotes %>%
# Remove 00 values to truncate the lubridate
stringr::str_replace(pattern = "/00$|/00/00$", replacement = "") %>%
lubridate::ymd(truncated = 2, quiet = TRUE)
#
# FORMAT the ymd_locationRemarks column
ymd_unambiguous$ymd_locationRemarks <- ymd_unambiguous$ymd_locationRemarks %>%
# Remove 00 values to truncate the lubridate
stringr::str_replace(pattern = "/00$|/00/00$", replacement = "") %>%
lubridate::ymd(truncated = 2, quiet = TRUE)
#
# Combine the columns
ymd_keepers_21 <- ymd_unambiguous %>%
dplyr::filter(complete.cases(ymd_vEV)|
complete.cases(ymd_locationRemarks) | complete.cases(ymd_fieldNotes)) %>%
tidyr::unite(col = date,
ymd_vEV, ymd_locationRemarks, ymd_fieldNotes, na.rm = TRUE)
# add ymd_keepers_21 at the end
##### 2.2 dmy ####
dmy_strings <- c(
# 12-JUL-2002; 12 Jul 2002; 12/July/2002
paste("[0-9]{1,2}[\\s-/ ]+", monthStrings,"[\\s-/ ]+[0-9]{4}", collapse = "|", sep = ""),
paste("[0-9]{1,2}[\\s-/ ]+", monthStrings,"[\\s-/ ]+[0-9]{2}", collapse = "|", sep = ""),
# 12-XII-2022; 12 XII 2022; 12 xii 2022;
paste("[0-9]{1,2}[\\s-/ ]+", romanNumerals,"[\\s-/ ]+[0-9]{4}", collapse = "|", sep = ""),
paste("[0-9]{1,2}[\\s-/ ]+", romanNumerals,"[\\s-/ ]+[0-9]{2}", collapse = "|", sep = ""),
# >12 <12 1992 - dmy
"([1][3-9]|[2-3][0-9])[\\s-/ ]+([1-9]|1[0-2])[\\s-/ ]+[0-9]{4}",
"([1][3-9]|[2-3][0-9])[\\s-/ ]+([1-9]|1[0-2])[\\s-/ ]+[0-9]{2}"
)
# Extract the matching strings
dmy_unambiguous <- noDATEa %>%
# First, remove the strings matched prior
dplyr::filter(!database_id %in% ymd_keepers_21$database_id) %>%
dplyr::mutate(
dmy_vEV = stringr::str_extract(verbatimEventDate,
pattern = paste(dmy_strings,
collapse = "|", sep = "")),
dmy_locality = stringr::str_extract(locality,
pattern = paste(dmy_strings,
collapse = "|", sep = "")),
dmy_fieldNotes = stringr::str_extract(fieldNotes,
pattern = paste(dmy_strings,
collapse = "|", sep = "")),
dmy_locationRemarks = stringr::str_extract(locationRemarks,
pattern = paste(dmy_strings,
collapse = "|", sep = ""))
) %>% # END mutate
dplyr::select(database_id, dmy_vEV, dmy_locality,
dmy_fieldNotes, dmy_locationRemarks)
# FORMAT the dmy_vEV column
dmy_unambiguous$dmy_vEV <- dmy_unambiguous$dmy_vEV %>%
# Convert roman numerals to readable by lubridate
mgsub::mgsub(
pattern = paste("[-/ ]", romanNumerals, "[ -/]", sep = ""),
replacement = numeralConversion) %>%
# Remove 00 values to truncate the lubridate
stringr::str_replace(pattern = "/00$|/00/00$|^00", replacement = "") %>%
lubridate::dmy(truncated = 2, quiet = TRUE)
#
# FORMAT the dmy_locality column
dmy_unambiguous$dmy_locality <- dmy_unambiguous$dmy_locality %>%
# Convert roman numerals to readable by lubridate
mgsub::mgsub(
pattern = paste("[-/ ]", romanNumerals, "[ -/]", sep = ""),
replacement = numeralConversion) %>%
# Remove 00 values to truncate the lubridate
stringr::str_replace(pattern = "/00$|/00/00$", replacement = "") %>%
lubridate::dmy(truncated = 2, quiet = TRUE)
#
# FORMAT the dmy_fieldNotes column
dmy_unambiguous$dmy_fieldNotes <- dmy_unambiguous$dmy_fieldNotes %>%
# Convert roman numerals to readable by lubridate
mgsub::mgsub(
pattern = paste("[-/ ]", romanNumerals, "[ -/]", sep = ""),
replacement = numeralConversion) %>%
# Remove 00 values to truncate the lubridate
stringr::str_replace(pattern = "/00$|/00/00$", replacement = "") %>%
lubridate::dmy(truncated = 2, quiet = TRUE)
#
# FORMAT the dmy_locationRemarks column
dmy_unambiguous$dmy_locationRemarks <- dmy_unambiguous$dmy_locationRemarks %>%
# Convert roman numerals to readable by lubridate
mgsub::mgsub(
pattern = paste("[-/ ]", romanNumerals, "[ -/]", sep = ""),
replacement = numeralConversion) %>%
# Remove 00 values to truncate the lubridate
stringr::str_replace(pattern = "/00$|/00/00$", replacement = "") %>%
lubridate::dmy(truncated = 2, quiet = TRUE)
#
# Combine the columns
dmy_keepers_22 <- dmy_unambiguous %>%
dplyr::filter(complete.cases(dmy_vEV) |
complete.cases(dmy_locality) |
complete.cases(dmy_locationRemarks) |
complete.cases(dmy_fieldNotes)) %>%
tidyr::unite(col = date,
dmy_vEV, dmy_locality, dmy_locationRemarks, dmy_fieldNotes,
na.rm = TRUE)
# add dmy_keepers_22 at the end
##### 2.3 mdy ####
mdy_strings <- c(
# Aug 2, 2019
paste(monthStrings,"[\\s-/ ]+[0-9]{1,2}[\\s-/, ]+[0-9]{4}", collapse = "|", sep = ""),
paste(monthStrings,"[\\s-/ ]+[0-9]{1,2}[\\s-/, ]+[0-9]{2}", collapse = "|", sep = ""),
# Aug 1-10 2019
paste(monthStrings,"[0-9]+[-\\u2013][0-9]+[\\s-/ ]+[0-9]{4}", collapse = "|", sep = ""),
paste(monthStrings,"[0-9]+[-\\u2013][0-9]+[\\s-/ ]+[0-9]{2}", collapse = "|", sep = ""),
# V. 17 1901
paste(romanNumerals,"[\\s-/\\. ]+[0-9]{1,2}[\\s-/ ]+[0-9]{4}", collapse = "|", sep = ""),
paste(romanNumerals,"[\\s-/\\. ]+[0-9]{1,2}[\\s-/ ]+[0-9]{2}", collapse = "|", sep = ""),
# <12 >12 1992 - mdy
"(1[0-2])[\\s- /]+([2-3][0-9])[\\s- /]+[0-9]{4}",
"(1[0-2])|[\\s-/\\. ][1-9][\\s- /]+([1][3-9])[\\s- /]+[0-9]{4}",
"(1[0-2])|[\\s-/\\. ][1-9][\\s- /]+([2-3][0-9])[\\s- /]+[0-9]{4}",
"(1[0-2])|^[1-9][\\s- /]+([1][3-9])[\\s- /]+[0-9]{4}",
"(1[0-2])|^[1-9][\\s- /]+([2-3][0-9])[\\s- /]+[0-9]{4}",
"(1[0-2])|[\\s-/\\. ][1-9][\\s- /]+([1][3-9])[\\s- /]+[0-9]{2}",
"(1[0-2])|[\\s-/\\. ][1-9][\\s- /]+([2-3][0-9])[\\s- /]+[0-9]{2}",
"(1[0-2])|^[1-9][\\s- /]+([1][3-9])[\\s- /]+[0-9]{2}",
"(1[0-2])|^[1-9][\\s- /]+([2-3][0-9])[\\s- /]+[0-9]{2}",
"(1[0-2])[\\s- /]+([2-3][0-9])[\\s- /]+[0-9]{2}")
# Get the IDs to remove...
id2remove_23 <- c(ymd_keepers_21$database_id, dmy_keepers_22$database_id)
# Extract the matching strings to three columns
mdy_unambiguous <- noDATEa %>%
# First, remove the strings matched prior
dplyr::filter(!database_id %in% id2remove_23) %>%
dplyr::mutate(
mdy_vEV = stringr::str_extract(verbatimEventDate,
pattern = paste(mdy_strings,
collapse = "|", sep = "")),
mdy_locality = stringr::str_extract(locality,
pattern = paste(mdy_strings,
collapse = "|", sep = "")),
mdy_fieldNotes = stringr::str_extract(fieldNotes,
pattern = paste(mdy_strings,
collapse = "|", sep = "")),
mdy_locationRemarks = stringr::str_extract(locationRemarks,
pattern = paste(mdy_strings,
collapse = "|", sep = ""))
) %>% # END mutate
# select a subset of columns
dplyr::select(database_id, mdy_vEV, mdy_locality,
mdy_fieldNotes, mdy_locationRemarks)
# FORMAT the mdy_vEV column
mdy_unambiguous$mdy_vEV <- mdy_unambiguous$mdy_vEV %>%
# Convert roman numerals to readable by lubridate
mgsub::mgsub(
pattern = paste("^",romanNumerals, "( |\\.|-)", sep = ""),
replacement = numeralConversion) %>%
lubridate::mdy(truncated = 2, quiet = TRUE)
#
# FORMAT the mdy_locality column
mdy_unambiguous$mdy_locality <- mdy_unambiguous$mdy_locality %>%
# Convert roman numerals to readable by lubridate
mgsub::mgsub(
pattern = paste("^",romanNumerals, "( |\\.|-)", sep = ""),
replacement = numeralConversion) %>%
stringr::str_replace( pattern = "^The ", replacement = "") %>%
lubridate::mdy(truncated = 2, quiet = TRUE)
#
# FORMAT the mdy_fieldNotes column
mdy_unambiguous$mdy_fieldNotes <- mdy_unambiguous$mdy_fieldNotes %>%
# Convert roman numerals to readable by lubridate
mgsub::mgsub(
pattern = paste("^",romanNumerals, "( |\\.|-)", sep = ""),
replacement = numeralConversion) %>%
lubridate::mdy(truncated = 2, quiet = TRUE)
#
# FORMAT the mdy_locationRemarks column
mdy_unambiguous$mdy_locationRemarks <- mdy_unambiguous$mdy_locationRemarks %>%
# Convert roman numerals to readable by lubridate
mgsub::mgsub(
pattern = paste("^",romanNumerals, "( |\\.|-)", sep = ""),
replacement = numeralConversion) %>%
lubridate::mdy(truncated = 2, quiet = TRUE)
#
# Combine the columns
mdy_keepers_23 <- mdy_unambiguous %>%
dplyr::filter( complete.cases(mdy_vEV) |
complete.cases(mdy_locality) |
complete.cases(mdy_locationRemarks) |
complete.cases(mdy_fieldNotes)) %>%
tidyr::unite(col = date,
mdy_vEV, mdy_locality, mdy_locationRemarks, mdy_fieldNotes,
na.rm = TRUE, sep = "")
# KEEP mdy_keepers_23 at the end
##### 2.4 my ####
my_strings <- c(
# VIII-1946
paste(romanNumerals,"[\\s-/ \\.]+[0-9]{4}", collapse = "|", sep = ""),
# July 1995; July, 1995
paste(monthStrings,"[\\s-/ \\.]+[0-9]{4}", collapse = "|", sep = ""),
paste(monthStrings,"[\\s-/ \\.]+[0-9]{2}", collapse = "|", sep = ""),
# April 1899
paste(monthStrings,"[\\s-/ ]+[0-9]{4}", collapse = "|", sep = ""),
paste(monthStrings,"[\\s-/ ]+[0-9]{2}", collapse = "|", sep = ""),
# 1899 April
paste("[\\s- /]+[0-9]{4}", monthStrings, collapse = "|", sep = ""),
paste("[\\s- /]+[0-9]{2}", monthStrings, collapse = "|", sep = ""),
# 4/1957
"([1-9]|1[0-2])[\\s- /]+[0-9]{4}"
)
# Get the IDs to remove...
id2remove_24 <- c(mdy_keepers_23$database_id, id2remove_23)
# Extract the matching strings to three columns
my_unambiguous <- noDATEa %>%
# First, remove the strings matched prior
dplyr::filter(!database_id %in% id2remove_24) %>%
dplyr::mutate(
my_vEV = stringr::str_extract(verbatimEventDate,
pattern = paste(my_strings,
collapse = "|", sep = "")),
my_locality = stringr::str_extract(locality,
pattern = paste(my_strings,
collapse = "|", sep = "")),
my_fieldNotes = stringr::str_extract(fieldNotes,
pattern = paste(my_strings,
collapse = "|", sep = "")),
my_locationRemarks = stringr::str_extract(locationRemarks,
pattern = paste(my_strings,
collapse = "|", sep = ""))
) %>% # END mutate
# select a subset of columns
dplyr::select(database_id, my_vEV, my_locality,
my_fieldNotes, my_locationRemarks)
# FORMAT the my_vEV column
my_unambiguous$my_vEV <- my_unambiguous$my_vEV %>%
# Convert roman numerals to readable by lubridate
mgsub::mgsub(
pattern = paste("^",romanNumerals, sep = ""),
replacement = numeralConversion) %>%
# format
lubridate::my( quiet = TRUE)
#
# FORMAT the my_locality column
my_unambiguous$my_locality <- my_unambiguous$my_locality %>%
# Convert roman numerals to readable by lubridate
mgsub::mgsub(
pattern = paste("^",romanNumerals, sep = ""),
replacement = numeralConversion) %>%
# format
lubridate::my(quiet = TRUE)
#
# FORMAT the my_fieldNotes column
my_unambiguous$my_fieldNotes <- my_unambiguous$my_fieldNotes %>%
# Convert roman numerals to readable by lubridate
mgsub::mgsub(
pattern = paste("^",romanNumerals, sep = ""),
replacement = numeralConversion) %>%
# format
lubridate::my(quiet = TRUE)
#
# FORMAT the my_locationRemarks column
my_unambiguous$my_locationRemarks <- my_unambiguous$my_locationRemarks %>%
# Convert roman numerals to readable by lubridate
mgsub::mgsub(
pattern = paste("^",romanNumerals, sep = ""),
replacement = numeralConversion) %>%
# format
lubridate::my(quiet = TRUE)
#
# Combine the columns
my_keepers_24 <- my_unambiguous %>%
dplyr::filter(complete.cases(my_vEV) |
complete.cases(my_locality) |
complete.cases(my_locationRemarks) |
complete.cases(my_fieldNotes)) %>%
tidyr::unite(col = date,
my_vEV, my_locality, my_locationRemarks, my_fieldNotes,
na.rm = TRUE, sep = "-")
# Remove double-ups
my_keepers_24$date <- stringr::str_replace(my_keepers_24$date,
pattern = "-[0-9]+-[0-9]+-[0-9]+",
replacement = "")
#### 3.0 Amb. str. dates ####
writeLines(paste(
" - Extracting year from fieldNotes, locationRemarks, and verbatimEventDate ",
"columns in ambiguous formats...", sep = ""))
ambiguousDateStrings <- c(
# dmy or mdy; 10 02 1946
"[0-9]{1,2}[\\s-/ ]+[0-9]{1,2}[\\s-/ ]+[0-9]{4}",
"[0-9]{2}[\\s-/ ]+[0-9]{2}[\\s-/ ]+[0-9]{4}",
"[0-9]{1,2}[\\s-/ ]+[0-9]{1,2}[\\s-/ ]+[0-9]{2}",
"[0-9]{2}[\\s-/ ]+[0-9]{2}[\\s-/ ]+[0-9]{2}"
)
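# Note: these day/month-ambiguous strings (e.g. "10 02 1946") are parsed with
# lubridate::dmy() below, so a day-first order is assumed whenever the order
# cannot be determined from the string itself.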
# Get the IDs to remove...
id2remove_30 <- c(my_keepers_24$database_id, id2remove_24)
# Extract the matching strings to three columns
ambiguousNames <- noDATEa %>%
# First, remove the strings matched prior
dplyr::filter(!database_id %in% id2remove_30) %>%
dplyr::mutate(
amb_vEV = stringr::str_extract(verbatimEventDate,
pattern = paste(ambiguousDateStrings,
collapse = "|", sep = "")),
amb_locality = stringr::str_extract(locality,
pattern = paste(ambiguousDateStrings,
collapse = "|", sep = "")),
amb_fieldNotes = stringr::str_extract(fieldNotes,
pattern = paste(ambiguousDateStrings,
collapse = "|", sep = "")),
amb_locationRemarks = stringr::str_extract(locationRemarks,
pattern = paste(ambiguousDateStrings,
collapse = "|", sep = ""))
) %>% # END mutate
# select a subset of columns
dplyr::select(database_id, amb_vEV, amb_locality,
amb_fieldNotes, amb_locationRemarks)
# FORMAT the amb_vEV column
ambiguousNames$amb_vEV <- ambiguousNames$amb_vEV %>%
# Remove 00 values to truncate the lubridate
stringr::str_replace(pattern = "00[-/ ]00[-/ ]", replacement = "01-01-") %>%
stringr::str_replace(pattern = "00[-/ ]", replacement = "01-") %>%
stringr::str_replace(pattern = "00-00-|[-/ ]00$|[-/ ]00[-/ ]00$|^00|^00[-/ ]00[-/ ]",
replacement = "") %>%
lubridate::dmy(truncated = 2, quiet = TRUE)
# FORMAT the amb_locality column
#
ambiguousNames$amb_locality <- ambiguousNames$amb_locality %>%
# Remove 00 values to truncate the lubridate
stringr::str_replace(pattern = "00[-/ ]00[-/ ]", replacement = "01-01-") %>%
stringr::str_replace(pattern = "00[-/ ]", replacement = "01-") %>%
stringr::str_replace(pattern = "00-00-|[-/ ]00$|[-/ ]00[-/ ]00$|^00|^00[-/ ]00[-/ ]",
replacement = "") %>%
lubridate::dmy(truncated = 2, quiet = TRUE)
# FORMAT the amb_fieldNotes column
ambiguousNames$amb_fieldNotes <- ambiguousNames$amb_fieldNotes %>%
# Remove 00 values to truncate the lubridate
stringr::str_replace(pattern = "00[-/ ]00[-/ ]", replacement = "01-01-") %>%
stringr::str_replace(pattern = "00[-/ ]", replacement = "01-") %>%
stringr::str_replace(pattern = "00-00-|[-/ ]00$|[-/ ]00[-/ ]00$|^00|^00[-/ ]00[-/ ]",
replacement = "") %>%
lubridate::dmy(truncated = 2, quiet = TRUE)
#
# FORMAT the amb_locationRemarks column
ambiguousNames$amb_locationRemarks <- ambiguousNames$amb_locationRemarks %>%
# Remove 00 values to truncate the lubridate
stringr::str_replace(pattern = "00[-/ ]00[-/ ]", replacement = "01-01-") %>%
stringr::str_replace(pattern = "00[-/ ]", replacement = "01-") %>%
stringr::str_replace(pattern = "00-00-|[-/ ]00$|[-/ ]00[-/ ]00$|^00|^00[-/ ]00[-/ ]",
replacement = "") %>%
lubridate::dmy(truncated = 2, quiet = TRUE)
# Combine the columns
amb_keepers_30 <- ambiguousNames %>%
dplyr::filter(complete.cases(amb_vEV)|
complete.cases(amb_locality) |
complete.cases(amb_locationRemarks) |
complete.cases(amb_fieldNotes)) %>%
tidyr::unite(col = date, amb_vEV,
amb_locality, amb_locationRemarks, amb_fieldNotes,
na.rm = TRUE)
# KEEP amb_keepers_30 at the end
#### 4.0 Format+combine ####
writeLines(paste(
" - Formating and combining the new data..", sep = ""))
##### 4.1 formatting... ####
# Extract only the date from occYr_2
occYr_2$date <- as.character(occYr_2$date) %>% lubridate::ymd_hms() %>% lubridate::date()
# Set as date format...
ymd_keepers_21$date <- lubridate::ymd(ymd_keepers_21$date)
dmy_keepers_22$date <- lubridate::ymd(dmy_keepers_22$date)
mdy_keepers_23$date <- lubridate::ymd(mdy_keepers_23$date)
my_keepers_24$date <- lubridate::ymd(my_keepers_24$date)
# merge these data...
saveTheDates <- dplyr::bind_rows(ymd_hms_0, dmy_1, occYr_2,
ymd_keepers_21, dmy_keepers_22, mdy_keepers_23) %>%
dplyr::select(database_id, date)
##### 4.2 Full dates ####
# Join these dates to the original rows...
datesOut_full <- data %>%
dplyr::right_join(saveTheDates,
by = "database_id")
# Fill the eventDate column
datesOut_full$eventDate <- lubridate::ymd(datesOut_full$date)
# Fill the year, month, and day columns
datesOut_full$year <- lubridate::year(datesOut_full$date)
datesOut_full$month <- lubridate::month(datesOut_full$date)
datesOut_full$day <- lubridate::day(datesOut_full$date)
# Remove records with non-sensical years
datesOut_full <- datesOut_full %>%
# remove FUTURE dates
dplyr::filter(!year > maxYear) %>%
# Remove PAST dates
dplyr::filter(!year < minYear) %>%
# remove the date column
dplyr::select(!date)
##### 4.3 No day ####
# Join these dates to the original rows...
datesOut_noDay <- data %>%
dplyr::right_join(my_keepers_24,
by = "database_id")
# Fill the eventDate column
datesOut_noDay$eventDate <- lubridate::ymd(datesOut_noDay$date, quiet = TRUE)
# Fill the year, month, and day columns
datesOut_noDay$year <- lubridate::year(datesOut_noDay$date)
datesOut_noDay$month <- lubridate::month(datesOut_noDay$date)
# Remove records with non-sensical years
datesOut_noDay <- datesOut_noDay %>%
# remove FUTURE dates
dplyr::filter(!year > maxYear) %>%
# Remove PAST dates
dplyr::filter(!year < minYear) %>%
# remove the date column
dplyr::select(!date)
##### 4.4 No month ####
# Join these dates to the original rows...
datesOut_noMonth <- data %>%
dplyr::right_join(amb_keepers_30,
by = "database_id")
# Fill the eventDate column
datesOut_noMonth$eventDate <- lubridate::ymd(datesOut_noMonth$date, quiet = TRUE)
# Fill the year, month, and day columns
datesOut_noMonth$year <- lubridate::year(datesOut_noMonth$date)
# Remove records with non-sensical years
datesOut_noMonth <- datesOut_noMonth %>%
# remove FUTURE dates
dplyr::filter(!year > maxYear) %>%
# Remove PAST dates
dplyr::filter(!year < minYear) %>%
# remove the date column
dplyr::select(!date)
#### 5.0 Merge ####
writeLines(paste(
" - Merging all data, nearly there...", sep = ""))
# Get all of the changed rows together
datesMerged <- dplyr::bind_rows(
datesOut_full, datesOut_noDay, datesOut_noMonth)
# Format the original eventDate column into a new sheet - datesOut
datesOut <- data
datesOut$eventDate <- lubridate::ymd_hms(datesOut$eventDate,
truncated = 5, quiet = TRUE)
# Replace these in the original dataset
datesOut <- datesOut %>%
# Remove the dates that are to be replaced
dplyr::filter(!database_id %in% datesMerged$database_id)
# Extract year, month, and day where possible
# year
datesOut$year <- ifelse(is.na(datesOut$year),
lubridate::year(datesOut$eventDate),
datesOut$year)
# month
datesOut$month <- ifelse(is.na(datesOut$month),
lubridate::month(datesOut$eventDate),
datesOut$month)
# day
datesOut$day <- ifelse(is.na(datesOut$day),
lubridate::day(datesOut$eventDate),
datesOut$day)
# Remove the months and days where the year is incorrect.
datesOut$month <- ifelse(datesOut$year > maxYear | datesOut$year < minYear,
NA,
datesOut$month)
datesOut$day <- ifelse(datesOut$year > maxYear | datesOut$year < minYear,
NA,
datesOut$day)
# Remove non-sensical years now
datesOut$year <- ifelse(datesOut$year > maxYear | datesOut$year < minYear,
NA,
datesOut$year)
# Now check and replace the eventDate column if it's out of range
datesOut$eventDate <- ifelse(lubridate::year(datesOut$eventDate) > maxYear |
lubridate::year(datesOut$eventDate) < minYear,
NA,
as.character(datesOut$eventDate))
# For simplicity's sake, return the date columns as character...
datesOut$eventDate <- as.character(datesOut$eventDate)
datesMerged$eventDate <- as.character(datesMerged$eventDate)
# MERGE all datasets
dates_complete <- data %>%
dplyr::mutate(eventDate = as.character(eventDate)) %>%
# REMOVE the meddled-with rows
dplyr::filter(!database_id %in% c( datesMerged$database_id, datesOut$database_id)) %>%
# Merge the new rows back in
dplyr::bind_rows(datesMerged, datesOut)
# Return to date format <3
dates_complete$eventDate <- lubridate::ymd_hms(dates_complete$eventDate,
truncated = 5, quiet = TRUE)
# Plot the dates
# graphics::hist(dates_complete$eventDate, breaks = 100,
# main = "Histogram of eventDate output")
timeEnd <- Sys.time()
# Return user output
writeLines(
paste(
" - Finished. \n",
"We now have ",
format((sum(complete.cases(dates_complete$eventDate)) - originalDateCount),
big.mark = ","),
" more full eventDate cells than in the input data.\n",
"We modified dates in \n",
format(nrow(datesMerged), big.mark = ","), " occurrences.\n",
" - As it stands, there are ",
format( sum(complete.cases(dates_complete$eventDate)), big.mark = ","),
" complete eventDates and ",
format( sum(is.na(dates_complete$eventDate)), big.mark = ","),
" missing dates.\n",
" - There are also ",
format( sum(complete.cases(dates_complete$year)), big.mark = ","),
" complete year occurrences to filter from. This is up from an initial count of ",
format( sum(complete.cases(data$year)), big.mark = ","),
" At this rate, you will stand to lose ",
format( sum(is.na(dates_complete$year)), big.mark = ","),
" occurrences on the basis of missing",
" year",
" - Operation time: ", (timeEnd - timeStart)," ",
units(round(timeEnd - timeStart, digits = 2)),
sep = "")
)
return(dates_complete)
}
#### End of file: dateFindR.R ####
# This function was written by James B Dorey on the 20th of February 2023 to remove too-simple
# id codes from occurrences. It is intended to be used internally within the jbd_dupeSummary function
#' @importFrom dplyr %>%
#'
deSimplifieR <- function(inputData = NULL,
characterThreshold = 2,
numberThreshold = 3,
numberOnlyThreshold = 5)
{
requireNamespace("dplyr")
#### 1.0 Remove simple strings ####
# Remove simple codes, only do for each column if that column exists
inputData <- inputData %>%
dplyr::mutate(
if("occurrenceID" %in% colnames(inputData)){
occurrenceID = dplyr::if_else( stringr::str_count(occurrenceID, "[A-Za-z]") >= characterThreshold &
stringr::str_count(occurrenceID, "[0-9]") >= numberThreshold |
stringr::str_count(occurrenceID, "[0-9]") >= numberOnlyThreshold,
occurrenceID, NA_character_)},
if("recordId" %in% colnames(inputData)){
recordId = dplyr::if_else( stringr::str_count(recordId, "[A-Za-z]") >= characterThreshold &
stringr::str_count(recordId, "[0-9]") >= numberThreshold |
stringr::str_count(recordId, "[0-9]") >= numberOnlyThreshold,
recordId, NA_character_)},
if("id" %in% colnames(inputData)){
id = dplyr::if_else( stringr::str_count(id, "[A-Za-z]") >= characterThreshold &
stringr::str_count(id, "[0-9]") >= numberThreshold |
stringr::str_count(id, "[0-9]") >= numberOnlyThreshold,
id, NA_character_)},
if("catalogNumber" %in% colnames(inputData)){
catalogNumber = dplyr::if_else( stringr::str_count(catalogNumber, "[A-Za-z]") >= characterThreshold &
stringr::str_count(catalogNumber, "[0-9]") >= numberThreshold |
stringr::str_count(catalogNumber, "[0-9]") >= numberOnlyThreshold,
catalogNumber, NA_character_)},
if("otherCatalogNumbers" %in% colnames(inputData)){
otherCatalogNumbers = dplyr::if_else( stringr::str_count(otherCatalogNumbers, "[A-Za-z]") >= characterThreshold &
stringr::str_count(otherCatalogNumbers, "[0-9]") >= numberThreshold |
stringr::str_count(otherCatalogNumbers, "[0-9]") >= numberOnlyThreshold,
otherCatalogNumbers, NA_character_)})
return(inputData)
}
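# Illustrative note on the default thresholds (a sketch based on the code above,
# with made-up example codes): an identifier is kept only if it holds at least 2
# letters AND 3 digits (e.g. "AM12345"), or at least 5 digits in total (e.g.
# "1234567"); simpler codes such as "Ab12" or "7" are set to NA, presumably so
# that they do not generate spurious matches during duplicate detection.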
#### End of file: deSimplifieR.R ####
# This function was written by James B Dorey to identify occurrence records with potential fill-down
# errors in the decimalLatitude and decimalLongitude columns. This function was written between
# the 27th and 28th of May 2022. Please contact James at jbdorey[at]me.com with questions if needed.
#' Find fill-down errors
#'
#' A simple function that looks for potential latitude and longitude fill-down errors by
#' identifying consecutive occurrences with coordinates at regular intervals. This is accomplished
#' by using a sliding window with the length determined by minRepeats.
#'
#' The sliding window (and hence fill-down errors) will only be examined
#' within the user-defined groupingColumns; if any of those
#' columns are empty, that record will be excluded.
#'
#' @param data A data frame or tibble. Occurrence records as input.
#' @param minRepeats Numeric. The minimum number of lat or lon repeats needed to flag a record
#' @param groupingColumns Character. The column(s) to group the analysis by and search for fill-down
#' errors within. Default = c("eventDate", "recordedBy", "datasetName").
#' @param ndec Numeric. The number of decimal places below which records will not be considered
#' in the diagonAlley function. This is fed into [BeeBDC::jbd_coordinates_precision()]. Default = 3.
#' @param stepSize Numeric. The number of occurrences to process in each chunk. Default = 1000000.
#' @param mc.cores Numeric. If > 1, the function will run in parallel
#' using mclapply using the number of cores specified. If = 1 then it will be run using a serial
#' loop. NOTE: Windows machines must use a value of 1 (see ?parallel::mclapply). Additionally,
#' be aware that each thread can use large chunks of memory.
#' Default = 1.
#'
#' @return The function returns the input data with a new column, .sequential, where FALSE =
#' records that form part of a run of consecutive, evenly-spaced latitudes or longitudes at least
#' as long as the user-defined minRepeats threshold.
#' @export
#'
#' @importFrom dplyr %>%
#' @importFrom stats complete.cases
#'
#' @examples
#' # Read in the example data
#' data(beesRaw)
#' # Run the function
#' beesRaw_out <- diagonAlley(
#' data = beesRaw,
#' # The minimum number of repeats needed to find a sequence in for flagging
#' minRepeats = 4,
#' groupingColumns = c("eventDate", "recordedBy", "datasetName"),
#' ndec = 3,
#' stepSize = 1000000,
#' mc.cores = 1)
#'
#'
diagonAlley <- function(
data = NULL,
minRepeats = NULL,
groupingColumns = c("eventDate", "recordedBy", "datasetName"),
ndec = 3,
stepSize = 1000000,
mc.cores = 1
){
# locally bind variables to the function
eventDate<-recordedBy<-decimalLatitude<-decimalLongitude<-database_id<-.data<-leadingLat<-
laggingLat<-diffLead_Lat<-diffLag_Lat<-diffLat<- . <- NULL
.rou <- leadingLon <- laggingLon <- diffLead_Lon <- diffLag_Lon <- diffLon <- NULL
#### 0.0 Warnings ####
if(is.null(data)){
stop("\n - Please provide an argument for data. I'm a program, not a magician.")
}
if(is.null(minRepeats)){
warning("\n - minRepeats not provided. Using default value of four")
minRepeats = 4
}
if(is.null(groupingColumns)){
warning("\n - groupingColumns not provided. Using the default of, eventDate, recordedBy, ",
"and datasetName.")
groupingColumns <- c("eventDate", "recordedBy", "datasetName")
}
#### 1.0 prepare data ####
startTime <- Sys.time()
##### 1.1 ndec ####
# If an ndec is provided, then filter to remove decimal places lower than ndec
if(!is.null(ndec)){
writeLines("Removing rounded coordinates with BeeBDC::jbd_coordinates_precision...")
runningData <- data %>%
BeeBDC::jbd_coordinates_precision(
data = .,
lon = "decimalLongitude",
lat = "decimalLatitude",
ndec = ndec,
quieter = TRUE) %>%
dplyr::filter(!.rou == FALSE) %>%
dplyr::select(!.rou)
}else{
runningData <- data
}
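# For example, with the default ndec = 3, coordinates reported to fewer than three
# decimal places (e.g. lat 10.25, lon 151.3) are excluded from the sequence search,
# as low-precision coordinates can fall at regular intervals by chance.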
##### 1.2 Initial filtering and prep ####
runningData <- runningData %>%
# Select fewer columns to make it easier on the old computer
dplyr::select(database_id, decimalLatitude, decimalLongitude,
tidyselect::all_of(groupingColumns)) %>%
# Remove incomplete values
tidyr::drop_na( tidyselect::all_of(groupingColumns)) %>%
tidyr::drop_na(decimalLatitude, decimalLongitude) %>%
# Group the data by eventDate and recordedBy
dplyr::group_by( dplyr::across(tidyselect::all_of(groupingColumns))) %>%
# Arrange from biggest to lowest decimalLatitude and then decimalLongitude by grouping
dplyr::arrange(dplyr::desc(decimalLatitude), dplyr::desc(decimalLongitude),
.by_group = TRUE) %>%
# Remove duplicate lat and longs
#dplyr::distinct(decimalLatitude, decimalLongitude, .keep_all = TRUE) %>%
# Select those groups with four or more occurrences
dplyr::filter(dplyr::n() >= minRepeats)
#### 2.0 Identify sequences ####
if(nrow(runningData) > 0){
##### 2.1 Create function ####
# Set up the loop function
LatLonFun <- function(funData){
for(i in 1:length(funData)){
# Run the sliding window
for(j in 1:(nrow(funData) - minRepeats+1)){
# select a window of rows with length equal to minRepeats
windowj <- funData[j:(j+minRepeats-1),]
# If all differences are equal, then add to a running list of database_ids
if(all(windowj$diff == windowj$diff[1])){
flaggedRecords <- flaggedRecords %>%
dplyr::bind_rows(windowj %>%
dplyr::select(database_id) )} # END if statement
} # END J loop
# Keep distinct flaggedRecords
# Run distinct every 1000th iteration, or at the end
if(i %in% seq(0, length(funData), 1000) |
i == length(funData)){
flaggedRecords <- flaggedRecords %>%
dplyr::distinct(.keep_all = TRUE)}
} # END i loop
return(flaggedRecords)
}# End LatLonFun
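# As an illustration of the sliding window above: with minRepeats = 4, a group
# containing latitudes 10.4, 10.3, 10.2, 10.1 (a constant step of 0.1 between
# consecutive, ordered values) would have all four records added to flaggedRecords,
# whereas irregularly spaced values would not be flagged.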
##### 2.2 Lat sequences ####
writeLines(" - Starting the latitude sequence...")
# Find the groups where ALL of the differences between values is the same (is.sequential)
# Return their database_id
runningData_Lat <- runningData %>%
# Sort
dplyr::arrange(dplyr::desc(.data$decimalLatitude), .by_group = TRUE) %>%
dplyr::distinct(dplyr::across(c(decimalLongitude, decimalLatitude,
tidyselect::all_of(groupingColumns))), .keep_all = TRUE) %>%
# Add leading columns with the value of the next one
dplyr::mutate(leadingLat = dplyr::lag(decimalLatitude)) %>%
dplyr::mutate(laggingLat = dplyr::lead(decimalLatitude)) %>%
# Add new new columns with the difference
dplyr::mutate(diffLead_Lat = (decimalLatitude - leadingLat)) %>%
dplyr::mutate(diffLag_Lat = (decimalLatitude - laggingLat)) %>%
# COMBINE these columns so that they are all complete from lead AND lag (no NAs)
dplyr::mutate(diff = dplyr::if_else(is.na(diffLead_Lat),
-diffLag_Lat,
diffLead_Lat),
diff = diff %>% round(digits = 9)) %>%
# Remove extra columns
dplyr::select(!c(leadingLat, laggingLat, diffLead_Lat, diffLag_Lat)) %>%
# Remove groups below the threshold
# Group by lat and lon and the groupingColumns
dplyr::group_by(dplyr::across(tidyselect::all_of(groupingColumns))) %>%
dplyr::filter(!dplyr::n() < minRepeats)
# Re-join with the runningData and match up duplicate lat/lon within groups and assign the same
# diff values
runningData_Lat <- runningData_Lat %>%
dplyr::bind_rows(runningData %>%
dplyr::filter(!database_id %in% runningData_Lat$database_id)) %>%
# Group by lat and lon and the groupingColumns
dplyr::group_by(decimalLatitude, decimalLongitude,
dplyr::across(tidyselect::all_of(groupingColumns))) %>%
dplyr::arrange(decimalLatitude) %>%
# Assign matching occurrences to the same diff number so that they will also be flagged
dplyr::mutate(diff = diff[[1]]) %>%
tidyr::drop_na(diff) %>%
dplyr::filter(!diff == 0) %>%
dplyr::ungroup()
# Turn each of the groups into its own tibble within a list
runningData_LatGrp <- runningData_Lat %>%
# Re-group by the groupingColumns and then filter to those that pass the minimum repeats
dplyr::group_by( dplyr::across(tidyselect::all_of(groupingColumns)), .add = TRUE) %>%
dplyr::filter(dplyr::n() >= minRepeats) %>%
dplyr::mutate(diff = diff %>% as.character()) %>%
# Split groups into a list
dplyr::group_split()
# Remove the spent dataset
rm(runningData_Lat)
# Remove excess columns from list
runningData_LatGrp <- lapply(runningData_LatGrp, function(x) x[(names(x) %in% c("database_id", "diff"))])
# Set up a tibble for the flagged records
flaggedRecords <- dplyr::tibble()
# Run the loop function in parallel
flagRecords_Lat <- runningData_LatGrp %>%
parallel::mclapply(LatLonFun, mc.cores = mc.cores) %>%
# Re-bind the list elements
dplyr::bind_rows()
# Remove the spent dataset
rm(runningData_LatGrp)
##### 2.3 Lon sequences ####
writeLines(" - Starting the longitude sequence...")
# Find the groups where ALL of the differences between values is the same (is.sequential)
# Return their database_id
runningData_Lon <- runningData %>%
# Sort
dplyr::arrange(dplyr::desc(.data$decimalLongitude), .by_group = TRUE) %>%
dplyr::distinct(dplyr::across(c(decimalLongitude, decimalLatitude,
tidyselect::all_of(groupingColumns))), .keep_all = TRUE) %>%
# Add leading columns with the value of the next one
dplyr::mutate(leadingLon = dplyr::lag(decimalLongitude)) %>%
dplyr::mutate(laggingLon = dplyr::lead(decimalLongitude)) %>%
# Add new new columns with the difference
dplyr::mutate(diffLead_Lon = (decimalLongitude - leadingLon)) %>%
dplyr::mutate(diffLag_Lon = (decimalLongitude - laggingLon)) %>%
# COMBINE these columns so that they are all complete from lead AND lag (no NAs)
dplyr::mutate(diff = dplyr::if_else(is.na(diffLead_Lon),
-diffLag_Lon,
diffLead_Lon),
diff = diff %>% round(digits = 9)) %>%
# Remove extra columns
dplyr::select(!c(leadingLon, laggingLon, diffLead_Lon, diffLag_Lon)) %>%
# Remove groups below the threshold
# Group by lat and lon and the groupingColumns
dplyr::group_by(dplyr::across(tidyselect::all_of(groupingColumns))) %>%
dplyr::filter(!dplyr::n() < minRepeats)
# Re-join with the runningData and match up duplicate lat/lon within groups and assign the same
# diffLon values
runningData_Lon <- runningData_Lon %>%
dplyr::bind_rows(runningData %>%
dplyr::filter(!database_id %in% runningData_Lon$database_id)) %>%
# Group by lat and lon and the groupingColumns
dplyr::group_by(decimalLatitude, decimalLongitude,
dplyr::across(tidyselect::all_of(groupingColumns))) %>%
dplyr::arrange(decimalLongitude) %>%
# Assign matching occurrences to the same diffLon number so that they will also be flagged
dplyr::mutate(diff = diff[[1]]) %>%
tidyr::drop_na(diff) %>%
dplyr::filter(!diff == 0) %>%
dplyr::ungroup()
# Turn each of the groups into its own tibble within a list
runningData_LonGrp <- runningData_Lon %>%
# Re-group by the groupingColumns and then filter to those that pass the minimum repeats
dplyr::group_by( dplyr::across(tidyselect::all_of(groupingColumns)), .add = TRUE) %>%
dplyr::filter(dplyr::n() >= minRepeats) %>%
dplyr::mutate(diff = diff %>% as.character()) %>%
# Split groups into a list
dplyr::group_split()
# Remove the spent dataset
rm(runningData_Lon)
# Remove excess columns from list
runningData_LonGrp <- lapply(runningData_LonGrp, function(x) x[(names(x) %in% c("database_id", "diff"))])
# Run the loop function in parallel
flagRecords_Lon <- runningData_LonGrp %>%
parallel::mclapply(LatLonFun, mc.cores = mc.cores) %>%
# Re-bind the list elements
dplyr::bind_rows()
##### 2.4 Merge lat lon ####
# Merge the minor runs
flagRecords <- dplyr::bind_rows(flagRecords_Lat, flagRecords_Lon) %>%
dplyr::distinct()
}else{
flagRecords = dplyr::tibble(database_id = NA_character_)
} # END nrow(runningData) > 0
# Remove the spent dataset
rm(runningData_LonGrp)
#### 3.0 Merge ####
writeLines(" - Merging results and adding the .sequential column...")
# Add a new column called .sequential to flag sequential lats and longs as FALSE
data <- data %>%
dplyr::mutate(.sequential = !database_id %in% flagRecords$database_id)
# Use output
message("\ndiagonAlley:\nFlagged ",
format(sum(data$.sequential == FALSE, na.rm = TRUE), big.mark = ","),
" records\nThe .sequential column was added to the database.\n")
# Return runtime
endTime <- Sys.time()
message(paste(
" - Completed in ",
round(difftime(endTime, startTime), digits = 2 )," ",
units(round(endTime - startTime, digits = 2)),
sep = ""))
return(data)
}# END function
#### End of file: diagonAlley.R ####
# This function was written by James B Dorey on the 5th of October 2022
# Its purpose is to create a list of directories for beeData cleaning
# Please contact jbdorey[at]me.com for help
#' Set up global directory paths and create folders
#'
#' This function sets up a directory for saving outputs (i.e. data, figures) generated through the
#' use of the BeeBDC package, if the required folders do not already exist.
#'
#' @param RootPath A character String. The `RootPath` is the base path for your project, and all
#' other paths should ideally be located within the `RootPath`. However, users may specify paths not
#' contained in the RootPath
#' @param ScriptPath A character String. The `ScriptPath` is the path to any additional functions
#' that you would like to read in for use with BeeBDC.
#' @param DataPath A character string. The path to the folder containing bee occurrence data
#' to be flagged and/or cleaned
#' @param DataSubPath A character String. If a `DataPath` is not provided, this will be used as the `DataPath`
#' folder name within the `RootPath`. Default is "/Data_acquisition_workflow".
#' @param DiscLifePath A character String. The path to the folder which contains data from Ascher
#' and Pickering's Discover Life website.
#' @param OutPath A character String. The path to the folder where output data will be saved.
#' @param OutPathName A character String. The name of the `OutPath` subfolder located within the
#' `RootPath.` Default is "Output".
#' @param RDoc A character String. The path to the current script or report, relative to the project
#' root. Passing an absolute path raises an error. This argument is used by [here::i_am()] and incorrectly
#' setting this may result in `bdc` figures being saved to your computer's root directory.
#' @param Report Logical. If TRUE, function creates a "Report" folder within the OutPath-defined
#' folder. Default = TRUE.
#' @param Check Logical. If TRUE, function creates a "Check" folder within the OutPath-defined
#' folder. Default = TRUE.
#' @param Figures Logical. If TRUE, function creates a "Figures" folder within the OutPath-defined
#' folder. Default = TRUE.
#' @param Intermediate Logical. If TRUE, function creates a "Intermediate" folder within the
#' OutPath-defined folder in which to save intermediate datasets. Default = TRUE.
#' @param useHere Logical. If TRUE, dirMaker will use [here::i_am()] to declare the relative path
#' to 'RDoc'. This is aimed at preserving some functionality with where bdc saves summary figures
#' and tables. Default = TRUE.
#'
#'
#' @return Results in the generation of a list containing the BeeBDC-required directories in your global
#' environment. This function should be run at the start of each session. Additionally, this
#' function will create the BeeBDC-required folders if they do not already exist in the supplied
#' directory
#'
#' @importFrom here i_am here
#' @importFrom dplyr %>%
#'
#' @export
#'
#' @examples
#' # load dplyr
#' library(dplyr)
#' # Standard/basic usage:
#' RootPath <- tempdir()
#' dirMaker(
#' RootPath = RootPath,
#' # Input the location of the workflow script RELATIVE to the RootPath
#' RDoc = NULL,
#' useHere = FALSE) %>%
#' # Add paths created by this function to the environment()
#' list2env(envir = environment())
#'
#' # Custom OutPathName provided
#' dirMaker(
#' RootPath = RootPath,
#' # Set some custom OutPath info
#' OutPath = NULL,
#' OutPathName = "T2T_Output",
#' # Input the location of the workflow script RELATIVE to the RootPath
#' RDoc = NULL,
#' useHere = FALSE) %>%
#' # Add paths created by this function to the environment()
#' list2env(envir = environment())
#' # Set the working directory
#'
#' # Further customisations are also possible
#' dirMaker(
#' RootPath = RootPath,
#' ScriptPath = "...path/Bee_SDM_paper/BDC_repo/BeeBDC/R",
#' DiscLifePath = "...path/BDC_repo/DiscoverLife_Data",
#' OutPathName = "AsianPerspective_Output",
#' # Input the location of the workflow script RELATIVE to the RootPath
#' RDoc = NULL,
#' useHere = FALSE) %>%
#' # Add paths created by this function to the environment()
#' list2env(envir = environment())
#'
#'
#'
dirMaker <- function(
RootPath = RootPath,
ScriptPath = NULL,
DataPath = NULL,
DataSubPath = "/Data_acquisition_workflow",
DiscLifePath = NULL,
OutPath = NULL,
OutPathName = "Output",
Report = TRUE,
Check = TRUE,
Figures = TRUE,
Intermediate = TRUE,
RDoc = NULL,
useHere = TRUE
){
# Ensure that the working directory remains the same on exit
oldwd <- getwd() # code line i
on.exit(setwd(oldwd)) # code line i+1
#### 0.0 Prep ####
##### 0.1 errors ####
###### a. FATAL errors ####
if(is.null(RootPath)){
stop(paste0(" - No RootPath was given. Please specifcy the root drectory that you want to use ",
"for your data-cleaning adventures. I'll do the rest."))
}
if(is.null(RDoc) & useHere == TRUE){
stop(paste0(" - Please provide a path for RDoc with useHere = TRUE.",
" This path MUST be relative to the RootPath. ",
"Hence, if the RootPath is '/user/home/', and the path to the RDoc is ",
"'/user/home/beeData/cleaningWorkflow.R', ",
"then RDoc == 'beeData/cleaningWorkflow.R"))
}
# Set the working directory
setwd(RootPath)
#### 1.0 Set paths ####
##### 1.1 ScriptPath ####
# Create the ScriptPath if it does not already exist
if(is.null(ScriptPath)){
if (!dir.exists(paste0(RootPath, "/BDC_repo/BeeBDC/R"))) {
dir.create(paste0(RootPath, "/BDC_repo/BeeBDC/R"), recursive = TRUE)
message(paste0(" - We created the ",
paste0(RootPath, "/BDC_repo/BeeBDC/R"),
"file. This file needs to have the NewFunctions added to it otherise things won't",
" work. These can be added from our GitHub"))
}
# Choose the location of the script
ScriptPath <- paste(RootPath,
"/BDC_repo/BeeBDC/R", sep = "")
}else{# If user provides an alternate ScriptRoot path
if(ScriptPath != FALSE){
if (!dir.exists(ScriptPath)) {
dir.create(ScriptPath, recursive = TRUE)
message(paste0(" - We created the ",
ScriptPath,
"file. This file needs to have the NewFunctions added to it otherise things won't",
" work. These can be added from our GitHub"))
}
# Choose the location of the script
ScriptPath <- ScriptPath}
}
##### 1.2 DataPath ####
# Create the DataPath if it does not already exist
if(is.null(DataPath)){
if (!dir.exists(paste0(RootPath, DataSubPath))) {
dir.create(paste0(RootPath, DataSubPath), recursive = TRUE)
# User message
message(paste0(" - We created the ",
paste0(RootPath, DataSubPath),
"file. This file needs to have the occurrence data that you want to use ",
"added to it otherise things won't",
" work. Please choose this data or download it from the supp. materials of our paper"))
}
# Choose the location of your data
DataPath <- paste(RootPath, DataSubPath, sep = "")
}else{
if(DataPath != FALSE){
if (!dir.exists(DataPath)) {
dir.create(DataPath, recursive = TRUE)
# User message
message(paste0(" - We created the ",
DataPath,
"file. This file needs to have the occurrence data that you want to use ",
"added to it otherise things won't",
" work. Please choose this data or download it from the supp. materials of our paper"))
}
# Choose the location of the DataPath
DataPath <- DataPath}
}
##### 1.3 DiscLifePath ####
# Create the DiscLifePath if it does not already exist
if(is.null(DiscLifePath) ){
    if (!dir.exists(paste0(RootPath, "/BDC_repo/DiscoverLife_Data"))) {
      dir.create(paste0(RootPath, "/BDC_repo/DiscoverLife_Data"), recursive = TRUE)
      # User message
      message(paste0(" - We created the ",
                     paste0(RootPath, "/BDC_repo/DiscoverLife_Data"),
                     " folder. This folder needs to have the DiscoverLife_Data added to it otherwise things won't",
                     " work. These can be added from our GitHub"))
    }
# Choose the taxonomy path
DiscLifePath <- paste(RootPath,
"/BDC_repo/DiscoverLife_Data", sep = "")
}else{
if(DiscLifePath != FALSE){
      if (!dir.exists(DiscLifePath)) {
        dir.create(DiscLifePath, recursive = TRUE)
        # User message
        message(paste0(" - We created the ",
                       DiscLifePath,
                       " folder. This folder needs to have the DiscoverLife_Data added to it otherwise things won't",
                       " work. These can be added from our GitHub"))
      }
# Choose the location of the DiscLifePath
DiscLifePath <- DiscLifePath}
}
##### 1.4 OutPath ####
# Create the OutPath if it does not already exist
if(is.null(OutPath)){
    if (!dir.exists(paste0(DataPath, "/", OutPathName))) {
      dir.create(paste0(DataPath, "/", OutPathName), recursive = TRUE)
      # User message
      message(paste0(" - We created the ",
                     paste0(DataPath, "/", OutPathName),
                     " folder."))
    }
    # Choose the output path
OutPath <- paste(DataPath, "/", OutPathName, sep = "")
}else{
if(OutPath != FALSE){
      if (!dir.exists(OutPath)) {
        dir.create(OutPath, recursive = TRUE)
        # User message
        message(paste0(" - We created the ",
                       OutPath,
                       " folder."))
      }
# Choose the location of the OutPath
OutPath <- OutPath}
}
#### 2.0 Create paths ####
# If these bdc folders do not exist in the chosen directory, create them
if (!dir.exists(paste0(OutPath, "/Check")) & Check != FALSE) {
dir.create(paste0(OutPath, "/Check"), recursive = TRUE)
}
if (!dir.exists(paste0(OutPath, "/Figures")) & Figures != FALSE) {
dir.create(paste0(OutPath, "/Figures"), recursive = TRUE)
}
if (!dir.exists(paste0(OutPath, "/Intermediate")) & Intermediate != FALSE) {
dir.create(paste0(OutPath, "/Intermediate"), recursive = TRUE)
}
if (!dir.exists(paste0(OutPath, "/Report")) & Report != FALSE) {
dir.create(paste0(OutPath, "/Report"), recursive = TRUE)
}
##### 2.1 Make sub-paths ####
if(Check != FALSE){
OutPath_Check <- paste(OutPath, "/Check", sep = "")}
if(Figures != FALSE){
OutPath_Figures <- paste(OutPath, "/Figures", sep = "")}
if(Intermediate != FALSE){
OutPath_Intermediate <- paste(OutPath, "/Intermediate", sep = "")}
if(Report != FALSE){
OutPath_Report <- paste(OutPath, "/Report", sep = "")}
#### 3.0 Set here::here ####
# here::here needs to know where to find all output files. This is set here.
if(useHere == TRUE){
here::i_am(RDoc)}
#### 4.0 Output ####
return(list(ScriptPath, DataPath, DiscLifePath, OutPath,
OutPath_Check, OutPath_Figures, OutPath_Intermediate, OutPath_Report) %>%
stats::setNames(c("ScriptPath", "DataPath", "DiscLifePath", "OutPath",
"OutPath_Check", "OutPath_Figures", "OutPath_Intermediate",
"OutPath_Report")))
} # END function
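# A brief sketch (not run) of how the returned paths could be accessed without list2env(); the
# element names match the list returned above and the RootPath value is purely illustrative:
# paths <- dirMaker(RootPath = tempdir(), RDoc = NULL, useHere = FALSE)
# paths$OutPath_Figures    # the folder where figures would be written
# paths$DataPath           # the folder expected to hold the occurrence data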
|
/scratch/gouwar.j/cran-all/cranData/BeeBDC/R/dirMaker.R
|
# This function was written by James B Dorey on the 29th of September 2022
# Its purpose is to visualise duplicate occurrence data by using a compound bargraph
# Please contact jbdorey[at]me.com for help
#' Create a compound bar graph of duplicate data sources
#'
#' Creates a plot with two bar graphs. One shows the absolute number of duplicate records for each
#' data source
#' while the other shows the proportion of records that are duplicated within each data source.
#' This function requires a dataset that has been run through [BeeBDC::dupeSummary()].
#'
#' @param data A data frame or tibble. Occurrence records as input.
#' @param outPath Character. The path to a directory (folder) in which the output should be saved.
#' @param fileName Character. The name of the output file, ending in '.pdf'.
#' @param legend.position The position of the legend as coordinates. Default = c(0.85, 0.8).
#' @param base_height Numeric. The height of the plot in inches. Default = 7.
#' @param base_width Numeric. The width of the plot in inches. Default = 7.
#' @param ... Other arguments to be used to change factor levels of data sources.
#' @param dupeColours A vector of colours for the levels duplicate, kept duplicate, and unique.
#' Default = c("#F2D2A2","#B9D6BC", "#349B90").
#' @param returnPlot Logical. If TRUE, return the plot to the environment. Default = FALSE.
#'
#' @return Outputs a .pdf figure.
#' @export
#'
#' @importFrom dplyr %>%
#'
#' @examples
#'
#' # This example will show a warning for the factor levels that are not present in the specific
#' # test dataset
#' dupePlotR(
#' data = beesFlagged,
#' # The outPath to save the plot as
#' # Should be something like: #paste0(OutPath_Figures, "/duplicatePlot_TEST.pdf"),
#' outPath = tempdir(),
#' fileName = "duplicatePlot_TEST.pdf",
#' # Colours in order: duplicate, kept duplicate, unique
#' dupeColours = c("#F2D2A2","#B9D6BC", "#349B90"),
#' # Plot size and height
#' base_height = 7, base_width = 7,
#' legend.position = c(0.85, 0.8),
#' # Extra variables can be fed into forcats::fct_recode() to change names on plot
#' GBIF = "GBIF", SCAN = "SCAN", iDigBio = "iDigBio", USGS = "USGS", ALA = "ALA",
#' ASP = "ASP", CAES = "CAES", 'B. Mont.' = "BMont", 'B. Minckley' = "BMin", Ecd = "Ecd",
#' Gaiarsa = "Gai", EPEL = "EPEL", Lic = "Lic", Bal = "Bal", Arm = "Arm"
#' )
dupePlotR <- function(
data = NULL,
outPath = NULL,
fileName = NULL,
legend.position = c(0.85, 0.8),
base_height = 7,
base_width = 7,
# Factor levels to be changed
...,
# Colours in order: duplicate, kept duplicate, unique
dupeColours = c("#F2D2A2","#B9D6BC", "#349B90"),
returnPlot = FALSE
){
# locally bind variables to the function
database_id <- duplicateStatus <- dataSource <- simpleSource <- NULL
# Load dependencies
requireNamespace("ggspatial")
requireNamespace("forcats")
requireNamespace("dplyr")
requireNamespace("cowplot")
#### 0.0 Prep ####
##### 0.1 errors ####
###### a. FATAL errors ####
if(is.null(data)){
stop(" - Please provide an argument for data I'm a program not a magician.")
}
if(is.null(outPath)){
stop(" - Please provide an argument for outPath Seems reckless to let me just guess.")
}
if(is.null(fileName)){
stop(" - Please provide an argument for fileName Seems reckless to let me just guess.")
}
  if(sum(data$.duplicates == FALSE) == 0){
stop(" - No duplicates flagged in the dataset. Stopping process.")
}
#### 1.0 data prep. ####
# Create the formatted file to create the figure
# Add duplicates
dupeTibble <- data %>%
# Select relevant columns
dplyr::select(database_id, duplicateStatus, dataSource) %>%
# Simplify the dataSources
dplyr::mutate(simpleSource = stringr::str_replace(
string = dataSource, pattern = "_.*", replacement = ""))
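  # For example, a dataSource such as "GBIF_Anthophila" is simplified to "GBIF" by stripping
  # everything from the first underscore onwards (an illustrative value, not run).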
dupeTibble$duplicateStatus <- dupeTibble$duplicateStatus %>%
forcats::fct_relevel("Duplicate","Kept duplicate","Unique")
rm(data)
# Recode the simpleSource to be consistent with elsewhere and then order it
factorised <- dupeTibble$simpleSource %>%
# Recode the factors as the user requests
forcats::fct_recode(...) %>%
# Turn into a tibble
dplyr::tibble() %>%
# Name the single column
stats::setNames(c("simpleSource")) %>%
# Group by this column and then count the number of occurrences for each level
dplyr::group_by(simpleSource) %>%
dplyr::mutate(count = dplyr::n()) %>%
# Name these columns
stats::setNames(c("simpleSource", "count"))
# Now re-order the factor by this count and then feed back into dupeTibble
dupeTibble$simpleSource <- forcats::fct_reorder(factorised$simpleSource,
factorised$count, .desc = TRUE)
#### 2.0 Total duplicates ####
dupeBar <- ggplot2::ggplot(dupeTibble,
ggplot2::aes(simpleSource, fill = duplicateStatus)) +
ggplot2::geom_bar(position = "fill") +
ggplot2::scale_fill_manual("legend", values = c("Duplicate" = dupeColours[1],
"Kept duplicate" = dupeColours[2],
"Unique" = dupeColours[3])) +
ggplot2::theme(axis.text.x = ggplot2::element_text(angle = 45, hjust = 1),
panel.background = ggplot2::element_rect(fill = "white", colour = NA),
axis.line = ggplot2::element_line(colour = "black"),
legend.position="none") +
ggplot2::ylab("Proportion of records") + ggplot2::xlab("data source")
#### 3.0 Proportion of duplicates ####
dupHist <- ggplot2::ggplot(dupeTibble, ggplot2::aes(simpleSource, fill = duplicateStatus)) +
ggplot2::geom_bar() +
ggplot2::scale_fill_manual("legend", values = c("Duplicate" = dupeColours[1],
"Kept duplicate" = dupeColours[2],
"Unique" = dupeColours[3])) +
ggplot2::theme(axis.text.x = ggplot2::element_blank(),
axis.title.x= ggplot2::element_blank(),
panel.background = ggplot2::element_rect(fill = "white", colour = NA),
axis.line = ggplot2::element_line(colour = "black")) +
ggplot2::ylab("Number of records") + ggplot2::xlab("data source")
#### 4.0 combine + save ####
# plot the figures together
(dupPlot <- cowplot::plot_grid(dupHist +
ggplot2::theme(legend.position = legend.position,
legend.title = ggplot2::element_blank()),
dupeBar,
labels = c("(a)","(b)"),
ncol = 1, align = 'v', axis = 'l'))
# Save the plot
cowplot::save_plot(filename = paste(outPath, fileName, sep = "/"),
plot = dupPlot,
base_width = base_width,
base_height = base_height)
if(returnPlot == TRUE){
return(dupPlot)}
} # END function
|
/scratch/gouwar.j/cran-all/cranData/BeeBDC/R/dupePlotR.R
|
# This function was written by James Dorey to remove duplicates using between one and two methods
# This was written between the 11th of June 2022. For help, please contact James at
# jbdorey[at]me.com
#' Identifies duplicate occurrence records
#'
#' This function uses user-specified inputs and columns to identify duplicate occurrence records.
#' Duplicates are identified iteratively and will be tallied up, duplicate pairs clustered, and
#' sorted at the end of the function.
#' The function is designed to work with Darwin Core data with a database_id column,
#' but it is also modifiable to work with other columns.
#'
#' @param data A data frame or tibble. Occurrence records as input.
#' @param path A character path to the location where the duplicateRun_ file will be saved.
#' @param duplicatedBy A character vector. Options are c("ID", "collectionInfo", "both"). "ID"
#' columns runs through a series of ID-only columns defined by idColumns. "collectionInfo" runs
#' through a series of columns defined by collectInfoColumns, which are checked in combination
#' with collectionCols. "both" runs both of the above.
#' @param idColumns A character vector. The columns to be checked individually for internal
#' duplicates. Intended for use with ID columns only.
#' @param collectionCols A character vector. The columns to be checked in combination with each
#' of the completeness_cols.
#' @param collectInfoColumns A character vector. The columns to be checked in combination with
#' all of the collectionCols columns.
#' @param completeness_cols A character vector. A set of columns that are used to order and select
#' duplicates by. For each occurrence, this function will calculate the sum of [complete.cases()].
#' Within duplicate clusters occurrences with a greater number of the completeness_cols filled
#' in will be kept over those with fewer.
#' @param CustomComparisonsRAW A list of character vectors. Custom comparisons - as a list of
#' columns to iteratively compare for duplicates. These differ from the CustomComparisons in
#' that they ignore the minimum number and character thresholds for IDs.
#' @param CustomComparisons A list of character vectors. Custom comparisons - as a list of
#' columns to iteratively compare for duplicates. These comparisons are made after character
#' and number thresholds are accounted for in ID columns.
#' @param sourceOrder A character vector. The order in which you want to KEEP duplicated
#' based on the dataSource column (i.e. what order to prioritize data sources).
#' NOTE: These dataSources are simplified to the string prior
#' to the first "_". Hence, "GBIF_Anthophyla" becomes "GBIF."
#' @param prefixOrder A character vector. Like sourceOrder, except based on the database_id prefix,
#' rather than the dataSource. Additionally, this is only examined if prefixOrder != NULL.
#' Default = NULL.
#' @param dontFilterThese A character vector. This should contain the flag columns to be ignored
#' in the creation or updating of the .summary column. Passed to [BeeBDC::summaryFun()].
#' @param characterThreshold Numeric. The complexity threshold for ID letter length. This is the
#' minimum number of characters that need to be present in ADDITION TO the numberThreshold for an
#' ID number to be tested for duplicates. Ignored by CustomComparisonsRAW. The columns that are
#' checked are occurrenceID, recordId, id, catalogNumber, and otherCatalogNumbers. Default = 2.
#' @param numberThreshold Numeric. The complexity threshold for ID number length. This is the
#' minimum number of numeric characters that need to be present in ADDITION TO the
#' characterThreshold for an ID number to be tested for duplicates. Ignored by
#' CustomComparisonsRAW. The columns that are checked are occurrenceID, recordId, id,
#' catalogNumber, and otherCatalogNumbers. Default = 3.
#' @param numberOnlyThreshold Numeric. As numberThreshold except the characterThreshold is ignored.
#' Default = 5.
#' @param catalogSwitch Logical. If TRUE, and the catalogNumber is empty the function will copy over
#' the otherCatalogNumbers into catalogNumber and vice versa. Hence, the function will attempt
#' to match more catalog numbers as both of these columns can be problematic. Default = TRUE.
#'
#' @return Returns data with an additional column called .duplicates where FALSE occurrences are
#' duplicates and TRUE occurrences are either kept duplicates or unique. Also exports a .csv to
#' the user-specified location with information about duplicate matching. This file is used by
#' other functions including
#' [BeeBDC::manualOutlierFindeR()] and [BeeBDC::chordDiagramR()]
#'
#' @importFrom stats complete.cases setNames
#' @importFrom dplyr n_groups lst desc %>%
#'
#' @seealso [BeeBDC::chordDiagramR()] for creating a chord diagram to visualise linkages between
#' dataSources and [BeeBDC::dupePlotR()] to visualise the numbers and proportions of duplicates in
#' each dataSource.
#'
#' @export
#'
#' @examples
#' beesFlagged_out <- dupeSummary(
#' data = BeeBDC::beesFlagged,
#' # Should start with paste0(DataPath, "/Output/Report/"), instead of tempdir():
#' path = paste0(tempdir(), "/"),
#' # options are "ID","collectionInfo", or "both"
#' duplicatedBy = "collectionInfo", # I'm only running ID for the first lot because we might
#' # recover other info later
#' # The columns to generate completeness info from
#' completeness_cols = c("decimalLatitude", "decimalLongitude",
#' "scientificName", "eventDate"),
#' # idColumns = c("gbifID", "occurrenceID", "recordId","id"),
#' # The columns to ADDITIONALLY consider when finding duplicates in collectionInfo
#' collectionCols = c("decimalLatitude", "decimalLongitude", "scientificName", "eventDate",
#' "recordedBy"),
#' # The columns to combine, one-by-one with the collectionCols
#' collectInfoColumns = c("catalogNumber", "otherCatalogNumbers"),
#' # Custom comparisons - as a list of columns to compare
#' # RAW custom comparisons do not use the character and number thresholds
#' CustomComparisonsRAW = dplyr::lst(c("catalogNumber", "institutionCode", "scientificName")),
#' # Other custom comparisons use the character and number thresholds
#' CustomComparisons = dplyr::lst(c("gbifID", "scientificName"),
#' c("occurrenceID", "scientificName"),
#' c("recordId", "scientificName"),
#' c("id", "scientificName")),
#' # The order in which you want to KEEP duplicated based on data source
#' # try unique(check_time$dataSource)
#' sourceOrder = c("CAES", "Gai", "Ecd","BMont", "BMin", "EPEL", "ASP", "KP", "EcoS", "EaCO",
#' "FSCA", "Bal", "SMC", "Lic", "Arm",
#' "USGS", "ALA", "GBIF","SCAN","iDigBio"),
#' # !!!!!! BELS > GeoLocate
#' # Set the complexity threshold for id letter and number length
#' # minimum number of characters when WITH the numberThreshold
#' characterThreshold = 2,
#' # minimum number of numbers when WITH the characterThreshold
#' numberThreshold = 3,
#' # Minimum number of numbers WITHOUT any characters
#' numberOnlyThreshold = 5)
#'
#'
dupeSummary <- function(
data = NULL,
path = NULL,
duplicatedBy = NULL,
# The columns to generate completeness info from
completeness_cols = NULL,
idColumns = NULL,
# The columns to ADDITIONALLY consider when finding duplicates in collectionInfo
collectionCols = NULL,
# The columns to combine, one-by-one with the collectionCols
collectInfoColumns = NULL,
CustomComparisonsRAW = NULL,
# Custom comparisons - as a list of
CustomComparisons = NULL,
# The order in which you want to KEEP duplicated based on data source
sourceOrder = NULL,
prefixOrder = NULL,
# Columns not to filter in .summary - default is below
dontFilterThese = c(".gridSummary", ".lonFlag", ".latFlag", ".uncer_terms",
".uncertaintyThreshold", ".unLicensed"),
# Set the complexity threshold for id letter and number length
# minimum number of characters when WITH the numberThreshold
characterThreshold = 2,
# minimum number of numbers when WITH the characterThreshold
numberThreshold = 3,
# Minimum number of numbers WITHOUT any characters
numberOnlyThreshold = 5,
catalogSwitch = TRUE
){
# locally bind variables to the function
database_id <- dataSource <- dupColumn_s <- completeness <- .summary <- database_id_match <-
group <- database_id_Main <- dataSourceMain <- database_id_keep <- . <- NULL
# Load required packages
requireNamespace("dplyr")
requireNamespace("lubridate")
requireNamespace("igraph")
# Record start time
startTime <- Sys.time()
#### 0.0 Prep ####
##### 0.1 Errors ####
###### a. FATAL errors ####
if(is.null(data)){
stop(" - Please provide an argument for data. I'm a program not a magician.")
}
if(is.null(sourceOrder)){
stop(paste("Warning message: \n",
" - No sourceOrder provided. This must be provided as the name of each dataSource ",
"before the first '_' in the desired order.",
sep=""))
}
###### b. Warnings ####
if(is.null(duplicatedBy)){
message(paste("Warning message: \n",
" - No duplicatedBy provided. Consider if you want to choose to find duplicates by (i)",
" 'ID' columns only (for pre-cleaned data), by (ii) 'collectionInfo' columns only ",
"(for cleaned data), or (ii) 'both'.\n",
"NULL is acceptable, if quickDeDuplicate == TRUE",
sep = ""))
}
if(is.null(idColumns) & stringr::str_detect(duplicatedBy, "ID|both")){
message(paste("Warning message: \n",
" - No idColumns provided. Using default of: ",
"c('gbifID', 'occurrenceID', 'recordId', and 'id')",
sep=""))
idColumns = c("gbifID", "occurrenceID", "recordId","id")
}
if(is.null(completeness_cols)){
message(paste("Warning message: \n",
" - No completeness_cols provided. Using default of: ",
"c('decimalLatitude', 'decimalLongitude', 'scientificName', and 'eventDate')",
sep=""))
completeness_cols = c("decimalLatitude", "decimalLongitude",
"scientificName", "eventDate")
}
if(is.null(collectionCols)){
message(paste("Warning message: \n",
" - No collectionCols provided. Using default of: ",
"c('decimalLatitude', 'decimalLongitude', 'scientificName', 'eventDate', and 'recordedBy')",
sep=""))
collectionCols = c("decimalLatitude", "decimalLongitude", "scientificName", "eventDate",
"recordedBy")
}
if(is.null(collectInfoColumns)){
message(paste("Warning message: \n",
" - No collectInfoColumns provided. Using default of: ",
"c('recordNumber', 'eventID', 'catalogNumber', 'otherCatalogNumbers', and 'collectionID')",
sep=""))
collectInfoColumns = c("recordNumber", "eventID", "catalogNumber", "otherCatalogNumbers",
"collectionID")
}
##### 0.2 Data prep #####
###### a. completeness ####
# Get the sum of the complete.cases of four important fields. Preference will be given to keeping
# the most-complete records
writeLines(paste(
" - Generating a basic completeness summary from the ",
paste(completeness_cols, collapse = ", "), " columns.","\n",
"This summary is simply the sum of complete.cases in each column. It ranges from zero to the N",
" of columns. This will be used to sort duplicate rows and select the most-complete rows.",
sep = ""
))
# Update the .summary column, ignoring the dontFilterThese columns.
writeLines(" - Updating the .summary column to sort by...")
data <- summaryFun(
data = data,
# Don't filter these columns (or NULL)
dontFilterThese = dontFilterThese,
# Remove the filtering columns?
removeFilterColumns = FALSE,
# Filter to ONLY cleaned data?
filterClean = FALSE)
# Create the completeness column based on the completeness_cols
Loop_data <- data %>%
dplyr::mutate(completeness = data %>%
dplyr::select(tidyselect::all_of(completeness_cols)) %>%
apply(., MARGIN = 1, function(x) sum(complete.cases(x)))
)
###### b. catalogSwitch ####
  # If the catalogNumber is empty, copy over the otherCatalogNumbers value and vice versa
if(catalogSwitch == TRUE){
Loop_data <- Loop_data %>%
dplyr::mutate(
otherCatalogNumbers = dplyr::if_else(is.na(otherCatalogNumbers),
catalogNumber,
otherCatalogNumbers),
catalogNumber = dplyr::if_else(is.na(catalogNumber),
otherCatalogNumbers,
catalogNumber)
)}
##### 0.3 format dateIdentified ####
# Removed for now because dateIdentified is too poorly filled out.
# Format the dateIdentified column
# writeLines(paste(
# " - Formatting the dateIdentified column to date format...",
# sep = ""
# ))
# Loop_data$dateIdentified <- lubridate::ymd_hms(Loop_data$dateIdentified,
# truncated = 5, quiet = TRUE) %>%
# as.Date()
# Add the dupColumn_s as NA for the first iteration
Loop_data$dupColumn_s <- NA
# Create a datset to put duplicates into
runningDuplicates = dplyr::tibble()
#### 1.0 CUSTOM_RAW ####
if(!is.null(CustomComparisonsRAW)){
message(" - Working on CustomComparisonsRAW duplicates...")
# Get complete cases of CustomComparisonsRAW from each dataset
##### 1.1 Loop ####
    # Create a dataset to put unique values into
for(i in 1:length(CustomComparisonsRAW)){
# Select the ith CustomComparisonsRAW to match with
currentColumn <- CustomComparisonsRAW[[i]]
###### a. Identify duplicates ####
# Do the duplicate matching
dupSummary <- Loop_data %>%
# Select the columns to keep
dplyr::select(tidyselect::all_of(c(currentColumn, "database_id",
"dataSource", "dupColumn_s", "completeness",".summary"))
) %>%
# Drop any NA rows
tidyr::drop_na( tidyselect::all_of(c(currentColumn))) %>%
# Select the grouping (duplicate) columns
dplyr::group_by(
dplyr::across(tidyselect::all_of(c(currentColumn)))) %>%
# Select groups with more than one occurrence (duplicates)
dplyr::filter(dplyr::n() > 1) %>%
# Create a new column with the first occurrence in each group matching to the rest
# Do the same for database_id
dplyr::mutate(database_id_match = database_id[1], .after = database_id) %>%
# Add the matching column as a column
dplyr::mutate(
dupColumn_s = stringr::str_c(
dplyr::if_else(complete.cases( dupColumn_s),
stringr::str_c(dupColumn_s,
paste0(currentColumn, collapse = ", "), sep = " & "),
paste0(currentColumn, collapse = ", "))))
# Get numbers just for an output text
duplicates2record <- dupSummary %>%
dplyr::filter( dplyr::row_number() > 1) %>%
nrow()
keptCount = dupSummary %>%
n_groups()
##### b. Running outputs ####
# Bind the rows to a running file. Missing columns will be "NA"
runningDuplicates = dplyr::bind_rows(runningDuplicates,
dupSummary) %>%
distinct(database_id, database_id_match, .keep_all = TRUE)
##### c. User output ####
message(paste0(
"\nCompleted iteration ", i, " of ", length(CustomComparisonsRAW), ":"
))
writeLines(
paste0(" - Identified ",
format(duplicates2record, big.mark = ","),
" duplicate records and kept ",
format(keptCount, big.mark = ","),
" unique records using the column(s): \n",
paste(currentColumn, collapse = ", ")))
# Remove this temporary dataset
rm(dupSummary)
} # END for 1.1 CUSTOM_RAW LOOP
} # END for 1.0 CUSTOM_RAW
#### 2.0 Remove simple codes ####
##### 2.1 Code removal ####
  # Remove the too-simple codes after making the RAW comparisons
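  # A sketch of the threshold logic (illustrative IDs, not run): with characterThreshold = 2,
  # numberThreshold = 3, and numberOnlyThreshold = 5, an ID such as "AM-08157" or "1029384" is
  # kept for matching, while a too-simple ID such as "A1" or "742" is set to NA, e.g.:
  # stringr::str_count("AM-08157", "[A-Za-z]") >= 2 & stringr::str_count("AM-08157", "[0-9]") >= 3  # TRUE
  # stringr::str_count("742", "[0-9]") >= 5                                                          # FALSE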
Loop_data <- Loop_data %>%
dplyr::mutate(
if("occurrenceID" %in% colnames(Loop_data)){
occurrenceID = dplyr::if_else(
stringr::str_count(occurrenceID, "[A-Za-z]") >= characterThreshold &
stringr::str_count(occurrenceID, "[0-9]") >= numberThreshold |
stringr::str_count(occurrenceID, "[0-9]") >= numberOnlyThreshold,
occurrenceID, NA_character_)},
if("recordId" %in% colnames(Loop_data)){
recordId = dplyr::if_else(
stringr::str_count(recordId, "[A-Za-z]") >= characterThreshold &
stringr::str_count(recordId, "[0-9]") >= numberThreshold |
stringr::str_count(recordId, "[0-9]") >= numberOnlyThreshold,
recordId, NA_character_)},
if("id" %in% colnames(Loop_data)){
id = dplyr::if_else( stringr::str_count(id, "[A-Za-z]") >= characterThreshold &
stringr::str_count(id, "[0-9]") >= numberThreshold |
stringr::str_count(id, "[0-9]") >= numberOnlyThreshold,
id, NA_character_)},
if("catalogNumber" %in% colnames(Loop_data)){
catalogNumber = dplyr::if_else(
stringr::str_count(catalogNumber, "[A-Za-z]") >= characterThreshold &
stringr::str_count(catalogNumber, "[0-9]") >= numberThreshold |
stringr::str_count(catalogNumber, "[0-9]") >= numberOnlyThreshold,
catalogNumber, NA_character_)},
if("otherCatalogNumbers" %in% colnames(Loop_data)){
otherCatalogNumbers = dplyr::if_else(
stringr::str_count(otherCatalogNumbers, "[A-Za-z]") >= characterThreshold &
stringr::str_count(otherCatalogNumbers, "[0-9]") >= numberThreshold |
stringr::str_count(otherCatalogNumbers, "[0-9]") >= numberOnlyThreshold,
otherCatalogNumbers, NA_character_)})
##### 2.2. catalogSwitch ####
  # If the catalogNumber is empty, copy over the otherCatalogNumbers value and vice versa
if(catalogSwitch == TRUE){
Loop_data <- Loop_data %>%
dplyr::mutate(
otherCatalogNumbers = dplyr::if_else(is.na(otherCatalogNumbers),
catalogNumber,
otherCatalogNumbers),
catalogNumber = dplyr::if_else(is.na(catalogNumber),
otherCatalogNumbers,
catalogNumber)
)}
#### 3.0 CUSTOM ####
if(!is.null(CustomComparisons)){
message(" - Working on CustomComparisons duplicates...")
# Get complete cases of CustomComparisons from each dataset
##### 3.1 Loop ####
    # Create a dataset to put unique values into
for(i in 1:length(CustomComparisons)){
# Select the ith CustomComparisons to match with
currentColumn <- CustomComparisons[[i]]
###### a. Identify duplicates ####
# Do the duplicate matching
dupSummary <- Loop_data %>%
# Select the columns to keep
dplyr::select(database_id,
tidyselect::all_of(currentColumn),
dataSource, dupColumn_s, completeness,.summary) %>%
# Drop any NA rows
tidyr::drop_na( tidyselect::all_of(c(currentColumn))) %>%
# Select the grouping (duplicate) columns
dplyr::group_by(
dplyr::across(tidyselect::all_of(c(currentColumn)))) %>%
# Select groups with more than one occurrence (duplicates)
dplyr::filter(dplyr::n() > 1) %>%
# Create a new column with the first occurrence in each group matching to the rest
# Do the same for database_id
dplyr::mutate(database_id_match = database_id[1], .after = database_id) %>%
# Add the matching column as a column
dplyr::mutate(
dupColumn_s = stringr::str_c(
dplyr::if_else(complete.cases( dupColumn_s),
stringr::str_c(dupColumn_s,
paste0(currentColumn, collapse = ", "), sep = " & "),
paste0(currentColumn, collapse = ", "))))
# Get numbers just for an output text
duplicates2record <- dupSummary %>%
dplyr::filter( dplyr::row_number() > 1) %>%
nrow()
keptCount = dupSummary %>%
n_groups()
##### b. Running outputs ####
# Bind the rows to a running file. Missing columns will be "NA"
runningDuplicates = dplyr::bind_rows(runningDuplicates,
dupSummary) %>%
distinct(database_id, database_id_match, .keep_all = TRUE)
##### c. User output ####
message(paste0(
"\nCompleted iteration ", i, " of ", length(CustomComparisons), ":"
))
writeLines(
paste0(" - Identified ",
format(duplicates2record, big.mark = ","),
" duplicate records and kept ",
format(keptCount, big.mark = ","),
" unique records using the column(s): \n",
paste(currentColumn, collapse = ", ")))
# Remove this temporary dataset
rm(dupSummary)
} # END for 3.1 CUSTOM LOOP
} # END for 3.0 CUSTOM
#### 4.0 ID ####
if(duplicatedBy %in% c("ID","both")){
message(" - Working on ID duplicates...")
# Get complete cases of collectionInfo from each dataset
##### 4.1 Loop ####
# Create a dataset to put unique values into
for(i in 1:length(idColumns)){
# Select the ith idColumns to match with
currentColumn <- idColumns[i]
###### a. Identify duplicates ####
# Do the duplicate matching
dupSummary <- Loop_data %>%
# Select the columns to keep
dplyr::select(database_id,
tidyselect::all_of(currentColumn),
dataSource, dupColumn_s, completeness,.summary) %>%
# Drop any NA rows
tidyr::drop_na(tidyselect::all_of(currentColumn)) %>%
# Select the grouping (duplicate) columns
dplyr::group_by( dplyr::across(dplyr::all_of(currentColumn))) %>%
# Select groups with more than one occurrence (duplicates)
dplyr::filter(dplyr::n() > 1) %>%
# Create a new column with the first occurrence in each group matching to the rest to keep
# Do the same for database_id
dplyr::mutate(database_id_match = database_id[1], .after = database_id) %>%
# Add the matching column as a column
dplyr::mutate(
dupColumn_s = stringr::str_c(
dplyr::if_else(!is.na(dupColumn_s) ,
stringr::str_c(dupColumn_s, currentColumn, sep = " & "),
currentColumn)))
# Get numbers just for an output text
duplicates2record <- dupSummary %>%
dplyr::filter( dplyr::row_number() > 1) %>%
nrow()
keptCount = dupSummary %>%
n_groups()
##### b. Running outputs ####
# Bind the rows to a running file. Missing columns will be "NA"
runningDuplicates = dplyr::bind_rows(runningDuplicates,
dupSummary) %>%
distinct(database_id, database_id_match, .keep_all = TRUE)
##### c. User output ####
message(paste0(
"\nCompleted iteration ", i, " of ", length(idColumns), ":"
))
      writeLines(
        paste0(" - Identified ",
               format(duplicates2record, big.mark = ","),
               " duplicate records and kept ",
               format(keptCount, big.mark = ","),
               " unique records using the column: \n",
               currentColumn))
# Remove this temporary dataset
rm(dupSummary)
} # END for LOOP
} # END 4.0 ID
#### 5.0 collectionInfo ####
if(duplicatedBy %in% c("collectionInfo","both")){
message(" - Working on collectionInfo duplicates...")
# Get complete cases of collectionInfo from each dataset
##### 5.1 Loop ####
# Create a dataset to put unique values into
for(i in 1:length(collectInfoColumns)){
# Select the ith collectInfoColumns to match with
currentColumn <- collectInfoColumns[i]
###### a. Identify duplicates ####
# Do the duplicate matching
dupSummary <- Loop_data %>%
# Select the columns to keep
dplyr::select(database_id,
tidyselect::all_of(collectionCols),
tidyselect::all_of(currentColumn),
dataSource, dupColumn_s, completeness, .summary) %>%
# Drop any NA rows
tidyr::drop_na( tidyselect::all_of(c(collectionCols, currentColumn))) %>%
# Select the grouping (duplicate) columns
dplyr::group_by(
dplyr::across(tidyselect::all_of(c(collectionCols, currentColumn)))) %>%
# Select groups with more than one occurrence (duplicates)
dplyr::filter(dplyr::n() > 1) %>%
# Create a new column with the first occurrence in each group matching to the rest
# Do the same for database_id
dplyr::mutate(database_id_match = database_id[1], .after = database_id) %>%
# Add the matching column as a column
dplyr::mutate(
dupColumn_s = stringr::str_c(
dplyr::if_else(!is.na(dupColumn_s) ,
stringr::str_c(dupColumn_s,
paste0(currentColumn, collapse = ", "), sep = " & "),
paste0(currentColumn, collapse = ", "))))
# Get numbers just for an output text
duplicates2record <- dupSummary %>%
dplyr::filter( dplyr::row_number() > 1) %>%
nrow()
keptCount = dupSummary %>%
n_groups()
##### b. Running outputs ####
# Bind the rows to a running file. Missing columns will be "NA"
runningDuplicates = dplyr::bind_rows(runningDuplicates,
dupSummary)%>%
distinct(database_id, database_id_match, .keep_all = TRUE)
##### c. User output ####
message(paste0(
"\nCompleted iteration ", i, " of ", length(collectInfoColumns), ":"
))
writeLines(
paste0(" - Identified ",
format(duplicates2record, big.mark = ","),
" duplicate records and kept ",
format(keptCount, big.mark = ","),
" unique records using the columns: \n",
paste(c(collectionCols), collapse = ", "), ", and ",
currentColumn))
# Remove this temporary dataset
rm(dupSummary)
} # END for 5.1 collectionInfo LOOP
} # END for 5.0 collectionInfo
#### 6.0 runningDuplicates File ####
##### 6.1 Clustering duplicates####
writeLines(" - Clustering duplicate pairs...")
# Cluster the id pairs into groups
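  # A small sketch of the clustering idea (not run): if record A matched B in one test and B
  # matched C in another, igraph::components() places A, B, and C in the same group, so a single
  # 'kept' record can later be chosen for the whole cluster, e.g.:
  # igraph::components(igraph::graph.data.frame(data.frame(from = c("A", "B"),
  #                                                         to   = c("B", "C"))))$membership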
clusteredDuplicates <- runningDuplicates %>%
dplyr::select(database_id_match, database_id) %>%
igraph::graph.data.frame() %>%
igraph::components()
# Extract the id and the group only
clusteredDuplicates <- clusteredDuplicates$membership %>% as.data.frame() %>%
setNames("group") %>%
dplyr::mutate(database_id = rownames(.)) %>% dplyr::as_tibble()
# Re-merge the relevant columns
clusteredDuplicates <- clusteredDuplicates %>%
# Re-merge the dupColumn_s column
dplyr::left_join(runningDuplicates %>%
dplyr::select(database_id, dupColumn_s) %>%
dplyr::distinct(database_id, .keep_all = TRUE),
by = "database_id") %>%
# Re-merge the rest of the information
dplyr::left_join(Loop_data %>%
dplyr::select(
tidyselect::any_of(unique(c("database_id",
completeness_cols,
collectionCols,
collectInfoColumns,
lst(CustomComparisons) %>%
unlist() %>% as.character(),
"dataSource", "completeness",
".summary"),
fromLast = TRUE,
na.rm = TRUE))),
by = "database_id") %>%
# Group by the clustered group number
dplyr::group_by(group)
# User output
writeLines(paste0(
"Duplicate pairs clustered. There are ",
format(nrow(clusteredDuplicates) - clusteredDuplicates %>% n_groups(),
big.mark = ","), " duplicates across ",
format(clusteredDuplicates %>% n_groups(), big.mark = ","),
" kept duplicates."))
##### 6.2 Arrange data ####
# Prepare data order
if(!is.null(prefixOrder)){
writeLines(" - Ordering prefixs...")
prefixOrder = prefixOrder
clusteredDuplicates <- clusteredDuplicates %>%
# Make a new column with the database_id SOURCE, not the full database_id with numbers
dplyr::mutate(database_id_Main = stringr::str_replace(database_id,
pattern = "_.*",
replacement = "") %>%
factor(levels = prefixOrder, ordered = TRUE) ) %>%
# Sort so that certain datasets will be given preference over one another as user-defined.
dplyr::arrange(database_id_Main) %>%
# Remove this sorting column
dplyr::select(!database_id_Main)
}
writeLines(paste0(" - Ordering data by 1. dataSource, 2. completeness",
" and 3. .summary column..."))
clusteredDuplicates <- clusteredDuplicates %>%
# Extract only the actual source, not the taxonomic level
dplyr::mutate(dataSourceMain = stringr::str_replace(dataSource,
pattern = "_.*",
replacement = "") %>%
factor(levels = sourceOrder, ordered = TRUE) ) %>%
# Sort so that certain datasets will be given preference over one another as user-defined.
dplyr::arrange(dataSourceMain) %>%
# Sort so that higher completeness is given FIRST preference
dplyr::arrange( desc(completeness)) %>%
# Sort by .summary so that TRUE is selected over FALSE
dplyr::arrange( desc(.summary)) %>%
# Remove these sorting columns
dplyr::select(!c(dataSourceMain))
##### 6.3 Keep first #####
writeLines(paste0(" - Find and FIRST duplicate to keep and assign other associated",
" duplicates to that one (i.e., across multiple tests a 'kept duplicate', ",
"could otherwise be removed)..."))
  # Find the first duplicate and assign the match to that one as the kept duplicate
clusteredDuplicates <- clusteredDuplicates %>%
dplyr::mutate(database_id_keep = database_id[1], .after = database_id) %>%
dplyr::mutate(dataSource_keep = dataSource[1], .after = dataSource) %>%
# Remove the row for the kept duplicate
dplyr::filter(!database_id_keep == database_id)
##### 6.4 Save ####
# Save the running
readr::write_excel_csv(clusteredDuplicates,
file = paste0(path,
"/duplicateRun_", paste(duplicatedBy, collapse = "_"),
"_", Sys.Date(),
".csv") %>%
stringr::str_replace_all("//duplicateRun_", "/duplicateRun_"))
writeLines(paste0(
" - Duplicates have been saved in the file and location: ",
paste0(path,
"duplicateRun_", paste(duplicatedBy, collapse = "_"),
"_", Sys.Date(),
".csv")
))
#### 7.0 Flag .duplicates ####
# Add a flag to any database_id that occurs in the clusteredDuplicates file. The rest will be
# TRUE (not duplicates)
Loop_data <- data %>%
# Add .duplicates flag column
dplyr::mutate(.duplicates = !database_id %in% clusteredDuplicates$database_id) %>%
# Add in a column to show the duplicate status of each occurrence
dplyr::mutate(duplicateStatus = dplyr::if_else(
database_id %in% clusteredDuplicates$database_id,
"Duplicate",
dplyr::if_else(
database_id %in% clusteredDuplicates$database_id_keep,
"Kept duplicate", "Unique"))
)
#### Final output ####
writeLines(paste0(
" - Across the entire dataset, there are now ",
format(sum(Loop_data$.duplicates == FALSE), big.mark = ","), " duplicates from a total of ",
format(nrow(Loop_data), big.mark = ","), " occurrences."
))
endTime <- Sys.time()
message(paste(
" - Completed in ",
round(difftime(endTime, startTime), digits = 2 ),
" ",
units(round(endTime - startTime, digits = 2)),
sep = ""))
# Return data
return(Loop_data)
} # END function
|
/scratch/gouwar.j/cran-all/cranData/BeeBDC/R/dupeSummary.R
|
# This function is a modification of a bdc function and flags columns as FALSE when they are marked
# as "ABSENT". This function was written on the 22nd of May 2022 by James Dorey. Email James at
# jbdorey[at]me.com for help.
#' Flags occurrences that are marked as absent
#'
#' Flags occurrences that are "ABSENT" for the occurrenceStatus (or some other user-specified) column.
#'
#' @param data A data frame or tibble. Occurrence records as input.
#' @param PresAbs Character. The column in which the function will find "ABSENT" or "PRESENT" records.
#' Default = "occurrenceStatus"
#'
#' @return The input data with a new column called ".occurrenceAbsent" where FALSE == "ABSENT" records.
#' @export
#'
#'@importFrom dplyr %>%
#'
#' @examples
#' # Bring in the data
#' data(beesRaw)
#' # Run the function
#' beesRaw_out <- flagAbsent(data = beesRaw,
#' PresAbs = "occurrenceStatus")
#' # See the result
#' table(beesRaw_out$.occurrenceAbsent, useNA = "always")
flagAbsent <-
function(data = NULL,
PresAbs = "occurrenceStatus") {
.data <- .occurrenceAbsent <- NULL
requireNamespace("dplyr")
# Make a new column called .occurrenceAbsent to be TRUE when occurrenceStatus is "present" or NA
data <-
data %>%
dplyr::mutate(
.occurrenceAbsent =
!.data[[PresAbs]] %in% c("ABSENT"))
# Return user output
message(
paste(
"\\.occurrenceAbsent:\n",
"Flagged",
format(sum(data$.occurrenceAbsent == FALSE, na.rm = TRUE), big.mark = ","),
"absent records:\n",
"One column was added to the database.\n"
)
)
return(data)
}
|
/scratch/gouwar.j/cran-all/cranData/BeeBDC/R/flagAbsent.R
|
# This function is a modification of a bdc function and flags columns as FALSE when they are
# present in the strings_to_restrict.
# This function was written on the 22nd of May 2022 by James Dorey. Email James at
# jbdorey[at]me.com for help.#
#' Flag license protected records
#'
#' This function will search for strings that indicate a record is restricted in its use and will
#' flag the restricted records.
#'
#' @param data A data frame or tibble. Occurrence records as input.
#' @param strings_to_restrict A character vector. Should contain the strings used to detect protected records.
#' Default = c("All Rights Reserved", "All rights reserved", "All rights reserved.", "ND", "Not for public")
#' @param excludeDataSource Optional. A character vector. A vector of the data sources (dataSource)
#' that will not be flagged as protected, even if they are. This is useful if you have a private
#' dataset that should be listed as "All rights reserved" which you want to be ignored by this flag.
#'
#' @return Returns the data with a new column, .unLicensed, where FALSE = records that are protected by
#' a license.
#' @export
#'
#' @importFrom dplyr %>%
#'
#' @examples
#' # Read in the example data
#' data("beesRaw")
#' # Run the function
#' beesRaw_out <- flagLicense(data = beesRaw,
#' strings_to_restrict = "all",
#' # DON'T flag if in the following dataSource(s)
#' excludeDataSource = NULL)
flagLicense <- function(data = NULL,
strings_to_restrict = "all",
excludeDataSource = NULL) {
.data <- .unLicensed <- dataSource <- NULL
requireNamespace("dplyr")
requireNamespace("rlang")
  #### 1.0 Preparation ####
##### 1.1 strings_to_restrict ####
  # Flag if these strings are present for NO USE!
if (strings_to_restrict[1] == "all") {
strings_to_restrict <-
c("All Rights Reserved",
"All rights reserved",
"All rights reserved.",
"ND", # noDerivatives to be distributed
"Not for public") # I fear this has been uploaded in error?
}
##### 1.2 Missing columns ####
###### a. dataSource ####
# If the dataSource column is not in the dataset, fill it in with "NA"s
if(!any(colnames(data) %in% "dataSource")){
data <- data %>%
dplyr::mutate(dataSource = NA_character_)
message("No dataSource provided. Filling this column with NAs...")
}
###### b. rights ####
# If the rights column is not in the dataset, fill it in with "NA"s
if(!any(colnames(data) %in% "rights")){
data <- data %>%
dplyr::mutate(rights = NA_character_)
message("No rights provided. Filling this column with NAs...")
}
###### c. license ####
# If the license column is not in the dataset, fill it in with "NA"s
if(!any(colnames(data) %in% "license")){
data <- data %>%
dplyr::mutate(license = NA_character_)
message("No license provided. Filling this column with NAs...")
}
  ###### d. accessRights ####
# If the accessRights column is not in the dataset, fill it in with "NA"s
if(!any(colnames(data) %in% "accessRights")){
data <- data %>%
dplyr::mutate(accessRights = NA_character_)
message("No accessRights provided. Filling this column with NAs...")
}
# Make a new column called .unLicensed to be TRUE when ... the restricted flags are present in
# any of the rights, license, or accessRights columns.
data <-
data %>%
dplyr::mutate(
.unLicensed =
dplyr::if_else(grepl(paste(strings_to_restrict, collapse = "|"), data$rights) |
grepl(paste(strings_to_restrict, collapse = "|"), data$license) |
grepl(paste(strings_to_restrict, collapse = "|"), data$accessRights) == TRUE,
dplyr::if_else(dataSource %in% excludeDataSource,
# Flagged, but EXCLUDED — keep these data
TRUE,
# Flagged AND included — DON'T use these data
FALSE),
# Not flagged — keep these data
TRUE))
# Return user output
message(
paste(
"\\.unLicensed:\n",
"Flagged",
format(sum(data$.unLicensed == FALSE, na.rm = TRUE), big.mark = ","),
"records that may NOT be used.\n",
"One column was added to the database.\n"
)
)
# Return the data to the user
return(data)
}
|
/scratch/gouwar.j/cran-all/cranData/BeeBDC/R/flagLicense.R
|
# This function was written by James Dorey to load and output updated summary information from flagging
# This function was written from the 9th of June 2022. For questions, please email James
# at jbdorey[at]me.com
#' Loads, appends, and saves occurrence flag data
#'
#' This function is used to save the flag data for your occurrence data as you run the BeeBDC script.
#' It will read and append existing files, if asked to. Your flags should also be saved in the occurrence
#' file itself automatically.
#'
#' @param data A data frame or tibble. Occurrence records as input.
#' @param outPath A character path. Where the file should be saved.
#' @param fileName Character. The name of the file to be saved
#' @param idColumns A character vector. The names of the columns that are to be kept along with the
#' flag columns. These columns should be useful for identifying unique records with flags.
#' Default = c("database_id", "id", "catalogNumber", "occurrenceID", "dataSource").
#' @param append Logical. If TRUE, this will find and append an existing file generated by this function.
#' @param printSummary Logical. If TRUE, print a [summary()] of all filter columns - i.e. those which
#' tidyselect::starts_with(".")
#'
#' @return Saves a file with id and flag columns and returns this as an object.
#' @export
#'
#' @importFrom dplyr %>%
#'
#' @examples
#' # Load the example data
#' data("beesFlagged")
#'
#' # Run the function
#' OutPath_Report <- tempdir()
#' flagFile <- flagRecorder(
#' data = beesFlagged,
#' outPath = paste(OutPath_Report, sep =""),
#' fileName = paste0("flagsRecorded_", Sys.Date(), ".csv"),
#' # These are the columns that will be kept along with the flags
#' idColumns = c("database_id", "id", "catalogNumber", "occurrenceID", "dataSource"),
#' # TRUE if you want to find a file from a previous part of the script to append to
#' append = FALSE)
flagRecorder <- function(
data = NULL,
outPath = NULL,
fileName = NULL,
idColumns = c("database_id", "id", "catalogNumber", "occurrenceID", "dataSource"),
append = NULL,
printSummary = FALSE){
# locally bind variables to the function
database_id <- . <- rowSum <- .summary <- NULL
#### 0.0 Prep ####
##### 0.1 Packages ####
requireNamespace("dplyr")
requireNamespace("tidyselect")
requireNamespace("lubridate")
requireNamespace("readr")
##### 0.2 Warnings ####
if(is.null(data)){
warning(" - Please provide a dataset.")
}
if(is.null(outPath)){
warning(" - Please provide an outPath to where the bdc folders are dataset.")
}
if(is.null(idColumns)){
warning(paste(" - No ID columns were selected! We will keep the following id columns intact:\n",
"database_id, id, catalogNumber, and occurrenceID.",
sep = ""))
idColumns = c("database_id", "id", "catalogNumber", "occurrenceID")
}
if(is.null(append)){
warning(" - Please provide a append argument - TRUE (find existing file) or FALSE (start from scratch).")
}
##### 0.3 Data in ####
###### a. Occurrence dataset ####
# Select the columns from the input occurrence dataset
data <- data %>%
dplyr::select( dplyr::all_of(idColumns), tidyselect::starts_with("."))
###### b. Existing data ####
if(append == TRUE){
# Find an existing file
flagPath <- fileFinder(path = outPath, fileName = "flagsRecorded_")
# Read it in
flagColumns <- readr::read_csv(flagPath)
# Find the new columns that need adding in
newColumns <- setdiff(colnames(data), colnames(flagColumns))
# Merge the new columns to the data tibble
data <- flagColumns %>%
dplyr::left_join(
# Select only the new columns to add, from the new tibble.
dplyr::select(data, c(database_id, dplyr::all_of(newColumns))),
# Merge by database_id
by = "database_id", keep = FALSE)
# Remove the spent dataframe
rm(flagColumns)
}
#### 1.0 Yes .summary ####
if(".summary" %in% colnames(data)){
# Update .summary column
summaryCol <- data %>%
# Select all columns starting with "."
dplyr::select(tidyselect::starts_with(".")) %>%
# Delete the summary column if it's there
dplyr::select(!tidyselect::starts_with(".summary")) %>%
# Make FALSE == 1 and TRUE == 0
dplyr::mutate_if(is.logical, ~abs(as.numeric(.) - 1)) %>%
# IF rowSum > 0 then there is at least one flag
dplyr::mutate(rowSum = rowSums(., na.rm = TRUE)) %>%
# Add the .summary column
dplyr::mutate(.summary = dplyr::if_else(rowSum > 0,
FALSE, TRUE)) %>%
dplyr::select(.summary)
# Add this column in
data <- data %>%
dplyr::mutate(.summary = summaryCol$.summary)
# User output
message(" - .summary column detected. This will be over-written.")
}
#### 2.0 No .summary ####
if(!".summary" %in% colnames(data)){
# Update .summary column
summaryCol <- data %>%
      # Select all columns starting with "."
dplyr::select(tidyselect::starts_with(".")) %>%
# Delete the summary column if it's there
dplyr::select(!tidyselect::starts_with(".summary")) %>%
# Make FALSE == 1 and TRUE == 0
dplyr::mutate_if(is.logical, ~abs(as.numeric(.) - 1)) %>%
# IF rowSum > 0 then there is at least one flag
dplyr::mutate(rowSum = rowSums(., na.rm = TRUE)) %>%
# Add the .summary column
dplyr::mutate(.summary = dplyr::if_else(rowSum > 0,
FALSE, TRUE)) %>%
dplyr::select(.summary)
# Add this column in
data <- data %>%
dplyr::mutate(.summary = summaryCol$.summary)
# User output
message(" - NO .summary column detected. This will added to the data.")
}
#### 3.0 Save ####
# Save this information as the csv flagsRecorded_DATE.csv
readr::write_excel_csv(data,
paste0(outPath, "/", fileName, sep = ""))
# User output
message(paste(
" - Data saved to ", paste0(outPath, "/", fileName, sep = ""),
sep = ""))
# User output
writeLines(paste(
" - Selected ", ncol(data), " columns. These include:\n",
    paste(colnames(data)[1:(ncol(data) - 1)], collapse = ", "),
", and ", paste(colnames(data)[ncol(data)]),
sep = ""
))
# Print summary if requested
if(printSummary == TRUE){
summary(dplyr::select(data, tidyselect::starts_with(".")))
}
# Return this file
return(data)
} # END flagRecorder
|
/scratch/gouwar.j/cran-all/cranData/BeeBDC/R/flagRecorder.R
|
#' Build a per-species summary for each and all flags
#'
#' Takes a flagged dataset and returns the total number of fails (FALSE) per flag (columns starting
#' with ".") and per species. It will ignore the .scientificName_empty and .invalidName columns as
#' species are not assigned.
#' Users may define the column to group the summary by. While it is intended to work with
#' the scientificName column, users may select any grouping column (e.g., country).
#'
#' @param data A data frame or tibble. The flagged dataset.
#' @param column Character. The name of the column to group by and summarise the failed occurrences.
#' Default = "scientificName".
#' @param outPath A character path. The path to the directory in which the figure will be saved.
#' Default = OutPath_Report. If is NULL then no file will be saved to the disk.
#' @param fileName Character. The name of the file to be saved, ending in ".csv".
#' Default = "flagTable.csv".
#' @param percentImpacted Logical. If TRUE (the default), the program will write the percentage of
#' species impacted and over the percentThreshold for each flagging column.
#' @param percentThreshold Numeric. A number between 0 and 100 to indicate the percent of
#' individuals (>; within each species) that is impacted by a flag, and to be included in the
#' percentImpacted. Default = 0.
#'
#' @importFrom dplyr %>%
#'
#' @return A tibble with a column for each flag column (starting with ".") showing the number of
#' failed (FALSE) occurrences per group. Also shows the (i) total number of records, (ii) total
#' number of failed records, and (iii) the percentage of failed records.
#' @export
#'
#' @examples
#' # Load the toy flagged bee data
#' data("beesFlagged")
#'
#' # Run the function and build the flag table
#' flagTibble <- flagSummaryTable(data = beesFlagged,
#' column = "scientificName",
#' outPath = paste0(tempdir()),
#' fileName = "flagTable.csv")
#'
#'
flagSummaryTable <- function(
data = NULL,
column = "scientificName",
outPath = OutPath_Report,
fileName = "flagTable.csv",
percentImpacted = TRUE,
percentThreshold = 0){
# locally bind variables to the function
flagColumns <- dataFlags <- speciesColumn <- loopCol <- summaryColumn <- . <- NULL
  flagCol <- .summary <- totalFailed <- total <- OutPath_Report <- NULL
.scientificName_empty <- .invalidName <- NULL
# Load required packages
requireNamespace("dplyr")
requireNamespace("tidyselect")
requireNamespace("stats")
requireNamespace("readr")
#### 0.0 Prep ####
##### 0.1 Errors ####
###### a. FATAL errors ####
if(is.null(data)){
stop(" - Please provide an argument for data. I'm a program not a magician.")
}
if(percentThreshold > 100 | percentThreshold < 0){
stop(" - percentThreshold must range from 0 to 100.")
}
#### 1.0 Data prep ####
# Filter for bad names to not include in the table
if(any(colnames(data) %in% ".scientificName_empty")){
data <- data %>%
dplyr::filter(!.scientificName_empty == FALSE)
}
if(any(colnames(data) %in% ".invalidName")){
data <- data %>%
dplyr::filter(!.invalidName == FALSE)
}
# Re-do the .summary column to be sure its up to date
data <- data %>%
# Remove those without scientific name and that flag column
dplyr::select(!tidyselect::any_of(".scientificName_empty")) %>%
# Do the same for invalid names
dplyr::select(!tidyselect::any_of(".invalidName")) %>%
# Refresh the .summary column
BeeBDC::summaryFun()
# Get a character vector of the flag columns
flagColumns <- data %>% dplyr::select(tidyselect::starts_with(".")) %>% colnames()
# Select only the species name and flag columns
dataFlags <- data %>%
dplyr::select(tidyselect::all_of( c(column, flagColumns)))
# Create a column of only species names to add the summary of each column to below
speciesColumn <- dataFlags %>%
dplyr::distinct(dplyr::across(tidyselect::all_of(column)))
#### 2.0 Flag column loop ####
  # Loop through each column to get species-level counts, starting from column two so as not to
  # count the grouping column
for(i in 1:(ncol(dataFlags)-1) ){
# For the ith column, get a COUNT of the FALSE (failed) occurrences per species (or column)
loopCol <- dataFlags %>%
# Select the relevant columns and group by them
dplyr::select(tidyselect::all_of( c(column, flagColumns[[i]]))) %>%
dplyr::group_by(dplyr::across( tidyselect::all_of(c(column, flagColumns[[i]])))) %>%
# Count the occurrences of both TRUE and FALSE
dplyr::count(name = "n") %>%
# Set the column names temporarily and then REMOVE the TRUE counts
stats::setNames(paste0( c(column, "flagCol", "n" ))) %>%
dplyr::filter(!flagCol == TRUE) %>%
# ungroup
dplyr::ungroup() %>%
# select the relevant columns
dplyr::select(tidyselect::all_of( c(column, "n"))) %>%
# Rename the columns
stats::setNames(paste0( c(column, flagColumns[[i]]) ))
# Add the count to the speciesColumn tibble
speciesColumn <- speciesColumn %>%
dplyr::left_join(loopCol, by = column) %>%
replace(is.na(.), 0)
}
#### 3.0 Summary columns ####
# Add in the totals and percentage to the last columns
summaryColumn <- speciesColumn %>%
# Add a count of total records
dplyr::left_join(dataFlags %>%
dplyr::group_by(dplyr::across(tidyselect::all_of(column))) %>%
dplyr::count(name = "total"),
by = column) %>%
# Change the .summary column to a total failed column
dplyr::rename(totalFailed = .summary) %>%
dplyr::relocate(totalFailed, .after = total) %>%
# Add percentage failed
dplyr::mutate(percentFailed = (totalFailed/total)*100)
if(percentImpacted == TRUE){
# Turn the percentThreshold into a proportion instead of a percentage
percentThreshold <- percentThreshold/100
# Get the percentages of species that are impacted by each flag
percentImpacted <- summaryColumn %>%
dplyr::summarise(dplyr::across(tidyselect::starts_with("."),
function(x){
sum(x/total > percentThreshold)/length(x)*100
})) %>%
# Transpose the tibble
tidyr::pivot_longer(cols = tidyselect::starts_with("."))
    # Provide user output
writeLines(paste0("The percentages of species impacted by each flag in your analysis are as follows: \n",
paste0(" ", percentImpacted$name, " = ", round(percentImpacted$value, 2), "%",
collapse = "\n"))
) # END writeLines
} # END percentImpacted == TRUE
#### 4.0 Output ####
# If user provided an outPath then save the file
if(!is.null(outPath)){
summaryColumn %>%
readr::write_excel_csv(file = paste(outPath, fileName, sep = "/"))
}
# return the table as a tibble
return(summaryColumn)
} # END function
|
/scratch/gouwar.j/cran-all/cranData/BeeBDC/R/flagSummaryTable.R
|
# This function, written by James Dorey builds flags from the Ascher notes column
# For queries, please contact James Dorey at jbdorey[at]me.com
# This function was started on 13th May 2022 and last updated 17th May 2022
#' @importFrom dplyr %>%
#'
#'
# Build a function that finds flags in the notes column and standardizes them into the flags column
flag_converter <- function(SynFile = DLdf){
# locally bind variables to the function
DLdf <- . <- Combined <- NULL
requireNamespace("dplyr")
#### 1.1 doubtful ####
# Doubtful species strings to find and matches
doubt_sp_str <-c("nomen dubium","doubtful synonymy","doubtful synonym")
doubt_sp_replacements <- SynFile$notes %>%
# Extract those strings
stringr::str_extract(paste(doubt_sp_str,
collapse = "|")) %>%
# Repalce those strings
stringr::str_replace(pattern = paste(doubt_sp_str,
collapse = "|"),
replacement = "doubtful species") %>%
as.character() %>% as.data.frame()
#### 1.2 Syn Issue ####
# Syn Issue strings to find and matches
synIss_str <-c("partim")
synIss_replacements <- SynFile$notes %>%
# Extract those strings
stringr::str_extract(paste(synIss_str,
collapse = "|")) %>%
# Repalce those strings
stringr::str_replace(pattern = paste(synIss_str,
collapse = "|"),
replacement = "synonym issue") %>%
as.character() %>% as.data.frame()
#### 1.3 Auth Issue ####
authIss_str <-c("auct", "auct , not Fabricius", "Auctorum", "Auct", "_auct",
"auct.", "miscitation")
authIss_replacements <- SynFile$notes %>%
# Extract those strings
stringr::str_extract(paste(authIss_str,
collapse = "|")) %>%
# Repalce those strings
stringr::str_replace(pattern = paste(authIss_str,
collapse = "|"),
replacement = "authorship issue") %>%
as.character() %>% as.data.frame()
#### 1.4 Auth not provided ####
# If the authorship column is empty, flag...
noAuth <- ifelse(is.na(SynFile$authorship), "author not provided", NA) %>%
as.character() %>% as.data.frame()
#### 2.0 Merge flags ####
MergedFlags <- dplyr::bind_cols(doubt_sp_replacements, synIss_replacements, authIss_replacements,
noAuth) %>%
# Concatenate the columns into one
tidyr::unite(., col = Combined,
na.rm = TRUE, sep = ", ")
return(MergedFlags)
} # END function
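
# A minimal, hypothetical usage sketch for this internal helper (kept entirely as comments so
# nothing is run at package load). It assumes a tibble with 'notes' and 'authorship' columns,
# as in the Ascher synonym files this function was written against:
# exampleSyns <- dplyr::tibble(
#   notes      = c("nomen dubium", "partim", NA),
#   authorship = c("Smith, 1879", NA, NA))
# flag_converter(SynFile = exampleSyns)
# # Expected to return a one-column tibble ("Combined") along the lines of:
# # "doubtful species"; "synonym issue, author not provided"; "author not provided"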
|
/scratch/gouwar.j/cran-all/cranData/BeeBDC/R/flag_converter.R
|
#### 5. formatted combiner ####
# Currently works to combine the USGS data with the rest of the data when the string is as below.
# But, it should also work for any properly-formatted files
# strings = c("USGS_[a-zA-Z_]+[0-9]{4}-[0-9]{2}-[0-9]{2}")
#' Combine the formatted USGS data with the main dataset
#'
#' Merges the Darwin Core version of the USGS dataset that was created using [BeeBDC::USGS_formatter()]
#' with the main dataset.
#'
#' @param path A directory as character. The directory to look in for the formatted USGS data.
#' @param strings A regex string. The string to find the most-recent formatted USGS dataset.
#' @param existingOccurrences A data frame. The existing occurrence dataset.
#' @param existingEMLs An EML file. The existing EML data file to be appended.
#'
#' @return A list with the combined occurrence dataset and the updated EML file.
#' @export
#'
#' @importFrom dplyr %>%
#' @importFrom stats setNames
#'
#' @examples
#' \dontrun{
#' DataPath <- tempdir()
#' strings = c("USGS_DRO_flat_27-Apr-2022")
#' # Combine the USGS data and the existing big dataset
#' Complete_data <- formattedCombiner(path = DataPath,
#' strings = strings,
#' # This should be the list-format with eml attached
#' existingOccurrences = DataImp$Data_WebDL,
#' existingEMLs = DataImp$eml_files)
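#'
#' # Because strings is treated as a regular expression, a more general, date-agnostic
#' # pattern (noted at the top of this file) could also be used; shown here as an
#' # illustrative sketch only:
#' # Complete_data <- formattedCombiner(path = DataPath,
#' #                      strings = c("USGS_[a-zA-Z_]+[0-9]{4}-[0-9]{2}-[0-9]{2}"),
#' #                      existingOccurrences = DataImp$Data_WebDL,
#' #                      existingEMLs = DataImp$eml_files)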
#' }
formattedCombiner <- function(path,
strings,
existingOccurrences,
existingEMLs){
# locally bind variables to the function
. <- NULL
requireNamespace("dplyr")
requireNamespace("xml2")
# Find all of the previously-produced data files
  BeeData_Locs <- file.info(list.files(path, full.names = TRUE, pattern = strings,
recursive = TRUE))
# Check if the data are present
if(nrow(BeeData_Locs) == 0){ # If there are no data matching the name...
stop(" - Bugger it, R can't find any files produced by our package in the path provided :(")
}
# Find the most-recent files based on date in file name
file_dates <- stringr::str_extract(rownames(BeeData_Locs),
pattern = "[0-9]{4}-[0-9]{2}-[0-9]{2}") %>%
# Sort from most- to least-recent files
sort(decreasing = TRUE)
# Return the strings containing this date
most_recent <- stringr::str_subset(rownames(BeeData_Locs),
pattern = file_dates[1])
# Return information to user
writeLines(paste(" - Great, R has detected some files. These files include: ", "\n",
paste(most_recent, collapse = "\n") ), sep = "")
#### CSV import ####
# IF there is ONLY a complete .csv file among the most-recent files...
if(any(stringr::str_detect(most_recent, paste(strings,".csv", sep = ""))) == TRUE &&
all(stringr::str_detect(most_recent, paste(strings,".rds", sep = ""))) == FALSE){
writeLines(paste(" - .csv export version found. Loading this file..."))
ColTypes <- ColTypeR()
# Find the most-recent .csv occurrence file
    # Find the file that does NOT include "attribute" or "problems" in the string
occ_file <- most_recent[stringr::str_which(most_recent, negate = TRUE,
paste("attribute|problems", sep = ""))] %>%
      # Read in .csv file and suppress warnings
      readr::read_csv(col_types = ColTypes) %>%
suppressWarnings(., classes = "warning")
# Check if attributes file is in .rds format or not and preferentially read this if present
Rdata_test <- most_recent[stringr::str_which(most_recent, paste("attribute", sep = ""))] %>%
stringr::str_detect(pattern = ".rds")
# Find the most-recent .rds attributes file
if(Rdata_test == TRUE){
      # Find the file that does include "attribute" in the name
attr_file <- most_recent[stringr::str_which(most_recent, paste("attribute", sep = ""))] %>%
readRDS()
} # END .rds portion of csv
# Find the most-recent .csv attributes file
if(Rdata_test == FALSE){
      # Find the file that does include "attribute" in the name
attr_file <- most_recent[stringr::str_which(most_recent, paste("attribute", sep = ""))] %>%
readr::read_csv(col_types = readr::cols_only(
dataSource = "c", alternateIdentifier = "c", title = "c", pubDate = "D", dateStamp = "c",
doi = "c", downloadLink = "c", abstract = "c", citations = "c", downloadCitation = "c",
rights = "c"))
# Turn the abstract, citations, and rights into lists
attr_file$abstract <- dplyr::lst(attr_file$abstract)
attr_file$citations <- dplyr::lst(attr_file$citations)
attr_file$rights <- dplyr::lst(attr_file$rights)
} # END .rds portion of csv
# Look for a .xml file
if(any(stringr::str_detect(most_recent,pattern = ".xml")) == TRUE){
# Find and read in the .xml file
xml_file <- most_recent[stringr::str_which(most_recent, paste(".xml", sep = ""))] %>%
xml2::read_xml()
} # END xml look
} #END IF .csv
#### Merge ####
# print user information
writeLines( paste(" - Merging occurrence and attribute files.", "\n",
"Depending on file size, this could take some time...","\n",
sep = ""))
##### dataset and attrs ####
# Merge the datasets
# If the dataset appears as a list, make it not a list.
if(any(class(existingOccurrences) %in% "list")){
existingOccurrences <- existingOccurrences[[1]]
}
bound_data <- dplyr::bind_rows(existingOccurrences %>%
# Convert column types
readr::type_convert(col_types = ColTypeR()),
occ_file)
# Extract the existing attributes file
extAttr_file <- attributes(existingOccurrences)
# Bind the dataSource tables together
extAttr_file$dataSource <- dplyr::bind_rows(extAttr_file$dataSource, attr_file)
# Update this in the bound_data tibble
attr(existingOccurrences, "dataSource") <- extAttr_file$dataSource
# Replace the existing data with the newly-bound data
existingOccurrences <- bound_data
##### xmls ####
# IF the xml file exists, append it to the eml_files
if(exists("xml_file") == TRUE){
# append the new xml_file to the existing ones
existingEMLs <- append(existingEMLs, xml_file)
}
# Combine these files back into a list
existing_data <- list(existingOccurrences, existingEMLs) %>%
setNames(c("Data_WebDL", "eml_files"))
# Return end product and print completion note
writeLines(paste(" - Fin.", sep = "\n"))
# Return the outfile
return(existing_data)
} # COMPLETE formattedCombiner
|
/scratch/gouwar.j/cran-all/cranData/BeeBDC/R/formattedCombiner.R
|
# This function was written by James Dorey to harmonise the names of bees using the Ascher-Orr-Chesshire
# bee taxonomies.
# The function first matches names based on scientificName, then merges the bdc names_clean and
# scientificNameAuthorship columns and matches those, followed by matching to canonical with flags, then canonical.
# In all of these cases, names that are ambiguous at that level are removed so that only confident
# matches are maintained.
# This function was written between the 18th and 20th of May 2022. For questions, please email James
# at jbdorey[at]me.com
#' Harmonise taxonomy of bee occurrence data
#'
#' Uses the Discover Life taxonomy to harmonise bee occurrences and flag those that do not match
#' the checklist. [BeeBDC::harmoniseR()] prefers to use the names_clean columns that is generated
#' by [bdc::bdc_clean_names()]. While this is not required, you may find better results by running
#' that function on your dataset first.
#' This function could be hijacked to service other taxa if a user matched the format of the
#' [BeeBDC::beesTaxonomy()] file.
#'
#' @param path A directory as character. The path to a folder that the output can be saved.
#' @param taxonomy A data frame or tibble. The bee taxonomy to use.
#' Default = [BeeBDC::beesTaxonomy()].
#' @param data A data frame or tibble. Occurrence records as input.
#' @param speciesColumn Character. The name of the column containing species names. Default = "scientificName".
#' @param rm_names_clean Logical. If TRUE then the names_clean column will be removed at the end of
#' this function to help reduce confusion about this column later. Default = TRUE
#' @param checkVerbatim Logical. If TRUE then the verbatimScientificName will be checked as well
#' for species matches. This matching will ONLY be done after harmoniseR has failed for the other
#' name columns. NOTE: this column is *not* first run through `bdc::bdc_clean_names`. Default = FALSE
#' @param stepSize Numeric. The number of occurrences to process in each chunk. Default = 1000000.
#' @param mc.cores Numeric. If > 1, the function will run in parallel
#' using mclapply using the number of cores specified. If = 1 then it will be run using a serial
#' loop. NOTE: Windows machines must use a value of 1 (see ?parallel::mclapply). Additionally,
#' be aware that each thread can use large chunks of memory.
#' Default = 1.
#'
#' @return The occurrences are returned with update taxonomy columns, including: scientificName,
#' species, family, subfamily, genus, subgenus, specificEpithet, infraspecificEpithet, and
#' scientificNameAuthorship. A new column, .invalidName, is also added and is FALSE when the occurrence's
#' name did not match the supplied taxonomy.
#'
#' @importFrom dplyr %>%
#'
#' @seealso [BeeBDC::taxadbToBeeBDC()] to download any taxonomy (of any taxa or of bees) and
#' [BeeBDC::beesTaxonomy()] for the bee taxonomy download.
#'
#' @export
#'
#' @examples
#' # load in the test dataset
#' system.file("extdata", "testTaxonomy.rda", package="BeeBDC") |> load()
#'
#' beesRaw_out <- BeeBDC::harmoniseR(
#' #The path to a folder that the output can be saved
#' path = tempdir(),
#' # The formatted taxonomy file
#' taxonomy = testTaxonomy,
#' data = BeeBDC::beesFlagged,
#' speciesColumn = "scientificName")
#' table(beesRaw_out$.invalidName, useNA = "always")
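#'
#' # For larger datasets, a hedged (not-run) sketch of the chunked and parallel options; the
#' # stepSize and mc.cores values below are arbitrary and mc.cores > 1 is not available on
#' # Windows machines:
#' # beesRaw_out <- BeeBDC::harmoniseR(path = tempdir(), taxonomy = testTaxonomy,
#' #                                   data = BeeBDC::beesFlagged, speciesColumn = "scientificName",
#' #                                   stepSize = 500000, mc.cores = 2)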
harmoniseR <- function(
data = NULL,
path = NULL, #The path to a folder that the output can be saved
taxonomy = BeeBDC::beesTaxonomy(), # The formatted taxonomy file
speciesColumn = "scientificName",
rm_names_clean = TRUE,
checkVerbatim = FALSE,
stepSize = 1000000,
mc.cores = 1
) {
# locally bind variables to the function
. <- id <- validName<-canonical<-canonical_withFlags<-family<-subfamily<-genus<-subgenus<-
species<-infraspecies<-authorship<-taxonomic_status<-flags<-accid<-validName_valid<-
family_valid<-subfamily_valid<-canonical_withFlags_valid<-genus_valid<-subgenus_valid<-
species_valid<-infraspecies_valid<-authorship_valid<-database_id<-names_clean<-
scientificNameAuthorship<-taxonRank<-authorFound<-SciNameAuthorSimple<-
authorSimple<-united_SciName<-verbatimScientificName <- scientificName <- BeeBDC_order <- NULL
# Load required packages
requireNamespace("rlang")
requireNamespace("dplyr")
# Record start time
startTime <- Sys.time()
#### 0.0 Prep ####
##### 0.1 Errors ####
###### a. FATAL errors ####
if(is.null(data)){
stop(" - Please provide an argument for data. I'm a program not a magician.")
}
if(is.null(taxonomy)){
stop(" - Please provide an argument for taxonomy I'm a program not a magician.")
}
if(is.null(path)){
stop(" - Please provide an argument for path I'm a program not a magician.")
}
if(!"verbatimScientificName" %in% colnames(data) & checkVerbatim == TRUE){
stop(paste0(" - If 'checkVerbatim = TRUE', then the verbatimScientificName column must be
present in the data."))
}
#### 1.0 _match columns ####
# Make a synonym index list
writeLines(paste(" - Formatting taxonomy for matching..."))
# save the original column names
OG_colnames <- unique(c("database_id", colnames(data)))
# Save the original number of rows
OG_rowNum <- nrow(data)
##### 1.1 Prepare columns ####
# To make the function more general, allow some column changing internally.
###### a. rename to scientificName ####
# Temporarily rename the speciesColumn to "scientificName" within the function
data <- data %>%
dplyr::rename("scientificName" = tidyselect::any_of(speciesColumn))
###### b. temp names_clean ####
# IF the names_clean column does not exist, temporarily add it to the dataset using the
# scientificName column's data.
if(!"names_clean" %in% colnames(data)){
data <- data %>%
dplyr::mutate(names_clean = scientificName)
message(paste0("The names_clean column was not found and will be temporarily copied from",
" scientificName"))
}
###### c. database_id ####
# If the database_id column isn't in the dataset, then add it for internal use
if(!"database_id" %in% colnames(data)){
data <- data %>%
dplyr::mutate(database_id = paste0("BeeBDC_TempCode_", dplyr::row_number()), .before = 1)
message("The database_idcolumn was not found, making this column with 'BeeBDC_TempCode_'...")
}
###### d. scientificNameAuthorship ####
# If there is no scientificNameAuthorship, make all NA
if(!"scientificNameAuthorship" %in% colnames(data)){
data <- data %>%
dplyr::mutate(scientificNameAuthorship = NA_character_)
message("The scientificNameAuthorship column was not found, making this column full of NAs.")
}
###### e. taxonRank ####
# If there is no taxonRank, make all NA
if(!"taxonRank" %in% colnames(data)){
data <- data %>%
dplyr::mutate(taxonRank = NA_character_)
message("The taxonRank column was not found, making this column full of NAs.")
}
###### f. species ####
# If there is no species, make all NA
if(!"species" %in% colnames(data)){
data <- data %>%
dplyr::mutate(species = scientificName)
message("The species column was not found, filling this column with scientificName.")
}
# Remove non-ambiguous tags
taxonomy <- taxonomy %>%
dplyr::mutate(flags = flags %>%
stringr::str_remove_all("non-ambiguous canonical| non-ambiguous can_wFlags"))
# Add a new column which has the canonical names matched to the synonyms
taxonomy <- taxonomy %>%
dplyr::left_join(x = .,
# left join ONLY the validName, canonical, and canonical_withFlags
y = dplyr::select(taxonomy,
tidyselect::any_of(
c("id", "validName", "canonical", "canonical_withFlags",
"family", "subfamily", "genus", "subgenus", "species",
"infraspecies", "authorship"))),
by = c("accid" = "id"), suffix = c("", "_valid"),
multiple = "all")
  # Now, also duplicate the accepted names into the _valid columns
AccMatched <- taxonomy %>%
# select only the ACCEPTED NAMES
dplyr::filter(taxonomic_status == "accepted") %>%
# duplicate the valid columns into the matched column locations
dplyr::mutate(validName_valid = validName,
canonical_valid = canonical,
canonical_withFlags_valid = canonical_withFlags,
family_valid = family,
subfamily_valid = subfamily,
genus_valid = genus,
subgenus_valid = subgenus,
species_valid = species,
infraspecies_valid = infraspecies,
authorship_valid = authorship)
# Merge these datasets
taxonomy <- taxonomy %>%
# First filter for the reverse of above - SYNONYM NAMES
dplyr::filter(taxonomic_status == "synonym") %>%
# combine
dplyr::bind_rows(AccMatched)
rm(AccMatched)
#### 2.0 Harmonise data ####
writeLines(paste("\n",
" - Harmonise the occurrence data with unambiguous names...", sep = ""))
# Create the parallel-able function
unAmbiguousFunction <- function(data){
##### 2.1 Valid Name ####
###### a. prep synonyms ####
# Filter out the AMBIGUOUS validNames prior to matching
currenttaxonomy <- taxonomy %>%
# REMOVE ambiguous validNames
dplyr::filter(!stringr::str_detect(
# Replace NA in flags with "" to allow matching
tidyr::replace_na(flags, ""),
"ambiguous validName"))
###### b. assign names ####
# Clean up some illegal characters
data$scientificName <- data$scientificName %>%
stringr::str_replace(pattern = "^\"", replacement = "") %>%
stringr::str_replace(pattern = "\"$", replacement = "")
# Match names first with the validName column
occs_21 <- data %>%
dplyr::left_join(currenttaxonomy %>%
dplyr::select(c(id, accid, validName, canonical_withFlags, canonical, validName_valid,
family_valid, subfamily_valid,
canonical_withFlags_valid, genus_valid, subgenus_valid,
species_valid, infraspecies_valid, authorship_valid)),
# Match scientific name with the valid synonym name
by = c("scientificName" = "validName"),
suffix = c("", "_harmon"),
multiple = "all", relationship = "many-to-many")
# # Add a column to express the name-match quality - "high" IF there is a match at this point
# dplyr::mutate(nameQuality = dplyr::if_else(stats::complete.cases(validName_valid),
# "high", "NA")) 3,703
###### c. return Occs ####
# Return the matched data
occs_21 <- occs_21 %>%
dplyr::filter(stats::complete.cases(validName_valid)) # 1,927
##### 2.2 validName_comb ####
# Now we will try and match the valid name by combining the names_clean and scientificNameAuthorship columns
###### a. prep synonyms ####
# For those that did not match, attempt to match them with the Canonical with flags column...
# Filter out the AMBIGUOUS validNames prior to matching
## SAME as 2.1 ##
currenttaxonomy <- taxonomy %>%
# REMOVE ambiguous validNames
dplyr::filter(!stringr::str_detect(
# Replace NA in flags with "" to allow matching
tidyr::replace_na(flags, ""),
"ambiguous validName"))
###### b. assign names ####
# Match names first with the validName column
occs_22 <- data %>%
# remove already-matched names
dplyr::filter(!database_id %in% occs_21$database_id) %>%
# Make a new column by combining names_clean and scientificNameAuthorship
tidyr::unite(col = "united_SciName", names_clean, scientificNameAuthorship, sep = " ",
na.rm = TRUE)
# Match names first with the validName column
occs_22 <- occs_22 %>%
dplyr::left_join(currenttaxonomy %>%
dplyr::select(c(id, accid, validName, canonical_withFlags, canonical, validName_valid,
family_valid, subfamily_valid,
canonical_withFlags_valid, genus_valid, subgenus_valid,
species_valid, infraspecies_valid, authorship_valid)),
# Match scientific name with the valid synonym name
by = c("united_SciName" = "validName"),
suffix = c("", "_harmon"),
multiple = "all", relationship = "many-to-many")
###### c. return Occs ####
# Return the matched data
runningOccurrences <- occs_22 %>%
dplyr::filter(stats::complete.cases(validName_valid) & validName_valid != "NA") %>%
# Bind the previous rows
dplyr::bind_rows(occs_21) # 2,678
# Remove this spent files
rm(occs_21, occs_22)
##### 2.3 canonical_wFlags ####
###### a. prep synonyms ####
# For those that did not match, attempt to match them with the Canonical with flags column...
# Filter out the AMBIGUOUS validNames prior to matching
currenttaxonomy <- taxonomy %>%
# REMOVE ambiguous validNames and can_wFlags
dplyr::filter(!stringr::str_detect(
# Replace NA in flags with "" to allow matching
tidyr::replace_na(flags, ""),
paste("ambiguous validName",
"ambiguous can_wFlags",
sep = "|"))) %>%
# remove the rows where the canonical and canonical_withFlags match
# ONLY matches those with added canonicals flags
dplyr::filter(!canonical == canonical_withFlags)
###### b. assign names ####
# Match names first with the validName column
occs_23 <- data %>%
# remove already-matched names
dplyr::filter(!database_id %in% runningOccurrences$database_id) %>%
dplyr::left_join(currenttaxonomy %>%
dplyr::select(c(id, accid, validName, canonical_withFlags, canonical, validName_valid,
family_valid, subfamily_valid,
canonical_withFlags_valid, genus_valid, subgenus_valid,
species_valid, infraspecies_valid, authorship_valid)),
# Match scientific name with the valid synonym name
by = c("species" = "canonical_withFlags"),
suffix = c("", "_harmon"),
multiple = "all", relationship = "many-to-many")
###### c. return Occs ####
# Return the matched data
runningOccurrences <- occs_23 %>%
dplyr::filter(stats::complete.cases(validName_valid)) %>%
# Bind the previous rows
dplyr::bind_rows(runningOccurrences)
# Remove this spent file
rm(occs_23)
##### 2.4 canonical ####
###### a. prep synonyms ####
    # For those that did not match, attempt to match them with the canonical column...
# Filter out the AMBIGUOUS validNames prior to matching
currenttaxonomy <- taxonomy %>%
# REMOVE ambiguous names
dplyr::filter(!stringr::str_detect(
# Replace NA in flags with "" to allow matching
tidyr::replace_na(flags, ""),
paste("ambiguous validName",
"ambiguous can_wFlags",
"ambiguous canonical",
sep = "|")))
###### b. assign names ####
# Match names first with the validName column
occs_24 <- data %>%
# Keep the unmatched names
dplyr::filter(!database_id %in% runningOccurrences$database_id) %>%
dplyr::left_join(currenttaxonomy %>%
dplyr::select(c(id, accid, validName, canonical_withFlags, canonical,
validName_valid,
family_valid, subfamily_valid,
canonical_withFlags_valid, genus_valid, subgenus_valid,
species_valid, infraspecies_valid, authorship_valid)),
# Match scientific name with the valid synonym name
by = c("names_clean" = "canonical"),
suffix = c("", "_harmon"),
multiple = "all", relationship = "many-to-many")
###### c. return Occs ####
# Return the matched data
runningOccurrences <- occs_24 %>%
dplyr::filter(stats::complete.cases(validName_valid)) %>%
# Bind the previous rows
dplyr::bind_rows(runningOccurrences) %>%
# Make sure no duplicates have snuck in
dplyr::distinct(database_id, .keep_all = TRUE)
# Remove spent file
rm(occs_24)
##### 2.5 sciName_comb ####
# Now we will try and match the valid name by combining the scientificName and scientificNameAuthorship columns
###### a. prep synonyms ####
    # For those that did not match, attempt to match them with the validName column (united names_clean and authorship)...
# Filter out the AMBIGUOUS validNames prior to matching
## SAME as 2.1 ##
currenttaxonomy <- taxonomy %>%
# REMOVE ambiguous validNames
dplyr::filter(!stringr::str_detect(
# Replace NA in flags with "" to allow matching
tidyr::replace_na(flags, ""),
"ambiguous validName"))
###### b. assign names ####
# Match names first with the validName column
occs_25 <- data %>%
# remove already-matched names
dplyr::filter(!database_id %in% runningOccurrences$database_id) %>%
# Make a new column by combining names_clean and scientificNameAuthorship
tidyr::unite(col = "united_SciName", names_clean, scientificNameAuthorship, sep = " ",
na.rm = TRUE)
# Match names first with the validName column
occs_25 <- occs_25 %>%
dplyr::left_join(currenttaxonomy %>%
dplyr::select(c(id, accid, validName, canonical_withFlags, canonical, validName_valid,
family_valid, subfamily_valid,
canonical_withFlags_valid, genus_valid, subgenus_valid,
species_valid, infraspecies_valid, authorship_valid)),
# Match scientific name with the valid synonym name
by = c("united_SciName" = "validName"),
suffix = c("", "_harmon"),
multiple = "all", relationship = "many-to-many")
###### c. return Occs ####
# Return the matched data
runningOccurrences <- occs_25 %>%
dplyr::filter(stats::complete.cases(validName_valid)) %>%
# Bind the previous rows
dplyr::bind_rows(runningOccurrences) %>%
# Make sure no duplicates have snuck in
dplyr::distinct(database_id, .keep_all = TRUE)
# Remove spent file
rm(occs_25)
##### 2.6 No subgenus validName ####
# Match scientificName with validName; remove subgenus from both
###### a. prep synonyms ####
    # For those that did not match, attempt to match scientificName with validName after
    # removing the subgenus from both. Filter out the AMBIGUOUS names prior to matching
currenttaxonomy <- taxonomy %>%
# REMOVE ambiguous names
dplyr::filter(!stringr::str_detect(
# Replace NA in flags with "" to allow matching
tidyr::replace_na(flags, ""),
paste("ambiguous validName",
"ambiguous canonical",
sep = "|")))
###### b. assign names ####
# Match names first with the validName column
occs_26 <- data %>%
# remove already-matched names
dplyr::filter(!database_id %in% runningOccurrences$database_id) %>%
dplyr::mutate(scientificNameMatch = scientificName %>%
# Replace subgenus with nothing
stringr::str_replace("\\([A-Za-z]+\\)", "") %>%
stringr::str_squish())
# Match names first with the validName column
occs_26 <- occs_26 %>%
dplyr::left_join(currenttaxonomy %>%
dplyr::select(c(id, accid, validName, canonical_withFlags, canonical, validName_valid,
family_valid, subfamily_valid,
canonical_withFlags_valid, genus_valid, subgenus_valid,
species_valid, infraspecies_valid, authorship_valid)) %>%
dplyr::mutate(validNameMatch = validName %>%
# Replace subgenus with nothing
stringr::str_replace("\\([A-Za-z]+\\)", "") %>%
stringr::str_squish()),
# Match scientific name with the valid synonym name
by = c("scientificNameMatch" = "validNameMatch"),
suffix = c("", "_harmon"),
multiple = "all", relationship = "many-to-many")
###### c. return Occs ####
# Return the matched data
runningOccurrences <- occs_26 %>%
dplyr::filter(stats::complete.cases(validName_valid)) %>%
# Bind the previous rows
dplyr::bind_rows(runningOccurrences) %>%
# Make sure no duplicates have snuck in
dplyr::distinct(database_id, .keep_all = TRUE)
# Remove spent file
rm(occs_26, currenttaxonomy)
##### 2.7 No subgenus canonical ####
# Match scientificName with canonical; remove subgenus from both
###### a. prep synonyms ####
    # For those that did not match, attempt to match scientificName with canonical after
    # removing the subgenus from both. Filter out the AMBIGUOUS names prior to matching
currenttaxonomy <- taxonomy %>%
# REMOVE ambiguous names
dplyr::filter(!stringr::str_detect(
# Replace NA in flags with "" to allow matching
tidyr::replace_na(flags, ""),
paste("ambiguous validName",
"ambiguous canonical",
sep = "|")))
###### b. assign names ####
# Match names first with the canonical column
occs_27 <- data %>%
# remove already-matched names
dplyr::filter(!database_id %in% runningOccurrences$database_id) %>%
dplyr::mutate(scientificNameMatch = scientificName %>%
# Replace subgenus with nothing
stringr::str_replace("\\([A-Za-z]+\\)", "") %>%
stringr::str_squish())
# Match names first with the canonical column
occs_27 <- occs_27 %>%
dplyr::left_join(currenttaxonomy %>%
dplyr::select(c(id, accid, validName, canonical_withFlags, canonical, validName_valid,
family_valid, subfamily_valid,
canonical_withFlags_valid, genus_valid, subgenus_valid,
species_valid, infraspecies_valid, authorship_valid)) %>%
dplyr::mutate(canonicalMatch = canonical %>%
# Replace subgenus with nothing
stringr::str_replace("\\([A-Za-z]+\\)", "") %>%
stringr::str_squish()),
# Match scientific name with the canonical synonym name
by = c("scientificNameMatch" = "canonicalMatch"),
suffix = c("", "_harmon"),
multiple = "all", relationship = "many-to-many")
###### c. return Occs ####
# Return the matched data
runningOccurrences <- occs_27 %>%
dplyr::filter(stats::complete.cases(validName_valid)) %>%
# Bind the previous rows
dplyr::bind_rows(runningOccurrences) %>%
# Make sure no duplicates have snuck in
dplyr::distinct(database_id, .keep_all = TRUE)
# Remove spent file
rm(occs_27, currenttaxonomy)
return(runningOccurrences)
} # END unAmbiguousFunction
# Run the function
runningOccurrences <- data %>%
# Make a new column with the ordering of rows
dplyr::mutate(BeeBDC_order = dplyr::row_number()) %>%
# Group by the row number and step size
dplyr::group_by(BeeBDC_group = ceiling(BeeBDC_order/stepSize)) %>%
# Split the dataset up into a list by group
dplyr::group_split(.keep = TRUE) %>%
# Run the actual function
parallel::mclapply(., unAmbiguousFunction,
mc.cores = mc.cores
) %>%
# Combine the lists of tibbles
dplyr::bind_rows()
#### 3.0 Ambiguous names ####
writeLines(paste("\n",
" - Attempting to harmonise the occurrence data with ambiguous names...", sep = ""))
ambiguousFunction <- function(data){
##### 3.1 Prepare datasets ####
###### a. prep synonyms ####
# Synonym list of ambiguous names
# Filter TO the AMBIGUOUS validNames prior to matching
ambiguousSynonyms <- taxonomy %>%
# Keep only ambiguous validNames
dplyr::filter(stringr::str_detect(
# Replace NA in flags with "" to allow matching
tidyr::replace_na(flags, ""),
"ambiguous")) %>%
# Remove non-ambiguous matches
dplyr::filter(!stringr::str_detect(
# Replace NA in flags with "" to allow matching
tidyr::replace_na(flags, ""),
"non-")) %>%
# Remove ambiguous validName matches because these are ambiguous even with authorities.
# Perhaps in future these can be matched by geography.
dplyr::filter(!stringr::str_detect(
# Replace NA in flags with "" to allow matching
tidyr::replace_na(flags, ""),
"ambiguous validName")) %>%
# Remove those without authorship
dplyr::filter(!is.na(authorship)) %>%
      # Change the authorship to be easier to match by removing capitals and punctuation
dplyr::mutate(authorSimple = stringr::str_remove_all(authorship,
pattern = "[:punct:]") %>% tolower())
###### b. author in sciName ####
if(!all(is.na(data$scientificNameAuthorship))){
# Create a list of scientificNameAuthorships that can be found in scientificName, where the former is empty
ambiguousAuthorFound <- data %>%
# check only the data without authorship
dplyr::filter(is.na(scientificNameAuthorship)) %>%
# Select only UNDER genus-level IDs
dplyr::filter(taxonRank %in% c("Especie", "forma", "Infrasubspecies", "Race",
"species", "Species", "SPECIES", "subsp.", "subspecies",
"Subspecies", "SUBSPECIES", "syn", "var.", "variety",
"Variety", "VARIETY", NA, "NA")) %>%
# Make a new column with the authorship extracted from scientificName
dplyr::mutate(authorFound = stringr::str_extract(
# Use a simplified scientificName string
string = stringr::str_remove_all(scientificName,
pattern = "[:punct:]") %>% tolower(),
pattern = paste(ambiguousSynonyms$authorSimple, collapse = "|"))) %>%
# Keep only matched names
dplyr::filter(stats::complete.cases(authorFound)) %>%
# Return only the database_id and authorFound for merging...
dplyr::select(tidyselect::all_of(c("database_id", "authorFound")))
# Add the author to those data that were lacking
data <- data %>%
# Add authorFound to original dataset
dplyr::left_join(ambiguousAuthorFound, by = "database_id",multiple = "all") %>%
# If scientificNameAuthorship is empty, use authorFound from ambiguousAuthorFound
dplyr::mutate(scientificNameAuthorship =
dplyr::if_else(is.na(scientificNameAuthorship),
# If missing replace the na with the authorFound
authorFound,
# IF already complete, keep the original
scientificNameAuthorship))
# Remove used data
rm(ambiguousAuthorFound)}
###### c. ambiguous data ####
# Filter occurrence dataset to those with ambiguous names AND authorship values
data_amb <- data %>%
# Keep those with authorship recorded
dplyr::filter(stats::complete.cases(scientificNameAuthorship)) %>%
# Keep those that are in the ambiguous names list
dplyr::filter(scientificName %in% ambiguousSynonyms$validName |
scientificName %in% ambiguousSynonyms$canonical_withFlags |
scientificName %in% ambiguousSynonyms$canonical) %>%
# Simplify scientificNameAuthorship to make easier matches
dplyr::mutate(SciNameAuthorSimple = stringr::str_remove_all(scientificNameAuthorship,
pattern = "[:punct:]") %>% tolower())
##### 3.2 Valid Name ####
###### a. assign names ####
# Match names first with the validName column
runningAmb_occs <- data_amb %>%
# Select only rows with both scientificName and SciNameAuthorSimple
dplyr::filter(stats::complete.cases(scientificName) & stats::complete.cases(SciNameAuthorSimple)) %>%
# Add taxonomy information to the occurrence data.
dplyr::left_join(ambiguousSynonyms %>%
dplyr::select(c(id, accid, validName, canonical_withFlags, canonical, validName_valid,
family_valid, subfamily_valid,
canonical_withFlags_valid, genus_valid, subgenus_valid,
species_valid, infraspecies_valid, authorship_valid, authorSimple)),
# Match scientific name with the valid synonym name
by = c("scientificName" = "validName",
"SciNameAuthorSimple" = "authorSimple"),
suffix = c("", "_harmon"),
multiple = "all",relationship = "many-to-many")
###### b. return Occs ####
# Return the matched data_amb
runningAmb_occs <- runningAmb_occs %>%
dplyr::filter(stats::complete.cases(validName_valid)) # 1,927
##### 3.3 validName_comb ####
# Now we will try and match the valid name by combining the names_clean and scientificNameAuthorship columns
###### a. assign names ####
# Match names first with the validName column
occs_33 <- data_amb %>%
# remove already-matched names
dplyr::filter(!database_id %in% runningAmb_occs$database_id) %>%
# Make a new column by combining names_clean and scientificNameAuthorship
tidyr::unite(col = "united_SciName", names_clean, scientificNameAuthorship, sep = " ",
na.rm = TRUE)
# Match names first with the validName column
occs_33 <- occs_33 %>%
# Select only rows with both united_SciName and SciNameAuthorSimple
dplyr::filter(stats::complete.cases(united_SciName) & stats::complete.cases(SciNameAuthorSimple)) %>%
# Add taxonomy information to the occurrence data.
dplyr::left_join(ambiguousSynonyms %>%
dplyr::select(c(id, accid, validName, canonical_withFlags, canonical, validName_valid,
family_valid, subfamily_valid,
canonical_withFlags_valid, genus_valid, subgenus_valid,
species_valid, infraspecies_valid, authorship_valid, authorSimple)),
# Match scientific name with the valid synonym name
by = c("united_SciName" = "validName",
"SciNameAuthorSimple" = "authorSimple"),
suffix = c("", "_harmon"),
multiple = "all", relationship = "many-to-many")
###### b. return Occs ####
# Return the matched data_amb
runningAmb_occs <- occs_33 %>%
dplyr::filter(stats::complete.cases(validName_valid)) %>%
# Bind the previous rows
dplyr::bind_rows(runningAmb_occs) # 2,678
# Remove this spent files
rm(occs_33)
##### 3.4 canonical_wFlags ####
###### a. prep datasets ####
# Synonym list of ambiguous names
# Filter TO the AMBIGUOUS validNames prior to matching
syns_34 <- ambiguousSynonyms %>%
# remove the rows where the canonical and canonical_withFlags match
dplyr::filter(!canonical == canonical_withFlags)
###### b. assign names ####
# Match names first with the validName column
occs_34 <- data_amb %>%
# remove already-matched names
dplyr::filter(!database_id %in% runningAmb_occs$database_id) %>%
# Select only rows with both species and SciNameAuthorSimple
dplyr::filter(stats::complete.cases(species) & stats::complete.cases(SciNameAuthorSimple)) %>%
# Add taxonomy information to the occurrence data.
dplyr::left_join(syns_34 %>%
dplyr::select(c(id, accid, validName, canonical_withFlags, canonical, validName_valid,
family_valid, subfamily_valid,
canonical_withFlags_valid, genus_valid, subgenus_valid,
species_valid, infraspecies_valid, authorship_valid, authorSimple)),
# Match scientific name with the valid synonym name
by = c("species" = "canonical_withFlags",
"SciNameAuthorSimple" = "authorSimple"),
suffix = c("", "_harmon"),
multiple = "all", relationship = "many-to-many")
###### c. return Occs ####
# Return the matched data_amb
runningAmb_occs <- occs_34 %>%
dplyr::filter(stats::complete.cases(validName_valid)) %>%
# Bind the previous rows
dplyr::bind_rows(runningAmb_occs)
# Remove this spent file
rm(occs_34, syns_34)
##### 3.5 canonical ####
###### b. assign names ####
# Match names first with the validName column
occs_35 <- data_amb %>%
# Keep the unmatched names
dplyr::filter(!database_id %in% runningAmb_occs$database_id) %>%
# Select only rows with both names_clean and SciNameAuthorSimple
dplyr::filter(stats::complete.cases(names_clean) & stats::complete.cases(SciNameAuthorSimple)) %>%
# Add taxonomy information to the occurrence data.
dplyr::left_join(ambiguousSynonyms %>%
dplyr::select(c(id, accid, validName, canonical_withFlags, canonical, validName_valid,
family_valid, subfamily_valid,
canonical_withFlags_valid, genus_valid, subgenus_valid,
species_valid, infraspecies_valid, authorship_valid, authorSimple)),
# Match scientific name with the valid synonym name
by = c("names_clean" = "canonical",
"SciNameAuthorSimple" = "authorSimple"),
suffix = c("", "_harmon"),
multiple = "all", relationship = "many-to-many")
###### c. return Occs ####
# Return the matched data_amb
runningAmb_occs <- occs_35 %>%
dplyr::filter(stats::complete.cases(validName_valid)) %>%
# Bind the previous rows
dplyr::bind_rows(runningAmb_occs)%>%
# Make sure no duplicates have snuck in
dplyr::distinct(database_id, .keep_all = TRUE)
# Remove spent file
rm(occs_35)
##### 3.6 validName_comb ####
# Now we will try and match the valid name by combining the names_clean and scientificNameAuthorship columns
###### a. assign names ####
# Match names first with the validName column
occs_36 <- data_amb %>%
# remove already-matched names
dplyr::filter(!database_id %in% runningAmb_occs$database_id) %>%
# Make a new column by combining names_clean and scientificNameAuthorship
tidyr::unite(col = "united_SciName", names_clean, scientificNameAuthorship, sep = " ",
na.rm = TRUE)
# Match names first with the validName column
occs_36 <- occs_36 %>%
# Select only rows with both united_SciName and SciNameAuthorSimple
dplyr::filter(stats::complete.cases(united_SciName) & stats::complete.cases(SciNameAuthorSimple)) %>%
# Add taxonomy information to the occurrence data.
dplyr::left_join(ambiguousSynonyms %>%
dplyr::select(c(id, accid, validName, canonical_withFlags, canonical, validName_valid,
family_valid, subfamily_valid,
canonical_withFlags_valid, genus_valid, subgenus_valid,
species_valid, infraspecies_valid, authorship_valid, authorSimple)),
# Match scientific name with the valid synonym name
by = c("united_SciName" = "validName",
"SciNameAuthorSimple" = "authorSimple"),
suffix = c("", "_harmon"),
multiple = "all", relationship = "many-to-many")
###### b. return Occs ####
# Return the matched data_amb
runningAmb_occs <- occs_36 %>%
dplyr::filter(stats::complete.cases(validName_valid)) %>%
# Bind the previous rows
dplyr::bind_rows(runningAmb_occs) # 2,678
# Remove this spent files
rm(occs_36)
##### 3.7 No subgenus validName ####
# Match scientificName with validName; remove subgenus from both
###### a. assign names ####
# Match names first with the validName column
occs_37 <- data_amb %>%
# remove already-matched names
dplyr::filter(!database_id %in% runningAmb_occs$database_id) %>%
dplyr::mutate(scientificNameMatch = scientificName %>%
# Replace subgenus with nothing
stringr::str_replace("\\([A-Za-z]+\\)", "") %>%
stringr::str_squish())
occs_37 <- occs_37 %>%
dplyr::left_join(ambiguousSynonyms %>%
dplyr::select(c(id, accid, validName, canonical_withFlags, canonical,
validName_valid, family_valid, subfamily_valid,
canonical_withFlags_valid, genus_valid, subgenus_valid,
species_valid, infraspecies_valid, authorship_valid)) %>%
dplyr::mutate(validNameMatch = validName %>%
# Replace subgenus with nothing
stringr::str_replace("\\([A-Za-z]+\\)", "") %>%
stringr::str_squish()),
# Match scientific name with the valid synonym name
by = c("scientificNameMatch" = "validNameMatch"),
suffix = c("", "_harmon"),
multiple = "all", relationship = "many-to-many")
###### b. return Occs ####
# Return the matched data_amb
runningAmb_occs <- occs_37 %>%
dplyr::filter(stats::complete.cases(validName_valid)) %>%
# Bind the previous rows
dplyr::bind_rows(runningAmb_occs)
# Remove this spent files
rm(occs_37)
##### 3.8 No subgenus canonical ####
# Match scientificName with canonical; remove subgenus from both
###### a. assign names ####
# Match names first with the canonical column
occs_38 <- data_amb %>%
# remove already-matched names
dplyr::filter(!database_id %in% runningAmb_occs$database_id) %>%
dplyr::mutate(scientificNameMatch = scientificName %>%
# Replace subgenus with nothing
stringr::str_replace("\\([A-Za-z]+\\)", "") %>%
stringr::str_squish())
occs_38 <- occs_38 %>%
dplyr::left_join(ambiguousSynonyms %>%
dplyr::select(c(id, accid, validName, canonical_withFlags, canonical,
validName_valid, family_valid, subfamily_valid,
canonical_withFlags_valid, genus_valid, subgenus_valid,
species_valid, infraspecies_valid, authorship_valid)) %>%
dplyr::mutate(canonicalMatch = canonical %>%
# Replace subgenus with nothing
stringr::str_replace("\\([A-Za-z]+\\)", "") %>%
stringr::str_squish()),
# Match scientific name with the valid synonym name
by = c("scientificNameMatch" = "canonicalMatch"),
suffix = c("", "_harmon"),
multiple = "all", relationship = "many-to-many")
###### b. return Occs ####
# Return the matched data_amb
runningAmb_occs <- occs_38 %>%
dplyr::filter(stats::complete.cases(validName_valid)) %>%
# Bind the previous rows
dplyr::bind_rows(runningAmb_occs)
# Remove this spent files
rm(occs_38)
return(runningAmb_occs)
} # END ambiguousFunction
# Run the function
runningAmb_occs <- data %>%
# Make a new column with the ordering of rows
dplyr::mutate(BeeBDC_order = dplyr::row_number()) %>%
# Group by the row number and step size
dplyr::group_by(BeeBDC_group = ceiling(BeeBDC_order/stepSize)) %>%
# Split the dataset up into a list by group
dplyr::group_split(.keep = TRUE) %>%
# Run the actual function
parallel::mclapply(., ambiguousFunction,
mc.cores = mc.cores
) %>%
# Combine the lists of tibbles
dplyr::bind_rows()
##### 3.6 Combine 2.x & 3.x ####
# Merge the results from 2.x and 3.x
runningOccurrences <- runningOccurrences %>%
# Remove the ambiguous data from the prior-matched data
dplyr::filter(!database_id %in% runningAmb_occs$database_id) %>%
# Add in the ambiguous data again.
dplyr::bind_rows(runningAmb_occs)
# Remove the spent runningAmb_occs data
rm(runningAmb_occs)
#### 4.0 verbatimScientificName ####
if(checkVerbatim == TRUE){
writeLines(paste0("checkVerbatim = TRUE. Checking the verbatimScientificName column..."))
###### 4.1 failedMatches ####
# Find the data that did not match
failedMatches <- data %>%
# Remove the matched names from the OG dataset
dplyr::filter(!database_id %in% runningOccurrences$database_id) %>%
# Move the verbatimScientificName to scientificName
dplyr::mutate(scientificName = verbatimScientificName)
#### 4.2 Run unAmbiguous names ####
# Run the function
runningOccurrences_verb <- failedMatches %>%
# Make a new column with the ordering of rows
dplyr::mutate(BeeBDC_order = dplyr::row_number()) %>%
# Group by the row number and step size
dplyr::group_by(BeeBDC_group = ceiling(BeeBDC_order/stepSize)) %>%
# Split the dataset up into a list by group
dplyr::group_split(.keep = TRUE) %>%
# Run the actual function
parallel::mclapply(., unAmbiguousFunction,
mc.cores = mc.cores
) %>%
# Combine the lists of tibbles
dplyr::bind_rows()
#### 4.3 Run ambiguous names ####
# Run the function
runningAmb_occs_verb <- failedMatches %>%
# Make a new column with the ordering of rows
dplyr::mutate(BeeBDC_order = dplyr::row_number()) %>%
# Group by the row number and step size
dplyr::group_by(BeeBDC_group = ceiling(BeeBDC_order/stepSize)) %>%
# Split the dataset up into a list by group
dplyr::group_split(.keep = TRUE) %>%
# Run the actual function
parallel::mclapply(., ambiguousFunction,
mc.cores = mc.cores
) %>%
# Combine the lists of tibbles
dplyr::bind_rows()
#### 4.4 Combine 4.2-.3 ####
runningOccurrences_verb <- runningOccurrences_verb %>%
# Remove the ambiguous data from the prior-matched data
dplyr::filter(!database_id %in% runningAmb_occs_verb$database_id) %>%
# Add in the ambiguous data again.
dplyr::bind_rows(runningAmb_occs_verb)
#### 4.5 Combine 3.6 & 4.4 ####
# Merge the results from 2.x and 3.x
runningOccurrences <- runningOccurrences %>%
# Remove the ambiguous data from the prior-matched data
dplyr::filter(!database_id %in% runningOccurrences_verb$database_id) %>%
# Add in the ambiguous data again.
dplyr::bind_rows(runningOccurrences_verb)
# Remove the spent runningAmb_occs data
rm(runningOccurrences_verb, runningAmb_occs_verb)
}
#### 5.0 Merge ####
writeLines(" - Formatting merged datasets...")
# merge datasets
runningOccurrences <- runningOccurrences %>%
# Put the scientific name into a new column called verbatimScientificName
dplyr::mutate(verbatimScientificName = scientificName) %>%
# select the columns we want to keep
dplyr::select( c(tidyselect::any_of(OG_colnames), validName_valid,
verbatimScientificName,
family_valid, subfamily_valid,
canonical_withFlags_valid, genus_valid, subgenus_valid,
species_valid, infraspecies_valid, authorship_valid)) %>%
# rename validName_valid to scientificName and place it where it used to sit.
dplyr::mutate(scientificName = validName_valid, .after = database_id) %>%
# Add in the other taxonomic data
dplyr::mutate(species = canonical_withFlags_valid,
family = family_valid,
subfamily = subfamily_valid,
genus = genus_valid,
subgenus = subgenus_valid,
specificEpithet = species_valid,
infraspecificEpithet = infraspecies_valid,
scientificNameAuthorship = authorship_valid,
.after = scientificName) %>%
# Remove extra columns
dplyr::select(!c(canonical_withFlags_valid, family_valid, subfamily_valid, genus_valid,
subgenus_valid, species_valid, infraspecies_valid, authorship_valid,
validName_valid)) %>%
# Add the .invalidName columns as TRUE (not flagged)
dplyr::mutate(.invalidName = TRUE)
##### 5.1 User output ####
nMatchedRows <- nrow(runningOccurrences)
nUnmatchedRows <- nrow(data) - nrow(runningOccurrences)
###### a. failedMatches ####
# Find the data that did not match
failedMatches <- data %>%
# Remove the matched names from the OG dataset
dplyr::filter(!database_id %in% runningOccurrences$database_id) %>%
    # Add the .invalidName column as FALSE (flagged as not matching the taxonomy)
dplyr::mutate(.invalidName = FALSE)
# Remove this spent file
rm(data)
###### b. Add column ####
runningOccurrences <- runningOccurrences %>%
# Bind the failed matches
dplyr::bind_rows(failedMatches) %>%
# Make sure no duplicates have snuck in
dplyr::distinct(database_id, .keep_all = TRUE)
###### c. return columns states ####
  # Return the speciesColumn name to its original state
names(runningOccurrences)[names(runningOccurrences) == "scientificName"] <- speciesColumn
if(rm_names_clean == TRUE){
message("Removing the names_clean column...")
runningOccurrences <- runningOccurrences %>%
dplyr::select(!tidyselect::any_of("names_clean"))
}
# Cut down the failed list...
# failedMatches <- failedMatches %>%
# dplyr::select(tidyselect::any_of("taxonRank")) %>%
# dplyr::filter(!taxonRank %in% c("Especie", "forma", "Infrasubspecies", "Race",
# "species", "Species", "SPECIES", "subsp.", "subspecies",
# "Subspecies", "SUBSPECIES", "syn", "var.", "variety",
# "Variety", "VARIETY"))
###### d. output ####
writeLines(paste(
" - We matched valid names to ",
format(sum(runningOccurrences$.invalidName == TRUE), big.mark = ","), " of ",
format(OG_rowNum, big.mark = ","), " occurrence records. This leaves a total of ",
format(sum(runningOccurrences$.invalidName == FALSE), big.mark = ","), " unmatched occurrence records.",
# " Of the unmatched records, approximately ",
# format( nrow(failedMatches), big.mark = ","),
# " are only identified to genus rank or higher.",
sep = ""))
writeLines(paste("\nharmoniseR:"))
message(paste(format(sum(runningOccurrences$.invalidName == FALSE), big.mark = ",")))
writeLines(paste(
"records were flagged.\nThe column, '.invalidName' was added to the database.\n"))
message(paste0(
" - We updated the following columns: ", speciesColumn,", species, family, subfamily, genus, subgenus, ",
"specificEpithet, infraspecificEpithet, and scientificNameAuthorship. ",
"The previous ",speciesColumn," column was converted to verbatimScientificName"
))
# End time message
endTime <- Sys.time()
message(paste(
" - Completed in ",
round(difftime(endTime, startTime), digits = 2 ),
" ",
units(round(endTime - startTime, digits = 2)),
sep = ""))
# Return this file
return(runningOccurrences)
}
|
/scratch/gouwar.j/cran-all/cranData/BeeBDC/R/harmoniseR.R
|
# This function was written by James B Dorey on the 7th of September 2022.
# It aims to match up database_id numbers applied between different runs by matching the
# current and prior runs and a list of columns to find matches by. It finally matches the
# remaining occurrences by merging on all columns.
# For help, please contact jbdorey[at]me.com
#' Attempt to match database_ids from a prior run
#'
#' This function attempts to match database_ids from a prior bdc or BeeBDC run in order to keep
#' this column somewhat consistent between iterations. However, not all records contain sufficient
#' information for this to work flawlessly.
#'
#' @param currentData A data frame or tibble. The NEW occurrence records as input.
#' @param priorData A data frame or tibble. The PRIOR occurrence records as input.
#' @param matchBy A list of character vectors. Each vector contains the columns to iteratively compare.
#' @param completeness_cols A character vector. The columns to check for completeness, arrange,
#' and assign the relevant prior database_id.
#' @param excludeDataset A character vector. The dataSources that are to be excluded from data
#' matching. These should be static dataSources from minor providers.
#'
#' @return The input data frame is returned with an updated database_id column that shows the
#' database_ids as in priorData where they could be matched. Additionally, a column called
#' idContinuity is returned where TRUE indicates a match to a prior database_id and FALSE
#' indicates that a new database_id was assigned.
#'
#' @export
#'
#' @importFrom dplyr %>%
#' @importFrom stats complete.cases
#' @importFrom dplyr desc across
#'
#' @examples
#' # Get the example data
#' data("beesRaw", package = "BeeBDC")
#' # Which datasets are static and should be excluded from matching?
#' excludeDataset <- c("BMin", "BMont", "CAES", "EaCO", "Ecd", "EcoS",
#' "Gai", "KP", "EPEL", "USGS", "FSCA", "SMC", "Bal", "Lic", "Arm", "BBD",
#' "MEPB")
#' # Match the data to itself just as an example of running the code.
#' beesRaw_out <- idMatchR(
#' currentData = beesRaw,
#' priorData = beesRaw,
#' # First matches will be given preference over later ones
#' matchBy = dplyr::lst(c("gbifID"),
#' c("catalogNumber", "institutionCode", "dataSource"),
#' c("occurrenceID", "dataSource"),
#' c("recordId", "dataSource"),
#' c("id"),
#' c("catalogNumber", "institutionCode")),
#' # You can exclude datasets from prior by matching their prefixes - before the first underscore:
#' excludeDataset = excludeDataset)
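#'
#' # A hedged (not-run) variant: the same matching, but explicitly stating which columns rank
#' # record completeness when assigning prior database_ids. The four columns below are simply
#' # the function's own defaults, repeated for illustration.
#' # beesRaw_out <- idMatchR(currentData = beesRaw, priorData = beesRaw,
#' #                         matchBy = dplyr::lst(c("gbifID"), c("occurrenceID", "dataSource")),
#' #                         completeness_cols = c("decimalLatitude", "decimalLongitude",
#' #                                               "scientificName", "eventDate"),
#' #                         excludeDataset = excludeDataset)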
idMatchR <- function(
currentData = NULL,
priorData = NULL,
matchBy = NULL,
completeness_cols = NULL,
excludeDataset = NULL){
# locally bind variables to the function
dataSource <- completeness <- database_id <- . <- currentConcat <- dataSourceShort <-
database_id_matched <- idContinuity <- databaseName <- database_id_current <-
databaseNum <- missingNum <- database_id_new <- databaseNum_current <- NULL
requireNamespace("dplyr")
#### 0.0 Prep ####
##### 0.1 Errors ####
###### a. fatal ####
if(is.null(currentData)){
stop("Please proivde a 'currentData' input.")
}
if(is.null(priorData)){
stop("Please proivde a 'priorData' input.")
}
if(is.null(matchBy)){
stop("Please proivde a 'matchBy' input or inputs.")
}
###### b. Warnings ####
if(is.null(completeness_cols)){
message(paste("Warning message: \n",
" - No completeness_cols provided. Using default of: ",
"c('decimalLatitude', 'decimalLongitude', 'scientificName', and 'eventDate')",
sep=""))
completeness_cols = c("decimalLatitude", "decimalLongitude",
"scientificName", "eventDate")
}
##### 0.2 Rm datasets ####
# Change the dataSource for matching
if(!is.null(excludeDataset)){
priorData <- priorData %>%
# Remove all text after the first "_"
dplyr::mutate(dataSource = stringr::str_remove(dataSource, "_.*")) %>%
# Remove the datasets
dplyr::filter(!dataSource %in% excludeDataset)
}
##### 0.3 Format data ####
# First save a version of the currentData to keep and return
returnData <- currentData
# Save a count of priorData rows
priorRowCount <- nrow(priorData)
###### a. dataSource ####
# If the user is matching by DataSource, then simplify that column to only the over-arching
# source.
if(any(stringr::str_detect(string = unlist(matchBy), pattern = "dataSource"))){
# PRIOR dataset - Only run if Rm datasets isn't running already
if(is.null(excludeDataset)){
priorData <- priorData %>%
# Remove all text after the first "_"
dplyr::mutate(dataSource = stringr::str_remove(dataSource, "_.*"))
}
# CURRENT dataset
currentData <- currentData %>%
# Remove all text after the first "_"
dplyr::mutate(dataSource = stringr::str_remove(dataSource, "_.*"))
}
###### b. simplify by select ####
# Only select the columns that are called by the function
priorData <- priorData %>%
# Keep only the columns called for and the database_id
dplyr::select(tidyselect::all_of(c("database_id", unique(unlist(matchBy), completeness_cols))))
currentData <- currentData %>%
# Keep only the columns called for and the database_id
dplyr::select(tidyselect::all_of(c("database_id", unique(unlist(matchBy), completeness_cols))))
##### 0.4 Completeness and arrange ####
###### a. completeness ####
# Get the sum of the complete.cases of four important fields. Preference will be given to keeping
# the most-complete records
writeLines(paste(
" - Generating a basic completeness summary from the ",
paste(completeness_cols, collapse = ", "), " columns.","\n",
"This summary is simply the sum of complete.cases in each column. It ranges from zero to the N",
" of columns. This will be used to sort duplicate rows and select the most-complete rows.",
sep = ""
))
priorData <- priorData %>%
dplyr::rowwise() %>%
# Create a new column called "completeness" where higher values are more-complete
    dplyr::mutate(completeness = sum(!is.na(dplyr::c_across(tidyselect::any_of(completeness_cols))))) %>%
dplyr::ungroup() %>%
# Arrange so that the most-complete are on top. This might be overkill.
dplyr::arrange( desc(completeness))
gc()
#### 1.0 loop ####
writeLines(" - Starting core loop...")
# Set up a loop dataframe to enter into
loopDF <- dplyr::tibble()
# Create a dataset to put unique values into
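  # The loop below works through the matchBy list in order. After each iteration, the records that
  # were matched are removed from both priorData and currentData (see 1.3), so earlier (higher
  # priority) matchBy combinations always take precedence over later ones.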
for(i in 1:length(matchBy)){
# Select the ith CustomComparisons to match with
currentMatch <- matchBy[[i]]
##### 1.1 single input ####
if(length(currentMatch) == 1){
matched <- priorData %>%
# Remove NA values and get distinct
tidyr::drop_na(tidyselect::all_of(currentMatch)) %>%
dplyr::distinct(dplyr::across(tidyselect::all_of(currentMatch)),
.keep_all = TRUE) %>%
        # (No concatenation column is needed when matching on a single column)
# JOIN datasets
dplyr::left_join(.,
# FORMAT MATCH
currentData %>% dplyr::select(
tidyselect::all_of(c("database_id", currentMatch))) %>%
# Remove NA values and get distinct
tidyr::drop_na(tidyselect::all_of(currentMatch)) %>%
dplyr::distinct(dplyr::across(tidyselect::all_of(currentMatch)),
.keep_all = TRUE),
by = currentMatch,
suffix = c("", "_current")) %>%
# Extract only the matched ids
dplyr::select(tidyselect::any_of(c("database_id", "database_id_current"))) %>%
# Remove empty matches
tidyr::drop_na()
# User output
writeLines(paste0(" - we matched ",
format(nrow(matched), big.mark = ","),
" records using ",
paste0(currentMatch, collapse = ", "), "."))
# Merge with loopDF
loopDF <- matched %>%
dplyr::bind_rows(loopDF)
} # End single IF statement
#### 1.2 multiple inputs ####
if(length(currentMatch) > 1){
matched <- priorData %>%
# Remove NA values and get distinct
tidyr::drop_na(tidyselect::all_of(currentMatch)) %>%
dplyr::distinct(dplyr::across(tidyselect::all_of(currentMatch)),
.keep_all = TRUE) %>%
# Add a new column with these values concatenated,
tidyr::unite(., tidyselect::all_of(currentMatch), col = currentConcat) %>%
# JOIN datasets
dplyr::left_join(.,
# FORMAT MATCH
currentData %>% dplyr::select(
tidyselect::all_of(c("database_id", currentMatch))) %>%
# Remove NA values and get distinct
tidyr::drop_na(tidyselect::all_of(currentMatch)) %>%
dplyr::distinct(dplyr::across(tidyselect::all_of(currentMatch)),
.keep_all = TRUE) %>%
# Add a new column with these values concatenated,
tidyr::unite(., tidyselect::all_of(currentMatch), col = currentConcat),
by = "currentConcat",
suffix = c("", "_current")) %>%
# Extract only the matched ids
dplyr::select(tidyselect::any_of(c("database_id", "database_id_current"))) %>%
# Remove empty matches
tidyr::drop_na()
# User output
writeLines(paste0(" - we matched ",
format(nrow(matched), big.mark = ","),
" records using ", paste0(currentMatch, collapse = ", "), "."))
# Merge with loopDF
loopDF <- matched %>%
dplyr::bind_rows(loopDF)
} # End multiple IF statement
#### 1.3 Rm matches ####
# Remove the occurrences that were just matched before the next iteration
priorData <- priorData %>%
dplyr::filter(!database_id %in% loopDF$database_id)
currentData <- currentData %>%
dplyr::filter(!database_id %in% loopDF$database_id_current)
# User output
writeLines(paste0("This leaves ",
format(nrow(priorData), big.mark = ","),
" unmatched data in the priorData file"))
}# END LOOP
# Remove data no longer needed
rm(priorData, currentData)
#### 2.0 Data return ####
writeLines(" - Combining ids and assigning new ones where needed...")
# Add a column to that matched data:
# idContinuity, that shows that these ids are continuous with prior versions
loopDF <- loopDF %>%
dplyr::mutate(idContinuity = TRUE)
# Change the database_id column for return
checkedData <- returnData %>%
    dplyr::select(tidyselect::all_of(c("database_id", "dataSource"))) %>%
# Filter to only the examined dataSources
# Remove all text after the first "_"
dplyr::mutate(dataSourceShort = stringr::str_remove(dataSource, "_.*")) %>%
# Add the new database_id column, while removing the old one (database_id_current)
dplyr::left_join(loopDF, by = c("database_id" = "database_id_current"),
suffix = c("", "_matched"), keep = FALSE) %>%
    # Rename the existing [current] database_id column
dplyr::rename(database_id_current = database_id) %>%
# Move this column to the start
dplyr::relocate(database_id_matched) %>%
# Rename the column
dplyr::rename(database_id = database_id_matched) %>%
# Highlight those records that might not be id-continuous
dplyr::mutate(idContinuity = dplyr::if_else(is.na(idContinuity),
FALSE, TRUE)) %>%
# Add a new column with the database_id NAME
dplyr::mutate(databaseName = stringr::str_extract(
string = database_id, pattern = "[a-zA-Z_]+")) %>%
# Fix those that failed to match
dplyr::mutate(databaseName = dplyr::if_else(
is.na(databaseName),
stringr::str_extract(
string = database_id_current, pattern = "[a-zA-Z_]+"),
databaseName)) %>%
# Add a new column with the database_id NUMBER
dplyr::mutate(databaseNum = stringr::str_extract(
string = database_id, "[0-9]+") %>% as.numeric(),
# Get a column with the current numbers to start the MAX count from
databaseNum_current = database_id_current %>% stringr::str_extract("[0-9]+") %>%
as.numeric()) %>%
# Group by databaseName
dplyr::group_by(databaseName) %>%
# Sort
dplyr::arrange(databaseNum, .by_group = TRUE)
# Get new numbers for the new data
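  # For occurrences that could not be matched to a prior database_id, new ids are minted within
  # each databaseName group: numbering continues from the maximum id number already present in the
  # current data for that group, so newly assigned ids should not collide with matched ones.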
newData <- checkedData %>%
# Apply by group
dplyr::group_by(databaseName) %>%
# Add a new column with the databaseNum numbers
dplyr::mutate(missingNum = databaseNum) %>%
# If the group is entirely unmatched, assign the first row in that group to equal 1
dplyr::mutate(missingNum = dplyr::if_else(dplyr::row_number() == 1 & is.na(missingNum[[1]]),
1,
missingNum)) %>%
# Fill down the missing numbers starting from 1+ the maximum within databaseName group.
dplyr::mutate(missingNum = dplyr::if_else(is.na(missingNum),
(max(databaseNum_current, na.rm = TRUE)+
dplyr::row_number()-sum(complete.cases(missingNum))
),
missingNum)) %>%
# Update the database_id column
dplyr::mutate(database_id = stringr::str_c(databaseName, missingNum)) %>%
# Filter for only NA values on the databaseNum column
dplyr::filter(is.na(databaseNum))
# Now combine
checkedData <- checkedData %>%
# First, remove those newData from the checkedData
dplyr::filter(!database_id_current %in% newData$database_id_current) %>%
# now re-combine
dplyr::bind_rows(newData) %>%
# Remove groupings
dplyr::ungroup() %>%
dplyr::distinct(database_id, .keep_all = TRUE) %>%
# Remove the excludeDataset
dplyr::filter(!dataSourceShort %in% excludeDataset)
# User output
writeLines(paste0(" - We matched a total of ",
format(sum(complete.cases(checkedData$databaseNum)), big.mark = ","),
" database_id numbers. We then assigned new database_id numbers to ",
format(sum(complete.cases(checkedData$missingNum)), big.mark = ","),
" unmatched occurrences."
))
# Merge the new databse IDs with the returnData
returnData <- returnData %>%
# Join the checkedData dataset
dplyr::left_join(checkedData %>% dplyr::select(
tidyselect::any_of(c("database_id", "database_id_current", "idContinuity"))),
by = c("database_id" = "database_id_current"),
suffix = c("", "_new")) %>%
# Update the database_id column to include the new database_ids, or the old ones where
# new ones aren't available.
dplyr::mutate(database_id = dplyr::if_else(is.na(database_id_new),
# If from an excluded dataset,
# keep existing database_id
database_id,
# Otherwise Assign the newly matched id
database_id_new)) %>%
dplyr::select(!database_id_new)
# Return the data
return(returnData )
}# END function
|
/scratch/gouwar.j/cran-all/cranData/BeeBDC/R/idMatchR.R
|
#### importOccurrences ####
#' Imports the most-recent repoMerge data
#'
#' Looks for and imports the most-recent version of the occurrence data created by the [BeeBDC::repoMerge()]
#' function.
#'
#' @param path A directory as a character. The directory to recursively look in for the above data.
#' @param fileName Character. A String of text to look for the most-recent dataset.
#' Default = "^BeeData_". Find faults by modifying [BeeBDC::fileFinder()]
#' and logic-checking the file that's found.
#'
#' @return A list with a data frame of merged occurrence records, "Data_WebDL", and a list of EML
#' files contained in "eml_files".
#' @export
#'
#' @importFrom dplyr %>%
#'
#' @examples
#' \dontrun{
#' DataImp <- importOccurrences(path = DataPath)
#' }
importOccurrences <- function(path = path,
                              fileName = "^BeeData_" # occurrence file name; if not provided, R will search to match "^BeeData_"
){
. <- NULL
# Load required packages
requireNamespace("dplyr")
requireNamespace("lubridate")
  # if the fileName is not provided, fall back to the documented default
  if(missing(fileName)){
    fileName = "^BeeData_"
  }
#### Find files ####
# Find all of the previously-produced data files
most_recent <- BeeBDC::fileFinder(path = path, fileName = fileName)
# Return information to user
writeLines(paste(" - Great, R has detected file(s), including... ", "\n",
paste(most_recent, collapse = "\n") ), sep = "")
#### Detect format ####
# Find the format of the most-recent files. This could potentially be .csv or .rds
# TRUE IF .rds are present (that are not attribute files):
(rdata_query <- any(most_recent[stringr::str_count(most_recent, pattern = "([aA]ttribute)|\\.rds") > 0] %>%
stringr::str_detect(., pattern = "([aA]ttribute)", negate = TRUE) == TRUE))
# TRUE IF .csv data are present:
(csv_query <- any(stringr::str_detect(most_recent, "([aA]ttributes)")) == TRUE &&
any(stringr::str_detect(most_recent, ".*\\.csv{1}")) == TRUE)
#### Both present ####
# IF their is a complete .rds file among the most-recent files AND a .csv version...
if(rdata_query == TRUE && csv_query == TRUE){
writeLines(paste("\n",
" - Oh boy, it looks like there are both .csv and .rds versions of your data!",
"\n", "R will preferentially use the .rds file.", "\n",
"NOTE: the .rds file can be very slow to load"))
# File to read:
fileLoc <- most_recent[intersect(grep(".*\\.rds{1}", most_recent),
grep("([aA]ttributes)", most_recent, invert = TRUE))]
# Read in the .rds file
writeLines(paste("Reading in ", fileLoc, "...", sep = ""))
# Find the index of the string that matches and select that to read in
occurDF <- fileLoc %>%
readRDS()
} #END IF both
#### RData present ####
# IF their is ONLY a complete .rds file among the most-recent files...
if(rdata_query == TRUE && csv_query == FALSE){
writeLines(paste(" - .rds export version found. Loading this file...", "\n",
"NOTE: the .rds file can be very slow to load"))
# File to read:
fileLoc <- most_recent[intersect(grep(".*\\.rds{1}", most_recent),
grep("([aA]ttributes)", most_recent, invert = TRUE))]
# Read in the .rds file
writeLines(paste("Reading in ",
fileLoc,
"...", sep = ""))
# Find the index of the string that matches and select that to read in
occurDF <- as.character(fileLoc) %>%
readRDS()
writeLines("Completed reading in .rds file")
} #END IF .rds
#### CSV present ####
# IF their is ONLY a complete .csv file among the most-recent files...
if(csv_query == TRUE && rdata_query == FALSE){
writeLines(paste(" - .csv exported version found. Loading this file..."))
ColTypes <- ColTypeR()
# Find the most-recent .csv occurrence file
occurDF <- most_recent[intersect(grep(".*\\.csv", most_recent),
grep("([aA]ttributes)", most_recent, invert = TRUE))] %>%
      readr::read_csv(col_types = ColTypes)
# Find the most-recent .rds attributes file
attr_loc <- most_recent[stringr::str_which(most_recent, "(.*[aA]ttribute)(.*\\.rds)")]
# Check to see if the attributes file exists or not
if(length(attr_loc) == 0){
writeLines("No attribute file found... Please make sure that one exists to include the EML data")
}else{
writeLines(
paste("Reading attribute file named ", attr_loc, "..." ))
attr_file <- readRDS(attr_loc)
}
# Add the attributes file to the occurrence data file
attributes(occurDF) <- attr_file
# Read in the EML file
# Find the folder that the attributes file is in.
EML_home <- stringr::str_replace(attr_loc, pattern = "\\/[a-zA-Z0-9-_]+\\.rds$", "")
# Find the .xml file in the same location as the attribute's folder
EML_loc <- BeeBDC::fileFinder(path = EML_home, fileName = "eml.*\\.rds")
# Read in the EML file
EML_file <- readRDS(EML_loc)
} #END IF .csv
# Extract and save the data and the metadata based on their class
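  # The imported object is expected to be a list holding (at least) one tibble of occurrence
  # records and one 'emld' metadata object; each element is routed to the right output by class.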
for(i in 1:length(occurDF)){
# If eml
if(base::any(class(occurDF[[i]]) %in% c( "emld") )){
eml_files <- occurDF[i]
}
# If tibble
if(base::any(class(occurDF[[i]]) %in% c("tbl_df", "data.frame", "tbl"))){
Data_WebDL <- occurDF[i]
}
}
#### Return data ####
# Re-combine the data and EML data
Data_WebDL <- dplyr::lst(Data_WebDL,
eml_files)
  # Print a completion note, then return the Data_WebDL
  writeLines(paste(" - Fin.", sep = "\n"))
  return(Data_WebDL)
} # END data_importer
|
/scratch/gouwar.j/cran-all/cranData/BeeBDC/R/importOccurrences.R
|
# This function was written by James B Dorey and Angela Nava Bolaños from the 12th of October 2022
# Its purpose is to create series of output figures or csv files for specified species
# Please contact jbdorey[at]me.com for help
#' Creates interactive html maps for species
#'
#' Uses the occurrence data (preferably uncleaned) and outputs interactive .html maps, which can be opened
#' in your browser, to a specific directory. The maps can highlight if an occurrence has passed all filtering
#' (.summary == TRUE) or failed at least one filter (.summary == FALSE). This can be modified by first running
#' [BeeBDC::summaryFun()] to set the columns that you want to be highlighted. It can also highlight occurrences
#' flagged as expert-identified or country outliers.
#'
#' @param data A data frame or tibble. Occurrence records to use as input.
#' @param outPath A directory as character. Directory where to save output maps.
#' @param lon Character. The name of the longitude column. Default = "decimalLongitude".
#' @param lat Character. The name of the latitude column. Default = "decimalLatitude".
#' @param speciesColumn Character. The name of the column containing species names (or another factor)
#' to build individual maps from. Default = "scientificName".
#' @param speciesList A character vector. Should contain species names as they appear in the
#' speciesColumn to make maps of. User can also specify "ALL" in order to make maps of all
#' species present in the data. Hence, a user may first filter their data and then use "ALL".
#' @param countryList A character vector. Country names to map, or NULL to map ALL countries.
#' @param jitterValue Numeric. The amount, in decimal degrees, to jitter the map points by - this
#' is important for separating stacked points with the same coordinates.
#' @param onlySummary Logical. If TRUE, the function will not look to plot country or
#' expert-identified outliers in different colours.
#' @param overWrite Logical. If TRUE, the function will overwrite existing files in the provided
#' directory that have the same name.
#' Default = TRUE.
#' @param TrueAlwaysTop If TRUE, the quality (TRUE) points will always be displayed on top of other points.
#' If FALSE, then whichever layer was turned on most-recently will be displayed on top.
#' @param excludeApis_mellifera Logical. If TRUE, will not map records for Apis mellifera. Note: in most cases
#' A. mellifera has too many points, and the resulting map will take a long time to make and be difficult to open.
#' Default = TRUE.
#' @param pointColours A character vector of colours. In order, provide colours for TRUE, FALSE, countryOutlier, and customOutlier.
#' Default = c("blue", "darkred","#ff7f00", "black").
#'
#' @return Exports .html interactive maps of bee occurrences to the specified directory.
#' @export
#'
#' @importFrom dplyr %>%
#' @importFrom dplyr across where
#'
#' @examples
#' OutPath_Figures <- tempdir()
#'
#' interactiveMapR(
#' # occurrence data - start with entire dataset, filter down to these species
#' data = BeeBDC::bees3sp, # %>%
#' # Select only those species in the 100 randomly chosen
#' # dplyr::filter(scientificName %in% beeData_interactive$scientificName),
#' # Select only one species to map
#' # dplyr::filter(scientificName %in% "Agapostemon sericeus (Forster, 1771)"),
#' # Directory where to save files
#' outPath = paste0(OutPath_Figures, "/interactiveMaps_TEST"),
#' # lat long columns
#' lon = "decimalLongitude",
#' lat = "decimalLatitude",
#' # Occurrence dataset column with species names
#' speciesColumn = "scientificName",
#' # Which species to map - a character vector of names or "ALL"
#' # Note: "ALL" is defined AFTER filtering for country
#' speciesList = "ALL",
#' # studyArea
#' countryList = NULL,
#' # Point jitter to see stacked points - jitters an amount in decimal degrees
#' jitterValue = 0.01,
#' # If TRUE, it will only map the .summary column. Otherwise, it will map .summary
#' # which will be over-written by countryOutliers and manualOutliers
#' onlySummary = TRUE,
#' excludeApis_mellifera = TRUE,
#' overWrite = TRUE,
#' # Colours for points which are flagged as TRUE, FALSE, countryOutlier, and customOutlier
#' pointColours = c("blue", "darkred","#ff7f00", "black")
#' )
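#'
#'   # Optional extra step (commented out; a hedged sketch, assuming summaryFun()'s dontFilterThese
#'   # argument): to change which flag columns drive the TRUE/FALSE colouring, you could first
#'   # rebuild the .summary column and then pass that data in with onlySummary = TRUE, e.g.:
#'   # data <- BeeBDC::summaryFun(data = data, dontFilterThese = c(".gridSummary", ".lonFlag"))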
interactiveMapR <- function(
# occurrence data
data = NULL,
# Directory where to save files
outPath = NULL,
# lat long columns
lon = "decimalLongitude",
lat = "decimalLatitude",
# Occurrence dataset column with species names
speciesColumn = "scientificName",
# Which species to map - a character vector of names or "ALL"
speciesList = NULL,
countryList = NULL,
jitterValue = NULL,
onlySummary = TRUE,
overWrite = TRUE,
TrueAlwaysTop = FALSE,
excludeApis_mellifera = TRUE,
pointColours = c("blue", "darkred","#ff7f00", "black")
){
# locally bind variables to the function
country <- .data <- scientificName <- expertOutlier <- .countryOutlier <- .summary <-
providers <- databaseSpp <- .expertOutlier <- NULL
requireNamespace("htmlwidgets")
requireNamespace("leaflet")
requireNamespace("dplyr")
# Ensure that working directories are maintain on exit from function
oldwd <- getwd() # code line i
on.exit(setwd(oldwd)) # code line i+1
#### 0.0 Prep ####
##### 0.1 Errors ####
###### a. FATAL errors ####
if(is.null(data)){
stop(paste0(" - No data was given. Please specify the data that you want to map ",
"for your data-cleaning adventures. I'll do the rest."))
}
if(is.null(outPath)){
stop(paste0(" - No outPath was given. Please specify the directory to save the maps to."))
}
##### 0.2 Packages ####
# Save the original directory
olddir <- getwd()
##### 0.3 Directories ####
# Create directory if it does not exist
if (!dir.exists(outPath)) {
dir.create(outPath, recursive = TRUE)}
# Set directory
setwd(outPath) #directory of work
# data$IDall <- paste0(1:nrow(data)) #to add an ID by row
#### 1.0 Data prep ####
##### 1.1 Remove na+ ####
data <- data %>%
tidyr::drop_na(tidyselect::any_of(c(lon, lat))) %>%
# Rename the lat and lon to darwincore
dplyr::rename("decimalLongitude" = tidyselect::all_of(lon),
"decimalLatitude" = tidyselect::all_of(lat) )
# Stop if no lat/lon
if(nrow(data) == 0){
stop("It looks like there may be no lat and lon data. Check that it exists and is not NA.")
}
# If there is no .expertOutlier then add one as all NA
if(!".expertOutlier" %in% colnames(data)){
message("The column .expertOutlier was not found. One will be created with all values = TRUE.")
data <- data %>%
dplyr::mutate(.expertOutlier = TRUE)
}
##### 1.2 Country list ####
# Select only the countries user provides
if(!is.null(countryList)){
data <- data %>%
dplyr::filter(country %in% countryList)
}
##### 1.3 Species list ####
if(any(stringr::str_detect(speciesList, "ALL")) == FALSE){
# Prepare the data for the loop
data <- data %>%
# Select ONLY the species requested
dplyr::filter(.data[[speciesColumn]] %in% speciesList)
}else{
speciesList <- unique(data[[speciesColumn]])
} # END if else statement
##### 1.4 excludeApis_mellifera ####
if(excludeApis_mellifera == TRUE){
data <- data %>%
dplyr::filter(!scientificName == "Apis mellifera Linnaeus, 1758")
speciesList <- setdiff(speciesList, "Apis mellifera Linnaeus, 1758")
}
##### 1.5 Overwrite ####
if(overWrite == FALSE){
# Find completed species
existingFiles <- list.files(path = outPath) %>%
stringr::str_remove("\\.html")
# remove them from the to-do list
speciesList <- setdiff(speciesList, existingFiles)
# STOP if no maps will be produced
if(length(speciesList) == 0){
stop("With overWrite = FALSE, there are no new maps to produce.")
}
# Re-filter the data to use only wanted species
data <- data %>%
# Select ONLY the species requested
dplyr::filter(.data[[speciesColumn]] %in% speciesList)
}
##### 1.6 Jitter ####
  # If the user specifies a jitter value, add that value
if(!is.null(jitterValue)){
data <- data %>%
dplyr::mutate(
decimalLongitude = base::jitter(data[[lon]], amount = jitterValue),
decimalLatitude = base::jitter(data[[lat]], amount = jitterValue)
)
}else{
# If no jitter, ensure that the lat lon columns are the same
data <- data %>%
dplyr::mutate(
decimalLongitude = data[[lon]],
decimalLatitude = data[[lat]])
} # END Jitter
# Make a new column to colour by if onlySummary == FALSE
if(onlySummary == FALSE){
data <- data %>%
dplyr::mutate(mapLevels = dplyr::if_else(.expertOutlier == FALSE,
"expertOutlier",
dplyr::if_else(.countryOutlier == FALSE | is.na(.countryOutlier),
"countryOutlier", dplyr::if_else(.summary == FALSE,
"FALSE", "TRUE"))) %>%
factor(c("TRUE", "FALSE", "countryOutlier", "expertOutlier"),
levels = c("TRUE", "FALSE", "countryOutlier", "expertOutlier"),
ordered = TRUE)
)
colPal = leaflet::colorFactor(pointColours,
levels = c("TRUE", "FALSE",
"countryOutlier", "expertOutlier"))
}else{ # Make colour palette for == TRUE
# Only take the first two, if more are provided
pointColours <- pointColours[1:2]
colPal = leaflet::colorFactor(pointColours,
levels = c("TRUE", "FALSE"))
}
# ensure UTF-8 encoding
  old <- options() # save the current options
  on.exit(options(old), add = TRUE) # restore them on exit without overwriting the earlier on.exit(setwd(oldwd))
# Ensure that there are no NA entries in the species list
speciesList <- speciesList[complete.cases(speciesList)]
options(encoding = "UTF-8")
data <- data %>% dplyr::mutate(dplyr::across(where(is.character),
function(x){iconv(x,
to = "UTF-8",
sub = "")}))
#### 2.0 produce maps ####
#function for leaflet maps
for (x in 1:length(speciesList)){
# Filter to the xth species
databaseLoop <- data %>%
      dplyr::filter(.data[[speciesColumn]] == speciesList[[x]] %>% iconv(from = "UTF-8",
                                                                         to = "UTF-8",
                                                                         sub = ""))
# Split data into classes
if(onlySummary == FALSE){
databaseLoop <- split(databaseLoop, f= databaseLoop$mapLevels, drop = TRUE)
}else{
databaseLoop <- split(databaseLoop, databaseLoop$.summary)}
#here you can change the number of spp
# Make the base map
mdatabaseSpp <- leaflet::leaflet(data = databaseLoop ) %>%
# Add map panes
leaflet::addMapPane(name = "maplabels_FALSE", zIndex = 410) %>%
leaflet::addMapPane(name = "maplabels_TRUE", zIndex = 420) %>% # higher zIndex rendered on top
leaflet::addMapPane(name = "maplabels_default", zIndex = 600) %>%
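      # Panes control draw order: higher zIndex panes render on top. When TrueAlwaysTop = TRUE,
      # points are assigned to the _TRUE/_FALSE panes so flagged-TRUE points always sit above
      # flagged-FALSE points; otherwise all points share the default pane.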
# Base groups
leaflet::addTiles(group = "OSM (default)") %>%
leaflet::addProviderTiles("Stadia.StamenTonerLite", group = "Toner Lite",
layerId = 300,
options = leaflet::providerTileOptions(zIndex = 500))
# For the names in the list, apply the points function
# Apply each walkName in a for loop to add to the map.
for(i in 1:length(names(databaseLoop))){
walkName <- names(databaseLoop)[[i]]
databaseSpp <- databaseLoop[[walkName]]
mdatabaseSpp <- databaseLoop[[walkName]] %>%
leaflet::addCircleMarkers(map = mdatabaseSpp,
data = databaseSpp,
lng = ~decimalLongitude, lat = ~decimalLatitude, ###then you can specify what do you want in the popup window from your data
group = walkName,
if(TrueAlwaysTop == TRUE){
options = leaflet::leafletOptions(
pane = if(walkName == "TRUE"){"maplabels_TRUE"
}else{"maplabels_FALSE"})}else{
options = leaflet::leafletOptions(
pane = "maplabels_default")
},
popup = stringr::str_c(
sep = "",
###### a. basic data ####
"<b>Basic data </b> - ",
"ID: ", databaseSpp$database_id, " ", #databaseSpp is the name of data and ID the name of the column
if("family" %in% colnames(databaseSpp)){
paste0("Family: ", databaseSpp$family,
"; ")},
if("scientificName" %in% colnames(databaseSpp)){
paste0("Species: ", databaseSpp$scientificName,
"; ")},
if("institutionCode" %in% colnames(databaseSpp)){
paste0(" institutionCode: ", databaseSpp$institutionCode,
"; ")},
if("catalogNumber" %in% colnames(databaseSpp)){
paste0("catalogNumber: ", databaseSpp$catalogNumber,
"; ")},
if("verbatimScientificName" %in% colnames(databaseSpp)){
paste0("Original name: ", databaseSpp$verbatimScientificName,
"; ")},
if("scientificNameAuthorship" %in% colnames(databaseSpp)){
paste0("Authority: ", databaseSpp$scientificNameAuthorship,
"; ")},
###### b. summary data ####
if(".summary" %in% colnames(databaseSpp)){
paste0("<p></p> <b>Summary flag</b> - ", databaseSpp$.summary)},
###### c. initial data ####
"<p></p><b>Initial flags</b> - ",
if(".coordinates_empty" %in% colnames(databaseSpp)){
paste0("No coordinates: ", databaseSpp$.coordinates_empty,
"; ")},
if(".coordinates_outOfRange" %in% colnames(databaseSpp)){
paste0("Point off map: ", databaseSpp$.coordinates_outOfRange,
"; ")},
if(".basisOfRecords_notStandard" %in% colnames(databaseSpp)){
paste0("Excluded basis of record: ", databaseSpp$.basisOfRecords_notStandard,
"; ")},
if(".coordinates_country_inconsistent" %in% colnames(databaseSpp)){
paste0("Coords. & country inconsistent: ", databaseSpp$.coordinates_country_inconsistent,
"; ")},
if(".occurrenceAbsent" %in% colnames(databaseSpp)){
paste0("Absent record: ", databaseSpp$.occurrenceAbsent,
"; ")},
if(".unLicensed" %in% colnames(databaseSpp)){
paste0("Protected by license: ", databaseSpp$.unLicensed,
"; ")},
###### d. taxonomy data ####
# Taxonomy
"<p></p><b>Taxonomy flags</b> - ",
if(".scientificName_empty" %in% colnames(databaseSpp)){
paste0("No scientific name: ", databaseSpp$.scientificName_empty,
"; ")},
if(".invalidName" %in% colnames(databaseSpp)){
paste0("Name didn't match: ", databaseSpp$.invalidName,
"; ")},
if(".uncer_terms" %in% colnames(databaseSpp)){
paste0("Taxonomy qualifier: ", databaseSpp$.uncer_terms,
"; ")},
###### e. space data ####
# space
"<p></p><b>Space flags</b> - ",
if(".rou" %in% colnames(databaseSpp)){
paste0("Coordinates rounded: ", databaseSpp$.rou,
"; ")},
if(".uncertaintyThreshold" %in% colnames(databaseSpp)){
paste0("High coordinate uncertainty: ", databaseSpp$.uncertaintyThreshold,
"; ")},
if(".cap" %in% colnames(databaseSpp)){
paste0("Capital centroid: ", databaseSpp$.cap,
"; ")},
if(".cen" %in% colnames(databaseSpp)){
paste0("Country centroid: ", databaseSpp$.cen,
"; ")},
if(".gbf" %in% colnames(databaseSpp)){
paste0("Point on GBIF HQ: ", databaseSpp$.gbf,
"; ")},
if(".equ" %in% colnames(databaseSpp)){
paste0("Coordinates equal: ", databaseSpp$.equ,
"; ")},
if(".inst" %in% colnames(databaseSpp)){
paste0("Point on institution: ", databaseSpp$.inst,
"; ")},
if(".zer" %in% colnames(databaseSpp)){
paste0("Coordinates zero: ", databaseSpp$.zer,
"; ")},
if(".val" %in% colnames(databaseSpp)){
paste0("Coordinates zero: ", databaseSpp$.val,
"; ")},
if(".sea" %in% colnames(databaseSpp)){
paste0("In sea: ", databaseSpp$.sea,
"; ")},
if(".countryOutlier" %in% colnames(databaseSpp)){
paste0("Country outliers: ", databaseSpp$.countryOutlier,
"; ")},
if(".stateOutlier" %in% colnames(databaseSpp)){
paste0("State outliers: ", databaseSpp$.stateOutlier,
"; ")},
if(".expertOutlier" %in% colnames(databaseSpp)){
paste0("Expert-identified outliers: ", databaseSpp$.expertOutlier,
"; ")},
if(".sequential" %in% colnames(databaseSpp)){
paste0("Coordinate fill-down: ", databaseSpp$.sequential,
"; ")},
if(".latFlag" %in% colnames(databaseSpp)){
paste0("Gridded latitudes: ", databaseSpp$.latFlag,
"; ")},
if(".lonFlag" %in% colnames(databaseSpp)){
paste0("Gridded latitudes: ", databaseSpp$.lonFlag,
"; ")},
if(".gridSummary" %in% colnames(databaseSpp)){
paste0("Gridded lat & lon: ", databaseSpp$.gridSummary,
"; ")},
###### f. time data ####
# Time
"<p></p><b>Time flags</b> - ",
if(".eventDate_empty" %in% colnames(databaseSpp)){
paste0("No event date: ", databaseSpp$.eventDate_empty,
"; ")},
if(".year_outOfRange" %in% colnames(databaseSpp)){
paste0("Year out of range: ", databaseSpp$.year_outOfRange,
"; ")},
###### g. duplicate data ####
# Duplicate
if(".duplicates" %in% colnames(databaseSpp)){
paste0("<p></p><b>Duplicate flag</b> - ", databaseSpp$.duplicates,
"; ")},
###### h. collection data ####
# Time
"<p></p><b>Collection data</b> - ",
if("recordedBy" %in% colnames(databaseSpp)){
paste0("Collector(s): ", databaseSpp$recordedBy,
"; ")},
if("year" %in% colnames(databaseSpp)){
paste0("Year: ", databaseSpp$year,
"; ")},
if("identifiedBy" %in% colnames(databaseSpp)){
paste0("Identified by: ", databaseSpp$identifiedBy,
"; ")},
if("country" %in% colnames(databaseSpp)){
paste0("Country: ", databaseSpp$country,
"; ")},
if("references" %in% colnames(databaseSpp)){
paste0("References: ", databaseSpp$references,
" ")}
), #you can add what do you want from columns of your data
###### i. colour ####
fillOpacity = if(walkName %in% c("TRUE", "FALSE")){0.4}else{0.7},
opacity = if(walkName %in% c("TRUE", "FALSE")){0.65}else{1},
#opacity = if(walkName %in% c("TRUE", "FALSE")){0.25}else{1},
#stroke = if(walkName %in% c("TRUE", "FALSE")){TRUE}else{FALSE},
# colour determined by if else
fillColor =
if(onlySummary == FALSE){
colPal(databaseSpp$mapLevels)
}else{colPal(databaseSpp$.summary)},
# Stroke color
color = if(walkName %in% c("TRUE", "FALSE")){
colPal(databaseSpp$.summary)}else{
# colour TRUE border
if(all(databaseSpp$.summary) == TRUE){
pointColours[1]
}else{pointColours[2]}},
# Internal size
radius = if(walkName %in% c("TRUE", "FALSE")){
5}else{6},
# border size
weight = if(walkName %in% c("TRUE", "FALSE")){
1.5}else{2.5}) #to change the size of points
} # END for loop
###### j. controller ####
# Add the layers control
mdatabaseSpp <- mdatabaseSpp %>%
leaflet::addLegend(color = pointColours[length(names(databaseLoop)):1],
labels = names(databaseLoop),
group = names(databaseLoop)) %>%
leaflet::addLayersControl(
baseGroups = c("OSM (default)", "Toner Lite"),
overlayGroups = names(databaseLoop),
options = leaflet::layersControlOptions(collapsed = FALSE, autoZIndex = FALSE,
sortLayers = FALSE))
###### k. save ####
#then, it is to save in html format
htmlwidgets::saveWidget(plotly::as_widget(mdatabaseSpp),
file.path(outPath, #directory to save files
paste0(speciesList[[x]],".html")),
selfcontained = TRUE,
title = paste0(speciesList[[x]]))
} # END for
# reset to original directory
setwd(olddir)
} # END function
#NOTE: to view spp by spp in the R viewer, you can run only from line 13 to line 31, changing
  # the numbers of spp in line 13
|
/scratch/gouwar.j/cran-all/cranData/BeeBDC/R/interactiveMapR.R
|
# This function was written by James Dorey to chunk the bdc_country_from_coordinates function
# to allow bigger datasets to be analysed without consuming too much RAM.
# This function was written on the 12th of May 2022. For questions, please email jbdorey[at]me.com
#' Get country names from coordinates
#'
#' Because the [bdc::bdc_country_from_coordinates()] function is very RAM-intensive, this wrapper
#' allows a user to specify chunk-sizes and only analyse a small portion of the occurrence data at a
#' time. The prefix jbd_ is used to highlight the difference between this function and the original
#' [bdc::bdc_country_from_coordinates()].
#'
#' @param data A data frame or tibble. Occurrence records to use as input.
#' @param lat Character. The name of the column to use as latitude. Default = "decimalLatitude".
#' @param lon Character. The name of the column to use as longitude. Default = "decimalLongitude".
#' @param country Character. The name of the column containing country names. Default = "country".
#' @param stepSize Numeric. The number of occurrences to process in each chunk. Default = 1000000.
#' @param chunkStart Numeric. The chunk number to start from. This can be > 1 when you need to
#' restart the function from a certain chunk. For example, can be used if R failed unexpectedly.
#' @param path Character. The directory path to a folder in which to save the running countrylist
#' csv file.
#' @param scale Passed to rnaturalearth's ne_countries().
#' Scale of map to return, one of 110, 50, 10 or 'small', 'medium', 'large'. Default = "large".
#' @param mc.cores Numeric. If > 1, the function will run in parallel
#' using mclapply using the number of cores specified. If = 1 then it will be run using a serial
#' loop. NOTE: Windows machines must use a value of 1 (see ?parallel::mclapply). Additionally,
#' be aware that each thread can use large chunks of memory.
#' Default = 1.
#'
#' @return A data frame containing database_ids and a country column
#' that needs to be re-merged with the data input.
#' @export
#'
#' @importFrom dplyr %>%
#'
#' @examples
#' library("dplyr")
#' data(beesFlagged)
#' HomePath = tempdir()
#' # Tibble of common issues in country names and their replacements
#' commonProblems <- dplyr::tibble(problem = c('U.S.A.', 'US','USA','usa','UNITED STATES',
#' 'United States','U.S.A','MX','CA','Bras.','Braz.','Brasil','CNMI','USA TERRITORY: PUERTO RICO'),
#' fix = c('United States of America','United States of America',
#' 'United States of America','United States of America',
#' 'United States of America','United States of America',
#' 'United States of America','Mexico','Canada','Brazil','Brazil',
#' 'Brazil','Northern Mariana Islands','Puerto Rico'))
#'
#' beesFlagged <- beesFlagged %>%
#' # Replace a name to test
#' dplyr::mutate(country = stringr::str_replace_all(country, "Brazil", "Brasil"))
#'
#' beesFlagged_out <- countryNameCleanR(
#' data = beesFlagged,
#' commonProblems = commonProblems)
#'
#' suppressWarnings(
#' countryOutput <- jbd_CfC_chunker(data = beesFlagged_out,
#' lat = "decimalLatitude",
#' lon = "decimalLongitude",
#' country = "country",
#' # How many rows to process at a time
#' stepSize = 1000000,
#' # Start row
#' chunkStart = 1,
#' path = HomePath,
#' scale = "medium"),
#' classes = "warning")
#'
#'
#' # Left join these datasets
#' beesFlagged_out <- left_join(beesFlagged_out, countryOutput, by = "database_id") %>%
#' # merge the two country name columns into the "country" column
#' dplyr::mutate(country = dplyr::coalesce(country.x, country.y)) %>%
#' # remove the now redundant country columns
#' dplyr::select(!c(country.x, country.y)) %>%
#' # put the column back
#' dplyr::relocate(country) %>%
#' # Remove duplicates if they arose!
#' dplyr::distinct()
#'
#' # Remove illegal characters
#' beesFlagged_out$country <- beesFlagged_out$country %>%
#' stringr::str_replace(., pattern = paste("\\[", "\\]", "\\?",
#' sep= "|"), replacement = "")
jbd_CfC_chunker <- function(data = NULL,
lat = "decimalLatitude",
lon = "decimalLongitude",
country = "country",
# How many rows to process at a time
stepSize = 1000000,
# Start row
chunkStart = 1,
scale = "medium",
path = tempdir(),
mc.cores = 1){
BeeBDC_order <- . <- .data <- id_temp <- name_long <- geometry <- inData <- country_OG <- NULL
#### 0.0 Prep ####
startTime <- Sys.time()
##### 0.1 nChunks ####
# Find the number of chunks needed to complete the run
nChunks = ceiling(nrow(data)/stepSize)
# Find the max nrow
nrowMax <- nrow(data)
# The chunkEnd is the same as the stepSize initially, but the chunkEnd will change with each
# iteration
##### 0.3 chunkEnd ####
chunkEnd = (chunkStart + stepSize) - 1
##### 0.4 Text out ####
# Write user output
message(paste(" - Running chunker with:", "\n",
"stepSize = ",
format(stepSize, big.mark=",",scientific=FALSE), "\n",
"chunkStart = ",
format(chunkStart, big.mark=",",scientific=FALSE), "\n",
"chunkEnd = ",
format(chunkEnd, big.mark=",",scientific=FALSE),
sep = ""))
#### 1.0 Parallel ####
##### 1.1 Input function for parallel ####
funCoordCountry <-
function(inData) {
suppressWarnings({
check_require_cran("rnaturalearth")
# check_require_github("ropensci/rnaturalearthdata")
})
loadNamespace("bdc")
# create an id_temp
inData$id_temp <- 1:nrow(inData)
minimum_colnames <- c(lat, lon)
if(!all(minimum_colnames %in% colnames(inData))) {
stop(
"These columns names were not found in your database: ",
paste(minimum_colnames[!minimum_colnames %in% colnames(inData)],
collapse = ", "),
call. = FALSE
)}
# check if inData has a country column
has_country <- any(colnames(inData) == country)
if(!has_country) {
inData$country <- NA}
# converts coordinates columns to numeric
inData <- inData %>%
dplyr::mutate(decimalLatitude = as.numeric(.data[[lat]]),
decimalLongitude = as.numeric(.data[[lon]]))
worldmap <- rnaturalearth::ne_countries(scale = scale, returnclass = "sf") %>%
sf::st_make_valid()
data_no_country <- inData %>%
dplyr::filter(is.na(country) | country == "")
if(nrow(data_no_country) == 0) {
inData <- inData %>% dplyr::select(-id_temp)
}else{
# converts coordinates columns to spatial points
suppressWarnings({
data_no_country <-
CoordinateCleaner::cc_val(
x = data_no_country,
lon = lon,
lat = lat,
verbose = FALSE
) %>%
sf::st_as_sf(
.,
coords = c("decimalLongitude", "decimalLatitude"),
remove = FALSE
) %>%
sf::st_set_crs(., sf::st_crs(worldmap))
})
worldmap <-
sf::st_as_sf(worldmap) %>% dplyr::select(name_long)
# Extract country names from coordinates
suppressWarnings({
suppressMessages({
ext_country <-
data_no_country %>%
dplyr::select(id_temp, geometry) %>%
sf::st_intersection(., worldmap)
})
})
ext_country$geometry <- NULL
res <- dplyr::left_join(data_no_country, ext_country, by = "id_temp") %>%
dplyr::distinct(id_temp, .keep_all = TRUE)
id_replace <- res$id_temp
inData[id_replace, "country"] <- res$name_long
inData <- inData %>% dplyr::select(-id_temp)
}
return(dplyr::as_tibble(inData))
}
##### 1.2 Run mclapply ####
# User output
writeLines(paste(" - Starting parallel operation. Unlike the serial operation (mc.cores = 1)",
", a parallel operation will not provide running feedback. Please be patient",
" as this function may take some time to complete. Each chunk will be run on",
" a seperate thread so also be aware of RAM usage."))
loop_check_pf = data %>%
# Make a new column with the ordering of rows
dplyr::mutate(BeeBDC_order = dplyr::row_number()) %>%
# Group by the row number and step size
dplyr::group_by(BeeBDC_group = ceiling(BeeBDC_order/stepSize)) %>%
# Split the dataset up into a list by group
dplyr::group_split(.keep = TRUE) %>%
# Run the actual function
parallel::mclapply(., funCoordCountry,
mc.cores = mc.cores
) %>%
# Combine the lists of tibbles
dplyr::bind_rows()
CountryList = dplyr::tibble(database_id = loop_check_pf$database_id,
country = loop_check_pf$country,
BeeBDC_order = loop_check_pf$BeeBDC_order) %>%
# Arrange these
dplyr::arrange(BeeBDC_order) %>%
# Remove extra columns
dplyr::select(!tidyselect::any_of("BeeBDC_order"))
#### 2.0 Return ####
colnames(CountryList) <- c("database_id", "country")
endTime <- Sys.time()
message(paste(
" - Completed in ",
round(difftime(endTime, startTime), digits = 2 ),
" ",
units(round(endTime - startTime, digits = 2)),
sep = ""))
# Get a subset of the input data
data <- data %>%
dplyr::select(tidyselect::any_of(c("database_id", "country")))
# Clean a little
CountryList <- CountryList %>%
# Drop na rows
tidyr::drop_na(country)
# Get a summary of the output
summaryTable <- CountryList %>%
dplyr::left_join(data, by = "database_id",
suffix = c("", "_OG")) %>%
    # Assign changed == 1 if the occurrence previously had no country name (i.e., a name was newly added)
dplyr::mutate(changed = dplyr::if_else(is.na(country_OG), 1, 0))
writeLines(paste0(" - We have updated the country names of ",
format(sum(summaryTable$changed), big.mark = ","),
" occurrences that previously had no country name assigned."))
return(CountryList)
} # END function
|
/scratch/gouwar.j/cran-all/cranData/BeeBDC/R/jbd_CfC_chunker.R
|
# This function was written by James Dorey to chunk the bdc_coordinates_transposed function
# to allow bigger datasets to be analysed without consuming too much RAM.
# This function was written on the 31st of May 2022. For questions, please email jbdorey[at]me.com
#' Wraps jbd_coordinates_transposed to identify and fix transposed occurrences
#'
#' Because the [BeeBDC::jbd_coordinates_transposed()] function is very RAM-intensive, this wrapper
#' allows a user to specify chunk-sizes and only analyse a small portion of the occurrence data at a
#' time. The prefix jbd_ is used to highlight the difference between this function and the original
#' [bdc::bdc_coordinates_transposed()].
#' This function will preferably use the countryCode column generated by
#' [bdc::bdc_country_standardized()].
#'
#' @param data A data frame or tibble. Occurrence records as input.
#' @param lat Character. The column with latitude in decimal degrees. Default = "decimalLatitude".
#' @param lon Character. The column with longitude in decimal degrees. Default = "decimalLongitude".
#' @param country Character. The name of the column containing country names. Default = "country".
#' @param idcol Character. The column name with a unique record identifier. Default = "database_id".
#' @param countryCode Character. Identifies the column containing ISO-2 country codes
#' Default = "countryCode".
#' @param sci_names Character. The column containing scientific names. Default = "scientificName".
#' @param border_buffer Numeric. The buffer, in decimal degrees, around points to help match them
#' to countries. Default = 0.2 (~22 km at equator).
#' @param save_outputs Logical. If TRUE, transposed occurrences will be saved to their own file.
#' @param stepSize Numeric. The number of occurrences to process in each chunk. Default = 1000000.
#' @param chunkStart Numeric. The chunk number to start from. This can be > 1 when you need to restart
#' the function from a certain chunk; for example if R failed unexpectedly.
#' @param progressiveSave Logical. If TRUE then the country output list will be saved between
#' each iteration so that `append` can be used if the function is stopped part way through.
#' @param path Character. The path to a file in which to save the 01_coordinates_transposed_
#' output.
#' @param append Logical. If TRUE, the function will look to append to an existing file.
#' @param scale Passed to rnaturalearth's ne_countries().
#' Scale of map to return, one of 110, 50, 10 or 'small', 'medium', 'large'. Default = "large".
#' @param mc.cores Numeric. If > 1, the jbd_correct_coordinates function will run in parallel
#' using mclapply using the number of cores specified. If = 1 then it will be run using a serial
#' loop. NOTE: Windows machines must use a value of 1 (see ?parallel::mclapply). Additionally,
#' be aware that each thread can use large chunks of memory.
#' Default = 1.
#' @return Returns the input data frame with a new column, coordinates_transposed, where FALSE = occurrences
#' that had coordinates transposed.
#' @export
#'
#' @importFrom dplyr %>%
#'
#' @examples
#' library(dplyr)
#' # Import and prepare the data
#' data(beesFlagged)
#' beesFlagged <- beesFlagged %>% dplyr::select(!c(.val, .sea)) %>%
#'   # Cut down the dataset to run the example quicker
#' dplyr::filter(dplyr::row_number() %in% 1:20)
#' # Run the function
#' beesFlagged_out <- jbd_Ctrans_chunker(
#' # bdc_coordinates_transposed inputs
#' data = beesFlagged,
#' idcol = "database_id",
#' lat = "decimalLatitude",
#' lon = "decimalLongitude",
#' country = "country_suggested",
#' countryCode = "countryCode",
#' # in decimal degrees (~22 km at the equator)
#' border_buffer = 1,
#' save_outputs = FALSE,
#' sci_names = "scientificName",
#' # chunker inputs
#' # How many rows to process at a time
#' stepSize = 1000000,
#' # Start row
#' chunkStart = 1,
#' # Progressively save the output between each iteration?
#' progressiveSave = FALSE,
#' path = tempdir(),
#' # If FALSE it may overwrite existing dataset
#' append = FALSE,
#' # Users should select scale = "large" as it is more thoroughly tested
#' scale = "medium",
#' mc.cores = 1
#' )
#' table(beesFlagged_out$coordinates_transposed, useNA = "always")
#'
jbd_Ctrans_chunker <- function(
data = NULL,
lat = "decimalLatitude",
lon = "decimalLongitude",
idcol = "databse_id",
country = "country_suggested",
countryCode = "countryCode",
sci_names = "scientificName",
border_buffer = 0.2, # in decimal degrees (~22 km at the equator)
save_outputs = TRUE,
# How many rows to process at a time
stepSize = 1000000,
# Start row
chunkStart = 1,
progressiveSave = TRUE,
path = tempdir(),
# If FALSE it may overwrite existing dataset
append = TRUE,
scale = "large",
mc.cores = 1){
database_id <- NULL
requireNamespace("dplyr")
#### 0.0 Prep ####
startTime <- Sys.time()
# Select the output file name to be saved as you run
fileName <- paste("01_coordinates_transposed_", Sys.Date(), ".csv", sep = "")
  #### 0.1 nChunks ####
# Find the number of chunks needed to complete the run
nChunks = ceiling(nrow(data)/stepSize)
# Find the max nrow
nrowMax <- nrow(data)
# IF a run failed you can start again from the same spot using append = TRUE
#### 0.2 Append ####
if(append == TRUE){
suppressWarnings({
# Read in the Tranps_tibble csv
Tranps_tibble = readr::read_csv(paste0(path, "/","Tranps_tibble.csv"), col_types = ColTypeR())
})
# set the chunkStart to the number of rows completed plus one
chunkStart = nrow(Tranps_tibble) + 1
nChunks = ceiling((nrow(data)-chunkStart)/stepSize)
} # END append IF statement
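  # Note: append = TRUE assumes a "Tranps_tibble.csv" file from a previous (progressiveSave = TRUE)
  # run exists in 'path'; the run is resumed from the row after the last one saved in that file.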
# The chunkEnd is the same as the stepSize initially, but the chunkEnd will change with each iteration
# It will also differ if append == true based on where the run is at.
chunkEnd = (chunkStart + stepSize) - 1
#### 0.3 User text ####
# Write user output
writeLines(paste(" - Running chunker with:", "\n",
"stepSize = ",
format(stepSize, big.mark=",",scientific=FALSE), "\n",
"chunkStart = ",
format(chunkStart, big.mark=",",scientific=FALSE), "\n",
"chunkEnd = ",
format(chunkEnd, big.mark=",",scientific=FALSE), "\n",
"append = ", append,
sep = ""))
#### 1.0 Run Loop ####
# Loop - from chunkStart to the end, process rows in batches of chunkEnd
for(i in 1:nChunks){
# Select rows from chunkStart to chunkEnd
loop_check_pf = data[chunkStart:chunkEnd,] %>%
tidyr::drop_na(tidyselect::all_of(idcol)) %>%
# Drop unused factors
base::droplevels()
# User output
writeLines(paste(" - Starting chunk ", i, "...", "\n",
"From ",
format(chunkStart, big.mark=",",scientific=FALSE), " to ",
format(chunkEnd, big.mark=",",scientific=FALSE),
sep = ""))
##### 1.1 Function ####
# Run the bdc_country_from_coordinates function from the BeeBDC package
loop_check_pf <- jbd_coordinates_transposed(
data = loop_check_pf,
lat = lat,
lon = lon,
sci_names = sci_names,
country = country,
countryCode = countryCode,
idcol = idcol,
border_buffer = border_buffer,
save_outputs = save_outputs,
path = path,
fileName = fileName,
scale = scale,
mc.cores = mc.cores)
#### 1.2 Save + bind file ####
# Save a smaller csv file with the database_id and country name to be matched later
# For the first instance in the loop...
if(i == 1 && append == FALSE){
Tranps_tibble = dplyr::tibble(loop_check_pf)
}else{
Tranps_tibble = dplyr::bind_rows(Tranps_tibble,
loop_check_pf)
}
# Set chunkStart to be chunkEnd +1 for the next row
chunkStart = chunkStart + stepSize
chunkEnd = chunkEnd + stepSize
# If chunkEnd surpasses nrowMax, then assign nrowMax.
if(chunkEnd > nrowMax){
chunkEnd = nrowMax
}
# Make room on the RAM by cleaning up the garbage
# user output
#### 1.3 User text ####
# Print use output
writeLines(paste(" - Finished chunk ", i, " of ", nChunks, ". ",
"Total records examined: ",
format(nrow(Tranps_tibble), big.mark=",",scientific=FALSE),
sep = "") )
# Save as a csv after each iteration
if(progressiveSave == TRUE){
readr::write_excel_csv(Tranps_tibble, file = paste0(path, "/", "Tranps_tibble.csv"))}
} # END loop
#### 2.0 Clean and return ####
# Remove NA values
Tranps_tibble <- Tranps_tibble %>%
dplyr::filter(!is.na("database_id")) %>%
# Remove any duplicates that have been introduced
dplyr::distinct()
endTime <- Sys.time()
message(paste(
" - Completed in ",
round(difftime(endTime, startTime), digits = 2 ),
" ",
units(round(endTime - startTime, digits = 2)),
sep = ""))
return(Tranps_tibble)
} # END function
|
/scratch/gouwar.j/cran-all/cranData/BeeBDC/R/jbd_Ctrans_chunker.R
|
# This function was written by James B Dorey starting from the 3rd-7th of June 2022.
# It is intended to replace the bdc function bdc_coordinates_country_inconsistent for larger datasets
# where it is simply not feasible.
# Initial function finished: _____
# For questions, please ask James at jbdorey[at]me.com
#' Flags coordinates that are inconsistent with the stated country name
#'
#' Compares stated country name in an occurrence record with record’s coordinates using
#' rnaturalearth data. The prefix, jbd_ is meant
#' to distinguish this function from the original [bdc::bdc_coordinates_country_inconsistent()].
#' This functions will preferably use the countryCode and country_suggested columns
#' generated by `bdc::bdc_country_standardized()`; please run it on your dataset prior to running
#' this function.
#'
#' @param data A data frame or tibble. Occurrence records as input.
#' @param lon Character. The name of the column to use as longitude. Default = "decimalLongitude".
#' @param lat Character. The name of the column to use as latitude. Default = "decimalLatitude".
#' @param scale Numeric or character. To be passed to [rnaturalearth::ne_countries()]'s scale.
#' Scale of map to return, one of 110, 50, 10 or “small”, “medium”, “large”.
#' Smaller values return higher-resolution maps.
#' @param pointBuffer Numeric. Amount to buffer points, in decimal degrees. If the point is outside
#' of a country, but within this point buffer, it will not be flagged. Default = 0.01.
#' @param mc.cores Numeric. If > 1, the st_intersects function will run in parallel
#' using mclapply using the number of cores specified. If = 1 then it will be run using a serial
#' loop. NOTE: Windows machines must use a value of 1 (see ?parallel::mclapply). Additionally,
#' be aware that each thread can use large chunks of memory.
#' Default = 1.
#' @param stepSize Numeric. The number of occurrences to process in each chunk. Default = 1000000.
#'
#' @return The input occurrence data with a new column, .coordinates_country_inconsistent
#' @export
#'
#' @importFrom dplyr %>%
#'
#' @examples
#'
#' beesRaw_out <- jbd_coordCountryInconsistent(
#' data = BeeBDC::beesRaw,
#' lon = "decimalLongitude",
#' lat = "decimalLatitude",
#' scale = 50,
#' pointBuffer = 0.01)
jbd_coordCountryInconsistent <- function(
data = NULL,
lon = "decimalLongitude",
lat = "decimalLatitude",
scale = 50,
pointBuffer = 0.01,
stepSize = 1000000,
mc.cores = 1){
database_id <- decimalLatitude <- decimalLongitude <- country <- name_long <- iso_a2 <-
geometry <- admin <- sovereignt <- name <- . <- NULL
.coordinates_outOfRange <- .coordinates_empty <- indexMatch <- BeeBDC_order <- NULL
countryCode <- country_suggested <- NULL
startTime <- Sys.time()
requireNamespace("rnaturalearth")
requireNamespace("dplyr")
requireNamespace("ggspatial")
requireNamespace("mgsub")
requireNamespace("terra")
#### 0.0 Prep ####
###### 0.1 fatal errors ####
if(!any(colnames(data) %in% "country")){
stop("There is no column called 'country' in the dataset. This is a minimum requirement.")
}
###### 0.2 Coord quality ####
if(!any(colnames(data) %in% ".coordinates_outOfRange")){
writeLines("No '.coordinates_outOfRange' column found, running bdc_coordinates_outOfRange...")
data <- bdc::bdc_coordinates_outOfRange(
data = data,
lat = lat,
lon = lon)
}
###### 0.3 columns present ####
if(!any(colnames(data) %in% ".coordinates_empty")){
writeLines("No '.coordinates_empty' column found, running bdc_coordinates_empty")
data <- bdc::bdc_coordinates_empty(
data = data,
lat = lat,
lon = lon)
}
if(!any(colnames(data) %in% "country_suggested")){
writeLines(paste0("No 'country_suggested' column found, adding an empty (NA) placeholder. This",
" column can be added by running bdc::bdc_country_standardized() on the ",
"input data."))
data <- data %>%
dplyr::mutate(country_suggested = NA_character_)
}
if(!any(colnames(data) %in% "countryCode")){
writeLines(paste0("No 'countryCode' column found, adding an empty (NA) placeholder. This",
" column can be added by running bdc::bdc_country_standardized() on the ",
"input data."))
data <- data %>%
dplyr::mutate(countryCode = NA_character_)
}
# Remove poor-quality coordinates
dataR <- data %>%
dplyr::filter(!.coordinates_outOfRange == FALSE) %>%
dplyr::filter(!.coordinates_empty == FALSE)
# Reduce dataset
dataR <- dataR %>%
dplyr::select(
tidyselect::any_of(c("database_id", "decimalLatitude", "decimalLongitude",
"country", "countryCode","country_suggested"))) %>%
# Remove lat/lon NAs
dplyr::filter(!is.na(decimalLatitude)) %>% dplyr::filter(!is.na(decimalLongitude))
#### 1.1 Terrestrial map ####
##### 1.1 rnaturalearth DL ####
writeLines(" - Downloading naturalearth map...")
suppressWarnings({
# Download the rnaturalearth countries
vectEarth <- rnaturalearth::ne_countries(scale = scale, type = "countries",
returnclass = "sf" )%>%
dplyr::select(name_long, iso_a2, geometry, admin, sovereignt, name) %>%
sf::st_make_valid()
# Simplify the world map ONCE to be used later
simplePoly <- vectEarth %>% sf::st_drop_geometry() %>%
dplyr::mutate(indexMatch = dplyr::row_number())
    # Repair geometries
sf::sf_use_s2(FALSE)
})
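  # Turning s2 off makes sf fall back to planar (GEOS) geometry operations, which are more
  # forgiving of the slightly invalid polygons that can occur in the naturalearth layers.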
#### 2.0 Extractions ####
##### 2.1 Create function 1 ####
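  # intersectFun() takes an sf point layer, finds which naturalearth polygon (if any) each point
  # falls in via st_intersects, and joins the matching country attributes back onto the points;
  # points over no polygon keep an NA indexMatch and hence NA country fields.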
intersectFun <- function(sp){
suppressMessages({
extracted <- sf::st_intersects(sp, vectEarth, sparse = TRUE) %>%
# return a tibble with the index of each match or NA where there was no match
dplyr::tibble(indexMatch = .)
# If first element is full, unlist each one
extracted <- extracted %>%
dplyr::mutate(indexMatch = indexMatch %>% as.character() %>% as.numeric() )
# rejoin
extracted <- extracted %>%
dplyr::left_join(simplePoly,
by = "indexMatch") %>%
# Add in the database_id
dplyr::bind_cols(sp)
}) # END suppressWarnings
return(extracted)
}# END intersectFun function
##### 2.2 Country name ####
writeLines(" - Extracting initial country names without buffer...")
suppressWarnings({
# Turn the points into an sf object
sp <- sf::st_as_sf(dataR, coords = c(lon, lat),
crs = sf::st_crs("WGS84"))
# Extract the country for the points from the vectEarth map
country_extracted = sp %>%
# Make a new column with the ordering of rows
dplyr::mutate(BeeBDC_order = dplyr::row_number()) %>%
# Group by the row number and step size
dplyr::group_by(BeeBDC_group = ceiling(BeeBDC_order/stepSize)) %>%
# Split the dataset up into a list by group
dplyr::group_split(.keep = TRUE) %>%
# Run the actual function
parallel::mclapply(., intersectFun,
mc.cores = mc.cores
) %>%
# Combine the lists of tibbles
dplyr::bind_rows()
})
##### 2.3 Failures ####
# Find those records that don't match.
failed_extract <- country_extracted %>%
# Find the mis-matched countries
dplyr::filter(!tolower(country) %in% c(tolower(name_long), tolower(admin),
tolower(sovereignt), tolower(name))) %>%
dplyr::filter(!tolower(country_suggested) %in% c(tolower(name_long), tolower(admin),
tolower(sovereignt), tolower(name))) %>%
dplyr::filter(!tolower(iso_a2) %in% c(tolower(country), tolower(countryCode))) %>%
# Remove NA countries
dplyr::filter(!is.na(country)) %>%
dplyr::tibble() %>%
sf::st_as_sf(crs = sf::st_crs("WGS84"))
# Replace some country names as needed and re-remove
failed_extract$country <-
mgsub::mgsub(failed_extract$country,
pattern = c("Martinique", "French guiana", "Federated States of Micronesia"),
replacement = c("France", "France", "Micronesia"))
# Remove new matches
failed_extract <- failed_extract %>%
# Find the mis-matched countries
dplyr::filter(!tolower(country) %in% c(tolower(name_long), tolower(admin),
tolower(sovereignt), tolower(name))) %>%
dplyr::filter(!tolower(country_suggested) %in% c(tolower(name_long), tolower(admin),
tolower(sovereignt), tolower(name))) %>%
dplyr::filter(!tolower(iso_a2) %in% c(tolower(country), tolower(countryCode))) %>%
# Remove NA countries
dplyr::filter(!is.na(country)) %>%
dplyr::tibble() %>%
sf::st_as_sf(crs = sf::st_crs("WGS84"))
# remove spent dataset
rm(country_extracted)
##### 2.4 Buffer fails ####
writeLines(" - Buffering naturalearth map by pointBuffer...")
# Buffer the natural earth map
suppressWarnings({
vectEarth <- vectEarth %>%
sf::st_buffer(dist = pointBuffer)
})
writeLines(" - Extracting FAILED country names WITH buffer...")
# Extract the country for the points from the vectEarth map
suppressWarnings({
failed_extract_2 = failed_extract %>%
dplyr::select(database_id, country, geometry, country_suggested, countryCode) %>%
# Make a new column with the ordering of rows
dplyr::mutate(BeeBDC_order = dplyr::row_number()) %>%
# Group by the row number and step size
dplyr::group_by(BeeBDC_group = ceiling(BeeBDC_order/stepSize)) %>%
# Split the dataset up into a list by group
dplyr::group_split(.keep = TRUE) %>%
# Run the actual function
parallel::mclapply(., intersectFun,
mc.cores = mc.cores
) %>%
# Combine the lists of tibbles
dplyr::bind_rows()
})
# Find MATCHES #
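  # fExtr_1 keeps buffered records whose country/country_suggested names still do not match the
  # intersected polygon names; fExtr_2 keeps those whose ISO-2 code matches country or countryCode.
  # ids2keep then retains records whose ISO-2 code matches even though the names do not, and only
  # the remaining failures (ids2remove) are flagged below.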
# With country
fExtr_1 <- failed_extract_2 %>%
dplyr::filter(!tolower(country) %in% c(tolower(name_long), tolower(admin),
tolower(sovereignt), tolower(name))) %>%
dplyr::filter(!tolower(country_suggested) %in% c(tolower(name_long), tolower(admin),
tolower(sovereignt), tolower(name)))
# With iso_a2
fExtr_2 <- failed_extract_2 %>%
dplyr::filter(tolower(iso_a2) %in% c(tolower(country), tolower(countryCode)))
ids2keep <- dplyr::bind_rows(fExtr_1, fExtr_2) %>%
# Find the mis-matched countries
dplyr::filter(!tolower(country) %in% c(tolower(name_long), tolower(admin),
tolower(sovereignt), tolower(name))) %>%
dplyr::filter(!tolower(country_suggested) %in% c(tolower(name_long), tolower(admin),
tolower(sovereignt), tolower(name))) %>%
dplyr::filter(tolower(iso_a2) %in% c(tolower(country), tolower(countryCode))) %>%
# Keep only the database id
dplyr::select(database_id)
#### 3.0 Final fails ####
# Get the final fails by removing those to keep from the failed list
ids2remove <- failed_extract_2 %>%
dplyr::filter(!database_id %in% ids2keep$database_id) %>%
dplyr::select(database_id)
#### 4.0 Finals ####
# Create new column
data <- data %>%
dplyr::mutate(.coordinates_country_inconsistent = !database_id %in% ids2remove$database_id)
# return message
message(paste("\njbd_coordinates_country_inconsistent:\nFlagged",
format(sum(data$.coordinates_country_inconsistent == FALSE, na.rm = TRUE), big.mark = ","),
"records.\nThe column, '.coordinates_country_inconsistent',",
"was added to the database.\n"), sep = "")
endTime <- Sys.time()
# Time output
message(paste(
" - Completed in ",
round(difftime(endTime, startTime), digits = 2 ),
" ",
units(round(endTime - startTime, digits = 2)),
sep = ""))
# Return the data
return(data)
} # END function
|
/scratch/gouwar.j/cran-all/cranData/BeeBDC/R/jbd_coordCountryInconsistent.R
|
# This function was written by James B Dorey on the 19th December 2022
# Its purpose is to flag rounded coordinates. This replaces bdc_coordinates_precision by flagging ONLY
# Occurrences where both lat AND lon are rounded; not just one.
# Please contact jbdorey[at]me.com for help
#' Flags coordinates for imprecision
#'
#' This function flags occurrences where BOTH latitude and longitude values are rounded. This
#' contrasts with the original function, bdc::bdc_coordinates_precision() that will flag
#' occurrences where only one of latitude OR longitude are rounded. The BeeBDC approach saves
#' occurrences that may have had terminal zeros rounded in one coordinate column.
#'
#' @param data A data frame or tibble. Occurrence records as input.
#' @param lat Character. The name of the column to use as latitude. Default = "decimalLatitude".
#' @param lon Character. The name of the column to use as longitude. Default = "decimalLongitude".
#' @param ndec Numeric. The number of decimal places to flag in decimal degrees. For example,
#' argument value of 2 would flag occurrences with nothing in the hundredths place (0.0x).
#' @param quieter Logical. If TRUE, the function will run a little quieter. Default = FALSE.
#'
#' @return Returns the input data frame with a new column, .rou, where FALSE indicates occurrences
#' that failed the test.
#' @export
#'
#' @importFrom dplyr %>%
#'
#' @examples
#' beesRaw_out <- jbd_coordinates_precision(
#' data = BeeBDC::beesRaw,
#' lon = "decimalLongitude",
#' lat = "decimalLatitude",
#' # number of decimals to be tested
#' ndec = 2
#' )
#' table(beesRaw_out$.rou, useNA = "always")
jbd_coordinates_precision <-
function(data,
lat = "decimalLatitude",
lon = "decimalLongitude",
ndec = NULL,
quieter = FALSE) {
. <- .ndec_all <- NULL
#### 0.0 Prep ####
##### 0.1 errors ####
###### a. FATAL errors ####
if(is.null(ndec)){
stop(paste0(" - No ndec was provided. This is the minimum number of decimal places",
" that the coordinates should have to be considered valid"))
}
#### 1.0 Run function ####
##### 1.1 Prepare data ####
# Remove a .rou column if it already exists
data <- data %>%
dplyr::select(!tidyselect::any_of(".rou"))
##### 1.2 Tests ####
# Select the columns that you want
df <-
data %>%
dplyr::select({{ lon }}, {{ lat }}) %>%
as.data.frame()
# get a character vector of the length (number of decimal places) for each lat or lon
ndec_lat <- (df[, lat] %>%
as.character() %>%
stringr::str_split_fixed(., pattern = "[.]", n = 2))[, 2] %>%
stringr::str_length()
ndec_lon <- (df[, lon] %>%
as.character() %>%
stringr::str_split_fixed(., pattern = "[.]", n = 2))[, 2] %>%
stringr::str_length()
rm(df)
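    # Worked example (illustrative): as.character(10.25) splits into "10" and "25", so the
    # decimal count is 2; a whole number such as 10 splits into "10" and "", giving 0.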
ndec_list <- as.list(ndec)
names(ndec_list) <- paste0(".", "ndec", ndec)
for (i in 1:length(ndec)) {
ndec_list[[i]] <- (ndec_lat >= ndec[i] | ndec_lon >= ndec[i])
}
ndec_list <- dplyr::bind_cols(ndec_list)
    ndec_list$.ndec_all <- apply(ndec_list, 1, all) # TRUE when the record meets every ndec threshold (i.e., is not flagged)
ndec_list <-
ndec_list %>%
dplyr::select(.ndec_all) %>%
dplyr::rename(.rou = .ndec_all)
#### 2.0 User output ####
if(quieter == FALSE){
message("jbd_coordinates_precision:\nFlagged ",
format(sum(!ndec_list[".rou"]), big.mark = ","),
" records\nThe '.rou' column was added to the database.\n")}else{
# QUIETER message
message("jbd_coordinates_precision:\nRemoved ",
format(sum(!ndec_list[".rou"]), big.mark = ","),
" records.")
}
res <- dplyr::bind_cols(data, ndec_list)
return(res)
}
|
/scratch/gouwar.j/cran-all/cranData/BeeBDC/R/jbd_coordinates_precision.R
|
#' Identify transposed geographic coordinates
#'
#' This function flags and corrects records when latitude and longitude appear
#' to be transposed.
#' This function will preferably use the countryCode column generated by
#' [bdc::bdc_country_standardized()].
#'
#' @family prefilter
#' @param data A data frame or tibble. Containing a unique identifier for each record,
#' geographical coordinates, and country names. Coordinates must be expressed
#' in decimal degrees and WGS84.
#' @param idcol A character string. The column name with a unique record identifier.
#' Default = "database_id".
#' @param sci_names A character string. The column name with species' scientific
#' names. Default = "scientificName".
#' @param lat A character string. The column name with latitudes. Coordinates must
#' be expressed in decimal degrees and WGS84. Default = "decimalLatitude".
#' @param lon A character string. The column name with longitudes. Coordinates must be
#' expressed in decimal degrees and WGS84. Default = "decimalLongitude".
#' @param country A character string. The column name with the country
#' assignment of each occurrence record. Default = "country".
#' @param countryCode A character string. The column name containing an ISO-2 country code for
#' each record.
#' @param border_buffer Numeric. Must have value greater than or equal to 0.
#' A distance in decimal degrees used to
#' create a buffer around each country. Records within a given country and at
#' a specified distance from the border will not be corrected.
#' Default = 0.2 (~22 km at the equator).
#' @param save_outputs Logical. Indicates if a table containing transposed coordinates should be
#' saved for further inspection. Default = FALSE.
#' @param fileName A character string. The out file's name.
#' @param path A character string. A path as a character vector for where to create the directories
#' and save the figures. If
#' no path is provided (the default), the directories will be created using [here::here()].
#' @param scale Passed to rnaturalearth's ne_countries().
#' Scale of map to return, one of 110, 50, 10 or 'small', 'medium', 'large'. Default = "large".
#' @param mc.cores Numeric. If > 1, the jbd_correct_coordinates function will run in parallel
#' using mclapply using the number of cores specified. If = 1 then it will be run using a serial
#' loop. NOTE: Windows machines must use a value of 1 (see ?parallel::mclapply). Additionally,
#' be aware that each thread can use large chunks of memory.
#' Default = 1.
#' @details This test identifies transposed coordinates based on mismatches between the
#' country provided for a record and the record’s latitude and longitude coordinates. Transposed
#' coordinates often fall outside of the indicated country (i.e., in other
#' countries or in the sea). Different coordinate transformations are
#' performed to correct country/coordinates mismatches. Importantly, verbatim
#' coordinates are replaced by the corrected ones in the returned database. A
#' database containing verbatim and corrected coordinates is created in
#' "Output/Check/01_coordinates_transposed.csv" if save_outputs == TRUE. The
#' columns "country" and "countryCode" can be retrieved by using the function
#' [bdc::bdc_country_standardized].
#'
#' @return A tibble containing the column "coordinates_transposed" which indicates if
#' verbatim coordinates were not transposed (TRUE). Otherwise
#' records are flagged as (FALSE) and, in this case, verbatim coordinates are
#' replaced by corrected coordinates.
#'
#' @importFrom readr write_excel_csv
#' @importFrom dplyr tibble rename mutate select contains pull
#' @importFrom here here
#'
#' @export
#'
#' @importFrom dplyr %>%
#'
#' @examples
#' \donttest{
#' database_id <- c(1, 2, 3, 4)
#' scientificName <- c(
#' "Rhinella major", "Scinax ruber",
#' "Siparuna guianensis", "Psychotria vellosiana"
#' )
#' decimalLatitude <- c(63.43333, -14.43333, -41.90000, -46.69778)
#' decimalLongitude <- c(-17.90000, -67.91667, -13.25000, -13.82444)
#' country <- c("BOLIVIA", "bolivia", "Brasil", "Brazil")
#'
#' x <- data.frame(
#' database_id, scientificName, decimalLatitude,
#' decimalLongitude, country
#' )
#'
#' # Get country codes
#' x <- bdc::bdc_country_standardized(data = x, country = "country")
#'
#' jbd_coordinates_transposed(
#' data = x,
#' idcol = "database_id",
#' sci_names = "scientificName",
#' lat = "decimalLatitude",
#' lon = "decimalLongitude",
#' country = "country_suggested",
#' countryCode = "countryCode",
#' border_buffer = 0.2,
#' save_outputs = FALSE,
#' scale = "medium"
#' )
#' }
#'
jbd_coordinates_transposed <- function(data,
idcol = "database_id",
sci_names = "scientificName",
lat = "decimalLatitude",
lon = "decimalLongitude",
country = "country",
countryCode = "countryCode",
border_buffer = 0.2,
save_outputs = FALSE,
fileName = NULL,
scale = "large",
path = NULL,
mc.cores = 1) {
decimalLatitude <- decimalLongitude <- database_id <- scientificName <- NULL
requireNamespace("dplyr")
suppressWarnings({
check_require_cran("rnaturalearth")
check_require_cran("readr")
})
# Ensure that working directories are maintain on exit from function
oldwd <- getwd() # code line i
on.exit(setwd(oldwd)) # code line i+1
# Copy original wd
OGwd <- getwd()
sf::sf_use_s2(TRUE)
data <- dplyr::tibble(data)
minimum_colnames <-
c(idcol, sci_names, lat, lon, country, countryCode)
if (length(minimum_colnames) < 6) {
stop("Fill all function arguments: idcol, sci_names, lon, lat, and
country")
}
if (!all(minimum_colnames %in% colnames(data))) {
stop(
"These columns names were not found in your database: ",
paste(minimum_colnames[!minimum_colnames %in% colnames(data)],
collapse = ", "),
call. = FALSE
)
}
  # Temporarily change the name of the column .summary to avoid errors with duplicated coordinates
if(".summary" %in% names(data)){
w <- which(names(data) == ".summary")
names(data)[w] <- "temp_summary"
}
# Standardizing columns names
data <-
data %>%
dplyr::rename(
database_id = {{ idcol }},
decimalLatitude = {{ lat }},
decimalLongitude = {{ lon }},
scientificName = {{ sci_names }},
countryCode = {{ countryCode }}
)
# converts coordinates columns to numeric
data <-
data %>%
dplyr::mutate(
decimalLatitude = as.numeric(decimalLatitude),
decimalLongitude = as.numeric(decimalLongitude)
)
worldmap <- jbd_get_world_map(scale = scale) # get world map and country iso
# Correct latitude and longitude transposed
message("Correcting latitude and longitude transposed\n")
corrected_coordinates <-
jbd_correct_coordinates(
data = data,
x = "decimalLongitude",
y = "decimalLatitude",
sp = "scientificName",
idcol = idcol,
cntr_iso2 = "countryCode",
world_poly = worldmap,
world_poly_iso = "iso2c",
border_buffer = border_buffer,
mc.cores = mc.cores
)
if (!is.null(corrected_coordinates)) {
# Exports a table with verbatim and transposed xy
corrected_coordinates <-
corrected_coordinates %>%
dplyr::select(database_id, scientificName, dplyr::contains("decimal"))
if (save_outputs) {
jbd_create_dir(path = path)
setwd(path)
corrected_coordinates %>%
readr::write_excel_csv(
paste(path, fileName, sep = "/"),
append = TRUE)
      message(
        paste0(
          "\nCheck the database containing corrected coordinates in:\n",
          paste(path, fileName, sep = "/"),
          "\nThis will be appended to any existing rows from THIS run."
        )
      )
}
# finding the position of records with lon/lat modified
w <-
which(data %>% dplyr::pull(database_id) %in% (corrected_coordinates %>% dplyr::pull(database_id)))
data[w, "decimalLatitude"] <-
corrected_coordinates[, "decimalLatitude_modified"]
data[w, "decimalLongitude"] <-
corrected_coordinates[, "decimalLongitude_modified"]
# Flags transposed coordinates
data$coordinates_transposed <- TRUE
data[w, "coordinates_transposed"] <- FALSE
    # Restore the column name .summary
if("temp_summary" %in% names(data)){
w <- which(names(data) == "temp_summary")
names(data)[w] <- ".summary"
}
message(
paste(
"\njbd_coordinates_transposed:\nCorrected",
format(sum(data$coordinates_transposed == FALSE, na.rm = TRUE), big.mark = ","),
"records.\nOne columns were added to the database.\n"
)
)
return(data)
} else{
      # Restore the column name .summary
if("temp_summary" %in% names(data)){
w <- which(names(data) == "temp_summary")
names(data)[w] <- ".summary"
}
message("No latitude and longitude were transposed\n")
return(data)
}
setwd(OGwd)
}
|
/scratch/gouwar.j/cran-all/cranData/BeeBDC/R/jbd_coordinates_transposed.R
|
#' Internal function. Detects and corrects transposed geographic coordinates
#'
#' This function detects mismatches between the country names provided and the coordinates.
#' Once detected, transposed coordinates are corrected by applying different
#' coordinate transformations using the internal 'jbd_coord_trans' function.
#'
#' @param data data.frame. Contains a unique identifier for each record,
#' geographical coordinates, and country names. Coordinates must be expressed in
#' decimal degrees and WGS84.
#' @param x character string. The column name with longitude. Default =
#' "decimalLongitude".
#' @param y character string. The column name with latitude. Default =
#' "decimalLatitude".
#' @param sp character string. The column name with species scientific name.
#' Default = "scientificName".
#' @param idcol character string. The column name with a unique record
#' identifier. Default = "idcol".
#' @param cntr_iso2 character string. The column name with the country code
#' assignment of each record. Default = "country_code".
#' @param world_poly polygon. Borders of the world.
#' @param world_poly_iso character string. The iso2 code column of the country polygon
#' database.
#' @param border_buffer numeric. A distance in decimal degrees used to create a
#' buffer around the country. Records within a given country and at a specified
#' distance from its border will not be corrected. Default = 0.2 (~20 km at
#' the equator).
#' @param mc.cores Numeric. If > 1, the jbd_correct_coordinates function will run in parallel
#' using mclapply using the number of cores specified. If = 1 then it will be run using a serial
#' loop. NOTE: Windows machines must use a value of 1 (see ?parallel::mclapply). Additionally,
#' be aware that each thread can use large chunks of memory.
#' Default = 1.
#'
#' @return Internal function
#'
#' @importFrom CoordinateCleaner cc_val clean_coordinates
#' @importFrom dplyr filter mutate as_tibble select all_of pull bind_rows distinct relocate left_join %>%
#'
#' @noRd
#'
#' @examples
#' \donttest{
#' # This is an internal function that is exported for clarity and so that users can
#' # trace errors if needed.
#' }
jbd_correct_coordinates <-
function(data,
x,
y,
sp,
idcol,
cntr_iso2,
world_poly,
world_poly_iso,
border_buffer,
mc.cores = 1) {
. <- decimalLatitude <- decimalLongitude <- .summary <- iso2c <- jbd_coord_trans <- ':=' <- NULL
indexMatch <- countryCode <- countryMatch <- database_id <- NULL
    #### 0.0 Preparation ####
##### 0.1 Simplify map ####
# Simplify the world map ONCE to be used later
simplePoly <- world_poly %>% sf::st_drop_geometry() %>%
dplyr::mutate(indexMatch = dplyr::row_number())
##### 0.2 Create function ####
# Create the coord_trans function for internal use
jbd_coord_trans <-
function(data) {
. <- ':=' <- indexMatch <- NULL
data <-
data %>% dplyr::select(
dplyr::all_of(x),
dplyr::all_of(y),
dplyr::all_of(cntr_iso2),
dplyr::all_of(idcol)
)
names(data)[names(data) == idcol] <- "idcol"
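        # The seven candidate corrections built below are (with x = longitude, y = latitude):
        # d1 latitude sign flipped; d2 longitude sign flipped; d3 both signs flipped;
        # d4 latitude/longitude swapped; d5-d7 swapped with one or both signs flipped.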
d1 <- data.frame(x = data[, x], y = -data[, y], idcol = data[, "idcol"])
d2 <- data.frame(x = -data[, x], y = data[, y], idcol = data[, "idcol"])
d3 <- data.frame(x = -data[, x], y = -data[, y], idcol = data[, "idcol"])
d4 <- data.frame(x = data[, y], y = data[, x], idcol = data[, "idcol"])
d5 <- data.frame(x = data[, y], y = -data[, x], idcol = data[, "idcol"])
d6 <- data.frame(x = -data[, y], y = data[, x], idcol = data[, "idcol"])
d7 <- data.frame(x = -data[, y], y = -data[, x], idcol = data[, "idcol"])
d.list <- list(d1, d2, d3, d4, d5, d6, d7)
rm(list = paste0("d", 1:7))
d.list <- lapply(d.list, function(x) {
colnames(x) <- c("x", "y", "idcol")
return(x)
})
over_list <- list()
for (d in 1:length(d.list)) {
# Check for coordinate validity first
caluse <- d.list[[d]] %>%
# Remove coordinates that dont land on [our] earth
dplyr::filter(!y > 90) %>% dplyr::filter(!y < -90) %>%
dplyr::filter(!x > 180) %>% dplyr::filter(!x < -180)
# IF The coordinates do land on [our] earth, then turn them into sf objects and
# check if they overlap with a country
if(nrow(caluse) > 0){
caluse <- caluse %>%
sf::st_as_sf(., coords = c("x", "y"), crs = sf::st_crs("WGS84")) %>%
sf::st_make_valid()
suppressWarnings({
overresult <- sf::st_intersects(caluse, world_poly) %>%
# return a tibble with the index of each match or NA where there was no match
dplyr::tibble(indexMatch = . ) %>%
# Convert to numeric
dplyr::mutate(indexMatch = indexMatch %>% as.numeric()) %>%
dplyr::left_join(simplePoly,
by = "indexMatch") %>%
# Add in the database_id
dplyr::bind_cols(caluse %>% sf::st_drop_geometry())
})}else{
            overresult = dplyr::tibble()
}
if(nrow(overresult) > 0){
colnames(d.list[[d]]) <-
c(paste0(x, "_modified"), paste0(y, "_modified"), "idcol")
over_list[[d]] <- dplyr::left_join(d.list[[d]], data, by = "idcol") %>%
dplyr::left_join(overresult, by = "idcol")
rm(caluse)
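          # Keep only the transformed points whose intersected polygon's ISO code matches the
          # record's stated country code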
filt <-
which(over_list[[d]][cntr_iso2] == over_list[[d]][world_poly_iso])
}else{
filt = dplyr::tibble()
}
if (length(filt) > 0) {
over_list[[d]] <- over_list[[d]][filt,]
} else {
over_list[[d]] <- NULL
}
rm(list = c("overresult", "filt"))
}
rm(d.list)
non_empty_list_test <- !sapply(over_list <- over_list, is.null)
if (any(non_empty_list_test)) {
over_list <- over_list[non_empty_list_test]
over_list <- dplyr::bind_rows(over_list)
} else{
over_list <- dplyr::tibble(
decimalLongitude = double(),
decimalLatitude = double(),
countryCode = character(),
database_id = character()
)
}
# Return the database_id column to its correct name
colnames(over_list)[colnames(over_list) == "idcol"] = idcol
return(over_list)
}
#### 1.0 data prep ####
x_mod <- paste0(x, "_modified")
y_mod <- paste0(y, "_modified")
occ_country <- data %>% dplyr::filter(!is.na(data[[cntr_iso2]]))
#### 2.0 CoordinateCleaner ####
# Filter occurrences database to avoid error in clean_coordinates errors
suppressWarnings({
suppressMessages({
occ_country <-
occ_country %>%
CoordinateCleaner::cc_val(., lon = x, lat = y) %>%
dplyr::mutate(
decimalLatitude = as.numeric(decimalLatitude),
decimalLongitude = as.numeric(decimalLongitude)
)
})
})
# Detect records outside a country
# Convert to sf object
suppressWarnings({
countryTest <- occ_country %>%
sf::st_as_sf(coords = c(x, y), crs = sf::st_crs(world_poly)) %>%
# Perform intersect operation with world_poly
sf::st_intersects(., world_poly) %>%
# return a tibble with the index of each match or NA where there was no match
dplyr::tibble(indexMatch = . ) %>%
# Convert to numeric
dplyr::mutate(indexMatch = indexMatch %>% as.character() %>% as.numeric()) %>%
dplyr::left_join(simplePoly,
by = "indexMatch")
})# END suppressWarnings
# Join with the original dataset to find the database_ids of those occurrences that 1. do not
# match with their supplied country code and/or 2. fall in the ocean (are NA)
countryTest <- occ_country %>%
# Get a subset of columns
dplyr::select(tidyselect::all_of(c("database_id", cntr_iso2))) %>%
# Bind columns with the original data
dplyr::bind_cols(countryTest) %>%
# Make a column to test country matches by
dplyr::mutate(countryMatch = dplyr::if_else(countryCode == iso2c,
TRUE, FALSE)) %>%
# Filter to failed and ocean occurrences
dplyr::filter(countryMatch == FALSE | is.na(countryMatch))
# Separate those records outside their countries
occ_country <-
occ_country %>%
dplyr::filter(database_id %in% countryTest$database_id)
# now this database have all those records with potential error that be
# corrected
message(occ_country %>% nrow(), " occurrences will be tested")
# If occ_country have no data
if(nrow(occ_country)==0){
return(NULL)
}
# Split database by country code - cntr_iso2
occ_country <-
split(occ_country, occ_country[cntr_iso2])
# JBD edit — Remove empty elements from list before testing.
occ_country <- occ_country[sapply(occ_country, function(x) dim(x)[1]) > 0]
# jbd_coord_trans() function will try different coordinate transformations
# to correct georeferenced occurrences
coord_test <- list()
#### 3.0 Run function ####
##### 3.1 Run mclapply ####
# Run the actual function
coord_test <- parallel::mclapply(occ_country, jbd_coord_trans,
mc.cores = mc.cores
)
# elimination from the list those countries without correction
filt <- sapply(coord_test, function(x) nrow(x) > 0)
if(any(filt)){
coord_test <- coord_test[filt]
# Elimination of those records near to country border (to avoid flip
# coordinates or sign that fall too close to country border)
for (i in 1:length(coord_test)) {
n <-
coord_test[[i]] %>%
dplyr::select(dplyr::all_of(cntr_iso2)) %>%
unique() %>%
dplyr::pull()
# Select only the relevant polygon to buffer
my_country2 <- world_poly %>%
dplyr::filter(iso2c %in% n)
# Here filter polygon based on your country iso2c code
my_country2 <-
my_country2 %>%
dplyr::filter(iso2c %in% n) %>%
# JBD — France was failing to buffer using raster due to TopologyException. Use sf instead.
sf::st_as_sf() %>% sf::st_buffer(border_buffer)
# JBD — turned off for above reason
# 0.5 degree ~50km near to equator
# my_country2 <- raster::buffer(my_country, width = border_buffer)
# > my_country2
# class : SpatialPolygons
# features : 1
# extent : -180, 180, -90, -60.51621 (xmin, xmax, ymin, ymax)
# crs : NA
coord_sp <- sf::st_as_sf(coord_test[[i]] %>% dplyr::select({{ x }}, {{ y }}),
coords = c(x, y))
sf::st_crs(coord_sp) <- sf::st_crs(my_country2)
over_occ <- sf::st_join(coord_sp, my_country2) %>%
dplyr::pull(iso2c)
# Eliminate as corrected those records too close to country border
coord_test[[i]] <-
coord_test[[i]] %>% dplyr::filter(is.na(over_occ))
}
# Elimination of those records with more than two possible corrections
coord_test <-
dplyr::bind_rows(coord_test) %>%
dplyr::as_tibble() # binding dataframes allocated in the list in a single one
coord_test <-
coord_test[!duplicated(coord_test[idcol]), ] %>%
dplyr::relocate(dplyr::all_of(idcol), dplyr::all_of(x), dplyr::all_of(y))
# Merge coord_test with other columns of occurrence database
coord_test <-
dplyr::left_join(coord_test,
data %>% dplyr::select(-c({{ x }}, {{ y }}, {{ cntr_iso2 }})),
by = idcol
)
return(coord_test)
}else{
return(NULL)
}
}
|
/scratch/gouwar.j/cran-all/cranData/BeeBDC/R/jbd_correct_coordinates.R
|
#' Internal function. Create directories for saving the outputs of the bdc package
#'
#' Creates directories for saving the outputs of bdc package, namely
#' checks, figures, reports, and databases.
#'
#' @param path Character. A path as a character vector for where to create the directories. If
#' no path is provided (the default), the directories will be created using [here::here()].
#'
#'
#' @importFrom here here
#'
#' @return None
#'
#' @details
#' Function used to create four folders for saving the results of some functions.
#' @noRd
#' @examples
#' \donttest{
#' jbd_create_dir(path = tempdir())
#' }
jbd_create_dir <- function(path = NULL) {
# Option 1 if no path is provided, create directories using here::here
if(is.null(path)){
dir.create(here::here("Output/Check"), recursive = TRUE)
dir.create(here::here("Output/Intermediate"), recursive = TRUE)
dir.create(here::here("Output/Report"), recursive = TRUE)
dir.create(here::here("Output/Figures"), recursive = TRUE)
} # END option 1
# Option 2 if a path is provided, create directories at that path
if(!is.null(path) & !file.exists(path)){
dir.create(paste0(path, "/Output/Check"), recursive = TRUE)
dir.create(paste0(path, "/Output/Intermediate"), recursive = TRUE)
dir.create(paste0(path, "/Output/Report"), recursive = TRUE)
dir.create(paste0(path, "/Output/Figures"), recursive = TRUE)
} # END option 2
} # END function
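# Directory layout created (sketch), relative to `path` (or here::here() when path is NULL):
#   Output/Check, Output/Intermediate, Output/Report, Output/Figures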
|
/scratch/gouwar.j/cran-all/cranData/BeeBDC/R/jbd_create_dir.R
|
#' Create figures reporting the results of the bdc/BeeBDC packages
#'
#' Creates figures (i.e., bar plots, maps, and histograms) reporting the results
#' of data quality tests implemented the bdc and BeeBDC packages. Works like [bdc::bdc_create_figures()],
#' but it allows the user to specify a save path.
#'
#' @param data A data frame or tibble. Needs to contain the results of data quality tests; that
#' is, columns starting with ".".
#' @param path A character directory. The path to a directory in which to save the figures.
#' Default = OutPath_Figures.
#' @param workflow_step A character string. Name of the workflow step. Options
#' available are "prefilter", "space", and "time".
#' @param database_id A character string. The column name with a unique record
#' identifier. Default = "database_id".
#' @param save_figures Logical. Indicates if the figures should be saved for further inspection or
#' use. Default = FALSE.
#'
#' @details This function creates figures based on the results of data quality
#' tests. A pre-defined list of test names is used for creating
#' figures depending on the workflow step provided. Figures are
#' saved in "Output/Figures" if save_figures = TRUE.
#'
#' @return List containing figures showing the results of data quality tests
#' implemented in one module of bdc/BeeBDC. When save_figures = TRUE, figures are
#' also saved locally in a .png format.
#'
#' @importFrom CoordinateCleaner cc_val
#' @importFrom readr read_csv
#' @importFrom dplyr summarise n pull mutate group_by intersect filter full_join
#' select mutate_if summarise_all rename
#' @importFrom ggplot2 theme_minimal theme element_text element_line
#' element_blank unit ggplot aes geom_col coord_flip labs geom_hline
#' scale_y_continuous ggsave theme_void geom_polygon geom_hex coord_quickmap
#' scale_fill_viridis_c geom_histogram
#' @importFrom here here
#' @importFrom stats reorder
#' @importFrom dplyr as_tibble
#' @importFrom tidyselect starts_with
#' @importFrom dplyr %>%
#' @export
#'
#' @examples
#' \donttest{
#' database_id <- c("GBIF_01", "GBIF_02", "GBIF_03", "FISH_04", "FISH_05")
#' lat <- c(-19.93580, -13.01667, -22.34161, -6.75000, -15.15806)
#' lon <- c(-40.60030, -39.60000, -49.61017, -35.63330, -39.52861)
#' .scientificName_emptys <- c(TRUE, TRUE, TRUE, FALSE, FALSE)
#' .coordinates_empty <- c(TRUE, TRUE, TRUE, TRUE, TRUE)
#' .invalid_basis_of_records <- c(TRUE, FALSE, TRUE, FALSE, TRUE)
#' .summary <- c(TRUE, FALSE, TRUE, FALSE, FALSE)
#'
#' x <- data.frame(
#' database_id,
#' lat,
#' lon,
#' .scientificName_emptys,
#' .coordinates_empty,
#' .invalid_basis_of_records,
#' .summary
#' )
#'
#'figures <-
#' jbd_create_figures(
#' data = x,
#' database_id = "database_id",
#' workflow_step = "prefilter",
#' save_figures = FALSE
#' )
#' }
jbd_create_figures <-
function(data,
path = OutPath_Figures,
database_id = "database_id",
workflow_step = NULL,
save_figures = FALSE) {
. <- .data <- n_flagged <- n_total <- freq <- NULL
. <- V1 <- Name <- freq <- year <- decimalLongitude <- NULL
decimalLatitude <- . <- long <- lat <- group <- `NA` <- .summary <- NULL
OutPath_Figures <- OutPath_Report <- NULL
if(is.null(path)){
warning("Please provide a path!")
}
suppressWarnings({
check_require_cran("cowplot")
check_require_cran("readr")
check_require_cran("ggspatial")
#check_require_cran("hexbin")
requireNamespace("bdc")
})
match.arg(arg = workflow_step,
choices = c("prefilter", "space", "time")
)
temp <- data %>% dplyr::select(tidyselect::starts_with("."))
convertNames <- dplyr::tibble(
# Space
.cap = "Records around country capital centroid" ,
.cen = "Records around country or province centroids",
.dbl = "Duplicated coordinates per species",
.equ = "Identical coordinates",
.otl = "Geographical outliers",
.gbf = "Records around the GBIF headquarters",
.inst = "Records around biodiversity institutions",
.rou = "Rounded (probably imprecise) coordinates",
.urb = "Records within urban areas",
.zer = "Plain zeros",
# The rest
.scientificName_empty = "Scientific name empty",
.coordinates_empty = "Coordinates empty",
.coordinates_outOfRange = "Coordinates out of range",
.invalid_basis_of_records = "Invalid basis of record",
.coordinates_country_inconsistent = "Coordinates inconsistent with country",
.uncer_term = "Uncertainty term in name",
.eventDate_empty = "Empty eventDate",
.year_outOfRange = "Year out of range",
.summary = "Summary",
summary_all_tests = "Summary of all tests"
)
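    # convertNames maps internal flag-column names to human-readable plot titles; look-ups below
    # use the flag name, e.g. convertNames[[".rou"]] returns "Rounded (probably imprecise) coordinates".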
if (all((colSums(temp, na.rm = TRUE) - nrow(temp)) == 0)) {
message("Figures were not created.\nNo records flagged as 'FALSE' in columns starting with '.'")
}
if (ncol(temp) == 0) {
message(
"Figures were not created.\nAt least one column 'starting with '.' containing results of data-quality tests must be provided"
)
}
# Formatting y axis of ggplot bar
fancy_scientific <- function(l) {
format(l, big.mark = ",", digits = 2, nsmall = 1)
}
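    # fancy_scientific() formats axis labels in fixed notation with a thousands separator and at
    # least one decimal place (via big.mark, digits, and nsmall).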
# Total number of records
suppressMessages({
n_records <-
data %>%
dplyr::summarise(n = dplyr::n()) %>%
dplyr::pull(n)
# Total number of records per database
n_record_database <-
data %>%
dplyr::mutate(
database_id = gsub("[[:digit:]]+", "", database_id),
database_id = gsub("_", "", database_id)
) %>%
dplyr::group_by(database_id) %>%
dplyr::summarise(n_total = dplyr::n())
our_theme <-
ggplot2::theme_minimal() +
ggplot2::theme(axis.title = ggplot2::element_text(size = 18),
axis.text = ggplot2::element_text(size = 12),
panel.grid.major.x = ggplot2::element_line(color = "#cbcbcb"),
panel.grid.major.y = ggplot2::element_blank(),
plot.margin = ggplot2::unit(c(0.5, 0.5, 0.5, 0.5), "cm")
)
# prefilter
if (workflow_step == "prefilter") {
tests <-
c(
".scientificName_empty",
".coordinates_empty",
".coordinates_outOfRange",
".invalid_basis_of_records",
".coordinates_country_inconsistent",
".summary"
)
names_tab <- names(data)
col_to_tests <- dplyr::intersect(tests, names_tab)
if (file.exists("Output/Check/01_coordinates_transposed.csv")) {
col_to_tests <- c(col_to_tests, "coordinates_transposed")
}
}
# space
if (workflow_step == "space") {
tests <-
c(
".equ",
".zer",
".cap",
".cen",
".otl",
".gbf",
".inst",
".dpl",
".rou",
".urb",
".summary"
)
names_tab <- names(data)
col_to_tests <- dplyr::intersect(tests, names_tab)
}
# time
if (workflow_step == "time") {
tests <-
c(
".eventDate_empty",
".year_outOfRange",
".summary",
"summary_all_tests"
)
names_tab <- names(data)
col_to_tests <- dplyr::intersect(tests, names_tab)
}
# function to create bar plots for each dataset separately
create_barplot_database <-
function(data, column_to_map, workflow_step = workflow_step) {
temp <-
data %>%
dplyr::mutate(
database_id = gsub("[[:digit:]]+", "", database_id),
database_id = gsub("_", "", database_id)
) %>%
dplyr::filter(., .data[[column_to_map]] == FALSE) %>%
dplyr::group_by(database_id, .data[[column_to_map]]) %>%
dplyr::summarise(n_flagged = dplyr::n()) %>%
dplyr::full_join(., n_record_database, by = "database_id") %>%
dplyr::mutate(freq = round(n_flagged / n_total, 5) * 100)
temp[is.na(temp)] <- 0
b <-
ggplot2::ggplot(temp, ggplot2::aes(x = stats::reorder(database_id, -freq), y = freq)) +
ggplot2::geom_col(colour = "white", fill = "royalblue") +
ggplot2::coord_flip() +
our_theme +
ggplot2::labs(x = "Dataset", y = "% of records flagged",
                          title = convertNames[[column_to_map]]) +
ggplot2::geom_hline(
yintercept = 0,
linewidth = 1,
colour = "#333333"
) +
ggplot2::scale_y_continuous(expand = c(0, 0),
labels = fancy_scientific)
}
# function to create barplots considering all datasets together
create_barplot_all_tests <-
function(data,
column_to_map = "summary_all_tests",
workflow_step = workflow_step) {
temp <-
data %>%
dplyr::select(tidyselect::starts_with(".")) %>%
dplyr::mutate_if(is.character, ~ as.logical(as.character(.))) %>%
dplyr::summarise_all(., .funs = sum) %>%
t() %>%
dplyr::as_tibble(rownames = "NA", .name_repair = "unique") %>%
setNames(c('NA', "V1")) %>%
dplyr::mutate(V1 = nrow(data) - V1) %>%
dplyr::mutate(
freq =
round((V1 / n_records * 100), 2)
) %>%
dplyr::rename(
Name = `NA`,
n_flagged = V1
)
temp[is.na(temp)] <- 0
      if (all(temp$freq == 0)) {
        temp$freq <- 0.000001
      }
      # Build the base plot whether or not any records were flagged
      gg <-
        ggplot2::ggplot(temp, ggplot2::aes(
          x = stats::reorder(Name, -freq),
          y = freq
        ))
b <-
gg +
ggplot2::geom_col(colour = "white", fill = "royalblue") +
ggplot2::coord_flip() +
our_theme +
ggplot2::labs(x = "Tests", y = "% of records flagged",
title = column_to_map) +
ggplot2::geom_hline(
yintercept = 0,
linewidth = 1,
colour = "#333333"
) +
ggplot2::scale_y_continuous(expand = c(0, 0),
labels = fancy_scientific)
}
# Names of columns available for creating barplot
bar <- c(
".scientificName_empty",
".coordinates_empty",
".coordinates_outOfRange",
".invalid_basis_of_records",
".coordinates_country_inconsistent",
".uncer_term",
".rou",
".equ",
".zer",
".cap",
".cen",
".otl",
".gbf",
".inst",
".urb",
".dpl",
".eventDate_empty",
".year_outOfRange",
".summary",
"summary_all_tests"
)
# Names of columns available for creating maps
maps <-
c(
".coordinates_country_inconsistent",
".equ",
".cap",
".cen",
".otl",
".inst",
".urb",
".dpl",
".rou"
)
# Names of column available for creating histogram
hist <- c("year")
# Find which names were provided
w_bar <- dplyr::intersect(col_to_tests, bar)
w_maps <- dplyr::intersect(col_to_tests, maps)
w_tranposed <- dplyr::intersect(col_to_tests, "coordinates_transposed")
w_hist <- hist
# List for saving figures
res <- list()
# Create bar plots
if (length(w_bar) == 0 & workflow_step %in% c("prefilter", "space")) {
message("At least one column 'starting with '.' must be provided")
}
if (length(w_bar) != 0) {
w <- which(colSums(!data[{{ w_bar }}], na.rm = TRUE) == 0)
if (length(w) != 0) {
message(
"No records flagged for the following tests:\n",
paste(w_bar[w], collapse = " ")
)
w_bar <- w_bar[-w]
}
if (nrow(n_record_database) != 1 & length(w_bar) != 0) {
for (i in 1:length(w_bar)) {
bp <-
create_barplot_database(
data = data,
column_to_map = w_bar[i],
workflow_step = workflow_step
)
bp_list <- list(bp)
names(bp_list) <- w_bar[i]
res <- c(res, bp_list)
}
}
# summary of all tests
if (nrow(n_record_database) != 1 & length(w_bar) != 0) {
bp_all <-
create_barplot_all_tests(
data = data,
column_to_map = "summary_all_tests",
workflow_step = workflow_step)
bp_all_list <- list(bp_all)
names(bp_all_list) <- "summary_all_tests"
res <- c(res, bp_all_list)
}
}
# Create maps of invalid vs valid records
if (length(w_maps) == 0 & workflow_step %in% c("prefilter", "space")) {
message("At least one of the following columns must be provided for creating maps\n", paste0(maps, sep = " "))
}
if (length(w_maps) != 0) {
w <- which(colSums(!data[{{ w_maps }}]) == 0)
if (length(w) != 0) {
message(
"No records flagged for the following tests:\n",
paste(w_maps[w], collapse = " ")
)
w_maps <- w_maps[-w]
}
# Worldmap
m <- rnaturalearth::ne_countries(returnclass = "sf")
# new theme
our_theme2 <-
ggplot2::theme_classic() +
ggplot2::theme(panel.border = ggplot2::element_blank(),
panel.grid.major = ggplot2::element_blank(),
panel.grid.minor = ggplot2::element_blank(),
)
for (i in 1:length(w_maps)) {
d <-
CoordinateCleaner::cc_val(
data,
lon = "decimalLongitude",
lat = "decimalLatitude",
verbose = FALSE,
value = "clean"
)
d <-
d %>%
dplyr::select(tidyselect::all_of(w_maps[i]), decimalLongitude, decimalLatitude) %>%
dplyr::filter(.data[[w_maps[i]]] == FALSE)
if (nrow(d) > 0){
p <-
ggplot2::ggplot() +
ggplot2::geom_sf(
data = m,
ggplot2::aes(),
fill = "gray75", colour = "gray88"
) +
ggplot2::coord_sf() +
ggplot2::geom_hex(
data = d,
ggplot2::aes(x = decimalLongitude, y = decimalLatitude),
binwidth = 3
#bins = 75
) +
#ggplot2::coord_quickmap() +
ggplot2::theme_void() +
ggplot2::labs(fill = "# of Records",
title = convertNames[w_maps[i]][[1]]) +
ggplot2::scale_fill_viridis_c() +
our_theme2
p_list <- list(p)
names(p_list) <- w_maps[i]
res <- c(res, p_list)
}
}
}
# Create maps of transposed and corrected coordinates
if (length(w_tranposed) == 0 & workflow_step == "prefilter") {
message("file 'Output/Check/01_coordinates_transposed.csv' not found. Maps showing the results of bdc_coordinates_transposed test will not be created")
}
if (length(w_tranposed) != 0) {
temp <- readr::read_csv("Output/Check/01_coordinates_transposed.csv")
p1 <-
bdc::bdc_quickmap(
data = temp,
lon = "decimalLongitude",
lat = "decimalLatitude",
col_to_map = "#EC364F",
size = 0.7
)
p2 <-
bdc::bdc_quickmap(
data = temp,
lon = "decimalLongitude_modified",
lat = "decimalLatitude_modified",
col_to_map = "royalblue",
size = 0.7
)
pt <- cowplot::plot_grid(p1, p2, nrow = 2)
pt_list <- list(pt)
names(pt_list) <- "coordinates_transposed"
res <- c(res, pt_list)
}
if (length(w_hist) == 0 & workflow_step == "time") {
message("Column 'year' not found")
}
if (length(w_hist) != 0 & workflow_step == "time") {
data <-
data %>%
dplyr::filter(.summary == TRUE)
min_year <- min(data$year, na.rm = T)
max_year <- max(data$year, na.rm = T)
t <- data %>%
ggplot2::ggplot(ggplot2::aes(x = year)) +
ggplot2::geom_histogram(
colour = "white",
fill = "royalblue", position = "identity", bins = 80
) +
our_theme +
ggplot2::labs(x = "Year", y = "Number of records",
                      title = "Records per year") +
ggplot2::geom_hline(
yintercept = 0,
linewidth = 1,
colour = "#333333"
) +
ggplot2::scale_y_continuous(labels = fancy_scientific)
t_list <- list(t)
names(t_list) <- "year"
res <- c(res, t_list)
}
}) # suppresswarning
#### start IF ####
if (save_figures == TRUE){
for (i in seq_along(res)) {
column_to_map <- names(res)[i]
ggplot2::ggsave(
paste(path,
"/",
workflow_step,
"_",
column_to_map,
"_",
"BAR",
".png",
sep = ""
),
res[[i]],
dpi = 300,
width = 6,
height = 3,
units = "cm",
scale = 5
)
}
message("Check figures in ", paste(path, sep = "/"))
}
return(res)
}
|
/scratch/gouwar.j/cran-all/cranData/BeeBDC/R/jbd_create_figures.R
|
#' Internal function. Get a world map with country names and iso code
#'
#' This is a helper function used to obtain names, iso code, and the limits
#' (polygon) of world countries. Data from the package 'rnaturalearth'. The prefix, jbd_ is meant
#' to distinguish this function from the original bdc::bdc_get_world_map().
#'
#' @param scale Passed to rnaturalearth's ne_countries().
#' Scale of map to return, one of 110, 50, 10 or 'small', 'medium', 'large'. Default = "large".
#'
#' @noRd
#' @importFrom dplyr %>%
#'
#' @examples
#' \donttest{
#' # This is an internal function.
#' worldmap <- jbd_get_world_map(scale = "large")
#' }
jbd_get_world_map <- function(scale = "large") {
name_en <- iso_n3 <- iso2c <- iso3c <- NULL
check_require_cran("rnaturalearth")
# check_require_github("ropensci/rnaturalearthdata")
# loadNamespace("rnaturalearthdata")
suppressWarnings({
worldmap <- rnaturalearth::ne_countries(scale = scale, returnclass = "sf")
# For large scales
if(scale %in% c("large", "10")){
worldmap <- worldmap %>%
dplyr::mutate(iso2c = countrycode::countrycode(worldmap$name_en,
origin = "country.name.en",
destination = "iso2c"),
iso3c = countrycode::countrycode(worldmap$name_en,
origin = "country.name.en",
destination = "iso3c")
) %>% # END mutate
dplyr::select(name_en, tidyselect::starts_with("iso"))
}else{ # For other scales...
# Add some iso code to some countries polygons
# Also, add the name column to be used
worldmap <- worldmap %>%
dplyr::mutate(name_en = countrycode::countrycode(iso_n3 %>% as.numeric(),
origin = "iso3n",
destination = "country.name.en"),
iso2c = countrycode::countrycode(worldmap$iso_n3 %>% as.numeric(),
origin = "iso3n",
destination = "iso2c"),
iso3c = countrycode::countrycode(worldmap$iso_n3 %>% as.numeric(),
origin = "iso3n",
destination = "iso3c")
) %>%# END mutate
dplyr::select(name_en, tidyselect::starts_with("iso"))
}# END else
is.na(worldmap) %>% colSums() # number of polygons without isocode
worldmap <- worldmap %>%
dplyr::select(iso2c, iso3c) %>%
sf::st_make_valid()
})
return(worldmap)
}
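# The returned object is an sf data frame holding the country polygons with their iso2c and
# iso3c codes, ready for spatial joins against occurrence points.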
|
/scratch/gouwar.j/cran-all/cranData/BeeBDC/R/jbd_get_world_map.R
|
# This function was written on the 25th of February to find manually identified outliers in
# bee data
# For questions, contact James B Dorey at jbdorey[at]me.com
#' Finds outliers, and their duplicates, as determined by experts
#'
#' Uses expert-identified outliers with source spreadsheets that may be edited by users. The function
#' will also use the duplicates file made using [BeeBDC::dupeSummary()] to identify duplicates of the
#' expert-identified outliers and flag those as well.
#' The function will add a flagging column called `.expertOutlier` where records that are FALSE are
#' the expert outliers.
#'
#' @param data A data frame or tibble. Occurrence records as input.
#' @param DataPath A character path to the directory that contains the outlier spreadsheets.
#' @param PaigeOutliersName A character path. Should lead to the outlier spreadsheet from Paige Chesshire (csv file).
#' @param newOutliersName A character path. Should lead to appropriate outlier spreadsheet (xlsx file).
#' @param ColombiaOutliers_all A character path. Should lead to spreadsheet of bee outliers from Colombia (csv file).
#' @param duplicates A data frame or tibble. The duplicate file produced by [BeeBDC::dupeSummary()].
#' @param NearTRUE Optional. A character file name to the csv file. If you want to remove expert
#' outliers that are too close to TRUE points, use the name of the NearTRUE.csv.
#' Note: This implementation is only basic for now unless there is a greater need in the future.
#' @param NearTRUE_threshold Numeric. The threshold (in km) for the distance to TRUE points to
#' keep expert outliers.
#'
#' @return Returns the data with a new column, `.expertOutlier` where records that are FALSE are
#' the expert outliers.
#' @export
#'
#' @importFrom dplyr %>%
#'
#' @examples
#' \dontrun{
#' # Read example data
#' data(beesFlagged)
#' # Read in the most-recent duplicates file as well
#' if(!exists("duplicates")){
#' duplicates <- fileFinder(path = DataPath,
#' fileName = "duplicateRun_") %>%
#' readr::read_csv()}
#' # identify the outliers and get a list of their database_ids
#' beesFlagged_out <- manualOutlierFindeR(
#' data = beesFlagged,
#' DataPath = DataPath,
#' PaigeOutliersName = "removedBecauseDeterminedOutlier.csv",
#' newOutliersName = "^All_outliers_ANB_14March.xlsx",
#' ColombiaOutliers_all = "All_Colombian_OutlierIDs.csv",
#' duplicates = duplicates)
#' }
#'
manualOutlierFindeR <- function(
data = NULL,
DataPath = NULL,
PaigeOutliersName = "removedBecauseDeterminedOutlier.csv",
newOutliersName = "All_outliers_ANB.xlsx",
ColombiaOutliers_all = "All_Colombian_OutlierIDs.csv",
duplicates = NULL,
NearTRUE = NULL,
NearTRUE_threshold = 5
){
# locally bind variables to the function
OutPath_Report <- eventDate <- near_truepoints_KM <- database_id <- occurrenceID <- NULL
institutionCode <- database_id_keep <- catalogNumber <- .expertOutlier <- . <- NULL
#### 0.0 Prep ####
##### 0.1 Errors ####
###### a. FATAL errors ####
if(is.null(data)){
stop(paste0(" - No data was given. Please specify the occurrence data."))
}
if(is.null(DataPath)){
stop(paste0(" - No DataPath was given. Please specify the directory that contains the outliers."))
}
if(is.null(PaigeOutliersName)){
stop(paste0(" - No PaigeOutliersName was given. Please specify the outliers' file name."))
}
if(is.null(newOutliersName)){
stop(paste0(" - No newOutliersName was given. Please specify the outliers' file name."))
}
if(is.null(duplicates)){
stop(paste0(" - No duplicates was given. Please provide the duplicates dataset as generated by jbd_dupeSummary."))
}
###
#### 1.0 Data prep ####
##### 1.1 Find data ####
writeLines(" - Looking for the datasets...")
###### a. Paige outliers ####
# Find the outliers from chesshire et al. 2023
PaigeOutliers <- fileFinder(path = DataPath,
fileName = PaigeOutliersName) %>%
readr::read_csv( col_types = readr::cols(.default = "c")) %>%
suppressWarnings()
###### b. new outliers ####
# Find the new outliers from the three sheets concatenates by Angela
outliersAll <- fileFinder(path = DataPath,
fileName = newOutliersName) %>%
openxlsx::read.xlsx("Outliers_FromCanadaToPanama_ANB") %>%
dplyr::mutate(dplyr::across(tidyselect::all_of(colnames(.)), as.character)) %>%
dplyr::bind_rows(fileFinder(path = DataPath,
fileName = newOutliersName) %>%
openxlsx::read.xlsx("Tracys_outliers") %>%
dplyr::mutate(dplyr::across(tidyselect::all_of(colnames(.)), as.character))
) %>%
dplyr::bind_rows(fileFinder(path = DataPath,
fileName = newOutliersName) %>%
openxlsx::read.xlsx("Colombian_outliers") %>%
dplyr::mutate(dplyr::across(tidyselect::all_of(colnames(.)), as.character))
) %>%
dplyr::bind_rows(fileFinder(path = DataPath,
fileName = newOutliersName) %>%
openxlsx::read.xlsx("Outliers_SppInStatus3") %>%
dplyr::mutate(dplyr::across(tidyselect::all_of(colnames(.)), as.character))
) %>%
readr::write_excel_csv(paste(tempdir(), "newOutliers.csv", sep = "/"))
# Read back in with the correct column classes
outliersAll <- fileFinder(path = tempdir(),
fileName = "newOutliers.csv") %>%
readr::read_csv(col_types = readr::cols(.default = "c"), lazy = FALSE) %>%
dplyr::mutate(eventDate = eventDate %>%
lubridate::ymd_hms(truncated = 5)) %>%
suppressWarnings()
###### c. Colombia ####
ColombiaOutliers <- fileFinder(path = DataPath,
fileName = ColombiaOutliers_all) %>%
readr::read_csv( col_types = readr::cols(.default = "c")) %>%
suppressWarnings()
###### d. remove NearTRUE ####
  # If the user provides a NearTRUE input
if(!is.null(NearTRUE)){
# Find and read the csv
NearTRUE_data <- fileFinder(path = DataPath,
fileName = NearTRUE) %>%
readr::read_csv() %>%
dplyr::filter(near_truepoints_KM >= NearTRUE_threshold)
# Remove those below a threshold in from NearTRUE in outliersAll
outliersAll <- outliersAll %>%
dplyr::filter(!database_id %in% NearTRUE_data$database_id)
# Remove those below a threshold in from NearTRUE in ColombiaOutliers
ColombiaOutliers <- ColombiaOutliers %>%
dplyr::filter(!database_id %in% NearTRUE_data$database_id)
}
###### e. eventDate ####
# format data eventDate
data <- data %>%
dplyr::mutate(eventDate = eventDate %>%
lubridate::ymd_hms(truncated = 5))
##### 1.2 Process Paige ####
writeLines(" - Processing the Paige outliers...")
# Find PaigeOutliers in the occurrence data by occurrenceID and institutionCode
Outl_occID <- data %>%
tidyr::drop_na(occurrenceID) %>%
dplyr::filter(occurrenceID %in% PaigeOutliers$occurrenceID &
institutionCode %in% PaigeOutliers$institutionCode)
# Find PaigeOutliers by occurrenceID and institutionCode
Outliers_matched <- data %>%
# Remove matched IDs
dplyr::filter(!occurrenceID %in% Outl_occID$occurrenceID) %>%
tidyr::drop_na(catalogNumber, institutionCode) %>%
dplyr::filter(catalogNumber %in% PaigeOutliers$catalogNumber &
institutionCode %in% PaigeOutliers$institutionCode) %>%
# Re-bind the outlier matches
dplyr::bind_rows(Outl_occID)
# Combine the Paige and new outliers
outliersAll <- outliersAll %>%
# Convert to the correct column types
readr::type_convert(col_types = ColTypeR()) %>%
dplyr::bind_rows(Outliers_matched)
#### 2.0 Find outlier duplicates ####
##### 2.1 Find duplicates ####
writeLines(" - Looking for duplicates of the outliers...")
# Get a list of the outliers and their duplicates
outlierDuplicates <- duplicates %>%
dplyr::filter(database_id %in% outliersAll$database_id |
database_id_keep %in% outliersAll$database_id)
duplicateList <- c(outlierDuplicates %>% dplyr::pull(database_id),
outlierDuplicates %>% dplyr::pull(database_id_keep)) %>% unique()
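  # duplicateList now holds both members of every duplicate pair (the kept and the removed
  # database_id), so duplicates of an expert-identified outlier are flagged alongside the outlier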
##### 2.2 Combine duplicates ####
# Get a list of all outliers and their duplicates - database_id
outList <- c(outliersAll %>% dplyr::pull(database_id), duplicateList,
               ColombiaOutliers %>% dplyr::pull(database_id)) %>% unique()
#### 3.0 Flag records ####
# Find the occurrences that did not match
data <- data %>%
# Add the .expertOutlier columns as TRUE (not flagged)
dplyr::mutate(.expertOutlier = dplyr::if_else(
database_id %in% outList,
FALSE, TRUE))
# Return user output
message(
paste(
"\\manualOutlierFindeR:\n",
"Flagged",
format(sum(data$.expertOutlier == FALSE, na.rm = TRUE), big.mark = ","),
"expert-identified outliers:\n",
"The column '.expertOutlier' was added to the database.\n"
)
)
# Return data
return(data)
}
|
/scratch/gouwar.j/cran-all/cranData/BeeBDC/R/manualOutlierFindeR.R
|
###### 0.2.1 NS func. ####
#' @importFrom dplyr %>%
# This function splits up the input name into its component elements
nameSplitR <- function(NameInput,
Authority_patterns = NULL){
requireNamespace("mgsub")
if(is.null(Authority_patterns)){
# split up the authority from the species name based on regular expressions
# Help can be found testing regex here: https://www.regextester.com/95629
Authority_patterns <- paste0("([a-zA-Z.\\-\\_]+,)", "|", # Find words ending in comma
"[a-zA-Z0-9.\\-\\_]+ and ","|", # Or find words before " and "
"\\s[:upper:][a-z]+$", "|", # Find a space then an upper-case letter (i.e., an author name after the species name)
# and at the end of the string
"\\s[:upper:][a-z]+\\s\\&", "|", # Find uppercase followed by "&" like "Smith &..."
"\\s[:upper:][a-z]+[A-Z][a-z]+\\s\\&", "|", # As above, but including multiple capitals, like "LaBerge &..."
"\\s[:upper:][a-z]+\\s[0-9]{4}", "|", # Find Author followed by no comma, space, and a year like " Cresson 1863"
"\\s[:upper:]\\'[A-Z]", "|", # Deal with names like "O'Brien"
"(\\([A-Z][a-z]+)(?=\\s)", "|", #, "|", # Find a capitalized word (with or without a bracket) - \\([A-Z][a-z]+) - that is followed by a space (not a bracket) - (?=\\s)
"(\\([A-Z]{1,2}[\\.]?)(?=\\s)", "|", # Find those few authorities that start with multiple capital letters - (SS Saunders, 1850), or with initials - (W. F. Kirby, 1900)
"(\\([A-Z][a-z\\u016f\\u00c0-\\u017e]+\\-[A-Z]+)(?=[\\s\\,])","|", # Match as above, but with special characters
"(\\([A-Z][a-z\\u016f\\u00c0-\\u017e]+)(?=[\\s\\,])","|", # Match as above, but with special characters
" \\(?de | \\(?van der | \\?van ", "|", # Finds the prefixs for last names "de Villers", "van der Zanden" etc.
"(\\([A-Z][a-z\\u016f\\u00c0-\\u017e]+\\-[A-Z][a-z\\u016f\\u00c0-\\u017e]+)(?=[\\s\\,])", "|", # Matches European/African characters with a hyphen between
"( not_\\(Cockerell)|\\u016fozi\\u016f|auct \\,| sensu auct not| 1914| sensu auct") # Find other haphazard matches
}
# Split the name based on Auth. Patterns
SpSplit <- stringr::str_split(NameInput, Authority_patterns, simplify = TRUE) %>%
mgsub::mgsub( c(" \\($", " $"), c("","")) # Get only the species name in ELEMENT ONE
AuthSplit <- substr( NameInput[1], # using the raw NameInput
nchar(SpSplit[[1]])+2, nchar(NameInput[1])) # find authority by removing name
##### subgenus test #####
# Test for presence of subgenus
if( grepl( "\\)",SpSplit[1], fixed = FALSE) == TRUE &
!grepl( "\\)\\s\\(", SpSplit[1], fixed = FALSE)|
grepl( "\\s[A-Z][a-z]", SpSplit[1], fixed = FALSE) == TRUE &
!grepl( "\\)\\s\\(", SpSplit[1], fixed = FALSE)){ # Check to see if there is a subgenus using the presence of a a bracket
# if there IS a subgenus present
# SPLIT into individual words (by any white space)
SynCols_split <- stringr::str_split(SpSplit[1],"\\s", simplify = TRUE) # Express as matrix
Gen_Name <- SynCols_split[1] # Get genus
    SubGen_Name <- mgsub::mgsub(SynCols_split[2], c("\\)", "\\("), c("","")) # Get subgenus and remove brackets from the subgenus column
SpName <- SynCols_split[3] # Get species
SubSpName <- ifelse(length(SynCols_split)>3, paste(c(SynCols_split[4:length(SynCols_split)]),
sep=" ",collapse=" "), "NA") # Get subspecies, if present
}else{ # if there is NOT a subgenus present
# SPLIT into individual words (by any white space)
SynCols_split <- stringr::str_split(SpSplit[1],"\\s", simplify = TRUE) # Express as matrix
Gen_Name <- SynCols_split[1] # Get genus
SubGen_Name <- "NA" # NO subgenus
SpName <- SynCols_split[2] # Get species
SubSpName <- ifelse(length(SynCols_split)>2, paste(c(SynCols_split[3:length(SynCols_split)]),
sep=" ",collapse=" "), "NA") # Get subspecies, if present
} # END else
##### flag test ####
flagTest <- sub( "(.*?[A-Za-z\\s]?){0,1}([\\(0-9\\)]{4}\\,?)|^[\\w]+", "", AuthSplit) %>% # Extract the text after the year in authorship
trimws( which = "left" , whitespace = "[\\, \\] \\) ]") # Trim some special characters from the FRONT of the string
if(flagTest != ""){ # IF there are flags present
AuthSplit2 <- substr(AuthSplit, # using the raw AuthSplit
start = 1, stop = (nchar(AuthSplit) - nchar(flagTest))) %>% # extract just the authority by removing the flag
trimws( which = "right" , whitespace = "[\\, \\] \\) ]")
Flags <- substr(AuthSplit,
start = (nchar(AuthSplit2)+1), stop = (nchar(AuthSplit))) %>% # extract just the FLAG by removing the author
trimws( which = "left" , whitespace = "[\\, \\] \\) ]")
}else{ # END flag test - IF
AuthSplit2 <- AuthSplit
Flags <- ""
} # END flag test - ELSE
# Return values of interest
AuthSplitOut <- list("flags" = Flags, "validName" = NameInput,
"canonical" = paste( Gen_Name, "(", SubGen_Name , ")", SpName, SubSpName, sep = " ") %>%
mgsub::mgsub(c("NA","\\( "," \\)"),c("","\\(","\\)") ) %>%
mgsub::mgsub(c(" ","\\(\\)"),c(" ","") ), # Formerly in full - SpSplit[1]
"genus" = Gen_Name,
"subgenus" = SubGen_Name, "species" = SpName, "infraspecies" = SubSpName,
"authorship" = AuthSplit2)
} # END the NameAuth_Splitter FUNCTION
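
# A minimal usage sketch (illustrative only; the exact split depends on the Authority_patterns
# regexes above). For a simple binomial with an author and year, the returned list is expected
# to look roughly like:
# out <- nameSplitR("Apis mellifera Linnaeus, 1758")
# out$genus       # "Apis"
# out$species     # "mellifera"
# out$authorship  # "Linnaeus, 1758"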
|
/scratch/gouwar.j/cran-all/cranData/BeeBDC/R/nameSplitR.R
|
# This function was written by James B Dorey on the 29th of September 2022
# Its purpose is to visualise all flags for each dataSource (simplified to the text before the
# first underscore)
# Please contact jbdorey[at]me.com for help
#' Generate a plot summarising flagged data
#'
#' Creates a compound bar plot that shows the proportion of records that pass or fail each flag (rows)
#' and for each data source (columns). The function can also optionally return a point map for
#' a user-specified species when plotMap = TRUE. This function requires that your dataset has been
#' run through some filtering functions - so that is can display logical columns starting with
#' ".".
#'
#' @param data A data frame or tibble. Occurrence records as input.
#' @param flagColours A character vector. Colours in order of pass (TRUE), fail (FALSE), and NA.
#' Default = c("#127852", "#A7002D", "#BDBABB").
#' @param fileName Character. The name of the file to be saved, ending in ".pdf".
#' If saving as a different file type, change file type suffix - See `device`.
#' @param outPath A character path. The path to the directory in which the figure will be saved.
#' Default = OutPath_Figures.
#' @param width Numeric. The width of the output figure in user-defined units Default = 15.
#' @param height Numeric. The height of the output figure in user-defined units Default = 9.
#' @param units Character. The units for the figure width and height passed to [ggplot2::ggsave()]
#' ("in", "cm", "mm", or "px"). Default = "in".
#' @param dpi Numeric. Passed to [ggplot2::ggsave()]. Plot resolution. Also accepts a string input: "retina" (320), "print" (300), or
#' "screen" (72). Applies only to raster output types. Default = 300.
#' @param bg Character. Passed to [ggplot2::ggsave()]. Background colour. If NULL, uses the plot.background fill value from the plot theme.
#' Default = "white."
#' @param device Character. Passed to [ggplot2::ggsave()]. Device to use. Can either be a device function (e.g. png), or one of "eps", "ps", "tex" (pictex), "pdf", "jpeg", "tiff", "png", "bmp", "svg" or "wmf" (windows only).
#' Default = "pdf". If not using default, change file name suffix in fileName argument.
#' @param speciesName Optional. Character. A species name, as it occurs in the user-input nameColumn.
#' If provided, the data will be filtered to this species for the plot.
#' @param nameColumn Optional. Character. If speciesName is not NULL, the column in which to look
#' for the species. Note that, combined with speciesName, figures can be made for
#' a variety of factors.
#' @param saveFiltered Optional. Logical. If TRUE, the filtered data will be saved to the computer
#' as a .csv file.
#' @param plotMap Logical. If TRUE, the function will produce a point map. Tested for use with one
#' species at a time; i.e., when speciesName is not NULL.
#' @param filterColumn Optional. The flag column to display on the map. Default = .summary.
#' @param mapAlpha Optional. Numeric. The opacity for the points on the map.
#' @param xbuffer Optional. Numeric vector. A buffer in degrees of the amount to increase the
#' min and max bounds along the
#' x-axis. This may require some experimentation, keeping in mind
#' the negative and positive directionality of hemispheres. Default = c(0,0).
#' @param ybuffer Optional. Numeric vector. A buffer in degrees of the amount to increase the
#' min and max bounds along the y-axis. This may require some experimentation, keeping in mind
#' the negative and positive directionality of hemispheres. Default = c(0,0).
#' @param ptSize Optional. Numeric. The size of the points as passed to ggplot2. Default = 1.
#' @param saveTable Optional. Logical. If TRUE, the function will save the data used to produce the
#' compound bar plot.
#' @param jitterValue Optional. Numeric. The value to jitter points by in the map in decimal degrees.
#' @param returnPlot Logical. If TRUE, return the plot to the environment. Default = FALSE.
#' @param ... Optional. Extra variables to be fed into [forcats::fct_recode()] to change names on plot.
#' For example... 'B. Mont.' = "BMont", 'B. Minkley' = "BMin", Ecd = "Ecd", Gaiarsa = "Gai"
#'
#' @return Exports a compound bar plot that summarises all flag columns. Optionally can also return
#' a point map for a particular species in tandem with the summary plot.
#' @export
#'
#' @importFrom dplyr across desc %>%
#' @importFrom ggplot2 geom_sf geom_point geom_jitter scale_color_manual coord_sf
#' element_rect scale_fill_viridis_d
#' xlab ylab ggtitle
#' @importFrom ggspatial north_arrow_fancy_orienteering annotation_north_arrow
#' @importFrom grDevices gray
#'
#' @examples
#' # import data
#' data(beesFlagged)
#' OutPath_Figures <- tempdir()
#' # Visualise all flags for each dataSource (simplified to the text before the first underscore)
#' plotFlagSummary(
#' data = beesFlagged,
#' # Colours in order of pass (TRUE), fail (FALSE), and NA
#' flagColours = c("#127852", "#A7002D", "#BDBABB"),
#' fileName = paste0("FlagsPlot_TEST_", Sys.Date(),".pdf"),
#' outPath = OutPath_Figures,
#' width = 15, height = 9,
#' # OPTIONAL:
#' #\ # # Filter to species
#' #\ speciesName = "Holcopasites heliopsis",
#' #\ # column to look in
#' #\ nameColumn = "species",
#' #\ # Save the filtered data
#' #\ saveFiltered = TRUE,
#' #\ # Filter column to display on map
#' #\ filterColumn = ".summary",
#' #\ plotMap = TRUE,
#' #\ # amount to jitter points if desired, e.g. 0.25 or NULL
#' #\ jitterValue = NULL,
#' #\ # Map opacity value for points between 0 and 1
#' #\ mapAlpha = 1,
#' # Extra variables can be fed into forcats::fct_recode() to change names on plot
#' GBIF = "GBIF", SCAN = "SCAN", iDigBio = "iDigBio", USGS = "USGS", ALA = "ALA",
#' ASP = "ASP", CAES = "CAES", 'B. Mont.' = "BMont", 'B. Minkley' = "BMin", Ecd = "Ecd",
#' Gaiarsa = "Gai", EPEL = "EPEL"
#' )
#'
#'
#'
plotFlagSummary <- function(
data = NULL,
flagColours = c("#127852", "#A7002D", "#BDBABB"),
fileName = NULL,
outPath = OutPath_Figures,
width = 15, height = 9, units = "in",
dpi = 300,
bg = "white", device = "pdf",
# OPTIONAL:
speciesName = NULL,
saveFiltered = FALSE,
filterColumn = ".summary",
nameColumn = NULL,
plotMap = FALSE,
mapAlpha = 0.5,
xbuffer = c(0,0),
ybuffer = c(0,0),
ptSize = 1,
saveTable = FALSE,
# Jitter map? enter jitter amount
jitterValue = NULL,
returnPlot = FALSE,
...
){
# locally bind variables to the function
OutPath_Figures <- decimalLatitude <- decimalLongitude <- . <- dataSource <- NULL
database <- flags <- value <- count <- .data <- NULL
requireNamespace("ggspatial")
requireNamespace("dplyr")
requireNamespace("bdc")
requireNamespace("forcats")
#### 0.0 Prep ####
##### 0.1 errors ####
###### a. FATAL errors ####
if(is.null(data)){
stop(" - Please provide an argument for data I'm a program not a magician.")
}
if(is.null(outPath)){
stop(" - Please provide an argument for outPath Seems reckless to let me just guess.")
}
if(is.null(speciesName) & saveFiltered == TRUE){
stop(" - saveFiltered cannot be TRUE if no speciesName is provided to filter occurrences.\n",
"This functionality is provided to save the filtered dataset for examination.")
}
###### b. warnings ####
if(is.null(speciesName) & plotMap == TRUE){
warning(" - plotMap is not tested with no speciesName provided to filter occurrences.\n",
"This functionality is provided to check the filtered dataset for examination and I fear ",
"that this might result in an intense task to run... Maybe not... Enjoy!")
}
if(is.null(fileName)){
writeLines(" - No argument provided for fileName. Using default of 'FlagsPlot_DATE.pdf'")
fileName = paste0("FlagsPlot_", Sys.Date(),".pdf")
}
if(is.null(filterColumn)){
writeLines(" - No argument provided for filterColumn Using default of '.summary'")
filterColumn = ".summary"
}
if(!is.null(speciesName) & is.null(nameColumn)){
writeLines(" - nameColumn is not provided. Defaulting to scientificName.\n")
nameColumn = "scientificName"
}
#### 1.0 Prepare data ####
##### 1.1 Optional species filter ####
# If a species name is provided then filter to ONLY that/those species
if(!is.null(speciesName)){
writeLines(" - Filtering to selected species...")
###### a. filter ####
# Filter data
data <- data %>%
dplyr::filter( data[[nameColumn]] %in% speciesName)
writeLines(paste0(" - Selected species has ",
format(nrow(data), big.mark = ","),
" occurrences."))
# OPTIONAL save filtered data
##### b. save ####
# If a save location is provided, then save the filtered dataset
if(saveFiltered == TRUE){
data %>%
readr::write_excel_csv(paste0(outPath, "/FlagsPlot_", speciesName,".csv"))
}
##### c. map data ####
# Save a version of the data for mapping, if asked by user
if(plotMap == TRUE){
mapData <- data %>%
# Select the columns to use
dplyr::select(c(decimalLatitude, decimalLongitude,
tidyselect::all_of(c(nameColumn, filterColumn))))
# Sort the filterColumn to have TRUE on top of FALSE
mapData <- mapData %>%
dplyr::select(tidyselect::all_of(filterColumn)) %>%
dplyr::mutate(across(1, as.character)) %>%
dplyr::bind_cols(mapData %>% dplyr::select(!tidyselect::all_of(filterColumn))) %>%
dplyr::arrange(desc(.))
}
} # END !is.null(speciesName)
##### 1.3 Prepare for plot ####
writeLines(" - Preparing data to plot...")
  # Make a column with the dataSource simplified to the text before the first underscore
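  # (e.g., a dataSource such as "iDigBio_Apidae" is simplified to the database "iDigBio")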
data <- data %>%
# Make a new column with the dataSource names but not the specifics
dplyr::mutate(database = stringr::str_replace(dataSource,
pattern = "_.*",
replacement = "")) %>%
# Group by the new column (i.e. database)
dplyr::group_by(database) %>%
# Select only the filter columns (starting with ".") and the database column
dplyr::select(database, tidyselect::starts_with(".")) %>%
#mutate(across(is.logical, ~as.numeric(.x))) %>%
#dplyr::filter(database %in% c("ASP", "Ecd")) %>%
# Pivot the data to a longer format for plotting in ggplot2.
tidyr::pivot_longer(cols = tidyselect::starts_with("."),
names_to = "flags",
values_to = "value") %>%
dplyr::group_by(database, flags, value) %>%
dplyr::summarise(count = dplyr::n())
# Make flag type
data$flagType <- data$flags %>%
dplyr::recode(.coordinates_empty = "Initial", .coordinates_outOfRange = "Initial",
.basisOfRecords_notStandard = "Initial", .coordinates_country_inconsistent = "Initial",
.occurrenceAbsent = "Initial", .unLicensed = "Initial", .GBIFflags = "Initial",
# Taxonomy
.scientificName_empty = "Taxonomy",.invalidName = "Taxonomy", .uncer_terms = "Taxonomy",
#Space
.rou = "Space", .uncertaintyThreshold = "Space", .cap = "Space", .cen = "Space",
.equ = "Space",.gbf = "Space", .inst = "Space", .zer = "Space",.sea = "Space",
.val = "Space",
.countryOutlier = "Space", .expertOutlier = "Space",
.sequential = "Space", .lonFlag = "Space", .latFlag = "Space", .gridSummary = "Space",
# Time
.eventDate_empty = "Time", .year_outOfRange = "Time",
# Summary
.duplicates = "Summary", .summary = "Summary") %>%
factor(levels = c("Initial",
"Taxonomy","Space",
"Time","Summary")) %>%
forcats::fct_relevel("Initial",
"Taxonomy","Space",
"Time","Summary")
# You can turn the flag columns into factors and order them here
data$flags <- data$flags %>%
dplyr::recode_factor(.coordinates_empty = "No coordinates",
.coordinates_outOfRange = "Point off map",
.basisOfRecords_notStandard = "Excluded basis of record",
.coordinates_country_inconsistent = "Coords. & country inconsistent",
.occurrenceAbsent = "Absent record",
.unLicensed = "Protected by license",
.GBIFflags = "GBIF flags",
# Taxonomy
.scientificName_empty = "No scientific name",
.invalidName = "Name didn't match",
.uncer_terms = "Taxonomy qualifier",
#Space
.rou = "Coordinates rounded",
.uncertaintyThreshold = "High coordinate uncertainty",
.cap = "Capital centroid",
.cen = "Country centroid",
.equ = "Coordinates equal",
.gbf = "Point on GBIF HQ",
.inst = "Point on institution",
.zer = "Coordinates zero",
.sea = "Point in sea",
.val = "Coordinates invalid",
.countryOutlier = "Country outliers",
.expertOutlier = "Expert outliers",
.sequential = "Coordinate fill-down",
.lonFlag = "Gridded longitudes", .latFlag = "Gridded latitudes",
.gridSummary = "Gridded lat & lon",
# Time
.eventDate_empty = "No event date", .year_outOfRange = "Year out of range",
# Summary
.duplicates = "Duplicate", .summary = "Summary") %>%
forcats::fct_rev()
# Factorise and order by database
data$database <- data$database %>%
dplyr::recode_factor(...)
##### 1.4 Save table ####
# If user choses to save the table that makes the plot, do so here.
if(saveTable == TRUE){
data %>%
readr::write_excel_csv(paste0(outPath, "/",
stringr::str_replace(fileName,
pattern = "\\.pdf",
replacement = ".csv")))
}
# Make data$value into a character type
data$value <- as.character(data$value)
# NA levels may be removed from the plot here, but I prefer to keep them.
# data <- data %>%
# tidyr::drop_na()
#### 2.0 Plot ####
##### 2.1 Build plot ####
writeLines(" - Building plot...")
plot <- ggplot2::ggplot(data = data) +
# Set up the plot facets
ggplot2::facet_grid( flagType~database, scales = "free", space= "free_y") +
# Make the bar plots
ggplot2::geom_bar(ggplot2::aes(y = flags, x = count, fill = as.factor(value)),
position="fill", stat='identity') +
# Colour and label the plots
ggplot2::scale_fill_manual(labels = c("TRUE" = "Pass",
"FALSE" = "Fail",
"NA" = "NA"),
values = c("TRUE" = flagColours[1],
"FALSE" = flagColours[2],
"NA" = flagColours[3])
) +
# Set up the theme stuff
ggplot2::theme(axis.text.x = ggplot2::element_blank(), # element_text(angle = 35, vjust = 0.5, hjust=0.5),
#axis.text.y = element_text(colour = as.character(flagCols)),
strip.placement = "outside",
panel.background = ggplot2::element_rect(fill = "white"),
plot.title = ggplot2::element_text(size = 20, face = "bold")) +
ggplot2::labs(x ="Proportion flagged", y = "Flag columns", fill = "Filter")
##### 2.2 Filtered title+map+save ####
if(!is.null(speciesName)){
###### a. title ####
# Add title if filtered by species name
plot <- plot +
ggplot2::ggtitle(paste0("Species: ", speciesName))
##### b. map ####
if(plotMap == TRUE){
# Make into an ordered factor
mapData <- mapData %>%
dplyr::mutate(dplyr::across(tidyselect::all_of(filterColumn),
~factor(.x, levels = c(FALSE, TRUE), ordered = TRUE))) %>%
tidyr::drop_na(decimalLatitude, decimalLongitude)
# Download world map using rnaturalearth packages
WorldMap_layer <- rnaturalearth::ne_countries(scale = "medium", returnclass = "sf",
country = NULL, type="map_units")
# Create the checklist map
(PointMap <- ggplot2::ggplot(data = WorldMap_layer ) +
# CORE plotting of map and data
# Plot and colour the terrestrial base map
ggplot2::geom_sf(ggplot2::aes(fill = NULL), size = 0.15)+
# plot point data
# POINTS IF IS NULL; i.e. DON'T jitter
{if(is.null(jitterValue))
ggplot2::geom_point(data = mapData %>% dplyr::filter(.[[1]] == "FALSE"),
mapping = ggplot2::aes(x = decimalLongitude, y = decimalLatitude,
colour = .data[[filterColumn]]),
size = ptSize,
alpha = mapAlpha)} +
{if(is.null(jitterValue))
ggplot2::geom_point(data = mapData %>% dplyr::filter(.[[1]] == "TRUE"),
mapping = ggplot2::aes(x = decimalLongitude, y = decimalLatitude,
colour = .data[[filterColumn]]),
size = ptSize,
alpha = mapAlpha)} +
# POINTS IF IS NOT NULL; i.e. jitter
{if(!is.null(jitterValue))ggplot2::geom_jitter(mapData %>%
dplyr::filter(.[[1]] == "FALSE"),
mapping = ggplot2::aes(x = decimalLongitude, y = decimalLatitude,
colour = .data[[filterColumn]]),
size = ptSize,
alpha = mapAlpha, width = jitterValue, height = jitterValue)}+
{if(!is.null(jitterValue))ggplot2::geom_jitter(mapData %>%
dplyr::filter(.[[1]] == "TRUE"),
mapping = ggplot2::aes(x = decimalLongitude,
y = decimalLatitude,
colour = .data[[filterColumn]]),
size = ptSize,
alpha = mapAlpha, width = jitterValue,
height = jitterValue)}+
ggplot2::scale_color_manual(values = c(
"TRUE" = flagColours[[1]], # "#013766",
"FALSE" = flagColours[[2]]), #"#ac0e28"),
name = "Passed occ.") +
# Set map limits, if wanted
ggplot2::coord_sf(expand = TRUE,
ylim = c(min(mapData$decimalLatitude, na.rm = TRUE)+ybuffer[[1]],
max(mapData$decimalLatitude, na.rm = TRUE)+ybuffer[[2]]),
xlim = c(min(mapData$decimalLongitude, na.rm = TRUE)+xbuffer[[1]],
max(mapData$decimalLongitude, na.rm = TRUE)+xbuffer[[2]]),
lims_method = "box") +
# Map formatting
# Add in the map's north arrow
ggspatial::annotation_north_arrow(location = "tl", which_north = "true",
pad_x = unit(0.1, "cm"), pad_y = unit(0.1, "cm"),
style = ggspatial::north_arrow_fancy_orienteering()) + # Add in NORTH ARROW
ggplot2::theme(panel.grid.major = ggplot2::element_line(color = gray(.1, alpha = 0.1),
linetype = "dashed", linewidth = 0.5), # Add grid lines
panel.border = ggplot2::element_rect(color = gray(.1, alpha = 1),
linetype = "solid", linewidth = 0.5,
fill = NA), # add panel border
panel.background = ggplot2::element_rect(fill = "aliceblue") ,
plot.title = ggplot2::element_text(face = "italic"))+ # Add background - colour in the ocean
# Change map colour scheme
ggplot2::scale_fill_viridis_d(option = "magma") + # options = "magma", "inferno", "plasma", "cividis"
# Add in X and Y labels
ggplot2::xlab("Longitude") + ggplot2::ylab("Latitude") +
# Add in the title
ggplot2::ggtitle( speciesName) )
      # Save the map as 10 x 5 inches
ggplot2::ggsave(paste0("/Map_FlagsPlot_", speciesName, ".pdf"), plot = PointMap, device = "pdf",
width = 10, height = 5, dpi = 300, path = outPath)
}
##### c. save ####
# Filtered save plot
if(!is.null(speciesName)){
ggplot2::ggsave(filename = paste0("/FlagsPlot_", speciesName,".pdf"),
path = outPath,
plot = plot, dpi = dpi, bg = bg, device = device,
width = width, height = height, units = units)}
}
# Save the figure
  ##### 2.3 Save all option ####
if(is.null(speciesName)){
ggplot2::ggsave(filename = fileName,
path = outPath,
plot = plot, dpi = dpi, bg = bg, device = device,
width = width, height = height, units = units)}
if(returnPlot == TRUE){
return(plot)}
} # END function

# ---- End of file: R/plotFlagSummary.R ----

##### 2.1 repoFinder ####
#' Find GBIF, ALA, iDigBio, and SCAN files in a directory
#'
#' @param path A directory as character. The path within which to recursively look for GBIF, ALA,
#' iDigBio, and SCAN files.
#'
#' @return Returns a list of directories to each of the above data downloads
#' @export
#'
#' @importFrom dplyr %>%
#'
#' @examples
#' \dontrun{
#' # Where DataPath is made by [BeeBDC::dirMaker()]
#' BeeBDC::repoFinder(path = DataPath)
#' }
repoFinder <- function(path){
. <- NULL
requireNamespace("dplyr")
# Find ALL occurrence file downloads contained within the HomePath and return their location and
# their information
AllOccLocs <- file.info(list.files(path, full.names = T, pattern = "occurrence(s)?(_raw)?\\.|^data.csv",
recursive = TRUE))
# Split out each data source's occurrence files and file paths based on their slight differences
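  # (ALA downloads are identified by "data.csv", GBIF by "occurrence.txt",
  #  iDigBio by "occurrence_raw.csv", and SCAN by "occurrences.csv")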
occ_paths <- list(
# ALA paths
rownames(AllOccLocs) %>% grep("/data.csv", ., value = TRUE),
# GBIF paths
rownames(AllOccLocs) %>% grep("/occurrence.txt", ., value = TRUE),
# iDigBio paths
rownames(AllOccLocs) %>% grep("/occurrence_raw.csv", ., value = TRUE),
# SCAN paths
rownames(AllOccLocs) %>% grep("/occurrences.csv", ., value = TRUE))
# Return those paths
names(occ_paths) <- c("ALA_data", "GBIF_data", "iDigBio_data", "SCAN_data")
return(occ_paths)
}

# ---- End of file: R/repoFinder.R ----

#### 2. repoMerge ####
#' Import occurrences from GBIF, ALA, iDigBio, and SCAN downloads
#'
#' Locates data from GBIF, ALA, iDigBio, and SCAN within a directory and reads it in along with its eml
#' metadata. Please keep the original download folder names and architecture unchanged.
#' NOTE: This function uses family-level data to identify taxon downloads. If this, or something new,
#' becomes an issue, please contact James Dorey (the developer) as there are likely to be exceptions
#' to how files are downloaded. Current as of version 1.0.4.
#'
#' @param path A directory as a character. The directory to recursively look in for the above data.
#' @param save_type Character. The data type to save the resulting file as. Options are:
#' csv_files" or "R_file".
#' @param occ_paths A list of directories. Preferably produced using [BeeBDC::repoFinder()], this
#' argument provides the paths to the relevant input datasets. You can fault-find errors
#' in this function by checking the output of [BeeBDC::repoFinder()].
#'
#' @return A list with a data frame of merged occurrence records, "Data_WebDL", and a list of eml
#' files contained in "eml_files". Also saves these files in the requested format.
#' @export
#'
#' @importFrom dplyr %>%
#' @importFrom readr col_character cols
#' @importFrom stats setNames
#'
#' @examples
#' \dontrun{
#' DataImp <- repoMerge(path = DataPath,
#' # Find data - Many problems can be solved by running [BeeBDC::repoFinder(path = DataPath)]
#' # And looking for problems
#' occ_paths = BeeBDC::repoFinder(path = DataPath),
#' save_type = "R_file")
#' }
repoMerge <- function(path, save_type, occ_paths){
. <- family <- data_i <- NULL
#### 0.0 Prep ####
requireNamespace("dplyr")
# Remove empty elements
occ_paths <- occ_paths[lapply(occ_paths,length)>0]
#### 1.0 Data read+merge ####
###### 1.1 Loop prep. ####
{startTime <- Sys.time()
# print user information
writeLines( paste(" - Reading and joining ",length(unlist(occ_paths))," occurrence files.", "\n",
"Depending on file size and number, this could take some time.","\n",
sep = ""))
# Make an internal copy of the template for use in the loop as the template tibble
data_template <- matrix(ncol = length(BeeBDC::ColTypeR()[[1]] %>% names()), nrow = 0) %>% as.data.frame() %>%
setNames(BeeBDC::ColTypeR()[[1]] %>% names()) %>% dplyr::tibble() %>%
dplyr::mutate(dplyr::across(dplyr::everything(), as.character))
# Copy the template
Data_WebDL <- data_template
# Make an empty eml file for the loop
eml_files <- dplyr::tibble()
# Set up a counter to keep track of the number of files processed
counter = 1
##### 1.2 Data loop ####
for(j in 1:length(occ_paths)){ # Start path loop
for(i in 1:length(unlist(occ_paths[j]))){
# Firstly, select the download path for the loop
path_i <- occ_paths[j] %>% unlist() %>% .[i]
###### a. loop data ####
# Use the custom function dataReader to read in the occurrence data in the correct format
data_i <- dataReader(path_i = path_i, home_path = path) #%>%
#readr::type_convert(col_types = BeeBDC::ColTypeR())
# If there is a date range of days, take only the first day
data_i$day <- data_i$day %>%
stringr::str_replace(pattern = "-.*", replacement = "") %>%
stringr::str_squish() #%>%
#as.integer()
      ###### b. attr. management ####
# Use the custom function attr_builder to extract attribute information from file
data_Attributes <- data_i %>%
# Group by family
dplyr::group_by(family) %>%
# Split the groups into their own lists
dplyr::group_split(.keep = TRUE) %>%
lapply(., attr_builder,
path_i = path_i)
# Extract all of the Source_tibble tibbles and bind them together
data_Source_tibbles <- data_Attributes %>%
lapply(., function(x) dplyr::tibble(x$Source_tibble)) %>%
# Combine the lists of tibbles
dplyr::bind_rows()
      # Recombine the source eml data and the source tibble data
data_Attributes <- list(data_Attributes[[1]][[1]], data_Source_tibbles) %>%
setNames(c("source_eml","Source_tibble"))
# Add an additional test for iDigBio to extract families
if(any(data_Attributes$Source_tibble$dataSource %>% stringr::str_detect("iDigBio"))){
data_Attributes$Source_tibble <- data_Attributes$Source_tibble %>%
dplyr::mutate(
dataSource = stringr::str_c("iDigBio_", unique(data_i$family) %>%
stringr::str_replace_na("NA")),
taxon = stringr::str_c(unique(data_i$family) %>% stringr::str_replace_na("NA"))
)} # END if iDigBio
# Test if the attributes are sourced from a single family, or from more. Should == TRUE if it
# is from a single family and == FALSE if from a multiple family download.
singleFamilyTest <- nrow(data_Attributes$Source_tibble) == 1
###### b1. single family ####
if(singleFamilyTest == TRUE){
# Annotate the dataSource column with information from the attributes (based on one input
# family)
data_i$dataSource <- data_Attributes$Source_tibble$dataSource
} # END singleFamilyTest == TRUE
###### b2. many families ####
if(singleFamilyTest == FALSE){
          # If the family column is NA, allow linking of the dataSource by temporarily setting that
          # column to the string "NA"
data_i <- data_i %>%
dplyr::mutate(family = dplyr::if_else(is.na(family),
"NA",
family))
# Annotate the dataSource column with information from the attributes (based on many input
# families)
data_i <- data_i %>%
# Remove the empty dataSource column
dplyr::select(!tidyselect::any_of("dataSource")) %>%
# Match the family-level dataSources to the dataSource column using left_join
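            # (e.g., rows with family "Apidae" receive a dataSource such as "iDigBio_Apidae"
            #  from the attribute table)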
dplyr::left_join(data_Attributes$Source_tibble %>%
dplyr::select(c("dataSource", "taxon")),
by = c("family" = "taxon"))
} # END singleFamilyTest == FALSE
## Source table ##
# Bind with either the template or running GBIF_Data_WebDL tibble
Data_WebDL <- dplyr::bind_rows(Data_WebDL, data_i) # END rbind.fill
# Extract existing attributes
ExistingAttrs <- attributes(Data_WebDL)
# Combine with new attributes
# combine tibbles
SrcTbl_new <- dplyr::bind_rows(ExistingAttrs$dataSource, data_Attributes$Source_tibble)
# Add the new attribute information into the file
attr(Data_WebDL, which = "dataSource") <- SrcTbl_new
## eml files ##
# Make the eml file into a list
new_eml_file <- list(data_Attributes$source_eml)
# Name that list with the appropriate source and name
names(new_eml_file) <- if(singleFamilyTest == TRUE){
# If singleFamilyTest == TRUE, then use that single family dataSource name as the
# eml name
data_Attributes$Source_tibble$dataSource
}else{
# If singleFamilyTest == FALSE, then concatenate those names into a text string
# separated by "; " for each family present
stringr::str_c(data_Attributes$Source_tibble$dataSource, collapse = "; ")}
# Append this to the running eml list file
eml_files <- append(eml_files, new_eml_file) %>%
emld::as_emld( from = "list")
###### c. Progress print ####
# Print the progress and number of records (rows) to user
writeLines( paste(" - Completed", i,"of", length(unlist(occ_paths[j])), names(occ_paths[j]),
"files. ","\n",
counter, "of", length(unlist(occ_paths)), "total files processed","\n",
"Cumulative number of rows =",
format(nrow(Data_WebDL), big.mark=",",scientific=FALSE), sep = " ") )
# progress the counter by 1
counter = counter + 1
} # End GBIF loop
} # End path loop
##### 3. Save data ####
# user-input includes the path and save_type, which are supplied from repoMerge()
dataSaver(path = path, # Path to HomeDirectory
save_type = save_type, # save_type is either "csv_files" or "R_file"
occurrences = Data_WebDL, # Input data file
eml_files = eml_files, # Input eml_files
file_prefix = "BeeData_")
# Completion message to user with endTime
endTime <- Sys.time()
writeLines( paste(" - Completed in ", round(endTime - startTime, digits = 2), " ",
units(round(endTime - startTime, digits = 2)), sep = ""))
} # END Data Loop
return( dplyr::lst(Data_WebDL, eml_files))
} # END repoMerge function

# ---- End of file: R/repoMerge.R ----

# This function was written by James B Dorey on 8th November 2022 to create a .summary column and
# replace the bdc_summary_col function which fails with NA values
#' Create or update the .summary flag column
#'
#' Using all flag columns (column names starting with "."), this function either creates or updates
#' the .summary flag column which is FALSE when ANY of the flag columns are FALSE. Columns can be excluded
#' and removed after creating the .summary column. Additionally, the occurrence dataset
#' can be filtered to only those where .summary = TRUE at the end of the function.
#'
#' @param data A data frame or tibble. Occurrence records to use as input.
#' @param dontFilterThese A character vector of flag columns to be ignored in the creation or updating
#' of the .summary column.
#' @param removeFilterColumns Logical. If TRUE all columns starting with "." will be removed in the
#' output data. This only makes sense to use when filterClean = TRUE. Default = FALSE.
#' @param filterClean Logical. If TRUE, the data will be filtered to only those occurrence where .summary
#' = TRUE (i.e., completely clean according to the used flag columns). Default = FALSE.
#'
#' @return Returns a data frame or tibble of the input data but modified based on the above parameters.
#' @export
#'
#' @importFrom dplyr %>%
#'
#'
#' @examples
#' # Read in example data
#' data(beesFlagged)
#'
#' # To only update the .summary column
#' beesFlagged_out <- summaryFun(
#' data = beesFlagged,
#' dontFilterThese = c(".gridSummary", ".lonFlag", ".latFlag", ".uncer_terms", ".unLicensed"),
#' removeFilterColumns = FALSE,
#' filterClean = FALSE)
#' # View output
#' table(beesFlagged_out$.summary, useNA = "always")
#'
#' # Now filter to only the clean data and remove the flag columns
#' beesFlagged_out <- summaryFun(
#' data = BeeBDC::beesFlagged,
#' dontFilterThese = c(".gridSummary", ".lonFlag", ".latFlag", ".uncer_terms", ".unLicensed"),
#' removeFilterColumns = TRUE,
#' filterClean = TRUE)
#' # View output
#' table(beesFlagged_out$.summary, useNA = "always")
#'
#'
#'
summaryFun <- function(
data = NULL,
dontFilterThese = NULL,
removeFilterColumns = FALSE,
filterClean = FALSE){
# locally bind variables to the function
. <- rowSum <- .summaryNew <- .summary <- NULL
#### 0.0 Prep ####
if(is.null(data)){
stop("You must provide a dataset in the 'data' argument.")
}
#### 1.0 Generate .summary column ####
##### 1.1 dontFilterThese present ####
# User output
if(!is.null(dontFilterThese)){
writeLines(paste0(" - We will NOT flag the following columns. However, they will remain",
" in the data file.\n",
paste(dontFilterThese, collapse = ", ") ))
# Run function
dataOut <-
data %>%
# Which columns NOT to filter
dplyr::select(!tidyselect::any_of(dontFilterThese)) %>%
# Update .summary column
# Select all columns starting with "."
dplyr::select(tidyselect::starts_with(".")) %>%
# Delete the summary column if it's there
dplyr::select(!tidyselect::starts_with(".summary")) %>%
# Make FALSE == 1 and TRUE == 0
dplyr::mutate_if(is.logical, ~abs(as.numeric(.) - 1)) %>%
# IF rowSum > 0 then there is at least one flag
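      # (e.g., a row with flag values c(TRUE, FALSE, NA) becomes c(0, 1, NA), so rowSum = 1
      #  and that row is flagged as .summary = FALSE)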
dplyr::mutate(rowSum = base::rowSums(., na.rm = TRUE)) %>%
# Add the .summary column
dplyr::mutate(.summaryNew = dplyr::if_else(rowSum > 0,
FALSE, TRUE)) %>%
dplyr::select(.summaryNew) %>%
dplyr::bind_cols(data, .) %>%
dplyr::mutate(.summary = .summaryNew) %>% dplyr::select(!.summaryNew)
}
##### 1.2 dontFilterThese NULL ####
if(is.null(dontFilterThese)){
writeLines(paste0(" - We will flag all columns starting with '.'"))
# Run function
dataOut <-
data %>%
# Update .summary column
# Select all columns starting with "."
dplyr::select(tidyselect::starts_with(".")) %>%
# Delete the summary column if it's there
dplyr::select(!tidyselect::starts_with(".summary")) %>%
# Make FALSE == 1 and TRUE == 0
dplyr::mutate_if(is.logical, ~abs(as.numeric(.) - 1)) %>%
# IF rowSum > 0 then there is at least one flag
dplyr::mutate(rowSum = rowSums(., na.rm = TRUE)) %>%
# Add the .summary column
dplyr::mutate(.summaryNew = dplyr::if_else(rowSum > 0,
FALSE, TRUE)) %>%
dplyr::select(.summaryNew) %>%
dplyr::bind_cols(data, .) %>%
dplyr::mutate(.summary = .summaryNew) %>% dplyr::select(!.summaryNew)
}
##### 1.3 User message ####
message(paste(" - summaryFun:\nFlagged",
format(sum(dataOut$.summary == FALSE, na.rm = TRUE), big.mark = ","),
"\n ",
"The .summary column was added to the database.",
sep = " "))
#### 2.0 Optional extras ####
##### 2.1 Filter for clean ####
  # Filter for only clean records here if user specifies
if(filterClean == TRUE){
dataOut <- dataOut %>%
# FILTER HERE
dplyr::filter(.summary == TRUE)
message(paste(" - REMOVED all occurrences that were FALSE for the 'summary' column."))
}
##### 2.2 Remove filtering columns ####
# Remove filtering columns if user specifies
if(removeFilterColumns == TRUE){
dataOut <- dataOut %>%
dplyr::select(!tidyselect::starts_with("."))
}
#### 3.0 Output ####
return(dataOut)
} # End function

# ---- End of file: R/summaryFun.R ----

# This function was written by James B Dorey on the 29th of September 2022
# Its purpose is to visualise some data spatially by country
# Please contact jbdorey[at]me.com for help
#' Create country-level summary maps of species and occurrence numbers
#'
#' Builds an output figure that shows the number of species and the number of occurrences per
#' country. Breaks the data into classes for visualisation. Users may filter data to their taxa
#' of interest to produce figures of interest.
#'
#' @param data A data frame or tibble. Occurrence records as input.
#' @param class_n Numeric. The number of categories to break the data into.
#' @param class_Style Character. The class style passed to [classInt::classIntervals()]. Options are
#' one of "fixed", "sd", "equal", "pretty", "quantile", "kmeans", "hclust", "bclust",
#' "fisher", "jenks", "dpih", "headtails", or "maximum". Default = "fisher".
#' @param outPath A character vector. The path to the save location for the output figure.
#' @param fileName A character vector. The file name
#' for the output figure, ending with '.pdf'.
#' @param width Numeric. The width, in inches, of the resulting figure. Default = 10.
#' @param height Numeric. The height, in inches, of the resulting figure. Default = 5.
#' @param dpi Numeric. The resolution of the resulting plot. Default = 300.
#' @param returnPlot Logical. If TRUE, return the plot to the environment. Default = FALSE.
#' @param scale Numeric or character. Passed to rnaturalearth's ne_countries().
#' Scale of map to return, one of 110, 50, 10 or 'small', 'medium', 'large'. Default = 110.
#' @param pointBuffer Numeric. Amount to buffer points, in decimal degrees. If the point is outside
#' of a country, but within this point buffer, it will count towards that country. It's a good idea
#' to keep this value consistent with the prior flags applied. Default = 0.01.
#'
#' @return Saves a figure to the user-specified outPath and file name with a global map of bee
#' occurrence species and count data from the input dataset.
#' @export
#'
#' @importFrom dplyr %>%
#' @importFrom sf sf_use_s2
#' @importFrom ggspatial north_arrow_fancy_orienteering
#' @importFrom grDevices gray
#' @importFrom ggplot2 xlab ylab ggtitle
#'
#' @examples
#' # Read in data
#' data(beesFlagged)
#' OutPath_Figures <- tempdir()
#' # This simple example using the test data has very few classes due to the small amount of input
#' # data.
#' summaryMaps(
#' data = beesFlagged,
#' width = 10, height = 10,
#' class_n = 4,
#' class_Style = "fisher",
#' outPath = OutPath_Figures,
#' fileName = paste0("CountryMaps_fisher_TEST.pdf"),
#' )
#'
#'
summaryMaps <- function(
data = NULL,
class_n = 15,
class_Style = "fisher",
outPath = NULL,
fileName = NULL,
width = 10,
height = 5,
dpi = 300,
returnPlot = FALSE,
scale = 110,
pointBuffer = 0.01
){
# locally bind variables to the function
name_long<-iso_a3<-name<-geometry<-decimalLongitude<-decimalLatitude<-database_id<-
scientificName<-species<-country<-stateProvince<-dataSource<-count<-class_count<-
class_count2<-occCount <- indexMatch <- . <- iso_a2 <- NULL
requireNamespace("dplyr")
requireNamespace("classInt")
requireNamespace("rnaturalearth")
requireNamespace("ggspatial")
#### 0.0 Prep ####
##### 0.1 errors ####
###### a. FATAL errors ####
if(is.null(data)){
stop(" - Please provide an argument for data I'm a program not a magician.")
}
if(is.null(fileName)){
stop(" - No argument provided for fileName. Please provide a fileName.")
}
if(is.null(outPath)){
stop(" - No argument provided for outPath Please provide an outPath.")
}
#### 1.0 Download base map ####
###### 1.1 naturalEarth ####
# Download world map using rnaturalearth packages
worldMap <- rnaturalearth::ne_countries(returnclass = "sf", country = NULL,
type = "map_units", scale = scale) %>%
# Select only a subset of the naturalearthdata columns to extract
dplyr::select(name_long, iso_a3, iso_a2, name, name_long, geometry) %>%
sf::st_make_valid()
# This stops a plotting error
sf::sf_use_s2(FALSE)
###### 1.2 occurrences ####
# Filter the data columns
data <- data %>%
# Use a subset of columns
dplyr::select(tidyselect::any_of(c("database_id", "scientificName", "species",
"country", "stateProvince", "dataSource", "geometry",
"decimalLongitude", "decimalLatitude"))) %>%
# Drop the points without coordinates
tidyr::drop_na(tidyselect::all_of(c("decimalLongitude", "decimalLatitude")))
# Make all of the US virgin islands species into US species
# data$countryCode <- stringr::str_replace(string = data$countryCode,
# pattern = "VI", replacement = "US")
# Turn occData into a simple point feature
dataPoints <- sf::st_as_sf(data,
coords = c("decimalLongitude", "decimalLatitude"),
na.fail = TRUE,
# Assign the CRS from the rnaturalearth map to the point data
crs = sf::st_crs(worldMap))
##### 1.3 Extraction ####
writeLines(" - Extracting country data from points...")
suppressWarnings({
# Set geometries to constant for the sake of the map
sf::st_agr(worldMap) = "constant"
sf::st_agr(dataPoints) = "constant"
# Simplify the world map ONCE to be used later
simplePoly <- worldMap %>% sf::st_drop_geometry() %>%
dplyr::mutate(indexMatch = dplyr::row_number())
#Extract polygon information to points
extracted <- sf::st_intersects(dataPoints, worldMap, sparse = TRUE) %>%
# return a tibble with the index of each match or NA where there was no match
dplyr::tibble(indexMatch = .)
# If first element is full, unlist each one
extracted <- extracted %>%
dplyr::mutate(indexMatch = indexMatch %>% as.character() %>%
# deal with problems — Take the first number where two are provided
stringr::str_extract("[0-9]+") %>%
# Remove zero to NA
stringr::str_replace("^[0]$", NA_character_) %>%
# Make numeric
as.numeric()
) %>%
# drop geometry
sf::st_drop_geometry()
})
# rejoin
data <- extracted %>%
dplyr::left_join(simplePoly,
by = "indexMatch") %>%
# Add in the database_id
dplyr::bind_cols(data)
rm(extracted)
writeLines("Extraction complete.")
##### 1.4 Buffer fails ####
writeLines(" - Buffering naturalearth map by pointBuffer...")
###### a. buffer map ####
# Buffer the natural earth map
suppressWarnings({
worldMap <- worldMap %>%
sf::st_buffer(dist = pointBuffer)
})
###### b. extract fails ####
#Extract polygon information to points
suppressWarnings({
extracted2 <- sf::st_intersects(dataPoints %>% dplyr::filter(
database_id %in% (data %>% dplyr::filter(is.na(name_long)) %>% pull(database_id))),
worldMap %>% sf::st_make_valid(), sparse = TRUE) %>%
# return a tibble with the index of each match or NA where there was no match
dplyr::tibble(indexMatch = .)
# If first element is full, unlist each one
extracted2 <- extracted2 %>%
dplyr::mutate(indexMatch = indexMatch %>% as.character() %>%
# deal with problems — Take the first number where two are provided
stringr::str_extract("[0-9]+") %>%
# Remove zero to NA
stringr::str_replace("^[0]$", NA_character_) %>%
# Make numeric
as.numeric()
) %>%
# drop geometry
sf::st_drop_geometry()
})
# rejoin
extracted2 <- extracted2 %>%
dplyr::left_join(simplePoly,
by = "indexMatch") %>%
# Add in the database_id
dplyr::bind_cols(data %>% dplyr::filter(is.na(name_long)) %>%
dplyr::select(!tidyselect::any_of(c("name_long", "iso_a3", "indexMatch",
"iso_a2", "name"))))
# Rejoin with data
data <- data %>%
dplyr::filter(!database_id %in% extracted2$database_id) %>%
dplyr::bind_rows(extracted2)
rm(extracted2)
#### 2.0 Species map ####
##### 2.1 Data prep ####
# Get the unique country-species pairs
spMapData <- data %>%
dplyr::distinct(scientificName, name_long, .keep_all = TRUE) %>%
# Group by the country
dplyr::group_by(name_long) %>%
# Get a count of the records per country
dplyr::mutate(count = n()) %>% dplyr::ungroup() %>%
# Get unique
dplyr::distinct(name_long, .keep_all = TRUE)
##### 2.2 Breaks ####
# make class intervals.
# Class intervals from ?classIntervals: fixed", "sd", "equal", "pretty", "quantile",
# "kmeans", "hclust", "bclust", "fisher", "jenks", "dpih" or "headtails"
classes <- classInt::classIntervals(spMapData$count, n = class_n,
style = class_Style, dig.lab=20,
dataPrecision=0)
# Next we’ll create a new column in our sf object using the base R cut() function to cut up our
  # count variable into distinct groups:
spMapData <- spMapData %>%
dplyr::mutate(class_count = cut(count,
classes$brks %>% round(digits = 0),
include.lowest = T, dig.lab = 10)) %>%
# format the class_count column to remove spaces, add comma break, and join min and max
dplyr::mutate(class_count2 = class_count %>%
stringr::str_remove("\\[|\\]|\\(|\\)") %>%
stringr::str_remove("\\]") %>%
stringr::str_replace(",", "-")) %>%
tidyr::separate(col = class_count2, into = c("min", "max"), sep = "-") %>%
dplyr::mutate(min = min %>% as.numeric() %>% format(big.mark = ",") %>%
stringr::str_remove("\\s+"),
max = max %>% as.numeric() %>% format(big.mark = ",") %>%
stringr::str_remove("\\s+"),
class_count2 = stringr::str_c(min, max, sep = "-") )
# Join the map and occurrence data
fullMap <- dplyr::full_join(worldMap, spMapData %>% sf::st_drop_geometry(),
by = c("name_long" = "name_long")) %>%
# Remove na rows
tidyr::drop_na(count)
##### 2.3 Draw map ####
# Make the map
(spCountryMap <- ggplot2::ggplot(data = fullMap, ) +
# Add in a blank base-map to highlight countries with no data
ggplot2::geom_sf(data = worldMap, size = 0.15, fill = "white")+
# Plot and colour the terrestrial base map
ggplot2::geom_sf(ggplot2::aes(fill = class_count), size = 0.15)+
# Set map limits, if wanted
ggplot2::coord_sf(expand = FALSE, ylim = c(-60,90), lims_method = "geometry_bbox") +
# Map formatting
# Add in the map's north arrow
ggspatial::annotation_north_arrow(location = "tl", which_north = "true",
pad_x = unit(0.1, "cm"), pad_y = unit(0.1, "cm"),
style = ggspatial::north_arrow_fancy_orienteering()) + # Add in NORTH ARROW
ggplot2::theme(panel.grid.major = ggplot2::element_line(color = grDevices::gray(.1, alpha = 0.1),
linetype = "dashed", linewidth = 0.5), # Add grid lines
panel.border = ggplot2::element_rect(color = grDevices::gray(.1, alpha = 1),
linetype = "solid", linewidth = 0.5,
fill = NA), # add panel border
# Add background - colour in the ocean
panel.background = ggplot2::element_rect(fill = "aliceblue") )+
# Change map colour scheme - CHOOSE YOUR OWN ADVENTURE
# For Dorey colour scheme use the below
ggplot2::scale_fill_viridis_d(option = "inferno",
na.value = "grey50",
name = "Class count",
labels = fullMap %>%
dplyr::arrange(count) %>%
dplyr::distinct(class_count2) %>%
# options = "magma", "inferno", "plasma", "cividis"
dplyr::pull(class_count2)) +
# Add in X and Y labels
ggplot2::xlab("Longitude") + ggplot2::ylab("Latitude") +
# Add in the title
ggplot2::ggtitle( "Number of species per country") )
rm(spMapData)
#### 3.0 occurrence map ####
  ##### 3.1 Data prep ####
  # Get a count of the occurrences per country
mapTable <- data %>%
# Group by the country
dplyr::group_by(name_long) %>%
# Get a count of the records per country
dplyr::mutate(occCount = dplyr::n()) %>%
# Get unique
dplyr::distinct(name_long, .keep_all = TRUE) %>%
# Select only these columns
dplyr::select(name_long, occCount)
# Join the map and occurrence data
fullMap <- dplyr::full_join(worldMap, mapTable %>% sf::st_drop_geometry(),
by = c("name_long" = "name_long")) %>%
# Remove na rows
tidyr::drop_na(occCount)
  ##### 3.2 Breaks ####
# make class intervals.
# Class intervals from ?classIntervals: fixed", "sd", "equal", "pretty", "quantile",
# "kmeans", "hclust", "bclust", "fisher", "jenks", "dpih" or "headtails"
classes <- classInt::classIntervals(fullMap$occCount, n = class_n,
style = class_Style, dig.lab=20,
dataPrecision=0)
# Next we’ll create a new column in our sf object using the base R cut() function to cut up our
  # count variable into distinct groups:
fullMap <- fullMap %>%
dplyr::mutate(class_count = cut(occCount,
classes$brks %>% round(digits = 0),
include.lowest = T, dig.lab = 10)) %>%
# format the class_count column to remove spaces, add comma break, and join min and max
dplyr::mutate(class_count2 = class_count %>%
stringr::str_remove("\\[|\\]|\\(|\\)") %>%
stringr::str_remove("\\]") %>%
stringr::str_replace(",", "-")) %>%
tidyr::separate(col = class_count2, into = c("min", "max"), sep = "-") %>%
dplyr::mutate(min = min %>% as.numeric() %>% format(big.mark = ",") %>%
stringr::str_remove("\\s+"),
max = max %>% as.numeric() %>% format(big.mark = ",") %>%
stringr::str_remove("\\s+"),
class_count2 = stringr::str_c(min, max, sep = "-") )
  ##### 3.3 Draw map ####
# Make the map
(occCountryMap <- ggplot2::ggplot(data = fullMap) +
# Add in a blank base-map to highlight countries with no data
ggplot2::geom_sf(data = worldMap, size = 0.15, fill = "white")+
# Plot and colour the terrestrial base map
ggplot2::geom_sf(ggplot2::aes(fill = class_count), size = 0.15)+
# Set map limits, if wanted
ggplot2::coord_sf(expand = FALSE, ylim = c(-60,90), lims_method = "geometry_bbox") +
# Map formatting
# Add in the map's north arrow
ggspatial::annotation_north_arrow(location = "tl", which_north = "true",
pad_x = unit(0.1, "cm"), pad_y = unit(0.1, "cm"),
style = ggspatial::north_arrow_fancy_orienteering()) + # Add in NORTH ARROW
ggplot2::theme(panel.grid.major = ggplot2::element_line(color = grDevices::gray(.1, alpha = 0.1),
linetype = "dashed",
linewidth = 0.5), # Add grid lines
panel.border = ggplot2::element_rect(color = grDevices::gray(.1, alpha = 1),
linetype = "solid", linewidth = 0.5,
fill = NA), # add panel border
# Add background - colour in the ocean
panel.background = ggplot2::element_rect(fill = "aliceblue") )+
# Change map colour scheme - CHOOSE YOUR OWN ADVENTURE
# For Dorey colour scheme use the below
ggplot2::scale_fill_viridis_d(option = "inferno",
na.value = "grey50",
name = "Class count",
labels = fullMap %>%
dplyr::arrange(occCount) %>%
dplyr::distinct(class_count2) %>%
# options = "magma", "inferno", "plasma", "cividis"
dplyr::pull(class_count2)) +
# Add in X and Y labels
ggplot2::xlab("Longitude") + ggplot2::ylab("Latitude") +
# Add in the title
ggplot2::ggtitle( "Number of occurrences per country") )
#### 4.0 combine + save ####
# plot the figures together
(combinedPlot <- cowplot::plot_grid(spCountryMap,
# +
# theme(legend.position = legend.position,
# legend.title = element_blank()),
occCountryMap,
labels = c("(a)","(b)"),
ncol = 1, align = 'v', axis = 'l'))
# Save the plot
cowplot::save_plot(filename = paste(outPath, "/", fileName, sep = ""),
plot = combinedPlot,
base_width = width,
base_height = height, dpi = dpi)
if(returnPlot == TRUE){
return(combinedPlot)}
} # END function

# ---- End of file: R/summaryMaps.R ----

# This function was written by James Dorey to build taxonomy files using taxadb and transform them
# into the BeeBDC input format
# This function was written from the 19th of March 2024. For questions, please email James
# at jbdorey[at]me.com
#' Import and convert taxadb taxonomies to BeeBDC format
#'
#' Uses the taxadb R package to download a requested taxonomy and then transforms it into the input
#' BeeBDC format. This means that any taxonomy in their databases can be used with BeeBDC. You can
#' also save the output to your computer and to the R environment for immediate use. See
#' details below for a list of providers or see `taxadb::td_create()`.
#'
#'
#' @param name Character. Taxonomic scientific name (e.g. "Aves").
#' As defined by `taxadb::filter_rank()`.
#' @param rank Character. Taxonomic rank name. (e.g. "class").
#' As defined by `taxadb::filter_rank()`.
#' @param provider Character. From which provider should the hierarchy be returned?
#' Default is 'gbif', which can also be configured using options(default_taxadb_provider = ...).
#' See `taxadb::td_create()` for a list of recognized providers. NOTE: gbif seems to have the most-complete
#' columns, especially in terms of scientificNameAuthorship, which is important for matching
#' ambiguous names.
#' As defined by `taxadb::filter_rank()`.
#' @param version Character. Which version of the taxadb provider database should we use? defaults
#' to latest. See tl_import for details. Default = 22.12.
#' As defined by `taxadb::filter_rank()`.
#' @param collect Logical. Should we return an in-memory data.frame
#' (default, usually the most convenient), or a reference to lazy-eval table on disk
#' (useful for very large tables on which we may first perform subsequent filtering operations.).
#' Default = TRUE.
#' As defined by `taxadb::filter_rank()`.
#' @param ignore_case Logical. should we ignore case (capitalization) in matching names?
#' Can be significantly slower to run. Default = TRUE.
#' As defined by `taxadb::filter_rank()`.
#' @param db a connection to the taxadb database. See details of `taxadb::filter_rank()`. Default
#' = Null which should work.
#' As defined by `taxadb::filter_rank()`.
#'
#' @param removeEmptyNames Logical. If TRUE (default), it will remove entries without an entry
#' for specificEpithet.
#' @param outPath Character. The path to a directory (folder) in which the output should be saved.
#' @param fileName Character. The name of the output file, ending in '.csv'.
#'
#'
#' @return Returns a taxonomy file (to the R environment and to the disk, if a fileName is
#' provided) as a tibble that can be used with `BeeBDC::harmoniseR()`.
#'
#' @importFrom dplyr %>%
#'
#' @seealso [BeeBDC::beesTaxonomy()] for the bee taxonomy and [BeeBDC::harmoniseR()] for the
#' taxon-cleaning function where these taxonomies are implemented.
#'
#' @export
#'
#' @examples
#' \dontrun{
#' # Run the function using the bee genus Apis as an example...
#' ApisTaxonomy <- BeeBDC::taxadbToBeeBDC(
#' name = "Apis",
#' rank = "Genus",
#' provider = "gbif",
#' version = "22.12",
#' removeEmptyNames = TRUE,
#' outPath = getwd(),
#' fileName = NULL
#' )
#' }
#'
taxadbToBeeBDC <- function(
name = NULL,
rank = NULL,
provider = "gbif",
version = "22.12",
collect = TRUE,
ignore_case = TRUE,
db = NULL,
removeEmptyNames = TRUE,
outPath = getwd(),
fileName = NULL
) {
# locally bind variables to the function
. <- taxonomy_taxadb <- taxonomyOut <- canonical <- authorship <- taxonomic_status <- species <-
taxonID <- id <- accid <- id_matched <- NULL
# Load required packages
requireNamespace("stringr")
requireNamespace("dplyr")
requireNamespace("taxadb")
#### 0.0 Prep ####
##### 0.1 Errors ####
###### a. FATAL errors ####
if(is.null(name)){
stop(" - Please provide an argument for name I'm a program not a magician.")
}
if(is.null(rank)){
stop(" - Please provide an argument for rank I'm a program not a magician.")
}
if(provider == "ncbi"){
stop(" - Sorry, ncbi doesn't include a taxonID against which to match the acceptedNameUsageID.")
}
if (!provider %in% c("itis",
"ncbi",
"col",
"tpl",
"gbif",
"fb",
"slb",
"wd",
"ott",
"iucn")) {
stop(provider, " provided is not a valid name")
}
#### 1.0 Download taxonomy ####
##### 1.1 Download ####
writeLines(" - Downloading taxonomy...")
taxadb::td_create(provider = provider,
schema = "dwc",
version = version,
# Only provide inputs here if user-inputs are provided.
if(is.null(db)){db = taxadb::td_connect()
}else{db = db})
# User output
writeLines(paste0(" - taxadb save the taxonomy to: ",
taxadb::taxadb_dir()))
##### 1.2 Turn into data table ####
# Run the filter_rank function to output the data table taxonomy
taxonomy_taxadb <- taxadb::filter_rank(name,
rank,
provider = provider,
collect = collect,
ignore_case = ignore_case,
# Only provide inputs here if user-inputs are provided.
if(is.null(db)){db = taxadb::td_connect()
}else{db = db},
version = version
)
##### 1.3 missing columns ####
if(!"scientificNameAuthorship" %in% colnames(taxonomy_taxadb)){
taxonomy_taxadb <- taxonomy_taxadb %>%
dplyr::mutate(scientificNameAuthorship = NA_character_)
warning(paste0(" - BeeBDC: no scientificNameAuthorship in downloaded data. BeeBDC really ",
"likes this column as it helps identify ambiguities."))
}
if(!"infraspecificEpithet" %in% colnames(taxonomy_taxadb)){
taxonomy_taxadb <- taxonomy_taxadb %>%
dplyr::mutate(infraspecificEpithet = NA_character_)
warning(paste0(" - BeeBDC: no infraspecificEpithet in downloaded data. This can be a really",
" helpful column for some taxa."))
}
#### 2.0 Transform data ####
##### 2.1 basic rename and mutate ####
# Begin transforming the taxonomy to BeeBDC format
taxonomyOut <- taxonomy_taxadb %>%
# Rename columns... sadly away from DWC... for now.
dplyr::rename(taxonomic_status = "taxonomicStatus",
authorship = "scientificNameAuthorship",
infraspecies = "infraspecificEpithet",
species = "specificEpithet",
taxon_rank = "taxonRank",
canonical = "scientificName") %>%
# Build new columns
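    # (e.g., a canonical name "Apis mellifera" with authorship "Linnaeus, 1758" gives the
    #  validName "Apis mellifera Linnaeus, 1758")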
dplyr::mutate(validName = stringr::str_c(canonical,
dplyr::if_else(!is.na(authorship),
paste0(authorship), ""),
sep = " ") %>%
stringr::str_squish(),
canonical_withFlags = canonical,
valid = dplyr::if_else(taxonomic_status == "accepted",
TRUE, FALSE))
##### 2.2 Remove empty names ####
# Remove empty names
if(removeEmptyNames == TRUE){
taxonomyOut <- taxonomyOut %>%
dplyr::filter(complete.cases(species))
}
##### 2.3 Add id and accid ####
# Add id and accepted accid
taxonomyOut <- taxonomyOut %>%
# Add id as a simple count, top to bottom
dplyr::mutate(id = 1:nrow(.)) %>%
# Add accid for the accepted names
dplyr::mutate(accid = dplyr::if_else(taxonomic_status == "accepted",
0, NA_integer_))
# Match the synonyms to their accepted names
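  # (e.g., a synonym whose acceptedNameUsageID equals the taxonID of an accepted name with
  #  id = 12 will receive accid = 12)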
taxonomyOut <- taxonomyOut %>%
dplyr::left_join(taxonomyOut %>% dplyr::select(taxonID, id),
by = c("acceptedNameUsageID" = "taxonID"),
suffix = c("", "_matched")) %>%
# Transfer this id to the accid column
dplyr::mutate(accid = dplyr::if_else(is.na(accid),
id_matched, accid)) %>%
# Drop the temporary column
dplyr::select(!id_matched) %>%
# Add in source
dplyr::mutate(source = stringr::str_c("taxadb_",provider, sep = "")) %>%
# Add in empty columns
dplyr::mutate(flags = NA_character_,
notes = NA_character_)
##### 2.4 Clean missing columns ####
# Clean up potentially missing columns
if(!"subfamily" %in% colnames(taxonomyOut)){
taxonomyOut <- taxonomyOut %>%
dplyr::mutate(subfamily = NA_character_)
}
if(!"tribe" %in% colnames(taxonomyOut)){
taxonomyOut <- taxonomyOut %>%
dplyr::mutate(tribe = NA_character_)
}
if(!"subtribe" %in% colnames(taxonomyOut)){
taxonomyOut <- taxonomyOut %>%
dplyr::mutate(subtribe = NA_character_)
}
if(!"subgenus" %in% colnames(taxonomyOut)){
taxonomyOut <- taxonomyOut %>%
dplyr::mutate(subgenus = NA_character_)
}
##### 2.5 Re-order columns ####
taxonomyOut <- taxonomyOut %>%
dplyr::relocate(c("flags","taxonomic_status","source","accid","id",
"kingdom","phylum","class","order","family",
"subfamily","tribe","subtribe","validName","canonical",
"canonical_withFlags","genus","subgenus","species","infraspecies","authorship",
"taxon_rank","valid","notes"))
#### 3.0 Identify ambiguities ####
# Test for duplicate and ambiguous names
taxonomyOut <- taxoDuplicator(
SynList = taxonomyOut,
source1 = provider,
# This is not needed in this instance
source2 = "")
#### 4.0 Save ####
# Save the output file if a fileName is provided
if(!is.null(fileName)){
readr::write_excel_csv(taxonomyOut,
file = paste0(outPath, "/", fileName, sep = ""))
}
# Return the dataset
return(taxonomyOut)
} # END taxadbToBeeBDC

# ---- End of file: R/taxadbToBeeBDC.R ----

# This function was written by James Dorey to remove duplicates from the combined
# source2 and source1 — user input, but originally Orr 2021 and Discover Life
# synonym lists. For questions, please email jbdorey[at]me.com
# This function was started on 17th May 2022 and last updated 17th May 2022
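# A minimal usage sketch (the input object name here is hypothetical; this internal helper is
# normally called by other BeeBDC functions, for example within taxadbToBeeBDC()):
#   checkedTaxonomy <- taxoDuplicator(SynList = myTaxonomy,
#                                     source1 = "DiscoverLife",
#                                     source2 = "Orr_et_al_2021_CurrBiol")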
#' @importFrom dplyr %>%
#' @importFrom dplyr row_number
taxoDuplicator <- function(
SynList = NULL,
source1 = "DiscoverLife",
source2 = "Orr_et_al_2021_CurrBiol"){
# locally bind variables to the function
validName <- accid <- id <- flags <- taxonomic_status <- canonical_withFlags <- canonical <- NULL
# Load required packages
requireNamespace("dplyr")
#### 0.0 Prep ####
##### 0.1 Remove existing flags ####
writeLines("Removing previous flags generated with this function")
  # Remove the existing flags generated from this function
SynList <- SynList %>%
dplyr::mutate(flags = stringr::str_remove_all(flags, "non-ambiguous can_wFlags") %>%
stringr::str_remove_all("non-ambiguous canonical") %>%
stringr::str_remove_all("ambiguous canonical") %>%
stringr::str_remove_all("ambiguous validName") %>%
stringr::str_remove_all("ambiguous can_wFlags") %>%
stringr::str_remove_all("ambiguous can_wFlags") %>%
stringr::str_remove_all("^, $") %>%
stringr::str_replace(", , ", ", ") )
  ##### 0.2 Find duplicates ####
  # Look for duplicated valid names in the data
duplicates <- SynList %>%
#dplyr::filter(source == "DiscoverLife") %>%
dplyr::group_by(validName) %>%
dplyr::filter(dplyr::n() > 1)
# User output
writeLines(paste(" - ", format(nrow(duplicates), big.mark = ","),
" duplicates found in the data.", sep = ""))
# Build subsetted datasets to examine
S1accepted <- duplicates %>% dplyr::filter(accid == 0 & source %in% source1)
S2Accepted <- duplicates %>% dplyr::filter(accid == 0 & source %in% source2)
S1synonyms <- duplicates %>% dplyr::filter(accid != 0 & source %in% source1)
S2synonyms <- duplicates %>% dplyr::filter(accid != 0 & source %in% source2)
#### 1.0 S1_S2 ####
##### 1.1 acc. names ####
# Find all duplicated valid names that occur in the source2 list and the source1 list.
  # This looks like all of them! These will later be REMOVED.
S2Acc2remove <- S2Accepted %>%
dplyr::filter(validName %in% S1accepted$validName)
# Do any of the accids in the full list match these names' ids?
S1IDmatch <- S2Acc2remove %>%
dplyr::filter(id %in% SynList$accid)
# Stop here becuase I have no matches, but this might be important down the track if someone
# finds them!
if(nrow(S1IDmatch) > 0 ){
return(S1IDmatch)
stop(paste(" - That's odd! There is an S2 accepted name that is referred to by another name.",
"This hasn't happened before, but you'll need to sort it out, chump!", "\n",
"I have returned the list of offending names."))
}
# For now, because these are all duplicates, I will not return these data.
##### 1.2 synonyms ####
# Find all duplicated SYNONYMS names that occur in the source2 list and the source1 list.
S2DupeSyns <- S2synonyms %>%
dplyr::filter(validName %in% S1synonyms$validName)
# Do any of the accids in the full list match these names' ids?
S2IDmatch <- S2DupeSyns %>%
dplyr::filter(id %in% SynList$accid)
  # Stop here because I have no matches, but this might be important down the track if someone
# finds them!
if(nrow(S2IDmatch) > 0 ){
return(S2IDmatch)
stop(paste(" - That's odd! There is an S2 synonym that is referred to as an accepted name.",
"This hasn't happened before, but you'll need to sort it out, chump!", "\n",
"I have returned the list of offending names."))
}
# Which names in the source2 list can we keep as unique synonyms?
S2Unique <- S2synonyms %>%
dplyr::filter(!validName %in% S1synonyms$validName)
# Pass these names onto 3.0
#### 2.0 S1 duplicates ####
##### 2.1 acc. names ####
# Look for internal source1 duplicated ACCEPTED names
S1duplicates <- S1accepted %>%
dplyr::group_by(validName) %>%
dplyr::filter(dplyr::n() > 1)
  # Stop here because I have no matches, but this might be important down the track if someone
# finds them!
if(nrow(S1duplicates) > 0 ){
return(S1duplicates)
stop(paste(" - That's odd! There is an internal S1 synonym.",
"This hasn't happened before, but you'll need to sort it out, chump!", "\n",
"I have returned the list of offending names."))
}
# Because none of these are duplicates, I will KEEP the original dataset S1accepted.
##### 2.2 valName synonyms ####
# Look for internal source1 duplicated SYNONYMS
S1duplicatesyns <- S1synonyms %>%
dplyr::group_by(validName) %>%
dplyr::filter(dplyr::n() > 1)
S1dupes_nest <- S1duplicatesyns %>%
# ungroup but nest the data by valid name instead
dplyr::ungroup() %>%
dplyr::nest_by(validName)
###### a. source1 loop ####
# Set up empty dataframes for loop
ambiSyns <- dplyr::tibble()
nonAmbiSyns <- dplyr::tibble()
# Run a loop to examine each duplicate pair in the list
if(nrow(S1dupes_nest) > 0){
for(i in 1:nrow(S1dupes_nest)){
# Get the first tibble
LoopTibble <- S1dupes_nest$data[[i]] %>%
# add the validName column back in to each row
dplyr::mutate(validName = S1dupes_nest$validName[[i]], .after = "subtribe")
# FOR n == 2
if(nrow(LoopTibble) == 2){
# LOGICAL both duplicates match to the same accid
logiTest <- all(duplicated(LoopTibble$accid) | duplicated(LoopTibble$accid, fromLast = TRUE))
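        # e.g., for accid values c(7, 7): duplicated() returns c(FALSE, TRUE) and the fromLast
        # version returns c(TRUE, FALSE), so the element-wise OR is all TRUE; for c(7, 9) both
        # calls return all FALSE and logiTest is FALSE.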
# IF the duplicates match the same accid (accepted name) - NON-ambiguous
if(logiTest == TRUE){
nonAmbiSyns <- nonAmbiSyns %>% dplyr::bind_rows(LoopTibble)
} # END TRUE
# IF the duplicates match different accid (accepted name) - AMBIGUOUS
if(logiTest == FALSE){
ambiSyns <- ambiSyns %>% dplyr::bind_rows(LoopTibble)
} # END FALSE
}# END n == 2
# FOR n > 2
if(nrow(LoopTibble) > 2){
# Find non-ambiguous duplicates
nrow_nonAmbi <- LoopTibble %>% dplyr::group_by(accid) %>% dplyr::filter(dplyr::n() > 1) %>%
nrow()
# Find ambiguous duplicates
nrow_Ambi <- LoopTibble %>% dplyr::group_by(accid) %>% dplyr::filter(dplyr::n() == 1) %>%
nrow()
# IF ALL of these rows have the same accid, then they are just regular synonym duplicates
if(nrow_nonAmbi == nrow(LoopTibble)){
# Add the lowest id number to the nonAmbiSyns tibble
LoopTibble <- LoopTibble %>% dplyr::arrange(id)
nonAmbiSyns <- nonAmbiSyns %>%
dplyr::bind_rows(dplyr::filter(LoopTibble, dplyr::row_number() == 1))
}else{ # ALL of the others have been ambiguous so far
# Add these data to the ambiSyns dataframe
ambiSyns <- ambiSyns %>%
dplyr::bind_rows(LoopTibble)
} # END else
} # END n > 2
} # END Ambiguous loop
} # END length(S1dupes_nest) > 0
###### b. loop_clean ####
if(nrow(nonAmbiSyns) > 0){
    # Keep only one row for each set of non-ambiguous synonyms
nonAmbiSyns_deDuped <- nonAmbiSyns %>%
dplyr::group_by(validName) %>%
dplyr::filter(dplyr::row_number() == 1)
# For ambiguous accids, add this to the flags
ambiSyns$flags <- "ambiguous validName"
###### c. merge ####
# Merge this back to the S1synonyms data. This will have duplicates removed and
# internally-ambiguous names flagged.
S1synonyms <- S1synonyms %>%
# REMOVE the duplicated valid names
dplyr::filter(!validName %in% S1duplicatesyns$validName) %>%
# ADD the cleaned rows back into the dataset
dplyr::bind_rows(nonAmbiSyns_deDuped, ambiSyns) }else{
# IF there are no non-ambiguous names then...
# For ambiguous accids, add this to the flags
ambiSyns$flags <- "ambiguous validName"
# Merge this back to the S1synonyms data. This will have duplicates removed and
# internally-ambiguous names flagged.
S1synonyms <- S1synonyms %>%
# REMOVE the duplicated valid names
dplyr::filter(!validName %in% S1duplicatesyns$validName) %>%
# ADD the cleaned rows back into the dataset
dplyr::bind_rows(ambiSyns)
}
# KEEP S1synonyms
#### 3.0 source2 duplicates ####
# Look for internal source1 duplicates
S2Duplicates <- S2Unique %>%
dplyr::group_by(validName) %>%
dplyr::filter(dplyr::n() > 1)
# Yep, there are source2 synonym duplicates to deal with!
# Do any of the accids in the full list match these names' ids?
S2IDmatches <- S2Duplicates %>%
dplyr::filter(id %in% SynList$accid)
# Stop here because I have no matches, but this might be important down the track if
# someone finds them!
  if(nrow(S2IDmatches) > 0 ){
    warning(paste(" - That's odd! There are accids matching to source2 synonym IDs.",
                  "This hasn't happened before, but you'll need to sort it out, chump!", "\n",
                  "I have returned the list of offending names. You're welcome."))
    return(S2IDmatches)
  }
# Take only the lowest id number match
S2Originals <- S2Duplicates %>%
# Sort by id number
dplyr::arrange(id) %>%
# Filter out ANY duplicated rows for validName
dplyr::group_by(validName) %>%
# take the first row
dplyr::filter(dplyr::row_number() == 1)
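  # i.e., for each duplicated validName, only the row with the smallest id is retained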
# KEEP S2Originals
#### 4.0 Merge ####
dupeMerge <- dplyr::bind_rows(S1accepted, S2Originals, S1synonyms) %>%
# sort again by id
dplyr::arrange(id)
# Check to make sure that all ids are unique
UniqueIDcheck <- dupeMerge %>%
dplyr::arrange(id) %>%
dplyr::filter(!duplicated(id))
# FIRST, for now remove ambiguous names
ambi_VNcheck <- UniqueIDcheck %>% dplyr::filter(flags %in% "ambiguous validName")
NonAmbi_VNcheck <- UniqueIDcheck %>% dplyr::filter(!flags %in% "ambiguous validName") %>%
dplyr::ungroup()
# Check to make sure that all validNames are unique
UniqueVNcheck <- NonAmbi_VNcheck %>%
dplyr::group_by(validName) %>%
dplyr::filter(dplyr::n() > 1)
##### 4.1 ValSyn_clean ####
# look for matches between source1 accepted names and source2 synonyms
# At present, all of these represent an accepted source1 name with a contradictory source2
# name.
# Keep the source1 name but warn the user if this changes...
dupes2remove_UnVNcheck <- UniqueVNcheck %>%
dplyr::group_by(validName) %>%
dplyr::filter(accid != 0)
  # Stop if this is not half of the original (not all correspond to a source2 synonym)
if(nrow(dupes2remove_UnVNcheck) != (nrow(UniqueVNcheck)/2)){
stop(paste(" - This is new! There is a problem at 4.1 ValSyn_clean. Please go and have a look.",
"\n", "Good luck, LOL."))
}
# Remove dupes2remove_UnVNcheck (duplicates) from the list to return, and then add the new names.
dupes2keep <- dupeMerge %>%
# Remove duplicates...
dplyr::filter(!id %in% dupes2remove_UnVNcheck$id)
# Merge these with the original dataset
deDuplicated <- SynList %>%
# FIRST, remove all of the original duplicate rows
dplyr::filter(!validName %in% duplicates$validName) %>%
# Add in the duplicates we want to keep
dplyr::bind_rows(dupes2keep)
##### 4.2 Duplicate ids ####
# There might be some rows with duplicate ids. These are now unique validNames. Assign these all NEW ids
dupID <- deDuplicated %>%
dplyr::filter(duplicated(id) | duplicated(id, fromLast = TRUE))
# IF so, remove them
if(nrow(dupID) > 0){
# Remove these from the original dataset
deDuplicated <- deDuplicated %>%
dplyr::filter(!id %in% unique(dupID$id))
# Replace the ids with new ones starting from +1 the max id number already existing
# find the largest id...
SeqStart <- max(deDuplicated$id)+1
SeqEnd <- as.numeric(SeqStart+nrow(dupID))-1
dupID$id <- seq(from = SeqStart, to = SeqEnd, by = 1)
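    # e.g., if the largest existing id were 3050 and dupID held 4 rows, the new ids would be
    # 3051, 3052, 3053, and 3054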
# re-merge
deDuplicated <- deDuplicated %>%
dplyr::bind_rows(dupID)
} # END dupID
##### 4.3 Ambi accepted ####
# Some ambiguous names are accepted names. Therefore, I will remove the associated ambiguous synonyms
ambiAcc <- deDuplicated %>%
# Find the duplicate names
dplyr::filter(duplicated(validName)|duplicated(validName, fromLast = TRUE)) %>%
# Find the accepted names
dplyr::filter(taxonomic_status == "accepted")
  # Get the number of accepted-associated ambiguous names that were removed.
ambiAccCount <- nrow(dplyr::filter(deDuplicated, validName %in% ambiAcc$validName)) - nrow(ambiAcc)
# REMOVE those names from the whole dataset
deDuplicated <- deDuplicated %>%
# remove
dplyr::filter(!validName %in% ambiAcc$validName) %>%
# rejoin those accepted name rows
dplyr::bind_rows(ambiAcc)
#### 5.0 Final Ambi ####
##### 5.1 can_wFl synonyms ####
# Look for internal source1 duplicated SYNONYMS
S1duplicatesyns_51 <- deDuplicated %>%
dplyr::group_by(canonical_withFlags) %>%
dplyr::filter(canonical_withFlags %>% stringr::str_detect(
"_"
)) %>%
dplyr::filter(dplyr::n() > 1)
S1dupes_nest <- S1duplicatesyns_51 %>%
# ungroup but nest the data by valid name instead
dplyr::ungroup() %>%
dplyr::nest_by(canonical_withFlags)
###### a. source1 loop ####
# Set up empty dataframes for loop
ambiSyns_51 <- dplyr::tibble()
nonAmbiSyns_51 <- dplyr::tibble()
  # IF S1duplicatesyns_51 is EMPTY, do not run.
if(nrow(S1duplicatesyns_51) > 0){
# Run a loop to examine each duplicate pair in the list
for(i in 1:nrow(S1dupes_nest)){
# Get the first tibble
LoopTibble <- S1dupes_nest$data[[i]] %>%
# add the canonical_withFlags column back in to each row
dplyr::mutate(canonical_withFlags = S1dupes_nest$canonical_withFlags[[i]], .after = "canonical")
# FOR n == 2
if(nrow(LoopTibble) == 2){
# LOGICAL both duplicates match to the same accid
logiTest <- all(duplicated(LoopTibble$accid) | duplicated(LoopTibble$accid, fromLast = TRUE))
# IF the duplicates match the same accid (accepted name) - NON-ambiguous
if(logiTest == TRUE){
nonAmbiSyns_51 <- nonAmbiSyns_51 %>% dplyr::bind_rows(LoopTibble)
} # END TRUE
# IF the duplicates match different accid (accepted name) - AMBIGUOUS
if(logiTest == FALSE){
# If one of these matches the other, they are NOT ambiguous.
accTEST <- any(LoopTibble$id %in% LoopTibble$accid)
if(accTEST == FALSE){
ambiSyns_51 <- ambiSyns_51 %>% dplyr::bind_rows(LoopTibble)
}
} # END FALSE
}# END n == 2
# FOR n > 2
if(nrow(LoopTibble) > 2){
# Find non-ambiguous duplicates
nrow_nonAmbi <- LoopTibble %>% dplyr::group_by(accid) %>%
dplyr::filter(dplyr::n() > 1) %>%
nrow()
# Find ambiguous duplicates
nrow_Ambi <- LoopTibble %>% dplyr::group_by(accid) %>%
dplyr::filter(dplyr::n() == 1) %>%
nrow()
# IF ALL of these rows have the same accid, then they are just regular synonym duplicates
if(nrow_nonAmbi == nrow(LoopTibble)){
        # Add all of these rows to the ambiSyns_51 tibble
ambiSyns_51 <- ambiSyns_51 %>%
dplyr::bind_rows(LoopTibble)
}else{ # ALL of the others have been ambiguous so far
        # Logical - if ALL but one accid matches an id, take that to mean they are all pointing at
        # the same record. None should match for now.
accTest <- sum(LoopTibble$id %in% LoopTibble$accid) == nrow(LoopTibble)-1
# Add these data to the ambiSyns_51 dataframe
        if(accTest == FALSE){ # Add all as synonyms
ambiSyns_51 <- ambiSyns_51 %>%
dplyr::bind_rows(LoopTibble)
}else(
stop(" - unique problem at 5.1. :(")
)
} # END else
} # END n > 2
} # END Ambiguous loop
}else{
ambiSyns_51 = dplyr::tibble()
nonAmbiSyns_51 = dplyr::tibble()
} # END big IF
###### b. loop_clean ####
# NON-AMBIGUOUS — because accid matches
if(nrow(nonAmbiSyns_51) > 0){
nonAmbiSyns_51_nAmb <- nonAmbiSyns_51 %>%
# Filter for ONLY the names that AREN'T already flagged as ambiguous
dplyr::filter(!flags %in% c("ambiguous validName")) %>%
dplyr::filter(canonical_withFlags %>% stringr::str_detect("_homonym"))
# For ambiguous accids, add this to the flags
nonAmbiSyns_51_nAmb$flags <- paste(nonAmbiSyns_51_nAmb$flags, "non-ambiguous can_wFlags",
sep = ", ") %>%
# REMOVE EMPTYS
stringr::str_replace(pattern = "NA, ", "")
    # Merge this back so that duplicates are removed and internally-ambiguous names are flagged.
deDuplicated_51 <- deDuplicated %>%
# REMOVE the matching ids
dplyr::filter(!id %in% nonAmbiSyns_51_nAmb$id) %>%
# ADD the new rows
dplyr::bind_rows(nonAmbiSyns_51_nAmb)
} else{
# If not, pass this new name onto the next section
deDuplicated_51 <- deDuplicated
}
# AMBIGUOUS 2
if(nrow(ambiSyns_51) > 0){
ambiSyns_51_NavN <- ambiSyns_51 %>%
# Filter for ONLY the names that AREN'T already flagged as ambiguous
dplyr::filter(!flags %in% c("ambiguous validName"))
# For ambiguous accids, add this to the flags
ambiSyns_51_NavN$flags <- paste(ambiSyns_51_NavN$flags, "ambiguous can_wFlags", sep = ", ") %>%
# REMOVE EMPTYS
stringr::str_replace(pattern = "NA, ", "")
# Filter the VALID ambiguities and ADD to the wflags
ambiSyns_51_all <- ambiSyns_51 %>%
# filter
dplyr::filter(!id %in% ambiSyns_51_NavN$id) %>%
# add
dplyr::bind_rows(ambiSyns_51_NavN)
# Merge this back to the deDuplicated data. This will have duplicates removed and
# internally-ambiguous names flagged.
deDuplicated_51 <- deDuplicated_51 %>%
# REMOVE the duplicated valid names
dplyr::filter(!canonical_withFlags %in% ambiSyns_51_all$canonical_withFlags) %>%
# ADD the cleaned rows back into the dataset
dplyr::bind_rows(ambiSyns_51_all)
}else{
ambiSyns_51 = dplyr::tibble()
# If not, pass this new name onto the next section
deDuplicated_51 <- deDuplicated
} # END ambiSyns_51 IF
##### 5.2 canon synonyms ####
# Look for internal source1 duplicated SYNONYMS
S1duplicatesyns_52 <- deDuplicated_51 %>%
dplyr::group_by(canonical) %>%
dplyr::filter(dplyr::n() > 1)
S1dupes_nest <- S1duplicatesyns_52 %>%
# ungroup but nest the data by valid name instead
dplyr::ungroup() %>%
dplyr::nest_by(canonical)
###### a. source1 loop ####
# Set up empty dataframes for loop
ambiSyns_52 <- dplyr::tibble()
nonAmbiSyns_52 <- dplyr::tibble()
  # IF S1duplicatesyns_52 is EMPTY, do not run.
if(nrow(S1duplicatesyns_52) > 0){
# Run a loop to examine each duplicate pair in the list
for(i in 1:nrow(S1dupes_nest)){
# Get the first tibble
LoopTibble <- S1dupes_nest$data[[i]] %>%
# add the canonical column back in to each row
dplyr::mutate(canonical = S1dupes_nest$canonical[[i]], .after = "validName")
# FOR n == 2
if(nrow(LoopTibble) == 2){
# LOGICAL both duplicates match to the same accid
logiTest <- all(duplicated(LoopTibble$accid) | duplicated(LoopTibble$accid, fromLast = TRUE))
# IF the duplicates match the same accid (accepted name) - NON-ambiguous
if(logiTest == TRUE){
nonAmbiSyns_52 <- nonAmbiSyns_52 %>% dplyr::bind_rows(LoopTibble)
} # END TRUE
# IF the duplicates match different accid (accepted name) - AMBIGUOUS
if(logiTest == FALSE){
# If one of these matches the other, they are NOT ambiguous.
accTEST <- any(LoopTibble$id %in% LoopTibble$accid)
if(accTEST == FALSE){
ambiSyns_52 <- ambiSyns_52 %>% dplyr::bind_rows(LoopTibble)
}
} # END FALSE
}# END n == 2
# FOR n > 2
if(nrow(LoopTibble) > 2){
# Find non-ambiguous duplicates
nrow_nonAmbi <- LoopTibble %>% dplyr::group_by(accid) %>%
dplyr::filter(dplyr::n() > 1) %>%
nrow()
# Find ambiguous duplicates
nrow_Ambi <- LoopTibble %>% dplyr::group_by(accid) %>%
dplyr::filter(dplyr::n() == 1) %>%
nrow()
# IF ALL of these rows have the same accid, then they are just regular synonym duplicates
if(nrow_nonAmbi == nrow(LoopTibble)){
        # Add all of these rows to the ambiSyns_52 tibble
ambiSyns_52 <- ambiSyns_52 %>%
dplyr::bind_rows(LoopTibble)
}else{ # ALL of the others have been ambiguous so far
        # Logical - if ALL but one accid matches an id, take that to mean they are all pointing at
        # the same record. None should match for now.
accTest <- sum(LoopTibble$id %in% LoopTibble$accid) == nrow(LoopTibble)-1
# Add these data to the ambiSyns_52 dataframe
        if(accTest == FALSE){ # Add all as synonyms
ambiSyns_52 <- ambiSyns_52 %>%
dplyr::bind_rows(LoopTibble)
}else(
stop(" - unique problem at 5.2! :(")
)
} # END else
} # END n > 2
} # END Ambiguous loop
}else{
ambiSyns_52 = dplyr::tibble()
nonAmbiSyns_52 = dplyr::tibble()
} # END big IF
###### b. loop_clean ####
# NON-AMBIGUOUS because accids match
if(nrow(nonAmbiSyns_52) > 0){
    # Keep only one row for each set of non-ambiguous synonyms
nonAmbiSyns_deDuped_52 <- nonAmbiSyns_52 %>%
dplyr::group_by(validName) %>%
dplyr::filter(dplyr::row_number() == 1)
}
# NON-AMBIGUOUS
if(nrow(nonAmbiSyns_52) > 0){
nonAmbiSyns_52_nAmb <- nonAmbiSyns_52 %>%
# Filter for ONLY the names that AREN'T already flagged as ambiguous
dplyr::filter(!flags %in% c("ambiguous validName", "ambiguous can_wFlags",
"ambiguous can_wFlags"))
# For ambiguous accids, add this to the flags
nonAmbiSyns_52_nAmb$flags <- paste(nonAmbiSyns_52_nAmb$flags, "non-ambiguous canonical", sep = ", ") %>%
# REMOVE EMPTYS
stringr::str_replace(pattern = "NA, ", "")
    # Merge this back so that duplicates are removed and internally-ambiguous names are flagged.
deDuplicated_52 <- deDuplicated_51 %>%
# REMOVE the matching ids
dplyr::filter(!id %in% nonAmbiSyns_52_nAmb$id) %>%
# ADD the new rows
dplyr::bind_rows(nonAmbiSyns_52_nAmb)
} else{
# If not, pass this new name onto the next section
deDuplicated_52 <- deDuplicated_51
}
# AMBIGUOUS
if(nrow(ambiSyns_52) > 0){
ambiSyns_52_NavN <- ambiSyns_52 %>%
# Filter for ONLY the names that AREN'T already flagged as ambiguous
dplyr::filter(!flags %in% c("ambiguous validName", "ambiguous can_wFlags"))
# For ambiguous accids, add this to the flags
ambiSyns_52_NavN$flags <- paste(ambiSyns_52_NavN$flags, "ambiguous canonical", sep = ", ") %>%
# REMOVE EMPTYS
stringr::str_replace(pattern = "NA, ", "")
# Filter the VALID ambiguities and ADD to the wflags
ambiSyns_52_all <- ambiSyns_52 %>%
# filter
dplyr::filter(!id %in% ambiSyns_52_NavN$id) %>%
# add
dplyr::bind_rows(ambiSyns_52_NavN)
# Merge this back to the deDuplicated data. This will have duplicates removed and
# internally-ambiguous names flagged.
deDuplicated_52 <- deDuplicated_52 %>%
# REMOVE the duplicated valid names
dplyr::filter(!canonical %in% ambiSyns_52_all$canonical) %>%
# ADD the cleaned rows back into the dataset
dplyr::bind_rows(ambiSyns_52_all)
}else{
ambiSyns_52 = dplyr::tibble()
} # END ambiSyns_52 IF
# KEEP deDuplicated_52
# What an adventure that was!
# Now, lets try and return some user information
writeLines(paste( " - Cleaning complete! From an initial dataset of ",
format(nrow(SynList), big.mark = ","), " names, there ",
"remain ", format(nrow(deDuplicated_52), big.mark = ",")," names.", "\n",
" - We removed:", "\n" ,
nrow(S1duplicates), " source1 accepted names,", "\n" ,
nrow(S2Acc2remove), " source2 'accepted' names,", "\n"))
# 2.2 - synonyms removed
if(exists("nonAmbiSyns_deDuped")){
writeLines(paste(
format(nrow(nonAmbiSyns)-nrow(nonAmbiSyns_deDuped), big.mark = ","),
" source1 synonyms,", "\n" ))}
writeLines(paste(
format(nrow(S2synonyms) - nrow(S2Unique), big.mark = ",")
, " source2 synonyms internally duplicated,", "\n" ,
nrow(S2Duplicates)-nrow(S2Originals), " source2 synonyms duplicated with the source1 list,", "\n" ,
nrow(dupes2remove_UnVNcheck), " subsequent duplicates after merging,", "\n",
# AMBIGUOUS flagged
" - We flagged:", "\n" ,
sum(deDuplicated_52$flags %in% "ambiguous validName"),
" ambiguous validName, ", "\n" ,
sum(deDuplicated_52$flags %in% "ambiguous can_wFlags"),
" ambiguous canonical_withFlags names, ", "\n" ,
sum(deDuplicated_52$flags %in% "ambiguous canonical"),
" ambiguous canonical names, ", "\n",
sum(deDuplicated_52$flags %in% "non-ambiguous can_wFlags"),
" NON-ambiguous, but duplicated, canonical_withFlags names, ", "\n",
sum(deDuplicated_52$flags %in% "non-ambiguous canonical"),
" NON-ambiguous, but duplicated, canonical names, ", "\n",
" - We removed: ", "\n",
ambiAccCount, " ambiguous synonyms associated with accepted names.", "\n",
" - We re-assigned:", "\n" ,
nrow(dupID), " duplicated [non-duplicate] ids",
sep = ""))
#### 6.0 Clean flags ####
deDuplicated_52 <- deDuplicated_52 %>%
dplyr::mutate(flags =
# Fix non-ambiguous canonical repeat
dplyr::if_else(flags %>% stringr::str_count("non-ambiguous canonical") > 1,
stringr::str_remove_all(flags, "non-ambiguous canonical") %>%
stringr::str_c("non-ambiguous canonical"), flags),
# Fix contradictory non- and is-
flags = dplyr::if_else(stringr::str_detect(
flags, "ambiguous canonical, non-ambiguous canonical") |
stringr::str_detect(flags, "non-ambiguous canonical, ambiguous canonical"),
stringr::str_remove_all(flags, "ambiguous canonical") %>%
stringr::str_remove_all("non-ambiguous canonical") %>%
stringr::str_remove("^, |, $") %>%
stringr::str_replace(", , ", ", ") %>%
stringr::str_c(" ambiguous canonical "), flags),
flags = dplyr::if_else(stringr::str_detect(
flags,
"non-ambiguous can_wFlags, ambiguous canonical, non-ambiguous can_wFlags, ambiguous canonical"),
stringr::str_c(flags, " non-ambiguous can_wFlags, ambiguous canonical "), flags
),
### 3
flags = dplyr::if_else(stringr::str_detect(flags,
"non-ambiguous can_wFlags, non-ambiguous can_wFlagsnon-ambiguous canonical"),
"non-ambiguous can_wFlags, non-ambiguous canonical", flags),
### 4
flags = dplyr::if_else(stringr::str_detect(flags,
"non-ambiguous can_wFlags, ambiguous canonical, non-ambiguous can_wFlags, ambiguous canonical non-ambiguous can_wFlags, ambiguous canonical"),
"non-ambiguous can_wFlags, ambiguous canonical", flags),
### 5
flags = dplyr::if_else(stringr::str_detect(flags,
"ambiguous can_wFlags, ambiguous can_wFlags, ambiguous canonical"),
"ambiguous can_wFlags, ambiguous canonical", flags),
### 6
flags = dplyr::if_else(stringr::str_detect(flags,
"non- ambiguous canonical"),
"non-ambiguous canonical", flags),
### 7
flags = dplyr::if_else(stringr::str_detect(flags,
"ambiguous canonical, ambiguous canonical"),
"ambiguous canonical", flags),
### cleanup
flags = flags %>% stringr::str_squish() %>%
stringr::str_remove("^, |, $") %>%
stringr::str_replace(", , ", ", ")
)
# Return the cleaned dataset
return(deDuplicated_52)
}
#### END ####
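# A minimal usage sketch (not run). It assumes the function defined above is called as
# taxoDuplicator() and that, in addition to the source2 argument shown in its signature, it
# accepts the SynList tibble and a source1 argument as used in its body (SynList needs at least
# the id, accid, validName, canonical, canonical_withFlags, taxonomic_status, source and flags
# columns). The object name and source1 value below are illustrative only.
# deDupedTaxonomy <- taxoDuplicator(SynList = mySynonymList,
#                                   source1 = "DiscoverLife",
#                                   source2 = "Orr_et_al_2021_CurrBiol")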
# ---- End of file: R/taxoDuplicator.R ----
# This function, written by James Dorey merges the Ascher and other datasets with the columns:
# "Original" and "Correct"
# For queries, please contact James Dorey at jbdorey[at]me.com
# This function was started on 13th May 2022 and last updated 17th May 2022
#' @importFrom dplyr %>%
#' @importFrom stats complete.cases
#' @importFrom dplyr row_number
#'
taxoMergeR <- function(currentNames = NULL,
newNames = NULL,
HigherNameList = NULL,
inKingdom = "Animalia",
inPhylum = "Arthropoda",
inClass = "Insecta",
inOrder = "Hymenoptera",
outPath = getwd(),
fileName = "taxonomy_taxoMergeR",
simpleNames = NULL,
problemStrings = NULL
){
# locally bind variables to the function
Original <- Correct <- accid <- id <- validName <- canonical <- canonical_withFlags <- NULL
genus <- species <- . <- tempIndex <- authorship <- taxonomic_status <-
authorship_nameSplit <- NULL
requireNamespace("dplyr")
#### 0.0 Prep ####
##### 0.1 Errors ####
###### a. FATAL errors ####
if(is.null(simpleNames)){
stop(" - Please provide an argument for simpleNames. This should be TRUE if using ",
"'Genus species'",
" format and FALSE if a more complex format like 'Genus (Subgenus) species Authority'")
}
#### 1.0 Data prep ####
##### 1.1 redundant names ####
Original_newNames_Count <- nrow(newNames)
# Remove redundant rows in NEW data (where Original == Correct or != unique())
newNames <- newNames %>%
# remove duplicate rows
dplyr::distinct() %>%
# Remove rows where Original == Correct
dplyr::filter(Original != Correct) %>%
# Remove empty spaces
dplyr::mutate(Original = Original %>% stringr::str_squish(),
Correct = Correct %>% stringr::str_squish())
# Simply count how many rows there are to output later
Original_unNew_Count <- nrow(newNames)
# Make a temporary index
newNames$tempIndex <- 1:nrow(newNames)
##### 1.2 Unique synonyms [new] ####
# In the newNames file, ONLY keep those synonyms that DO NOT occur in the list already
###### a. current names #####
# Get all of the current names
nameList <- currentNames %>%
dplyr::select(accid, id, validName, canonical, canonical_withFlags, genus, species) %>%
dplyr::mutate(Genus_species = stringr::str_c(dplyr::if_else(!is.na(genus), genus, ""),
dplyr::if_else(!is.na(species), species, ""),
sep = " "))
###### b. filter ####
# Filter out names that are already an Ascher synonym or accepted name
# IF simple
if(simpleNames == TRUE){
newNames <- newNames %>%
dplyr::filter(!Original %in% nameList$Genus_species)
}
# IF complex
if(simpleNames == FALSE){
newNames <- newNames %>%
dplyr::filter(!Original %in% nameList$validName) %>%
dplyr::filter(!Original %in% nameList$canonical) %>%
dplyr::filter(!Original %in% nameList$canonical_withFlags) %>%
dplyr::filter(!Original %in% nameList$Genus_species)
}
##### 1.3 Name splitting ####
# Split the Original column into genus and species
###### a. simpleNames ####
# Match simple Genus species format names
# Do this in-line
# if(simpleNames == TRUE){
# nameSplit <- newNames %>%
# tidyr::separate(Original, into = c("genusNew", "speciesNew"), sep = " ",
# remove = FALSE)}
###### b. complexNames ####
# Remove problemStrings if they exist
if(is.null(problemStrings) == FALSE){
# Make a new loop string of names
loopNames <- newNames$Original
for(i in 1:length(problemStrings)){
# Select the ith problemString
str2remove = problemStrings[i]
loopNames <- loopNames %>%
stringr::str_replace_all(string = .,
pattern = str2remove,
replacement = "")
}# END for problemStrings
# Squish out extra white spaces
loopNames <- loopNames %>% stringr::str_squish()
# Add this as a new column
newNames$Original_cleaned <- loopNames
}else{# END if problemStrings
newNames$Original_cleaned <- newNames$Original
}
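  # For example (hypothetical values), problemStrings = c("\\(sic\\)") would turn
  # "Genus species (sic) Author, 1900" into "Genus species Author, 1900" before name parsing.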
# Match complex names
if(simpleNames == FALSE){
# create an empty tibble to populate
loopTibble <- dplyr::tibble()
# loop through each name to extract the relevant information
for(i in 1:nrow(newNames)){
# Extract the ith original name
loopName <- newNames$Original_cleaned[i]
loopSource <- newNames$Source[i]
# Use nameSplitR to extract the information within
loopData <- nameSplitR(NameInput = loopName, #NameInput
Authority_patterns = NULL) %>%
as.data.frame(stringsAsFactors = FALSE, row.names = FALSE) %>% dplyr::tibble() %>%
dplyr::mutate(Source = loopSource)
# Add these data to loopTibble
loopTibble <- loopTibble %>%
dplyr::bind_rows(loopData)
}
# Sometimes a flag might be called due to a lack of author year. Make sure this does not happen.
loopTibble$flags <- dplyr::if_else(stringr::str_detect(string = loopTibble$validName,
pattern = "[0-9]"),
loopTibble$flags,
"")
# create a temporary tibble to populate
matchTibble <- dplyr::tibble(
tempIndex = newNames$tempIndex) %>%
dplyr::bind_cols(loopTibble)
### END loop section
# Add this information to newNames
nameSplit <- newNames %>%
      # Remove the columns to be replaced
dplyr::select(!tidyselect::any_of(colnames(loopTibble))) %>%
# add new info using the tempIndex column
dplyr::left_join(matchTibble, by = "tempIndex")
}# END simpleNames == FALSE
#### 2.0 Simple Names ####
if(simpleNames == TRUE){
##### 2.1 Acc. match ####
# find all of the matching columns between New and Ascher using the final and flagged name columns
Correct_matched <- newNames %>%
dplyr::inner_join(currentNames, by = c("Correct" = "canonical_withFlags"), keep = TRUE,
relationship = "many-to-many")
##### 2.2 single match ####
# subset those names that matched a single Ascher name
SingleMatch <- Correct_matched %>%
dplyr::group_by(tempIndex) %>%
dplyr::filter(dplyr::n() == 1)
# Split the Original column into genus and species
suppressWarnings(
SingleMatch_split <- SingleMatch %>%
tidyr::separate(Original, into = c("genusNew", "speciesNew"), sep = " ",
remove = FALSE),
classes = "warning")
# Find out if the Original New name (synonym) has a match and authority
Syn_matched <- newNames %>%
dplyr::inner_join(currentNames, by = c("Original" = "canonical_withFlags"), keep = TRUE,
relationship = "many-to-many") %>%
      # Keep only the current tempIndices
dplyr::filter(tempIndex %in% SingleMatch_split$tempIndex)%>%
# Keep those with a single match
dplyr::group_by(tempIndex) %>%
dplyr::filter(dplyr::n() == 1)
# Attach the authority to the SingleMatch_split tibble
Single_newMatched <- SingleMatch_split %>%
dplyr::left_join( dplyr::select(Syn_matched, c(tempIndex, authorship)), by = "tempIndex")
###### a. accepted ####
# For those that match accepted names, use the id as the accid
SOM_acc <- Single_newMatched %>%
dplyr::filter(taxonomic_status == "accepted")
# Merge these into a single tibble with the correct data
SOM_acc_final <- dplyr::bind_cols(
flags = SOM_acc$flags,
taxonomic_status = "synonym",
source = SOM_acc$Source,
accid = SOM_acc$id, # Get the ACCEPTED name's id for accid
id = NA, # Enter later
kingdom = inKingdom,
phylum = inPhylum,
class = inClass,
order = inOrder,
family = SOM_acc$family,
subfamily = SOM_acc$subfamily,
tribe = SOM_acc$tribe,
subtribe = SOM_acc$subtribe,
    validName = stringr::str_c(
      dplyr::if_else(complete.cases(SOM_acc$genusNew) & SOM_acc$genusNew != "NA",
                     SOM_acc$genusNew, ""),
      dplyr::if_else(complete.cases(SOM_acc$speciesNew) & SOM_acc$speciesNew != "NA",
                     SOM_acc$speciesNew, ""),
      dplyr::if_else(complete.cases(SOM_acc$authorship.y) & SOM_acc$authorship.y != "NA",
                     SOM_acc$authorship.y, ""),
      sep = " "),
    canonical_withFlags = "NA",
    canonical = stringr::str_c(
      dplyr::if_else(complete.cases(SOM_acc$genusNew) & SOM_acc$genusNew != "NA",
                     SOM_acc$genusNew, ""),
      dplyr::if_else(complete.cases(SOM_acc$speciesNew) & SOM_acc$speciesNew != "NA",
                     SOM_acc$speciesNew, ""),
      sep = " "),
genus = SOM_acc$genusNew,
subgenus = "NA",
species = SOM_acc$speciesNew,
infraspecies = "NA",
authorship = SOM_acc$authorship.y,
taxon_rank = "species",
valid = FALSE,
tempIndex = SOM_acc$tempIndex,
notes = SOM_acc$notes
)
###### b. synonyms ####
# For those that match SYNONYM names, use the accid as the accid - MATCH the actual accepted name.
SOM_syn <- Single_newMatched %>%
dplyr::filter(taxonomic_status == "synonym")
# Merge these into a single tibble with the correct data
SOM_syn_final <- dplyr::bind_cols(
flags = SOM_syn$flags,
taxonomic_status = "synonym",
source = SOM_syn$Source,
# Get the accepted id for accid. Hence, find the accepted name from the synonym this matched to
accid = SOM_syn$accid,
id = NA, # Assign later
kingdom = inKingdom,
phylum = inPhylum,
class = inClass,
order = inOrder,
family = SOM_syn$family,
subfamily = SOM_syn$subfamily,
tribe = SOM_syn$tribe,
subtribe = SOM_syn$subtribe,
validName = stringr::str_c(
dplyr::if_else(complete.cases(SOM_syn$genusNew) & SOM_syn$genusNew != "NA",
SOM_syn$genusNew, ""),
dplyr::if_else(complete.cases(SOM_syn$speciesNew) & SOM_syn$speciesNew != "NA",
SOM_syn$speciesNew, ""),
dplyr::if_else(complete.cases(SOM_syn$authorship.y) & SOM_syn$authorship.y != "NA",
SOM_syn$authorship.y, ""),
sep = " "),
canonical_withFlags = "NA",
canonical = stringr::str_c(
dplyr::if_else(complete.cases(SOM_syn$genusNew) & SOM_syn$genusNew != "NA",
SOM_syn$genusNew, ""),
dplyr::if_else(complete.cases(SOM_syn$speciesNew) & SOM_syn$speciesNew != "NA",
SOM_syn$speciesNew, ""),
sep = " "),
genus = SOM_syn$genusNew,
subgenus = "NA",
species = SOM_syn$speciesNew,
infraspecies = "NA",
authorship = SOM_syn$authorship.y,
taxon_rank = "species",
valid = FALSE,
tempIndex = SOM_syn$tempIndex,
notes = SOM_syn$notes
)
##### 2.3 multiple matches ####
###### a. take accepted ####
# subset those names that matched multiple Ascher names
MultiMatch <- Correct_matched %>%
# group by the temporary index number
dplyr::group_by(tempIndex) %>%
# Find all with more than one
dplyr::filter(dplyr::n() > 1) %>%
      # Sort in the order of what you want to keep
dplyr::arrange(accid,
.by_group = TRUE) %>%
# Filter for the first row only - this will direct to the accepted name hopefully
dplyr::filter(dplyr::row_number() == 1)
    ###### b. find accepted (if needed) ####
# If any rows do not == 0 (accepted names), then find those names.
synMatched <- MultiMatch %>%
# Get those without an accepted match
dplyr::filter(accid != 0)
# Remove the ascher columns
synMatched_reduced <- synMatched %>%
dplyr::select(!colnames(currentNames))
# Re-add the accid
synMatched_reduced$accid <- synMatched$accid
synMatched_reduced$authorship_nameSplit <- synMatched$authorship
# re-combine with the new accepted name's data
synMatched <- synMatched_reduced %>%
dplyr::left_join(., currentNames, by = c("accid" = "id"))
# These will need to have their accid become id - and changed back in a later step
synMatched <- synMatched %>%
dplyr::mutate(id = accid)
###### c. recombine ####
MultiMatch <- MultiMatch %>%
# Remove the old synonym records if they exist
dplyr::filter(!tempIndex %in% synMatched$tempIndex) %>%
dplyr::bind_rows(synMatched)
###### d. authorship ####
# Give preference to the occurrence's authorship and then combine
MultiMatch$authorship <- dplyr::if_else(MultiMatch$authorship_nameSplit == "" |
is.na(MultiMatch$authorship_nameSplit),
MultiMatch$authorship,
MultiMatch$authorship_nameSplit )
# Get the accepted names
Mult_newAcc <- MultiMatch %>%
dplyr::filter(taxonomic_status == "accepted")
# Get the synonym names
Mult_newSyn <- MultiMatch %>%
dplyr::filter(taxonomic_status == "synonym")
    # IF there are synonyms that don't occur in the accepted names, STOP. Because this isn't a
# problem for me.
MULTSynTest <- Mult_newSyn %>%
dplyr::filter(!Original %in% Mult_newAcc$Original)
if(nrow(MULTSynTest) > 0){
stop(paste0("There are multiple-match synonyms that aren't in the mult.accepted list. ",
"This is new. Look for 'MULTSynTest'"))}
# Merge these into a single tibble with the correct data
MO_FINAL <- dplyr::bind_cols(
flags = Mult_newAcc$flags,
taxonomic_status = "synonym",
source = Mult_newAcc$Source,
accid = Mult_newAcc$id, # Get the accepted id for accid
      id = NA, # Assign later
kingdom = inKingdom,
phylum = inPhylum,
class = inClass,
order = inOrder,
family = Mult_newAcc$family,
subfamily = Mult_newAcc$subfamily,
tribe = Mult_newAcc$tribe,
subtribe = Mult_newAcc$subtribe,
validName = stringr::str_c(
dplyr::if_else(complete.cases(Mult_newAcc$genus) & Mult_newAcc$genus != "NA",
Mult_newAcc$genus, ""),
dplyr::if_else(complete.cases(Mult_newAcc$species) & Mult_newAcc$species != "NA",
Mult_newAcc$species, ""),
dplyr::if_else(complete.cases(Mult_newAcc$authorship) & Mult_newAcc$authorship != "NA",
Mult_newAcc$authorship, ""),
sep = " "),
canonical_withFlags = "NA",
canonical = stringr::str_c(
dplyr::if_else(complete.cases(Mult_newAcc$genus) & Mult_newAcc$genus != "NA",
Mult_newAcc$genus, ""),
dplyr::if_else(complete.cases(Mult_newAcc$species) & Mult_newAcc$species != "NA",
Mult_newAcc$species, ""),
sep = " "),
genus = Mult_newAcc$genus,
subgenus = "NA",
species = Mult_newAcc$species,
infraspecies = "NA",
authorship = Mult_newAcc$authorship,
taxon_rank = "species",
valid = FALSE,
tempIndex = Mult_newAcc$tempIndex
#notes = SM_GenSp$notes
)
} # END simpleNames == TRUE
#
#### 3.0 Complex names ####
if(simpleNames == FALSE){
##### 3.1 Acc. match ####
# find all of the matching columns between New and Ascher using the final and flagged
# name columns
Correct_matched <- nameSplit %>%
# Match with canonical_withFlags
dplyr::inner_join(currentNames, by = c("Correct" = "canonical_withFlags"), keep = TRUE,
suffix = c("_nameSplit", ""), relationship = "many-to-many")
# Match with validName for those that FAILED with canonical_withFlags
Correct_matched2 <- nameSplit %>%
# remove already matched
dplyr::filter(!tempIndex %in% Correct_matched$tempIndex) %>%
dplyr::inner_join(currentNames %>%
                          # Make sure to sort by accepted names first
dplyr::arrange(accid),
                        by = c("Correct" = "validName"), keep = TRUE,
suffix = c("_nameSplit", ""),
# Only keep the first match (will be the lowest accid)
multiple ="first")
#merge
Correct_matched <- Correct_matched %>%
dplyr::bind_rows(Correct_matched2)
rm(Correct_matched2)
    ##### 3.2 single match ####
# subset those names that matched a single Ascher name
Single_newMatched <- Correct_matched %>%
dplyr::group_by(tempIndex) %>%
dplyr::filter(dplyr::n() == 1)
# Give preference to the occurrence's authorship and then combine
Single_newMatched$authorship <- dplyr::if_else(Single_newMatched$authorship_nameSplit == "" |
is.na(Single_newMatched$authorship_nameSplit),
Single_newMatched$authorship,
Single_newMatched$authorship_nameSplit )
###### a. accepted ####
# For those that match accepted names, use the id as the accid
SOM_acc <- Single_newMatched %>%
dplyr::filter(taxonomic_status == "accepted")
# Merge these into a single tibble with the correct data
SOM_acc_final <- dplyr::tibble(
flags = SOM_acc$flags,
taxonomic_status = "synonym",
source = SOM_acc$Source,
accid = SOM_acc$id, # Get the ACCEPTED name's id for accid
id = NA, # Enter later
kingdom = inKingdom,
phylum = inPhylum,
class = inClass,
order = inOrder,
family = SOM_acc$family,
subfamily = SOM_acc$subfamily,
tribe = SOM_acc$tribe,
subtribe = SOM_acc$subtribe,
validName = stringr::str_c(
dplyr::if_else(complete.cases(SOM_acc$genus_nameSplit) & SOM_acc$genus_nameSplit != "NA",
SOM_acc$genus_nameSplit, ""),
# subgenus
dplyr::if_else(complete.cases(SOM_acc$subgenus_nameSplit) &
SOM_acc$subgenus_nameSplit != "NA",
paste0("(",SOM_acc$subgenus_nameSplit ,")"), ""),
dplyr::if_else(complete.cases(SOM_acc$species_nameSplit) &
SOM_acc$species_nameSplit != "NA",
SOM_acc$species_nameSplit, ""),
dplyr::if_else(complete.cases(SOM_acc$infraspecies_nameSplit) &
SOM_acc$infraspecies_nameSplit != "NA",
SOM_acc$infraspecies_nameSplit, ""),
dplyr::if_else(complete.cases(SOM_acc$authorship) & SOM_acc$authorship != "NA",
SOM_acc$authorship, ""),
sep = " "),
canonical_withFlags = "NA",
canonical = stringr::str_c(
dplyr::if_else(complete.cases(SOM_acc$genus_nameSplit) & SOM_acc$genus_nameSplit != "NA",
SOM_acc$genus_nameSplit, ""),
# subgenus
dplyr::if_else(complete.cases(SOM_acc$subgenus_nameSplit) &
SOM_acc$subgenus_nameSplit != "NA",
paste0("(",SOM_acc$subgenus_nameSplit ,")"), ""),
dplyr::if_else(complete.cases(SOM_acc$species_nameSplit) & SOM_acc$species_nameSplit != "NA",
SOM_acc$species_nameSplit, ""),
dplyr::if_else(complete.cases(SOM_acc$infraspecies_nameSplit) &
SOM_acc$infraspecies_nameSplit != "NA",
SOM_acc$infraspecies_nameSplit, ""),
sep = " "),
genus = SOM_acc$genus_nameSplit,
subgenus = dplyr::if_else(complete.cases(SOM_acc$subgenus_nameSplit) &
SOM_acc$subgenus_nameSplit != "NA",
SOM_acc$subgenus_nameSplit, ""),
species = SOM_acc$species_nameSplit,
# EDIT:
infraspecies = SOM_acc$infraspecies_nameSplit,
authorship = SOM_acc$authorship,
# EDIT:
taxon_rank = dplyr::if_else(complete.cases(SOM_acc$infraspecies_nameSplit) &
SOM_acc$infraspecies_nameSplit != "NA",
"infraspecies",
dplyr::if_else(complete.cases(SOM_acc$species) &
SOM_acc$species != "NA",
"species",
dplyr::if_else(complete.cases(SOM_acc$genus),
"genus", "higher"))),
valid = FALSE,
tempIndex = SOM_acc$tempIndex,
notes = SOM_acc$notes
)
###### b. synonyms ####
# For those that match SYNONYM names, use the accid as the accid - MATCH the actual accepted name.
SOM_syn <- Single_newMatched %>%
dplyr::filter(taxonomic_status == "synonym")
# Merge these into a single tibble with the correct data
SOM_syn_final <- dplyr::bind_cols(
flags = SOM_syn$flags,
taxonomic_status = "synonym",
source = SOM_syn$Source,
accid = SOM_syn$accid, # Get the accepted id for accid. Hence, find the accepted name from
# the synonym this matched to
id = NA, # Assign later
kingdom = inKingdom,
phylum = inPhylum,
class = inClass,
order = inOrder,
family = SOM_syn$family,
subfamily = SOM_syn$subfamily,
tribe = SOM_syn$tribe,
subtribe = SOM_syn$subtribe,
validName = stringr::str_c(
dplyr::if_else(complete.cases(SOM_syn$genus_nameSplit) & SOM_syn$genus_nameSplit != "NA",
SOM_syn$genus_nameSplit, ""),
# subgenus
dplyr::if_else(complete.cases(SOM_syn$subgenus_nameSplit) &
SOM_syn$subgenus_nameSplit != "NA",
paste0("(",SOM_syn$subgenus_nameSplit ,")"), ""),
dplyr::if_else(complete.cases(SOM_syn$species_nameSplit) & SOM_syn$species_nameSplit != "NA",
SOM_syn$species_nameSplit, ""),
dplyr::if_else(complete.cases(SOM_syn$infraspecies_nameSplit) &
SOM_syn$infraspecies_nameSplit != "NA",
SOM_syn$infraspecies_nameSplit, ""),
dplyr::if_else(complete.cases(SOM_syn$authorship) & SOM_syn$authorship != "NA",
SOM_syn$authorship, ""),
sep = " "),
canonical_withFlags = "NA",
# EDIT:
canonical = stringr::str_c(
dplyr::if_else(complete.cases(SOM_syn$genus_nameSplit) & SOM_syn$genus_nameSplit != "NA",
SOM_syn$genus_nameSplit, ""),
# subgenus
dplyr::if_else(complete.cases(SOM_syn$subgenus_nameSplit) &
SOM_syn$subgenus_nameSplit != "NA",
paste0("(",SOM_syn$subgenus_nameSplit ,")"), ""),
dplyr::if_else(complete.cases(SOM_syn$species_nameSplit) & SOM_syn$species_nameSplit != "NA",
SOM_syn$species_nameSplit, ""),
dplyr::if_else(complete.cases(SOM_syn$infraspecies_nameSplit) &
SOM_syn$infraspecies_nameSplit != "NA",
SOM_syn$infraspecies_nameSplit, ""),
sep = " "),
genus = SOM_syn$genus_nameSplit,
subgenus = dplyr::if_else(complete.cases(SOM_syn$subgenus_nameSplit) &
SOM_syn$subgenus_nameSplit != "NA",
paste0(SOM_syn$subgenus_nameSplit), ""),
species = SOM_syn$species_nameSplit,
# EDIT:
infraspecies = SOM_syn$infraspecies_nameSplit,
authorship = SOM_syn$authorship,
# EDIT:
taxon_rank = dplyr::if_else(complete.cases(SOM_syn$infraspecies_nameSplit) &
SOM_syn$infraspecies_nameSplit != "NA",
"infraspecies",
dplyr::if_else(complete.cases(SOM_syn$species) &
SOM_syn$species != "NA",
"species",
dplyr::if_else(complete.cases(SOM_syn$genus) &
SOM_syn$genus != "NA",
"genus", "higher"))),
valid = FALSE,
tempIndex = SOM_syn$tempIndex,
notes = SOM_syn$notes
)
##### 3.3 multiple matches ####
###### a. take accepted ####
# subset those names that matched multiple Ascher names
MultiMatch <- Correct_matched %>%
# group by the temporary index number
dplyr::group_by(tempIndex) %>%
# Find all with more than one
dplyr::filter(dplyr::n() > 1) %>%
      # Sort in the order of what you want to keep
dplyr::arrange(accid,
.by_group = TRUE) %>%
# Filter for the first row only - this will direct to the accepted name hopefully
dplyr::filter(dplyr::row_number() == 1)
    ###### b. find accepted (if needed) ####
# If any rows do not == 0 (accepted names), then find those names.
synMatched <- MultiMatch %>%
# Get those without an accepted match
dplyr::filter(accid != 0)
# Remove the ascher columns
synMatched_reduced <- synMatched %>%
dplyr::select(!colnames(currentNames))
# Re-add the accid
synMatched_reduced$accid <- synMatched$accid
# re-combine with the new accepted name's data
synMatched <- synMatched_reduced %>%
dplyr::left_join(., currentNames, by = c("accid" = "id"))
# These will need to have their accid become id - and changed back in a later step
synMatched <- synMatched %>%
dplyr::mutate(id = accid)
###### c. recombine ####
MultiMatch <- MultiMatch %>%
# Remove the old synonym records if they exist
dplyr::filter(!tempIndex %in% synMatched$tempIndex) %>%
dplyr::bind_rows(synMatched)
###### d. authorship ####
# Give preference to the occurrence's authorship and then combine
MultiMatch$authorship <- dplyr::if_else(MultiMatch$authorship_nameSplit == "" |
is.na(MultiMatch$authorship_nameSplit),
MultiMatch$authorship,
MultiMatch$authorship_nameSplit )
# Remove redundant column
MultiMatch <- MultiMatch %>%
dplyr::select(!c(authorship_nameSplit))
# Get the accepted names
Mult_newAcc <- MultiMatch %>%
dplyr::filter(taxonomic_status == "accepted")
# Get the synonym names
Mult_newSyn <- MultiMatch %>%
dplyr::filter(taxonomic_status == "synonym")
    # IF there are synonyms that don't occur in the accepted names, STOP. Because this isn't a
# problem for me.
MULTSynTest <- Mult_newSyn %>%
dplyr::filter(!Original %in% Mult_newAcc$Original)
if(nrow(MULTSynTest) > 0){
stop(paste0("There are multiple-match synonyms that aren't in the mult.accepted list.",
" This is new. Look for 'MULTSynTest'"))}
# Merge these into a single tibble with the correct data
MO_FINAL <- dplyr::bind_cols(
flags = Mult_newAcc$flags,
taxonomic_status = "synonym",
source = Mult_newAcc$Source,
accid = Mult_newAcc$id, # Get the accepted id for accid
      id = NA, # Assign later
kingdom = inKingdom,
phylum = inPhylum,
class = inClass,
order = inOrder,
family = Mult_newAcc$family,
subfamily = Mult_newAcc$subfamily,
tribe = Mult_newAcc$tribe,
subtribe = Mult_newAcc$subtribe,
validName = stringr::str_c(
dplyr::if_else(complete.cases(Mult_newAcc$genus_nameSplit) &
Mult_newAcc$genus_nameSplit != "NA",
Mult_newAcc$genus_nameSplit, ""),
#subgenus
dplyr::if_else(complete.cases(Mult_newAcc$subgenus_nameSplit) &
Mult_newAcc$subgenus_nameSplit != "NA",
paste0("(",Mult_newAcc$subgenus_nameSplit ,")"), ""),
dplyr::if_else(complete.cases(Mult_newAcc$species_nameSplit) &
Mult_newAcc$species_nameSplit != "NA",
Mult_newAcc$species_nameSplit, ""),
dplyr::if_else(complete.cases(Mult_newAcc$infraspecies_nameSplit) &
Mult_newAcc$infraspecies_nameSplit != "NA",
Mult_newAcc$infraspecies_nameSplit, ""),
dplyr::if_else(complete.cases(Mult_newAcc$authorship) & Mult_newAcc$authorship != "NA",
Mult_newAcc$authorship, ""),
sep = " "),
canonical_withFlags = "NA",
canonical = stringr::str_c(
dplyr::if_else(complete.cases(Mult_newAcc$genus_nameSplit) &
Mult_newAcc$genus_nameSplit != "NA",
Mult_newAcc$genus_nameSplit, ""),
#subgenus
dplyr::if_else(complete.cases(Mult_newAcc$subgenus_nameSplit) &
Mult_newAcc$subgenus_nameSplit != "NA",
paste0("(",Mult_newAcc$subgenus_nameSplit ,")"), ""),
dplyr::if_else(complete.cases(Mult_newAcc$species_nameSplit) &
Mult_newAcc$species_nameSplit != "NA",
Mult_newAcc$species_nameSplit, ""),
dplyr::if_else(complete.cases(Mult_newAcc$infraspecies_nameSplit) &
Mult_newAcc$infraspecies_nameSplit != "NA",
Mult_newAcc$infraspecies_nameSplit, ""),
sep = " "),
genus = Mult_newAcc$genus_nameSplit,
subgenus = dplyr::if_else(complete.cases(Mult_newAcc$subgenus_nameSplit) &
Mult_newAcc$subgenus_nameSplit != "NA",
paste0(Mult_newAcc$subgenus_nameSplit), ""),
species = Mult_newAcc$species_nameSplit,
# EDIT:
infraspecies = Mult_newAcc$infraspecies_nameSplit,
authorship = Mult_newAcc$authorship,
# EDIT:
taxon_rank = dplyr::if_else(
complete.cases(Mult_newAcc$infraspecies_nameSplit) & Mult_newAcc$infraspecies_nameSplit != "NA",
"infraspecies",
dplyr::if_else(complete.cases(Mult_newAcc$species) &
Mult_newAcc$species != "NA",
"species",
dplyr::if_else(complete.cases(Mult_newAcc$genus) &
Mult_newAcc$genus != "NA",
"genus", "higher"))),
valid = FALSE,
tempIndex = Mult_newAcc$tempIndex
#notes = SM_GenSp$notes
)
} # END simpleNames == FALSE
#
#### 4.0 Merge ####
# Merge the output rows
merged_names <- dplyr::bind_rows(SOM_acc_final, SOM_syn_final, MO_FINAL)
# Remove " NA" from end of validNames where there was no authority
merged_names <- merged_names %>%
dplyr::mutate(validName = stringr::str_replace(validName,
pattern = " NA$",
replacement = "") %>%
stringr::str_squish())
# Find those rows that did not match
failed_names <- newNames %>%
dplyr::filter(!tempIndex %in% merged_names$tempIndex)
# Select the desired columns
merged_names <- merged_names %>%
dplyr::select(colnames(currentNames))
# Add id numbers
idMax <- max(currentNames$id) + 1
idEnd <- idMax + nrow(merged_names) -1
merged_names$id <- seq(from = idMax, to = idEnd, by = 1)
# Remove silly names... for some reason I cant '!' inverse select these...
# Not sure why this isn't filtering in the negative...
# toRemove <- newNames %>%
# dplyr::filter(!Correct %in%
# c("extinct", "genus", "Genus", "GENUS", "NIDB", "not_a_species",
# "not a bee", "NOT A BEE", "not a species", "Not_a_bee", "NOT_A_BEE",
# "untraced", "x", "Check"))
#
# Write user output
writeLines(paste(
" - Names merged. ","\n",
"We removed ", format(Original_newNames_Count - Original_unNew_Count, big.mark = ","),
" duplicate new synonyms ",
"\n ", "We successfuly matched: ", "\n ",
format(nrow(SOM_acc_final), big.mark = ","), " new names to the current accepted names;", "\n ",
format(nrow(SOM_syn_final), big.mark = ","),
" new names to the current synonyms, and then their accepted name;",
"\n ",format(nrow(MO_FINAL), big.mark = ","),
" new names that matched the current accepted and synonym names.",
" These are matched to their accepted names;", "\n ",
"!! There were a total of ", format(nrow(failed_names), big.mark = ","),
" new names that failed",
" to find a match in the the current list !!", "\n ",
"We kept a total of ", format(nrow(merged_names), big.mark = ","), " new synonyms.",
sep = ""))
  #### 5.0 Clean data ####
writeLines(paste(
" - Cleaning new data...", sep = ""
))
## manage new data ##
writeLines(paste(
" - Adding higher names with the HigherNamer function...", sep = ""
))
# Add higher order names
merged_names_cl <- HigherNamer(HigherNameList = HigherNameList,
InSynList = merged_names)
# manage flags
writeLines(paste(
" - Managing flags with the FlagManager function...", sep = ""
))
merged_names_cl <- FlagManager(InSynList = merged_names_cl,
flagCol = "notes")
  #### 6.0 Save ####
writeLines(paste(
" - Saving the matched new component of the data to the file ",
outPath, "/", fileName, "_", Sys.Date(), ".csv", " seperately...", sep = ""
))
# Save the current-matched new dataset
readr::write_excel_csv(merged_names_cl, file = paste(outPath,"/",
fileName, "_", Sys.Date(), ".csv", sep = ""))
writeLines(paste(" - ", nrow(failed_names),
" names from the new list did not have an accepted or synonym match to the current list. They ",
"will be removed. ", "\n",
"Saving these no-match names to ",
outPath, "/", fileName, "_", Sys.Date(), ".csv", " seperately...", sep = ""
))
# Save noMatch_df
readr::write_excel_csv(failed_names,
file = paste(outPath, "/",
fileName, "_failed_", Sys.Date(), ".csv", sep = ""))
# Add this dataset to the Ascher dataset
merged_names_cl <- dplyr::tibble(merged_names_cl)
# Convert some column types to merge with Ascher data
merged_names_cl$id <- as.numeric(merged_names_cl$id)
merged_names_cl$accid <- as.numeric(merged_names_cl$accid)
merged_names_cl$valid <- as.logical(merged_names_cl$valid)
namesFinal <- currentNames %>%
dplyr::bind_rows(merged_names_cl)
# Return this object
return(namesFinal)
} # END Function
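# A minimal usage sketch (not run). currentNames is the existing taxonomy tibble (e.g. the output
# of taxoDuplicator()), newNames is a tibble with at least Original, Correct and Source columns,
# and HigherNameList is the higher-taxonomy lookup passed on to HigherNamer(). The object names
# below are illustrative only.
# mergedTaxonomy <- taxoMergeR(currentNames = currentTaxonomy,
#                              newNames = newSynonymTable,
#                              HigherNameList = higherNames,
#                              inKingdom = "Animalia", inPhylum = "Arthropoda",
#                              inClass = "Insecta", inOrder = "Hymenoptera",
#                              outPath = tempdir(),
#                              fileName = "taxonomy_taxoMergeR",
#                              simpleNames = FALSE,
#                              problemStrings = NULL)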
# ---- End of file: R/taxoMergeR.R ----
# Description of the BeeBDC toy dataset bees3sp.csv written by James B. Dorey on the
# 16th of March 2023
#' A flagged dataset of 105 random bee occurrence records from three species
#'
#' This test dataset includes 105 random occurrence records from three bee species.
#' The included species are: "Agapostemon tyleri Cockerell, 1917", "Centris rhodopus Cockerell,
#' 1897", and "Perdita octomaculata (Say, 1824)".
#'
#' A small bee occurrence dataset with flags generated by BeeBDC which can be used to run the
#' example script and to test functions. For data types, see [BeeBDC::ColTypeR()].
#'
#' @docType data
#'
#' @usage data("bees3sp", package = "BeeBDC")
#'
#' @format An object of class \code{"tibble"}
#' \describe{
#' \item{database_id}{Occurrence code generated in bdc or BeeBDC}
#' \item{scientificName}{Full scientificName as shown on DiscoverLife}
#' \item{family}{Family name}
#' \item{subfamily}{Subfamily name}
#' \item{genus}{Genus name}
#' \item{subgenus}{Subgenus name}
#' \item{subspecies}{Full scientific name with subspecies name — ALA column}
#' \item{specificEpithet}{The species name (specific epithet) only}
#' \item{infraspecificEpithet}{The subspecies name (intraspecific epithet) only}
#' \item{acceptedNameUsage}{The full scientific name, with authorship and date information if known, of the currently valid (zoological) or accepted (botanical) taxon.}
#' \item{taxonRank}{The taxonomic rank of the most specific name in the scientificName column.}
#' \item{scientificNameAuthorship}{The authorship information for the scientificName column formatted according to the conventions of the applicable nomenclaturalCode.}
#' \item{identificationQualifier}{A brief phrase or a standard term ("cf.", "aff.") to express the determiner's doubts about the identification.}
#' \item{higherClassification}{A list (concatenated and separated) of taxon names terminating at the rank immediately superior to the taxon referenced in the taxon record.}
#' \item{identificationReferences}{A list (concatenated and separated) of references (e.g. publications, global unique identifier, URI, etc.) used in the identification of the occurrence.}
#' \item{typeStatus}{A list (concatenated and separated) of nomenclatural types (e.g. type status, typified scientific name, publication) applied to the occurrence.}
#' \item{previousIdentifications}{A list (concatenated and separated) of previous assignments of names to the occurrence.}
#' \item{verbatimIdentification}{This term is meant to allow the capture of an unaltered original identification/determination, including identification qualifiers, hybrid formulas, uncertainties, etc. This term is meant to be used in addition to scientificName (and identificationQualifier etc.), not instead of it.}
#' \item{identifiedBy}{A list (concatenated and separated) of names of people, groups, or organizations who assigned the Taxon to the subject.}
#' \item{dateIdentified}{The date on which the occurrence was identified as belonging to a taxon.}
#' \item{decimalLatitude}{The geographic latitude (in decimal degrees, using the spatial reference system given in geodeticDatum) of the geographic center of a location. Positive values are north of the Equator, negative values are south of it, and valid values lie between -90 and 90, inclusive.}
#' \item{decimalLongitude}{The geographic longitude (in decimal degrees, using the spatial reference system given in geodeticDatum) of the geographic center of a location. Positive values are east of the Greenwich Meridian, and negative values are west of it. Valid values lie between -180 and 180, inclusive.}
#' \item{stateProvince}{The name of the next smaller administrative region than country (e.g. state, province, canton, department, region, etc.) in which the location for the occurrence is found.}
#' \item{continent}{The name of the continent in which the location for the occurrence is found.}
#' \item{locality}{A specific description of the place the occurrence was found.}
#' \item{island}{The name of the island on or near which the location for the occurrence is found, if applicable.}
#' \item{county}{The full, unabbreviated name of the next smaller administrative region than stateProvince (e.g. county, shire, department, etc.) in which the location for the occurrence is found.}
#' \item{municipality}{The full, unabbreviated name of the next smaller administrative region than county (e.g. city, municipality, etc.) in which the location for the occurrence is found. Do not use this term for a nearby named place that does not contain the actual location for the occurrence.}
#' \item{license}{A legal document giving official permission to do something with the resource.}
#' \item{issue}{A GBIF-defined issue.}
#' \item{eventDate}{The time or interval during which the Event occurred. For occurrences, this is the time or interval when the event was recorded.}
#' \item{eventTime}{The time or interval during which an Event occurred.}
#' \item{day}{The integer day of the month on which the Event occurred. For occurrences, this is the day when the event was recorded.}
#' \item{month}{The integer month in which the Event occurred. For occurrences, this is the month of when the event was recorded.}
#' \item{year}{The four-digit year in which the Event occurred, according to the Common Era Calendar. For occurrences, this is the year when the event was recorded.}
#' \item{basisOfRecord}{The specific nature of the data record. Recommended best practice is to use the standard label of one of the Darwin Core classes.PreservedSpecimen, FossilSpecimen, LivingSpecimen, MaterialSample, Event, HumanObservation, MachineObservation, Taxon, Occurrence, MaterialCitation}
#' \item{country}{The name of the country or major administrative unit in which the location for the occurrence is found.}
#' \item{type}{The nature or genre of the resource. StillImage, MovingImage, Sound, PhysicalObject, Event, Text.}
#' \item{occurrenceStatus}{A statement about the presence or absence of a Taxon at a Location. present, absent.}
#' \item{recordNumber}{An identifier given to the Occurrence at the time it was recorded. Often serves as a link between field notes and an Occurrence record, such as a specimen collector's number.}
#' \item{recordedBy}{A list (concatenated and separated) of names of people, groups, or organizations responsible for recording the original Occurrence. The primary collector or observer, especially one who applies a personal identifier (recordNumber), should be listed first.}
#' \item{eventID}{An identifier for the set of information associated with an Event (something that occurs at a place and time). May be a global unique identifier or an identifier specific to the data set.}
#' \item{Location}{A spatial region or named place.}
#' \item{samplingProtocol}{The names of, references to, or descriptions of the methods or protocols used during an Event. Examples UV light trap, mist net, bottom trawl, ad hoc observation | point count, Penguins from space: faecal stains reveal the location of emperor penguin colonies, https://doi.org/10.1111/j.1466-8238.2009.00467.x, Takats et al. 2001.}
#' \item{samplingEffort}{The amount of effort expended during an Event. Examples 40 trap-nights, 10 observer-hours, 10 km by foot, 30 km by car.}
#' \item{individualCount}{The number of individuals present at the time of the Occurrence. Integer.}
#' \item{organismQuantity}{A number or enumeration value for the quantity of organisms. Examples 27 (organismQuantity) with individuals (organismQuantityType). 12.5 (organismQuantity) with percentage biomass (organismQuantityType). r (organismQuantity) with Braun Blanquet Scale (organismQuantityType). many (organismQuantity) with individuals (organismQuantityType).}
#' \item{coordinatePrecision}{A decimal representation of the precision of the coordinates given in the decimalLatitude and decimalLongitude.}
#' \item{coordinateUncertaintyInMeters}{The horizontal distance (in meters) from the given decimalLatitude and decimalLongitude describing the smallest circle containing the whole of the Location. Leave the value empty if the uncertainty is unknown, cannot be estimated, or is not applicable (because there are no coordinates). Zero is not a valid value for this term.}
#' \item{spatiallyValid}{Occurrence records in the ALA can be filtered by using the spatially valid flag. This flag combines a set of tests applied to the record to see how reliable its spatial data components are.}
#' \item{catalogNumber}{An identifier (preferably unique) for the record within the data set or collection.}
#' \item{gbifID}{The identifier assigned by GBIF for each record.}
#' \item{datasetID}{An identifier for the set of data. May be a global unique identifier or an identifier specific to a collection or institution.}
#' \item{institutionCode}{The name (or acronym) in use by the institution having custody of the object(s) or information referred to in the record. Examples MVZ, FMNH, CLO, UCMP.}
#' \item{datasetName}{The name identifying the data set from which the record was derived.}
#' \item{otherCatalogNumbers}{A list (concatenated and separated) of previous or alternate fully qualified catalog numbers or other human-used identifiers for the same Occurrence, whether in the current or any other data set or collection.}
#' \item{occurrenceID}{An identifier for the Occurrence (as opposed to a particular digital record of the occurrence). In the absence of a persistent global unique identifier, construct one from a combination of identifiers in the record that will most closely make the occurrenceID globally unique.}
#' \item{taxonKey}{The GBIF-assigned taxon identifier number.}
#' \item{collectionID}{An identifier for the collection or dataset from which the record was derived.}
#' \item{verbatimScientificName}{Scientific name as recorded on specimen label, not necessarily valid.}
#' \item{verbatimEventDate}{The verbatim original representation of the date and time information for an event. For occurrences, this is the date-time when the event was recorded as noted by the collector.}
#' \item{associatedTaxa}{A list (concatenated and separated) of identifiers or names of taxa and the associations of this occurrence to each of them.}
#' \item{associatedOrganisms}{A list (concatenated and separated) of identifiers of other Organisms and the associations of this occurrence to each of them.}
#' \item{fieldNotes}{One of (a) an indicator of the existence of, (b) a reference to (publication, URI), or (c) the text of notes taken in the field about the Event.}
#' \item{sex}{The sex of the biological individual(s) represented in the Occurrence.}
#' \item{rights}{A description of the usage rights applicable to the record.}
#' \item{rightsHolder}{A person or organization owning or managing rights over the resource.}
#' \item{accessRights}{Information about who can access the resource or an indication of its security status.}
#' \item{associatedReferences}{A list (concatenated and separated) of identifiers (publication, bibliographic reference, global unique identifier, URI) of literature associated with the Occurrence.}
#' \item{bibliographicCitation}{A bibliographic reference for the resource as a statement indicating how this record should be cited (attributed) when used.}
#' \item{references}{A related resource that is referenced, cited, or otherwise pointed to by the described resource.}
#' \item{informationWithheld}{Additional information that exists, but that has not been shared in the given record.}
#' \item{isDuplicateOf}{The code for another occurrence of the same specimen.}
#' \item{hasCoordinate}{Variable indicating presence/absence of location coordinates.}
#' \item{hasGeospatialIssues}{Variable indicating validity of geospatial data associated with record.}
#' \item{occurrenceYear}{Year associated with Occurrence.}
#' \item{id}{Variable with an identifying value for the Occurrence.}
#' \item{duplicateStatus}{Variable indicating whether the Occurrence is a duplicate or not.}
#' \item{associatedOccurrences}{A list (concatenated and separated) of identifiers of other occurrence records and their associations to this occurrence.}
#' \item{locationRemarks}{Comments or notes about the Location.}
#' \item{dataSource}{BeeBDC assigned source of the data. Often written when the data is formatted by a BeeBDC::xxx_readr function or similar.}
#' \item{verbatim_scientificName}{The verbatim (originally-provided) scientific name}
#' \item{.scientificName_empty}{Flag produced by [bdc::bdc_scientificName_empty()] where FALSE == no scientific name provided and TRUE means that there is text in that column.}
#' \item{.coordinates_empty}{Flag produced by [bdc::bdc_coordinates_empty()] where FALSE == no coordinates provided.}
#' \item{.coordinates_outOfRange}{Flag column produced by bdc::bdc_coordinates_outOfRange() where FALSE == coordinates represent a point off of the Earth. This is to say, the function identifies records with out-of-range coordinates (not between -90 and 90 for latitude; not between -180 and 180 for longitude).}
#' \item{.basisOfRecords_notStandard}{Flag produced by [bdc::bdc_basisOfRecords_notStandard()] where FALSE == an occurrence with a basisOfRecord not defined as acceptable by the user.}
#' \item{country_suggested}{A country name suggested by the [bdc::bdc_country_standardized()] function.}
#' \item{countryCode}{A country code suggested by the [bdc::bdc_country_standardized()] function.}
#' \item{coordinates_transposed}{A column indicating if coordinates were identified as being transposed by the function [BeeBDC::jbd_Ctrans_chunker()] where FALSE == transposed.}
#' \item{.coordinates_country_inconsistent}{A flag generated by [BeeBDC::jbd_coordCountryInconsistent()] where FALSE == an occurrence where the country name and coordinates did not match.}
#' \item{.occurrenceAbsent}{A flag generated by [BeeBDC::flagAbsent()] where FALSE == occurrences marked as "ABSENT" in the "occurrenceStatus" column}
#' \item{.unLicensed}{A flag generated by [BeeBDC::flagLicense()] where FALSE == those occurrences protected by a restrictive license.}
#' \item{.GBIFflags}{A flag generated by [BeeBDC::GBIFissues()] where FALSE == an occurrence with user-specified GBIF issues to flag.}
#' \item{.uncer_terms}{A flag generated by [bdc::bdc_clean_names()] where FALSE == the presence of taxonomic uncertainty terms.}
#' \item{names_clean}{A column made by [bdc::bdc_clean_names()] indicating the cleaned scientificName}
#' \item{.invalidName}{A flag generated by [BeeBDC::harmoniseR()] where FALSE == occurrences whose scientificName did not match the Discover Life taxonomy.}
#' \item{.rou}{A flag generated by [CoordinateCleaner::clean_coordinates()] where FALSE == rounded (probably imprecise) coordinates.}
#' \item{.val}{A flag generated by [CoordinateCleaner::clean_coordinates()] where FALSE == invalid coordinates.}
#' \item{.equ}{A flag generated by [CoordinateCleaner::clean_coordinates()] where FALSE == equal coordinates (e.g., 0.1, 0.1).}
#' \item{.zer}{A flag generated by [CoordinateCleaner::clean_coordinates()] where FALSE == zeros as coordinates}
#' \item{.cap}{A flag generated by [CoordinateCleaner::clean_coordinates()] where FALSE == records around country capital centroid.}
#' \item{.cen}{A flag generated by [CoordinateCleaner::clean_coordinates()] where FALSE == records around country or province centroids.}
#' \item{.gbf}{A flag generated by [CoordinateCleaner::clean_coordinates()] where FALSE == records around the GBIF headquarters.}
#' \item{.inst}{A flag generated by [CoordinateCleaner::clean_coordinates()] where FALSE == records around biodiversity institutions.}
#' \item{.sequential}{A flag generated by [BeeBDC::diagonAlley()] where FALSE == records that are possibly the result of fill-down errors in sequence.}
#' \item{.lonFlag}{A flag generated by [CoordinateCleaner::cd_round()] where FALSE == potential gridding in the longitude column within dataset.}
#' \item{.latFlag}{A flag generated by [CoordinateCleaner::cd_round()] where FALSE == potential gridding in the latitude column within dataset.}
#' \item{.gridSummary}{A flag generated by [CoordinateCleaner::cd_round()] where FALSE == potential gridding in either the longitude or latitude columns within dataset.}
#' \item{.uncertaintyThreshold}{A flag generated by [BeeBDC::coordUncerFlagR()] where FALSE == occurrences that did not pass a user-specified threshold in the "coordinateUncertaintyInMeters" column.}
#' \item{countryMatch}{A column made by [BeeBDC::countryOutlieRs()]. Summarises the occurrence-level result: where the species is not known to occur in that country (noMatch), it is known from a bordering country (neighbour), or it is known to occur in that country (exact).}
#' \item{.countryOutlier}{A flag generated by [BeeBDC::countryOutlieRs()] where FALSE == occurrences that do not occur in a country that concurs with the Discover Life country checklist OR an adjacent country.}
#' \item{.sea}{A flag generated by [BeeBDC::countryOutlieRs()] where FALSE == occurrences that are in the ocean.}
#' \item{.summary}{A flag generated by [BeeBDC::summaryFun()] where FALSE == occurrences flagged as FALSE in any of the .flag columns. In this example it excludes flags in the ".gridSummary", ".lonFlag", ".latFlag", and ".uncer_terms" columns.}
#' \item{.eventDate_empty}{A flag generated by [bdc::bdc_eventDate_empty()] where FALSE == occurrences with no eventDate provided.}
#' \item{.year_outOfRange}{A flag column generated by [bdc::bdc_year_outOfRange()] where FALSE == occurrences older than a threshold date. In the case of the bee dataset used in this package, the lower threshold is 1950.}
#' \item{.duplicates}{A flag generated by [BeeBDC::dupeSummary()] where FALSE == occurrences identified as duplicates. There will be an associated kept duplicate (.duplicates == TRUE) for all duplicate clusters.}
#' }
#' @references This data set was created by generating a random subset of 105 rows from the full BeeBDC dataset from the publication:
#' DOREY, J. B., CHESSHIRE, P. R., BOLAÑOS, A. N., O’REILLY, R. L., BOSSERT, S., COLLINS, S. M., LICHTENBERG, E. M., TUCKER, E., SMITH-PARDO, A., FALCON-BRINDIS, A., GUEVARA, D. A., RIBEIRO, B. R., DE PEDRO, D., FISCHER, E., HUNG, J. K.-L., PARYS, K. A., ROGAN, M. S., MINCKLEY, R. L., VELZCO, S. J. E., GRISWOLD, T., ZARRILLO, T. A., SICA, Y., ORR, M. C., GUZMAN, L. M., ASCHER, J., HUGHES, A. C. & COBB, N. S. In review. A globally synthesised and flagged bee occurrence dataset and cleaning workflow. Scientific Data.
#' @keywords datasets
#' @examples
#'
#' bees3sp <- BeeBDC::bees3sp
#' head(bees3sp)
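#'
#' # A minimal sketch: tally how many records pass (TRUE) or fail (FALSE) the combined
#' # .summary flag documented above
#' table(bees3sp$.summary)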
#'
"bees3sp"
|
/scratch/gouwar.j/cran-all/cranData/BeeBDC/R/toyData_bees3sp.R
|
# Description of the BeeBDC toy dataset beesFlagged.csv written by James B. Dorey on the
# 16th of March 2023
#' A flagged dataset of 100 random bee occurrence records
#'
#' A small bee occurrence dataset with flags generated by BeeBDC, used to run example scripts and test
#' functions. For data types, see [BeeBDC::ColTypeR()].
#'
#' @docType data
#'
#' @usage data("beesFlagged", package = "BeeBDC")
#'
#' @format An object of class \code{"tibble"}
#' \describe{
#' \item{database_id}{Occurrence code generated in bdc or BeeBDC}
#' \item{scientificName}{Full scientificName as shown on DiscoverLife}
#' \item{family}{Family name}
#' \item{subfamily}{Subfamily name}
#' \item{genus}{Genus name}
#' \item{subgenus}{Subgenus name}
#' \item{subspecies}{Full name with subspecies name — ALA column}
#' \item{specificEpithet}{The species name only}
#' \item{infraspecificEpithet}{The subspecies name only}
#' \item{acceptedNameUsage}{The full name, with authorship and date information if known, of the currently valid (zoological) or accepted (botanical) taxon.}
#' \item{taxonRank}{The taxonomic rank of the most specific name in the scientificName.}
#' \item{scientificNameAuthorship}{The authorship information for the scientificName formatted according to the conventions of the applicable nomenclaturalCode.}
#' \item{identificationQualifier}{A brief phrase or a standard term ("cf.", "aff.") to express the determiner's doubts about the Identification.}
#' \item{higherClassification}{A list (concatenated and separated) of taxa names terminating at the rank immediately superior to the taxon referenced in the taxon record.)}
#' \item{identificationReferences}{A list (concatenated and separated) of references (publication, global unique identifier, URI) used in the Identification.}
#' \item{typeStatus}{A list (concatenated and separated) of nomenclatural types (type status, typified scientific name, publication) applied to the subject.}
#' \item{previousIdentifications}{A list (concatenated and separated) of previous assignments of names to the Organism.}
#' \item{verbatimIdentification}{This term is meant to allow the capture of an unaltered original identification/determination, including identification qualifiers, hybrid formulas, uncertainties, etc. This term is meant to be used in addition to scientificName (and identificationQualifier etc.), not instead of it.}
#' \item{identifiedBy}{A list (concatenated and separated) of names of people, groups, or organizations who assigned the Taxon to the subject.}
#' \item{dateIdentified}{The date on which the subject was determined as representing the Taxon.}
#' \item{decimalLatitude}{The geographic latitude (in decimal degrees, using the spatial reference system given in geodeticDatum) of the geographic center of a Location. Positive values are north of the Equator, negative values are south of it. Legal values lie between -90 and 90, inclusive.}
#' \item{decimalLongitude}{The geographic longitude (in decimal degrees, using the spatial reference system given in geodeticDatum) of the geographic center of a Location. Positive values are east of the Greenwich Meridian, negative values are west of it. Legal values lie between -180 and 180, inclusive.}
#' \item{stateProvince}{The name of the next smaller administrative region than country (state, province, canton, department, region, etc.) in which the Location occurs.}
#' \item{continent}{The name of the continent in which the Location occurs.}
#' \item{locality}{The specific description of the place.}
#' \item{island}{The name of the island on or near which the Location occurs.}
#' \item{county}{The full, unabbreviated name of the next smaller administrative region than stateProvince (county, shire, department, etc.) in which the Location occurs.}
#' \item{municipality}{The full, unabbreviated name of the next smaller administrative region than county (city, municipality, etc.) in which the Location occurs. Do not use this term for a nearby named place that does not contain the actual location.}
#' \item{license}{A legal document giving official permission to do something with the resource.}
#' \item{issue}{A GBIF-defined issue.}
#' \item{eventDate}{The date-time or interval during which an Event occurred. For occurrences, this is the date-time when the event was recorded. Not suitable for a time in a geological context.}
#' \item{eventTime}{The time or interval during which an Event occurred.}
#' \item{day}{The integer day of the month on which the Event occurred.}
#' \item{month}{The integer month in which the Event occurred.}
#' \item{year}{The four-digit year in which the Event occurred, according to the Common Era Calendar.}
#' \item{basisOfRecord}{The specific nature of the data record. Recommended best practice is to use the standard label of one of the Darwin Core classes: PreservedSpecimen, FossilSpecimen, LivingSpecimen, MaterialSample, Event, HumanObservation, MachineObservation, Taxon, Occurrence, MaterialCitation.}
#' \item{country}{The name of the country or major administrative unit in which the Location occurs.}
#' \item{type}{The nature or genre of the resource. StillImage, MovingImage, Sound, PhysicalObject, Event, Text.}
#' \item{occurrenceStatus}{A statement about the presence or absence of a Taxon at a Location. present, absent.}
#' \item{recordNumber}{An identifier given to the Occurrence at the time it was recorded. Often serves as a link between field notes and an Occurrence record, such as a specimen collector's number.}
#' \item{recordedBy}{A list (concatenated and separated) of names of people, groups, or organizations responsible for recording the original Occurrence. The primary collector or observer, especially one who applies a personal identifier (recordNumber), should be listed first.}
#' \item{eventID}{An identifier for the set of information associated with an Event (something that occurs at a place and time). May be a global unique identifier or an identifier specific to the data set.}
#' \item{Location}{A spatial region or named place.}
#' \item{samplingProtocol}{The names of, references to, or descriptions of the methods or protocols used during an Event. Examples UV light trap, mist net, bottom trawl, ad hoc observation | point count, Penguins from space: faecal stains reveal the location of emperor penguin colonies, https://doi.org/10.1111/j.1466-8238.2009.00467.x, Takats et al. 2001.}
#' \item{samplingEffort}{The amount of effort expended during an Event. Examples 40 trap-nights, 10 observer-hours, 10 km by foot, 30 km by car.}
#' \item{individualCount}{The number of individuals present at the time of the Occurrence. Integer.}
#' \item{organismQuantity}{A number or enumeration value for the quantity of organisms. Examples 27 (organismQuantity) with individuals (organismQuantityType). 12.5 (organismQuantity) with percentage biomass (organismQuantityType). r (organismQuantity) with Braun Blanquet Scale (organismQuantityType). many (organismQuantity) with individuals (organismQuantityType).}
#' \item{coordinatePrecision}{A decimal representation of the precision of the coordinates given in the decimalLatitude and decimalLongitude.}
#' \item{coordinateUncertaintyInMeters}{The horizontal distance (in meters) from the given decimalLatitude and decimalLongitude describing the smallest circle containing the whole of the Location. Leave the value empty if the uncertainty is unknown, cannot be estimated, or is not applicable (because there are no coordinates). Zero is not a valid value for this term.}
#' \item{spatiallyValid}{Occurrence records in the ALA can be filtered by using the spatially valid flag. This flag combines a set of tests applied to the record to see how reliable its spatial data components are.}
#' \item{catalogNumber}{An identifier (preferably unique) for the record within the data set or collection.}
#' \item{gbifID}{The identifier assigned by GBIF for each record.}
#' \item{datasetID}{An identifier for the set of data. May be a global unique identifier or an identifier specific to a collection or institution.}
#' \item{institutionCode}{The name (or acronym) in use by the institution having custody of the object(s) or information referred to in the record. Examples MVZ, FMNH, CLO, UCMP.}
#' \item{datasetName}{The name identifying the data set from which the record was derived.}
#' \item{otherCatalogNumbers}{A list (concatenated and separated) of previous or alternate fully qualified catalog numbers or other human-used identifiers for the same Occurrence, whether in the current or any other data set or collection.}
#' \item{occurrenceID}{An identifier for the Occurrence (as opposed to a particular digital record of the occurrence). In the absence of a persistent global unique identifier, construct one from a combination of identifiers in the record that will most closely make the occurrenceID globally unique.}
#' \item{taxonKey}{The GBIF-assigned taxon identifier number.}
#' \item{collectionID}{An identifier for the collection or dataset from which the record was derived.}
#' \item{verbatim_scientificName}{The verbatim (originally-provided) scientific name}
#' \item{verbatimEventDate}{The verbatim original representation of the date and time information for an Event.}
#' \item{associatedTaxa}{A list (concatenated and separated) of identifiers or names of taxa and the associations of this Occurrence to each of them.}
#' \item{associatedOrganisms}{A list (concatenated and separated) of identifiers of other Organisms and the associations of this Organism to each of them.}
#' \item{fieldNotes}{One of a) an indicator of the existence of, b) a reference to (publication, URI), or c) the text of notes taken in the field about the Event.}
#' \item{sex}{The sex of the biological individual(s) represented in the Occurrence.}
#' \item{rights}{A description of the usage rights applicable to the record.}
#' \item{rightsHolder}{A person or organization owning or managing rights over the resource.}
#' \item{accessRights}{Information about who can access the resource or an indication of its security status.}
#' \item{associatedReferences}{A list (concatenated and separated) of identifiers (publication, bibliographic reference, global unique identifier, URI) of literature associated with the Occurrence.}
#' \item{bibliographicCitation}{A bibliographic reference for the resource as a statement indicating how this record should be cited (attributed) when used.}
#' \item{references}{A related resource that is referenced, cited, or otherwise pointed to by the described resource.}
#' \item{informationWithheld}{Additional information that exists, but that has not been shared in the given record.}
#' \item{isDuplicateOf}{The code for another occurrence of the same specimen.}
#' \item{hasCoordinate}{Variable indicating presence/absence of location coordinates.}
#' \item{hasGeospatialIssues}{Variable indicating validity of geospatial data associated with record.}
#' \item{occurrenceYear}{Year associated with Occurrence.}
#' \item{id}{Variable with an identifying value for the Occurrence.}
#' \item{duplicateStatus}{Variable indicating whether the Occurrence is a duplicate or not.}
#' \item{associatedOccurrences}{A list (concatenated and separated) of identifiers of other Occurrence records and their associations to this Occurrence.}
#' \item{locationRemarks}{Comments or notes about the Location.}
#' \item{dataSource}{BeeBDC assigned source of the data. Often written when the data is formatted by a BeeBDC::xxx_readr function or similar.}
#' \item{verbatim_scientificName}{The verbatim (originally-provided) scientific name}
#' \item{.scientificName_empty}{Flag produced by [bdc::bdc_scientificName_empty()] where FALSE == no scientific name provided and TRUE means that there is text in that column.}
#' \item{.coordinates_empty}{Flag produced by [bdc::bdc_coordinates_empty()] where FALSE == no coordinates provided.}
#' \item{.coordinates_outOfRange}{Flag produced by [bdc::bdc_coordinates_outOfRange()] where FALSE == a point off the Earth. This function identifies records with out-of-range coordinates (not between -90 and 90 for latitude; not between -180 and 180 for longitude).}
#' \item{.basisOfRecords_notStandard}{Flag produced by [bdc::bdc_basisOfRecords_notStandard()] where FALSE == an occurrence with a basisOfRecord not defined as acceptable by the user.}
#' \item{country_suggested}{A country name suggested by the [bdc::bdc_country_standardized()] function.}
#' \item{countryCode}{A country code suggested by the [bdc::bdc_country_standardized()] function.}
#' \item{coordinates_transposed}{A column indicating if coordinates were transposed by [BeeBDC::jbd_Ctrans_chunker()] where FALSE == transposed.}
#' \item{.coordinates_country_inconsistent}{A flag generated by [BeeBDC::jbd_coordCountryInconsistent()] where FALSE == an occurrence where the country name and coordinates did not match.}
#' \item{.occurrenceAbsent}{A flag generated by [BeeBDC::flagAbsent()] where FALSE == occurrences marked as "ABSENT" in the "occurrenceStatus" column}
#' \item{.unLicensed}{A flag generated by [BeeBDC::flagLicense()] where FALSE == those occurrences protected by a restrictive license.}
#' \item{.GBIFflags}{A flag generated by [BeeBDC::GBIFissues()] where FALSE == an occurrence with user-specified GBIF issues to flag.}
#' \item{.uncer_terms}{A flag generated by [bdc::bdc_clean_names()] where FALSE == the presence of taxonomic uncertainty terms.}
#' \item{names_clean}{A column made by [bdc::bdc_clean_names()] indicating the cleaned scientificName}
#' \item{.invalidName}{A flag generated by [BeeBDC::harmoniseR()] where FALSE == occurrences whose scientificName did not match the Discover Life taxonomy.}
#' \item{.rou}{A flag generated by [CoordinateCleaner::clean_coordinates()] where FALSE == rounded (probably imprecise) coordinates.}
#' \item{.val}{A flag generated by [CoordinateCleaner::clean_coordinates()] where FALSE == invalid coordinates.}
#' \item{.equ}{A flag generated by [CoordinateCleaner::clean_coordinates()] where FALSE == equal coordinates (e.g., 0.1, 0.1).}
#' \item{.zer}{A flag generated by [CoordinateCleaner::clean_coordinates()] where FALSE == zeros as coordinates}
#' \item{.cap}{A flag generated by [CoordinateCleaner::clean_coordinates()] where FALSE == records around country capital centroid.}
#' \item{.cen}{A flag generated by [CoordinateCleaner::clean_coordinates()] where FALSE == records around country or province centroids.}
#' \item{.gbf}{A flag generated by [CoordinateCleaner::clean_coordinates()] where FALSE == records around the GBIF headquarters.}
#' \item{.inst}{A flag generated by [CoordinateCleaner::clean_coordinates()] where FALSE == records around biodiversity institutions.}
#' \item{.sequential}{A flag generated by [BeeBDC::diagonAlley()] where FALSE == records that are possibly the result of fill-down errors in sequence.}
#' \item{.lonFlag}{A flag generated by [CoordinateCleaner::cd_round()] where FALSE == potential gridding in the longitude column within dataset.}
#' \item{.latFlag}{A flag generated by [CoordinateCleaner::cd_round()] where FALSE == potential gridding in the latitude column within dataset.}
#' \item{.gridSummary}{A flag generated by [CoordinateCleaner::cd_round()] where FALSE == potential gridding in either the longitude or latitude columns within dataset.}
#' \item{.uncertaintyThreshold}{A flag generated by [BeeBDC::coordUncerFlagR()] where FALSE == occurrences that did not pass a user-specified threshold in the "coordinateUncertaintyInMeters" column.}
#' \item{countryMatch}{A column made by [BeeBDC::countryOutlieRs()]. Summarises the occurrence-level result: where the species is not known to occur in that country (noMatch), it is known from a bordering country (neighbour), or it is known to occur in that country (exact).}
#' \item{.countryOutlier}{A flag generated by [BeeBDC::countryOutlieRs()] where FALSE == occurrences that do not occur in a country that concurs with the Discover Life country checklist OR an adjacent country.}
#' \item{.sea}{A flag generated by [BeeBDC::countryOutlieRs()] where FALSE == occurrences that are in the ocean.}
#' \item{.summary}{A flag generated by [BeeBDC::summaryFun()] where FALSE == occurrences flagged as FALSE in any of the .flag columns. In this example it excludes flags in the ".gridSummary", ".lonFlag", ".latFlag", and ".uncer_terms" columns.}
#' \item{.eventDate_empty}{A flag generated by [bdc::bdc_eventDate_empty()] where FALSE == occurrences with no eventDate provided.}
#' \item{.year_outOfRange}{A flag generated by [bdc::bdc_year_outOfRange()] where FALSE == occurrences older than a threshold date. In this case 1950.}
#' \item{.duplicates}{A flag generated by [BeeBDC::dupeSummary()] where FALSE == occurrences identified as duplicates. There will be an associated kept duplicate (.duplicates == TRUE) for all duplicate clusters.}
#' }
#' @references This data set was created by generating a random subset of 100 rows from the full BeeBDC dataset from the publication:
#' DOREY, J. B., CHESSHIRE, P. R., BOLAÑOS, A. N., O’REILLY, R. L., BOSSERT, S., COLLINS, S. M., LICHTENBERG, E. M., TUCKER, E., SMITH-PARDO, A., FALCON-BRINDIS, A., GUEVARA, D. A., RIBEIRO, B. R., DE PEDRO, D., FISCHER, E., HUNG, J. K.-L., PARYS, K. A., ROGAN, M. S., MINCKLEY, R. L., VELZCO, S. J. E., GRISWOLD, T., ZARRILLO, T. A., SICA, Y., ORR, M. C., GUZMAN, L. M., ASCHER, J., HUGHES, A. C. & COBB, N. S. In review. A globally synthesised and flagged bee occurrence dataset and cleaning workflow. Scientific Data.
#' @keywords datasets
#' @examples
#'
#' beesFlagged <- BeeBDC::beesFlagged
#' head(beesFlagged)
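#'
#' # A minimal sketch: keep only records that passed every applied filter
#' # (.summary == TRUE), using base R so no extra packages are required
#' beesPassed <- beesFlagged[beesFlagged$.summary %in% TRUE, ]
#' nrow(beesPassed)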
#'
"beesFlagged"
|
/scratch/gouwar.j/cran-all/cranData/BeeBDC/R/toyData_beesFlagged.R
|
# Description of the BeeBDC toy dataset beesRaw.csv written by James B. Dorey on the
# 16th of March 2023
#' A dataset of 100 random bee occurrence records without flags or filters applied
#'
#' A small bee occurrence dataset without BeeBDC flags or filters, used to run example scripts and test
#' functions. For data types, see [BeeBDC::ColTypeR()].
#'
#' @docType data
#'
#' @usage data("beesRaw", package = "BeeBDC")
#'
#' @format An object of class \code{"tibble"}
#' \describe{
#' \item{database_id}{Occurrence code generated in bdc or BeeBDC}
#' \item{scientificName}{Full scientificName as shown on DiscoverLife}
#' \item{family}{Family name}
#' \item{subfamily}{Subfamily name}
#' \item{genus}{Genus name}
#' \item{subgenus}{Subgenus name}
#' \item{subspecies}{Full name with subspecies name — ALA column}
#' \item{specificEpithet}{The species name only}
#' \item{infraspecificEpithet}{The subspecies name only}
#' \item{acceptedNameUsage}{The full name, with authorship and date information if known, of the currently valid (zoological) or accepted (botanical) taxon.}
#' \item{taxonRank}{The taxonomic rank of the most specific name in the scientificName.}
#' \item{scientificNameAuthorship}{The authorship information for the scientificName formatted according to the conventions of the applicable nomenclaturalCode.}
#' \item{identificationQualifier}{A brief phrase or a standard term ("cf.", "aff.") to express the determiner's doubts about the Identification.}
#' \item{higherClassification}{A list (concatenated and separated) of taxa names terminating at the rank immediately superior to the taxon referenced in the taxon record.)}
#' \item{identificationReferences}{A list (concatenated and separated) of references (publication, global unique identifier, URI) used in the Identification.}
#' \item{typeStatus}{A list (concatenated and separated) of nomenclatural types (type status, typified scientific name, publication) applied to the subject.}
#' \item{previousIdentifications}{A list (concatenated and separated) of previous assignments of names to the Organism.}
#' \item{verbatimIdentification}{This term is meant to allow the capture of an unaltered original identification/determination, including identification qualifiers, hybrid formulas, uncertainties, etc. This term is meant to be used in addition to scientificName (and identificationQualifier etc.), not instead of it.}
#' \item{identifiedBy}{A list (concatenated and separated) of names of people, groups, or organizations who assigned the Taxon to the subject.}
#' \item{dateIdentified}{The date on which the subject was determined as representing the Taxon.}
#' \item{decimalLatitude}{The geographic latitude (in decimal degrees, using the spatial reference system given in geodeticDatum) of the geographic center of a Location. Positive values are north of the Equator, negative values are south of it. Legal values lie between -90 and 90, inclusive.}
#' \item{decimalLongitude}{The geographic longitude (in decimal degrees, using the spatial reference system given in geodeticDatum) of the geographic center of a Location. Positive values are east of the Greenwich Meridian, negative values are west of it. Legal values lie between -180 and 180, inclusive.}
#' \item{stateProvince}{The name of the next smaller administrative region than country (state, province, canton, department, region, etc.) in which the Location occurs.}
#' \item{continent}{The name of the continent in which the Location occurs.}
#' \item{locality}{The specific description of the place.}
#' \item{island}{The name of the island on or near which the Location occurs.}
#' \item{county}{The full, unabbreviated name of the next smaller administrative region than stateProvince (county, shire, department, etc.) in which the Location occurs.}
#' \item{municipality}{The full, unabbreviated name of the next smaller administrative region than county (city, municipality, etc.) in which the Location occurs. Do not use this term for a nearby named place that does not contain the actual location.}
#' \item{license}{A legal document giving official permission to do something with the resource.}
#' \item{issue}{A GBIF-defined issue.}
#' \item{eventDate}{The date-time or interval during which an Event occurred. For occurrences, this is the date-time when the event was recorded. Not suitable for a time in a geological context.}
#' \item{eventTime}{The time or interval during which an Event occurred.}
#' \item{day}{The integer day of the month on which the Event occurred.}
#' \item{month}{The integer month in which the Event occurred.}
#' \item{year}{The four-digit year in which the Event occurred, according to the Common Era Calendar.}
#' \item{basisOfRecord}{The specific nature of the data record. Recommended best practice is to use the standard label of one of the Darwin Core classes: PreservedSpecimen, FossilSpecimen, LivingSpecimen, MaterialSample, Event, HumanObservation, MachineObservation, Taxon, Occurrence, MaterialCitation.}
#' \item{country}{The name of the country or major administrative unit in which the Location occurs.}
#' \item{type}{The nature or genre of the resource. StillImage, MovingImage, Sound, PhysicalObject, Event, Text.}
#' \item{occurrenceStatus}{A statement about the presence or absence of a Taxon at a Location. present, absent.}
#' \item{recordNumber}{An identifier given to the Occurrence at the time it was recorded. Often serves as a link between field notes and an Occurrence record, such as a specimen collector's number.}
#' \item{recordedBy}{A list (concatenated and separated) of names of people, groups, or organizations responsible for recording the original Occurrence. The primary collector or observer, especially one who applies a personal identifier (recordNumber), should be listed first.}
#' \item{eventID}{An identifier for the set of information associated with an Event (something that occurs at a place and time). May be a global unique identifier or an identifier specific to the data set.}
#' \item{Location}{A spatial region or named place.}
#' \item{samplingProtocol}{The names of, references to, or descriptions of the methods or protocols used during an Event. Examples UV light trap, mist net, bottom trawl, ad hoc observation | point count, Penguins from space: faecal stains reveal the location of emperor penguin colonies, https://doi.org/10.1111/j.1466-8238.2009.00467.x, Takats et al. 2001.}
#' \item{samplingEffort}{The amount of effort expended during an Event. Examples 40 trap-nights, 10 observer-hours, 10 km by foot, 30 km by car.}
#' \item{individualCount}{The number of individuals present at the time of the Occurrence. Integer.}
#' \item{organismQuantity}{A number or enumeration value for the quantity of organisms. Examples 27 (organismQuantity) with individuals (organismQuantityType). 12.5 (organismQuantity) with percentage biomass (organismQuantityType). r (organismQuantity) with Braun Blanquet Scale (organismQuantityType). many (organismQuantity) with individuals (organismQuantityType).}
#' \item{coordinatePrecision}{A decimal representation of the precision of the coordinates given in the decimalLatitude and decimalLongitude.}
#' \item{coordinateUncertaintyInMeters}{The horizontal distance (in meters) from the given decimalLatitude and decimalLongitude describing the smallest circle containing the whole of the Location. Leave the value empty if the uncertainty is unknown, cannot be estimated, or is not applicable (because there are no coordinates). Zero is not a valid value for this term.}
#' \item{spatiallyValid}{Occurrence records in the ALA can be filtered by using the spatially valid flag. This flag combines a set of tests applied to the record to see how reliable its spatial data components are.}
#' \item{catalogNumber}{An identifier (preferably unique) for the record within the data set or collection.}
#' \item{gbifID}{The identifier assigned by GBIF for each record.}
#' \item{datasetID}{An identifier for the set of data. May be a global unique identifier or an identifier specific to a collection or institution.}
#' \item{institutionCode}{The name (or acronym) in use by the institution having custody of the object(s) or information referred to in the record. Examples MVZ, FMNH, CLO, UCMP.}
#' \item{datasetName}{The name identifying the data set from which the record was derived.}
#' \item{otherCatalogNumbers}{A list (concatenated and separated) of previous or alternate fully qualified catalog numbers or other human-used identifiers for the same Occurrence, whether in the current or any other data set or collection.}
#' \item{occurrenceID}{An identifier for the Occurrence (as opposed to a particular digital record of the occurrence). In the absence of a persistent global unique identifier, construct one from a combination of identifiers in the record that will most closely make the occurrenceID globally unique.}
#' \item{taxonKey}{The GBIF-assigned taxon identifier number.}
#' \item{collectionID}{An identifier for the collection or dataset from which the record was derived.}
#' \item{verbatim_scientificName}{The verbatim (originally-provided) scientific name}
#' \item{verbatimEventDate}{The verbatim original representation of the date and time information for an Event.}
#' \item{associatedTaxa}{A list (concatenated and separated) of identifiers or names of taxa and the associations of this Occurrence to each of them.}
#' \item{associatedOrganisms}{A list (concatenated and separated) of identifiers of other Organisms and the associations of this Organism to each of them.}
#' \item{fieldNotes}{One of a) an indicator of the existence of, b) a reference to (publication, URI), or c) the text of notes taken in the field about the Event.}
#' \item{sex}{The sex of the biological individual(s) represented in the Occurrence.}
#' \item{rights}{A description of the usage rights applicable to the record.}
#' \item{rightsHolder}{A person or organization owning or managing rights over the resource.}
#' \item{accessRights}{Information about who can access the resource or an indication of its security status.}
#' \item{associatedReferences}{A list (concatenated and separated) of identifiers (publication, bibliographic reference, global unique identifier, URI) of literature associated with the Occurrence.}
#' \item{bibliographicCitation}{A bibliographic reference for the resource as a statement indicating how this record should be cited (attributed) when used.}
#' \item{references}{A related resource that is referenced, cited, or otherwise pointed to by the described resource.}
#' \item{informationWithheld}{Additional information that exists, but that has not been shared in the given record.}
#' \item{isDuplicateOf}{The code for another occurrence of the same specimen.}
#' \item{hasCoordinate}{Variable indicating presence/absence of location coordinates.}
#' \item{hasGeospatialIssues}{Variable indicating validity of geospatial data associated with record.}
#' \item{occurrenceYear}{Year associated with Occurrence.}
#' \item{id}{Variable with an identifying value for the Occurrence.}
#' \item{duplicateStatus}{Variable indicating whether the Occurrence is a duplicate or not.}
#' \item{associatedOccurrences}{A list (concatenated and separated) of identifiers of other Occurrence records and their associations to this Occurrence.}
#' \item{locationRemarks}{Comments or notes about the Location.}
#' \item{dataSource}{BeeBDC assigned source of the data. Often written when the data is formatted by a BeeBDC::xxx_readr function or similar.}
#' \item{verbatim_scientificName}{The verbatim (originally-provided) scientific name}
#' }
#' @references This data set was created by generating a random subset of 100 rows from the full, unfiltered and unflagged, BeeBDC dataset from the publication:
#' DOREY, J. B., CHESSHIRE, P. R., BOLAÑOS, A. N., O’REILLY, R. L., BOSSERT, S., COLLINS, S. M., LICHTENBERG, E. M., TUCKER, E., SMITH-PARDO, A., FALCON-BRINDIS, A., GUEVARA, D. A., RIBEIRO, B. R., DE PEDRO, D., FISCHER, E., HUNG, J. K.-L., PARYS, K. A., ROGAN, M. S., MINCKLEY, R. L., VELZCO, S. J. E., GRISWOLD, T., ZARRILLO, T. A., SICA, Y., ORR, M. C., GUZMAN, L. M., ASCHER, J., HUGHES, A. C. & COBB, N. S. In review. A globally synthesised and flagged bee occurrence dataset and cleaning workflow. Scientific Data.
#' @keywords datasets
#' @examples
#'
#' beesRaw <- BeeBDC::beesRaw
#' head(beesRaw)
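#'
#' # A minimal sketch: tally the raw records by their BeeBDC-assigned data source
#' table(beesRaw$dataSource)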
#'
"beesRaw"
|
/scratch/gouwar.j/cran-all/cranData/BeeBDC/R/toyData_beesRaw.R
|
check_require_cran <- function(pkg) {
if (!requireNamespace(pkg, quietly = TRUE)) {
stop(paste0("Package `", pkg, "` needed for this function to work! Please, install it with: `install.packages(\"", pkg, "\")`"), call. = FALSE)
} else {
require(pkg, character.only = TRUE)
}
}
check_require_github <- function(pkg) {
userpkg <- pkg
pkg <- basename(pkg)
if (!requireNamespace(pkg, quietly = TRUE)) {
stop(paste0("Package `", pkg, "` needed for this function to work! Please, install it with: remotes::install_github(\"", userpkg, "\")"), call. = FALSE)
} else {
require(pkg, character.only = TRUE)
}
}
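
# Example usage (a minimal sketch, kept as comments so nothing is installed or attached at
# build time; the package names below are purely illustrative, not real dependencies):
# check_require_cran("ggspatial")          # stops with an install.packages() hint if missing
# check_require_github("someUser/somePkg") # hypothetical repo spec in user/package form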
|
/scratch/gouwar.j/cran-all/cranData/BeeBDC/R/utils.R
|
# This R script was written by James Dorey, starting on the 2nd of May 2022. The script intends
# to clean occurrence data from several sources using a combination of custom functions and
# functions from the "bdc" package.
# For queries, please feel free to contact James Dorey at [email protected]
# WHAT TO ADD? You can list things here if you see the need.
#### 0.0 Script preparation ####
##### 0.1 Working directory ####
# Choose the path to the root folder in which all other folders can be found (or made by dirMaker)
RootPath <- "/Users/jamesdorey/Desktop/Uni/My_papers/Bee_SDM_paper"
# Read in this initial function
source(paste(RootPath, "/BDC_repo/NewFunctions/dirMaker.R", sep = ""))
library(magrittr)
# Create file paths and prepare for what's to come
dirMaker(
RootPath = RootPath,
# Input the location of the workflow script RELATIVE to the RootPath
RDoc = "BDC_repo/BeeCleaning_SciData.R") %>%
# Add paths created by this function to the environment()
list2env(envir = parent.env(environment()))
# Set the working directory
setwd(DataPath)
# Install renv, IF NEEDED
#install.packages("renv")
renv::init()
##### 0.2 Install packages (if needed) #####
# Install only those packages that are not already present in your system
# Choose packages that need to be installed
# You may need to install gdal on your computer. This can be done on mac by using
# Homebrew in the terminal and the command "brew install gdal"
list.of.packages <- c("R.utils", # To use gunzip
"magrittr", # to use pipes
"dplyr", # Part of the tidyverse
"forcats", # tidyverse for working with factors
"tidyr", # Part of the tidyverse
"rlist", # Package to save lists
"galah", # To download ALA data
"praise", # To whispers sweet nothings
"EML", # To work with .eml files
"emld", # To work with .eml files
"rlang", # Part of the tidyverse — core functions
"xml2", # Part of the tidyverse — reads .xml files
"stringr", # Part of the tidyverse — works with text strings
"lubridate", # Part of the tidyverse — works with dates
"tidyselect", # Part of the tidyverse
"mgsub", # To perform multiple text substitutions
"bdc", # data cleaning package
"rvest", # Package for interfacing with and downloading from the internet
"rnaturalearth", # Global vector map data
"rnaturalearthdata", # To access the above global map data
"countrycode", # Package to deal with country codes
"rworldmap",
"readxl",
"cowplot", # ggplot2 helper package
"ggspatial") # Makes ggplot2 create north arrows or scale bars
# Install sf and terra separately
# renv::install(c("sf","terra"), type = "binary")
renv::install(c("sf","terra"), type = "binary")
# List the new (not installed) packages and then if there are any, install them.
renv::install(packages = c(list.of.packages),
rebuild = FALSE) # try changing to TRUE if you're having package troubles
##### 0.3 Load packages ####
# Load all packages from the list specified above,
lapply(c(list.of.packages, "sf","terra"),
library, character.only = TRUE)
### Load in R scripts and character strings in our package
sapply(list.files(file.path(ScriptPath), pattern = ".R$", full.names = TRUE), source) # loads in all functions
# Save a snapshot of the environment
renv::snapshot()
#### 1.0 Data read ####
##### 1.1 Read data ####
# Read in the uncleaned, but flagged, dataset
beeData <- readr::read_csv("/Users/jamesdorey/Desktop/Uni/My_papers/Bee_SDM_paper/Data_acquisition_workflow/Output/Intermediate/05_unCleaned_database.csv",
col_types = ColTypeR())
##### 1.2 Bee taxonomy ####
# Read in the custom taxonomy file
# This can be used to filter to a particular taxon
BeeTaxonomy <- BeeBDC::beesTaxonomy()
#### 2.0 Taxon example ####
# Users could filter to a particular taxonomic group by editing the below
# Select only unique selected bee genera
selectedGenera <- BeeTaxonomy %>%
# Select only tribe anthophorini (for example)
dplyr::filter(tolower(tribe) == tolower("anthophorini")) %>%
distinct(genus)
# Filter the data
taxonData <- beeData %>%
dplyr::filter(genus %in% selectedGenera$genus)
#### 3.0 Country example ####
# Users could filter to a particular group of countries by editing the below
studyArea <- c("Canada", "United states", "Mexico", "Guatemala")
# Filter the data
countryData <- beeData %>%
dplyr::filter(country %in% studyArea)
#### 4.0 Filtering example ####
##### 4.1 Simple filter ####
# Users can filter the flagged data by whichever columns they wish using the summaryFun...
source(paste(ScriptPath, "summaryFun.R", sep = "/"))
filteredData <-
# The input dataset
beeData %>%
# Run the summary function
summaryFun(
# Use the above input dataset to filter
data = .,
# Choose the columns to NOT filter (or NULL to filter all columns)
dontFilterThese = c(".gridSummary", ".lonFlag", ".latFlag", ".uncer_terms",
".uncertaintyThreshold"),
# In the output, do you want to REMOVE all filtering columns (TRUE), or keep them (FALSE)
removeFilterColumns = TRUE,
# In the output, do you want to only keep clean data according to your filtering (TRUE),
# Or keep all data and simply update the .summary column (FALSE)
filterClean = TRUE) # END summary function
##### 4.2 Uncertainty threshold ####
# Users may also want to filter by .uncertaintyThreshold, but want to specify their own
# Uncertainty threshold...
# Users can filter the flagged data by whichever columns they wish using the summaryFun...
source(paste(ScriptPath, "summaryFun.R", sep = "/"))
filteredData <-
# The input dataset
beeData %>%
# Remove any exiting .uncertaintyThreshold column
dplyr::select(!tidyselect::any_of(".uncertaintyThreshold")) %>%
# Chose the coordinate uncertainty to filter to...
coordUncerFlagR(data = .,
uncerColumn = "coordinateUncertaintyInMeters",
# 10 km here
threshold = 10000) %>%
# Now re-do the .summary column and filter the data using this new value
summaryFun(
# Select the input dataset to filter
data = .,
# Choose the columns to NOT filter (or NULL to filter all columns)
# NOTE: the .uncertaintyThreshold is now removed and WILL be filtered
dontFilterThese = c(".gridSummary", ".lonFlag", ".latFlag", ".uncer_terms"),
# In the output, do you want to REMOVE all filtering columns (TRUE), or keep them (FALSE)
removeFilterColumns = TRUE,
# In the output, do you want to only keep clean data according to your filtering (TRUE),
# Or keep all data and simply update the .summary column (FALSE)
filterClean = TRUE)
##### 4.3 Date filter ####
###### a. bdc_year_outOfRange ####
# Users can filter the flagged data by whichever columns they wish using the summaryFun...
source(paste(ScriptPath, "summaryFun.R", sep = "/"))
filteredData <-
# The input dataset
beeData %>%
  # Remove any existing .year_outOfRange column
dplyr::select(!".year_outOfRange") %>%
# Chose the minimum year to filter to...
bdc::bdc_year_outOfRange(data = .,
eventDate = "year",
year_threshold = 1970) %>%
# Now re-do the .summary column and filter the data using this new value
summaryFun(
# Select the input dataset to filter
data = .,
# Choose the columns to NOT filter (or NULL to filter all columns)
dontFilterThese = c(".gridSummary", ".lonFlag", ".latFlag", ".uncer_terms",
".uncertaintyThreshold"),
# In the output, do you want to REMOVE all filtering columns (TRUE), or keep them (FALSE)
removeFilterColumns = TRUE,
# In the output, do you want to only keep clean data according to your filtering (TRUE),
# Or keep all data and simply update the .summary column (FALSE)
filterClean = TRUE)
###### b. year range ####
# Users can filter the flagged data by whichever columns they wish using the summaryFun...
source(paste(ScriptPath, "summaryFun.R", sep = "/"))
filteredData <-
# The input dataset
beeData %>%
# Chose the year range...
dplyr::filter(year > 1950 & year < 1970) %>%
# Now re-do the .summary column and filter the data using this new value
summaryFun(
# Select the input dataset to filter
data = .,
# Choose the columns to NOT filter (or NULL to filter all columns)
dontFilterThese = c(".gridSummary", ".lonFlag", ".latFlag", ".uncer_terms",
".uncertaintyThreshold"),
# In the output, do you want to REMOVE all filtering columns (TRUE), or keep them (FALSE)
removeFilterColumns = TRUE,
# In the output, do you want to only keep clean data according to your filtering (TRUE),
# Or keep all data and simply update the .summary column (FALSE)
filterClean = TRUE)
# Users may choose any number of filtering steps from the main workflow to include above
# summaryFun(); just use pipes '%>%' between the functions and use '.' as the data input,
# because this will feed the output of each function into the next one.
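# For example (a minimal sketch; the choice of flagging functions here is illustrative),
# re-flag empty coordinates and empty event dates before re-building the .summary column:
# filteredData <- beeData %>%
#   bdc::bdc_coordinates_empty(data = ., lat = "decimalLatitude", lon = "decimalLongitude") %>%
#   bdc::bdc_eventDate_empty(data = ., eventDate = "eventDate") %>%
#   summaryFun(data = .,
#              dontFilterThese = NULL,
#              removeFilterColumns = TRUE,
#              filterClean = TRUE)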
#### 5. Summary figures ####
##### 5.1 Duplicate chordDiagrams ####
install.packages("circlize")
if(!require("BiocManager", quietly = TRUE)){
install.packages("BiocManager")}
BiocManager::install("ComplexHeatmap", force = TRUE)
install.packages("paletteer")
library(paletteer) # Find palettes here
renv::snapshot()
# Read in the most-RECENT file
duplicates <- fileFinder(path = "PATH TO A FOLDER CONTAINING THE duplicateRun_ — could be supp. materials folder",
fileName = "duplicateRun_") %>%
readr::read_csv() %>%
# Select only the stingless bee data
dplyr::filter(database_id %in% stinglessData$database_id |
database_id_match %in% stinglessData$database_id)
# Choose the global figure parameters
par(mar = c(2, 2, 2, 2)/2, mfrow = c(1,1))
# Create the chordDiagram. You can leave many of the below values out but we show here
# the defaults
source(paste(ScriptPath, "chordDiagramR.R", sep = "/"))
chordDiagramR(
# The duplicate data from the dupeSummary function output
dupeData = duplicates,
outPath = OutPath_Figures,
fileName = "ChordDiagram.pdf",
# These can be modified to help fit the final pdf that's exported.
width = 9,
height = 7.5,
bg = "white",
# How few distinct dataSources should a group have to be listed as "other"
smallGrpThreshold = 3,
title = "Duplicated record sources",
  # The default list of colour palettes to choose from using the paletteer package
palettes = c("cartography::blue.pal", "cartography::green.pal",
"cartography::sand.pal", "cartography::orange.pal", "cartography::red.pal",
"cartography::purple.pal", "cartography::brown.pal"),
canvas.ylim = c(-1.0,1.0),
canvas.xlim = c(-0.6, 0.25),
text.col = "black",
legendX = grid::unit(6, "mm"),
legendY = grid::unit(18, "mm"),
legendJustify = c("left", "bottom"),
niceFacing = TRUE)
# Save 7*6
##### 5.2 Duplicate histogram ####
# Find the existing flag file and read it in
flagColumns <- fileFinder(path = "PATH TO A FOLDER CONTAINING THE flagsRecorded_ — could be supp. materials folder",
fileName = "flagsRecorded_") %>%
readr::read_csv() %>%
# WARNING: alternate path if wanting to produce figures for the selected taxonData (2.0 above)
# Select only the taxonData data
dplyr::filter(database_id %in% taxonData$database_id)
# Read in the function
source(paste(ScriptPath, "dupePlotR.R", sep = "/"))
# Create a figure showing the total number of duplicates, kept duplicates, and unique
# records for each datasource (simplified to the text before the first underscore) and
# the proportion of the above for each data source
dupePlotR(
flagColumns = flagColumns,
# The outPath to save the plot as
outPath = paste0(DataPath, "/Output", "/Figures"),
fileName = "duplicatePlot.pdf",
# Colours in order: duplicate, kept duplicate, unique
dupeColours = c("#F2D2A2","#B9D6BC", "#349B90"),
# Plot size and height
base_height = 7, base_width = 7,
legend.position = c(0.85, 0.8),
# Extra variables can be fed into forcats::fct_recode() to change names on plot
GBIF = "GBIF", SCAN = "SCAN", iDigBio = "iDigBio", USGS = "USGS", ALA = "ALA",
ASP = "ASP"
)
##### 5.3 Flags by source ####
# Read in the function
source(paste(ScriptPath, "plotFlagSummary.R", sep = "/"))
###### a. All taxa in dataset ####
# Visualise all flags for each dataSource (simplified to the text before the first underscore)
plotFlagSummary(
# WARNING: alternate path if wanting to produce figures for the selected taxonData (2.0 above)
# Select only the taxonData data
plotData = taxonData,
# Colours in order of pass (TRUE), fail (FALSE), and NA
flagColours = c("#127852", "#A7002D", "#BDBABB"),
fileName = paste0("FlagsPlot_", Sys.Date(),".pdf"),
outPath = paste0(DataPath, "/Output/Figures"),
width = 15, height = 9,
# Extra variables can be fed into forcats::fct_recode() to change names on plot
GBIF = "GBIF", SCAN = "SCAN", iDigBio = "iDigBio", USGS = "USGS", ALA = "ALA",
ASP = "ASP", CAES = "CAES", 'B. Mont.' = "BMont", 'B. Minkley' = "BMin", Ecd = "Ecd",
Gaiarsa = "Gai", EPEL = "EPEL"
)
###### b. Single sp. summary ####
# Visualise all flags for each dataSource (simplified to the text before the first underscore)
# A clever user might also realise the potential to summarise and produce outputs in other columns
plotFlagSummary(
# WARNING: alternate path if wanting to produce figures for the selected taxonData (2.0 above)
# Select only the taxonData data
plotData = beeData,
# Colours in order of pass (TRUE), fail (FALSE), and NA
flagColours = c("#127852", "#A7002D", "#BDBABB"),
fileName = paste0("FlagsPlot_Lfijiense", Sys.Date(),".pdf"),
outPath = paste0(DataPath, "/Output/Figures"),
width = 15, height = 9,
# OPTIONAL:
# # Filter to species
speciesName = "Lasioglossum fijiense",
# column to look in
nameColumn = "species",
# Save the filtered data
saveFiltered = TRUE,
# Filter column to display on map
filterColumn = ".summary",
plotMap = TRUE,
# amount to jitter points if desired, e.g. 0.25 or NULL
jitterValue = NULL,
# Map opacity value for points between 0 and 1
mapAlpha = 1,
# Extra variables can be fed into forcats::fct_recode() to change names on plot
GBIF = "GBIF", SCAN = "SCAN", iDigBio = "iDigBio", USGS = "USGS", ALA = "ALA",
ASP = "ASP", CAES = "CAES", 'B. Mont.' = "BMont", 'B. Minkley' = "BMin", Ecd = "Ecd",
Gaiarsa = "Gai", EPEL = "EPEL"
)
##### 5.4 Maps ####
# Import CLEANED dataset (you can change this option)
# WARNING: alternate path if wanting to produce figures for the selected taxonData (2.0 above)
# Select only the taxonData data
mapData <- taxonData %>%
dplyr::filter(.summary == TRUE)
# Read in the function
# TO ADD: Change legend title and text to be nicer
# Add A and B
source(paste(ScriptPath, "summaryMaps.R", sep = "/"))
summaryMaps(
mapData = mapData,
width = 10, height = 10,
class_n = 15,
class_Style = "jenks",
fileName = paste0(DataPath, "/Output/Figures/", "CountryMaps_jenks.pdf")
)
#### 6.0 Save data ####
mapData %>%
readr::write_excel_csv(paste0(DataPath, "/Output/Intermediate/", "cleanTaxon_",
Sys.Date(), ".csv"))
|
/scratch/gouwar.j/cran-all/cranData/BeeBDC/inst/BeeBDC_basicWorkflow.R
|
# This R script was written by James Dorey, starting on the 2nd of May 2022. The script serves
# as a workflow for the BeeBDC package to clean and flag bee, and other, occurrence data.
# It also uses functions from several sources and particularly from the "bdc" package.
# For queries, please feel free to contact James Dorey at [email protected]
#### 0.0 Script preparation ####
##### 0.1 Working directory ####
# Choose the path to the root folder in which all other folders can be found (or made by dirMaker)
RootPath <- "/Users/jamesdorey/Desktop/Uni/My_papers/Bee_SDM_paper"
# Set the working directory
setwd(paste0(RootPath,"/Data_acquisition_workflow"))
# Initialise renv the project if needed
# renv::init(project = paste0(RootPath,"/Data_acquisition_workflow"))
renv::activate(project = paste0(RootPath,"/Data_acquisition_workflow"))
# Install BeeBDC from CRAN
install.packages("BeeBDC")
# You could also install BeeBDC's development version using the below:
# WARNING the development version may not pass all CRAN or GitHub tests.
remotes::install_github("https://github.com/jbdorey/BeeBDC.git", user="jbdorey",
# To use the development version, do below, otherwise choose "main"
ref = "devel",
force = TRUE)
##### 0.2 Load packages ####
# Save a snapshot of the environment
renv::snapshot(project = paste0(RootPath,"/Data_acquisition_workflow"))
# Load all packages from the list specified above,
lapply(c("ComplexHeatmap", "BeeBDC", "magrittr"),
library, character.only = TRUE)
# Create file paths and prepare for what's to come
BeeBDC::dirMaker(
RootPath = RootPath,
# Input the location of the workflow script RELATIVE to the RootPath
RDoc = "Packages/BeeBDC_development/Workflows/BeeBDC_fullWorkflow.R") %>%
# Add paths created by this function to the environment()
list2env(envir = parent.env(environment()))
#### 1.0 Data merge ####
##### 1.1 Download ALA data ####
# Downloads ALA data and creates a new file in the HomePath to put those data
BeeBDC::atlasDownloader(path = DataPath,
userEmail = "[email protected]",
atlas = "ALA",
ALA_taxon = "Apiformes")
##### 1.2 Import and merge ALA, SCAN, iDigBio and GBIF data ####
# Supply the path to where the data are
# save_type is either "csv_files" or "R_file"
DataImp <- BeeBDC::repoMerge(path = DataPath,
# Find data — Many problems can be solved by running repoFinder(path = DataPath)
# And looking for problems
occ_paths = BeeBDC::repoFinder(path = DataPath),
save_type = "R_file")
# Load in the most-recent version of these data if needed
# This will return a list with
# 1. the occurrence dataset with attributes and
# 2. the appended eml file
DataImp <- BeeBDC::importOccurrences(path = DataPath,
fileName = "BeeData_")
##### 1.3 Import USGS Data ####
# The USGS_formatter will find, import, format, and create metadata for the USGS dataset
# pubDate must be in day-month-year format and must be supplied by the user here.
USGS_data <- BeeBDC::USGS_formatter(path = DataPath, pubDate = "19-11-2022")
##### 1.4 Formatted Source Importer ####
# Formatted source importer. Use this importer to find files that have been formatted and need to
# be added to the larger data file (e.g., made by repoMerge and USGS_formatter)
# Combine the USGS data and the existing big dataset
Complete_data <- BeeBDC::formattedCombiner(path = DataPath,
strings = c("USGS_[a-zA-Z_]+[0-9]{4}-[0-9]{2}-[0-9]{2}"),
# This should be the list-format with eml attached
existingOccurrences = DataImp$Data_WebDL,
existingEMLs = DataImp$eml_files)
# In the catalogNumber, remove ".*specimennumber:" as what comes after should be the USGS
# number to match for duplicates
Complete_data$Data_WebDL <- Complete_data$Data_WebDL %>%
dplyr::mutate(catalogNumber = stringr::str_replace(catalogNumber,
pattern = ".*\\| specimennumber:",
replacement = ""))
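# A tiny worked example of the regex above, using a hypothetical catalogNumber value:
# stringr::str_replace("ABC-123 | specimennumber:45678", ".*\\| specimennumber:", "")
# # returns "45678" — only the USGS number after the pipe is kept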
##### 1.5 Save data ####
# Choose the type of data format you want
BeeBDC::dataSaver(path = DataPath,# The main path to look for data in
save_type = "CSV_file", # "R_file" OR "CSV_file"
occurrences = Complete_data$Data_WebDL, # The existing datasheet
eml_files = Complete_data$eml_files, # The existing EML files
file_prefix = "Fin_") # The prefix for the fileNames
rm(Complete_data, DataImp, USGS_data)
#### 2.0 Data preparation ####
##### 2.1 Standardise datasets ####
# You may either use
# (a) the bdc import method (works well with general datasets) or
# (b) the BeeBDC import method (works well with above data merge)
# The bdc import is NOT truly supported here, but provided as an example. Please go to section
# 2.1b below.
###### a. bdc import ####
warning(paste0("The bdc method here is not truly implemented and supported. If you use it you must do so alone.",
" This is just a place-holder for people using the bdc package more heavily.",
"\nPreferably, go directly to 2.1b — BeeBDC import."))
# Read in the bdc metadata
bdc_metadata <- readr::read_csv(paste(DataPath, "Output", "bdc_integration.csv", sep = "/"))
# Standardise the dataset to bdc
db_standardized <- bdc::bdc_standardize_datasets(
metadata = bdc_metadata,
format = "csv",
overwrite = TRUE,
save_database = TRUE)
# read in configuration description file of the column header info
config_description <- readr::read_csv(paste(DataPath, "Output", "bdc_configDesc.csv",
sep = "/"),
show_col_types = FALSE, trim_ws = TRUE)
###### b. BeeBDC import ####
# You can also just read the data in using the below script. This will
# likely be quicker and more-reliable. Find the path
occPath <- BeeBDC::fileFinder(path = DataPath, fileName = "Fin_BeeData_combined_")
# read in the file
db_standardized <- readr::read_csv(occPath,
# Use the basic ColTypeR function to determine types
col_types = BeeBDC::ColTypeR(), trim_ws = TRUE) %>%
# add the database_id columns
dplyr::mutate(database_id = paste("Dorey_data_", 1:nrow(.), sep = ""),
.before = family)
###### c. optional thin ####
# You can thin the dataset for TESTING ONLY!
# check_pf <- check_pf %>%
# # take every 100th record
# filter(row_number() %% 100 == 1)
##### 2.2 Paige dataset ####
# Integrate Paige Chesshire's cleaned dataset.
# Import Paige's cleaned N. American data
# IF you haven't figured out by now, don't worry about the column name warning — not all columns occur here.
PaigeNAm <- readr::read_csv(paste(DataPath, "Paige_data", "NorAmer_highQual_only_ALLfamilies.csv",
sep = "/"), col_types = BeeBDC::ColTypeR()) %>%
# Change the column name from Source to dataSource to match the rest of the data.
dplyr::rename(dataSource = Source) %>%
# add a NEW database_id column
dplyr::mutate(
database_id = paste0("Paige_data_", 1:nrow(.)),
.before = scientificName)
# Merge Paige's data with downloaded data
db_standardized <- BeeBDC::PaigeIntegrater(
db_standardized = db_standardized,
PaigeNAm = PaigeNAm,
  # This is a list of columns used to match Paige's data to the most-recent download.
# Each vector will be matched individually
columnStrings = list(
c("decimalLatitude", "decimalLongitude",
"recordNumber", "recordedBy", "individualCount", "samplingProtocol",
"associatedTaxa", "sex", "catalogNumber", "institutionCode", "otherCatalogNumbers",
"recordId", "occurrenceID", "collectionID"), # Iteration 1
c("catalogNumber", "institutionCode", "otherCatalogNumbers",
"recordId", "occurrenceID", "collectionID"), # Iteration 2
c("decimalLatitude", "decimalLongitude",
"recordedBy", "genus", "specificEpithet"),# Iteration 3
c("id", "decimalLatitude", "decimalLongitude"),# Iteration 4
c("recordedBy", "genus", "specificEpithet", "locality"), # Iteration 5
c("recordedBy", "institutionCode", "genus",
"specificEpithet","locality"),# Iteration 6
c("occurrenceID","decimalLatitude", "decimalLongitude"),# Iteration 7
c("catalogNumber","decimalLatitude", "decimalLongitude"),# Iteration 8
c("catalogNumber", "locality") # Iteration 9
) )
# Remove spent data
rm(PaigeNAm)
##### 2.3 USGS ####
# The USGS dataset also partially occurs on GBIF via BISON; however, the occurrence codes are in
# a silly place... We will correct these here to help identify duplicates later
db_standardized <- db_standardized %>%
# Remove the discoverlife html if it is from USGS
dplyr::mutate(occurrenceID = dplyr::if_else(
stringr::str_detect(occurrenceID, "USGS_DRO"),
stringr::str_remove(occurrenceID, "http://www\\.discoverlife\\.org/mp/20l\\?id="),
occurrenceID)) %>%
# Use otherCatalogNumbers when occurrenceID is empty AND when USGS_DRO is detected there
dplyr::mutate(
occurrenceID = dplyr::if_else(
stringr::str_detect(otherCatalogNumbers, "USGS_DRO") & is.na(occurrenceID),
otherCatalogNumbers, occurrenceID)) %>%
# Make sure that no eventIDs have snuck into the occurrenceID columns
# For USGS_DRO, codes with <6 digits are event ids
dplyr::mutate(
occurrenceID = dplyr::if_else(stringr::str_detect(occurrenceID, "USGS_DRO", negate = TRUE),
# Keep occurrenceID if it's NOT USGS_DRO
occurrenceID,
                                  # If it IS USGS_DRO and it has >= 6 digits, keep it; else, NA
dplyr::if_else(stringr::str_detect(occurrenceID, "USGS_DRO[0-9]{6,10}"),
occurrenceID, NA_character_)),
catalogNumber = dplyr::if_else(stringr::str_detect(catalogNumber, "USGS_DRO", negate = TRUE),
# Keep catalogNumber if it's NOT USGS_DRO
catalogNumber,
                                    # If it IS USGS_DRO and it has >= 6 digits, keep it; else, NA
dplyr::if_else(stringr::str_detect(catalogNumber, "USGS_DRO[0-9]{6,10}"),
catalogNumber, NA_character_)))
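# Optional sanity check (a small sketch, not part of the original workflow): count how many
# USGS_DRO codes remain in occurrenceID after the clean-up above
sum(stringr::str_detect(db_standardized$occurrenceID, "USGS_DRO"), na.rm = TRUE)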
##### 2.4 Additional datasets ####
# Import additional and potentially private datasets.
# Private dataset functions are provided but the data not integrated here until those datasets
# become freely available post-publication.
# There will be some warnings where a few rows may not be formatted correctly or where dates fail
# to parse. This is normal.
###### a. EPEL ####
# Guzman, L. M., Kelly, T. & Elle, E. A data set for pollinator diversity and their interactions
# with plants in the Pacific Northwest. Ecology n/a, e3927 (2022).
# <https://doi.org/10.1002/ecy.3927>
EPEL_Data <- BeeBDC::readr_BeeBDC(dataset = "EPEL",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/bee_data_canada.csv",
outFile = "jbd_EPEL_data.csv",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")
###### b. Allan Smith-Pardo ####
ASP_Data <- BeeBDC::readr_BeeBDC(dataset = "ASP",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/Allan_Smith-Pardo_Dorey_ready2.csv",
outFile = "jbd_ASP_data.csv",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")
###### c. Minckley ####
BMin_Data <- BeeBDC::readr_BeeBDC(dataset = "BMin",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/Bob_Minckley_6_1_22_ScanRecent-mod_Dorey.csv",
outFile = "jbd_BMin_data.csv",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")
###### d. BMont ####
# Delphia, C. M. Bumble bees of Montana.
# <https://www.mtent.org/projects/Bumble_Bees/bombus_species.html>. (2022)
BMont_Data <- BeeBDC::readr_BeeBDC(dataset = "BMont",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/Bombus_Montana_dorey.csv",
outFile = "jbd_BMont_data.csv",
dataLicense = "https://creativecommons.org/licenses/by-sa/4.0/")
###### e. Ecd ####
# Ecdysis. Ecdysis: a portal for live-data arthropod collections,
# <https://serv.biokic.asu.edu/ecdysis/index.php> (2022).
Ecd_Data <- BeeBDC::readr_BeeBDC(dataset = "Ecd",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/Ecdysis_occs.csv",
outFile = "jbd_Ecd_data.csv",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")
###### f. Gai ####
# Gaiarsa, M. P., Kremen, C. & Ponisio, L. C. Pollinator interaction flexibility across scales
# affects patch colonization and occupancy. Nature Ecology & Evolution 5, 787-793 (2021).
# <https://doi.org/10.1038/s41559-021-01434-y>
Gai_Data <- BeeBDC::readr_BeeBDC(dataset = "Gai",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/upload_to_scan_Gaiarsa et al_Dorey.csv",
outFile = "jbd_Gai_data.csv",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")
###### g. CAES ####
# This is a little slower and will have a few date warnings — 215 failed to parse.
CAES_Data <- BeeBDC::readr_BeeBDC(dataset = "CAES",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/CT_BEE_DATA_FROM_PBI.xlsx",
outFile = "jbd_CT_Data.csv",
sheet = "Sheet1",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")
###### h. GeoL ####
GeoL_Data <- BeeBDC::readr_BeeBDC(dataset = "GeoL",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/Geolocate and BELS_certain and accurate.xlsx",
outFile = "jbd_GeoL_Data.csv",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")
###### i. EaCO ####
EaCO_Data <- BeeBDC::readr_BeeBDC(dataset = "EaCO",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/Eastern Colorado bee 2017 sampling.xlsx",
outFile = "jbd_EaCo_Data.csv",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")
###### j. FSCA ####
# Florida State Collection of Arthropods
FSCA_Data <- BeeBDC::readr_BeeBDC(dataset = "FSCA",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "InputDatasets/fsca_9_15_22_occurrences.csv",
outFile = "jbd_FSCA_Data.csv",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")
###### k. Texas SMC ####
# # Published or unpublished data from Texas literature not in an online database, usually copied
# into spreadsheet from document format, or otherwise copied from a very differently-formatted spreadsheet
# # Unpublished or partially published data were obtained with express permission from the lead author
SMC_Data <- BeeBDC::readr_BeeBDC(dataset = "SMC",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/TXbeeLitOccs_31Oct22.csv",
outFile = "jbd_SMC_Data.csv",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")
###### l. Texas Bal ####
# # Data with GPS coordinates from Ballare et al. 2019, https://doi.org/10.1002/eap.1869.
# The version on Dryad is missing site GPS coordinates (by accident). Kim is okay with these data
# being made public as long as her paper is referenced.
# - Elinor Lichtenberg
Bal_Data <- BeeBDC::readr_BeeBDC(dataset = "Bal",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/Beedata_ballare.xlsx",
outFile = "jbd_Bal_Data.csv",
sheet = "animal_data",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")
###### m. Palouse Lic ####
# # *Attached: My canola data. I tried to get this in DarwinCore format.
# These data go with the manuscript published here:
# https://doi.org/10.1111/jen.13188
# These are the data I will be putting on SCAN.
# - Elinor Lichtenberg
Lic_Data <- BeeBDC::readr_BeeBDC(dataset = "Lic",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/Lichtenberg_canola_records.csv",
outFile = "jbd_Lic_Data.csv",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")
###### n. Arm ####
Arm_Data <- BeeBDC::readr_BeeBDC(dataset = "Arm",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/Bee database Armando_Final.xlsx",
outFile = "jbd_Arm_Data.csv",
sheet = "Sheet1",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")
###### o. Dor #####
Dor_Data <- BeeBDC::readr_BeeBDC(dataset = "Dor",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/DoreyData.csv",
outFile = "jbd_Dor_Data.csv",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")
###### p. VicWam #####
VicWam_Data <- BeeBDC::readr_BeeBDC(dataset = "VicWam",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/Combined_Vic_WAM_databases.xlsx",
outFile = "jbd_VicWam_Data.csv",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/",
sheet = "Combined")
##### 2.5 Merge all ####
# Remove these spent datasets
rm(EPEL_Data, ASP_Data, BMin_Data, BMont_Data, Ecd_Data, Gai_Data, CAES_Data,
# INHS_Data, MABC_Data, EcoS_Data, KP_Data,
GeoL_Data, EaCO_Data, FSCA_Data, SMC_Data, Bal_Data, Lic_Data, Arm_Data, Dor_Data,
VicWam_Data)
# Read in and merge all
db_standardized <- db_standardized %>%
dplyr::bind_rows(
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_ASP_data.csv"), col_types = BeeBDC::ColTypeR()),
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_EPEL_data.csv"), col_types = BeeBDC::ColTypeR()),
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_BMin_data.csv"), col_types = BeeBDC::ColTypeR()),
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_BMont_data.csv"), col_types = BeeBDC::ColTypeR()),
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_Ecd_data.csv"), col_types = BeeBDC::ColTypeR()),
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_Gai_data.csv"), col_types = BeeBDC::ColTypeR()),
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_CT_Data.csv"), col_types = BeeBDC::ColTypeR()),
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_GeoL_Data.csv"), col_types = BeeBDC::ColTypeR()),
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_EaCo_Data.csv"), col_types = BeeBDC::ColTypeR()),
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_SMC_Data.csv"), col_types = BeeBDC::ColTypeR()),
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_Bal_Data.csv"), col_types = BeeBDC::ColTypeR()),
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_Lic_Data.csv"), col_types = BeeBDC::ColTypeR()),
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_Arm_Data.csv"), col_types = BeeBDC::ColTypeR()),
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_Dor_Data.csv"), col_types = BeeBDC::ColTypeR()),
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_VicWam_Data.csv"), col_types = BeeBDC::ColTypeR())) %>%
# END bind_rows
suppressWarnings(classes = "warning") # End suppressWarnings — due to col_types
##### 2.6 Match database_id ####
# Try to match database IDs with prior runs.
# read in a prior run of choice
priorRun <- fileFinder(path = DataPath,
                        fileName = "01_prefilter_database_9Aug22.csv") %>%
readr::read_csv(file = ., col_types = ColTypeR())
# attempt to match records using the below function
# This function will attempt to find the database_ids from prior runs
# source(paste(ScriptPath, "idMatchR.R", sep = "/"))
db_standardized <- idMatchR(
currentData = db_standardized,
priorData = priorRun,
# First matches will be given preference over later ones
matchBy = tibble::lst(c("gbifID", "dataSource"),
c("catalogNumber", "institutionCode", "dataSource", "decimalLatitude",
"decimalLongitude"),
c("occurrenceID", "dataSource","decimalLatitude","decimalLongitude"),
c("recordId", "dataSource","decimalLatitude","decimalLongitude"),
c("id", "dataSource","decimalLatitude","decimalLongitude"),
    # Because INHS was entered as its own dataset but is now included in the GBIF download...
c("catalogNumber", "institutionCode", "dataSource",
"decimalLatitude","decimalLongitude")),
  # You can exclude datasets from the prior run by matching their prefixes — before the first underscore:
excludeDataset = c("ASP", "BMin", "BMont", "CAES", "EaCO", "Ecd", "EcoS",
"Gai", "KP", "EPEL", "CAES", "EaCO", "FSCA", "SMC", "Lic", "Arm",
"VicWam"))
# Remove redundant files
rm(priorRun)
# # Save the dataset
db_standardized %>%
readr::write_excel_csv(.,
paste(OutPath_Intermediate, "00_prefilter_database.csv",
sep = "/"))
#### 3.0 Initial flags ####
# Read this back in if needed.
if(!exists("db_standardized")){
db_standardized <- readr::read_csv(paste(OutPath_Intermediate, "00_prefilter_database.csv",
sep = "/"), col_types = BeeBDC::ColTypeR())}
# See here for bdc prefilter tutorial — https://brunobrr.github.io/bdc/articles/prefilter.html
##### 3.1 SciName ####
# Flag occurrences without scientificName provided
check_pf <- bdc::bdc_scientificName_empty(
data = db_standardized,
sci_name = "scientificName")
# now that this is saved, remove it to save space in memory
rm(db_standardized)
##### 3.2 MissCoords ####
# Flag occurrences with missing lat and lon
check_pf <- bdc::bdc_coordinates_empty(
data = check_pf,
lat = "decimalLatitude",
lon = "decimalLongitude")
##### 3.3 OutOfRange ####
# Flag occurrences that are not on earth (outside of -180 to 180 or -90 to 90 degrees)
check_pf <- bdc::bdc_coordinates_outOfRange(
data = check_pf,
lat = "decimalLatitude",
lon = "decimalLongitude")
##### 3.4 ?Source ####
# Flag occurrences that don't match the basisOfRecord types below
check_pf <- bdc::bdc_basisOfRecords_notStandard(
data = check_pf,
basisOfRecord = "basisOfRecord",
names_to_keep = c(
# Keep all plus some at the bottom.
"Event",
"HUMAN_OBSERVATION",
"HumanObservation",
"LIVING_SPECIMEN",
"LivingSpecimen",
"MACHINE_OBSERVATION",
"MachineObservation",
"MATERIAL_SAMPLE",
"O",
"Occurrence",
"MaterialSample",
"OBSERVATION",
"Preserved Specimen",
"PRESERVED_SPECIMEN",
"preservedspecimen Specimen",
"Preservedspecimen",
"PreservedSpecimen",
"preservedspecimen",
"pinned",
"carded/pointed",
"S",
"Specimen",
"Taxon",
"UNKNOWN",
"",
NA,
"NA",
"LITERATURE",
"None", "Pinned Specimen", "Voucher reared", "Emerged specimen"
))
##### 3.5 CountryName ####
# Try to harmonise country names
###### a. prepare dataset ####
# Fix up country names based on the common problems defined below and extract ISO2 codes for occurrences
check_pf_noNa <- BeeBDC::countryNameCleanR(
data = check_pf,
# Create a Tibble of common issues in country names and their replacements
commonProblems = dplyr::tibble(problem = c('U.S.A.', 'US','USA','usa','UNITED STATES',
'United States','U.S.A','MX','CA','Bras.','Braz.',
'Brasil','CNMI','USA TERRITORY: PUERTO RICO'),
fix = c('United States of America','United States of America',
'United States of America','United States of America',
'United States of America','United States of America',
'United States of America','Mexico','Canada','Brazil',
'Brazil','Brazil','Northern Mariana Islands','PUERTO.RICO'))
)
###### b. run function ####
# Get country name from coordinates using a wrapper around the bdc_country_from_coordinates function
# Because our dataset is much larger than those used to design bdc, we have made it so that you
# can analyse data in smaller pieces.
suppressWarnings(
countryOutput <- BeeBDC::jbd_CfC_chunker(data = check_pf_noNa,
lat = "decimalLatitude",
lon = "decimalLongitude",
country = "country",
# How many rows to process at a time
stepSize = 1000000,
# Start row
chunkStart = 1,
path = OutPath_Intermediate,
scale = "medium",
mc.cores = 2),
classes = "warning")
###### c. re-merge ####
# Left join these datasets
check_pf <- dplyr::left_join(check_pf,
countryOutput,
by = "database_id",
suffix = c("", "CO")) %>%
# Take the new country name if the original is NA
dplyr::mutate(country = dplyr::if_else(is.na(country),
countryCO,
country)) %>%
# Remove duplicates if they arose from left_join!
dplyr::distinct()
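# Optional sketch (an addition): count how many records still lack a country name after the re-merge
sum(is.na(check_pf$country))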
# Save the dataset
check_pf %>%
readr::write_excel_csv(.,
paste(OutPath_Intermediate, "01_prefilter_database.csv",
sep = "/"))
# Save the countryOutput dataset
countryOutput %>%
readr::write_excel_csv(.,
paste(OutPath_Intermediate, "countryOutput.csv",
sep = "/"))
# Read in IF needed
if(!exists("check_pf")){
check_pf <- readr::read_csv(paste(DataPath,
"Output", "Intermediate", "01_prefilter_database.csv", sep = "/"),
col_types = BeeBDC::ColTypeR())}
# remove the interim datasets
rm(check_pf_noNa, countryOutput)
##### 3.6 StandardCoNames ####
# Run the function
# Standardise country names and add ISO2 codes if needed
check_pf <- bdc::bdc_country_standardized(
# Remove the countryCode and country_suggested columns to avoid an error with
# where two "countryCode" and "country_suggested" columns exist (i.e. if the dataset has been
# run before)
data = check_pf %>% dplyr::select(!tidyselect::any_of(c("countryCode", "country_suggested"))),
country = "country"
)
##### 3.7 TranspCoords ####
# Flag and correct records when lat and long appear to be transposed. We have chunked
# this because it is too RAM-heavy to run on our large dataset
check_pf <- BeeBDC::jbd_Ctrans_chunker(
# bdc_coordinates_transposed inputs
data = check_pf,
id = "database_id",
lat = "decimalLatitude",
lon = "decimalLongitude",
country = "country",
countryCode = "countryCode",
border_buffer = 0.2, # in decimal degrees (~22 km at the equator)
save_outputs = TRUE,
sci_names = "scientificName",
# chunker inputs
stepSize = 1000000, # How many rows to process at a time
chunkStart = 1, # Start row
append = FALSE, # If FALSE it may overwrite existing dataset
path = OutPath_Check,
mc.cores = 4
)
# Get a summary of the number of transposed records
table(check_pf$coordinates_transposed, useNA = "always")
# Save the dataset
check_pf %>%
readr::write_excel_csv(.,
paste(OutPath_Intermediate, "01_prefilter_database.csv",
sep = "/"))
gc()
##### 3.8 Coord-country ####
# Read data in again if needed
if(!exists("check_pf")){
check_pf <- readr::read_csv(paste(OutPath_Intermediate, "01_prefilter_database.csv",
sep = "/"), col_types = BeeBDC::ColTypeR())}
# Collect all country names in the country column
# Use a rebuilt bdc function to flag occurrences where the coordinates are inconsistent with the
# provided country name
check_pf <- BeeBDC::jbd_coordCountryInconsistent(
data = check_pf,
lon = "decimalLongitude",
lat = "decimalLatitude",
scale = 50,
pointBuffer = 0.01)
# Save the dataset
check_pf %>%
readr::write_excel_csv(.,
paste(OutPath_Intermediate, "01_prefilter_database.csv",
sep = "/"))
##### 3.9 GeoRefIssue ####
# This function identifies records whose coordinates can potentially be extracted from the locality
# information; these records must be manually checked later
xyFromLocality <- bdc::bdc_coordinates_from_locality(
data = check_pf,
locality = "locality",
lon = "decimalLongitude",
lat = "decimalLatitude",
save_outputs = TRUE
) %>%
# Save data if needed.
readr::write_excel_csv(paste(OutPath_Check, "01_coordinates_from_locality.csv",
sep = "/"))
# Remove spent data
rm(xyFromLocality)
##### 3.10 flag Absent ####
# Flag the records marked as "absent"
check_pf <- BeeBDC::flagAbsent(data = check_pf,
PresAbs = "occurrenceStatus")
##### 3.11 flag License ####
# Flag the records that may not be used according to their license information
check_pf <- BeeBDC::flagLicense(data = check_pf,
strings_to_restrict = "all",
# DON'T flag if in the following dataSource(s)
excludeDataSource = NULL)
##### 3.12 GBIF issue ####
# Flag select issues that are flagged by GBIF
check_pf <- BeeBDC::GBIFissues(data = check_pf,
issueColumn = "issue",
GBIFflags = c("COORDINATE_INVALID", "ZERO_COORDINATE"))
##### 3.13 Flag Reports ####
###### a. save flags ####
# SAVE the flags so far
# This function will make sure that you keep a copy of everything that has been flagged up until now.
# This will be updated throughout the script and accessed at the end, so be wary of moving files around manually.
flagFile <- BeeBDC::flagRecorder(
data = check_pf,
outPath = paste(OutPath_Report, sep =""),
fileName = paste0("flagsRecorded_", Sys.Date(), ".csv"),
# These are the columns that will be kept along with the flags
idColumns = c("database_id", "id", "catalogNumber", "occurrenceID", "dataSource"),
# TRUE if you want to find a file from a previous part of the script to append to
append = FALSE)
# Produce the .summary column in the main dataset — it will be FALSE if ANY flag column is FALSE
check_pf <- BeeBDC::summaryFun(
data = check_pf,
# Don't filter these columns (or NULL)
dontFilterThese = NULL,
# Remove the filtering columns?
removeFilterColumns = FALSE,
# Filter to ONLY cleaned data?
filterClean = FALSE)
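# Optional sketch (an addition using only dplyr/tidyselect): count how many records fail each
# flag column. Flag columns start with "." and TRUE means a record passed that test.
check_pf %>%
  dplyr::summarise(dplyr::across(tidyselect::starts_with("."),
                                 ~ sum(.x == FALSE, na.rm = TRUE))) %>%
  dplyr::glimpse()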
###### b. reporting ####
(report <- bdc::bdc_create_report(data = check_pf,
database_id = "database_id",
workflow_step = "prefilter",
save_report = TRUE)
)
###### c. figures ####
figures <-
bdc::bdc_create_figures(data = check_pf,
database_id = "database_id",
workflow_step = "prefilter",
save_figures = TRUE)
# You can check figures using
figures$.coordinates_country_inconsistent
##### 3.14 save ####
# Save the intermediate dataset
check_pf %>%
readr::write_excel_csv(., paste(OutPath_Intermediate, "01_prefilter_output.csv",
sep = "/"))
#### 4.0 Taxonomy ####
# See bdc tutorial here — https://brunobrr.github.io/bdc/articles/taxonomy.html
if(!exists("check_pf")){
# Read in the filtered dataset
database <-
readr::read_csv( paste(OutPath_Intermediate, "01_prefilter_output.csv",
sep = "/"), col_types = BeeBDC::ColTypeR())
}else{
# OR rename and remove
database <- check_pf
# Remove spent dataset
rm(check_pf)}
# Remove names_clean if it already exists (i.e. you have run this before on this dataset)
database <- database %>%
dplyr::select(!tidyselect::any_of("names_clean"))
##### 4.1 Prep data names ####
# This next step cleans the database's scientificName column. The cleaning tool will:
# Flag and remove family names pre-pended to species names;
# Flag and remove qualifiers denoting uncertain or provisional status of taxonomic identification (e.g., confer, species, affinis, among others);
# Flag and remove infraspecific terms (e.g., variety [var.], subspecies [subsp], forma [f.], and their spelling variations);
# Standardize names, i.e., capitalize only the first letter of the genus name and remove extra whitespace;
# Parse names, i.e., separate author, date, annotations from taxon name.
# ! MAC: You need to install gnparser through terminal — brew
# brew tap gnames/gn
# brew install gnparser
parse_names <-
bdc::bdc_clean_names(sci_names = database$scientificName, save_outputs = TRUE)
# Keep only the .uncer_terms and names_clean columns
parse_names <-
parse_names %>%
dplyr::select(.uncer_terms, names_clean)
# Merge names with the complete dataset
database <- dplyr::bind_cols(database, parse_names)
rm(parse_names)
##### 4.2 Harmonise taxonomy ####
# Download the custom taxonomy file
taxonomyFile <- BeeBDC::beesTaxonomy(URL = "https://figshare.com/ndownloader/files/42402264?private_link=bce1f92848c2ced313ee")
# Harmonise the names in the occurrence tibble
# # This flags the occurrences without a matched name and matches names to their correct name
# according to DiscoverLife
database <- BeeBDC::harmoniseR(path = DataPath, #The path to a folder that the output can be saved
taxonomy = taxonomyFile, # The formatted taxonomy file
data = database,
speciesColumn = "scientificName",
stepSize = 1000000,
mc.cores = 1)
# You don't need this file anymore...
rm(taxonomyFile)
# Save the harmonised file.
database %>%
readr::write_excel_csv(.,
paste(OutPath_Intermediate, "02_taxonomy_database.csv",
sep = "/"))
##### 4.3 Save flags ####
# SAVE the flags so far
# This will find the most-recent flag file and append your new data to it.
# You can double-check the data and number of columns if you'd like to be thorough and sure that
# all data are intact. <3
flagFile <- BeeBDC::flagRecorder(
data = database,
outPath = paste(OutPath_Report, sep =""),
fileName = paste0("flagsRecorded_", Sys.Date(), ".csv"),
idColumns = c("database_id", "id", "catalogNumber", "occurrenceID", "dataSource"),
append = TRUE,
printSummary = TRUE)
#### 5.0 Space ####
# the final frontier or whatever.
# Read in the last database
if(!exists("database")){
database <-
readr::read_csv(paste(OutPath_Intermediate, "02_taxonomy_database.csv", sep = "/"),
col_types = BeeBDC::ColTypeR())}
##### 5.1 Coord precision ####
# This function identifies records with a coordinate precision below a specified number of decimal
# places. For example, the precision of a coordinate with 1 decimal place is 11.132 km at the
# equator, i.e., the scale of a large city.
# "Coordinates with one, two, or three decimal places present a precision of
# ~11.1 km, ~1.1 km, and ~111 m at the equator, respectively."
# This function differs from the bdc function by ONLY flagging occurrences where BOTH lat and lon
# are rounded (having only one or the other rounded could be due to rounding in excel).
check_space <-
BeeBDC::jbd_coordinates_precision(
data = database,
lon = "decimalLongitude",
lat = "decimalLatitude",
ndec = 2 # number of decimals to be tested
)
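# Optional check (this assumes the appended flag column is named ".rou", as in the bdc equivalent)
if (".rou" %in% colnames(check_space)) table(check_space$.rou, useNA = "always")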
# Remove the spent dataset
rm(database)
# Save the harmonised file.
check_space %>%
readr::write_excel_csv(.,
paste(OutPath_Intermediate, "03_space_inter_database.csv",
sep = "/"))
if(!exists("check_space")){
# Read in the filtered dataset
check_space <-
readr::read_csv( paste(OutPath_Intermediate, "03_space_inter_database.csv",
sep = "/"), col_types = BeeBDC::ColTypeR())
}
##### 5.2 Common spatial issues ####
# Only run occurrences that are spatially 'valid' through clean_coordinates.
# Not doing this might crash R.
tempSpace <- check_space %>%
dplyr::filter(!.coordinates_empty == FALSE) %>%
dplyr::filter(!.coordinates_outOfRange == FALSE) %>%
# Next, we will flag common spatial issues using functions of the package CoordinateCleaner.
# Addresses some common issues in biodiversity datasets
CoordinateCleaner::clean_coordinates(
x = .,
lon = "decimalLongitude",
lat = "decimalLatitude",
species = "scientificName",
countries = NULL, # Tests if coords are from x country. This is not needed.
tests = c(
"capitals", # records within 0.5 km of capitals centroids
"centroids", # records within 1 km around country and province centroids
"equal", # records with equal coordinates
"gbif", # records within 1 km of GBIF headquarters. (says 1 degree in package, but code says 1000 m)
"institutions", # records within 100m of zoo and herbaria
"zeros" # records with coordinates 0,0
# "seas" # Not flagged as this should be flagged by coordinate country inconsistent
),
capitals_rad = 1000,
centroids_rad = 500,
centroids_detail = "both", # test both country and province centroids
inst_rad = 100, # remove zoo and herbaria within 100m
range_rad = 0,
zeros_rad = 0.5,
capitals_ref = NULL,
centroids_ref = NULL,
country_ref = NULL,
country_refcol = "countryCode",
inst_ref = NULL,
range_ref = NULL,
# seas_scale = 50,
value = "spatialvalid" # result of tests are appended in separate columns
) %>%
# Remove duplicate .summary column that can be replaced later and turn into a tibble
dplyr::select(!tidyselect::starts_with(".summary")) %>%
dplyr::tibble()
# re-merge the datasets
check_space <- tempSpace %>%
# Re-bind with the records that were excluded earlier
dplyr::bind_rows(check_space %>%
dplyr::filter(.coordinates_empty == FALSE |
.coordinates_outOfRange == FALSE) )
# Remove the temporary dataset
rm(tempSpace)
# Save the intermediate dataset
check_space %>%
readr::write_excel_csv(paste(OutPath_Intermediate, "03_space_inter_database.csv",
sep = "/"))
if(!exists("check_space")){
check_space <- readr::read_csv(paste(OutPath_Intermediate, "03_space_inter_database.csv",
sep = "/"),
col_types = BeeBDC::ColTypeR())
}
##### 5.3 Diagonal + grid ####
# Finds sequential numbers that could be fill-down errors in lat and long.
# groups by eventDate, recordedBy
# This is accomplished by using a sliding window with the length determined by minRepeats.
check_space <- BeeBDC::diagonAlley(
data = check_space,
# The minimum number of repeats needed to find a sequence in for flagging
minRepeats = 6,
mc.cores = 4)
# SPATIAL gridding from rasterisation:
# Select only the records with more than X occurrences
griddingDF <- check_space %>%
# Exclude NA lat and lon values
tidyr::drop_na(c("decimalLatitude", "decimalLongitude")) %>%
# Group by the dataset name
dplyr::group_by(datasetName) %>%
# Remove rows that aren't unique for lat and long
dplyr::distinct(decimalLongitude, decimalLatitude,
.keep_all = TRUE) %>%
# Find the groups with 4 or more occurrence records
dplyr::filter(dplyr::n() >= 4) %>%
dplyr::ungroup()
# Run the gridding analysis to find datasets that might be gridded
gridded_datasets <- CoordinateCleaner::cd_round(
x = griddingDF,
lon = "decimalLongitude",
lat = "decimalLatitude",
ds = "datasetName",
T1 = 7,
min_unique_ds_size = 4,
test = "both",
value = "dataset",
graphs = FALSE,
verbose = TRUE,
reg_out_thresh = 2,
reg_dist_min = 0.1,
reg_dist_max = 2
) %>%
dplyr::tibble()
# The griddingDF is no longer needed. remove it.
rm(griddingDF)
# Integrate these results with the main dataset
check_space <- check_space %>%
# Join the datasets
dplyr::left_join(
# Select the columns of interest
dplyr::select(gridded_datasets, dataset, lon.flag, lat.flag, summary),
by = c("datasetName" = "dataset")) %>%
  # Make new columns with more-consistent naming and change the NA values to TRUE (not flagged)
dplyr::mutate(.lonFlag = tidyr::replace_na(lon.flag, TRUE),
.latFlag = tidyr::replace_na(lat.flag, TRUE),
.gridSummary = tidyr::replace_na(summary, TRUE)) %>%
# Remove old columns
dplyr::select(!c(lon.flag, lat.flag, summary))
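# Optional: quickly tabulate the gridding flag created above (TRUE = not flagged)
table(check_space$.gridSummary, useNA = "always")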
# Save the gridded_datasets file for later examination
gridded_datasets %>%
readr::write_excel_csv(paste(OutPath_Intermediate, "03_space_griddedDatasets.csv",
sep = "/"))
# Now remove this file
rm(gridded_datasets)
##### 5.4 Uncertainty ####
# Flag records that exceed a coordinateUncertaintyInMeters threshold
check_space <- BeeBDC::coordUncerFlagR(data = check_space,
uncerColumn = "coordinateUncertaintyInMeters",
threshold = 1000)
##### 5.5 Country checklist ####
# Download the country-level checklist
beesChecklist <- BeeBDC::beesChecklist()
check_space <- countryOutlieRs(checklist = beesChecklist,
data = check_space,
keepAdjacentCountry = TRUE,
pointBuffer = 0.05,
# Scale of map to return, one of 110, 50, 10 OR 'small', 'medium', 'large'
# Smaller numbers will result in much longer calculation times.
# We have not attempted a scale of 10.
scale = 50,
mc.cores = 1)
# A list of failed species-country combinations and their numbers can be output here
check_space %>%
dplyr::filter(.countryOutlier == FALSE) %>%
dplyr::select(database_id, scientificName, country) %>%
dplyr::group_by(scientificName) %>%
dplyr::mutate(count_scientificName = dplyr::n()) %>%
dplyr::distinct(scientificName, country, .keep_all = TRUE) %>%
readr::write_excel_csv(paste(OutPath_Intermediate, "03_space_failedCountryChecklist.csv",
sep = "/"))
##### 5.6 Map spatial errors ####
# Rebuild the .summary column
check_space <- BeeBDC::summaryFun(
data = check_space,
dontFilterThese = NULL,
removeFilterColumns = FALSE,
filterClean = FALSE)
# Map ONE spatial flag at a time or map the .SUMMARY of all
# Make this selection in the col_to_map = section
check_space %>%
dplyr::filter(.summary == FALSE) %>% # map only records flagged as FALSE
bdc::bdc_quickmap(
data = .,
lon = "decimalLongitude",
lat = "decimalLatitude",
col_to_map = ".summary",
size = 0.9
)
##### 5.7 Space report ####
# Create the report
(report <-
bdc::bdc_create_report(
data = dplyr::tibble(check_space %>% dplyr::select(!.uncer_terms)),
database_id = "database_id",
workflow_step = "space",
save_report = TRUE)
)
##### 5.8 Space figures ####
# Sadly not a figure of outer space :(
# Create figures of spatial data filtering
(figures <-
BeeBDC::jbd_create_figures(
data = dplyr::tibble(check_space %>% dplyr::select(!.uncer_terms)),
path = DataPath,
database_id = "database_id",
workflow_step = "space",
save_figures = TRUE)
)
# Check figures using
# options are:
# .cap == Records around country capital centroid
# .cen == Records around country or province centroids
# .dbl == Duplicated coordinates per species
# .equ == Identical coordinates
# .otl == Geographical outliers
# .gbf == Records around the GBIF headquarters
# .inst == Records around biodiversity institutions
# .rou == Rounded (probably imprecise) coordinates
# .urb == Records within urban areas — Not relevant for bees, I think.
# Example:
figures$.rou
# Save interim dataset
check_space %>%
readr::write_excel_csv(paste(OutPath_Intermediate, "03_space_inter_database.csv",
sep = "/"))
##### 5.9 Save flags ####
# SAVE the flags so far
BeeBDC::flagRecorder(
data = check_space,
outPath = paste(OutPath_Report, sep =""),
fileName = paste0("flagsRecorded_", Sys.Date(), ".csv"),
idColumns = c("database_id", "id", "catalogNumber", "occurrenceID", "dataSource"),
append = TRUE,
printSummary = TRUE)
##### 5.10 Save ####
# Save the intermediate dataset
check_space %>%
readr::write_excel_csv(.,
paste(OutPath_Intermediate, "03_space_database.csv",
sep = "/"))
#### 6.0 Time ####
# Read in the last database
if(!exists("check_space")){
check_time <-
readr::read_csv(paste(OutPath_Intermediate, "03_space_database.csv", sep = "/"),
col_types = BeeBDC::ColTypeR())
}else{
check_time <- check_space
# Remove the spent file
rm(check_space)}
# You can plot a histogram of dates here, pre-cleaning to examine potential issues
hist(lubridate::ymd_hms(check_time$eventDate, truncated = 5), breaks = 20)
# Filter some silly dates that don't make sense...
check_time$year <- ifelse(check_time$year > lubridate::year(Sys.Date()) | check_time$year < 1600,
NA, check_time$year)
check_time$month <- ifelse(check_time$month > 12 | check_time$month < 1,
NA, check_time$month)
check_time$day <- ifelse(check_time$day > 31 | check_time$day < 1,
NA, check_time$day)
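# The same clean-up could be written as a single dplyr::mutate() call if preferred. This is a
# sketch only and assumes year, month, and day are numeric (as set by BeeBDC::ColTypeR()):
# check_time <- check_time %>%
#   dplyr::mutate(
#     year  = dplyr::if_else(year > lubridate::year(Sys.Date()) | year < 1600, NA_real_, year),
#     month = dplyr::if_else(month > 12 | month < 1, NA_real_, month),
#     day   = dplyr::if_else(day > 31 | day < 1, NA_real_, day))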
##### 6.1 Recover dates ####
# RESCUE some records with poor date data if possible — e.g., from other columns
check_time <- BeeBDC::dateFindR(data = check_time,
# Years above this are removed (from the recovered dates only)
maxYear = lubridate::year(Sys.Date()),
# Years below this are removed (from the recovered dates only)
minYear = 1700)
##### 6.2 No eventDate ####
# Flag records that simply lack collection date :(
check_time <-
bdc::bdc_eventDate_empty(data = check_time, eventDate = "eventDate")
##### 6.3 Old records ####
# This will flag records prior to the date selected. 1950 is frequently chosen for SDM work. You may
# not need to filter old records at all. Please just think critically about your use
check_time <-
bdc::bdc_year_outOfRange(data = check_time,
eventDate = "year",
year_threshold = 1950)
##### 6.4 Time report ####
# Not all of it, just the time pertaining to our precise occurrence records. Obviously...
# Create a .summary column with all of the time flags where TRUE == records that passed
# all filtering.
check_time <- BeeBDC::summaryFun(
data = check_time,
# Don't filter these columns (or NULL)
dontFilterThese = c(".gridSummary", ".lonFlag", ".latFlag", ".uncer_terms"),
# Remove the filtering columns?
removeFilterColumns = FALSE,
# Filter to ONLY cleaned data?
filterClean = FALSE)
( report <-
bdc::bdc_create_report(data = check_time,
database_id = "database_id",
workflow_step = "time",
save_report = FALSE)
)
##### 6.5 Time figures ####
# Create figures
figures <-
BeeBDC::jbd_create_figures(data = check_time,
path = DataPath,
database_id = "database_id",
workflow_step = "time",
save_figures = TRUE)
# Check figures using
figures$year
# Save the ~raw time dataset into the intermediate folder
check_time %>%
readr::write_excel_csv(.,
paste(OutPath_Intermediate, "04_time_database.csv",
sep = "/"))
##### 6.6 Save flags ####
# SAVE the flags so far
BeeBDC::flagRecorder(
data = check_time,
outPath = paste(OutPath_Report, sep =""),
fileName = paste0("flagsRecorded_", Sys.Date(), ".csv"),
idColumns = c("database_id", "id", "catalogNumber", "occurrenceID", "dataSource"),
append = TRUE,
printSummary = TRUE)
#### 7.0 De-duplication ####
# Raw dataset can be re-read here if it does not already exist
if(!exists("check_time")){
check_time <-
readr::read_csv(paste(OutPath_Intermediate, "04_time_database.csv",
sep = "/"),
col_types = BeeBDC::ColTypeR())}
##### 7.1 deDuplicate ####
# We will FLAG duplicates here.
# These input columns can be hacked to de-duplicate as you wish.
check_time <- BeeBDC::dupeSummary(
data = check_time,
path = OutPath_Report,
# options are "ID","collectionInfo", or "both"
duplicatedBy = "collectionInfo",
  # The columns to generate completeness info from (and to sort by completeness)
completeness_cols = c("decimalLatitude", "decimalLongitude",
"scientificName", "eventDate"),
# idColumns = c("gbifID", "occurrenceID", "recordId","id"),
# The columns to ADDITIONALLY consider when finding duplicates in collectionInfo
collectionCols = c("decimalLatitude", "decimalLongitude", "scientificName", "eventDate",
"recordedBy"),
# The columns to combine, one-by-one with the collectionCols
collectInfoColumns = c("catalogNumber", "otherCatalogNumbers"),
# Custom comparisons — as a list of columns to compare
# RAW custom comparisons do not use the character and number thresholds
CustomComparisonsRAW = dplyr::lst(c("catalogNumber", "institutionCode", "scientificName")),
# Other custom comparisons use the character and number thresholds
CustomComparisons = dplyr::lst(c("gbifID", "scientificName"),
c("occurrenceID", "scientificName"),
c("recordId", "scientificName"),
c("id", "scientificName")),
# The order in which you want to KEEP duplicated based on data source
# try unique(check_time$dataSource)
sourceOrder = c("CAES", "Gai", "Ecd","BMont", "BMin", "EPEL", "ASP", "KP", "EcoS", "EaCO",
"FSCA", "Bal", "SMC", "Lic", "Arm",
"USGS", "ALA", "VicWam", "GBIF","SCAN","iDigBio"),
# Paige ordering is done using the database_id prefix, not the dataSource prefix.
prefixOrder = c("Paige", "Dorey"),
# Set the complexity threshold for id letter and number length
# minimum number of characters when WITH the numberThreshold
characterThreshold = 2,
# minimum number of numbers when WITH the characterThreshold
numberThreshold = 3,
# Minimum number of numbers WITHOUT any characters
numberOnlyThreshold = 5
) %>% # END dupeSummary
dplyr::as_tibble(col_types = BeeBDC::ColTypeR())
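# Optional sketch: tabulate the duplicate flag. This assumes dupeSummary appends a ".duplicates"
# column in which FALSE marks duplicates, following the flag convention used above.
if (".duplicates" %in% colnames(check_time)) table(check_time$.duplicates, useNA = "always")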
# Save the dataset into the intermediate folder
check_time %>%
readr::write_excel_csv(.,
paste(OutPath_Intermediate, "04_2_dup_database.csv",
sep = "/"))
##### 7.2 Save flags ####
# SAVE the flags so far
BeeBDC::flagRecorder(
data = check_time,
outPath = paste(OutPath_Report, sep =""),
fileName = paste0("flagsRecorded_", Sys.Date(), ".csv"),
idColumns = c("database_id", "id", "catalogNumber", "occurrenceID", "dataSource"),
append = TRUE,
printSummary = TRUE)
#### 8.0 Data filtering ####
# Raw dataset can be re-read here if it does not already exist
if(!exists("check_time")){
check_time <-
readr::read_csv(paste(OutPath_Intermediate, "04_2_dup_database.csv",
sep = "/"), col_types = ColTypeR())}
##### 8.1 rm Outliers ####
# Read in the most-recent duplicates file as well
if(!exists("duplicates")){
duplicates <- fileFinder(path = DataPath,
fileName = "duplicateRun_") %>%
readr::read_csv()}
# identify the outliers and get a list of their database_ids
check_time <- manualOutlierFindeR(
data = check_time,
DataPath = DataPath,
PaigeOutliersName = "removedBecauseDeterminedOutlier.csv",
newOutliersName = "^All_outliers_ANB_23Jan24.xlsx",
ColombiaOutliers_all = "All_Colombian_OutlierIDs.csv",
# A .csv with manual outlier records that are too close to otherwise TRUE records
NearTRUE = "nearTRUE.csv",
duplicates = duplicates)
##### 8.2 Save uncleaned ####
# Make sure that the .summary column is updated
check_time <- summaryFun(
data = check_time,
dontFilterThese = c(".gridSummary", ".lonFlag", ".latFlag", ".uncer_terms",
".uncertaintyThreshold", ".sequential", ".year_outOfRange"),
removeFilterColumns = FALSE,
filterClean = FALSE)
# Save the uncleaned dataset
check_time %>% readr::write_excel_csv(.,
paste(OutPath_Intermediate, "/05_unCleaned_database_",
Sys.Date(), ".csv",
sep = ""))
##### 8.3 Save cleaned ####
# Now clean the dataset of extra columns and failed rows and save it...
cleanData <- BeeBDC::summaryFun(
data = check_time,
dontFilterThese = c(".gridSummary", ".lonFlag", ".latFlag", ".uncer_terms",
".uncertaintyThreshold", ".sequential", ".year_outOfRange"),
# Remove the filtering columns?
removeFilterColumns = TRUE,
# Filter to ONLY cleaned data?
filterClean = TRUE)
# Save this CLEANED dataset
cleanData %>%
readr::write_excel_csv(.,
paste(OutPath_Intermediate, "/05_cleaned_database_",
Sys.Date(), ".csv",
sep = ""))
# Dataset can be re-read here
# cleanData <-
# readr::read_csv(paste(OutPath_Intermediate, "05_cleaned_database.csv",
# sep = "/"))
#### 9.0 Summary figures and tables ####
##### 9.1 Duplicate chordDiagrams ####
# install ComplexHeatmap if needed
if (!require("BiocManager", quietly = TRUE))
install.packages("BiocManager")
BiocManager::install("ComplexHeatmap")
# Read in the most-RECENT duplicates file.
if(!exists("duplicates")){
duplicates <- fileFinder(path = DataPath,
fileName = "duplicateRun_") %>%
readr::read_csv()}
# Choose the global figure parameters
par(mar = c(2, 2, 2, 2)/2, mfrow = c(1,1))
# Create the chordDiagram. You can leave many of the below values out but we show here
# the defaults
BeeBDC::chordDiagramR(
# The duplicate data from the dupeSummary function output
dupeData = duplicates,
outPath = OutPath_Figures,
fileName = "Fig2_ChordDiagram.pdf",
# These can be modified to help fit the final pdf that's exported.
width = 9,
height = 7.5,
bg = "white",
# How few distinct dataSources should a group have to be listed as "other"
smallGrpThreshold = 3,
title = "Duplicated record sources",
  # The default list of colour palettes to choose from using the paletteer package
palettes = c("cartography::blue.pal", "cartography::green.pal",
"cartography::sand.pal", "cartography::orange.pal", "cartography::red.pal",
"cartography::purple.pal", "cartography::brown.pal"),
canvas.ylim = c(-1.0,1.0),
canvas.xlim = c(-0.6, 0.25),
text.col = "black",
legendX = grid::unit(6, "mm"),
legendY = grid::unit(18, "mm"),
legendJustify = c("left", "bottom"),
niceFacing = TRUE)
##### 9.2 Duplicate histogram ####
# Use the uncleaned dataset (read it in, or change its name to beeData)
if(!exists("check_time")){
beeData <- readr::read_csv(paste(OutPath_Intermediate, "05_unCleaned_database.csv",
sep = "/"),
col_types = BeeBDC::ColTypeR())
}else{
beeData <- check_time
rm(check_time)
}
# Create a figure showing the total number of duplicates, kept duplicates, and unique
# records for each data source (simplified to the text before the first underscore) and
# the proportion of the above for each data source
BeeBDC::dupePlotR(
data = beeData,
# The outPath to save the plot as
outPath = OutPath_Figures,
fileName = "Fig3_duplicatePlot.pdf",
# Colours in order: duplicate, kept duplicate, unique
dupeColours = c("#F2D2A2","#B9D6BC", "#349B90"),
# Plot size and height
base_height = 7, base_width = 7,
legend.position = c(0.85, 0.8),
# Extra variables can be fed into forcats::fct_recode() to change names on plot
GBIF = "GBIF", SCAN = "SCAN", iDigBio = "iDigBio", USGS = "USGS", ALA = "ALA",
ASP = "ASP", CAES = "CAES", 'BMont' = "BMont", 'BMin' = "BMin", Ecd = "Ecd",
Gaiarsa = "Gai", EPEL = "EPEL", VicWam = "VicWam"
)
##### 9.3 Flags by source ####
# Visualise all flags for each dataSource (simplified to the text before the first underscore)
BeeBDC::plotFlagSummary(
data = beeData,
# Colours in order of pass (TRUE), fail (FALSE), and NA
flagColours = c("#127852", "#A7002D", "#BDBABB"),
fileName = paste0("Fig4_FlagsPlot_", Sys.Date(),".pdf"),
outPath = paste0(OutPath_Figures),
width = 15, height = 9,
# OPTIONAL:
# # Filter to species
# speciesName = "Holcopasites heliopsis",
# # column to look in
# nameColumn = "species",
# # Save the filtered data
# saveFiltered = TRUE,
# # Filter column to display on map
# filterColumn = ".summary",
# plotMap = TRUE,
# # amount to jitter points if desired, e.g. 0.25 or NULL
# jitterValue = NULL,
# # Map opacity value for points between 0 and 1
# mapAlpha = 1,
# # If a user wants to output the table used to make the figure, change this to TRUE
# saveTable = FALSE,
# Extra variables can be fed into forcats::fct_recode() to change names on plot
GBIF = "GBIF", SCAN = "SCAN", iDigBio = "iDigBio", USGS = "USGS", ALA = "ALA",
ASP = "ASP", CAES = "CAES", 'BMont' = "BMont", 'BMin' = "BMin", Ecd = "Ecd",
Gaiarsa = "Gai", EPEL = "EPEL", VicWam = "VicWam"
)
##### 9.4 Maps ####
# Import CLEANED dataset (you can change this option)
if(!exists("cleanData")){
cleanData <- readr::read_csv(paste(OutPath_Intermediate, "05_cleaned_database.csv",
sep = "/"),
col_types = BeeBDC::ColTypeR())}
###### a. Summary maps ####
# Draw a global summary map for occurrence and species number by country
BeeBDC::summaryMaps(
data = cleanData,
width = 10, height = 10,
class_n = 15,
class_Style = "fisher",
fileName = "Fig5_CountryMaps_fisher.pdf",
outPath = OutPath_Figures,
scale = 110
)
###### b. Interactive maps ####
# Generate a list of random species names to map and check
beeData_interactive <- beeData %>%
# Select only valid species
dplyr::filter(.invalidName == TRUE) %>%
# Get a distinct list of valid species names
dplyr::distinct(scientificName, .keep_all = FALSE) %>%
# Select a random subset of species to map
  dplyr::slice_sample(n = 100)
# Make the interactive maps
BeeBDC::interactiveMapR(
# occurrence data
data = beeData %>%
# Select only those species
    dplyr::filter(scientificName %in% beeData_interactive$scientificName),
# Directory where to save files
  outPath = paste(OutPath_Figures, "interactiveMaps", sep = "/"),
# lat long columns
lon = "decimalLongitude",
lat = "decimalLatitude",
# Occurrence dataset column with species names
speciesColumn = "scientificName",
# Which species to map — a character vector of names or "ALL"
# Note: "ALL" is defined AFTER filtering for country
speciesList = "ALL",
countryList = NULL, # studyArea
# Point jitter to see stacked points — jitters an amount in decimal degrees
jitterValue = 0.01
)
##### 9.5 Data providers ####
# Read in the clean data if it's not already in the environment
if(!exists("cleanData")){
cleanData <- readr::read_csv(paste(OutPath_Intermediate, "05_cleaned_database.csv",
sep = "/"),
col_types = BeeBDC::ColTypeR(),
locale = readr::locale(encoding = "UTF-8"))}
institutionList_DL <- readxl::read_excel(paste(DiscLifePath, "Apoidea Bee Collections Master List jan 2023.xlsx",
sep = "/"))
# Create the table
dataProvTable <- BeeBDC::dataProvTables(data = cleanData,
runBeeDataChecks = TRUE,
outPath = OutPath_Report,
fileName = "dataProvTable.csv")
##### 9.6 Flag summary ####
# Produce a summary table of flags per species
summaryTable <- BeeBDC::flagSummaryTable(data = beeData,
column = "scientificName",
outPath = OutPath_Report,
fileName = "flagTable.csv",
percentThreshold = 0)
|
/scratch/gouwar.j/cran-all/cranData/BeeBDC/inst/BeeBDC_fullWorkflow.R
|
## ----libraryChunk, load-packages, include=FALSE-------------------------------
# markdown packages
library(rgnparser)
library(magrittr)
library(knitr)
library(rmarkdown)
library(rmdformats)
library(prettydoc)
library(htmltools)
library(pkgdown)
# Load core packages
library(devtools)
library(BiocManager)
library(purrr)
library(here)
library(renv)
library(bdc)
library(CoordinateCleaner)
library(dplyr)
library(readr)
library(stringr)
library(lubridate)
library(tidyselect)
library(R.utils)
library(tidyr)
library(ggplot2)
library(forcats)
library(emld)
library(rlang)
library(xml2)
library(mgsub)
library(rvest)
library(rnaturalearth)
library(rnaturalearthdata)
library(countrycode)
library(janitor)
library(circlize)
library(paletteer)
library(cowplot)
library(igraph)
library(ggspatial)
library(sf)
library(parallel)
library(terra)
# Don't detect cores to avoid GitHub error
old <- options() # code line i
on.exit(options(old)) # code line i+1
options(mc.cores = parallel::detectCores())
## ----secretRootPath, include=FALSE--------------------------------------------
# Set the RootPath to tempdir
RootPath <- tempdir()
if (!dir.exists(paste0(RootPath, "/Data_acquisition_workflow"))) {
dir.create(paste0(RootPath, "/Data_acquisition_workflow"), recursive = TRUE)
}
## ----global-options, include=FALSE--------------------------------------------
knitr::opts_chunk$set(error = TRUE,
eval = TRUE,
tidy = TRUE,
warning = FALSE,
root.dir = normalizePath(tempdir()))
## ----falseRootPath, eval=FALSE------------------------------------------------
# RootPath <- paste0("/your/path/here")
## ----CreateRootPath, warning=FALSE, collapse = TRUE---------------------------
# Create the working directory in the RootPath if it doesn't exist already
if (!dir.exists(paste0(RootPath, "/Data_acquisition_workflow"))) {
dir.create(paste0(RootPath, "/Data_acquisition_workflow"), recursive = TRUE)
}
# Set the working directory
setwd(paste0(RootPath,"/Data_acquisition_workflow"))
## ----activate, collapse = TRUE------------------------------------------------
renv::activate(project = paste0(RootPath,"/Data_acquisition_workflow"))
## ----installPackages, message=FALSE, warning=FALSE, results=FALSE, collapse = TRUE, eval = FALSE----
# if (!require("BiocManager", quietly = TRUE))
# install.packages("BiocManager", repos = "http://cran.us.r-project.org")
#
# BiocManager::install("ComplexHeatmap")
## ----rnaturalearthhires, eval=FALSE-------------------------------------------
# # Install remotes if needed
# if (!require("remotes", quietly = TRUE))
# install.packages("remotes", repos = "http://cran.us.r-project.org")
# # Download and then load rnaturalearthhires
# remotes::install_github("ropensci/rnaturalearthhires")
# install.packages("rnaturalearthhires", repos = "https://ropensci.r-universe.dev", type = "source")
# library(rnaturalearthhires)
## ----installBeeBDC, results=TRUE, message=TRUE, eval = FALSE, collapse = TRUE----
# install.packages("BeeBDC")
# library(BeeBDC)
## ----snapshot, collapse = TRUE------------------------------------------------
renv::snapshot(project = paste0(RootPath,"/Data_acquisition_workflow"),
prompt = FALSE)
## ----dirMaker, collapse = TRUE, eval = FALSE----------------------------------
# BeeBDC::dirMaker(
# RootPath = RootPath,
# RDoc = "vignettes/BeeBDC_main.Rmd") %>%
# # Add paths created by this function to the environment()
# list2env(envir = parent.env(environment()))
## ----dirMakerSECRETELY, include = FALSE---------------------------------------
# For the sake of this tutorial, we will not use here::i_am in dirMaker, because we aren't allowed
# to mess with package directories in this way. This is a work-around that uses the tempdir()
DataPath <- paste0(RootPath, "/Data_acquisition_workflow")
OutPath_Check <- paste0(RootPath, "/Data_acquisition_workflow/Output/Check")
OutPath_Figures <- paste0(RootPath, "/Data_acquisition_workflow/Output/Figures")
OutPath_Intermediate <- paste0(RootPath, "/Data_acquisition_workflow/Output/Intermediate")
OutPath_Report <- paste0(RootPath, "/Data_acquisition_workflow/Output/Report")
# Create these files
if (!dir.exists(DataPath)) {
dir.create(DataPath, recursive = TRUE)}
if (!dir.exists(OutPath_Check)) {
dir.create(OutPath_Check, recursive = TRUE)}
if (!dir.exists(OutPath_Figures)) {
dir.create(OutPath_Figures, recursive = TRUE)}
if (!dir.exists(OutPath_Intermediate)) {
dir.create(OutPath_Intermediate, recursive = TRUE)}
if (!dir.exists(OutPath_Report)) {
dir.create(OutPath_Report, recursive = TRUE)}
## ----lapply_library, results=FALSE, collapse = TRUE---------------------------
lapply(c("ComplexHeatmap", "magrittr"),
library, character.only = TRUE)
## ----3.0, collapse = TRUE-----------------------------------------------------
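# Load the two small example datasets shipped with BeeBDC and combine them into a single
# starting dataset for this workflow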
data("bees3sp", package = "BeeBDC")
data("beesRaw", package = "BeeBDC")
db_standardized <- dplyr::bind_rows(beesRaw,
# Only keep a subset of columns from bees3sp
bees3sp %>% dplyr::select(tidyselect::all_of(colnames(beesRaw)), countryCode))
## ----3.1, collapse = TRUE-----------------------------------------------------
check_pf <- bdc::bdc_scientificName_empty(
data = db_standardized,
sci_name = "scientificName")
# now that this is saved, remove it to save space in memory
rm(db_standardized)
## ----3.2, collapse = TRUE-----------------------------------------------------
check_pf <- bdc::bdc_coordinates_empty(
data = check_pf,
lat = "decimalLatitude",
lon = "decimalLongitude")
## ----3.3, collapse = TRUE-----------------------------------------------------
check_pf <- bdc::bdc_coordinates_outOfRange(
data = check_pf,
lat = "decimalLatitude",
lon = "decimalLongitude")
## ----3.4, collapse = TRUE-----------------------------------------------------
check_pf <- bdc::bdc_basisOfRecords_notStandard(
data = check_pf,
basisOfRecord = "basisOfRecord",
names_to_keep = c(
# Keep all plus some at the bottom.
"Event",
"HUMAN_OBSERVATION",
"HumanObservation",
"LIVING_SPECIMEN",
"LivingSpecimen",
"MACHINE_OBSERVATION",
"MachineObservation",
"MATERIAL_SAMPLE",
"O",
"Occurrence",
"MaterialSample",
"OBSERVATION",
"Preserved Specimen",
"PRESERVED_SPECIMEN",
"preservedspecimen Specimen",
"Preservedspecimen",
"PreservedSpecimen",
"preservedspecimen",
"S",
"Specimen",
"Taxon",
"UNKNOWN",
"",
NA,
"NA",
"LITERATURE",
"None", "Pinned Specimen", "Voucher reared", "Emerged specimen"
))
## ----3.5a, collapse = TRUE----------------------------------------------------
check_pf_noNa <- BeeBDC::countryNameCleanR(
data = check_pf,
# Create a Tibble of common issues in country names and their replacements
commonProblems = dplyr::tibble(problem = c('U.S.A.', 'US','USA','usa','UNITED STATES',
'United States','U.S.A','MX','CA','Bras.','Braz.',
'Brasil','CNMI','USA TERRITORY: PUERTO RICO'),
fix = c('United States of America','United States of America',
'United States of America','United States of America',
'United States of America','United States of America',
'United States of America','Mexico','Canada','Brazil',
'Brazil','Brazil','Northern Mariana Islands','PUERTO.RICO'))
)
## ----3.5b, message=FALSE, warning=FALSE, collapse = TRUE----------------------
suppressWarnings(
countryOutput <- BeeBDC::jbd_CfC_chunker(data = check_pf_noNa,
lat = "decimalLatitude",
lon = "decimalLongitude",
country = "country",
# How many rows to process at a time
stepSize = 1000000,
# Start row
chunkStart = 1,
path = OutPath_Intermediate,
# Normally, please use scale = "large"
scale = "medium",
mc.cores = 1),
classes = "warning")
## ----3.5ci, collapse = TRUE---------------------------------------------------
check_pf <- dplyr::left_join(check_pf,
countryOutput,
by = "database_id",
suffix = c("", "CO")) %>%
# Take the new country name if the original is NA
dplyr::mutate(country = dplyr::if_else(is.na(country),
countryCO,
country)) %>%
# Remove duplicates if they arose from left_join!
dplyr::distinct()
## ----3.5cii, eval = FALSE, collapse = TRUE------------------------------------
# check_pf %>%
# readr::write_excel_csv(.,
# paste(OutPath_Intermediate, "01_prefilter_database.csv",
# sep = "/"))
## ----3.5ciii, eval = FALSE, collapse = TRUE-----------------------------------
# if(!exists("check_pf")){
# check_pf <- readr::read_csv(paste(DataPath,
# "Output", "Intermediate", "01_prefilter_database.csv", sep = "/"),
# col_types = BeeBDC::ColTypeR())}
## ----3.5civ, collapse = TRUE--------------------------------------------------
rm(check_pf_noNa, countryOutput)
## ----3.6, collapse = TRUE-----------------------------------------------------
# Standardise country names and add ISO2 codes if needed
check_pf <- bdc::bdc_country_standardized(
  # Remove the countryCode and country_suggested columns to avoid an error
# where two "countryCode" and "country_suggested" columns exist (i.e. if the dataset has been
# run before)
data = check_pf %>% dplyr::select(!tidyselect::any_of(c("countryCode", "country_suggested"))),
country = "country"
)
## ----3.7, message=FALSE, warning=FALSE, collapse = TRUE-----------------------
check_pf <- BeeBDC::jbd_Ctrans_chunker(
# bdc_coordinates_transposed inputs
data = check_pf,
id = "database_id",
lat = "decimalLatitude",
lon = "decimalLongitude",
country = "country",
countryCode = "countryCode",
border_buffer = 0.2, # in decimal degrees (~22 km at the equator)
save_outputs = TRUE,
sci_names = "scientificName",
# chunker inputs
stepSize = 1000000, # How many rows to process at a time
chunkStart = 1, # Start row
append = FALSE, # If FALSE it may overwrite existing dataset
progressiveSave = FALSE,
# In a normal run, please use scale = "large"
scale = "medium",
path = OutPath_Check,
mc.cores = 1
)
## ----3.7ii, eval = FALSE, collapse = TRUE-------------------------------------
# table(check_pf$coordinates_transposed, useNA = "always")
## ----3.7iii, eval = FALSE, collapse = TRUE------------------------------------
# check_pf %>%
# readr::write_excel_csv(.,
# paste(OutPath_Intermediate, "01_prefilter_database.csv",
# sep = "/"))
## ----3.7iv, eval = FALSE, collapse = TRUE-------------------------------------
# if(!exists("check_pf")){
# check_pf <- readr::read_csv(paste(OutPath_Intermediate, "01_prefilter_database.csv",
# sep = "/"), col_types = BeeBDC::ColTypeR())}
## ----3.8, collapse = TRUE-----------------------------------------------------
check_pf <- BeeBDC::jbd_coordCountryInconsistent(
data = check_pf,
lon = "decimalLongitude",
lat = "decimalLatitude",
scale = 50,
pointBuffer = 0.01)
## ----3.8ii, eval = FALSE------------------------------------------------------
# check_pf %>%
# readr::write_excel_csv(.,
# paste(OutPath_Intermediate, "01_prefilter_database.csv",
# sep = "/"))
## ----3.9, eval = TRUE, collapse = TRUE----------------------------------------
xyFromLocality <- bdc::bdc_coordinates_from_locality(
data = check_pf,
locality = "locality",
lon = "decimalLongitude",
lat = "decimalLatitude",
save_outputs = FALSE
)
## ----3.9ii, eval = FALSE------------------------------------------------------
# # Save the resultant data
# xyFromLocality %>% readr::write_excel_csv(paste(OutPath_Check, "01_coordinates_from_locality.csv",
# sep = "/"))
## ----3.9iii, eval = FALSE-----------------------------------------------------
# rm(xyFromLocality)
## ----3.10, collapse = TRUE----------------------------------------------------
check_pf <- BeeBDC::flagAbsent(data = check_pf,
PresAbs = "occurrenceStatus")
## ----3.11, collapse = TRUE----------------------------------------------------
check_pf <- BeeBDC::flagLicense(data = check_pf,
strings_to_restrict = "all",
# DON'T flag if in the following dataSource(s)
excludeDataSource = NULL)
## ----3.12, collapse = TRUE----------------------------------------------------
check_pf <- BeeBDC::GBIFissues(data = check_pf,
issueColumn = "issue",
GBIFflags = c("COORDINATE_INVALID", "ZERO_COORDINATE"))
## ----3.13a, eval = FALSE------------------------------------------------------
# flagFile <- BeeBDC::flagRecorder(
# data = check_pf,
# outPath = paste(OutPath_Report, sep =""),
# fileName = paste0("flagsRecorded_", Sys.Date(), ".csv"),
# # These are the columns that will be kept along with the flags
# idColumns = c("database_id", "id", "catalogNumber", "occurrenceID", "dataSource"),
# # TRUE if you want to find a file from a previous part of the script to append to
# append = FALSE)
#
## ----3.13b, collapse = TRUE---------------------------------------------------
check_pf <- BeeBDC::summaryFun(
data = check_pf,
# Don't filter these columns (or NULL)
dontFilterThese = NULL,
# Remove the filtering columns?
removeFilterColumns = FALSE,
# Filter to ONLY cleaned data?
filterClean = FALSE)
## ----3.13c, eval = FALSE------------------------------------------------------
# (report <- bdc::bdc_create_report(data = check_pf,
# database_id = "database_id",
# workflow_step = "prefilter",
# save_report = TRUE)
# )
## ----3.14, eval = FALSE-------------------------------------------------------
# check_pf %>%
# readr::write_excel_csv(., paste(OutPath_Intermediate, "01_prefilter_output.csv",
# sep = "/"))
## ----4.0, collapse = TRUE-----------------------------------------------------
if(!exists("check_pf")){
database <-
readr::read_csv( paste(OutPath_Intermediate, "01_prefilter_output.csv",
sep = "/"), col_types = BeeBDC::ColTypeR())
}else{
# OR rename and remove
database <- check_pf
# Remove spent dataset
rm(check_pf)}
## ----4.0ii, collapse = TRUE---------------------------------------------------
database <- database %>%
dplyr::select(!tidyselect::any_of("names_clean"))
## ----4.1, eval = FALSE, collapse = TRUE---------------------------------------
# parse_names <-
# bdc::bdc_clean_names(sci_names = database$scientificName, save_outputs = FALSE)
## ----4.1ii, collapse = TRUE, eval = FALSE-------------------------------------
# parse_names <-
# parse_names %>%
# dplyr::select(.uncer_terms, names_clean)
## ----4.1iii, collapse = TRUE--------------------------------------------------
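# Note: bind_cols() with a single argument returns `database` unchanged; in a full run (with the
# parse_names chunks above evaluated) the parse_names columns would be bound back onto the
# database here.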
database <- dplyr::bind_cols(database)
rm(parse_names)
## ----4.2, collapse = TRUE, eval = FALSE---------------------------------------
# taxonomyFile <- BeeBDC::beesTaxonomy()
## ----4.2secret, collapse = TRUE, eval = TRUE----------------------------------
# load in the small test dataset in the background
system.file("extdata", "testTaxonomy.rda", package="BeeBDC") |>
load()
# Rename the file
taxonomyFile <- testTaxonomy
rm(testTaxonomy)
## ----4.2ii, collapse = TRUE---------------------------------------------------
database <- BeeBDC::harmoniseR(path = DataPath, #The path to a folder that the output can be saved
taxonomy = taxonomyFile, # The formatted taxonomy file
data = database,
mc.cores = 1)
## ----4.2iii, collapse = TRUE--------------------------------------------------
rm(taxonomyFile)
## ----4.2iv, eval = FALSE, collapse = TRUE-------------------------------------
# database %>%
# readr::write_excel_csv(.,
# paste(DataPath, "Output", "Intermediate", "02_taxonomy_database.csv",
# sep = "/"))
## ----4.3, eval = FALSE, collapse = TRUE---------------------------------------
# flagFile <- BeeBDC::flagRecorder(
# data = database,
# outPath = paste(OutPath_Report, sep =""),
# fileName = paste0("flagsRecorded_", Sys.Date(), ".csv"),
# idColumns = c("database_id", "id", "catalogNumber", "occurrenceID", "dataSource"),
# append = TRUE,
# printSummary = TRUE)
## ----5.0, collapse = TRUE-----------------------------------------------------
if(!exists("database")){
database <-
readr::read_csv(paste(OutPath_Intermediate, "02_taxonomy_database.csv", sep = "/"),
col_types = BeeBDC::ColTypeR())}
## ----5.1, collapse = TRUE-----------------------------------------------------
check_space <-
BeeBDC::jbd_coordinates_precision(
data = database,
lon = "decimalLongitude",
lat = "decimalLatitude",
ndec = 2 # number of decimals to be tested
)
## ----5.1ii, collapse = TRUE---------------------------------------------------
rm(database)
## ----5.1iii, eval = FALSE, collapse = TRUE------------------------------------
# check_space %>%
# readr::write_excel_csv(.,
# paste(OutPath_Intermediate, "03_space_inter_database.csv",
# sep = "/"))
## ----5.2, eval = FALSE, collapse = TRUE---------------------------------------
# tempSpace <- check_space %>%
# dplyr::filter(!.coordinates_empty == FALSE) %>%
# dplyr::filter(!.coordinates_outOfRange == FALSE)
## ----5.2ii, message=TRUE, warning=FALSE, eval = FALSE, collapse = TRUE--------
# tempSpace <-
# CoordinateCleaner::clean_coordinates(
# x = tempSpace,
# lon = "decimalLongitude",
# lat = "decimalLatitude",
# species = "scientificName",
# countries = NULL, # Tests if coords are from x country. This is not needed.
# tests = c(
# "capitals", # records within 0.5 km of capitals centroids
# "centroids", # records within 1 km around country and province centroids
# "equal", # records with equal coordinates
# "gbif", # records within 1 km of GBIF headquarters. (says 1 degree in package, but code says 1000 m)
# "institutions", # records within 100m of zoo and herbaria
# "zeros" # records with coordinates 0,0
# # "seas" # Not flagged as this should be flagged by coordinate country inconsistent
# ),
# capitals_rad = 1000,
# centroids_rad = 500,
# centroids_detail = "both", # test both country and province centroids
# inst_rad = 100, # remove zoo and herbaria within 100m
# range_rad = 0,
# zeros_rad = 0.5,
# capitals_ref = NULL,
# centroids_ref = NULL,
# country_ref = NULL,
# country_refcol = "countryCode",
# inst_ref = NULL,
# range_ref = NULL,
# # seas_scale = 50,
# value = "spatialvalid" # result of tests are appended in separate columns
# ) %>%
# # Remove duplicate .summary column that can be replaced later and turn into a tibble
# dplyr::select(!tidyselect::starts_with(".summary")) %>%
# dplyr::tibble()
## ----5.2iii, eval = FALSE, collapse = TRUE------------------------------------
# check_space <- tempSpace %>%
# # Re-bind with the records that were removed earlier
# dplyr::bind_rows(check_space %>%
# dplyr::filter(.coordinates_empty == FALSE |
# .coordinates_outOfRange == FALSE) )
## ----5.2iv, eval = FALSE, collapse = TRUE-------------------------------------
# rm(tempSpace)
## ----5.2v, eval = FALSE, collapse = TRUE--------------------------------------
# check_space %>%
# readr::write_excel_csv(paste(OutPath_Intermediate, "03_space_inter_database.csv",
# sep = "/"))
## ----5.3, collapse = TRUE-----------------------------------------------------
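# Flag potential 'fill-down' errors: runs of sequential latitude or longitude values repeated
# within the grouping columns (see ?BeeBDC::diagonAlley for details)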
check_space <- BeeBDC::diagonAlley(
data = check_space,
# The minimum number of repeats needed for a sequence to be flagged
minRepeats = 6,
ndec = 3,
groupingColumns = c("eventDate", "recordedBy", "datasetName"),
mc.cores = 1)
## ----5.3ii, collapse = TRUE---------------------------------------------------
griddingDF <- check_space %>%
# Exclude NA lat and lon values
tidyr::drop_na(c("decimalLatitude", "decimalLongitude")) %>%
# Group by the dataset name
dplyr::group_by(datasetName) %>%
# Remove rows that aren't unique for lat and long
dplyr::distinct(decimalLongitude, decimalLatitude,
.keep_all = TRUE) %>%
# Find the groups with 4 or more occurrence records
dplyr::filter(dplyr::n() >= 4) %>%
dplyr::ungroup()
## ----5.3iii, eval = FALSE, collapse = TRUE------------------------------------
# gridded_datasets <- CoordinateCleaner::cd_round(
# x = griddingDF,
# lon = "decimalLongitude",
# lat = "decimalLatitude",
# ds = "datasetName",
# T1 = 7,
# min_unique_ds_size = 4,
# test = "both",
# value = "dataset",
# graphs = FALSE,
# verbose = TRUE,
# reg_out_thresh = 2,
# reg_dist_min = 0.1,
# reg_dist_max = 2
# ) %>%
# dplyr::tibble()
# # The griddingDF is no longer needed. remove it.
# rm(griddingDF)
## ----5.3iv, eval = FALSE, collapse = TRUE-------------------------------------
# check_space <- check_space %>%
# # Join the datasets
# dplyr::left_join(
# # Select the columns of interest
# dplyr::select(gridded_datasets, dataset, lon.flag, lat.flag, summary),
# by = c("datasetName" = "dataset")) %>%
#   # Make new columns with more-consistent naming and change the NA values to TRUE (not flagged)
# dplyr::mutate(.lonFlag = tidyr::replace_na(lon.flag, TRUE),
# .latFlag = tidyr::replace_na(lat.flag, TRUE),
# .gridSummary = tidyr::replace_na(summary, TRUE)) %>%
# # Remove old columns
# dplyr::select(!c(lon.flag, lat.flag, summary))
## ----5.3 v, eval = FALSE, collapse = TRUE-------------------------------------
# gridded_datasets %>%
# readr::write_excel_csv(paste(OutPath_Intermediate, "03_space_griddedDatasets.csv",
# sep = "/"))
## ----5.3vi, eval = FALSE, collapse = TRUE-------------------------------------
# rm(gridded_datasets)
## ----5.4, collapse = TRUE-----------------------------------------------------
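# Flag records whose coordinateUncertaintyInMeters exceeds the threshold below (1,000 m)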
check_space <- BeeBDC::coordUncerFlagR(data = check_space,
uncerColumn = "coordinateUncertaintyInMeters",
threshold = 1000)
## ----5.5, collapse = TRUE, eval = FALSE---------------------------------------
# checklistFile <- BeeBDC::beesChecklist()
## ----5.5secret, collapse = TRUE, eval = TRUE----------------------------------
# load in the small test dataset in the background
system.file("extdata", "testChecklist.rda", package="BeeBDC") |>
load()
# Rename the file
checklistFile <- testChecklist
rm(testChecklist)
## ----5.5ii, collapse = TRUE---------------------------------------------------
check_space <- BeeBDC::countryOutlieRs(checklist = checklistFile,
data = check_space,
keepAdjacentCountry = TRUE,
pointBuffer = 0.05,
# Scale of map to return, one of 110, 50, 10 OR 'small', 'medium', 'large'
# Smaller numbers will result in much longer calculation times.
# We have not attempted a scale of 10.
scale = 50,
mc.cores = 1)
## ----5.5iii, eval = FALSE, collapse = TRUE------------------------------------
# # A list of failed species-country combinations and their numbers can be output here
# check_space %>%
# dplyr::filter(.countryOutlier == FALSE) %>%
# dplyr::select(database_id, scientificName, country) %>%
# dplyr::group_by(scientificName) %>%
# dplyr::mutate(count_scientificName = n()) %>%
# dplyr::distinct(scientificName, country, .keep_all = TRUE) %>%
# readr::write_excel_csv(paste(OutPath_Intermediate, "03_space_failedCountryChecklist.csv",
# sep = "/"))
## ----5.6, eval = FALSE, collapse = TRUE---------------------------------------
# check_space <- BeeBDC::summaryFun(
# data = check_space,
# dontFilterThese = NULL,
# removeFilterColumns = FALSE,
# filterClean = FALSE)
## ----5.6ii, eval = FALSE, collapse = TRUE-------------------------------------
# check_space %>%
# dplyr::filter(.summary == FALSE) %>% # map only records flagged as FALSE
# bdc::bdc_quickmap(
# data = .,
# lon = "decimalLongitude",
# lat = "decimalLatitude",
# col_to_map = ".summary",
# size = 0.9
# )
#
## ----5.7, eval = FALSE, collapse = TRUE---------------------------------------
# (report <-
# bdc::bdc_create_report(
# data = dplyr::tibble(check_space %>% dplyr::select(!.uncer_terms)),
# database_id = "database_id",
# workflow_step = "space",
# save_report = TRUE)
# )
## ----5.8, eval = FALSE, collapse = TRUE---------------------------------------
# (figures <-
# BeeBDC::jbd_create_figures(
# data = dplyr::tibble(check_space %>% dplyr::select(!.uncer_terms)),
# path = DataPath,
# database_id = "database_id",
# workflow_step = "space",
# save_figures = TRUE)
# )
## ----5.8ii, eval = FALSE, collapse = TRUE-------------------------------------
# check_space %>%
# readr::write_excel_csv(paste(OutPath_Intermediate, "03_space_inter_database.csv",
# sep = "/"))
## ----5.9, eval = FALSE, collapse = TRUE---------------------------------------
# BeeBDC::flagRecorder(
# data = check_space,
# outPath = paste(OutPath_Report, sep =""),
# fileName = paste0("flagsRecorded_", Sys.Date(), ".csv"),
# idColumns = c("database_id", "id", "catalogNumber", "occurrenceID", "dataSource"),
# append = TRUE,
# printSummary = TRUE)
## ----5.10, eval = FALSE, collapse = TRUE--------------------------------------
# check_space %>%
# readr::write_excel_csv(.,
# paste(OutPath_Intermediate, "03_space_database.csv",
# sep = "/"))
## ----6.0, collapse = TRUE-----------------------------------------------------
if(!exists("check_space")){
check_time <-
readr::read_csv(paste(OutPath_Intermediate, "03_space_database.csv", sep = "/"),
col_types = BeeBDC::ColTypeR())
}else{
check_time <- check_space
# Remove the spent file
rm(check_space)}
## ----6.0ii, collapse = TRUE---------------------------------------------------
hist(lubridate::ymd_hms(check_time$eventDate, truncated = 5), breaks = 20,
main = "Histogram of eventDates")
## ----6.0iii, collapse = TRUE--------------------------------------------------
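# Set clearly invalid date components to NA: years in the future or before 1600, months outside
# 1-12, and days outside 1-31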
check_time$year <- ifelse(check_time$year > lubridate::year(Sys.Date()) | check_time$year < 1600,
NA, check_time$year)
check_time$month <- ifelse(check_time$month > 12 | check_time$month < 1,
NA, check_time$month)
check_time$day <- ifelse(check_time$day > 31 | check_time$day < 1,
NA, check_time$day)
## ----6.1, collapse = TRUE-----------------------------------------------------
check_time <- BeeBDC::dateFindR(data = check_time,
# Years above this are removed (from the recovered dates only)
maxYear = lubridate::year(Sys.Date()),
# Years below this are removed (from the recovered dates only)
minYear = 1700)
## ----6.2, collapse = TRUE-----------------------------------------------------
check_time <-
bdc::bdc_eventDate_empty(data = check_time, eventDate = "eventDate")
## ----6.3, collapse = TRUE-----------------------------------------------------
check_time <-
bdc::bdc_year_outOfRange(data = check_time,
eventDate = "year",
year_threshold = 1950)
## ----6.4, eval = TRUE, collapse = TRUE----------------------------------------
check_time <- BeeBDC::summaryFun(
data = check_time,
# Don't filter these columns (or NULL)
dontFilterThese = c(".gridSummary", ".lonFlag", ".latFlag", ".uncer_terms"),
# Remove the filtering columns?
removeFilterColumns = FALSE,
# Filter to ONLY cleaned data?
filterClean = FALSE)
## ----6.4ii, eval = FALSE, collapse = TRUE-------------------------------------
# ( report <-
# bdc::bdc_create_report(data = check_time,
# database_id = "database_id",
# workflow_step = "time",
# save_report = FALSE)
# )
## ----6.5, eval = FALSE, collapse = TRUE---------------------------------------
# figures <-
# BeeBDC::jbd_create_figures(data = check_time,
# path = DataPath,
# database_id = "database_id",
# workflow_step = "time",
# save_figures = TRUE)
## ----6.5ii, eval = FALSE, collapse = TRUE-------------------------------------
# figures$year
## ----6.5iii, eval = FALSE, collapse = TRUE------------------------------------
# check_time %>%
# readr::write_excel_csv(.,
# paste(OutPath_Intermediate, "04_time_database.csv",
# sep = "/"))
## ----eval = FALSE, collapse = TRUE--------------------------------------------
# BeeBDC::flagRecorder(
# data = check_time,
# outPath = paste(OutPath_Report, sep =""),
# fileName = paste0("flagsRecorded_", Sys.Date(), ".csv"),
# idColumns = c("database_id", "id", "catalogNumber", "occurrenceID", "dataSource"),
# append = TRUE,
# printSummary = TRUE)
#
## ----7.0, eval = FALSE, collapse = TRUE---------------------------------------
# if(!exists("check_time")){
# check_time <-
# readr::read_csv(paste(OutPath_Intermediate, "04_time_database.csv",
# sep = "/"),
# col_types = BeeBDC::ColTypeR())}
## ----7.1, collapse = TRUE-----------------------------------------------------
check_time <- BeeBDC::dupeSummary(
data = check_time,
path = OutPath_Report,
# options are "ID","collectionInfo", or "both"
duplicatedBy = "collectionInfo",
# The columns to generate completeness info from (and to sort by completeness)
completeness_cols = c("decimalLatitude", "decimalLongitude",
"scientificName", "eventDate"),
# The columns to ADDITIONALLY consider when finding duplicates in collectionInfo
collectionCols = c("decimalLatitude", "decimalLongitude", "scientificName", "eventDate",
"recordedBy"),
# The columns to combine, one-by-one with the collectionCols
collectInfoColumns = c("catalogNumber", "otherCatalogNumbers"),
# Custom comparisons — as a list of columns to compare
# RAW custom comparisons do not use the character and number thresholds
CustomComparisonsRAW = dplyr::lst(c("catalogNumber", "institutionCode", "scientificName")),
# Other custom comparisons use the character and number thresholds
CustomComparisons = dplyr::lst(c("gbifID", "scientificName"),
c("occurrenceID", "scientificName"),
c("recordId", "scientificName"),
c("id", "scientificName")),
# The order in which you want to KEEP duplicated based on data source
# try unique(check_time$dataSource)
sourceOrder = c("CAES", "Gai", "Ecd","BMont", "BMin", "EPEL", "ASP", "KP", "EcoS", "EaCO",
"FSCA", "Bal", "SMC", "Lic", "Arm",
"USGS", "ALA", "VicWam", "GBIF","SCAN","iDigBio"),
# Paige ordering is done using the database_id prefix, not the dataSource prefix.
prefixOrder = c("Paige", "Dorey"),
# Set the complexity threshold for id letter and number length
# minimum number of characters when WITH the numberThreshold
characterThreshold = 2,
# minimum number of numbers when WITH the characterThreshold
numberThreshold = 3,
# Minimum number of numbers WITHOUT any characters
numberOnlyThreshold = 5
) %>% # END dupeSummary
dplyr::as_tibble(col_types = BeeBDC::ColTypeR())
## ----7.1ii, eval = FALSE, collapse = TRUE-------------------------------------
# check_time %>%
# readr::write_excel_csv(.,
# paste(OutPath_Intermediate, "04_2_dup_database.csv",
# sep = "/"))
## ----7.2, eval = FALSE, collapse = TRUE---------------------------------------
# BeeBDC::flagRecorder(
# data = check_time,
# outPath = paste(OutPath_Report, sep =""),
# fileName = paste0("flagsRecorded_", Sys.Date(), ".csv"),
# idColumns = c("database_id", "id", "catalogNumber", "occurrenceID", "dataSource"),
# append = TRUE,
# printSummary = TRUE)
## ----8.0, eval = FALSE, collapse = TRUE---------------------------------------
# if(!exists("check_time")){
# check_time <-
# readr::read_csv(paste(OutPath_Intermediate, "04_2_dup_database.csv",
# sep = "/"), col_types = ColTypeR())}
## ----8.1, eval = TRUE, collapse = TRUE----------------------------------------
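# If the duplicates dataset is not already in memory, find and read in the "duplicateRun_" file
# saved during the duplicate flagging step above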
if(!exists("duplicates")){
duplicates <- BeeBDC::fileFinder(path = DataPath,
fileName = "duplicateRun_") %>%
readr::read_csv()}
## ----8.2, eval = TRUE, collapse = TRUE----------------------------------------
# Make sure that the .summary column is updated
check_time <- BeeBDC::summaryFun(
data = check_time,
dontFilterThese = c(".gridSummary", ".lonFlag", ".latFlag", ".uncer_terms",
".uncertaintyThreshold"),
removeFilterColumns = FALSE,
filterClean = FALSE)
## ----8.2ii, eval = FALSE, collapse = TRUE-------------------------------------
# # Save the uncleaned dataset
# check_time %>% readr::write_excel_csv(.,
# paste(OutPath_Intermediate, "05_unCleaned_database.csv",
# sep = "/"))
## ----8.3, eval = TRUE, collapse = TRUE----------------------------------------
cleanData <- BeeBDC::summaryFun(
data = check_time,
dontFilterThese = c(".gridSummary", ".lonFlag", ".latFlag", ".uncer_terms",
".uncertaintyThreshold"),
# Remove the filtering columns?
removeFilterColumns = TRUE,
# Filter to ONLY cleaned data?
filterClean = TRUE)
## ----8.3ii, eval = FALSE, collapse = TRUE-------------------------------------
# # Save this CLEANED dataset
# cleanData %>% readr::write_excel_csv(.,
# paste(OutPath_Intermediate, "05_cleaned_database.csv",
# sep = "/"))
## ----9.1, message=FALSE, warning=FALSE, eval = FALSE, collapse = TRUE---------
# if (!require("BiocManager", quietly = TRUE))
# install.packages("BiocManager", repos = "http://cran.us.r-project.org")
# BiocManager::install("ComplexHeatmap")
## ----9.1ii, eval = TRUE, collapse = TRUE--------------------------------------
if(!exists("duplicates")){
duplicates <- BeeBDC::fileFinder(path = DataPath,
fileName = "duplicateRun_") %>%
readr::read_csv()}
## ----9.1on.exit, include = FALSE----------------------------------------------
oldpar <- par(no.readonly = TRUE)
on.exit(par(oldpar))
## ----9.1iii, eval = FALSE, collapse = TRUE------------------------------------
# par(mar = c(2, 2, 2, 2)/2, mfrow = c(1,1))
## ----9.1iv, eval=FALSE, fig.fullwidth=TRUE, fig.height=7.5, fig.width=9, collapse = TRUE----
# BeeBDC::chordDiagramR(
# # The duplicate data from the dupeSummary function output
# dupeData = duplicates,
# outPath = OutPath_Figures,
# fileName = "ChordDiagram.pdf",
# # These can be modified to help fit the final pdf that's exported.
# width = 9,
# height = 7.5,
# bg = "white",
# # How few distinct dataSources should a group have to be listed as "other"
# smallGrpThreshold = 3,
# title = "Duplicated record sources",
#   # The default list of colour palettes to choose from using the paletteer package
# palettes = c("cartography::blue.pal", "cartography::green.pal",
# "cartography::sand.pal", "cartography::orange.pal", "cartography::red.pal",
# "cartography::purple.pal", "cartography::brown.pal"),
# canvas.ylim = c(-1.0,1.0),
# canvas.xlim = c(-0.6, 0.25),
# text.col = "black",
# legendX = grid::unit(6, "mm"),
# legendY = grid::unit(18, "mm"),
# legendJustify = c("left", "bottom"),
# niceFacing = TRUE)
## ----9.2, eval = TRUE, collapse = TRUE----------------------------------------
if(!exists("check_time")){
beeData <- readr::read_csv(paste(OutPath_Intermediate, "05_unCleaned_database.csv",
sep = "/"),
col_types = BeeBDC::ColTypeR())
}else{
beeData <- check_time
rm(check_time)
}
## ----9.2ii, warning=FALSE, eval=TRUE, collapse = TRUE-------------------------
BeeBDC::dupePlotR(
data = beeData,
# The outPath to save the plot as
outPath = OutPath_Figures,
fileName = "duplicatePlot.pdf",
# Colours in order: duplicate, kept duplicate, unique
dupeColours = c("#F2D2A2","#B9D6BC", "#349B90"),
# Plot height and width
base_height = 7, base_width = 7,
legend.position = c(0.85, 0.8),
# Extra variables can be fed into forcats::fct_recode() to change names on plot
GBIF = "GBIF", SCAN = "SCAN", iDigBio = "iDigBio", USGS = "USGS", ALA = "ALA",
ASP = "ASP", CAES = "CAES", Ecd = "Ecd",
returnPlot = TRUE
)
## ----9.3, fig.width=15, fig.height=9, fig.fullwidth=TRUE, eval=TRUE, collapse = TRUE----
BeeBDC::plotFlagSummary(
data = beeData,
# Colours in order of pass (TRUE), fail (FALSE), and NA
flagColours = c("#127852", "#A7002D", "#BDBABB"),
fileName = paste0("FlagsPlot_", Sys.Date(),".pdf"),
outPath = paste0(OutPath_Figures),
width = 15, height = 9,
# OPTIONAL:
# # Filter to a single species
# speciesName = "Holcopasites heliopsis",
# # column to look in
# nameColumn = "species",
# # Save the filtered data
# saveFiltered = TRUE,
# # Filter column to display on map
# filterColumn = ".summary",
# plotMap = TRUE,
# # amount to jitter points if desired, e.g. 0.25 or NULL
# jitterValue = NULL,
# # Map opacity value for points between 0 and 1
# mapAlpha = 1,
# # If a user wants to output the table used to make the figure, change this to TRUE
# saveTable = FALSE,
# Extra variables can be fed into forcats::fct_recode() to change names on plot
GBIF = "GBIF", SCAN = "SCAN", iDigBio = "iDigBio", USGS = "USGS", ALA = "ALA",
ASP = "ASP", CAES = "CAES", 'BMont' = "BMont", 'BMin' = "BMin", Ecd = "Ecd",
Gaiarsa = "Gai", EPEL = "EPEL", VicWam = "VicWam",
returnPlot = TRUE
)
## ----9.4, eval = TRUE, collapse = TRUE----------------------------------------
if(!exists("cleanData")){
cleanData <- readr::read_csv(paste(OutPath_Intermediate, "05_cleaned_database.csv",
sep = "/"),
col_types = BeeBDC::ColTypeR())}
## ----9.4a, eval=FALSE, collapse = TRUE----------------------------------------
# BeeBDC::summaryMaps(
# data = cleanData,
# width = 10, height = 10,
# class_n = 3,
# class_Style = "fisher",
# fileName = "CountryMaps_fisher.pdf",
# outPath = OutPath_Figures,
# returnPlot = TRUE
# )
## ----9.4b, eval = FALSE, collapse = TRUE--------------------------------------
# BeeBDC::interactiveMapR(
# # occurrence data
# data = beeData,
# # Directory where to save files
# outPath = paste0(OutPath_Figures, "interactiveMaps", sep = "/"),
# lon = "decimalLongitude",
# lat = "decimalLatitude",
# # Occurrence dataset column with species names
# speciesColumn = "scientificName",
# # Which species to map — a character vector of names or "ALL"
# # Note: "ALL" is defined AFTER filtering for country
# speciesList = "ALL",
# countryList = NULL, # study area
# # Point jitter to see stacked points — jitters an amount in decimal degrees
# jitterValue = 0.01
# )
## ----9.5, eval = FALSE, collapse = TRUE---------------------------------------
# if(!exists("cleanData")){
# cleanData <- readr::read_csv(paste(OutPath_Intermediate, "05_cleaned_database.csv",
# sep = "/"),
# col_types = BeeBDC::ColTypeR(),
# locale = readr::locale(encoding = "UTF-8"))}
## ----9.5ii, eval = TRUE, collapse = TRUE--------------------------------------
# Note, if outPath = NULL then no file will be saved
dataProvTable <- BeeBDC::dataProvTables(data = cleanData,
runBeeDataChecks = TRUE,
outPath = NULL,
fileName = "dataProvTable.csv")
## ----9.6, eval = TRUE, collapse = TRUE----------------------------------------
# Note, if outPath = NULL then no file will be saved
summaryTable <- BeeBDC::flagSummaryTable(data = beeData,
column = "scientificName",
outPath = NULL,
fileName = "flagTable.csv")
## ----cleanup, include=FALSE, collapse = TRUE----------------------------------
# Remove the webpage folder
unlink(paste0(dirname(getwd()), "/inst/extdata/WebDir"), recursive = TRUE)
|
/scratch/gouwar.j/cran-all/cranData/BeeBDC/inst/doc/BeeBDC_main.R
|
---
title: "BeeBDC vignette"
output:
rmarkdown::html_vignette:
vignette: >
%\VignetteIndexEntry{BeeBDC vignette}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r libraryChunk, load-packages, include=FALSE}
# markdown packages
library(rgnparser)
library(magrittr)
library(knitr)
library(rmarkdown)
library(rmdformats)
library(prettydoc)
library(htmltools)
library(pkgdown)
# Load core packages
library(devtools)
library(BiocManager)
library(purrr)
library(here)
library(renv)
library(bdc)
library(CoordinateCleaner)
library(dplyr)
library(readr)
library(stringr)
library(lubridate)
library(tidyselect)
library(R.utils)
library(tidyr)
library(ggplot2)
library(forcats)
library(emld)
library(rlang)
library(xml2)
library(mgsub)
library(rvest)
library(rnaturalearth)
library(rnaturalearthdata)
library(countrycode)
library(janitor)
library(circlize)
library(paletteer)
library(cowplot)
library(igraph)
library(ggspatial)
library(sf)
library(parallel)
library(terra)
# Don't detect cores to avoid GitHub error
old <- options() # code line i
on.exit(options(old)) # code line i+1
options(mc.cores = parallel::detectCores())
```
```{r secretRootPath, include=FALSE}
# Set the RootPath to tempdir
RootPath <- tempdir()
if (!dir.exists(paste0(RootPath, "/Data_acquisition_workflow"))) {
dir.create(paste0(RootPath, "/Data_acquisition_workflow"), recursive = TRUE)
}
```
```{r global-options, include=FALSE}
knitr::opts_chunk$set(error = TRUE,
eval = TRUE,
tidy = TRUE,
warning = FALSE,
root.dir = normalizePath(tempdir()))
```
# 0.0 Script preparation
## 0.1 Working directory
Choose the path to the root folder in which all other folders can be found.
```{r falseRootPath, eval=FALSE}
RootPath <- paste0("/your/path/here")
```
```{r CreateRootPath, warning=FALSE, collapse = TRUE}
# Create the working directory in the RootPath if it doesn't exist already
if (!dir.exists(paste0(RootPath, "/Data_acquisition_workflow"))) {
dir.create(paste0(RootPath, "/Data_acquisition_workflow"), recursive = TRUE)
}
# Set the working directory
setwd(paste0(RootPath,"/Data_acquisition_workflow"))
```
The first time that you run BeeBDC, if you want to use the renv package to manage your
packages, you can install renv...
install.packages("renv", repos = "http://cran.us.r-project.org")
and then initialise the renv project.
renv::init(project = paste0(RootPath,"/Data_acquisition_workflow"))
If you have already initialised a project, you can instead just activate it.
```{r activate, collapse = TRUE}
renv::activate(project = paste0(RootPath,"/Data_acquisition_workflow"))
```
## 0.2 Install packages (if needed)
You may need to install gdal on your computer. This can be done on a Mac by using Homebrew in the terminal and the command "brew install gdal".
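If you are unsure whether gdal is already available, one optional check from R (a minimal sketch; it assumes the sf package is installed, as in the setup chunk above) is to ask sf which versions of its external libraries, including GDAL, it was built against.
```{r checkGDAL, eval=FALSE}
# Report the GEOS, GDAL, and PROJ versions visible to sf
sf::sf_extSoftVersion()
```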
To start out, you will need to install **BiocManager**, **devtools**, **ComplexHeatmap**, and **rnaturalearthhires** to then install and fully use **BeeBDC**.
```{r installPackages, message=FALSE, warning=FALSE, results=FALSE, collapse = TRUE, eval = FALSE}
if (!require("BiocManager", quietly = TRUE))
install.packages("BiocManager", repos = "http://cran.us.r-project.org")
BiocManager::install("ComplexHeatmap")
```
```{r rnaturalearthhires, eval=FALSE}
# Install remotes if needed
if (!require("remotes", quietly = TRUE))
install.packages("remotes", repos = "http://cran.us.r-project.org")
# Download and then load rnaturalearthhires
remotes::install_github("ropensci/rnaturalearthhires")
install.packages("rnaturalearthhires", repos = "https://ropensci.r-universe.dev", type = "source")
library(rnaturalearthhires)
```
Now install **BeeBDC**.
```{r installBeeBDC, results=TRUE, message=TRUE, eval = FALSE, collapse = TRUE}
install.packages("BeeBDC")
library(BeeBDC)
```
Snapshot the renv environment.
```{r snapshot, collapse = TRUE}
renv::snapshot(project = paste0(RootPath,"/Data_acquisition_workflow"),
prompt = FALSE)
```
Set up the directories used by BeeBDC. These directories include where the data, figures, reports, etc. will be saved. The RDoc needs to be a path RELATIVE to the RootPath; i.e., the file path from which the two diverge.
```{r dirMaker, collapse = TRUE, eval = FALSE}
BeeBDC::dirMaker(
RootPath = RootPath,
RDoc = "vignettes/BeeBDC_main.Rmd") %>%
# Add paths created by this function to the environment()
list2env(envir = parent.env(environment()))
```
```{r dirMakerSECRETELY, include = FALSE}
# For the sake of this tutorial, we will not use here::i_am in dirMaker, because we aren't allowed
# to mess with package directories in this way. This is a work-around that uses the tempdir()
DataPath <- paste0(RootPath, "/Data_acquisition_workflow")
OutPath_Check <- paste0(RootPath, "/Data_acquisition_workflow/Output/Check")
OutPath_Figures <- paste0(RootPath, "/Data_acquisition_workflow/Output/Figures")
OutPath_Intermediate <- paste0(RootPath, "/Data_acquisition_workflow/Output/Intermediate")
OutPath_Report <- paste0(RootPath, "/Data_acquisition_workflow/Output/Report")
# Create these files
if (!dir.exists(DataPath)) {
dir.create(DataPath, recursive = TRUE)}
if (!dir.exists(OutPath_Check)) {
dir.create(OutPath_Check, recursive = TRUE)}
if (!dir.exists(OutPath_Figures)) {
dir.create(OutPath_Figures, recursive = TRUE)}
if (!dir.exists(OutPath_Intermediate)) {
dir.create(OutPath_Intermediate, recursive = TRUE)}
if (!dir.exists(OutPath_Report)) {
dir.create(OutPath_Report, recursive = TRUE)}
```
## 0.3 Load packages
Load packages.
```{r lapply_library, results=FALSE, collapse = TRUE}
lapply(c("ComplexHeatmap", "magrittr"),
library, character.only = TRUE)
```
***
# 1.0 Data merge
<div class="alert alert-info">
<strong> Attention:</strong> <br>
Although each line of code has been validated, in order to save time knitting the R **markdown** document the next section is display only. If you are not data merging (section 1.0) or preparing the data (section 2.0), feel free to skip to Section 3.0 Initial flags.
</div>
## 1.1 Download ALA data
Download ALA data and create a new file in the DataPath to put those data into. You should also
first make an account with ALA in order to download your data — <https://auth.ala.org.au/userdetails/registration/createAccount>
BeeBDC::atlasDownloader(path = DataPath,
userEmail = "[email protected]",
atlas = "ALA",
ALA_taxon = "Apiformes")
## 1.2 Import and merge ALA, SCAN, iDigBio, and GBIF data
Supply the path to where the data is, the save_type is either "csv_files" or "R_file".
DataImp <- BeeBDC::repoMerge(path = DataPath,
occ_paths = BeeBDC::repoFinder(path = DataPath),
save_type = "R_file")
If there is an error in finding a file, run `repoFinder()` by itself to troubleshoot. For example:
#BeeBDC::repoFinder(path = DataPath)
#OUTPUT:
#$ALA_data
#[1] "F:/BeeDataCleaning2022/BeeDataCleaning/BeeDataCleaning/BeeData/ALA_galah_path/galah_download_2022-09-15/data.csv"
#$GBIF_data
#[1] "F:/BeeDataCleaning2022/BeeDataCleaning/BeeDataCleaning/BeeData/GBIF_webDL_30Aug2022/0000165-220831081235567/occurrence.txt"
#[2] "F:/BeeDataCleaning2022/BeeDataCleaning/BeeDataCleaning/BeeData/GBIF_webDL_30Aug2022/0436695-210914110416597/occurrence.txt"
#[3] "F:/BeeDataCleaning2022/BeeDataCleaning/BeeDataCleaning/BeeData/GBIF_webDL_30Aug2022/0436697-210914110416597/occurrence.txt"
#[4] "F:/BeeDataCleaning2022/BeeDataCleaning/BeeDataCleaning/BeeData/GBIF_webDL_30Aug2022/0436704-210914110416597/occurrence.txt"
#[5] "F:/BeeDataCleaning2022/BeeDataCleaning/BeeDataCleaning/BeeData/GBIF_webDL_30Aug2022/0436732-210914110416597/occurrence.txt"
#[6] "F:/BeeDataCleaning2022/BeeDataCleaning/BeeDataCleaning/BeeData/GBIF_webDL_30Aug2022/0436733-210914110416597/occurrence.txt"
#[7] "F:/BeeDataCleaning2022/BeeDataCleaning/BeeDataCleaning/BeeData/GBIF_webDL_30Aug2022/0436734-210914110416597/occurrence.txt"
#$iDigBio_data
#[1] "F:/BeeDataCleaning2022/BeeDataCleaning/BeeDataCleaning/BeeData/iDigBio_webDL_30Aug2022/5aa5abe1-62e0-4d8c-bebf-4ac13bd9e56f/occurrence_raw.csv"
#$SCAN_data
#character(0)
#Failing because SCAN_data seems to be missing. Downloaded separately from the one drive
Load in the most-recent version of these data if needed. This will return a list with:
1. The occurrence dataset with attributes (.$Data_WebDL)
2. The appended eml file (.$eml_files)
DataImp <- BeeBDC::importOccurrences(path = DataPath,
fileName = "BeeData_")
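The two elements of this list can be accessed directly, as is done when they are passed to `formattedCombiner()` in section 1.4 below; for example:
DataImp$Data_WebDL # the occurrence dataset with attributes
DataImp$eml_files # the appended eml files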
## 1.3 Import USGS Data
The `USGS_formatter()` will find, import, format, and create metadata for the USGS dataset. The pubDate must be in day-month-year format.
USGS_data <- BeeBDC::USGS_formatter(path = DataPath, pubDate = "19-11-2022")
## 1.4 Formatted Source Importer
Use this importer to find files that have been formatted and need to be added to the larger data file.
The attributes file must contain "attribute" in its name, and the occurrence file must not.
Complete_data <- BeeBDC::formattedCombiner(path = DataPath,
strings = c("USGS_[a-zA-Z_]+[0-9]{4}-[0-9]{2}-[0-9]{2}"),
# This should be the list-format with eml attached
existingOccurrences = DataImp$Data_WebDL,
existingEMLs = DataImp$eml_files)
In the column *catalogNumber*, remove ".*specimennumber:" as what comes after should be the USGS number to match for duplicates.
Complete_data$Data_WebDL <- Complete_data$Data_WebDL %>%
dplyr::mutate(catalogNumber = stringr::str_replace(catalogNumber,
pattern = ".*\\| specimennumber:",
replacement = ""))
## 1.5 Save data
Choose the type of data format you want to use in saving your work in 1.x.
BeeBDC::dataSaver(path = DataPath,# The main path to look for data in
save_type = "CSV_file", # "R_file" OR "CSV_file"
occurrences = Complete_data$Data_WebDL, # The existing datasheet
eml_files = Complete_data$eml_files, # The existing EML files
file_prefix = "Fin_") # The prefix for the fileNames
rm(Complete_data, DataImp)
# 2.0 Data preparation
The data preparation section of the script relates mostly to integrating **bee** occurrence datasets and corrections, and so may be skipped by many general taxon users.
## 2.1 Standardise datasets
You may either use:
- (a) the bdc import method (works well with general datasets) ***or***
- (b) the jbd import method (works well with above data merge)
### a. bdc import
The bdc import is **NOT** truly supported here, but provided as an example. Please go to section 2.1b below.
Read in the **bdc** metadata and standardise the dataset to bdc.
bdc_metadata <- readr::read_csv(paste(DataPath, "out_file", "bdc_integration.csv", sep = "/"))
# ?issue — datasetName is a darwinCore field already!
# Standardise the dataset to bdc
db_standardized <- bdc::bdc_standardize_datasets(
metadata = bdc_metadata,
format = "csv",
overwrite = TRUE,
save_database = TRUE)
# read in configuration description file of the column header info
config_description <- readr::read_csv(paste(DataPath, "Output", "bdc_configDesc.csv",
sep = "/"),
show_col_types = FALSE, trim_ws = TRUE)
### b. jbd import
Find the path, read in the file, and add the *database_id* column.
occPath <- BeeBDC::fileFinder(path = DataPath, fileName = "Fin_BeeData_combined_")
db_standardized <- readr::read_csv(occPath,
# Use the basic ColTypeR function to determine types
col_types = BeeBDC::ColTypeR(), trim_ws = TRUE) %>%
dplyr::mutate(database_id = paste("Dorey_data_",
1:nrow(.), sep = ""),
.before = family)
### c. optional thin
You can thin the dataset for ***TESTING ONLY!***
check_pf <- check_pf %>%
# take every 100th record
filter(row_number() %% 100 == 1)
***
## 2.2 Paige dataset
Paige Chesshire's cleaned American dataset — <https://doi.org/10.1111/ecog.06584>
### Import data
If you haven't figured it out by now, don't worry about the column name warning — not all columns occur here.
PaigeNAm <- readr::read_csv(paste(DataPath, "Paige_data", "NorAmer_highQual_only_ALLfamilies.csv",
sep = "/"), col_types = BeeBDC::ColTypeR()) %>%
# Change the column name from Source to dataSource to match the rest of the data.
dplyr::rename(dataSource = Source) %>%
# EXTRACT WAS HERE
# add a NEW database_id column
dplyr::mutate(
database_id = paste0("Paige_data_", 1:nrow(.)),
.before = scientificName)
<div class="alert alert-info">
<strong> Attention:</strong> <br>
It is recommended to run the below code on the full bee dataset with more than 16GB RAM. Robert ran this on a laptop with 16GB RAM and an Intel(R) Core(TM) i7-8550U processor (4 cores and 8 threads) — it struggled.
</div>
### Merge Paige's data with downloaded data
db_standardized <- BeeBDC::PaigeIntegrater(
db_standardized = db_standardized,
PaigeNAm = PaigeNAm,
# This is a list of columns by which to match Paige's data to the most-recent download.
# Each vector will be matched individually
columnStrings = list(
c("decimalLatitude", "decimalLongitude",
"recordNumber", "recordedBy", "individualCount", "samplingProtocol",
"associatedTaxa", "sex", "catalogNumber", "institutionCode", "otherCatalogNumbers",
"recordId", "occurrenceID", "collectionID"), # Iteration 1
c("catalogNumber", "institutionCode", "otherCatalogNumbers",
"recordId", "occurrenceID", "collectionID"), # Iteration 2
c("decimalLatitude", "decimalLongitude",
"recordedBy", "genus", "specificEpithet"),# Iteration 3
c("id", "decimalLatitude", "decimalLongitude"),# Iteration 4
c("recordedBy", "genus", "specificEpithet", "locality"), # Iteration 5
c("recordedBy", "institutionCode", "genus",
"specificEpithet","locality"),# Iteration 6
c("occurrenceID","decimalLatitude", "decimalLongitude"),# Iteration 7
c("catalogNumber","decimalLatitude", "decimalLongitude"),# Iteration 8
c("catalogNumber", "locality") # Iteration 9
) )
Remove spent data.
rm(PaigeNAm)
## 2.3 USGS
The USGS dataset also partially occurs on GBIF from BISON. However, the occurrence codes are in a silly place... We will correct these here to help identify duplicates later.
db_standardized <- db_standardized %>%
# Remove the discoverlife html if it is from USGS
dplyr::mutate(occurrenceID = dplyr::if_else(
stringr::str_detect(occurrenceID, "USGS_DRO"),
stringr::str_remove(occurrenceID, "http://www\\.discoverlife\\.org/mp/20l\\?id="),
occurrenceID)) %>%
# Use otherCatalogNumbers when occurrenceID is empty AND when USGS_DRO is detected there
dplyr::mutate(
occurrenceID = dplyr::if_else(
stringr::str_detect(otherCatalogNumbers, "USGS_DRO") & is.na(occurrenceID),
otherCatalogNumbers, occurrenceID)) %>%
# Make sure that no eventIDs have snuck into the occurrenceID columns
# For USGS_DRO, codes with <6 digits are event ids
dplyr::mutate(
occurrenceID = dplyr::if_else(stringr::str_detect(occurrenceID, "USGS_DRO", negate = TRUE),
# Keep occurrenceID if it's NOT USGS_DRO
occurrenceID,
# If it IS USGS_DRO and it has => 6 numbers, keep it, else, NA
dplyr::if_else(stringr::str_detect(occurrenceID, "USGS_DRO[0-9]{6,10}"),
occurrenceID, NA_character_)),
catalogNumber = dplyr::if_else(stringr::str_detect(catalogNumber, "USGS_DRO", negate = TRUE),
# Keep catalogNumber if it's NOT USGS_DRO
catalogNumber,
# If it IS USGS_DRO and it has => 6 numbers, keep it, else, NA
dplyr::if_else(stringr::str_detect(catalogNumber, "USGS_DRO[0-9]{6,10}"),
catalogNumber, NA_character_)))
## 2.4 Additional datasets
Import additional and potentially private datasets.
**Note:** Private dataset functions are provided but the data itself is not integrated here until those datasets become freely available.
There will be some warnings where a few rows may not be formatted correctly or where dates fail to parse. This is normal.
###### a. EPEL
Guzman, L. M., Kelly, T. & Elle, E. A data set for pollinator diversity and their interactions with plants in the Pacific Northwest. Ecology, e3927 (2022). <https://doi.org/10.1002/ecy.3927>
EPEL_Data <- BeeBDC::readr_BeeBDC(dataset = "EPEL",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/bee_data_canada.csv",
outFile = "jbd_EPEL_data.csv",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")
###### b. Allan Smith-Pardo
Data from Allan Smith-Pardo
ASP_Data <- BeeBDC::readr_BeeBDC(dataset = "ASP",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/Allan_Smith-Pardo_Dorey_ready2.csv",
outFile = "jbd_ASP_data.csv",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")
###### c. Minckley
Data from Robert Minckley
BMin_Data <- BeeBDC::readr_BeeBDC(dataset = "BMin",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/Bob_Minckley_6_1_22_ScanRecent-mod_Dorey.csv",
outFile = "jbd_BMin_data.csv",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")
###### d. BMont
Delphia, C. M. Bumble bees of Montana. <https://www.mtent.org/projects/Bumble_Bees/bombus_species.html>. (2022)
BMont_Data <- BeeBDC::readr_BeeBDC(dataset = "BMont",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/Bombus_Montana_dorey.csv",
outFile = "jbd_BMont_data.csv",
dataLicense = "https://creativecommons.org/licenses/by-sa/4.0/")
###### e. Ecd
Ecdysis. Ecdysis: a portal for live-data arthropod collections, <https://ecdysis.org/index.php> (2022).
Ecd_Data <- BeeBDC::readr_BeeBDC(dataset = "Ecd",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/Ecdysis_occs.csv",
outFile = "jbd_Ecd_data.csv",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")
###### f. Gai
Gaiarsa, M. P., Kremen, C. & Ponisio, L. C. Pollinator interaction flexibility across scales affects patch colonization and occupancy. *Nature Ecology & Evolution* 5, 787-793 (2021). <https://doi.org/10.1038/s41559-021-01434-y>
Gai_Data <- BeeBDC::readr_BeeBDC(dataset = "Gai",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/upload_to_scan_Gaiarsa et al_Dorey.csv",
outFile = "jbd_Gai_data.csv",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")
###### g. CAES
From the Connecticut Agricultural Experiment Station.
Zarrillo, T. A., Stoner, K. A. & Ascher, J. S. Biodiversity of bees (Hymenoptera: Apoidea: Anthophila) in Connecticut (USA). Zootaxa (Accepted).
Ecdysis. Occurrence dataset (ID: 16fca9c2-f622-4cb1-aef0-3635a7be5aeb). https://ecdysis.org/content/dwca/CAES-CAES_DwC-A.zip. (2023)
CAES_Data <- BeeBDC::readr_BeeBDC(dataset = "CAES",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/CT_BEE_DATA_FROM_PBI.xlsx",
outFile = "jbd_CT_Data.csv",
sheet = "Sheet1",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")
###### h. GeoL
GeoL_Data <- BeeBDC::readr_BeeBDC(dataset = "GeoL",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/Geolocate and BELS_certain and accurate.xlsx",
outFile = "jbd_GeoL_Data.csv",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")
###### i. EaCO
EaCO_Data <- BeeBDC::readr_BeeBDC(dataset = "EaCO",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/Eastern Colorado bee 2017 sampling.xlsx",
outFile = "jbd_EaCo_Data.csv",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")
###### j. FSCA
Florida State Collection of Arthropods
FSCA_Data <- BeeBDC::readr_BeeBDC(dataset = "FSCA",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "InputDatasets/fsca_9_15_22_occurrences.csv",
outFile = "jbd_FSCA_Data.csv",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")
###### k. Texas SMC
Published or unpublished data from Texas literature not in an online database, usually copied into spreadsheet from document format, or otherwise copied from a very differently-formatted spreadsheet. Unpublished or partially published data were obtained with express permission from the lead author.
SMC_Data <- BeeBDC::readr_BeeBDC(dataset = "SMC",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/TXbeeLitOccs_31Oct22.csv",
outFile = "jbd_SMC_Data.csv",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")
###### l. Texas Bal
Data with GPS coordinates (missing accidentally from records on Dryad) from Ballare, K. M., Neff, J. L., Ruppel, R. & Jha, S. Multi-scalar drivers of biodiversity: local management mediates wild bee community response to regional urbanization. Ecological Applications 29, e01869 (2019), <https://doi.org/10.1002/eap.1869>. The version on Dryad is missing site GPS coordinates (by accident). Kim is okay with these data being made public as long as her paper is referenced. - Elinor Lichtenberg
Bal_Data <- BeeBDC::readr_BeeBDC(dataset = "Bal",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/Beedata_ballare.xlsx",
outFile = "jbd_Bal_Data.csv",
sheet = "animal_data",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")
###### m. Palouse Lic
Elinor Lichtenberg’s canola data: Lichtenberg, E. M., Milosavljević, I., Campbell, A. J. & Crowder, D. W. Differential effects of soil conservation practices on arthropods and crop yields. *Journal of Applied Entomology*, (2023) <https://doi.org/10.1111/jen.13188>. These are the data I will be putting on SCAN. - Elinor Lichtenberg
Lic_Data <- BeeBDC::readr_BeeBDC(dataset = "Lic",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/Lichtenberg_canola_records.csv",
outFile = "jbd_Lic_Data.csv",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")
###### n. Arm
Data from Armando Falcon-Brindis from the University of Kentucky.
Arm_Data <- BeeBDC::readr_BeeBDC(dataset = "Arm",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/Bee database Armando_Final.xlsx",
outFile = "jbd_Arm_Data.csv",
sheet = "Sheet1",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")
###### o. Dor
From several papers:
1. Dorey, J. B., Fagan-Jeffries, E. P., Stevens, M. I., & Schwarz, M. P. (2020). Morphometric comparisons and novel observations of diurnal and low-light-foraging bees. *Journal of Hymenoptera Research*, 79, 117–144. doi:<https://doi.org/10.3897/jhr.79.57308>
2. Dorey, J. B. (2021). Missing for almost 100 years: the rare and potentially threatened bee Pharohylaeus lactiferus (Hymenoptera, Colltidae). *Journal of Hymenoptera Research*, 81, 165-180. doi: <https://doi.org/10.3897/jhr.81.59365>
3. Dorey, J. B., Schwarz, M. P., & Stevens, M. I. (2019). Review of the bee genus Homalictus Cockerell (Hymenoptera: Halictidae) from Fiji with description of nine new species. *Zootaxa*, 4674(1), 1–46. doi:<https://doi.org/10.11646/zootaxa.4674.1.1>
```{}
```
Dor_Data <- BeeBDC::readr_BeeBDC(dataset = "Dor",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/DoreyData.csv",
outFile = "jbd_Dor_Data.csv",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")
###### p. VicWam
These data are originally from the Victorian Museum and Western Australian Museum in Australia. However, in their current form they are from Dorey et al. 2021.
1. PADIL. (2020). PaDIL. <https://www.PADIL.gov.au/>
2. Houston, T. F. (2000). Native bees on wildflowers in Western Australia. *Western Australian Insect Study Society*.
3. Dorey, J. B., Rebola, C. M., Davies, O. K., Prendergast, K. S., Parslow, B. A., Hogendoorn, K., . . . Caddy-Retalic, S. (2021). Continental risk assessment for understudied taxa post catastrophic wildfire indicates severe impacts on the Australian bee fauna. *Global Change Biology*, 27(24), 6551-6567. doi:<https://doi.org/10.1111/gcb.15879>
```{}
```
VicWam_Data <- BeeBDC::readr_BeeBDC(dataset = "VicWam",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/Combined_Vic_WAM_databases.xlsx",
outFile = "jbd_VicWam_Data.csv",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/",
sheet = "Combined")
### 2.5 Merge all
Remove these spent datasets.
rm(EPEL_Data, ASP_Data, BMin_Data, BMont_Data, Ecd_Data, Gai_Data, CAES_Data,
GeoL_Data, EaCO_Data, FSCA_Data, SMC_Data, Bal_Data, Lic_Data, Arm_Data, Dor_Data,
VicWam_Data)
Read in and merge all. `readr_BeeBDC()` supports more datasets than are currently implemented here; the remaining datasets will be publicly released in the future. See '?`readr_BeeBDC()`' for details.
db_standardized <- db_standardized %>%
dplyr::bind_rows(
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_ASP_data.csv"), col_types = BeeBDC::ColTypeR()),
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_EPEL_data.csv"), col_types = BeeBDC::ColTypeR()),
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_BMin_data.csv"), col_types = BeeBDC::ColTypeR()),
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_BMont_data.csv"), col_types = BeeBDC::ColTypeR()),
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_Ecd_data.csv"), col_types = BeeBDC::ColTypeR()),
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_Gai_data.csv"), col_types = BeeBDC::ColTypeR()),
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_CT_Data.csv"), col_types = BeeBDC::ColTypeR()),
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_GeoL_Data.csv"), col_types = BeeBDC::ColTypeR()),
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_EaCo_Data.csv"), col_types = BeeBDC::ColTypeR()),
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_SMC_Data.csv"), col_types = BeeBDC::ColTypeR()),
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_Bal_Data.csv"), col_types = BeeBDC::ColTypeR()),
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_Lic_Data.csv"), col_types = BeeBDC::ColTypeR()),
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_Arm_Data.csv"), col_types = BeeBDC::ColTypeR()),
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_Dor_Data.csv"), col_types = BeeBDC::ColTypeR()),
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_VicWam_Data.csv"), col_types = BeeBDC::ColTypeR())) %>%
# END bind_rows
suppressWarnings(classes = "warning") # End suppressWarnings — due to col_types
### 2.6 Match database_id
If you have prior runs whose *database_id*s you would like to match to the current run, you may use the below script to attempt that matching.
Read in a prior run of choice.
priorRun <- BeeBDC::fileFinder(path = DataPath,
file = "01_prefilter_database_9Aug22.csv") %>%
readr::read_csv(file = ., col_types = BeeBDC::ColTypeR())
This function will attempt to find the *database_id*s from prior runs.
db_standardized <- BeeBDC::idMatchR(
currentData = db_standardized,
priorData = priorRun,
# First matches will be given preference over later ones
matchBy = tibble::lst(c("gbifID", "dataSource"),
c("catalogNumber", "institutionCode", "dataSource", "decimalLatitude",
"decimalLongitude"),
c("occurrenceID", "dataSource","decimalLatitude","decimalLongitude"),
c("recordId", "dataSource","decimalLatitude","decimalLongitude"),
c("id", "dataSource","decimalLatitude","decimalLongitude"),
  # Because INHS was entered as its own dataset but is now included in the GBIF download...
c("catalogNumber", "institutionCode", "dataSource",
"decimalLatitude","decimalLongitude")),
  # You can exclude datasets from the prior run by matching their prefixes — the text before the first underscore:
excludeDataset = c("ASP", "BMin", "BMont", "CAES", "EaCO", "Ecd", "EcoS",
"Gai", "KP", "EPEL", "CAES", "EaCO", "FSCA", "SMC", "Lic", "Arm",
"VicWam"))
# Remove redundant files
rm(priorRun)
Save the dataset.
db_standardized %>%
readr::write_excel_csv(.,
paste(OutPath_Intermediate, "00_prefilter_database.csv",
sep = "/"))
# 3.0 Initial flags
Read data back in if needed. OutPath_Intermediate (and a few other directories) should have been created and saved to the global environment by `dirMaker()`.
if(!exists("db_standardized")){
db_standardized <- readr::read_csv(paste(OutPath_Intermediate, "00_prefilter_database.csv",
sep = "/"), col_types = BeeBDC::ColTypeR())}
Normally, you would use the full dataset, as read in above. But, for the sake of this vignette, we will use a combination of two example datasets. These example datasets can further be very useful for testing functions if you're ever feeling a bit confused and overwhelmed!
```{r 3.0, collapse = TRUE}
data("bees3sp", package = "BeeBDC")
data("beesRaw", package = "BeeBDC")
db_standardized <- dplyr::bind_rows(beesRaw,
# Only keep a subset of columns from bees3sp
bees3sp %>% dplyr::select(tidyselect::all_of(colnames(beesRaw)), countryCode))
```
*For more details about the bdc package, please see their [tutorial.](https://brunobrr.github.io/bdc/articles/prefilter.html)*
## 3.1 SciName
Flag occurrences without *scientificName* provided.
```{r 3.1, collapse = TRUE}
check_pf <- bdc::bdc_scientificName_empty(
data = db_standardized,
sci_name = "scientificName")
# now that this is saved, remove it to save space in memory
rm(db_standardized)
```
## 3.2 MissCoords
Flag occurrences with missing *decimalLatitude* and *decimalLongitude*.
```{r 3.2, collapse = TRUE}
check_pf <- bdc::bdc_coordinates_empty(
data = check_pf,
lat = "decimalLatitude",
lon = "decimalLongitude")
```
## 3.3 OutOfRange
Flag occurrences that are not on Earth (outside of -180 to 180 or -90 to 90 degrees).
```{r 3.3, collapse = TRUE}
check_pf <- bdc::bdc_coordinates_outOfRange(
data = check_pf,
lat = "decimalLatitude",
lon = "decimalLongitude")
```
## 3.4 Source
Flag occurrences that don't match the *basisOfRecord* types below.
```{r 3.4, collapse = TRUE}
check_pf <- bdc::bdc_basisOfRecords_notStandard(
data = check_pf,
basisOfRecord = "basisOfRecord",
names_to_keep = c(
# Keep all plus some at the bottom.
"Event",
"HUMAN_OBSERVATION",
"HumanObservation",
"LIVING_SPECIMEN",
"LivingSpecimen",
"MACHINE_OBSERVATION",
"MachineObservation",
"MATERIAL_SAMPLE",
"O",
"Occurrence",
"MaterialSample",
"OBSERVATION",
"Preserved Specimen",
"PRESERVED_SPECIMEN",
"preservedspecimen Specimen",
"Preservedspecimen",
"PreservedSpecimen",
"preservedspecimen",
"S",
"Specimen",
"Taxon",
"UNKNOWN",
"",
NA,
"NA",
"LITERATURE",
"None", "Pinned Specimen", "Voucher reared", "Emerged specimen"
))
```
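If you want to see which *basisOfRecord* values were caught, a quick cross-tabulation works well. Note that the flag-column name used below (*.basisOfRecords_notStandard*) is assumed from **bdc**'s naming convention; confirm it with `colnames(check_pf)` if your **bdc** version differs.
```{r, eval = FALSE, collapse = TRUE}
# Optional check: which basisOfRecord values were flagged (FALSE) or passed (TRUE)?
# The .basisOfRecords_notStandard column name is assumed from bdc's convention.
check_pf %>%
  dplyr::count(basisOfRecord, .basisOfRecords_notStandard) %>%
  dplyr::arrange(.basisOfRecords_notStandard, dplyr::desc(n))
```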
## 3.5 CountryName
Try to harmonise country names.
### a. prepare dataset
Fix up country names based on common problems (listed in the `commonProblems` tibble below) and extract ISO2 codes for occurrences.
```{r 3.5a, collapse = TRUE}
check_pf_noNa <- BeeBDC::countryNameCleanR(
data = check_pf,
# Create a Tibble of common issues in country names and their replacements
commonProblems = dplyr::tibble(problem = c('U.S.A.', 'US','USA','usa','UNITED STATES',
'United States','U.S.A','MX','CA','Bras.','Braz.',
'Brasil','CNMI','USA TERRITORY: PUERTO RICO'),
fix = c('United States of America','United States of America',
'United States of America','United States of America',
'United States of America','United States of America',
'United States of America','Mexico','Canada','Brazil',
'Brazil','Brazil','Northern Mariana Islands','PUERTO.RICO'))
)
```
### b. run function
Get country names from coordinates using a wrapper around the `jbd_country_from_coordinates()` function. Because our dataset is much larger than those used to design **bdc**, we have made it so that you can analyse the data in smaller pieces. Additionally, like some other functions in **BeeBDC**, we have implemented parallel operations (mc.cores sets the number of cores and stepSize the number of rows processed per operation); see '?`jbd_CfC_chunker()`' for details.
NOTE: In an actual run you should use scale = "large"
```{r 3.5b, message=FALSE, warning=FALSE, collapse = TRUE}
suppressWarnings(
countryOutput <- BeeBDC::jbd_CfC_chunker(data = check_pf_noNa,
lat = "decimalLatitude",
lon = "decimalLongitude",
country = "country",
# How many rows to process at a time
stepSize = 1000000,
# Start row
chunkStart = 1,
path = OutPath_Intermediate,
# Normally, please use scale = "large"
scale = "medium",
mc.cores = 1),
classes = "warning")
```
### c. re-merge
Join these datasets.
```{r 3.5ci, collapse = TRUE}
check_pf <- dplyr::left_join(check_pf,
countryOutput,
by = "database_id",
suffix = c("", "CO")) %>%
# Take the new country name if the original is NA
dplyr::mutate(country = dplyr::if_else(is.na(country),
countryCO,
country)) %>%
# Remove duplicates if they arose from left_join!
dplyr::distinct()
```
Save the dataset.
```{r 3.5cii, eval = FALSE, collapse = TRUE}
check_pf %>%
readr::write_excel_csv(.,
paste(OutPath_Intermediate, "01_prefilter_database.csv",
sep = "/"))
```
Read in if needed.
```{r 3.5ciii, eval = FALSE, collapse = TRUE}
if(!exists("check_pf")){
check_pf <- readr::read_csv(paste(DataPath,
"Output", "Intermediate", "01_prefilter_database.csv", sep = "/"),
col_types = BeeBDC::ColTypeR())}
```
Remove these interim datasets.
```{r 3.5civ, collapse = TRUE}
rm(check_pf_noNa, countryOutput)
```
## 3.6 StandardCoNames
Run the function, which standardises country names and adds ISO2 codes, if needed.
```{r 3.6, collapse = TRUE}
# Standardise country names and add ISO2 codes if needed
check_pf <- bdc::bdc_country_standardized(
  # Remove the countryCode and country_suggested columns to avoid an error where two
  # "countryCode" and "country_suggested" columns would exist (i.e., if the dataset has
  # been run before)
data = check_pf %>% dplyr::select(!tidyselect::any_of(c("countryCode", "country_suggested"))),
country = "country"
)
```
## 3.7 TranspCoords
Flag and correct records where *decimalLatitude* and *decimalLongitude* appear to be transposed. We created this chunked version of `bdc::bdc_coordinates_transposed()` because the original is very RAM-heavy on our large bee dataset. Like many of our other 'jbd_...' functions, it also adds other improvements, e.g., parallel processing.
NOTE: Usually you would use scale = "large", which requires rnaturalearthhires
```{r 3.7, message=FALSE, warning=FALSE, collapse = TRUE}
check_pf <- BeeBDC::jbd_Ctrans_chunker(
# bdc_coordinates_transposed inputs
data = check_pf,
id = "database_id",
lat = "decimalLatitude",
lon = "decimalLongitude",
country = "country",
countryCode = "countryCode",
border_buffer = 0.2, # in decimal degrees (~22 km at the equator)
save_outputs = TRUE,
sci_names = "scientificName",
# chunker inputs
stepSize = 1000000, # How many rows to process at a time
chunkStart = 1, # Start row
append = FALSE, # If FALSE it may overwrite existing dataset
progressiveSave = FALSE,
# In a normal run, please use scale = "large"
scale = "medium",
path = OutPath_Check,
mc.cores = 1
)
```
Get a quick summary of the number of transposed records.
```{r 3.7ii, eval = FALSE, collapse = TRUE}
table(check_pf$coordinates_transposed, useNA = "always")
```
Save the dataset.
```{r 3.7iii, eval = FALSE, collapse = TRUE}
check_pf %>%
readr::write_excel_csv(.,
paste(OutPath_Intermediate, "01_prefilter_database.csv",
sep = "/"))
```
Read the data in again if needed.
```{r 3.7iv, eval = FALSE, collapse = TRUE}
if(!exists("check_pf")){
check_pf <- readr::read_csv(paste(OutPath_Intermediate, "01_prefilter_database.csv",
sep = "/"), col_types = BeeBDC::ColTypeR())}
```
## 3.8 Coord-country
Collect all country names in the *country_suggested* column. We rebuilt a **bdc** function to flag occurrences where the coordinates are inconsistent with the provided country name.
```{r 3.8, collapse = TRUE}
check_pf <- BeeBDC::jbd_coordCountryInconsistent(
data = check_pf,
lon = "decimalLongitude",
lat = "decimalLatitude",
scale = 50,
pointBuffer = 0.01)
```
Save the dataset.
```{r 3.8ii, eval = FALSE}
check_pf %>%
readr::write_excel_csv(.,
paste(OutPath_Intermediate, "01_prefilter_database.csv",
sep = "/"))
```
## 3.9 GeoRefIssue
This function identifies records whose coordinates can potentially be extracted from locality information, which must be manually checked later.
```{r 3.9, eval = TRUE, collapse = TRUE}
xyFromLocality <- bdc::bdc_coordinates_from_locality(
data = check_pf,
locality = "locality",
lon = "decimalLongitude",
lat = "decimalLatitude",
save_outputs = FALSE
)
```
```{r 3.9ii, eval = FALSE}
# Save the resultant data
xyFromLocality %>% readr::write_excel_csv(paste(OutPath_Check, "01_coordinates_from_locality.csv",
sep = "/"))
```
Remove spent data.
```{r 3.9iii, eval = FALSE}
rm(xyFromLocality)
```
## 3.10 Flag Absent
Flag the records marked as "absent".
```{r 3.10, collapse = TRUE}
check_pf <- BeeBDC::flagAbsent(data = check_pf,
PresAbs = "occurrenceStatus")
```
## 3.11 flag License
Flag the records that may not be used according to their license information.
```{r 3.11, collapse = TRUE}
check_pf <- BeeBDC::flagLicense(data = check_pf,
strings_to_restrict = "all",
# DON'T flag if in the following dataSource(s)
excludeDataSource = NULL)
```
## 3.12 GBIF issue
Flag select issues that are flagged by GBIF.
```{r 3.12, collapse = TRUE}
check_pf <- BeeBDC::GBIFissues(data = check_pf,
issueColumn = "issue",
GBIFflags = c("COORDINATE_INVALID", "ZERO_COORDINATE"))
```
## 3.13 Flag Reports
### a. Save flags
Save the flags so far. This function will make sure that you keep a copy of everything that has been flagged up until now. This will be updated throughout the script and can be accessed at the end, so be wary of moving files around manually. However, these data will also still be maintained in the main running file, so this is an optional fail-safe.
```{r 3.13a, eval = FALSE}
flagFile <- BeeBDC::flagRecorder(
data = check_pf,
outPath = paste(OutPath_Report, sep =""),
fileName = paste0("flagsRecorded_", Sys.Date(), ".csv"),
# These are the columns that will be kept along with the flags
idColumns = c("database_id", "id", "catalogNumber", "occurrenceID", "dataSource"),
# TRUE if you want to find a file from a previous part of the script to append to
append = FALSE)
```
Update the *.summary* column
```{r 3.13b, collapse = TRUE}
check_pf <- BeeBDC::summaryFun(
data = check_pf,
# Don't filter these columns (or NULL)
dontFilterThese = NULL,
# Remove the filtering columns?
removeFilterColumns = FALSE,
# Filter to ONLY cleaned data?
filterClean = FALSE)
```
### c. Reporting
Use **bdc** to generate reports.
```{r 3.13c, eval = FALSE}
(report <- bdc::bdc_create_report(data = check_pf,
database_id = "database_id",
workflow_step = "prefilter",
save_report = TRUE)
)
```
## 3.14 Save
Save the intermediate dataset.
```{r 3.14, eval = FALSE}
check_pf %>%
readr::write_excel_csv(., paste(OutPath_Intermediate, "01_prefilter_output.csv",
sep = "/"))
```
# 4.0 Taxonomy
*For more information about the corresponding bdc functions used in this section, see their [tutorial](https://brunobrr.github.io/bdc/articles/taxonomy.html). *
Read in the filtered dataset or rename the 3.x dataset for 4.0.
```{r 4.0, collapse = TRUE}
if(!exists("check_pf")){
database <-
readr::read_csv( paste(OutPath_Intermediate, "01_prefilter_output.csv",
sep = "/"), col_types = BeeBDC::ColTypeR())
}else{
# OR rename and remove
database <- check_pf
# Remove spent dataset
rm(check_pf)}
```
Remove *names_clean* if it already exists (i.e. you have run the following functions on this dataset before).
```{r 4.0ii, collapse = TRUE}
database <- database %>%
dplyr::select(!tidyselect::any_of("names_clean"))
```
## 4.1 Prep data names
This step cleans the database's *scientificName* column.
**! MAC**: You might need to install gnparser through terminal — brew
brew tap gnames/gn
brew install gnparser
<div class="alert alert-info">
<strong> Attention:</strong> <br>
This can be difficult for a Windows install. Ensure you have the most recent version of R, R Studio, and R packages. Also, check package '**rgnparser**' is installed correctly. If you still cannot get the below code to work, you may have to download the latest version of 'gnparser' from [here](https://github.com/gnames/gnparser/releases/tag/v1.6.9). You may then need to manually install it and edit your system's PATH environment variable so that 'gnparser.exe' can be located. See [here](https://github.com/gnames/gnparser#installation).
</div>
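As an optional, hedged pre-check, you can ask R whether it can see a 'gnparser' binary before running the next chunk; `Sys.which()` is base R and simply returns an empty string when the executable is not on the PATH. The commented-out installer call is an assumption about your **rgnparser** version, so check its help page first.
```{r, eval = FALSE, collapse = TRUE}
# Optional: check whether R can find the gnparser binary before running bdc_clean_names()
if (!nzchar(Sys.which("gnparser"))) {
  message("gnparser was not found on the PATH - see the installation notes above.")
  # If your rgnparser version provides an installer helper, it may be used instead:
  # rgnparser::install_gnparser()
}
```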
```{r 4.1, eval = FALSE, collapse = TRUE}
parse_names <-
bdc::bdc_clean_names(sci_names = database$scientificName, save_outputs = FALSE)
```
## The latest gnparser version is v1.7.4
## gnparser has been installed to /home/runner/bin
##
## >> Family names prepended to scientific names were flagged and removed from 0 records.
## >> Terms denoting taxonomic uncertainty were flagged and removed from 0 records.
## >> Other issues, capitalizing the first letter of the generic name, replacing empty names by NA, and removing extra spaces, were flagged and corrected or removed from 1 records.
## >> Infraspecific terms were flagged and removed from 0 records.
Keep only the *.uncer_terms* and *names_clean* columns.
```{r 4.1ii, collapse = TRUE, eval = FALSE}
parse_names <-
parse_names %>%
dplyr::select(.uncer_terms, names_clean)
```
Merge names with the complete dataset.
```{r 4.1iii, collapse = TRUE}
database <- dplyr::bind_cols(database, parse_names)
rm(parse_names)
```
## 4.2 Harmonise taxonomy
Download the custom taxonomy file from the BeeBDC package and [Discover Life](https://www.discoverlife.org) website.
```{r 4.2, collapse = TRUE, eval = FALSE}
taxonomyFile <- BeeBDC::beesTaxonomy()
```
```{r 4.2secret, collapse = TRUE, eval = TRUE}
# load in the small test dataset in the background
system.file("extdata", "testTaxonomy.rda", package="BeeBDC") |>
load()
# Rename the file
taxonomyFile <- testTaxonomy
rm(testTaxonomy)
```
<div class="alert alert-info">
<strong> Attention:</strong> <br>
As of version 1.1.0, BeeBDC now has a new function that can download taxonomies using the taxadb package and transform them into the BeeBDC format. The function, `BeeBDC::taxadbToBeeBDC()`, allows the user to choose their desired provider (e.g., "gbif", "itis"...), version, taxon name and rank, and to save the taxonomy as a readable csv or not. For example for the bee genus Apis:
ApisTaxonomy <- BeeBDC::taxadbToBeeBDC(
name = "Apis",
rank = "Genus",
provider = "gbif",
version = "22.12",
outPath = getwd(),
fileName = "ApisTaxonomy.csv"
)
</div>
Harmonise the names in the occurrence tibble. This flags the occurrences without a matched name and matches names to their correct name according to [Discover Life](https://www.discoverlife.org). You can also use multiple cores to achieve this. See '?`harmoniseR()`' for details.
```{r 4.2ii, collapse = TRUE}
database <- BeeBDC::harmoniseR(path = DataPath, # The path to a folder where the output can be saved
taxonomy = taxonomyFile, # The formatted taxonomy file
data = database,
mc.cores = 1)
```
You don't need this file any more...
```{r 4.2iii, collapse = TRUE}
rm(taxonomyFile)
```
Save the harmonised file.
```{r 4.2iv, eval = FALSE, collapse = TRUE}
database %>%
readr::write_excel_csv(.,
paste(DataPath, "Output", "Intermediate", "02_taxonomy_database.csv",
sep = "/"))
```
## 4.3 Save flags
Save the flags so far. This will find the most-recent flag file and append your new data to it. You can double-check the data and number of columns if you'd like to be thorough and sure that all of the data are intact.
```{r 4.3, eval = FALSE, collapse = TRUE}
flagFile <- BeeBDC::flagRecorder(
data = database,
outPath = paste(OutPath_Report, sep =""),
fileName = paste0("flagsRecorded_", Sys.Date(), ".csv"),
idColumns = c("database_id", "id", "catalogNumber", "occurrenceID", "dataSource"),
append = TRUE,
printSummary = TRUE)
```
# 5.0 Space
*The final frontier or whatever.*
Read in the latest database.
```{r 5.0, collapse = TRUE}
if(!exists("database")){
database <-
readr::read_csv(paste(OutPath_Intermediate, "02_taxonomy_database.csv", sep = "/"),
col_types = BeeBDC::ColTypeR())}
```
## 5.1 Coordinate precision
This function identifies records with a coordinate precision below a specified number of decimal places. For example, the precision of a coordinate with 1 decimal place is 11.132 km at the equator, i.e., the scale of a large city. The major difference between the **bdc** and **BeeBDC** functions is that `jbd_coordinates_precision()` will only flag occurrences if BOTH latitude and longitude are rounded (as opposed to only one of these).
Coordinates with one, two, or three decimal places present a precision of ~11.1 km, ~1.1 km, and ~111 m at the equator, respectively.
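As a quick back-of-the-envelope check of those numbers (assuming roughly 111.32 km per degree at the equator), you can tabulate the approximate precision implied by each number of decimal places:
```{r, eval = FALSE, collapse = TRUE}
# Approximate precision (km at the equator) implied by 1-4 decimal places,
# assuming ~111.32 km per degree of latitude/longitude at the equator
ndecExample <- 1:4
dplyr::tibble(decimals = ndecExample,
              approx_km = 111.32 / 10^ndecExample)
# 1 decimal gives ~11.1 km, 2 give ~1.1 km, 3 give ~0.11 km (~111 m)
```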
```{r 5.1, collapse = TRUE}
check_space <-
BeeBDC::jbd_coordinates_precision(
data = database,
lon = "decimalLongitude",
lat = "decimalLatitude",
ndec = 2 # number of decimals to be tested
)
```
Remove the spent dataset.
```{r 5.1ii, collapse = TRUE}
rm(database)
```
Save the resulting file.
```{r 5.1iii, eval = FALSE, collapse = TRUE}
check_space %>%
readr::write_excel_csv(.,
paste(OutPath_Intermediate, "03_space_inter_database.csv",
sep = "/"))
```
## 5.2 Common spatial issues
Only run occurrences that are spatially 'valid' through `clean_coordinates()`.
```{r 5.2, eval = FALSE, collapse = TRUE}
tempSpace <- check_space %>%
dplyr::filter(!.coordinates_empty == FALSE) %>%
dplyr::filter(!.coordinates_outOfRange == FALSE)
```
Next, we will flag common spatial issues using functions of the package **CoordinateCleaner**. It addresses some common issues in biodiversity datasets.
```{r 5.2ii, message=TRUE, warning=FALSE, eval = FALSE, collapse = TRUE}
tempSpace <-
CoordinateCleaner::clean_coordinates(
x = tempSpace,
lon = "decimalLongitude",
lat = "decimalLatitude",
species = "scientificName",
countries = NULL, # Tests if coords are from x country. This is not needed.
tests = c(
"capitals", # records within 0.5 km of capitals centroids
"centroids", # records within 1 km around country and province centroids
"equal", # records with equal coordinates
"gbif", # records within 1 km of GBIF headquarters. (says 1 degree in package, but code says 1000 m)
"institutions", # records within 100m of zoo and herbaria
"zeros" # records with coordinates 0,0
# "seas" # Not flagged as this should be flagged by coordinate country inconsistent
),
capitals_rad = 1000,
centroids_rad = 500,
centroids_detail = "both", # test both country and province centroids
inst_rad = 100, # remove zoo and herbaria within 100m
range_rad = 0,
zeros_rad = 0.5,
capitals_ref = NULL,
centroids_ref = NULL,
country_ref = NULL,
country_refcol = "countryCode",
inst_ref = NULL,
range_ref = NULL,
# seas_scale = 50,
value = "spatialvalid" # result of tests are appended in separate columns
) %>%
# Remove duplicate .summary column that can be replaced later and turn into a tibble
dplyr::select(!tidyselect::starts_with(".summary")) %>%
dplyr::tibble()
```
Re-merge the datasets.
```{r 5.2iii, eval = FALSE, collapse = TRUE}
check_space <- tempSpace %>%
# Re-bind with the records that were removed earlier
dplyr::bind_rows(check_space %>%
dplyr::filter(.coordinates_empty == FALSE |
.coordinates_outOfRange == FALSE) )
```
Remove the temporary dataset.
```{r 5.2iv, eval = FALSE, collapse = TRUE}
rm(tempSpace)
```
Save the intermediate dataset.
```{r 5.2v, eval = FALSE, collapse = TRUE}
check_space %>%
readr::write_excel_csv(paste(OutPath_Intermediate, "03_space_inter_database.csv",
sep = "/"))
```
## 5.3 Diagonal + grid
Find sequential numbers that could be fill-down errors in latitude and longitude, grouping by the 'groupingColumns'. This is accomplished using a sliding window whose length is determined by minRepeats. Only coordinates of precision 'ndec' (number of decimals in decimal-degree format) will be examined. Note that this function is very RAM-intensive, so the use of multiple threads should be approached with caution depending on your dataset; however, the option is provided.
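To make the idea concrete, here is a minimal, made-up sketch (not part of the workflow) of the kind of fill-down sequence that gets flagged: consecutive records from one collector and date whose latitudes step by a constant increment. The chunk after this sketch then applies `diagonAlley()` to the real dataset.
```{r, eval = FALSE, collapse = TRUE}
# A made-up example of a fill-down-like sequence: within one collector/date group the
# latitudes step by a constant 0.001, which is the pattern diagonAlley() targets
toySequence <- dplyr::tibble(
  recordedBy = "Collector A",
  eventDate = "2001-01-01",
  decimalLatitude = c(10.001, 10.002, 10.003, 10.004, 10.005, 10.006),
  decimalLongitude = 151.1)
# The constant step is easy to see from the differences between consecutive latitudes
diff(toySequence$decimalLatitude)
```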
```{r 5.3, collapse = TRUE}
check_space <- BeeBDC::diagonAlley(
data = check_space,
  # The minimum number of repeats needed to flag a sequence
minRepeats = 6,
ndec = 3,
groupingColumns = c("eventDate", "recordedBy", "datasetName"),
mc.cores = 1)
```
Spatial gridding from rasterisation: keep only the datasets (grouped by *datasetName*) with four or more unique coordinate records, as filtered below.
```{r 5.3ii, collapse = TRUE}
griddingDF <- check_space %>%
# Exclude NA lat and lon values
tidyr::drop_na(c("decimalLatitude", "decimalLongitude")) %>%
# Group by the dataset name
dplyr::group_by(datasetName) %>%
# Remove rows that aren't unique for lat and long
dplyr::distinct(decimalLongitude, decimalLatitude,
.keep_all = TRUE) %>%
# Find the groups with 4 or more occurrence records
dplyr::filter(dplyr::n() >= 4) %>%
dplyr::ungroup()
```
Run the gridding analysis to find datasets that might be gridded.
```{r 5.3iii, eval = FALSE, collapse = TRUE}
gridded_datasets <- CoordinateCleaner::cd_round(
x = griddingDF,
lon = "decimalLongitude",
lat = "decimalLatitude",
ds = "datasetName",
T1 = 7,
min_unique_ds_size = 4,
test = "both",
value = "dataset",
graphs = FALSE,
verbose = TRUE,
reg_out_thresh = 2,
reg_dist_min = 0.1,
reg_dist_max = 2
) %>%
dplyr::tibble()
# The griddingDF is no longer needed. remove it.
rm(griddingDF)
```
Integrate these results with the main dataset.
```{r 5.3iv, eval = FALSE, collapse = TRUE}
check_space <- check_space %>%
# Join the datasets
dplyr::left_join(
# Select the columns of interest
dplyr::select(gridded_datasets, dataset, lon.flag, lat.flag, summary),
by = c("datasetName" = "dataset")) %>%
  # Make new columns with more-consistent naming and change the NA values to TRUE (not flagged)
dplyr::mutate(.lonFlag = tidyr::replace_na(lon.flag, TRUE),
.latFlag = tidyr::replace_na(lat.flag, TRUE),
.gridSummary = tidyr::replace_na(summary, TRUE)) %>%
# Remove old columns
dplyr::select(!c(lon.flag, lat.flag, summary))
```
Save the gridded_datasets file for later examination.
```{r 5.3v, eval = FALSE, collapse = TRUE}
gridded_datasets %>%
readr::write_excel_csv(paste(OutPath_Intermediate, "03_space_griddedDatasets.csv",
sep = "/"))
```
Now remove this file.
```{r 5.3vi, eval = FALSE, collapse = TRUE}
rm(gridded_datasets)
```
## 5.4 Uncertainty
Flag records that exceed a *coordinateUncertaintyInMeters* threshold.
```{r 5.4, collapse = TRUE}
check_space <- BeeBDC::coordUncerFlagR(data = check_space,
uncerColumn = "coordinateUncertaintyInMeters",
threshold = 1000)
```
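As with the other flags, a quick tally is often useful; the *.uncertaintyThreshold* column created here is the same one referenced later in the filtering steps (FALSE = flagged).
```{r, eval = FALSE, collapse = TRUE}
# Quick tally of the coordinate-uncertainty flag (FALSE = exceeded the threshold)
table(check_space$.uncertaintyThreshold, useNA = "always")
```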
## 5.5 Country checklist
This step identifies mismatches between the [Discover Life](https://www.discoverlife.org) country checklist — `beesChecklist` — for bee species and the dataset, identifying potential misidentifications, outliers, etc.
Download the country-level checklist.
```{r 5.5, collapse = TRUE, eval = FALSE}
checklistFile <- BeeBDC::beesChecklist()
```
```{r 5.5secret, collapse = TRUE, eval = TRUE}
# load in the small test dataset in the background
system.file("extdata", "testChecklist.rda", package="BeeBDC") |>
load()
# Rename the file
checklistFile <- testChecklist
rm(testChecklist)
```
```{r 5.5ii, collapse = TRUE}
check_space <- BeeBDC::countryOutlieRs(checklist = checklistFile,
data = check_space,
keepAdjacentCountry = TRUE,
pointBuffer = 0.05,
# Scale of map to return, one of 110, 50, 10 OR 'small', 'medium', 'large'
# Smaller numbers will result in much longer calculation times.
# We have not attempted a scale of 10.
scale = 50,
mc.cores = 1)
```
```{r 5.5iii, eval = FALSE, collapse = TRUE}
# A list of failed species-country combinations and their numbers can be output here
check_space %>%
dplyr::filter(.countryOutlier == FALSE) %>%
dplyr::select(database_id, scientificName, country) %>%
dplyr::group_by(scientificName) %>%
dplyr::mutate(count_scientificName = n()) %>%
dplyr::distinct(scientificName, country, .keep_all = TRUE) %>%
readr::write_excel_csv(paste(OutPath_Intermediate, "03_space_failedCountryChecklist.csv",
sep = "/"))
```
## 5.6 Map spatial errors
Assemble maps of potential spatial errors and outliers, either one flag at a time or using the *.summary* column. First, you need to rebuild the *.summary* column.
Rebuild the *.summary* column.
```{r 5.6, eval = FALSE, collapse = TRUE}
check_space <- BeeBDC::summaryFun(
data = check_space,
dontFilterThese = NULL,
removeFilterColumns = FALSE,
filterClean = FALSE)
```
Use col_to_map in order to map ONE spatial flag at a time or map the *.summary* column for all flags.
```{r 5.6ii, eval = FALSE, collapse = TRUE}
check_space %>%
dplyr::filter(.summary == FALSE) %>% # map only records flagged as FALSE
bdc::bdc_quickmap(
data = .,
lon = "decimalLongitude",
lat = "decimalLatitude",
col_to_map = ".summary",
size = 0.9
)
```
## 5.7 Space report
Create the space report using **bdc**.
```{r 5.7, eval = FALSE, collapse = TRUE}
(report <-
bdc::bdc_create_report(
data = dplyr::tibble(check_space %>% dplyr::select(!.uncer_terms)),
database_id = "database_id",
workflow_step = "space",
save_report = TRUE)
)
```
## 5.8 Space figures
Create figures for the spatial data filtering results.
```{r 5.8, eval = FALSE, collapse = TRUE}
(figures <-
BeeBDC::jbd_create_figures(
data = dplyr::tibble(check_space %>% dplyr::select(!.uncer_terms)),
path = DataPath,
database_id = "database_id",
workflow_step = "space",
save_figures = TRUE)
)
```
For examining the figures, the options are:
- *.cap* = Records around country capital centroid
- *.cen* = Records around country or province centroids
- *.dbl* = Duplicated coordinates per species
- *.equ* = Identical coordinates
- *.otl* = Geographical outliers
- *.gbf* = Records around the GBIF headquarters
- *.inst* = Records around biodiversity institutions
- *.rou* = Rounded (probably imprecise) coordinates
- *.urb* = Records within urban areas — (Likely not relevant for bees.)
You can examine these figures, for example, by running:
figures$.rou
Save interim dataset.
```{r 5.8ii, eval = FALSE, collapse = TRUE}
check_space %>%
readr::write_excel_csv(paste(OutPath_Intermediate, "03_space_inter_database.csv",
sep = "/"))
```
## 5.9 Save flags
Save the flags so far.
```{r 5.9, eval = FALSE, collapse = TRUE}
BeeBDC::flagRecorder(
data = check_space,
outPath = paste(OutPath_Report, sep =""),
fileName = paste0("flagsRecorded_", Sys.Date(), ".csv"),
idColumns = c("database_id", "id", "catalogNumber", "occurrenceID", "dataSource"),
append = TRUE,
printSummary = TRUE)
```
## 5.10 Save
Save the intermediate dataset.
```{r 5.10, eval = FALSE, collapse = TRUE}
check_space %>%
readr::write_excel_csv(.,
paste(OutPath_Intermediate, "03_space_database.csv",
sep = "/"))
```
# 6.0 Time
Read in the last database, if needed.
```{r 6.0, collapse = TRUE}
if(!exists("check_space")){
check_time <-
readr::read_csv(paste(OutPath_Intermediate, "03_space_database.csv", sep = "/"),
col_types = BeeBDC::ColTypeR())
}else{
check_time <- check_space
# Remove the spent file
rm(check_space)}
```
You can plot a histogram of dates here.
```{r 6.0ii, collapse = TRUE}
hist(lubridate::ymd_hms(check_time$eventDate, truncated = 5), breaks = 20,
main = "Histogram of eventDates")
```
Filter some silly dates that don't make sense.
```{r 6.0iii, collapse = TRUE}
check_time$year <- ifelse(check_time$year > lubridate::year(Sys.Date()) | check_time$year < 1600,
NA, check_time$year)
check_time$month <- ifelse(check_time$month > 12 | check_time$month < 1,
NA, check_time$month)
check_time$day <- ifelse(check_time$day > 31 | check_time$day < 1,
NA, check_time$day)
```
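As an optional sanity check after those replacements, you can count how many *year*, *month*, and *day* values are now NA.
```{r, eval = FALSE, collapse = TRUE}
# Optional sanity check: how many year, month, and day values are now NA?
colSums(is.na(check_time[, c("year", "month", "day")]))
```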
## 6.1 Recover dates
The `dateFindR()` function will search through some other columns in order to find and rescue dates that may not have made it into the correct columns. It will further update the *eventDate*, *day*, *month*, and *year* columns where these data were a) missing and b) located in one of the searched columns.
```{r 6.1, collapse = TRUE}
check_time <- BeeBDC::dateFindR(data = check_time,
# Years above this are removed (from the recovered dates only)
maxYear = lubridate::year(Sys.Date()),
# Years below this are removed (from the recovered dates only)
minYear = 1700)
```
## 6.2 No eventDate
Flag records that simply lack collection date. :(
```{r 6.2, collapse = TRUE}
check_time <-
bdc::bdc_eventDate_empty(data = check_time, eventDate = "eventDate")
```
## 6.3 Old records
This will flag records prior to the date selected. 1970 is frequently chosen for SDM work. You may not need to filter old records at all, so think critically about your use. We have chosen 1950 as a lower extreme.
```{r 6.3, collapse = TRUE}
check_time <-
bdc::bdc_year_outOfRange(data = check_time,
eventDate = "year",
year_threshold = 1950)
```
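A quick tally of the resulting *.year_outOfRange* column (FALSE = flagged as before the 1950 threshold) can help you decide whether the threshold suits your use case.
```{r, eval = FALSE, collapse = TRUE}
# Quick tally of the year flag (FALSE = before the 1950 threshold)
table(check_time$.year_outOfRange, useNA = "always")
```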
## 6.4 Time report
*Not all of time, just the time pertaining to our precise occurrence records.*
Update the *.summary* column.
```{r 6.4, eval = TRUE, collapse = TRUE}
check_time <- BeeBDC::summaryFun(
data = check_time,
# Don't filter these columns (or NULL)
dontFilterThese = c(".gridSummary", ".lonFlag", ".latFlag", ".uncer_terms"),
# Remove the filtering columns?
removeFilterColumns = FALSE,
# Filter to ONLY cleaned data?
filterClean = FALSE)
```
```{r 6.4ii, eval = FALSE, collapse = TRUE}
( report <-
bdc::bdc_create_report(data = check_time,
database_id = "database_id",
workflow_step = "time",
save_report = FALSE)
)
```
## 6.5 Time figures
Create time results figures.
```{r 6.5, eval = FALSE, collapse = TRUE}
figures <-
BeeBDC::jbd_create_figures(data = check_time,
path = DataPath,
database_id = "database_id",
workflow_step = "time",
save_figures = TRUE)
```
You can check figures by using...
```{r 6.5ii, eval = FALSE, collapse = TRUE}
figures$year
```
Save the time-revised data into the intermediate folder.
```{r 6.5iii, eval = FALSE, collapse = TRUE}
check_time %>%
readr::write_excel_csv(.,
paste(OutPath_Intermediate, "04_time_database.csv",
sep = "/"))
```
## 6.6 Save flags
Save the flags so far.
```{r, eval = FALSE, collapse = TRUE}
BeeBDC::flagRecorder(
data = check_time,
outPath = paste(OutPath_Report, sep =""),
fileName = paste0("flagsRecorded_", Sys.Date(), ".csv"),
idColumns = c("database_id", "id", "catalogNumber", "occurrenceID", "dataSource"),
append = TRUE,
printSummary = TRUE)
```
# 7.0 De-duplication
The dataset can be re-read here if it does not already exist.
```{r 7.0, eval = FALSE, collapse = TRUE}
if(!exists("check_time")){
check_time <-
readr::read_csv(paste(OutPath_Intermediate, "04_time_database.csv",
sep = "/"),
col_types = BeeBDC::ColTypeR())}
```
## 7.1 deDuplicate
We will FLAG duplicates here. These input columns can be hacked to de-duplicate as you wish. This function uses user-specified inputs and columns to identify duplicate occurrence records. Duplicates are identified iteratively and will be tallied up, duplicate pairs clustered, and sorted at the end of the function. The function is designed to work with Darwin Core data with a *database_id* column, but it is also modifiable to work with other columns.
I would encourage you to see '?`dupeSummary()`' for more details as this function is quite modifiable to user needs.
```{r 7.1, collapse = TRUE}
check_time <- BeeBDC::dupeSummary(
data = check_time,
path = OutPath_Report,
# options are "ID","collectionInfo", or "both"
duplicatedBy = "collectionInfo",
  # The columns to generate completeness info from (and to sort by completeness)
completeness_cols = c("decimalLatitude", "decimalLongitude",
"scientificName", "eventDate"),
# The columns to ADDITIONALLY consider when finding duplicates in collectionInfo
collectionCols = c("decimalLatitude", "decimalLongitude", "scientificName", "eventDate",
"recordedBy"),
# The columns to combine, one-by-one with the collectionCols
collectInfoColumns = c("catalogNumber", "otherCatalogNumbers"),
# Custom comparisons — as a list of columns to compare
# RAW custom comparisons do not use the character and number thresholds
CustomComparisonsRAW = dplyr::lst(c("catalogNumber", "institutionCode", "scientificName")),
# Other custom comparisons use the character and number thresholds
CustomComparisons = dplyr::lst(c("gbifID", "scientificName"),
c("occurrenceID", "scientificName"),
c("recordId", "scientificName"),
c("id", "scientificName")),
# The order in which you want to KEEP duplicated based on data source
# try unique(check_time$dataSource)
sourceOrder = c("CAES", "Gai", "Ecd","BMont", "BMin", "EPEL", "ASP", "KP", "EcoS", "EaCO",
"FSCA", "Bal", "SMC", "Lic", "Arm",
"USGS", "ALA", "VicWam", "GBIF","SCAN","iDigBio"),
# Paige ordering is done using the database_id prefix, not the dataSource prefix.
prefixOrder = c("Paige", "Dorey"),
# Set the complexity threshold for id letter and number length
# minimum number of characters when WITH the numberThreshold
characterThreshold = 2,
# minimum number of numbers when WITH the characterThreshold
numberThreshold = 3,
# Minimum number of numbers WITHOUT any characters
numberOnlyThreshold = 5
) %>% # END dupeSummary
dplyr::as_tibble(col_types = BeeBDC::ColTypeR())
```
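A quick tally of the duplicate flag can be handy before saving. This assumes the flag column is named *.duplicates*, as in the packaged `beesFlagged` example data; confirm with `colnames(check_time)` if in doubt.
```{r, eval = FALSE, collapse = TRUE}
# Optional: tally the duplicate flag (FALSE = flagged as a duplicate)
# The .duplicates column name is assumed from the packaged beesFlagged example data
table(check_time$.duplicates, useNA = "always")
```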
Save the dataset into the intermediate folder.
```{r 7.1ii, eval = FALSE, collapse = TRUE}
check_time %>%
readr::write_excel_csv(.,
paste(OutPath_Intermediate, "04_2_dup_database.csv",
sep = "/"))
```
## 7.2 Save flags
Save the flags so far.
```{r 7.2, eval = FALSE, collapse = TRUE}
BeeBDC::flagRecorder(
data = check_time,
outPath = paste(OutPath_Report, sep =""),
fileName = paste0("flagsRecorded_", Sys.Date(), ".csv"),
idColumns = c("database_id", "id", "catalogNumber", "occurrenceID", "dataSource"),
append = TRUE,
printSummary = TRUE)
```
# 8.0 Data filtering
The dataset can be re-read here if it does not already exist.
```{r 8.0, eval = FALSE, collapse = TRUE}
if(!exists("check_time")){
check_time <-
readr::read_csv(paste(OutPath_Intermediate, "04_2_dup_database.csv",
sep = "/"), col_types = ColTypeR())}
```
## 8.1 rm Outliers
Read in the most-recent duplicates file (generated by `dupeSummary()`) in order to identify the duplicates of the expert outliers.
```{r 8.1, eval = TRUE, collapse = TRUE}
if(!exists("duplicates")){
duplicates <- BeeBDC::fileFinder(path = DataPath,
fileName = "duplicateRun_") %>%
readr::read_csv()}
```
Identify the outliers and get a list of their database_ids. This would require the source outlier files provided with the [BeeBDC](https://doi.org/10.1101/2023.06.30.547152) paper. These files can further be modified to include more outliers.
check_time <- BeeBDC::manualOutlierFindeR(
data = check_time,
DataPath = DataPath,
PaigeOutliersName = "removedBecauseDeterminedOutlier.csv",
newOutliersName = "^All_outliers_ANB_14March.xlsx",
ColombiaOutliers_all = "All_Colombian_OutlierIDs.csv",
# A .csv with manual outlier records that are too close to otherwise TRUE records
NearTRUE = "nearTRUE.csv",
duplicates = duplicates)
## 8.2 Save uncleaned
Save the uncleaned dataset.
```{r 8.2, eval = TRUE, collapse = TRUE}
# Make sure that the .summary column is updated
check_time <- BeeBDC::summaryFun(
data = check_time,
dontFilterThese = c(".gridSummary", ".lonFlag", ".latFlag", ".uncer_terms",
".uncertaintyThreshold"),
removeFilterColumns = FALSE,
filterClean = FALSE)
```
```{r 8.2ii, eval = FALSE, collapse = TRUE}
# Save the uncleaned dataset
check_time %>% readr::write_excel_csv(.,
paste(OutPath_Intermediate, "05_unCleaned_database.csv",
sep = "/"))
```
## 8.3 Filter
Now clean the dataset of extra columns and failed rows and then save it.
```{r 8.3, eval = TRUE, collapse = TRUE}
cleanData <- BeeBDC::summaryFun(
data = check_time,
dontFilterThese = c(".gridSummary", ".lonFlag", ".latFlag", ".uncer_terms",
".uncertaintyThreshold"),
# Remove the filtering columns?
removeFilterColumns = TRUE,
# Filter to ONLY cleaned data?
filterClean = TRUE)
```
```{r 8.3ii, eval = FALSE, collapse = TRUE}
# Save this CLEANED dataset
cleanData %>% readr::write_excel_csv(.,
paste(OutPath_Intermediate, "05_cleaned_database.csv",
sep = "/"))
```
# 9.0 Figures and tables
## 9.1 Duplicate chordDiagrams
Install **BiocManager** and **ComplexHeatmap** if you missed them at the start.
```{r 9.1, message=FALSE, warning=FALSE, eval = FALSE, collapse = TRUE}
if (!require("BiocManager", quietly = TRUE))
install.packages("BiocManager", repos = "http://cran.us.r-project.org")
BiocManager::install("ComplexHeatmap")
```
Read in the most recent file of flagged duplicates, if it’s not already in your environment.
```{r 9.1ii, eval = TRUE, collapse = TRUE}
if(!exists("duplicates")){
duplicates <- BeeBDC::fileFinder(path = DataPath,
fileName = "duplicateRun_") %>%
readr::read_csv()}
```
Choose the global figure parameters.
```{r 9.1on.exit, include = FALSE}
oldpar <- par(no.readonly = TRUE)
on.exit(oldpar)
```
```{r 9.1iii, eval = FALSE, collapse = TRUE}
par(mar = c(2, 2, 2, 2)/2, mfrow = c(1,1))
```
Create the chordDiagram. You can leave many of the below values out, but we show the defaults here. There are no duplicates in our current test dataset, so **BeeBDC** will throw an informative error. However, we show the full output figure from our bee dataset below.
```{r 9.1iv, eval=FALSE, fig.fullwidth=TRUE, fig.height=7.5, fig.width=9, collapse = TRUE}
BeeBDC::chordDiagramR(
# The duplicate data from the dupeSummary function output
dupeData = duplicates,
outPath = OutPath_Figures,
fileName = "ChordDiagram.pdf",
# These can be modified to help fit the final pdf that's exported.
width = 9,
height = 7.5,
bg = "white",
# How few distinct dataSources should a group have to be listed as "other"
smallGrpThreshold = 3,
title = "Duplicated record sources",
  # The default list of colour palettes to choose from using the paletteer package
palettes = c("cartography::blue.pal", "cartography::green.pal",
"cartography::sand.pal", "cartography::orange.pal", "cartography::red.pal",
"cartography::purple.pal", "cartography::brown.pal"),
canvas.ylim = c(-1.0,1.0),
canvas.xlim = c(-0.6, 0.25),
text.col = "black",
legendX = grid::unit(6, "mm"),
legendY = grid::unit(18, "mm"),
legendJustify = c("left", "bottom"),
niceFacing = TRUE)
```
![Full chord diagram from Dorey et al. 2023](https://photos.smugmug.com/photos/i-Xt3tN8L/0/X5/i-Xt3tN8L-X5.jpg)
## 9.2 Duplicate histogram
Read in the uncleaned dataset, if it's not already present.
```{r 9.2, eval = TRUE, collapse = TRUE}
if(!exists("check_time")){
beeData <- readr::read_csv(paste(OutPath_Intermediate, "05_unCleaned_database.csv",
sep = "/"),
col_types = BeeBDC::ColTypeR())
}else{
beeData <- check_time
rm(check_time)
}
```
Create a plot with two bar graphs. One shows the absolute number of duplicate records for each data source, while the other shows the proportion of records that are duplicated within each data source. (*'dataSource'* is simplified to the text before the first underscore).
```{r 9.2ii, warning=FALSE, eval=TRUE, collapse = TRUE}
BeeBDC::dupePlotR(
data = beeData,
# The outPath to save the plot as
outPath = OutPath_Figures,
fileName = "duplicatePlot.pdf",
# Colours in order: duplicate, kept duplicate, unique
dupeColours = c("#F2D2A2","#B9D6BC", "#349B90"),
# Plot size and height
base_height = 7, base_width = 7,
legend.position = c(0.85, 0.8),
# Extra variables can be fed into forcats::fct_recode() to change names on plot
GBIF = "GBIF", SCAN = "SCAN", iDigBio = "iDigBio", USGS = "USGS", ALA = "ALA",
ASP = "ASP", CAES = "CAES", Ecd = "Ecd",
returnPlot = TRUE
)
```
## 9.3 Flags by source
Create a compound bar plot that shows the proportion of records that pass or fail each flag (rows) for each data source (columns). The function can also optionally return a point map for a user-specified species when plotMap = TRUE. (*dataSource* is simplified to the text before the first underscore.)
```{r 9.3, fig.width=15, fig.height=9, fig.fullwidth=TRUE, eval=TRUE, collapse = TRUE}
BeeBDC::plotFlagSummary(
data = beeData,
# Colours in order of pass (TRUE), fail (FALSE), and NA
flagColours = c("#127852", "#A7002D", "#BDBABB"),
fileName = paste0("FlagsPlot_", Sys.Date(),".pdf"),
outPath = paste0(OutPath_Figures),
width = 15, height = 9,
# OPTIONAL:
# # Filter to a single species
# speciesName = "Holcopasites heliopsis",
# # column to look in
# nameColumn = "species",
# # Save the filtered data
# saveFiltered = TRUE,
# # Filter column to display on map
# filterColumn = ".summary",
# plotMap = TRUE,
# # amount to jitter points if desired, e.g. 0.25 or NULL
# jitterValue = NULL,
# # Map opacity value for points between 0 and 1
# mapAlpha = 1,
# # If a user wants to output the table used to make the figure, change this to TRUE
# saveTable = FALSE,
# Extra variables can be fed into forcats::fct_recode() to change names on plot
GBIF = "GBIF", SCAN = "SCAN", iDigBio = "iDigBio", USGS = "USGS", ALA = "ALA",
ASP = "ASP", CAES = "CAES", 'BMont' = "BMont", 'BMin' = "BMin", Ecd = "Ecd",
Gaiarsa = "Gai", EPEL = "EPEL", VicWam = "VicWam",
returnPlot = TRUE
)
```
## 9.4 Maps
Import CLEANED dataset (you can change this option).
```{r 9.4, eval = TRUE, collapse = TRUE}
if(!exists("cleanData")){
cleanData <- readr::read_csv(paste(OutPath_Intermediate, "05_cleaned_database.csv",
sep = "/"),
col_types = BeeBDC::ColTypeR())}
```
### a. Summary maps
Draw a global summary map for occurrence and species number by country.
```{r 9.4a, eval=FALSE, collapse = TRUE}
BeeBDC::summaryMaps(
data = cleanData,
width = 10, height = 10,
class_n = 3,
class_Style = "fisher",
fileName = "CountryMaps_fisher.pdf",
outPath = OutPath_Figures,
returnPlot = TRUE
)
```
### b. Interactive maps
Uses the occurrence data (preferably uncleaned in order to show pass/fail points) and outputs interactive .html maps, which can be opened in your browser, to a specific directory. The maps can highlight if an occurrence has passed all filtering (*.summary* == TRUE) or failed at least one filter (*.summary* == FALSE). This can be updated by first running `summaryFun()` to choose the columns that you want to be highlighted. This function will also highlight occurrences flagged as expert-identified or country outliers separately. Because the function can have any categorical variable fed into 'speciesColumn', users may choose another column of interest to map; however, maps made using very large categories can be slow to produce and unwieldy to view.
```{r 9.4b, eval = FALSE, collapse = TRUE}
BeeBDC::interactiveMapR(
# occurrence data
data = beeData,
# Directory where to save files
  outPath = paste(OutPath_Figures, "interactiveMaps", sep = "/"),
lon = "decimalLongitude",
lat = "decimalLatitude",
# Occurrence dataset column with species names
speciesColumn = "scientificName",
# Which species to map — a character vector of names or "ALL"
# Note: "ALL" is defined AFTER filtering for country
speciesList = "ALL",
countryList = NULL, # study area
# Point jitter to see stacked points — jitters an amount in decimal degrees
jitterValue = 0.01
)
```
## 9.5 Data providers
Read in the clean data if it's not already in the environment.
```{r 9.5, eval = FALSE, collapse = TRUE}
if(!exists("cleanData")){
cleanData <- readr::read_csv(paste(OutPath_Intermediate, "05_cleaned_database.csv",
sep = "/"),
col_types = BeeBDC::ColTypeR(),
locale = readr::locale(encoding = "UTF-8"))}
```
This function will attempt to find and build a table of data providers that have contributed to the input data, especially using the *'institutionCode'* column. It will also search a variety of other columns to find data providers using an internally set sequence of if-else statements. Hence, this function is quite specific for bee data, but it should work for other taxa in similar institutions (perhaps to a lesser degree).
```{r 9.5ii, eval = TRUE, collapse = TRUE}
# Note, if outPath = NULL then no file will be saved
dataProvTable <- BeeBDC::dataProvTables(data = cleanData,
runBeeDataChecks = TRUE,
outPath = NULL,
fileName = "dataProvTable.csv")
```
## 9.6 Flag summary
The function `flagSummaryTable()` takes a flagged dataset and returns the total number of fails (FALSE) per flag (in columns starting with “.”) and per species. Users may define the column by which to group the summary. While it is intended to work with the *scientificName* column, users may select any grouping column (e.g., *country*).
```{r 9.6, eval = TRUE, collapse = TRUE}
# Note, if outPath = NULL then no file will be saved
summaryTable <- BeeBDC::flagSummaryTable(data = beeData,
column = "scientificName",
outPath = NULL,
fileName = "flagTable.csv")
```
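Because any grouping column can be used, the same call can, for example, be grouped by *country* instead of *scientificName*:
```{r, eval = FALSE, collapse = TRUE}
# The same summary grouped by country instead of species; any grouping column works
countryFlagTable <- BeeBDC::flagSummaryTable(data = beeData,
                                             column = "country",
                                             outPath = NULL,
                                             fileName = "flagTable_byCountry.csv")
```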
```{r cleanup, include=FALSE, collapse = TRUE}
# Remove the webpage folder
unlink(paste0(dirname(getwd()), "/inst/extdata/WebDir"), recursive = TRUE)
```
|
/scratch/gouwar.j/cran-all/cranData/BeeBDC/inst/doc/BeeBDC_main.Rmd
|
## ----libraryChunk, load-packages, include=FALSE-------------------------------
# markdown packages
library(rgnparser)
library(magrittr)
library(knitr)
library(rmarkdown)
library(rmdformats)
library(prettydoc)
library(htmltools)
library(pkgdown)
# Load core packages
library(devtools)
library(BiocManager)
library(purrr)
library(here)
library(renv)
library(bdc)
library(CoordinateCleaner)
library(dplyr)
library(readr)
library(stringr)
library(lubridate)
library(tidyselect)
library(R.utils)
library(tidyr)
library(ggplot2)
library(forcats)
library(emld)
library(rlang)
library(xml2)
library(mgsub)
library(rvest)
library(rnaturalearth)
library(rnaturalearthdata)
library(countrycode)
library(janitor)
library(circlize)
library(paletteer)
library(cowplot)
library(igraph)
library(ggspatial)
library(sf)
library(parallel)
library(terra)
# Don't detect cores to avoid GitHub errors
old <- options() # code line i
on.exit(options(old)) # code line i+1
options(mc.cores = parallel::detectCores())
## ----secretRootPath, include=FALSE--------------------------------------------
# Set the RootPath to tempdir
RootPath <- tempdir()
if (!dir.exists(paste0(RootPath, "/Data_acquisition_workflow"))) {
dir.create(paste0(RootPath, "/Data_acquisition_workflow"), recursive = TRUE)
}
## ----global-options, include=FALSE--------------------------------------------
knitr::opts_chunk$set(error = TRUE,
eval = TRUE,
tidy = TRUE,
warning = FALSE,
root.dir = normalizePath(tempdir()))
## ----falseRootPath, eval=FALSE------------------------------------------------
# RootPath <- paste0("/your/path/here")
## ----CreateRootPath, warning=FALSE, collapse = TRUE---------------------------
# Create the working directory in the RootPath if it doesn't exist already
if (!dir.exists(paste0(RootPath, "/Data_acquisition_workflow"))) {
dir.create(paste0(RootPath, "/Data_acquisition_workflow"), recursive = TRUE)
}
# Set the working directory
setwd(paste0(RootPath,"/Data_acquisition_workflow"))
## ----activate, collapse = TRUE------------------------------------------------
renv::activate(project = paste0(RootPath,"/Data_acquisition_workflow"))
## ----installPackages, message=FALSE, warning=FALSE, results=FALSE, collapse = TRUE, eval = FALSE----
# if (!require("BiocManager", quietly = TRUE))
# install.packages("BiocManager", repos = "http://cran.us.r-project.org")
#
# BiocManager::install("ComplexHeatmap")
## ----rnaturalearthhires, eval=FALSE-------------------------------------------
# # Install remotes if needed
# if (!require("remotes", quietly = TRUE))
# install.packages("remotes", repos = "http://cran.us.r-project.org")
# # Download and then load rnaturalearthhires
# remotes::install_github("ropensci/rnaturalearthhires")
# install.packages("rnaturalearthhires", repos = "https://ropensci.r-universe.dev", type = "source")
# library(rnaturalearthhires)
## ----installBeeBDC, results=TRUE, message=TRUE, eval = FALSE, collapse = TRUE----
# install.packages("BeeBDC")
# library(BeeBDC)
## ----snapshot, collapse = TRUE------------------------------------------------
renv::snapshot(project = paste0(RootPath,"/Data_acquisition_workflow"),
prompt = FALSE)
## ----dirMaker, collapse = TRUE, eval = FALSE----------------------------------
# BeeBDC::dirMaker(
# RootPath = RootPath,
# RDoc = "vignettes/BeeBDC_main.Rmd") %>%
# # Add paths created by this function to the environment()
# list2env(envir = parent.env(environment()))
## ----dirMakerSECRETELY, include = FALSE---------------------------------------
# For the sake of this tutorial, we will not use here::i_am in dirMaker, because we aren't allowed
# to mess with package directories in this way. This will work-around to use the tempdir()
DataPath <- paste0(RootPath, "/Data_acquisition_workflow")
OutPath_Check <- paste0(RootPath, "/Data_acquisition_workflow/Output/Check")
OutPath_Figures <- paste0(RootPath, "/Data_acquisition_workflow/Output/Figures")
OutPath_Intermediate <- paste0(RootPath, "/Data_acquisition_workflow/Output/Intermediate")
OutPath_Report <- paste0(RootPath, "/Data_acquisition_workflow/Output/Report")
# Create these files
if (!dir.exists(DataPath)) {
dir.create(DataPath, recursive = TRUE)}
if (!dir.exists(OutPath_Check)) {
dir.create(OutPath_Check, recursive = TRUE)}
if (!dir.exists(OutPath_Figures)) {
dir.create(OutPath_Figures, recursive = TRUE)}
if (!dir.exists(OutPath_Intermediate)) {
dir.create(OutPath_Intermediate, recursive = TRUE)}
if (!dir.exists(OutPath_Report)) {
dir.create(OutPath_Report, recursive = TRUE)}
## ----lapply_library, results=FALSE, collapse = TRUE---------------------------
lapply(c("ComplexHeatmap", "magrittr"),
library, character.only = TRUE)
## ----2.0, eval = FALSE--------------------------------------------------------
# # Load some package data — the taxonomy and a flagged example dataset
# # Download the full beesTaxonomy file
# taxonomyFile <- BeeBDC::beesTaxonomy()
## ----2.0secret, collapse = TRUE, eval = TRUE----------------------------------
# load in the small test dataset in the background
system.file("extdata", "testTaxonomy.rda", package="BeeBDC") |>
load()
# Rename the file
taxonomyFile <- testTaxonomy
rm(testTaxonomy)
## ----2.0ii--------------------------------------------------------------------
# Load the example beesFlagged dataset
beesFlagged <- BeeBDC::beesFlagged
selectedGenera <- taxonomyFile %>%
# Select only tribe anthophorini (for example)
dplyr::filter(tolower(tribe) == tolower("anthophorini")) %>%
distinct(genus)
# Filter the data
taxonData <- beesFlagged %>%
dplyr::filter(genus %in% selectedGenera$genus)
# View the data
taxonData
## ----3.0----------------------------------------------------------------------
# Select your study area
studyArea <- c("Canada", "United states", "Mexico", "Guatemala")
# Filter the data to that area
countryData <- beesFlagged %>%
dplyr::filter(country %in% studyArea)
# View the data
countryData
## ----4.1----------------------------------------------------------------------
filteredData <-
BeeBDC::summaryFun(data = beesFlagged,
# Choose the columns to NOT filter (or NULL to filter all columns)
dontFilterThese = c(".gridSummary", ".lonFlag", ".latFlag", ".uncer_terms",
".uncertaintyThreshold"),
# In the output, do you want to REMOVE all filtering columns (TRUE), or keep them (FALSE)
removeFilterColumns = TRUE,
# In the output, do you want to only keep clean data according to your filtering (TRUE),
# Or keep all data and simply update the .summary column (FALSE)
filterClean = TRUE)
## ----4.2----------------------------------------------------------------------
filteredData <- beesFlagged %>%
  # Remove any existing .uncertaintyThreshold column
dplyr::select(!tidyselect::any_of(".uncertaintyThreshold")) %>%
  # Choose the coordinate uncertainty to filter to...
BeeBDC::coordUncerFlagR(data = .,
uncerColumn = "coordinateUncertaintyInMeters",
# 10 km here
threshold = 10000) %>%
# Now re-do the .summary column and filter the data using this new value
BeeBDC::summaryFun(
data = .,
dontFilterThese = c(".gridSummary", ".lonFlag", ".latFlag", ".uncer_terms"),
removeFilterColumns = TRUE,
filterClean = TRUE)
## ----4.2a---------------------------------------------------------------------
filteredData <- beesFlagged %>%
  # Remove any existing .year_outOfRange column
dplyr::select(!".year_outOfRange") %>%
  # Choose the minimum year to filter to...
bdc::bdc_year_outOfRange(data = .,
eventDate = "year",
year_threshold = 1970) %>%
# Now re-do the .summary column and filter the data using this new value
BeeBDC::summaryFun(
data = .,
dontFilterThese = c(".gridSummary", ".lonFlag", ".latFlag", ".uncer_terms",
".uncertaintyThreshold"),
removeFilterColumns = TRUE,
filterClean = TRUE)
## ----4.2b---------------------------------------------------------------------
filteredData <-
# The input dataset
beesFlagged %>%
  # Choose the year range...
dplyr::filter(year > 1950 & year < 1970) %>%
# Now re-do the .summary column and filter the data using this new value
BeeBDC::summaryFun(
# Select the input dataset to filter
data = .,
# Choose the columns to NOT filter (or NULL to filter all columns)
dontFilterThese = c(".gridSummary", ".lonFlag", ".latFlag", ".uncer_terms",
".uncertaintyThreshold"),
# In the output, do you want to REMOVE all filtering columns (TRUE), or keep them (FALSE)
removeFilterColumns = TRUE,
# In the output, do you want to only keep clean data according to your filtering (TRUE),
# Or keep all data and simply update the .summary column (FALSE)
filterClean = TRUE)
## ----5.1, eval = FALSE--------------------------------------------------------
# if(!require("BiocManager", quietly = TRUE)){
# install.packages("BiocManager")}
# BiocManager::install("ComplexHeatmap", force = TRUE)
# renv::snapshot()
## ----5.1ii, eval = FALSE------------------------------------------------------
# duplicates <- fileFinder(path = "PATH TO A FOLDER CONTAINING THE duplicateRun_ — could be supp. materials folder",
# fileName = "duplicateRun_") %>%
# readr::read_csv() %>%
# # Select only the stingless bee data
# dplyr::filter(database_id %in% stinglessData$database_id |
# database_id_match %in% stinglessData$database_id)
## ----5.1on.exit, include = FALSE----------------------------------------------
oldpar <- par(no.readonly = TRUE)
on.exit(oldpar)
## ----5.1iii, eval = FALSE-----------------------------------------------------
# # Choose the global figure parameters
# par(mar = c(2, 2, 2, 2)/2, mfrow = c(1,1))
#
# # Create the chordDiagram. You can leave many of the below values out but we show here
# # the defaults
#
# BeeBDC::chordDiagramR(
# # The duplicate data from the dupeSummary function output
# dupeData = duplicates,
# outPath = OutPath_Figures,
# fileName = "ChordDiagram.pdf",
# # These can be modified to help fit the final pdf that's exported.
# width = 9,
# height = 7.5,
# bg = "white",
# # How few distinct dataSources should a group have to be listed as "other"
# smallGrpThreshold = 3,
# title = "Duplicated record sources",
# # The default list of colour palettes to choose from using the paletteer package
# palettes = c("cartography::blue.pal", "cartography::green.pal",
# "cartography::sand.pal", "cartography::orange.pal", "cartography::red.pal",
# "cartography::purple.pal", "cartography::brown.pal"),
# canvas.ylim = c(-1.0,1.0),
# canvas.xlim = c(-0.6, 0.25),
# text.col = "black",
# legendX = grid::unit(6, "mm"),
# legendY = grid::unit(18, "mm"),
# legendJustify = c("left", "bottom"),
# niceFacing = TRUE)
## ----5.2----------------------------------------------------------------------
data("beesFlagged", package = "BeeBDC")
# Create a figure showing the total number of duplicates, kept duplicates, and unique
# records for each datasource (simplified to the text before the first underscore) and
# the proportion of the above for each data source
BeeBDC::dupePlotR(
data = beesFlagged,
# The outPath to save the plot as
outPath = tempdir(),
fileName = "Fig3_duplicatePlot.pdf",
# Colours in order: duplicate, kept duplicate, unique
dupeColours = c("#F2D2A2","#B9D6BC", "#349B90"),
# Plot size and height
base_height = 7, base_width = 7,
legend.position = c(0.85, 0.8),
# Extra variables can be fed into forcats::fct_recode() to change names on plot
GBIF = "GBIF", SCAN = "SCAN", iDigBio = "iDigBio", USGS = "USGS", ALA = "ALA",
ASP = "ASP",
returnPlot = TRUE
)
## ----5.3b---------------------------------------------------------------------
# Visualise all flags for each dataSource (simplified to the text before the first underscore)
# A clever user might also realise the potential to summarise and produce outputs in other columns
BeeBDC::plotFlagSummary(
# WARNING: alternate path if wanting to produce figures for the selected taxonData (2.0 above)
# Select only the taxonData data
data = beesFlagged,
# Colours in order of pass (TRUE), fail (FALSE), and NA
flagColours = c("#127852", "#A7002D", "#BDBABB"),
fileName = paste0("FlagsPlot_Amell", Sys.Date(),".pdf"),
outPath = tempdir(),
width = 15, height = 9,
# OPTIONAL:
# # Filter to species
speciesName = "Apis mellifera Linnaeus, 1758",
# column to look in
nameColumn = "scientificName",
# Save the filtered data
saveFiltered = FALSE,
# Filter column to display on map
filterColumn = ".summary",
plotMap = TRUE,
# amount to jitter points if desired, e.g. 0.25 or NULL
jitterValue = NULL,
# Map opacity value for points between 0 and 1
mapAlpha = 1,
returnPlot = TRUE,
# Extra variables can be fed into forcats::fct_recode() to change names on plot
GBIF = "GBIF", SCAN = "SCAN", iDigBio = "iDigBio", USGS = "USGS", ALA = "ALA",
ASP = "ASP", CAES = "CAES", 'B. Mont.' = "BMont", 'B. Minkley' = "BMin", Ecd = "Ecd",
Gaiarsa = "Gai", EPEL = "EPEL"
)
## ----5.4----------------------------------------------------------------------
BeeBDC::summaryMaps(
data = beesFlagged,
width = 10, height = 10,
class_n = 3,
class_Style = "jenks",
outPath = tempdir(),
fileName = "CountryMaps_jenks.pdf",
returnPlot = TRUE
)
## ----6.0, eval = FALSE--------------------------------------------------------
# mapData %>%
# readr::write_excel_csv(paste0(DataPath, "/Output/Intermediate/", "cleanTaxon_",
# Sys.Date(), ".csv"))
## ----cleanup, include=FALSE, collapse = TRUE----------------------------------
# Remove the webpage folder
unlink(paste0(dirname(getwd()), "/inst/extdata/WebDir"), recursive = TRUE)
|
/scratch/gouwar.j/cran-all/cranData/BeeBDC/inst/doc/basic_workflow.R
|
---
title: "Basic workflow"
output:
rmarkdown::html_vignette:
vignette: >
%\VignetteIndexEntry{Basic workflow}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r libraryChunk, load-packages, include=FALSE}
# markdown packages
library(rgnparser)
library(magrittr)
library(knitr)
library(rmarkdown)
library(rmdformats)
library(prettydoc)
library(htmltools)
library(pkgdown)
# Load core packages
library(devtools)
library(BiocManager)
library(purrr)
library(here)
library(renv)
library(bdc)
library(CoordinateCleaner)
library(dplyr)
library(readr)
library(stringr)
library(lubridate)
library(tidyselect)
library(R.utils)
library(tidyr)
library(ggplot2)
library(forcats)
library(emld)
library(rlang)
library(xml2)
library(mgsub)
library(rvest)
library(rnaturalearth)
library(rnaturalearthdata)
library(countrycode)
library(janitor)
library(circlize)
library(paletteer)
library(cowplot)
library(igraph)
library(ggspatial)
library(sf)
library(parallel)
library(terra)
# Don't detect cores to avoid a GitHub error
old <- options() # code line i
on.exit(options(old)) # code line i+1
options(mc.cores = parallel::detectCores())
```
```{r secretRootPath, include=FALSE}
# Set the RootPath to tempdir
RootPath <- tempdir()
if (!dir.exists(paste0(RootPath, "/Data_acquisition_workflow"))) {
dir.create(paste0(RootPath, "/Data_acquisition_workflow"), recursive = TRUE)
}
```
```{r global-options, include=FALSE}
knitr::opts_chunk$set(error = TRUE,
eval = TRUE,
tidy = TRUE,
warning = FALSE,
root.dir = normalizePath(tempdir()))
```
This workflow is meant to be a basic example of how a user might take a flagged version of our (or some other) occurrence dataset and filter for specific taxa or countries, re-apply flagging functions, re-filter the data, or make maps based on those data.
# 0.0 Script preparation
## 0.1 Working directory
Choose the path to the root folder in which all other folders can be found.
```{r falseRootPath, eval=FALSE}
RootPath <- paste0("/your/path/here")
```
```{r CreateRootPath, warning=FALSE, collapse = TRUE}
# Create the working directory in the RootPath if it doesn't exist already
if (!dir.exists(paste0(RootPath, "/Data_acquisition_workflow"))) {
dir.create(paste0(RootPath, "/Data_acquisition_workflow"), recursive = TRUE)
}
# Set the working directory
setwd(paste0(RootPath,"/Data_acquisition_workflow"))
```
For the first time that you run BeeBDC, and if you want to use the renv package to manage your
packages, you can install renv...
install.packages("renv", repos = "http://cran.us.r-project.org")
and then initialise the renv project.
renv::init(project = paste0(RootPath,"/Data_acquisition_workflow"))
If you have already initialised a project, you can instead just activate it.
```{r activate, collapse = TRUE}
renv::activate(project = paste0(RootPath,"/Data_acquisition_workflow"))
```
## 0.2 Install packages (if needed)
You may need to install gdal on your computer. This can be done on a Mac by using Homebrew in the terminal and the command "brew install gdal".
To start out, you will need to install **BiocManager**, **devtools**, **ComplexHeatmap**, and **rnaturalearthhires** to then install and fully use **BeeBDC**.
```{r installPackages, message=FALSE, warning=FALSE, results=FALSE, collapse = TRUE, eval = FALSE}
if (!require("BiocManager", quietly = TRUE))
install.packages("BiocManager", repos = "http://cran.us.r-project.org")
BiocManager::install("ComplexHeatmap")
```
```{r rnaturalearthhires, eval=FALSE}
# Install remotes if needed
if (!require("remotes", quietly = TRUE))
install.packages("remotes", repos = "http://cran.us.r-project.org")
# Download and then load rnaturalearthhires
remotes::install_github("ropensci/rnaturalearthhires")
install.packages("rnaturalearthhires", repos = "https://ropensci.r-universe.dev", type = "source")
library(rnaturalearthhires)
```
Now install **BeeBDC**.
```{r installBeeBDC, results=TRUE, message=TRUE, eval = FALSE, collapse = TRUE}
install.packages("BeeBDC")
library(BeeBDC)
```
Snapshot the renv environment.
```{r snapshot, collapse = TRUE}
renv::snapshot(project = paste0(RootPath,"/Data_acquisition_workflow"),
prompt = FALSE)
```
Set up the directories used by **BeeBDC**. These directories include where the data, figures, reports, etc. will be saved. The RDoc needs to be a path RELATIVE to the RootPath; i.e., the file path from which the two diverge.
```{r dirMaker, collapse = TRUE, eval = FALSE}
BeeBDC::dirMaker(
RootPath = RootPath,
RDoc = "vignettes/BeeBDC_main.Rmd") %>%
# Add paths created by this function to the environment()
list2env(envir = parent.env(environment()))
```
```{r dirMakerSECRETELY, include = FALSE}
# For the sake of this tutorial, we will not use here::i_am in dirMaker, because we aren't allowed
# to mess with package directories in this way. This is a work-around that uses the tempdir()
DataPath <- paste0(RootPath, "/Data_acquisition_workflow")
OutPath_Check <- paste0(RootPath, "/Data_acquisition_workflow/Output/Check")
OutPath_Figures <- paste0(RootPath, "/Data_acquisition_workflow/Output/Figures")
OutPath_Intermediate <- paste0(RootPath, "/Data_acquisition_workflow/Output/Intermediate")
OutPath_Report <- paste0(RootPath, "/Data_acquisition_workflow/Output/Report")
# Create these files
if (!dir.exists(DataPath)) {
dir.create(DataPath, recursive = TRUE)}
if (!dir.exists(OutPath_Check)) {
dir.create(OutPath_Check, recursive = TRUE)}
if (!dir.exists(OutPath_Figures)) {
dir.create(OutPath_Figures, recursive = TRUE)}
if (!dir.exists(OutPath_Intermediate)) {
dir.create(OutPath_Intermediate, recursive = TRUE)}
if (!dir.exists(OutPath_Report)) {
dir.create(OutPath_Report, recursive = TRUE)}
```
## 0.3 Load packages
Load packages.
```{r lapply_library, results=FALSE, collapse = TRUE}
lapply(c("ComplexHeatmap", "magrittr"),
library, character.only = TRUE)
```
# 2.0 Taxon example
If you want to filter the dataset to a particular taxon of interest, you can do so quite easily using **dplyr** from the **tidyverse** group of packages. To filter to the genera of a selected bee tribe, in our case Anthophorini...
```{r 2.0, eval = FALSE}
# Load some package data — the taxonomy and a flagged example dataset
# Download the full beesTaxonomy file
taxonomyFile <- BeeBDC::beesTaxonomy()
```
```{r 2.0secret, collapse = TRUE, eval = TRUE}
# load in the small test dataset in the background
system.file("extdata", "testTaxonomy.rda", package="BeeBDC") |>
load()
# Rename the file
taxonomyFile <- testTaxonomy
rm(testTaxonomy)
```
```{r 2.0ii}
# Load the example beesFlagged dataset
beesFlagged <- BeeBDC::beesFlagged
selectedGenera <- taxonomyFile %>%
# Select only tribe anthophorini (for example)
dplyr::filter(tolower(tribe) == tolower("anthophorini")) %>%
distinct(genus)
# Filter the data
taxonData <- beesFlagged %>%
dplyr::filter(genus %in% selectedGenera$genus)
# View the data
taxonData
```
# 3.0 Country example
Similarly to the above, you can filter for only countries of interest. Keep in mind that sometimes the *country* column may not hold all of the records that fall in that country if it, or the coordinates, have been entered incorrectly.
```{r 3.0}
# Select your study area
studyArea <- c("Canada", "United states", "Mexico", "Guatemala")
# Filter the data to that area
countryData <- beesFlagged %>%
dplyr::filter(country %in% studyArea)
# View the data
countryData
```
# 4.0 Filtering example
## 4.1 Simple filter
The **BeeBDC** package provides a simple function that can re-build the *.summary* column based on the filtering columns that are present in the dataset (those starting with "."). You can also choose which filters you DO NOT want to implement using the dontFilterThese argument. In this example, we are also removing all of the filtering columns in the output dataset (removeFilterColumns = TRUE) and filtering to only completely clean occurrences (filterClean = TRUE). For the latter, we are only keeping *.summary* == TRUE.
```{r 4.1}
filteredData <-
BeeBDC::summaryFun(data = beesFlagged,
# Choose the columns to NOT filter (or NULL to filter all columns)
dontFilterThese = c(".gridSummary", ".lonFlag", ".latFlag", ".uncer_terms",
".uncertaintyThreshold"),
# In the output, do you want to REMOVE all filtering columns (TRUE), or keep them (FALSE)
removeFilterColumns = TRUE,
# In the output, do you want to only keep clean data according to your filtering (TRUE),
# Or keep all data and simply update the .summary column (FALSE)
filterClean = TRUE)
```
## 4.2 Uncertainty threshold
You may also want to change the *.uncertaintyThreshold* as we have chosen a somewhat strict default of 1 km in our dataset. Here, we will instead flag to 10 km (threshold = 10000 [m]). Additionally, we use the **magrittr** package pipe (%>%) to feed the outputs directly into `summaryFun()` to filter our data in one action!
```{r 4.2}
filteredData <- beesFlagged %>%
# Remove any existing .uncertaintyThreshold column
dplyr::select(!tidyselect::any_of(".uncertaintyThreshold")) %>%
# Choose the coordinate uncertainty to filter to...
BeeBDC::coordUncerFlagR(data = .,
uncerColumn = "coordinateUncertaintyInMeters",
# 10 km here
threshold = 10000) %>%
# Now re-do the .summary column and filter the data using this new value
BeeBDC::summaryFun(
data = .,
dontFilterThese = c(".gridSummary", ".lonFlag", ".latFlag", ".uncer_terms"),
removeFilterColumns = TRUE,
filterClean = TRUE)
```
## 4.3 Date filter
### a. bdc_year_outOfRange
Another column that users are likely to want to pay close attention to is the *.year_outOfRange* column, which is set at 1950 in our dataset. Here, **bdc** provides a function where users can change the year_threshold argument, in this case to 1970. As above, we then use `summaryFun()` to get results in one go.
```{r 4.2a}
filteredData <- beesFlagged %>%
# Remove any existing .year_outOfRange column
dplyr::select(!".year_outOfRange") %>%
# Choose the minimum year to filter to...
bdc::bdc_year_outOfRange(data = .,
eventDate = "year",
year_threshold = 1970) %>%
# Now re-do the .summary column and filter the data using this new value
BeeBDC::summaryFun(
data = .,
dontFilterThese = c(".gridSummary", ".lonFlag", ".latFlag", ".uncer_terms",
".uncertaintyThreshold"),
removeFilterColumns = TRUE,
filterClean = TRUE)
```
### b. year range
Or, if you're interested in a particular time period, again **dplyr** comes to the rescue with some very straightforward filtering within a year range.
```{r 4.2b}
filteredData <-
# The input dataset
beesFlagged %>%
# Choose the year range...
dplyr::filter(year > 1950 & year < 1970) %>%
# Now re-do the .summary column and filter the data using this new value
BeeBDC::summaryFun(
# Select the input dataset to filter
data = .,
# Choose the columns to NOT filter (or NULL to filter all columns)
dontFilterThese = c(".gridSummary", ".lonFlag", ".latFlag", ".uncer_terms",
".uncertaintyThreshold"),
# In the output, do you want to REMOVE all filtering columns (TRUE), or keep them (FALSE)
removeFilterColumns = TRUE,
# In the output, do you want to only keep clean data according to your filtering (TRUE),
# Or keep all data and simply update the .summary column (FALSE)
filterClean = TRUE)
```
Users may choose any number of filtering steps from the main workflow to include above `summaryFun()`; just use pipes '%>%' between the functions and use '.' as the data input, because this will feed the data output from the preceding function into the following one.
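For example, here is a minimal sketch (not run) of chaining two of the flagging functions used above before re-building the *.summary* column; the 5 km and 1980 thresholds are arbitrary values chosen only for illustration:
```{r chainingSketch, eval = FALSE}
filteredData <- beesFlagged %>%
  # Remove the existing flag columns so that they can be re-created
  dplyr::select(!tidyselect::any_of(c(".uncertaintyThreshold", ".year_outOfRange"))) %>%
  # Re-flag coordinate uncertainty, here at 5 km (an arbitrary example value)
  BeeBDC::coordUncerFlagR(data = .,
                          uncerColumn = "coordinateUncertaintyInMeters",
                          threshold = 5000) %>%
  # Re-flag records collected before 1980 (an arbitrary example value)
  bdc::bdc_year_outOfRange(data = .,
                           eventDate = "year",
                           year_threshold = 1980) %>%
  # Re-build the .summary column from all flag columns and keep only clean records
  BeeBDC::summaryFun(data = .,
                     dontFilterThese = NULL,
                     removeFilterColumns = TRUE,
                     filterClean = TRUE)
```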
# 5. Summary figures
Now, if you wanted to rebuild some figures, say after you've added or filtered data, then you can use some of the below processes.
## 5.1 Duplicate chordDiagrams
Our `chordDiagramR()` function is very useful and it relies on two great packages, **circlize** and **ComplexHeatmap**. Unfortunately, the latter is not available on CRAN and so must be downloaded using **BiocManager**.
```{r 5.1, eval = FALSE}
if(!require("BiocManager", quietly = TRUE)){
install.packages("BiocManager")}
BiocManager::install("ComplexHeatmap", force = TRUE)
renv::snapshot()
```
We don't actually have an example duplicates dataset with the package, so I'll magic one up behind the scenes!
```{r 5.1ii, eval = FALSE}
duplicates <- fileFinder(path = "PATH TO A FOLDER CONTAINING THE duplicateRun_ — could be supp. materials folder",
fileName = "duplicateRun_") %>%
readr::read_csv() %>%
# Select only the stingless bee data
dplyr::filter(database_id %in% stinglessData$database_id |
database_id_match %in% stinglessData$database_id)
```
Then, set some parameters for figure borders and run your data through `chordDiagramR()`.
```{r 5.1on.exit, include = FALSE}
oldpar <- par(no.readonly = TRUE)
on.exit(oldpar)
```
```{r 5.1iii, eval = FALSE}
# Choose the global figure parameters
par(mar = c(2, 2, 2, 2)/2, mfrow = c(1,1))
# Create the chordDiagram. You can leave many of the below values out but we show here
# the defaults
BeeBDC::chordDiagramR(
# The duplicate data from the dupeSummary function output
dupeData = duplicates,
outPath = OutPath_Figures,
fileName = "ChordDiagram.pdf",
# These can be modified to help fit the final pdf that's exported.
width = 9,
height = 7.5,
bg = "white",
# How few distinct dataSources should a group have to be listed as "other"
smallGrpThreshold = 3,
title = "Duplicated record sources",
# The default list of colour palettes to choose from using the paletteer package
palettes = c("cartography::blue.pal", "cartography::green.pal",
"cartography::sand.pal", "cartography::orange.pal", "cartography::red.pal",
"cartography::purple.pal", "cartography::brown.pal"),
canvas.ylim = c(-1.0,1.0),
canvas.xlim = c(-0.6, 0.25),
text.col = "black",
legendX = grid::unit(6, "mm"),
legendY = grid::unit(18, "mm"),
legendJustify = c("left", "bottom"),
niceFacing = TRUE)
```
## 5.2 Duplicate histogram
In this example, we will use one of the example datasets to show you how this works. We will use beesFlagged, which has been filtered from a larger dataset and contains duplicates from that larger dataset. To print the plot in R, you need to specify returnPlot = TRUE; otherwise, it will only be saved to disk.
```{r 5.2}
data("beesFlagged", package = "BeeBDC")
# Create a figure showing the total number of duplicates, kept duplicates, and unique
# records for each datasource (simplified to the text before the first underscore) and
# the proportion of the above for each data source
BeeBDC::dupePlotR(
data = beesFlagged,
# The outPath to save the plot as
outPath = tempdir(),
fileName = "Fig3_duplicatePlot.pdf",
# Colours in order: duplicate, kept duplicate, unique
dupeColours = c("#F2D2A2","#B9D6BC", "#349B90"),
# Plot size and height
base_height = 7, base_width = 7,
legend.position = c(0.85, 0.8),
# Extra variables can be fed into forcats::fct_recode() to change names on plot
GBIF = "GBIF", SCAN = "SCAN", iDigBio = "iDigBio", USGS = "USGS", ALA = "ALA",
ASP = "ASP",
returnPlot = TRUE
)
```
## 5.3 Flags by source
The `plotFlagSummary()` function is one of the most important for quickly summarising and checking that your data and flags have worked together correctly. It can be a good starting point for error-checking. You will also see in `plotFlagSummary()` that you can filter to particular species and also output quick point maps of those species.
### a. all taxa in dataset
```
# Visualise all flags for each dataSource (simplified to the text before the first underscore)
BeeBDC::plotFlagSummary(
data = beesFlagged,
# Colours in order of pass (TRUE), fail (FALSE), and NA
flagColours = c("#127852", "#A7002D", "#BDBABB"),
fileName = paste0("Fig4_FlagsPlot_", Sys.Date(),".pdf"),
outPath = tempdir(),
width = 15, height = 9,
# Extra variables can be fed into forcats::fct_recode() to change names on plot
GBIF = "GBIF", SCAN = "SCAN", iDigBio = "iDigBio", USGS = "USGS", ALA = "ALA",
ASP = "ASP",
returnPlot = TRUE
)
```
### b. Single species summary
In fact, let's build one of these single-species examples below using the same data and the omnipresent *Apis mellifera*.
```{r 5.3b}
# Visualise all flags for each dataSource (simplified to the text before the first underscore)
# A clever user might also realise the potential to summarise and produce outputs in other columns
BeeBDC::plotFlagSummary(
# WARNING: alternate path if wanting to produce figures for the selected taxonData (2.0 above)
# Select only the taxonData data
data = beesFlagged,
# Colours in order of pass (TRUE), fail (FALSE), and NA
flagColours = c("#127852", "#A7002D", "#BDBABB"),
fileName = paste0("FlagsPlot_Amell", Sys.Date(),".pdf"),
outPath = tempdir(),
width = 15, height = 9,
# OPTIONAL:
# # Filter to species
speciesName = "Apis mellifera Linnaeus, 1758",
# column to look in
nameColumn = "scientificName",
# Save the filtered data
saveFiltered = FALSE,
# Filter column to display on map
filterColumn = ".summary",
plotMap = TRUE,
# amount to jitter points if desired, e.g. 0.25 or NULL
jitterValue = NULL,
# Map opacity value for points between 0 and 1
mapAlpha = 1,
returnPlot = TRUE,
# Extra variables can be fed into forcats::fct_recode() to change names on plot
GBIF = "GBIF", SCAN = "SCAN", iDigBio = "iDigBio", USGS = "USGS", ALA = "ALA",
ASP = "ASP", CAES = "CAES", 'B. Mont.' = "BMont", 'B. Minkley' = "BMin", Ecd = "Ecd",
Gaiarsa = "Gai", EPEL = "EPEL"
)
```
## 5.4 Maps
We can also make some overall summary maps at the country level using `summaryMaps()`. If you get an error about breaks not being unique, then reduce class_n.
```{r 5.4}
BeeBDC::summaryMaps(
data = beesFlagged,
width = 10, height = 10,
class_n = 3,
class_Style = "jenks",
outPath = tempdir(),
fileName = "CountryMaps_jenks.pdf",
returnPlot = TRUE
)
```
# 6.0 Save data
```{r 6.0, eval = FALSE}
mapData %>%
readr::write_excel_csv(paste0(DataPath, "/Output/Intermediate/", "cleanTaxon_",
Sys.Date(), ".csv"))
```
```{r cleanup, include=FALSE, collapse = TRUE}
# Remove the webpage folder
unlink(paste0(dirname(getwd()), "/inst/extdata/WebDir"), recursive = TRUE)
```
|
/scratch/gouwar.j/cran-all/cranData/BeeBDC/inst/doc/basic_workflow.Rmd
|
---
title: "BeeBDC vignette"
output:
rmarkdown::html_vignette:
vignette: >
%\VignetteIndexEntry{BeeBDC vignette}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r libraryChunk, load-packages, include=FALSE}
# markdown packages
library(rgnparser)
library(magrittr)
library(knitr)
library(rmarkdown)
library(rmdformats)
library(prettydoc)
library(htmltools)
library(pkgdown)
# Load core packages
library(devtools)
library(BiocManager)
library(purrr)
library(here)
library(renv)
library(bdc)
library(CoordinateCleaner)
library(dplyr)
library(readr)
library(stringr)
library(lubridate)
library(tidyselect)
library(R.utils)
library(tidyr)
library(ggplot2)
library(forcats)
library(emld)
library(rlang)
library(xml2)
library(mgsub)
library(rvest)
library(rnaturalearth)
library(rnaturalearthdata)
library(countrycode)
library(janitor)
library(circlize)
library(paletteer)
library(cowplot)
library(igraph)
library(ggspatial)
library(sf)
library(parallel)
library(terra)
# Don't detect cores to avoid a GitHub error
old <- options() # code line i
on.exit(options(old)) # code line i+1
options(mc.cores = parallel::detectCores())
```
```{r secretRootPath, include=FALSE}
# Set the RootPath to tempdir
RootPath <- tempdir()
if (!dir.exists(paste0(RootPath, "/Data_acquisition_workflow"))) {
dir.create(paste0(RootPath, "/Data_acquisition_workflow"), recursive = TRUE)
}
```
```{r global-options, include=FALSE}
knitr::opts_chunk$set(error = TRUE,
eval = TRUE,
tidy = TRUE,
warning = FALSE,
root.dir = normalizePath(tempdir()))
```
# 0.0 Script preparation
## 0.1 Working directory
Choose the path to the root folder in which all other folders can be found.
```{r falseRootPath, eval=FALSE}
RootPath <- paste0("/your/path/here")
```
```{r CreateRootPath, warning=FALSE, collapse = TRUE}
# Create the working directory in the RootPath if it doesn't exist already
if (!dir.exists(paste0(RootPath, "/Data_acquisition_workflow"))) {
dir.create(paste0(RootPath, "/Data_acquisition_workflow"), recursive = TRUE)
}
# Set the working directory
setwd(paste0(RootPath,"/Data_acquisition_workflow"))
```
For the first time that you run BeeBDC, and if you want to use the renv package to manage your
packages, you can install renv...
install.packages("renv", repos = "http://cran.us.r-project.org")
and then initialise the renv project.
renv::init(project = paste0(RootPath,"/Data_acquisition_workflow"))
If you have already initialised a project, you can instead just activate it.
```{r activate, collapse = TRUE}
renv::activate(project = paste0(RootPath,"/Data_acquisition_workflow"))
```
## 0.2 Install packages (if needed)
You may need to install gdal on your computer. This can be done on a Mac by using Homebrew in the terminal and the command "brew install gdal".
To start out, you will need to install **BiocManager**, **devtools**, **ComplexHeatmap**, and **rnaturalearthhires** to then install and fully use **BeeBDC**.
```{r installPackages, message=FALSE, warning=FALSE, results=FALSE, collapse = TRUE, eval = FALSE}
if (!require("BiocManager", quietly = TRUE))
install.packages("BiocManager", repos = "http://cran.us.r-project.org")
BiocManager::install("ComplexHeatmap")
```
```{r rnaturalearthhires, eval=FALSE}
# Install remotes if needed
if (!require("remotes", quietly = TRUE))
install.packages("remotes", repos = "http://cran.us.r-project.org")
# Download and then load rnaturalearthhires
remotes::install_github("ropensci/rnaturalearthhires")
install.packages("rnaturalearthhires", repos = "https://ropensci.r-universe.dev", type = "source")
library(rnaturalearthhires)
```
Now install **BeeBDC**.
```{r installBeeBDC, results=TRUE, message=TRUE, eval = FALSE, collapse = TRUE}
install.packages("BeeBDC")
library(BeeBDC)
```
Snapshot the renv environment.
```{r snapshot, collapse = TRUE}
renv::snapshot(project = paste0(RootPath,"/Data_acquisition_workflow"),
prompt = FALSE)
```
Set up the directories used by BeeBDC. These directories include where the data, figures, reports, etc. will be saved. The RDoc needs to be a path RELATIVE to the RootPath; i.e., the file path from which the two diverge.
```{r dirMaker, collapse = TRUE, eval = FALSE}
BeeBDC::dirMaker(
RootPath = RootPath,
RDoc = "vignettes/BeeBDC_main.Rmd") %>%
# Add paths created by this function to the environment()
list2env(envir = parent.env(environment()))
```
```{r dirMakerSECRETELY, include = FALSE}
# For the sake of this tutorial, we will not use here::i_am in dirMaker, because we aren't allowed
# to mess with package directories in this way. This is a work-around that uses the tempdir()
DataPath <- paste0(RootPath, "/Data_acquisition_workflow")
OutPath_Check <- paste0(RootPath, "/Data_acquisition_workflow/Output/Check")
OutPath_Figures <- paste0(RootPath, "/Data_acquisition_workflow/Output/Figures")
OutPath_Intermediate <- paste0(RootPath, "/Data_acquisition_workflow/Output/Intermediate")
OutPath_Report <- paste0(RootPath, "/Data_acquisition_workflow/Output/Report")
# Create these files
if (!dir.exists(DataPath)) {
dir.create(DataPath, recursive = TRUE)}
if (!dir.exists(OutPath_Check)) {
dir.create(OutPath_Check, recursive = TRUE)}
if (!dir.exists(OutPath_Figures)) {
dir.create(OutPath_Figures, recursive = TRUE)}
if (!dir.exists(OutPath_Intermediate)) {
dir.create(OutPath_Intermediate, recursive = TRUE)}
if (!dir.exists(OutPath_Report)) {
dir.create(OutPath_Report, recursive = TRUE)}
```
## 0.3 Load packages
Load packages.
```{r lapply_library, results=FALSE, collapse = TRUE}
lapply(c("ComplexHeatmap", "magrittr"),
library, character.only = TRUE)
```
***
# 1.0 Data merge
<div class="alert alert-info">
<strong> Attention:</strong> <br>
Although each line of code has been validated, in order to save time knitting the R **markdown** document the next section is display only. If you are not data merging (section 1.0) or preparing the data (section 2.0), feel free to skip to Section 3.0 Initial flags.
</div>
## 1.1 Download ALA data
Download ALA data and create a new file in the DataPath to put those data into. You should also
first make an account with ALA in order to download your data — <https://auth.ala.org.au/userdetails/registration/createAccount>
BeeBDC::atlasDownloader(path = DataPath,
userEmail = "[email protected]",
atlas = "ALA",
ALA_taxon = "Apiformes")
## 1.2 Import and merge ALA, SCAN, iDigBio, and GBIF data
Supply the path to where the data is; the save_type is either "csv_files" or "R_file".
DataImp <- BeeBDC::repoMerge(path = DataPath,
occ_paths = BeeBDC::repoFinder(path = DataPath),
save_type = "R_file")
If there is an error in finding a file, run `repoFinder()` by itself to troubleshoot. For example:
#BeeBDC::repoFinder(path = DataPath)
#OUTPUT:
#$ALA_data
#[1] "F:/BeeDataCleaning2022/BeeDataCleaning/BeeDataCleaning/BeeData/ALA_galah_path/galah_download_2022-09-15/data.csv"
#$GBIF_data
#[1] "F:/BeeDataCleaning2022/BeeDataCleaning/BeeDataCleaning/BeeData/GBIF_webDL_30Aug2022/0000165-220831081235567/occurrence.txt"
#[2] "F:/BeeDataCleaning2022/BeeDataCleaning/BeeDataCleaning/BeeData/GBIF_webDL_30Aug2022/0436695-210914110416597/occurrence.txt"
#[3] "F:/BeeDataCleaning2022/BeeDataCleaning/BeeDataCleaning/BeeData/GBIF_webDL_30Aug2022/0436697-210914110416597/occurrence.txt"
#[4] "F:/BeeDataCleaning2022/BeeDataCleaning/BeeDataCleaning/BeeData/GBIF_webDL_30Aug2022/0436704-210914110416597/occurrence.txt"
#[5] "F:/BeeDataCleaning2022/BeeDataCleaning/BeeDataCleaning/BeeData/GBIF_webDL_30Aug2022/0436732-210914110416597/occurrence.txt"
#[6] "F:/BeeDataCleaning2022/BeeDataCleaning/BeeDataCleaning/BeeData/GBIF_webDL_30Aug2022/0436733-210914110416597/occurrence.txt"
#[7] "F:/BeeDataCleaning2022/BeeDataCleaning/BeeDataCleaning/BeeData/GBIF_webDL_30Aug2022/0436734-210914110416597/occurrence.txt"
#$iDigBio_data
#[1] "F:/BeeDataCleaning2022/BeeDataCleaning/BeeDataCleaning/BeeData/iDigBio_webDL_30Aug2022/5aa5abe1-62e0-4d8c-bebf-4ac13bd9e56f/occurrence_raw.csv"
#$SCAN_data
#character(0)
# Failing because SCAN_data seems to be missing. Downloaded separately from the OneDrive
Load in the most-recent version of these data if needed. This will return a list with:
1. The occurrence dataset with attributes (.$Data_WebDL)
2. The appended eml file (.$eml_files)
DataImp <- BeeBDC::importOccurrences(path = DataPath,
fileName = "BeeData_")
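As a minimal sketch (the object names `occurrences` and `emlFiles` are only illustrative), the two list elements named above can then be pulled out by name:
occurrences <- DataImp$Data_WebDL
emlFiles <- DataImp$eml_files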
## 1.3 Import USGS Data
The `USGS_formatter()` will find, import, format, and create metadata for the USGS dataset. The pubDate must be in day-month-year format.
USGS_data <- BeeBDC::USGS_formatter(path = DataPath, pubDate = "19-11-2022")
## 1.4 Formatted Source Importer
Use this importer to find files that have been formatted and need to be added to the larger data file.
The attributes file must contain "attribute" in its name, and the occurrence file must not.
Complete_data <- BeeBDC::formattedCombiner(path = DataPath,
strings = c("USGS_[a-zA-Z_]+[0-9]{4}-[0-9]{2}-[0-9]{2}"),
# This should be the list-format with eml attached
existingOccurrences = DataImp$Data_WebDL,
existingEMLs = DataImp$eml_files)
In the column *catalogNumber*, remove ".*specimennumber:" as what comes after should be the USGS number to match for duplicates.
Complete_data$Data_WebDL <- Complete_data$Data_WebDL %>%
dplyr::mutate(catalogNumber = stringr::str_replace(catalogNumber,
pattern = ".*\\| specimennumber:",
replacement = ""))
## 1.5 Save data
Choose the type of data format you want to use in saving your work in 1.x.
BeeBDC::dataSaver(path = DataPath,# The main path to look for data in
save_type = "CSV_file", # "R_file" OR "CSV_file"
occurrences = Complete_data$Data_WebDL, # The existing datasheet
eml_files = Complete_data$eml_files, # The existing EML files
file_prefix = "Fin_") # The prefix for the fileNames
rm(Complete_data, DataImp)
# 2.0 Data preparation
The data preparation section of the script relates mostly to integrating **bee** occurrence datasets and corrections, and so may be skipped by many general taxon users.
## 2.1 Standardise datasets
You may either use:
- (a) the bdc import method (works well with general datasets) ***or***
- (b) the jbd import method (works well with above data merge)
### a. bdc import
The bdc import is **NOT** truly supported here, but provided as an example. Please go to section 2.1b below.
Read in the **bdc** metadata and standardise the dataset to bdc.
bdc_metadata <- readr::read_csv(paste(DataPath, "out_file", "bdc_integration.csv", sep = "/"))
# ?issue — datasetName is a darwinCore field already!
# Standardise the dataset to bdc
db_standardized <- bdc::bdc_standardize_datasets(
metadata = bdc_metadata,
format = "csv",
overwrite = TRUE,
save_database = TRUE)
# read in configuration description file of the column header info
config_description <- readr::read_csv(paste(DataPath, "Output", "bdc_configDesc.csv",
sep = "/"),
show_col_types = FALSE, trim_ws = TRUE)
### b. jbd import
Find the path, read in the file, and add the *database_id* column.
occPath <- BeeBDC::fileFinder(path = DataPath, fileName = "Fin_BeeData_combined_")
db_standardized <- readr::read_csv(occPath,
# Use the basic ColTypeR function to determine types
col_types = BeeBDC::ColTypeR(), trim_ws = TRUE) %>%
dplyr::mutate(database_id = paste("Dorey_data_",
1:nrow(.), sep = ""),
.before = family)
### c. optional thin
You can thin the dataset for ***TESTING ONLY!***
check_pf <- check_pf %>%
# take every 100th record
filter(row_number() %% 100 == 1)
***
## 2.2 Paige dataset
Paige Chesshire's cleaned American dataset — <https://doi.org/10.1111/ecog.06584>
### Import data
If you haven't figured it out by now, don't worry about the column name warning — not all columns occur here.
PaigeNAm <- readr::read_csv(paste(DataPath, "Paige_data", "NorAmer_highQual_only_ALLfamilies.csv",
sep = "/"), col_types = BeeBDC::ColTypeR()) %>%
# Change the column name from Source to dataSource to match the rest of the data.
dplyr::rename(dataSource = Source) %>%
# EXTRACT WAS HERE
# add a NEW database_id column
dplyr::mutate(
database_id = paste0("Paige_data_", 1:nrow(.)),
.before = scientificName)
<div class="alert alert-info">
<strong> Attention:</strong> <br>
It is recommended to run the below code on the full bee dataset with more than 16GB RAM. Robert ran this on a laptop with 16GB RAM and an Intel(R) Core(TM) i7-8550U processor (4 cores and 8 threads) — it struggled.
</div>
### Merge Paige's data with downloaded data
db_standardized <- BeeBDC::PaigeIntegrater(
db_standardized = db_standardized,
PaigeNAm = PaigeNAm,
# This is a list of columns by which to match Paige's data to the most-recent download with.
# Each vector will be matched individually
columnStrings = list(
c("decimalLatitude", "decimalLongitude",
"recordNumber", "recordedBy", "individualCount", "samplingProtocol",
"associatedTaxa", "sex", "catalogNumber", "institutionCode", "otherCatalogNumbers",
"recordId", "occurrenceID", "collectionID"), # Iteration 1
c("catalogNumber", "institutionCode", "otherCatalogNumbers",
"recordId", "occurrenceID", "collectionID"), # Iteration 2
c("decimalLatitude", "decimalLongitude",
"recordedBy", "genus", "specificEpithet"),# Iteration 3
c("id", "decimalLatitude", "decimalLongitude"),# Iteration 4
c("recordedBy", "genus", "specificEpithet", "locality"), # Iteration 5
c("recordedBy", "institutionCode", "genus",
"specificEpithet","locality"),# Iteration 6
c("occurrenceID","decimalLatitude", "decimalLongitude"),# Iteration 7
c("catalogNumber","decimalLatitude", "decimalLongitude"),# Iteration 8
c("catalogNumber", "locality") # Iteration 9
) )
Remove spent data.
rm(PaigeNAm)
## 2.3 USGS
The USGS dataset also partially occurs on GBIF from BISON. However, the occurrence codes are in a silly place... We will correct these here to help identify duplicates later.
db_standardized <- db_standardized %>%
# Remove the discoverlife html if it is from USGS
dplyr::mutate(occurrenceID = dplyr::if_else(
stringr::str_detect(occurrenceID, "USGS_DRO"),
stringr::str_remove(occurrenceID, "http://www\\.discoverlife\\.org/mp/20l\\?id="),
occurrenceID)) %>%
# Use otherCatalogNumbers when occurrenceID is empty AND when USGS_DRO is detected there
dplyr::mutate(
occurrenceID = dplyr::if_else(
stringr::str_detect(otherCatalogNumbers, "USGS_DRO") & is.na(occurrenceID),
otherCatalogNumbers, occurrenceID)) %>%
# Make sure that no eventIDs have snuck into the occurrenceID columns
# For USGS_DRO, codes with <6 digits are event ids
dplyr::mutate(
occurrenceID = dplyr::if_else(stringr::str_detect(occurrenceID, "USGS_DRO", negate = TRUE),
# Keep occurrenceID if it's NOT USGS_DRO
occurrenceID,
# If it IS USGS_DRO and it has => 6 numbers, keep it, else, NA
dplyr::if_else(stringr::str_detect(occurrenceID, "USGS_DRO[0-9]{6,10}"),
occurrenceID, NA_character_)),
catalogNumber = dplyr::if_else(stringr::str_detect(catalogNumber, "USGS_DRO", negate = TRUE),
# Keep catalogNumber if it's NOT USGS_DRO
catalogNumber,
# If it IS USGS_DRO and it has => 6 numbers, keep it, else, NA
dplyr::if_else(stringr::str_detect(catalogNumber, "USGS_DRO[0-9]{6,10}"),
catalogNumber, NA_character_)))
## 2.4 Additional datasets
Import additional and potentially private datasets.
**Note:** Private dataset functions are provided but the data itself is not integrated here until those datasets become freely available.
There will be some warnings where a few rows may not be formatted correctly or where dates fail to parse. This is normal.
###### a. EPEL
Guzman, L. M., Kelly, T. & Elle, E. A data set for pollinator diversity and their interactions with plants in the Pacific Northwest. Ecology, e3927 (2022). <https://doi.org/10.1002/ecy.3927>
EPEL_Data <- BeeBDC::readr_BeeBDC(dataset = "EPEL",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/bee_data_canada.csv",
outFile = "jbd_EPEL_data.csv",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")
###### b. Allan Smith-Pardo
Data from Allan Smith-Pardo
ASP_Data <- BeeBDC::readr_BeeBDC(dataset = "ASP",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/Allan_Smith-Pardo_Dorey_ready2.csv",
outFile = "jbd_ASP_data.csv",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")
###### c. Minckley
Data from Robert Minckley
BMin_Data <- BeeBDC::readr_BeeBDC(dataset = "BMin",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/Bob_Minckley_6_1_22_ScanRecent-mod_Dorey.csv",
outFile = "jbd_BMin_data.csv",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")
###### d. BMont
Delphia, C. M. Bumble bees of Montana. <https://www.mtent.org/projects/Bumble_Bees/bombus_species.html>. (2022)
BMont_Data <- BeeBDC::readr_BeeBDC(dataset = "BMont",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/Bombus_Montana_dorey.csv",
outFile = "jbd_BMont_data.csv",
dataLicense = "https://creativecommons.org/licenses/by-sa/4.0/")
###### e. Ecd
Ecdysis. Ecdysis: a portal for live-data arthropod collections, <https://ecdysis.org/index.php> (2022).
Ecd_Data <- BeeBDC::readr_BeeBDC(dataset = "Ecd",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/Ecdysis_occs.csv",
outFile = "jbd_Ecd_data.csv",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")
###### f. Gai
Gaiarsa, M. P., Kremen, C. & Ponisio, L. C. Pollinator interaction flexibility across scales affects patch colonization and occupancy. *Nature Ecology & Evolution* 5, 787-793 (2021). <https://doi.org/10.1038/s41559-021-01434-y>
Gai_Data <- BeeBDC::readr_BeeBDC(dataset = "Gai",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/upload_to_scan_Gaiarsa et al_Dorey.csv",
outFile = "jbd_Gai_data.csv",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")
###### g. CAES
From the Connecticut Agricultural Experiment Station.
Zarrillo, T. A., Stoner, K. A. & Ascher, J. S. Biodiversity of bees (Hymenoptera: Apoidea: Anthophila) in Connecticut (USA). Zootaxa (Accepted).
Ecdysis. Occurrence dataset (ID: 16fca9c2-f622-4cb1-aef0-3635a7be5aeb). https://ecdysis.org/content/dwca/CAES-CAES_DwC-A.zip. (2023)
CAES_Data <- BeeBDC::readr_BeeBDC(dataset = "CAES",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/CT_BEE_DATA_FROM_PBI.xlsx",
outFile = "jbd_CT_Data.csv",
sheet = "Sheet1",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")
###### h. GeoL
GeoL_Data <- BeeBDC::readr_BeeBDC(dataset = "GeoL",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/Geolocate and BELS_certain and accurate.xlsx",
outFile = "jbd_GeoL_Data.csv",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")
###### i. EaCO
EaCO_Data <- BeeBDC::readr_BeeBDC(dataset = "EaCO",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/Eastern Colorado bee 2017 sampling.xlsx",
outFile = "jbd_EaCo_Data.csv",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")
###### j. FSCA
Florida State Collection of Arthropods
FSCA_Data <- BeeBDC::readr_BeeBDC(dataset = "FSCA",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "InputDatasets/fsca_9_15_22_occurrences.csv",
outFile = "jbd_FSCA_Data.csv",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")
###### k. Texas SMC
Published or unpublished data from Texas literature not in an online database, usually copied into spreadsheet from document format, or otherwise copied from a very differently-formatted spreadsheet. Unpublished or partially published data were obtained with express permission from the lead author.
SMC_Data <- BeeBDC::readr_BeeBDC(dataset = "SMC",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/TXbeeLitOccs_31Oct22.csv",
outFile = "jbd_SMC_Data.csv",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")
###### l. Texas Bal
Data with GPS coordinates (missing accidentally from records on Dryad) from Ballare, K. M., Neff, J. L., Ruppel, R. & Jha, S. Multi-scalar drivers of biodiversity: local management mediates wild bee community response to regional urbanization. Ecological Applications 29, e01869 (2019), <https://doi.org/10.1002/eap.1869>. The version on Dryad is missing site GPS coordinates (by accident). Kim is okay with these data being made public as long as her paper is referenced. - Elinor Lichtenberg
Bal_Data <- BeeBDC::readr_BeeBDC(dataset = "Bal",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/Beedata_ballare.xlsx",
outFile = "jbd_Bal_Data.csv",
sheet = "animal_data",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")
###### m. Palouse Lic
Elinor Lichtenberg’s canola data: Lichtenberg, E. M., Milosavljević, I., Campbell, A. J. & Crowder, D. W. Differential effects of soil conservation practices on arthropods and crop yields. *Journal of Applied Entomology*, (2023) <https://doi.org/10.1111/jen.13188>. These are the data I will be putting on SCAN. - Elinor Lichtenberg
Lic_Data <- BeeBDC::readr_BeeBDC(dataset = "Lic",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/Lichtenberg_canola_records.csv",
outFile = "jbd_Lic_Data.csv",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")
###### n. Arm
Data from Armando Falcon-Brindis from the University of Kentucky.
Arm_Data <- BeeBDC::readr_BeeBDC(dataset = "Arm",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/Bee database Armando_Final.xlsx",
outFile = "jbd_Arm_Data.csv",
sheet = "Sheet1",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")
###### o. Dor
From several papers:
1. Dorey, J. B., Fagan-Jeffries, E. P., Stevens, M. I., & Schwarz, M. P. (2020). Morphometric comparisons and novel observations of diurnal and low-light-foraging bees. *Journal of Hymenoptera Research*, 79, 117–144. doi:<https://doi.org/10.3897/jhr.79.57308>
2. Dorey, J. B. (2021). Missing for almost 100 years: the rare and potentially threatened bee Pharohylaeus lactiferus (Hymenoptera, Colletidae). *Journal of Hymenoptera Research*, 81, 165-180. doi: <https://doi.org/10.3897/jhr.81.59365>
3. Dorey, J. B., Schwarz, M. P., & Stevens, M. I. (2019). Review of the bee genus Homalictus Cockerell (Hymenoptera: Halictidae) from Fiji with description of nine new species. *Zootaxa*, 4674(1), 1–46. doi:<https://doi.org/10.11646/zootaxa.4674.1.1>
```{}
```
Dor_Data <- BeeBDC::readr_BeeBDC(dataset = "Dor",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/DoreyData.csv",
outFile = "jbd_Dor_Data.csv",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/")
###### p. VicWam
These data are originally from the Victorian Museum and Western Australian Museum in Australia. However, in their current form they are from Dorey et al. 2021.
1. PADIL. (2020). PaDIL. <https://www.PADIL.gov.au/>
2. Houston, T. F. (2000). Native bees on wildflowers in Western Australia. *Western Australian Insect Study Society*.
3. Dorey, J. B., Rebola, C. M., Davies, O. K., Prendergast, K. S., Parslow, B. A., Hogendoorn, K., . . . Caddy-Retalic, S. (2021). Continental risk assessment for understudied taxa post catastrophic wildfire indicates severe impacts on the Australian bee fauna. *Global Change Biology*, 27(24), 6551-6567. doi:<https://doi.org/10.1111/gcb.15879>
```{}
```
VicWam_Data <- BeeBDC::readr_BeeBDC(dataset = "VicWam",
path = paste0(DataPath, "/Additional_Datasets"),
inFile = "/InputDatasets/Combined_Vic_WAM_databases.xlsx",
outFile = "jbd_VicWam_Data.csv",
dataLicense = "https://creativecommons.org/licenses/by-nc-sa/4.0/",
sheet = "Combined")
## 2.5 Merge all
Remove these spent datasets.
rm(EPEL_Data, ASP_Data, BMin_Data, BMont_Data, Ecd_Data, Gai_Data, CAES_Data,
GeoL_Data, EaCO_Data, FSCA_Data, SMC_Data, Bal_Data, Lic_Data, Arm_Data, Dor_Data,
VicWam_Data)
Read in and merge all. There are more `readr_BeeBDC()` supported than currently implemented and these represent datasets that will be publicly released in the future. See '?`readr_BeeBDC()`' for details.
db_standardized <- db_standardized %>%
dplyr::bind_rows(
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_ASP_data.csv"), col_types = BeeBDC::ColTypeR()),
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_EPEL_data.csv"), col_types = BeeBDC::ColTypeR()),
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_BMin_data.csv"), col_types = BeeBDC::ColTypeR()),
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_BMont_data.csv"), col_types = BeeBDC::ColTypeR()),
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_Ecd_data.csv"), col_types = BeeBDC::ColTypeR()),
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_Gai_data.csv"), col_types = BeeBDC::ColTypeR()),
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_CT_Data.csv"), col_types = BeeBDC::ColTypeR()),
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_GeoL_Data.csv"), col_types = BeeBDC::ColTypeR()),
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_EaCo_Data.csv"), col_types = BeeBDC::ColTypeR()),
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_SMC_Data.csv"), col_types = BeeBDC::ColTypeR()),
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_Bal_Data.csv"), col_types = BeeBDC::ColTypeR()),
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_Lic_Data.csv"), col_types = BeeBDC::ColTypeR()),
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_Arm_Data.csv"), col_types = BeeBDC::ColTypeR()),
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_Dor_Data.csv"), col_types = BeeBDC::ColTypeR()),
readr::read_csv(paste0(DataPath, "/Additional_Datasets",
"/jbd_VicWam_Data.csv"), col_types = BeeBDC::ColTypeR())) %>%
# END bind_rows
suppressWarnings(classes = "warning") # End suppressWarnings — due to col_types
## 2.6 Match database_id
If you have prior runs whose *database_id*s you'd like to match with the current run, you may use the below script to try to match them.
Read in a prior run of choice.
priorRun <- BeeBDC::fileFinder(path = DataPath,
file = "01_prefilter_database_9Aug22.csv") %>%
readr::read_csv(file = ., col_types = BeeBDC::ColTypeR())
This function will attempt to find the *database_id*s from prior runs.
db_standardized <- BeeBDC::idMatchR(
currentData = db_standardized,
priorData = priorRun,
# First matches will be given preference over later ones
matchBy = tibble::lst(c("gbifID", "dataSource"),
c("catalogNumber", "institutionCode", "dataSource", "decimalLatitude",
"decimalLongitude"),
c("occurrenceID", "dataSource","decimalLatitude","decimalLongitude"),
c("recordId", "dataSource","decimalLatitude","decimalLongitude"),
c("id", "dataSource","decimalLatitude","decimalLongitude"),
# Because INHS was entered as its own dataset but is now included in the GBIF download...
c("catalogNumber", "institutionCode", "dataSource",
"decimalLatitude","decimalLongitude")),
# You can exclude datasets from the prior run by matching their prefixes — before the first underscore:
excludeDataset = c("ASP", "BMin", "BMont", "CAES", "EaCO", "Ecd", "EcoS",
"Gai", "KP", "EPEL", "CAES", "EaCO", "FSCA", "SMC", "Lic", "Arm",
"VicWam"))
# Remove redundant files
rm(priorRun)
Save the dataset.
db_standardized %>%
readr::write_excel_csv(.,
paste(OutPath_Intermediate, "00_prefilter_database.csv",
sep = "/"))
# 3.0 Initial flags
Read data back in if needed. OutPath_Intermediate (and a few other directories) should have been created and saved to the global environment by `dirMaker()`.
if(!exists("db_standardized")){
db_standardized <- readr::read_csv(paste(OutPath_Intermediate, "00_prefilter_database.csv",
sep = "/"), col_types = BeeBDC::ColTypeR())}
Normally, you would use the full dataset, as read in above. But, for the sake of this vignette, we will use a combination of two example datasets. These example datasets can further be very useful for testing functions if you're ever feeling a bit confused and overwhelmed!
```{r 3.0, collapse = TRUE}
data("bees3sp", package = "BeeBDC")
data("beesRaw", package = "BeeBDC")
db_standardized <- dplyr::bind_rows(beesRaw,
# Only keep a subset of columns from bees3sp
bees3sp %>% dplyr::select(tidyselect::all_of(colnames(beesRaw)), countryCode))
```
*For more details about the bdc package, please see their [tutorial.](https://brunobrr.github.io/bdc/articles/prefilter.html)*
## 3.1 SciName
Flag occurrences without *scientificName* provided.
```{r 3.1, collapse = TRUE}
check_pf <- bdc::bdc_scientificName_empty(
data = db_standardized,
sci_name = "scientificName")
# now that this is saved, remove it to save space in memory
rm(db_standardized)
```
## 3.2 MissCoords
Flag occurrences with missing *decimalLatitude* and *decimalLongitude*.
```{r 3.2, collapse = TRUE}
check_pf <- bdc::bdc_coordinates_empty(
data = check_pf,
lat = "decimalLatitude",
lon = "decimalLongitude")
```
## 3.3 OutOfRange
Flag occurrences that are not on Earth (outside of -180 to 180 or -90 to 90 degrees).
```{r 3.3, collapse = TRUE}
check_pf <- bdc::bdc_coordinates_outOfRange(
data = check_pf,
lat = "decimalLatitude",
lon = "decimalLongitude")
```
## 3.4 Source
Flag occurrences that don't match the *basisOfRecord* types below.
```{r 3.4, collapse = TRUE}
check_pf <- bdc::bdc_basisOfRecords_notStandard(
data = check_pf,
basisOfRecord = "basisOfRecord",
names_to_keep = c(
# Keep all plus some at the bottom.
"Event",
"HUMAN_OBSERVATION",
"HumanObservation",
"LIVING_SPECIMEN",
"LivingSpecimen",
"MACHINE_OBSERVATION",
"MachineObservation",
"MATERIAL_SAMPLE",
"O",
"Occurrence",
"MaterialSample",
"OBSERVATION",
"Preserved Specimen",
"PRESERVED_SPECIMEN",
"preservedspecimen Specimen",
"Preservedspecimen",
"PreservedSpecimen",
"preservedspecimen",
"S",
"Specimen",
"Taxon",
"UNKNOWN",
"",
NA,
"NA",
"LITERATURE",
"None", "Pinned Specimen", "Voucher reared", "Emerged specimen"
))
```
## 3.5 CountryName
Try to harmonise country names.
### a. prepare dataset
Fix up country names based on common problems above and extract ISO2 codes for occurrences.
```{r 3.5a, collapse = TRUE}
check_pf_noNa <- BeeBDC::countryNameCleanR(
data = check_pf,
# Create a Tibble of common issues in country names and their replacements
commonProblems = dplyr::tibble(problem = c('U.S.A.', 'US','USA','usa','UNITED STATES',
'United States','U.S.A','MX','CA','Bras.','Braz.',
'Brasil','CNMI','USA TERRITORY: PUERTO RICO'),
fix = c('United States of America','United States of America',
'United States of America','United States of America',
'United States of America','United States of America',
'United States of America','Mexico','Canada','Brazil',
'Brazil','Brazil','Northern Mariana Islands','PUERTO.RICO'))
)
```
### b. run function
Get country name from coordinates using a wrapper around the `jbd_country_from_coordinates()` function. Because our dataset is much larger than those used to design **bdc**, we have made it so that you can analyse data in smaller pieces. Additionally, like some other functions in **BeeBDC**, we have implemented parallel operations (using mc.cores = #cores in stepSize = #rowsPerOperation); see '?`jbd_CfC_chunker()`' for details.
NOTE: In an actual run you should use scale = "large"
```{r 3.5b, message=FALSE, warning=FALSE, collapse = TRUE}
suppressWarnings(
countryOutput <- BeeBDC::jbd_CfC_chunker(data = check_pf_noNa,
lat = "decimalLatitude",
lon = "decimalLongitude",
country = "country",
# How many rows to process at a time
stepSize = 1000000,
# Start row
chunkStart = 1,
path = OutPath_Intermediate,
# Normally, please use scale = "large"
scale = "medium",
mc.cores = 1),
classes = "warning")
```
### c. re-merge
Join these datasets.
```{r 3.5ci, collapse = TRUE}
check_pf <- dplyr::left_join(check_pf,
countryOutput,
by = "database_id",
suffix = c("", "CO")) %>%
# Take the new country name if the original is NA
dplyr::mutate(country = dplyr::if_else(is.na(country),
countryCO,
country)) %>%
# Remove duplicates if they arose from left_join!
dplyr::distinct()
```
Save the dataset.
```{r 3.5cii, eval = FALSE, collapse = TRUE}
check_pf %>%
readr::write_excel_csv(.,
paste(OutPath_Intermediate, "01_prefilter_database.csv",
sep = "/"))
```
Read in if needed.
```{r 3.5ciii, eval = FALSE, collapse = TRUE}
if(!exists("check_pf")){
check_pf <- readr::read_csv(paste(DataPath,
"Output", "Intermediate", "01_prefilter_database.csv", sep = "/"),
col_types = BeeBDC::ColTypeR())}
```
Remove these interim datasets.
```{r 3.5civ, collapse = TRUE}
rm(check_pf_noNa, countryOutput)
```
## 3.6 StandardCoNames
Run the function, which standardises country names and adds ISO2 codes, if needed.
```{r 3.6, collapse = TRUE}
# Standardise country names and add ISO2 codes if needed
check_pf <- bdc::bdc_country_standardized(
# Remove the countryCode and country_suggested columns to avoid an error
# where two "countryCode" and "country_suggested" columns exist (i.e. if the dataset has been
# run before)
data = check_pf %>% dplyr::select(!tidyselect::any_of(c("countryCode", "country_suggested"))),
country = "country"
)
```
## 3.7 TranspCoords
Flag and correct records when *decimalLatitude* and *decimalLongitude* appear to be transposed. We created this chunked version of `bdc::bdc_coordinates_transposed()` because it is very RAM-heavy with our large bee dataset. Like many of our other 'jbd_...' functions, there are other improvements, e.g., parallel running.
NOTE: Usually you would use scale = "large", which requires rnaturalearthhires
```{r 3.7, message=FALSE, warning=FALSE, collapse = TRUE}
check_pf <- BeeBDC::jbd_Ctrans_chunker(
# bdc_coordinates_transposed inputs
data = check_pf,
id = "database_id",
lat = "decimalLatitude",
lon = "decimalLongitude",
country = "country",
countryCode = "countryCode",
border_buffer = 0.2, # in decimal degrees (~22 km at the equator)
save_outputs = TRUE,
sci_names = "scientificName",
# chunker inputs
stepSize = 1000000, # How many rows to process at a time
chunkStart = 1, # Start row
append = FALSE, # If FALSE it may overwrite existing dataset
progressiveSave = FALSE,
# In a normal run, please use scale = "large"
scale = "medium",
path = OutPath_Check,
mc.cores = 1
)
```
Get a quick summary of the number of transposed records.
```{r 3.7ii, eval = FALSE, collapse = TRUE}
table(check_pf$coordinates_transposed, useNA = "always")
```
Save the dataset.
```{r 3.7iii, eval = FALSE, collapse = TRUE}
check_pf %>%
readr::write_excel_csv(.,
paste(OutPath_Intermediate, "01_prefilter_database.csv",
sep = "/"))
```
Read the data in again if needed.
```{r 3.7iv, eval = FALSE, collapse = TRUE}
if(!exists("check_pf")){
check_pf <- readr::read_csv(paste(OutPath_Intermediate, "01_prefilter_database.csv",
sep = "/"), col_types = BeeBDC::ColTypeR())}
```
## 3.8 Coord-country
Collect all country names in the *country_suggested* column. We rebuilt a **bdc** function to flag occurrences where the coordinates are inconsistent with the provided country name.
```{r 3.8, collapse = TRUE}
check_pf <- BeeBDC::jbd_coordCountryInconsistent(
data = check_pf,
lon = "decimalLongitude",
lat = "decimalLatitude",
scale = 50,
pointBuffer = 0.01)
```
Save the dataset.
```{r 3.8ii, eval = FALSE}
check_pf %>%
readr::write_excel_csv(.,
paste(OutPath_Intermediate, "01_prefilter_database.csv",
sep = "/"))
```
## 3.9 GeoRefIssue
This function identifies records whose coordinates can potentially be extracted from locality information, which must be manually checked later.
```{r 3.9, eval = TRUE, collapse = TRUE}
xyFromLocality <- bdc::bdc_coordinates_from_locality(
data = check_pf,
locality = "locality",
lon = "decimalLongitude",
lat = "decimalLatitude",
save_outputs = FALSE
)
```
```{r 3.9ii, eval = FALSE}
# Save the resultant data
xyFromLocality %>% readr::write_excel_csv(paste(OutPath_Check, "01_coordinates_from_locality.csv",
sep = "/"))
```
Remove spent data.
```{r 3.9iii, eval = FALSE}
rm(xyFromLocality)
```
## 3.10 Flag Absent
Flag the records marked as "absent".
```{r 3.10, collapse = TRUE}
check_pf <- BeeBDC::flagAbsent(data = check_pf,
PresAbs = "occurrenceStatus")
```
## 3.11 flag License
Flag the records that may not be used according to their license information.
```{r 3.11, collapse = TRUE}
check_pf <- BeeBDC::flagLicense(data = check_pf,
strings_to_restrict = "all",
# DON'T flag if in the following dataSource(s)
excludeDataSource = NULL)
```
## 3.12 GBIF issue
Flag select issues that are flagged by GBIF.
```{r 3.12, collapse = TRUE}
check_pf <- BeeBDC::GBIFissues(data = check_pf,
issueColumn = "issue",
GBIFflags = c("COORDINATE_INVALID", "ZERO_COORDINATE"))
```
## 3.13 Flag Reports
### a. Save flags
Save the flags so far. This function will make sure that you keep a copy of everything that has been flagged up until now. This will be updated throughout the script and can be accessed at the end, so be wary of moving files around manually. However, these data will also still be maintained in the main running file, so this is an optional fail-safe.
```{r 3.13a, eval = FALSE}
flagFile <- BeeBDC::flagRecorder(
data = check_pf,
outPath = paste(OutPath_Report, sep =""),
fileName = paste0("flagsRecorded_", Sys.Date(), ".csv"),
# These are the columns that will be kept along with the flags
idColumns = c("database_id", "id", "catalogNumber", "occurrenceID", "dataSource"),
# TRUE if you want to find a file from a previous part of the script to append to
append = FALSE)
```
### b. Update .summary
Update the *.summary* column.
```{r 3.13b, collapse = TRUE}
check_pf <- BeeBDC::summaryFun(
data = check_pf,
# Don't filter these columns (or NULL)
dontFilterThese = NULL,
# Remove the filtering columns?
removeFilterColumns = FALSE,
# Filter to ONLY cleaned data?
filterClean = FALSE)
```
### c. Reporting
Use **bdc** to generate reports.
```{r 3.13c, eval = FALSE}
(report <- bdc::bdc_create_report(data = check_pf,
database_id = "database_id",
workflow_step = "prefilter",
save_report = TRUE)
)
```
## 3.14 Save
Save the intermediate dataset.
```{r 3.14, eval = FALSE}
check_pf %>%
readr::write_excel_csv(., paste(OutPath_Intermediate, "01_prefilter_output.csv",
sep = "/"))
```
# 4.0 Taxonomy
*For more information about the corresponding bdc functions used in this section, see their [tutorial](https://brunobrr.github.io/bdc/articles/taxonomy.html). *
Read in the filtered dataset or rename the 3.x dataset for 4.0.
```{r 4.0, collapse = TRUE}
if(!exists("check_pf")){
database <-
readr::read_csv( paste(OutPath_Intermediate, "01_prefilter_output.csv",
sep = "/"), col_types = BeeBDC::ColTypeR())
}else{
# OR rename and remove
database <- check_pf
# Remove spent dataset
rm(check_pf)}
```
Remove *names_clean* if it already exists (i.e. you have run the following functions on this dataset before).
```{r 4.0ii, collapse = TRUE}
database <- database %>%
dplyr::select(!tidyselect::any_of("names_clean"))
```
## 4.1 Prep data names
This step cleans the database's *scientificName* column.
**! MAC**: You might need to install gnparser through the terminal using Homebrew:
brew tap gnames/gn
brew install gnparser
<div class="alert alert-info">
<strong> Attention:</strong> <br>
This can be difficult for a Windows install. Ensure you have the most recent versions of R, RStudio, and your R packages. Also, check that the package '**rgnparser**' is installed correctly. If you still cannot get the code below to work, you may have to download the latest version of 'gnparser' from [here](https://github.com/gnames/gnparser/releases/tag/v1.6.9). You may then need to manually install it and edit your system's PATH environment variable so that 'gnparser.exe' can be located. See [here](https://github.com/gnames/gnparser#installation).
</div>
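If you are unsure whether R can locate 'gnparser', a minimal check with base R (a sketch only; no BeeBDC functions involved) is:
```{r 4.1iv, eval = FALSE, collapse = TRUE}
# Returns the full path to the gnparser executable, or "" if it cannot be found on the PATH
Sys.which("gnparser")
# Inspect the PATH itself if the executable is not found
Sys.getenv("PATH")
```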
```{r 4.1, eval = FALSE, collapse = TRUE}
parse_names <-
bdc::bdc_clean_names(sci_names = database$scientificName, save_outputs = FALSE)
```
Example output from a full run of `bdc_clean_names()`:

    ## The latest gnparser version is v1.7.4
    ## gnparser has been installed to /home/runner/bin
    ##
    ## >> Family names prepended to scientific names were flagged and removed from 0 records.
    ## >> Terms denoting taxonomic uncertainty were flagged and removed from 0 records.
    ## >> Other issues, capitalizing the first letter of the generic name, replacing empty names by NA, and removing extra spaces, were flagged and corrected or removed from 1 records.
    ## >> Infraspecific terms were flagged and removed from 0 records.
Keep only the *.uncer_terms* and *names_clean* columns.
```{r 4.1ii, collapse = TRUE, eval = FALSE}
parse_names <-
parse_names %>%
dplyr::select(.uncer_terms, names_clean)
```
Merge names with the complete dataset.
```{r 4.1iii, collapse = TRUE}
# Bind the cleaned names onto the main dataset, if they exist from the step above
if(exists("parse_names")){
  database <- dplyr::bind_cols(database, parse_names)
  rm(parse_names)
}
```
## 4.2 Harmonise taxonomy
Download the custom taxonomy file from the BeeBDC package and [Discover Life](https://www.discoverlife.org) website.
```{r 4.2, collapse = TRUE, eval = FALSE}
taxonomyFile <- BeeBDC::beesTaxonomy()
```
```{r 4.2secret, collapse = TRUE, eval = TRUE}
# load in the small test dataset in the background
system.file("extdata", "testTaxonomy.rda", package="BeeBDC") |>
load()
# Rename the file
taxonomyFile <- testTaxonomy
rm(testTaxonomy)
```
<div class="alert alert-info">
<strong> Attention:</strong> <br>
As of version 1.1.0, BeeBDC now has a new function that can download taxonomies using the taxadb package and transform them into the BeeBDC format. The function, `BeeBDC::taxadbToBeeBDC()`, allows the user to choose their desired provider (e.g., "gbif", "itis"...), version, taxon name and rank, and to save the taxonomy as a readable csv or not. For example for the bee genus Apis:
ApisTaxonomy <- BeeBDC::taxadbToBeeBDC(
name = "Apis",
rank = "Genus",
provider = "gbif",
version = "22.12",
outPath = getwd(),
fileName = "ApisTaxonomy.csv"
)
</div>
Harmonise the names in the occurrence tibble. This flags the occurrences without a matched name and matches names to their correct name according to [Discover Life](https://www.discoverlife.org). You can also use multiple cores to achieve this. See '?`harmoniseR()`' for details.
```{r 4.2ii, collapse = TRUE}
database <- BeeBDC::harmoniseR(path = DataPath, # The path to a folder where the output can be saved
taxonomy = taxonomyFile, # The formatted taxonomy file
data = database,
mc.cores = 1)
```
You don't need this file any more...
```{r 4.2iii, collapse = TRUE}
rm(taxonomyFile)
```
Save the harmonised file.
```{r 4.2iv, eval = FALSE, collapse = TRUE}
database %>%
readr::write_excel_csv(.,
paste(DataPath, "Output", "Intermediate", "02_taxonomy_database.csv",
sep = "/"))
```
## 4.3 Save flags
Save the flags so far. This will find the most-recent flag file and append your new data to it. You can double-check the data and number of columns if you'd like to be thorough and sure that all of the data are intact.
```{r 4.3, eval = FALSE, collapse = TRUE}
flagFile <- BeeBDC::flagRecorder(
data = database,
outPath = paste(OutPath_Report, sep =""),
fileName = paste0("flagsRecorded_", Sys.Date(), ".csv"),
idColumns = c("database_id", "id", "catalogNumber", "occurrenceID", "dataSource"),
append = TRUE,
printSummary = TRUE)
```
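If you would like to make that double-check explicit, a minimal sketch (assuming `database` is still in your environment) is:
```{r 4.3ii, eval = FALSE, collapse = TRUE}
# Rows and columns of the working dataset
dim(database)
# The flag columns generated so far (those starting with ".")
database %>%
  dplyr::select(tidyselect::starts_with(".")) %>%
  names()
```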
# 5.0 Space
*The final frontier or whatever.*
Read in the latest database.
```{r 5.0, collapse = TRUE}
if(!exists("database")){
database <-
readr::read_csv(paste(OutPath_Intermediate, "02_taxonomy_database.csv", sep = "/"),
col_types = BeeBDC::ColTypeR())}
```
## 5.1 Coordinate precision
This function identifies records with a coordinate precision below a specified number of decimal places. For example, the precision of a coordinate with 1 decimal place is 11.132 km at the equator, i.e., the scale of a large city. The major difference between the **bdc** and **BeeBDC** functions is that `jbd_coordinates_precision()` will only flag occurrences if BOTH latitude and longitude are rounded (as opposed to only one of these).
Coordinates with one, two, or three decimal places present a precision of ~11.1 km, ~1.1 km, and ~111 m at the equator, respectively.
```{r 5.1, collapse = TRUE}
check_space <-
BeeBDC::jbd_coordinates_precision(
data = database,
lon = "decimalLongitude",
lat = "decimalLatitude",
ndec = 2 # number of decimals to be tested
)
```
Remove the spent dataset.
```{r 5.1ii, collapse = TRUE}
rm(database)
```
Save the resulting file.
```{r 5.1iii, eval = FALSE, collapse = TRUE}
check_space %>%
readr::write_excel_csv(.,
paste(OutPath_Intermediate, "03_space_inter_database.csv",
sep = "/"))
```
## 5.2 Common spatial issues
Only run occurrences that are spatially 'valid' through `clean_coordinates()`.
```{r 5.2, eval = FALSE, collapse = TRUE}
tempSpace <- check_space %>%
dplyr::filter(!.coordinates_empty == FALSE) %>%
dplyr::filter(!.coordinates_outOfRange == FALSE)
```
Next, we will flag common spatial issues using functions of the package **CoordinateCleaner**. It addresses some common issues in biodiversity datasets.
```{r 5.2ii, message=TRUE, warning=FALSE, eval = FALSE, collapse = TRUE}
tempSpace <-
CoordinateCleaner::clean_coordinates(
x = tempSpace,
lon = "decimalLongitude",
lat = "decimalLatitude",
species = "scientificName",
countries = NULL, # Tests if coords are from x country. This is not needed.
tests = c(
"capitals", # records within 0.5 km of capitals centroids
"centroids", # records within 1 km around country and province centroids
"equal", # records with equal coordinates
"gbif", # records within 1 km of GBIF headquarters. (says 1 degree in package, but code says 1000 m)
"institutions", # records within 100m of zoo and herbaria
"zeros" # records with coordinates 0,0
# "seas" # Not flagged as this should be flagged by coordinate country inconsistent
),
capitals_rad = 1000,
centroids_rad = 500,
centroids_detail = "both", # test both country and province centroids
inst_rad = 100, # remove zoo and herbaria within 100m
range_rad = 0,
zeros_rad = 0.5,
capitals_ref = NULL,
centroids_ref = NULL,
country_ref = NULL,
country_refcol = "countryCode",
inst_ref = NULL,
range_ref = NULL,
# seas_scale = 50,
value = "spatialvalid" # result of tests are appended in separate columns
) %>%
# Remove duplicate .summary column that can be replaced later and turn into a tibble
dplyr::select(!tidyselect::starts_with(".summary")) %>%
dplyr::tibble()
```
Re-merge the datasets.
```{r 5.2iii, eval = FALSE, collapse = TRUE}
check_space <- tempSpace %>%
# Re-bind with the records that were removed earlier
dplyr::bind_rows(check_space %>%
dplyr::filter(.coordinates_empty == FALSE |
.coordinates_outOfRange == FALSE) )
```
Remove the temporary dataset.
```{r 5.2iv, eval = FALSE, collapse = TRUE}
rm(tempSpace)
```
Save the intermediate dataset.
```{r 5.2v, eval = FALSE, collapse = TRUE}
check_space %>%
readr::write_excel_csv(paste(OutPath_Intermediate, "03_space_inter_database.csv",
sep = "/"))
```
## 5.3 Diagonal + grid
This function finds sequential numbers that could be fill-down errors in latitude and longitude, grouping by the 'groupingColumns'. This is accomplished using a sliding window whose length is determined by minRepeats. Only coordinates of precision 'ndec' (number of decimals in decimal degree format) will be examined. Note that this function is very RAM-intensive, so the use of multiple threads should be approached with caution depending on your dataset. However, the option is provided.
```{r 5.3, collapse = TRUE}
check_space <- BeeBDC::diagonAlley(
data = check_space,
# The minimum number of repeats needed to find a sequence in for flagging
minRepeats = 6,
ndec = 3,
groupingColumns = c("eventDate", "recordedBy", "datasetName"),
mc.cores = 1)
```
Spatial gridding from rasterisation: select only the datasets with at least four unique coordinate records (matching the filter below).
```{r 5.3ii, collapse = TRUE}
griddingDF <- check_space %>%
# Exclude NA lat and lon values
tidyr::drop_na(c("decimalLatitude", "decimalLongitude")) %>%
# Group by the dataset name
dplyr::group_by(datasetName) %>%
# Remove rows that aren't unique for lat and long
dplyr::distinct(decimalLongitude, decimalLatitude,
.keep_all = TRUE) %>%
# Find the groups with 4 or more occurrence records
dplyr::filter(dplyr::n() >= 4) %>%
dplyr::ungroup()
```
Run the gridding analysis to find datasets that might be gridded.
```{r 5.3iii, eval = FALSE, collapse = TRUE}
gridded_datasets <- CoordinateCleaner::cd_round(
x = griddingDF,
lon = "decimalLongitude",
lat = "decimalLatitude",
ds = "datasetName",
T1 = 7,
min_unique_ds_size = 4,
test = "both",
value = "dataset",
graphs = FALSE,
verbose = TRUE,
reg_out_thresh = 2,
reg_dist_min = 0.1,
reg_dist_max = 2
) %>%
dplyr::tibble()
# The griddingDF is no longer needed. remove it.
rm(griddingDF)
```
Integrate these results with the main dataset.
```{r 5.3iv, eval = FALSE, collapse = TRUE}
check_space <- check_space %>%
# Join the datasets
dplyr::left_join(
# Select the columns of interest
dplyr::select(gridded_datasets, dataset, lon.flag, lat.flag, summary),
by = c("datasetName" = "dataset")) %>%
  # Make new columns with more-consistent naming and change the NA values to TRUE (not flagged)
dplyr::mutate(.lonFlag = tidyr::replace_na(lon.flag, TRUE),
.latFlag = tidyr::replace_na(lat.flag, TRUE),
.gridSummary = tidyr::replace_na(summary, TRUE)) %>%
# Remove old columns
dplyr::select(!c(lon.flag, lat.flag, summary))
```
Save the gridded_datasets file for later examination.
```{r 5.3 v, eval = FALSE, collapse = TRUE}
gridded_datasets %>%
readr::write_excel_csv(paste(OutPath_Intermediate, "03_space_griddedDatasets.csv",
sep = "/"))
```
Now remove this file.
```{r 5.3vi, eval = FALSE, collapse = TRUE}
rm(gridded_datasets)
```
## 5.4 Uncertainty
Flag records that exceed a *coordinateUncertaintyInMeters* threshold.
```{r 5.4, collapse = TRUE}
check_space <- BeeBDC::coordUncerFlagR(data = check_space,
uncerColumn = "coordinateUncertaintyInMeters",
threshold = 1000)
```
## 5.5 Country checklist
This step identifies mismatches between the [Discover Life](https://www.discoverlife.org) country checklist — `beesChecklist` — for bee species and the dataset, identifying potential misidentifications, outliers, etc.
Download the country-level checklist.
```{r 5.5, collapse = TRUE, eval = FALSE}
checklistFile <- BeeBDC::beesChecklist()
```
```{r 5.5secret, collapse = TRUE, eval = TRUE}
# load in the small test dataset in the background
system.file("extdata", "testChecklist.rda", package="BeeBDC") |>
load()
# Rename the file
checklistFile <- testChecklist
rm(testChecklist)
```
```{r 5.5ii, collapse = TRUE}
check_space <- BeeBDC::countryOutlieRs(checklist = checklistFile,
data = check_space,
keepAdjacentCountry = TRUE,
pointBuffer = 0.05,
# Scale of map to return, one of 110, 50, 10 OR 'small', 'medium', 'large'
# Smaller numbers will result in much longer calculation times.
# We have not attempted a scale of 10.
scale = 50,
mc.cores = 1)
```
```{r 5.5iii, eval = FALSE, collapse = TRUE}
# A list of failed species-country combinations and their numbers can be output here
check_space %>%
dplyr::filter(.countryOutlier == FALSE) %>%
dplyr::select(database_id, scientificName, country) %>%
dplyr::group_by(scientificName) %>%
dplyr::mutate(count_scientificName = n()) %>%
dplyr::distinct(scientificName, country, .keep_all = TRUE) %>%
readr::write_excel_csv(paste(OutPath_Intermediate, "03_space_failedCountryChecklist.csv",
sep = "/"))
```
## 5.6 Map spatial errors
Assemble maps of potential spatial errors and outliers, either one flag at a time or using the *.summary* column. First, rebuild the *.summary* column.
```{r 5.6, eval = FALSE, collapse = TRUE}
check_space <- BeeBDC::summaryFun(
data = check_space,
dontFilterThese = NULL,
removeFilterColumns = FALSE,
filterClean = FALSE)
```
Use col_to_map in order to map ONE spatial flag at a time or map the *.summary* column for all flags.
```{r 5.6ii, eval = FALSE, collapse = TRUE}
check_space %>%
dplyr::filter(.summary == FALSE) %>% # map only records flagged as FALSE
bdc::bdc_quickmap(
data = .,
lon = "decimalLongitude",
lat = "decimalLatitude",
col_to_map = ".summary",
size = 0.9
)
```
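To map a single flag rather than the combined *.summary* column, point the same call at any one flag column. A minimal sketch using the capital-centroid flag from `CoordinateCleaner::clean_coordinates()` (this assumes the *.cap* column exists in your run) is:
```{r 5.6iii, eval = FALSE, collapse = TRUE}
check_space %>%
  dplyr::filter(.cap == FALSE) %>% # map only records that failed this one test
  bdc::bdc_quickmap(
    data = .,
    lon = "decimalLongitude",
    lat = "decimalLatitude",
    col_to_map = ".cap",
    size = 0.9
  )
```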
## 5.7 Space report
Create the space report using **bdc**.
```{r 5.7, eval = FALSE, collapse = TRUE}
(report <-
bdc::bdc_create_report(
data = dplyr::tibble(check_space %>% dplyr::select(!.uncer_terms)),
database_id = "database_id",
workflow_step = "space",
save_report = TRUE)
)
```
## 5.8 Space figures
Create figures for the spatial data filtering results.
```{r 5.8, eval = FALSE, collapse = TRUE}
(figures <-
BeeBDC::jbd_create_figures(
data = dplyr::tibble(check_space %>% dplyr::select(!.uncer_terms)),
path = DataPath,
database_id = "database_id",
workflow_step = "space",
save_figures = TRUE)
)
```
For examining the figures, the options are:
- *.cap* = Records around country capital centroid
- *.cen* = Records around country or province centroids
- *.dbl* = Duplicated coordinates per species
- *.equ* = Identical coordinates
- *.otl* = Geographical outliers
- *.gbf* = Records around the GBIF headquarters
- *.inst* = Records around biodiversity institutions
- *.rou* = Rounded (probably imprecise) coordinates
- *.urb* = Records within urban areas — (Likely not relevant for bees.)
You can examine these figures, for example, by running:
figures$.rou
Save interim dataset.
```{r 5.8ii, eval = FALSE, collapse = TRUE}
check_space %>%
readr::write_excel_csv(paste(OutPath_Intermediate, "03_space_inter_database.csv",
sep = "/"))
```
## 5.9 Save flags
Save the flags so far.
```{r 5.9, eval = FALSE, collapse = TRUE}
BeeBDC::flagRecorder(
data = check_space,
outPath = paste(OutPath_Report, sep =""),
fileName = paste0("flagsRecorded_", Sys.Date(), ".csv"),
idColumns = c("database_id", "id", "catalogNumber", "occurrenceID", "dataSource"),
append = TRUE,
printSummary = TRUE)
```
## 5.10 Save
Save the intermediate dataset.
```{r 5.10, eval = FALSE, collapse = TRUE}
check_space %>%
readr::write_excel_csv(.,
paste(OutPath_Intermediate, "03_space_database.csv",
sep = "/"))
```
# 6.0 Time
Read in the last database, if needed.
```{r 6.0, collapse = TRUE}
if(!exists("check_space")){
check_time <-
readr::read_csv(paste(OutPath_Intermediate, "03_space_database.csv", sep = "/"),
col_types = BeeBDC::ColTypeR())
}else{
check_time <- check_space
# Remove the spent file
rm(check_space)}
```
You can plot a histogram of dates here.
```{r 6.0ii, collapse = TRUE}
hist(lubridate::ymd_hms(check_time$eventDate, truncated = 5), breaks = 20,
main = "Histogram of eventDates")
```
Filter some silly dates that don't make sense.
```{r 6.0iii, collapse = TRUE}
check_time$year <- ifelse(check_time$year > lubridate::year(Sys.Date()) | check_time$year < 1600,
NA, check_time$year)
check_time$month <- ifelse(check_time$month > 12 | check_time$month < 1,
NA, check_time$month)
check_time$day <- ifelse(check_time$day > 31 | check_time$day < 1,
NA, check_time$day)
```
## 6.1 Recover dates
The `dateFindR()` function will search through some other columns in order to find and rescue dates that may not have made it into the correct columns. It will further update the *eventDate*, *day*, *month*, and *year* columns where these data were a) missing and b) located in one of the searched columns.
```{r 6.1, collapse = TRUE}
check_time <- BeeBDC::dateFindR(data = check_time,
# Years above this are removed (from the recovered dates only)
maxYear = lubridate::year(Sys.Date()),
# Years below this are removed (from the recovered dates only)
minYear = 1700)
```
## 6.2 No eventDate
Flag records that simply lack collection date. :(
```{r 6.2, collapse = TRUE}
check_time <-
bdc::bdc_eventDate_empty(data = check_time, eventDate = "eventDate")
```
## 6.3 Old records
This will flag records prior to the date selected. 1970 is frequently chosen for SDM work. You may not need to filter old records at all, so think critically about your use. We have chosen 1950 as a lower extreme.
```{r 6.3, collapse = TRUE}
check_time <-
bdc::bdc_year_outOfRange(data = check_time,
eventDate = "year",
year_threshold = 1950)
```
## 6.4 Time report
*Not all of time, just the time pertaining to our precise occurrence records.*
Update the *.summary* column.
```{r 6.4, eval = TRUE, collapse = TRUE}
check_time <- BeeBDC::summaryFun(
data = check_time,
# Don't filter these columns (or NULL)
dontFilterThese = c(".gridSummary", ".lonFlag", ".latFlag", ".uncer_terms"),
# Remove the filtering columns?
removeFilterColumns = FALSE,
# Filter to ONLY cleaned data?
filterClean = FALSE)
```
```{r 6.4ii, eval = FALSE, collapse = TRUE}
( report <-
bdc::bdc_create_report(data = check_time,
database_id = "database_id",
workflow_step = "time",
save_report = FALSE)
)
```
## 6.5 Time figures
Create time results figures.
```{r 6.5, eval = FALSE, collapse = TRUE}
figures <-
BeeBDC::jbd_create_figures(data = check_time,
path = DataPath,
database_id = "database_id",
workflow_step = "time",
save_figures = TRUE)
```
You can check figures by using...
```{r 6.5ii, eval = FALSE, collapse = TRUE}
figures$year
```
Save the time-revised data into the intermediate folder.
```{r 6.5iii, eval = FALSE, collapse = TRUE}
check_time %>%
readr::write_excel_csv(.,
paste(OutPath_Intermediate, "04_time_database.csv",
sep = "/"))
```
## 6.6 Save flags
Save the flags so far.
```{r, eval = FALSE, collapse = TRUE}
BeeBDC::flagRecorder(
data = check_time,
outPath = paste(OutPath_Report, sep =""),
fileName = paste0("flagsRecorded_", Sys.Date(), ".csv"),
idColumns = c("database_id", "id", "catalogNumber", "occurrenceID", "dataSource"),
append = TRUE,
printSummary = TRUE)
```
# 7.0 De-duplication
The dataset can be re-read here if it does not already exist.
```{r 7.0, eval = FALSE, collapse = TRUE}
if(!exists("check_time")){
check_time <-
readr::read_csv(paste(OutPath_Intermediate, "04_time_database.csv",
sep = "/"),
col_types = BeeBDC::ColTypeR())}
```
## 7.1 deDuplicate
We will FLAG duplicates here. These input columns can be hacked to de-duplicate as you wish. This function uses user-specified inputs and columns to identify duplicate occurrence records. Duplicates are identified iteratively and will be tallied up, duplicate pairs clustered, and sorted at the end of the function. The function is designed to work with Darwin Core data with a *database_id* column, but it is also modifiable to work with other columns.
I would encourage you to see '?`dupeSummary()`' for more details as this function is quite modifiable to user needs.
```{r 7.1, collapse = TRUE}
check_time <- BeeBDC::dupeSummary(
data = check_time,
path = OutPath_Report,
# options are "ID","collectionInfo", or "both"
duplicatedBy = "collectionInfo",
  # The columns to generate completeness info from (and to sort by completeness)
completeness_cols = c("decimalLatitude", "decimalLongitude",
"scientificName", "eventDate"),
# The columns to ADDITIONALLY consider when finding duplicates in collectionInfo
collectionCols = c("decimalLatitude", "decimalLongitude", "scientificName", "eventDate",
"recordedBy"),
# The columns to combine, one-by-one with the collectionCols
collectInfoColumns = c("catalogNumber", "otherCatalogNumbers"),
# Custom comparisons — as a list of columns to compare
# RAW custom comparisons do not use the character and number thresholds
CustomComparisonsRAW = dplyr::lst(c("catalogNumber", "institutionCode", "scientificName")),
# Other custom comparisons use the character and number thresholds
CustomComparisons = dplyr::lst(c("gbifID", "scientificName"),
c("occurrenceID", "scientificName"),
c("recordId", "scientificName"),
c("id", "scientificName")),
# The order in which you want to KEEP duplicated based on data source
# try unique(check_time$dataSource)
sourceOrder = c("CAES", "Gai", "Ecd","BMont", "BMin", "EPEL", "ASP", "KP", "EcoS", "EaCO",
"FSCA", "Bal", "SMC", "Lic", "Arm",
"USGS", "ALA", "VicWam", "GBIF","SCAN","iDigBio"),
# Paige ordering is done using the database_id prefix, not the dataSource prefix.
prefixOrder = c("Paige", "Dorey"),
# Set the complexity threshold for id letter and number length
# minimum number of characters when WITH the numberThreshold
characterThreshold = 2,
# minimum number of numbers when WITH the characterThreshold
numberThreshold = 3,
# Minimum number of numbers WITHOUT any characters
numberOnlyThreshold = 5
) %>% # END dupeSummary
dplyr::as_tibble(col_types = BeeBDC::ColTypeR())
```
Save the dataset into the intermediate folder.
```{r 7.1ii, eval = FALSE, collapse = TRUE}
check_time %>%
readr::write_excel_csv(.,
paste(OutPath_Intermediate, "04_2_dup_database.csv",
sep = "/"))
```
## 7.2 Save flags
Save the flags so far.
```{r 7.2, eval = FALSE, collapse = TRUE}
BeeBDC::flagRecorder(
data = check_time,
outPath = paste(OutPath_Report, sep =""),
fileName = paste0("flagsRecorded_", Sys.Date(), ".csv"),
idColumns = c("database_id", "id", "catalogNumber", "occurrenceID", "dataSource"),
append = TRUE,
printSummary = TRUE)
```
# 8.0 Data filtering
The dataset can be re-read here if it does not already exist.
```{r 8.0, eval = FALSE, collapse = TRUE}
if(!exists("check_time")){
check_time <-
readr::read_csv(paste(OutPath_Intermediate, "04_2_dup_database.csv",
sep = "/"), col_types = ColTypeR())}
```
## 8.1 rm Outliers
Read in the most-recent duplicates file (generated by `dupeSummary()`) in order to identify the duplicates of the expert outliers.
```{r 8.1, eval = TRUE, collapse = TRUE}
if(!exists("duplicates")){
duplicates <- BeeBDC::fileFinder(path = DataPath,
fileName = "duplicateRun_") %>%
readr::read_csv()}
```
Identify the outliers and get a list of their database_ids. This would require the source outlier files provided with the [BeeBDC](https://doi.org/10.1101/2023.06.30.547152) paper. These files can further be modified to include more outliers.
check_time <- BeeBDC::manualOutlierFindeR(
data = check_time,
DataPath = DataPath,
PaigeOutliersName = "removedBecauseDeterminedOutlier.csv",
newOutliersName = "^All_outliers_ANB_14March.xlsx",
ColombiaOutliers_all = "All_Colombian_OutlierIDs.csv",
# A .csv with manual outlier records that are too close to otherwise TRUE records
NearTRUE = "nearTRUE.csv",
duplicates = duplicates)
## 8.2 Save uncleaned
Save the uncleaned dataset.
```{r 8.2, eval = TRUE, collapse = TRUE}
# Make sure that the .summary column is updated
check_time <- BeeBDC::summaryFun(
data = check_time,
dontFilterThese = c(".gridSummary", ".lonFlag", ".latFlag", ".uncer_terms",
".uncertaintyThreshold"),
removeFilterColumns = FALSE,
filterClean = FALSE)
```
```{r 8.2ii, eval = FALSE, collapse = TRUE}
# Save the uncleaned dataset
check_time %>% readr::write_excel_csv(.,
paste(OutPath_Intermediate, "05_unCleaned_database.csv",
sep = "/"))
```
## 8.3 Filter
Now clean the dataset of extra columns and failed rows and then save it.
```{r 8.3, eval = TRUE, collapse = TRUE}
cleanData <- BeeBDC::summaryFun(
data = check_time,
dontFilterThese = c(".gridSummary", ".lonFlag", ".latFlag", ".uncer_terms",
".uncertaintyThreshold"),
# Remove the filtering columns?
removeFilterColumns = TRUE,
# Filter to ONLY cleaned data?
filterClean = TRUE)
```
```{r 8.3ii, eval = FALSE, collapse = TRUE}
# Save this CLEANED dataset
cleanData %>% readr::write_excel_csv(.,
paste(OutPath_Intermediate, "05_cleaned_database.csv",
sep = "/"))
```
# 9.0 Figures and tables
## 9.1 Duplicate chordDiagrams
Install **BiocManager** and **ComplexHeatmap** if you missed them at the start.
```{r 9.1, message=FALSE, warning=FALSE, eval = FALSE, collapse = TRUE}
if (!require("BiocManager", quietly = TRUE))
install.packages("BiocManager", repos = "http://cran.us.r-project.org")
BiocManager::install("ComplexHeatmap")
```
Read in the most recent file of flagged duplicates, if it’s not already in your environment.
```{r 9.1ii, eval = TRUE, collapse = TRUE}
if(!exists("duplicates")){
duplicates <- BeeBDC::fileFinder(path = DataPath,
fileName = "duplicateRun_") %>%
readr::read_csv()}
```
Choose the global figure parameters.
```{r 9.1on.exit, include = FALSE}
oldpar <- par(no.readonly = TRUE)
on.exit(oldpar)
```
```{r 9.1iii, eval = FALSE, collapse = TRUE}
par(mar = c(2, 2, 2, 2)/2, mfrow = c(1,1))
```
Create the chordDiagram. You can leave many of the below values out, but we show here the defaults. There are no duplicates in our current test dataset, so **BeeBDC** will throw an informative error. However, we show the full output figure from our bee dataset below.
```{r 9.1iv, eval=FALSE, fig.fullwidth=TRUE, fig.height=7.5, fig.width=9, collapse = TRUE}
BeeBDC::chordDiagramR(
# The duplicate data from the dupeSummary function output
dupeData = duplicates,
outPath = OutPath_Figures,
fileName = "ChordDiagram.pdf",
# These can be modified to help fit the final pdf that's exported.
width = 9,
height = 7.5,
bg = "white",
# How few distinct dataSources should a group have to be listed as "other"
smallGrpThreshold = 3,
title = "Duplicated record sources",
  # The default list of colour palettes to choose from using the paletteer package
palettes = c("cartography::blue.pal", "cartography::green.pal",
"cartography::sand.pal", "cartography::orange.pal", "cartography::red.pal",
"cartography::purple.pal", "cartography::brown.pal"),
canvas.ylim = c(-1.0,1.0),
canvas.xlim = c(-0.6, 0.25),
text.col = "black",
legendX = grid::unit(6, "mm"),
legendY = grid::unit(18, "mm"),
legendJustify = c("left", "bottom"),
niceFacing = TRUE)
```
<img src="https://photos.smugmug.com/photos/i-Xt3tN8L/0/X5/i-Xt3tN8L-X5.jpg" alt="Full chord diagram from Dorey et al. 2023" align="left" />
## 9.2 Duplicate histogram
Read in the uncleaned dataset, if it's not already present.
```{r 9.2, eval = TRUE, collapse = TRUE}
if(!exists("check_time")){
beeData <- readr::read_csv(paste(OutPath_Intermediate, "05_unCleaned_database.csv",
sep = "/"),
col_types = BeeBDC::ColTypeR())
}else{
beeData <- check_time
rm(check_time)
}
```
Create a plot with two bar graphs. One shows the absolute number of duplicate records for each data source, while the other shows the proportion of records that are duplicated within each data source. (*'dataSource'* is simplified to the text before the first underscore).
```{r 9.2ii, warning=FALSE, eval=TRUE, collapse = TRUE}
BeeBDC::dupePlotR(
data = beeData,
# The outPath to save the plot as
outPath = OutPath_Figures,
fileName = "duplicatePlot.pdf",
# Colours in order: duplicate, kept duplicate, unique
dupeColours = c("#F2D2A2","#B9D6BC", "#349B90"),
# Plot size and height
base_height = 7, base_width = 7,
legend.position = c(0.85, 0.8),
# Extra variables can be fed into forcats::fct_recode() to change names on plot
GBIF = "GBIF", SCAN = "SCAN", iDigBio = "iDigBio", USGS = "USGS", ALA = "ALA",
ASP = "ASP", CAES = "CAES", Ecd = "Ecd",
returnPlot = TRUE
)
```
## 9.3 Flags by source
Create a compound bar plot that shows the proportion of records that pass or fail each flag (rows) for each data source (columns). The function can also optionally return a point map for a user-specified species when plotMap = TRUE. (*dataSource* is simplified to the text before the first underscore.)
```{r 9.3, fig.width=15, fig.height=9, fig.fullwidth=TRUE, eval=TRUE, collapse = TRUE}
BeeBDC::plotFlagSummary(
data = beeData,
# Colours in order of pass (TRUE), fail (FALSE), and NA
flagColours = c("#127852", "#A7002D", "#BDBABB"),
fileName = paste0("FlagsPlot_", Sys.Date(),".pdf"),
outPath = paste0(OutPath_Figures),
width = 15, height = 9,
# OPTIONAL:
# # Filter to a single species
# speciesName = "Holcopasites heliopsis",
# # column to look in
# nameColumn = "species",
# # Save the filtered data
# saveFiltered = TRUE,
# # Filter column to display on map
# filterColumn = ".summary",
# plotMap = TRUE,
# # amount to jitter points if desired, e.g. 0.25 or NULL
# jitterValue = NULL,
# # Map opacity value for points between 0 and 1
# mapAlpha = 1,
# # If a user wants to output the table used to make the figure, change this to TRUE
# saveTable = FALSE,
# Extra variables can be fed into forcats::fct_recode() to change names on plot
GBIF = "GBIF", SCAN = "SCAN", iDigBio = "iDigBio", USGS = "USGS", ALA = "ALA",
ASP = "ASP", CAES = "CAES", 'BMont' = "BMont", 'BMin' = "BMin", Ecd = "Ecd",
Gaiarsa = "Gai", EPEL = "EPEL", VicWam = "VicWam",
returnPlot = TRUE
)
```
## 9.4 Maps
Import CLEANED dataset (you can change this option).
```{r 9.4, eval = TRUE, collapse = TRUE}
if(!exists("cleanData")){
cleanData <- readr::read_csv(paste(OutPath_Intermediate, "05_cleaned_database.csv",
sep = "/"),
col_types = BeeBDC::ColTypeR())}
```
### a. Summary maps
Draw a global summary map for occurrence and species number by country.
```{r 9.4a, eval=FALSE, collapse = TRUE}
BeeBDC::summaryMaps(
data = cleanData,
width = 10, height = 10,
class_n = 3,
class_Style = "fisher",
fileName = "CountryMaps_fisher.pdf",
outPath = OutPath_Figures,
returnPlot = TRUE
)
```
### b. Interactive maps
Uses the occurrence data (preferably uncleaned in order to show pass/fail points) and outputs interactive .html maps, which can be opened in your browser, to a specific directory. The maps can highlight if an occurrence has passed all filtering (*.summary* == TRUE) or failed at least one filter (*.summary* == FALSE). This can be updated by first running `summaryFun()` to choose the columns that you want to be highlighted. This function will also highlight occurrences flagged as expert-identified or country outliers separately. Because the function can have any categorical variable fed into 'speciesColumn', users may choose another column of interest to map; however, maps made using very large categories can be slow to produce and unwieldy to view.
```{r 9.4b, eval = FALSE, collapse = TRUE}
BeeBDC::interactiveMapR(
# occurrence data
data = beeData,
# Directory where to save files
  outPath = paste(OutPath_Figures, "interactiveMaps", sep = "/"),
lon = "decimalLongitude",
lat = "decimalLatitude",
# Occurrence dataset column with species names
speciesColumn = "scientificName",
# Which species to map — a character vector of names or "ALL"
# Note: "ALL" is defined AFTER filtering for country
speciesList = "ALL",
countryList = NULL, # study area
# Point jitter to see stacked points — jitters an amount in decimal degrees
jitterValue = 0.01
)
```
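As noted above, you can first re-run `summaryFun()` so that the *.summary* column highlighted on the maps reflects only the flags you care about. A minimal sketch (the flags ignored here are an illustrative choice, not a recommendation) is:
```{r 9.4bii, eval = FALSE, collapse = TRUE}
beeData <- BeeBDC::summaryFun(
  data = beeData,
  # Flags to leave out of .summary for mapping purposes (example choice only)
  dontFilterThese = c(".gridSummary", ".lonFlag", ".latFlag", ".uncertaintyThreshold"),
  removeFilterColumns = FALSE,
  filterClean = FALSE)
```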
## 9.5 Data providers
Read in the clean data if it's not already in the environment.
```{r 9.5, eval = FALSE, collapse = TRUE}
if(!exists("cleanData")){
cleanData <- readr::read_csv(paste(OutPath_Intermediate, "05_cleaned_database.csv",
sep = "/"),
col_types = BeeBDC::ColTypeR(),
locale = readr::locale(encoding = "UTF-8"))}
```
This function will attempt to find and build a table of data providers that have contributed to the input data, especially using the *'institutionCode'* column. It will also search a variety of other columns to find data providers using an internally set sequence of if-else statements. Hence, this function is quite specific for bee data, but it should work for other taxa in similar institutions (perhaps to a lesser degree).
```{r 9.5ii, eval = TRUE, collapse = TRUE}
# Note, if outPath = NULL then no file will be saved
dataProvTable <- BeeBDC::dataProvTables(data = cleanData,
runBeeDataChecks = TRUE,
outPath = NULL,
fileName = "dataProvTable.csv")
```
## 9.6 Flag summary
The function `flagSummaryTable()` takes a flagged dataset and returns the total number of fails (FALSE) per flag (in columns starting with “.”) and per species. Users may define the column by which to group the summary. While it is intended to work with the *scientificName* column, users may select any grouping column (e.g., *country*).
```{r 9.6, eval = TRUE, collapse = TRUE}
# Note, if outPath = NULL then no file will be saved
summaryTable <- BeeBDC::flagSummaryTable(data = beeData,
column = "scientificName",
outPath = NULL,
fileName = "flagTable.csv")
```
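Because the grouping column can be swapped, a per-country summary, for example, is a one-argument change (a minimal sketch):
```{r 9.6ii, eval = FALSE, collapse = TRUE}
# Note, if outPath = NULL then no file will be saved
countryFlagTable <- BeeBDC::flagSummaryTable(data = beeData,
                                             column = "country",
                                             outPath = NULL,
                                             fileName = "flagTable_byCountry.csv")
```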
```{r cleanup, include=FALSE, collapse = TRUE}
# Remove the webpage folder
unlink(paste0(dirname(getwd()), "/inst/extdata/WebDir"), recursive = TRUE)
```
|
/scratch/gouwar.j/cran-all/cranData/BeeBDC/vignettes/BeeBDC_main.Rmd
|
---
title: "Basic workflow"
output:
rmarkdown::html_vignette:
vignette: >
%\VignetteIndexEntry{Basic workflow}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r libraryChunk, load-packages, include=FALSE}
# markdown packages
library(rgnparser)
library(magrittr)
library(knitr)
library(rmarkdown)
library(rmdformats)
library(prettydoc)
library(htmltools)
library(pkgdown)
# Load core packages
library(devtools)
library(BiocManager)
library(purrr)
library(here)
library(renv)
library(bdc)
library(CoordinateCleaner)
library(dplyr)
library(readr)
library(stringr)
library(lubridate)
library(tidyselect)
library(R.utils)
library(tidyr)
library(ggplot2)
library(forcats)
library(emld)
library(rlang)
library(xml2)
library(mgsub)
library(rvest)
library(rnaturalearth)
library(rnaturalearthdata)
library(countrycode)
library(janitor)
library(circlize)
library(paletteer)
library(cowplot)
library(igraph)
library(ggspatial)
library(sf)
library(parallel)
library(terra)
# Save the current options so they can be restored on exit. Note: on CI runners (e.g.,
# GitHub Actions) you may prefer a small fixed mc.cores value rather than detectCores().
old <- options() # save current options
on.exit(options(old)) # restore options on exit
options(mc.cores = parallel::detectCores())
```
```{r secretRootPath, include=FALSE}
# Set the RootPath to tempdir
RootPath <- tempdir()
if (!dir.exists(paste0(RootPath, "/Data_acquisition_workflow"))) {
dir.create(paste0(RootPath, "/Data_acquisition_workflow"), recursive = TRUE)
}
```
```{r global-options, include=FALSE}
knitr::opts_chunk$set(error = TRUE,
eval = TRUE,
tidy = TRUE,
warning = FALSE,
root.dir = normalizePath(tempdir()))
```
This workflow is meant to be a basic example of how a user might take a flagged version of our (or another) occurrence dataset and filter it for specific taxa or countries, re-apply flagging functions, re-filter the data, or make maps based on those data.
# 0.0 Script preparation
## 0.1 Working directory
Choose the path to the root folder in which all other folders can be found.
```{r falseRootPath, eval=FALSE}
RootPath <- paste0("/your/path/here")
```
```{r CreateRootPath, warning=FALSE, collapse = TRUE}
# Create the working directory in the RootPath if it doesn't exist already
if (!dir.exists(paste0(RootPath, "/Data_acquisition_workflow"))) {
dir.create(paste0(RootPath, "/Data_acquisition_workflow"), recursive = TRUE)
}
# Set the working directory
setwd(paste0(RootPath,"/Data_acquisition_workflow"))
```
For the first time that you run BeeBDC, and if you want to use the renv package to manage your
packages, you can install renv...
install.packages("renv", repos = "http://cran.us.r-project.org")
and then initialise renv for the project.
renv::init(project = paste0(RootPath,"/Data_acquisition_workflow"))
If you have already initialised a project, you can instead just activate it.
```{r activate, collapse = TRUE}
renv::activate(project = paste0(RootPath,"/Data_acquisition_workflow"))
```
## 0.2 Install packages (if needed)
You may need to install gdal on your computer. This can be done on a Mac by using Homebrew in the terminal and the command "brew install gdal".
To start out, you will need to install **BiocManager**, **devtools**, **ComplexHeatmap**, and **rnaturalearthhires** to then install and fully use **BeeBDC**.
```{r installPackages, message=FALSE, warning=FALSE, results=FALSE, collapse = TRUE, eval = FALSE}
if (!require("BiocManager", quietly = TRUE))
install.packages("BiocManager", repos = "http://cran.us.r-project.org")
BiocManager::install("ComplexHeatmap")
```
```{r rnaturalearthhires, eval=FALSE}
# Install remotes if needed
if (!require("remotes", quietly = TRUE))
install.packages("remotes", repos = "http://cran.us.r-project.org")
# Download and then load rnaturalearthhires
remotes::install_github("ropensci/rnaturalearthhires")
install.packages("rnaturalearthhires", repos = "https://ropensci.r-universe.dev", type = "source")
library(rnaturalearthhires)
```
Now install **BeeBDC**.
```{r installBeeBDC, results=TRUE, message=TRUE, eval = FALSE, collapse = TRUE}
install.packages("BeeBDC")
library(BeeBDC)
```
Snapshot the renv environment.
```{r snapshot, collapse = TRUE}
renv::snapshot(project = paste0(RootPath,"/Data_acquisition_workflow"),
prompt = FALSE)
```
Set up the directories used by **BeeBDC**. These directories include where the data, figures, reports, etc. will be saved. The RDoc needs to be a path RELATIVE to the RootPath; i.e., the file path from which the two diverge.
```{r dirMaker, collapse = TRUE, eval = FALSE}
BeeBDC::dirMaker(
RootPath = RootPath,
RDoc = "vignettes/BeeBDC_main.Rmd") %>%
# Add paths created by this function to the environment()
list2env(envir = parent.env(environment()))
```
```{r dirMakerSECRETELY, include = FALSE}
# For the sake of this tutorial, we will not use here::i_am in dirMaker, because we aren't allowed
# to mess with package directories in this way. This will work-around to use the tempdir()
DataPath <- paste0(RootPath, "/Data_acquisition_workflow")
OutPath_Check <- paste0(RootPath, "/Data_acquisition_workflow/Output/Check")
OutPath_Figures <- paste0(RootPath, "/Data_acquisition_workflow/Output/Figures")
OutPath_Intermediate <- paste0(RootPath, "/Data_acquisition_workflow/Output/Intermediate")
OutPath_Report <- paste0(RootPath, "/Data_acquisition_workflow/Output/Report")
# Create these files
if (!dir.exists(DataPath)) {
dir.create(DataPath, recursive = TRUE)}
if (!dir.exists(OutPath_Check)) {
dir.create(OutPath_Check, recursive = TRUE)}
if (!dir.exists(OutPath_Figures)) {
dir.create(OutPath_Figures, recursive = TRUE)}
if (!dir.exists(OutPath_Intermediate)) {
dir.create(OutPath_Intermediate, recursive = TRUE)}
if (!dir.exists(OutPath_Report)) {
dir.create(OutPath_Report, recursive = TRUE)}
```
## 0.3 Load packages
Load packages.
```{r lapply_library, results=FALSE, collapse = TRUE}
lapply(c("ComplexHeatmap", "magrittr"),
library, character.only = TRUE)
```
# 2.0 Taxon example
If you want to filter the dataset to a particular taxon of interest, you can do so quite easily using **dplyr** from the **tidyverse** group of packages. To filter to a selected group of bees, in our case the tribe Anthophorini...
```{r 2.0, eval = FALSE}
# Load some package data — the taxonomy and a flagged example dataset
# Download the full beesTaxonomy file
taxonomyFile <- BeeBDC::beesTaxonomy()
```
```{r 2.0secret, collapse = TRUE, eval = TRUE}
# load in the small test dataset in the background
system.file("extdata", "testTaxonomy.rda", package="BeeBDC") |>
load()
# Rename the file
taxonomyFile <- testTaxonomy
rm(testTaxonomy)
```
```{r 2.0ii}
# Load the example beesFlagged dataset
beesFlagged <- BeeBDC::beesFlagged
selectedGenera <- taxonomyFile %>%
# Select only tribe anthophorini (for example)
dplyr::filter(tolower(tribe) == tolower("anthophorini")) %>%
distinct(genus)
# Filter the data
taxonData <- beesFlagged %>%
dplyr::filter(genus %in% selectedGenera$genus)
# View the data
taxonData
```
# 3.0 Country example
Similarly to the above you can filter for only countries of interest. Keep in mind, that sometimes the *country* column may not hold all of the records that fall in that country, if it, or the coordinates, have been entered incorrectly.
```{r 3.0}
# Select your study area
studyArea <- c("Canada", "United states", "Mexico", "Guatemala")
# Filter the data to that area
countryData <- beesFlagged %>%
dplyr::filter(country %in% studyArea)
# View the data
countryData
```
# 4.0 Filtering example
## 4.1 Simple filter
The **BeeBDC** package provides a simple function that can re-build the *.summary* column based on the filtering columns that are present in the dataset (those starting with "."). You can also choose which filters you DO NOT want to implement using the dontFilterThese argument. In this example, we are also removing all of the filtering columns in the output dataset (removeFilterColumns = TRUE) and filtering to only completely clean occurrences (filterClean = TRUE). For the latter, we are only keeping *.summary* == TRUE.
```{r 4.1}
filteredData <-
BeeBDC::summaryFun(data = beesFlagged,
# Choose the columns to NOT filter (or NULL to filter all columns)
dontFilterThese = c(".gridSummary", ".lonFlag", ".latFlag", ".uncer_terms",
".uncertaintyThreshold"),
# In the output, do you want to REMOVE all filtering columns (TRUE), or keep them (FALSE)
removeFilterColumns = TRUE,
# In the output, do you want to only keep clean data according to your filtering (TRUE),
# Or keep all data and simply update the .summary column (FALSE)
filterClean = TRUE)
```
## 4.2 Uncertainty threshold
You may also want to change the *.uncertaintyThreshold* as we have chosen a somewhat strict default of 1 km in our dataset. Here, we will instead flag to 10 km (threshold = 10000 [m]). Additionally, we use the **magrittr** package pipe (%>%) to feed the outputs directly into `summaryFun()` to filter our data in one action!
```{r 4.2}
filteredData <- beesFlagged %>%
  # Remove any existing .uncertaintyThreshold column
dplyr::select(!tidyselect::any_of(".uncertaintyThreshold")) %>%
# Chose the coordinate uncertainty to filter to...
BeeBDC::coordUncerFlagR(data = .,
uncerColumn = "coordinateUncertaintyInMeters",
# 10 km here
threshold = 10000) %>%
# Now re-do the .summary column and filter the data using this new value
BeeBDC::summaryFun(
data = .,
dontFilterThese = c(".gridSummary", ".lonFlag", ".latFlag", ".uncer_terms"),
removeFilterColumns = TRUE,
filterClean = TRUE)
```
## 4.2 Date filter
### a. bdc_year_outOfRange
Another column that users are likely to want to pay close attention to is the *.year_outOfRange* column, which is set at 1950 in our dataset. Here, **bdc** provides the function, and users can change the year_threshold argument to, for example, 1970. As above, we then use `summaryFun()` to get results in one go.
```{r 4.2a}
filteredData <- beesFlagged %>%
  # Remove any existing .year_outOfRange column
dplyr::select(!".year_outOfRange") %>%
# Chose the minimum year to filter to...
bdc::bdc_year_outOfRange(data = .,
eventDate = "year",
year_threshold = 1970) %>%
# Now re-do the .summary column and filter the data using this new value
BeeBDC::summaryFun(
data = .,
dontFilterThese = c(".gridSummary", ".lonFlag", ".latFlag", ".uncer_terms",
".uncertaintyThreshold"),
removeFilterColumns = TRUE,
filterClean = TRUE)
```
### b. year range
Or, if you're interested in a particular time period, again **dplyr** comes to the rescue with some very straightforward filtering within a year range.
```{r 4.2b}
filteredData <-
# The input dataset
beesFlagged %>%
# Chose the year range...
dplyr::filter(year > 1950 & year < 1970) %>%
# Now re-do the .summary column and filter the data using this new value
BeeBDC::summaryFun(
# Select the input dataset to filter
data = .,
# Choose the columns to NOT filter (or NULL to filter all columns)
dontFilterThese = c(".gridSummary", ".lonFlag", ".latFlag", ".uncer_terms",
".uncertaintyThreshold"),
# In the output, do you want to REMOVE all filtering columns (TRUE), or keep them (FALSE)
removeFilterColumns = TRUE,
# In the output, do you want to only keep clean data according to your filtering (TRUE),
# Or keep all data and simply update the .summary column (FALSE)
filterClean = TRUE)
```
Users may chain any number of filtering steps from the main workflow ahead of `summaryFun()`; just use pipes '%>%' between the functions and '.' as the data input, so that the output of each step feeds into the next.
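For instance, a minimal sketch that chains two of the flagging functions used earlier straight into `summaryFun()` (the thresholds are illustrative only) could look like this:
```{r 4.2c, eval = FALSE}
chainedData <- beesFlagged %>%
  # Remove any existing versions of the flag columns we are about to rebuild
  dplyr::select(!tidyselect::any_of(c(".uncertaintyThreshold", ".year_outOfRange"))) %>%
  # Re-flag coordinate uncertainty at 5 km
  BeeBDC::coordUncerFlagR(data = .,
                          uncerColumn = "coordinateUncertaintyInMeters",
                          threshold = 5000) %>%
  # Re-flag records collected before 1960
  bdc::bdc_year_outOfRange(data = .,
                           eventDate = "year",
                           year_threshold = 1960) %>%
  # Rebuild .summary and filter to only clean records
  BeeBDC::summaryFun(data = .,
                     dontFilterThese = c(".gridSummary", ".lonFlag", ".latFlag", ".uncer_terms"),
                     removeFilterColumns = TRUE,
                     filterClean = TRUE)
```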
# 5. Summary figures
Now, if you wanted to rebuild some figures, say after you've added or filtered data, then you can use some of the below processes.
## 5.1 Duplicate chordDiagrams
Our `chordDiagramR()` function is very useful and it relies on two great packages, **circlize** and **ComplexHeatmap**. Unfortunately, the latter is not available on CRAN and so must be downloaded using **BiocManager**.
```{r 5.1, eval = FALSE}
if(!require("BiocManager", quietly = TRUE)){
install.packages("BiocManager")}
BiocManager::install("ComplexHeatmap", force = TRUE)
renv::snapshot()
```
We don't actually have an example duplicates dataset with the package, so I'll magic one up behind the scenes!
```{r 5.1ii, eval = FALSE}
duplicates <- fileFinder(path = "PATH TO A FOLDER CONTAINING THE duplicateRun_ — could be supp. materials folder",
fileName = "duplicateRun_") %>%
readr::read_csv() %>%
# Select only the stingless bee data
dplyr::filter(database_id %in% stinglessData$database_id |
database_id_match %in% stinglessData$database_id)
```
Then, set some parameters for figure borders and run your data through `chordDiagramR()`.
```{r 5.1on.exit, include = FALSE}
oldpar <- par(no.readonly = TRUE)
on.exit(oldpar)
```
```{r 5.1iii, eval = FALSE}
# Choose the global figure parameters
par(mar = c(2, 2, 2, 2)/2, mfrow = c(1,1))
# Create the chordDiagram. You can leave many of the below values out, but we show here
# the defaults
BeeBDC::chordDiagramR(
# The duplicate data from the dupeSummary function output
dupeData = duplicates,
outPath = OutPath_Figures,
fileName = "ChordDiagram.pdf",
# These can be modified to help fit the final pdf that's exported.
width = 9,
height = 7.5,
bg = "white",
# How few distinct dataSources should a group have to be listed as "other"
smallGrpThreshold = 3,
title = "Duplicated record sources",
  # The default list of colour palettes to choose from using the paletteer package
palettes = c("cartography::blue.pal", "cartography::green.pal",
"cartography::sand.pal", "cartography::orange.pal", "cartography::red.pal",
"cartography::purple.pal", "cartography::brown.pal"),
canvas.ylim = c(-1.0,1.0),
canvas.xlim = c(-0.6, 0.25),
text.col = "black",
legendX = grid::unit(6, "mm"),
legendY = grid::unit(18, "mm"),
legendJustify = c("left", "bottom"),
niceFacing = TRUE)
```
## 5.2 Duplicate histogram
In this example, we will use one of the example datasets to show you how this works. We will use beesFlagged, which has been filtered from a larger dataset and contains duplicates from that larger dataset. To print the plot in R, you need to specify returnPlot = TRUE; otherwise it will only be saved to disk.
```{r 5.2}
data("beesFlagged", package = "BeeBDC")
# Create a figure showing the total number of duplicates, kept duplicates, and unique
# records for each datasource (simplified to the text before the first underscore) and
# the proportion of the above for each data source
BeeBDC::dupePlotR(
data = beesFlagged,
# The outPath to save the plot as
outPath = tempdir(),
fileName = "Fig3_duplicatePlot.pdf",
# Colours in order: duplicate, kept duplicate, unique
dupeColours = c("#F2D2A2","#B9D6BC", "#349B90"),
# Plot size and height
base_height = 7, base_width = 7,
legend.position = c(0.85, 0.8),
# Extra variables can be fed into forcats::fct_recode() to change names on plot
GBIF = "GBIF", SCAN = "SCAN", iDigBio = "iDigBio", USGS = "USGS", ALA = "ALA",
ASP = "ASP",
returnPlot = TRUE
)
```
## 5.3 Flags by source
The `plotFlagSummary()` function is one of the most important for quickly summarising and checking that your data and flags have worked together correctly. It can be a good starting point for error-checking. You will also see in `plotFlagSummary()` that you can filter to particular species and also output quick point maps of those species.
### a. All taxa in dataset
```{r 5.3a}
# Visualise all flags for each dataSource (simplified to the text before the first underscore)
BeeBDC::plotFlagSummary(
data = beesFlagged,
# Colours in order of pass (TRUE), fail (FALSE), and NA
flagColours = c("#127852", "#A7002D", "#BDBABB"),
fileName = paste0("Fig4_FlagsPlot_", Sys.Date(),".pdf"),
outPath = tempdir(),
width = 15, height = 9,
# Extra variables can be fed into forcats::fct_recode() to change names on plot
GBIF = "GBIF", SCAN = "SCAN", iDigBio = "iDigBio", USGS = "USGS", ALA = "ALA",
ASP = "ASP",
returnPlot = TRUE
)
```
### b. Single species summary
In fact, let's build one of these single-species examples below using the same data and the omnipresent *Apis mellifera*.
```{r 5.3b}
# Visualise all flags for each dataSource (simplified to the text before the first underscore)
# A clever user might also realise the potential to summarise and produce outputs in other columns
BeeBDC::plotFlagSummary(
# WARNING: alternate path if wanting to produce figures for the selected taxonData (2.0 above)
# Select only the taxonData data
data = beesFlagged,
# Colours in order of pass (TRUE), fail (FALSE), and NA
flagColours = c("#127852", "#A7002D", "#BDBABB"),
fileName = paste0("FlagsPlot_Amell", Sys.Date(),".pdf"),
outPath = tempdir(),
width = 15, height = 9,
# OPTIONAL:
# # Filter to species
speciesName = "Apis mellifera Linnaeus, 1758",
# column to look in
nameColumn = "scientificName",
# Save the filtered data
saveFiltered = FALSE,
# Filter column to display on map
filterColumn = ".summary",
plotMap = TRUE,
# amount to jitter points if desired, e.g. 0.25 or NULL
jitterValue = NULL,
# Map opacity value for points between 0 and 1
mapAlpha = 1,
returnPlot = TRUE,
# Extra variables can be fed into forcats::fct_recode() to change names on plot
GBIF = "GBIF", SCAN = "SCAN", iDigBio = "iDigBio", USGS = "USGS", ALA = "ALA",
ASP = "ASP", CAES = "CAES", 'B. Mont.' = "BMont", 'B. Minkley' = "BMin", Ecd = "Ecd",
Gaiarsa = "Gai", EPEL = "EPEL"
)
```
## 5.4 Maps
We can also make some overall summary maps at the country level using `summaryMaps()`. If you get an error about breaks not being unique, then reduce class_n.
```{r 5.4}
BeeBDC::summaryMaps(
data = beesFlagged,
width = 10, height = 10,
class_n = 3,
class_Style = "jenks",
outPath = tempdir(),
fileName = "CountryMaps_jenks.pdf",
returnPlot = TRUE
)
```
# 6.0 Save data
Finally, save whichever dataset you would like to keep. Here, `mapData` is a placeholder for the final dataset from your own workflow (for example, `filteredData` from above).
```{r 6.0, eval = FALSE}
mapData %>%
readr::write_excel_csv(paste0(DataPath, "/Output/Intermediate/", "cleanTaxon_",
Sys.Date(), ".csv"))
```
```{r cleanup, include=FALSE, collapse = TRUE}
# Remove the webpage folder
unlink(paste0(dirname(getwd()), "/inst/extdata/WebDir"), recursive = TRUE)
```
|
/scratch/gouwar.j/cran-all/cranData/BeeBDC/vignettes/basic_workflow.Rmd
|
# Workaround to get rid of "No visible binding for global variable" notes
# in package check. These notes are caused by uses of dplyr and tidyr.
if(getRversion() >= "2.15.1") utils::globalVariables(c(
"conc", "Treatment", "SurvivalTime", "idAll", "NSurv", "simQ50", "simQinf95",
"simQsup95", "time", "q50", "qinf95", "qsup95", "Nsurv_q50_valid",
"Nsurv_qinf95_valid", "Nsurv_qsup95_valid", "Conc", "Dataset", "Nsurv",
"qnorm", "qunif", "na.omit", "value", "q_0.025", "q_0.975", "median", "data",
"ppcMatching_valid", "SE_id", "id", "hb_value", "approx", "f_rate", "targConc"))
|
/scratch/gouwar.j/cran-all/cranData/BeeGUTS/R/BeeGUTS-Internal.R
|
#' 'BeeGUTS' package; a package to perform GUTS modelling for Bee experiments.
#'
#' @description Provide tools to analyse the survival toxicity tests performed for
#' bee species. It can be used to fit a Toxicokinetic-Toxicodynamic (TKTD) model
#' adapted for bee standard studies (acute oral, acute contact, and chronic oral studies).
#' The TKTD model used is the General Unified Threshold model of Survival (GUTS).
#'
#' The package follows the concept and assumptions presented in Baas et al (submitted)
#'
#' @docType package
#' @name BeeGUTS-package
#' @aliases BeeGUTS
#' @useDynLib BeeGUTS, .registration = TRUE
#' @import methods
#' @importFrom Rcpp loadModule
#' @import rstantools
#' @import RcppParallel
#' @importFrom rstan sampling
#' @importFrom magrittr %>%
#'
#' @references
#' Baas, J., Goussen, B., Miles, M., Preuss, T.G., Roessing, I. (submitted).
#' BeeGUTS – new integrative TKTD model for honey bees approach moving from single point estimates of toxicity and exposure to a holistic link between exposure and effect.
#'
#' Jager, T., Albert, C., Preuss, T.G. and Ashauer, R. (2011). General Unified Threshold model of Survival - a toxicokinetic-toxicodynamic framework for ecotoxicology.
#' \doi{10.1021/es103092a}
#'
#' Jager, T. and Ashauer, R. (2018). Modelling survival under chemical stress. A comprehensive guide to the GUTS framework. Version 1.0
#' \url{https://leanpub.com/guts_book}
#'
#' EFSA PPR Scientific Opinion (2018). Scientific Opinion on the state of the art of Toxicokinetic/Toxicodynamic (TKTD) effect models for regulatory risk assessment of pesticides for aquatic organisms.
#' \url{https://www.efsa.europa.eu/en/efsajournal/pub/5377}
#'
#' Stan Development Team (2020). RStan: the R interface to Stan. R package version 2.21.2.
#' \url{https://mc-stan.org}
NULL
#' Survival datasets for \emph{Honey bees} exposed to
#' constant concentration of Betacyfluthrin for 10 days.
#'
#' @name betacyfluthrinChronic
#' @docType data
#' @usage data(betacyfluthrinChronic)
#' @format A list of class \code{beeSurvData} constructed by \code{dataGUTS} containing:
#' \describe{
#' \item{\code{nDatasets}}{An integer representing the number of datasets used.}
#' \item{\code{survData}}{A data frame containing the survival information over time
#' for five treatments and a control in a wide format.}
#' \item{\code{survData_long}}{A data frame containing the survival information over time
#' for five treatments and a control in a long format.}
#' \item{\code{concData}}{A data frame containing the concentration information over time
#' for five treatments and a control in a wide format.}
#' \item{\code{concData_long}}{A data frame containing the concentration information over time
#' for five treatments and a control in a long format.}
#' \item{\code{unitData}}{A character string containing the units of the concentration data.}
#' \item{\code{typeData}}{A character string containing the type of data (here Chronic_Oral).}
#' \item{\code{beeSpecies}}{A character string containing the species of bee of interest (here Honey_Bee).}
#' \item{\code{concModel}}{A data frame containing the concentration information recalculated
#' for the species of bee and test type of interest in a wide format.}
#' \item{\code{concModel_long}}{A data frame containing the concentration information recalculated
#' for the species of bee and test type of interest in a long format.}
#' \item{\code{messages}}{A data frame containing the warning messages returned by the function.}
#' }
#' @references Bayer data.
#' @keywords dataset
NULL
#' Model calibration results datasets for \emph{Honey bees} exposed to
#' constant concentration of Betacyfluthrin for 10 days.
#'
#' @name fitBetacyfluthrin_Chronic
#' @docType data
#' @usage data(fitBetacyfluthrin_Chronic)
#' @format A list of class \code{beeSurvFit} constructed by \code{fitBeeGUTS} containing:
#' \describe{
#' \item{\code{stanFit}}{A 'stanfit' object containing the results of the calibration.}
#' \item{\code{data}}{A 'beeSurvData' object with the user data used for the calibration.}
#' \item{\code{dataFit}}{A list containing the priors and data formatted for the calibration algorithm.}
#' \item{\code{setupMCMC}}{A list containing the setup used for the MCMC.}
#' \item{\code{modelType}}{A character string containing the type of GUTS model used (here 'SD').}
#' \item{\code{distribution}}{A character string containing the distribution used (IT only, here 'NA').}
#' \item{\code{messages}}{A character string containing the error messages if Rhat >1.1 (here 'NA').}
#' }
#' @references Bayer data.
#' @keywords dataset
NULL
|
/scratch/gouwar.j/cran-all/cranData/BeeGUTS/R/BeeGUTS-package.R
|
#' Computes PPC and NRMSE as defined in EFSA 2018
#'
#' @param x an object of class \code{beeSurvFit} or \code{beeSurvPred}
#'
#' @return The function returns a list with three items:
#' \item{PPC}{The criterion, in percent, compares the predicted median number of survivors associated
#' to their uncertainty limits with the observed numbers of survivors.
#' Based on experience, a PPC with more than \eqn{50\%} of the
#' observations within the uncertainty limits indicates good model performance (EFSA 2018). A PPC of
#' \eqn{100\%} may hide overly large prediction uncertainties (the band then covers all the data).}
#' \item{NRMSE}{The criterion, in percent, is based on the classical root-mean-square error (RMSE),
#' used to aggregate the magnitudes of the errors in predictions for various time-points
#' into a single measure of predictive power. In order to provide a criterion expressed
#' as a percentage, the NRMSE is the RMSE normalised by the mean of the observations.
#' EFSA (2018) recognised that an NRMSE of less than \eqn{50\%} indicates good model performance.}
#' \item{SPPE}{A list with the Survival Probability Prediction Error per dataset and condition.
#' Each dataset is in a sublist.}
#'
#'
#' @references
#' EFSA PPR Scientific Opinion (2018)
#' \emph{Scientific Opinion on the state of the art of Toxicokinetic/Toxicodynamic (TKTD) effect models for regulatory risk assessment of pesticides for aquatic organisms}
#' \url{https://www.efsa.europa.eu/en/efsajournal/pub/5377}
#'
#' @examples
#' data(fitBetacyfluthrin_Chronic)
#' out <- criteriaCheck(fitBetacyfluthrin_Chronic)
#'
#' @export
#'
criteriaCheck<- function(x){
# --- PPC
dfGlobal<- ppc(x) %>%
dplyr::mutate(ppcMatching_valid = ifelse(value<q_0.025|value>q_0.975, 0, 1),
SE_id = (value - median)^2)
dfPPC <- dfGlobal %>%
dplyr::select(data, ppcMatching_valid) %>%
dplyr::group_by(data) %>%
dplyr::summarise(PPC = mean(ppcMatching_valid)*100)
# --- NRMSE
dfNRMSE <- dfGlobal %>%
dplyr::select(value, data, SE_id) %>%
dplyr::group_by(data) %>%
dplyr::summarise(NRMSE = sqrt(mean(SE_id, na.rm = TRUE)) / mean(value , na.rm = TRUE) * 100)
# ---- SPPE
dfDataSurv_long_full <- dplyr::bind_rows(x$data$survData_long)
# extract the indices of the various datasets from the global table
dfDataSurv_index = dfDataSurv_long_full %>%
dplyr::mutate(idAll = dplyr::row_number() ) %>%
dplyr::group_by(Dataset) %>%
dplyr::summarise(idS_lw = min(idAll),
idS_up = max(idAll))
#dfModelSurv <- as.data.frame(x$survModel)
lsNsurv_sim <- rstan::extract(x$stanFit, pars = 'Nsurv_sim')
lsDataSurv <- list()
for (i in 1:x$data$nDatasets) {
# Extract data
dfDataSurv_long <- as.data.frame(x$data$survData_long[[i]])
#Extract the right simulated values for each dataset
lsNsurv_sim_dataset <- lsNsurv_sim[[1]][,dfDataSurv_index$idS_lw[i]:dfDataSurv_index$idS_up[i]]
# Compute quantiles of simulations and compile all in a dataframe
dfDataSurv_long$simQ50 <- apply(lsNsurv_sim_dataset, 2, quantile, 0.5)
lsDataSurv[[i]] <- dfDataSurv_long[dfDataSurv_long$SurvivalTime == max(unique(dfDataSurv_long$SurvivalTime)),]
lsDataSurv[[i]]$NSurvInit <- dfDataSurv_long[dfDataSurv_long$SurvivalTime == min(unique(dfDataSurv_long$SurvivalTime)),"NSurv"]
lsDataSurv[[i]]$SPPE <- (lsDataSurv[[i]]$NSurv - lsDataSurv[[i]]$simQ50) / lsDataSurv[[i]]$NSurvInit * 100
}
return(list(percentPPC = as.data.frame(dfPPC),
percentNRMSE = as.data.frame(dfNRMSE),
SPPE = lsDataSurv)
)
}
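# Illustrative sketch (not part of the package; commented out, toy values only).
# The criteria above reduce to simple vector operations: PPC is the share of
# observations falling inside the 95% prediction band, and NRMSE is the RMSE of
# the predicted medians normalised by the mean of the observations.
#   obs <- c(20, 18, 15, 12, 10)                       # observed survivors
#   predMed <- c(20, 17, 14, 13, 9)                    # predicted medians
#   predLow <- predMed - 2; predHigh <- predMed + 2    # toy 95% band
#   PPC <- mean(obs >= predLow & obs <= predHigh) * 100           # 100% here
#   NRMSE <- sqrt(mean((obs - predMed)^2)) / mean(obs) * 100      # ~6% here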
|
/scratch/gouwar.j/cran-all/cranData/BeeGUTS/R/EFSAcriteria.R
|
#' Predict the Lethal Concentration at which \eqn{x\%} of organisms die for any
#' specified time-point for a \code{beeSurvFit} object
#'
#' Predict median and 95% credible interval of the \eqn{x\%} Lethal Concentration.
#'
#' When class of \code{object} is \code{beeSurvFit}, see \link[=LCx.beeSurvFit]{LCx.beeSurvFit}.
#'
#' @rdname LCX
#'
#' @param object An object used to select a method
#' @param \dots Further arguments to be passed to generic methods
#'
#' @return A \code{LCx} object containing the results of the lethal concentration predictions
#' @export
#'
LCx <- function(object, ...){
UseMethod("LCx")
}
#' Predict the Lethal Concentration at which \eqn{x\%} of organisms die for any
#' specified time-point for a \code{beeSurvFit} object
#'
#' @param object An object of class \code{beeSurvFit}
#' @param X Percentage of individuals dying (e.g., \eqn{50} for the \eqn{LC_{50}})
#' @param timeLCx A scalar giving the time at which \eqn{LC_{x}} is predicted.
#' If \code{NULL}, the latest time point of the experiment used in the calibration is used
#' @param concRange A vector of length 2 with minimal and maximal value of the
#' range of concentration. If \code{NULL}, the range is defined between 0 and the
#' highest tested concentration of the calibration experiment.
#' @param nPoints Number of concentration points in \code{concRange} between 0 and the
#' maximal concentration. Default is 100.
#' @param ... Further arguments to be passed to generic methods
#' @param testType Test type for which the \eqn{LC_{X}} is calculated
#' amongst "Acute_Oral", "Acute_Contact", and "Chronic_Oral". Note that for
#' "Acute_Oral" and "Acute_Contact", the concentration will be reconstructed as
#' in the \link[=dataGUTS]{dataGUTS} function (not recommended as this might not
#' make sense for \eqn{LC_{X}} estimations). Default is "Chronic_Oral".
#'
#' @return A object of class \code{LCx} containing the results of the lethal concentration predictions
#' @export
#'
#' @examples
#' \donttest{
#' data(fitBetacyfluthrin_Chronic)
#' out <- LCx(fitBetacyfluthrin_Chronic)
#' }
LCx.beeSurvFit <- function(object,
X = 50,
testType = "Chronic_Oral",
timeLCx = NULL,
concRange = NULL,
nPoints = 100,
...) {
# Check for correct class
if (!is(object,"beeSurvFit")) {
stop("predict.beeSurvFit: an object of class 'beeSurvFit' is expected")
}
# Set concentration range to test
if(is.null(concRange)){
concRange = seq(0, max(object$dataFit$conc), length.out = nPoints)
} else{
if(length(concRange) != 2){
stop("'concRange' must a vector of length 2 with minimal and maximal value of the range of concentration")
}
if(min(concRange) != 0){
stop("The minimal value of 'concRange' must be 0.")
}
concRange = seq(concRange[1], concRange[2], length.out = nPoints)
}
# Set time of LCx calculation
if(is.null(timeLCx)){
timeLCx = max(object$dataFit$tconc)
cat("No time of LCx calculation entered, maximum time in the calibration",
"dataset of",timeLCx, "taken")
}
# calculate dose
## run prediction with odeGUTS::predict_Nsurv_ode function
if(object$modelType == "SD"){
morseObject <- list(mcmc = rstan::As.mcmc.list(object$stanFit, pars = c("kd_log10", "zw_log10", "bw_log10")),
model_type = object$modelType)
class(morseObject) <- "survFit"
for(i in 1:object$setupMCMC$nChains) {
colnames(morseObject$mcmc[[i]]) <- c("kd_log10", "z_log10", "kk_log10")
}
} else if(object$modelType == "IT") {
morseObject <- list(mcmc = rstan::As.mcmc.list(object$stanFit, pars = c("kd_log10", "mw_log10", "beta_log10")),
model_type = object$modelType)
class(morseObject) <- "survFit"
for(i in 1:object$setupMCMC$nChains) {
colnames(morseObject$mcmc[[i]]) <- c("kd_log10", "alpha_log10", "beta_log10")
}
} else {
stop("Wrong model type. Model type should be 'SD' or 'IT'")
}
# Perform predictions using the odeGUTS package
k <- 1:length(concRange)
if(testType == "Chronic_Oral") {
dtheo <- lapply(k, function(kit) { # conc
tmp <- odeGUTS::predict_ode(morseObject, data.frame(time = c(0,timeLCx),
conc = concRange[kit],
replicate = "rep")
)
tmp <- tmp$df_quantile[tmp$df_quantile[,"time"] == timeLCx,]
})
} else if(testType == "Acute_Oral") {
warning("Calculating LCx for 'Acute_Oral' reconstructed concentrations is
not in line with guidelines and might not make sense. Prefer to use
'Chronic_Oral' for the accepted way of calculating LCx")
dtheo <- lapply(k, function(kit) { # conc
tmpConc <- concAO(as.data.frame(concRange[kit]), expTime = timeLCx, ...)
tmp <- odeGUTS::predict_ode(morseObject, data.frame(time = tmpConc[,1],
conc = tmpConc[,2],
replicate = rep("rep", nrow(tmpConc)))
)
tmp <- tmp$df_quantile[tmp$df_quantile[,"time"] == timeLCx,]
})
} else if(testType == "Acute_Contact") {
warning("Calculating LCx for 'Acute_Contact' reconstructed concentrations is
not in line with guidelines and might not make sense. Prefer to use
'Chronic_Oral' for the accepted way of calculating LCx")
dtheo <- lapply(k, function(kit) { # conc
tmpConc <- concAC(as.data.frame(concRange[kit]), expTime = timeLCx, ...)
tmp <- odeGUTS::predict_ode(morseObject, data.frame(time = tmpConc[,1],
conc = tmpConc[,2],
replicate = rep("rep", nrow(tmpConc)))
)
tmp <- tmp$df_quantile[tmp$df_quantile[,"time"] == timeLCx,]
})
} else {
stop("You need to specifiy a correct data 'test_type' amongst 'Acute_Oral', 'Acute_Contact', or 'Chronic_Oral'.")
}
dtheo <- do.call(rbind.data.frame, dtheo)
colnames(dtheo) <- c("time","concentration","replicate","q50","qinf95","qsup95")
# Calculate LCx
X_prop = ((100 - X)/100)
dfLCx <- pointsLCx(dtheo, X_prop)
out <- list(X_prop = X_prop,
timeLCx = timeLCx,
testType = testType,
modelType = object$modelType,
beeType = object$data$beeSpecies,
dfLCx = dfLCx,
dfDose = dtheo)
class(out) <- c("LCx", class(out))
return(out)
}
# points for LCx. From morse
#
pointsLCx <- function(df_dose, X_prop){
if(min(df_dose$q50) < X_prop & X_prop < max(df_dose$q50)){
LCX_q50 = approx( df_dose$q50, df_dose$concentration, xout = X_prop, ties = mean)$y
} else {
LCX_q50 = NA
warning(paste("No median for survival probability of", X_prop,
" in the range of concentrations under consideration: [",
min(df_dose$concentration), ";", max(df_dose$concentration), "]"))
}
if(min(df_dose$qinf95) < X_prop & X_prop < max(df_dose$qinf95)){
LCX_qinf95 = approx( df_dose$qinf95, df_dose$concentration, xout = X_prop, ties = mean)$y
} else{
LCX_qinf95 = NA
warning(paste("No 95%inf for survival probability of", X_prop ,
" in the range of concentrations under consideration: [",
min(df_dose$concentration), ";", max(df_dose$concentration), "]"))
}
if(min(df_dose$qsup95) < X_prop & X_prop < max(df_dose$qsup95)){
LCX_qsup95 = approx( df_dose$qsup95, df_dose$concentration, xout = X_prop, ties = mean)$y
} else{
LCX_qsup95 = NA
warning(paste("No 95%sup for survival probability of", X_prop,
" in the range of concentrations under consideration: [",
min(df_dose$concentration), ";", max(df_dose$concentration), "]"))
}
df_LCx <- data.frame(quantile = c("median", "quantile 2.5%", "quantile 97.5%"),
LCx = as.numeric(c(LCX_q50, LCX_qinf95, LCX_qsup95)))
# as.numeric is needed here because if all values are NA, LCx has type logical
return(df_LCx)
}
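# Illustrative sketch (not used by the package; hypothetical toy curve).
# pointsLCx() inverts a monotone dose-response curve by linear interpolation:
# given survival quantiles over a concentration grid, approx() returns the
# concentration at which the curve crosses the target survival probability.
#   concGrid <- seq(0, 10, length.out = 11)
#   q50 <- exp(-0.3 * concGrid)                        # toy median survival curve
#   approx(q50, concGrid, xout = 0.5, ties = mean)$y   # ~2.3, the toy LC50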
|
/scratch/gouwar.j/cran-all/cranData/BeeGUTS/R/LCx.R
|
#----------------------------- Internal functions -----------------------------
# Prepare data to be used for the beeGUTS Bayesian inference in stan
dataFitStan <- function(data,
modelType = NULL,
odeControl = NULL,
priorsList = NULL) {
# Check correct user inputs
if (is.null(modelType) || !(modelType %in% c("SD", "IT"))) {
stop("You need to specifiy a correct 'modelType' amongst 'SD' and 'IT'.
'PROPER' is not yet implemented. When selecting 'IT' please also
provide a 'distribution' parameter amongst 'loglogistic' and 'lognormal'.")
}
# Prepare priors
if (is.null(priorsList)) {
priors <- priorsBeeGUTS(data, modelType = modelType)$priorsList
} else {
# ADD CHECK FOR CORRECT PRIOR LIST !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
priors <- priorsList
}
lsOUT <- unlist(list(priors, odeControl), recursive = FALSE)
# Prepare data
## Number of groups.
## Different datasets are treated as additional groups
nDatasets <- data$nDatasets
lsOUT$nDatasets <- nDatasets
nGroups <- c()
for (i in 1:nDatasets) {nGroups<-append(nGroups,length(unique(data$survData_long[[i]]$Treatment)))}
lsOUT$nGroup <- sum(nGroups) # Number of groups
lsOUT$groupDataset <- rep(1:length(nGroups), nGroups) # corresponding dataset for each group
# join the datasets to treat everything as a single group
# Concentrations
dataConc <- dplyr::bind_rows(data$concModel_long) %>%
dplyr::filter(!is.na(Conc)) %>%
dplyr::arrange(Dataset, Treatment, SurvivalTime) %>%
dplyr::mutate(idAll = dplyr::row_number() )
dataConc_id = dataConc %>%
dplyr::group_by(Dataset, Treatment) %>%
dplyr::summarise(idC_lw = min(idAll),
idC_up = max(idAll))
lsOUT$nData_conc <- nrow(dataConc)
lsOUT$conc <- dataConc$Conc
lsOUT$tconc <- dataConc$SurvivalTime
lsOUT$replicate_conc <- dataConc$Treatment
lsOUT$idC_lw <- dataConc_id$idC_lw
lsOUT$idC_up <- dataConc_id$idC_up
# join the datasets to treat everything as a single group
# Survival
dataNsurv <- dplyr::bind_rows(data$survData_long) %>%
dplyr::filter(!is.na(NSurv)) %>%
dplyr::arrange(Dataset, Treatment, SurvivalTime) %>%
dplyr::mutate(idAll = dplyr::row_number() ) %>%
dplyr::group_by(Dataset, Treatment) %>%
dplyr::mutate(Nprec = ifelse( SurvivalTime == min(SurvivalTime), NSurv, dplyr::lag(NSurv) ),
Ninit = max(NSurv)) %>% # since it is grouped by replicate
dplyr::ungroup()
dataNsurv_id = dataNsurv %>%
dplyr::group_by(Dataset, Treatment) %>%
dplyr::summarise(idS_lw = min(idAll),
idS_up = max(idAll))
lsOUT$nData_Nsurv <- nrow(dataNsurv)
lsOUT$Nsurv <- dataNsurv$NSurv
lsOUT$Nprec <- dataNsurv$Nprec
lsOUT$Ninit <- dataNsurv$Ninit
lsOUT$tNsurv <- dataNsurv$SurvivalTime
lsOUT$replicate_Nsurv <- dataNsurv$Treatment
lsOUT$idS_lw <- dataNsurv_id$idS_lw
lsOUT$idS_up <- dataNsurv_id$idS_up
return(lsOUT)
}
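# Illustrative sketch (not executed; toy data) of how Nprec and Ninit are built
# above: within each Dataset/Treatment group, Nprec is the number of survivors
# at the previous time point (equal to NSurv at the first time point) and Ninit
# is the initial number of individuals.
#   toy <- data.frame(Dataset = 1, Treatment = "A",
#                     SurvivalTime = c(0, 1, 2), NSurv = c(10, 8, 7))
#   toy %>%
#     dplyr::group_by(Dataset, Treatment) %>%
#     dplyr::mutate(Nprec = ifelse(SurvivalTime == min(SurvivalTime),
#                                  NSurv, dplyr::lag(NSurv)),
#                   Ninit = max(NSurv))
#   # Nprec: 10, 10, 8 and Ninit: 10 for every row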
# Prepare priors (Adapted from Virgile Baudrot gutsRstan)
priorsBeeGUTS <- function(x, modelType = NULL){
# Remove time = 0
dataSurv <- dplyr::bind_rows(x$survData_long)
dataSurv <- dplyr::filter(dataSurv, SurvivalTime != 0)
dataConc <- dplyr::bind_rows(x$concModel_long)
dataConc <- dplyr::filter(dataConc, SurvivalTime != 0)
#dataSurv <- dplyr::filter(x$survData_long, SurvivalTime != 0)
#dataConc <- dplyr::filter(x$concModel_long, SurvivalTime != 0)
# Parameter calculation of concentration min and max
concMin <- 1e-6 # here consider minimal concentration for prior to be close to 0. Original: min(data$conc[data$conc != 0], na.rm = TRUE) # to remove 0 and NA
concMax <- max(dataConc$Conc, na.rm = TRUE)
timeMin <- min(dataSurv$SurvivalTime)
timeMax <- max(dataSurv$SurvivalTime)
concUniq <- sort(unique(dataConc$Conc))
concUniq_Prec <- dplyr::lag(concUniq)
concMinDelta <- min(concUniq - concUniq_Prec, na.rm = TRUE)
# dominant rate constant: kd
kdMax <- -log(0.001) / timeMin
  kdMin <- 1e-6 # Assume kd is more likely to reflect slow kinetics; original value: -log(0.999) / timeMax
# background hazard rate
hbMax <- -log(0.5) / timeMin
hbMin <- -log(0.999) / timeMax
# killing rate parameter: bw
bwMax <- -log(0.001) / (timeMin * concMinDelta)
bwMin <- -log(0.999) / (timeMax * (concMax - concMin))
# beta
betaMin_log10 <- -2
betaMax_log10 <- 2
priorsMinMax <- list(
concMin = concMin,
concMax = concMax,
kdMin = kdMin,
kdMax = kdMax,
hbMin = hbMin,
hbMax = hbMax,
bwMin = bwMin,
bwMax = bwMax,
zwMin = concMin,
zwMax = concMax,
mwMin = concMin,
mwMax = concMax,
betaMin = betaMin_log10,
betaMax = betaMax_log10)
elMinMax_general <- c("kdMin", "kdMax", "hbMin", "hbMax")
elMinMax_SD <- c(elMinMax_general, c("bwMin", "bwMax", "zwMin", "zwMax"))
elMinMaxt_IT <- c(elMinMax_general, c("mwMin", "mwMax", "betaMin", "betaMax"))
# elMinMax_PROPER <- c(elMinMax_general, c("bwMin", "bwMax", "mwMin", "mwMax", "betaMin", "betaMax"))
priorsMinMax <- switch(modelType,
IT = priorsMinMax[elMinMaxt_IT],
SD = priorsMinMax[elMinMax_SD])#,
#PROPER = priorsMinMax[elMinMax_PROPER])
##
## Construction of the list of priors
##
priorsList <- list(
## dominant rate constant: kd
kdMean_log10 = .priorMean(kdMin, kdMax),
kdSD_log10 = .priorSD(kdMin, kdMax),
## background hazard rate
hbMean_log10 = .priorMean(hbMin, hbMax),
hbSD_log10 = .priorSD(hbMin, hbMax),
## killing rate parameter: bw
bwMean_log10 = .priorMean(bwMin, bwMax),
bwSD_log10 = .priorSD(bwMin, bwMax),
## non effect threshold: zw
zwMean_log10 = .priorMean(concMin, concMax),
zwSD_log10 = .priorSD(concMin, concMax),
## non effect threshold: scale parameter & median of a log-logistic distribution
mwMean_log10 = .priorMean(concMin, concMax),
mwSD_log10 = .priorSD(concMin, concMax),
## shape parameter of a log-logistic distribution
betaMin_log10 = betaMin_log10,
betaMax_log10 = betaMax_log10
)
elList_general <- c("kdMean_log10", "kdSD_log10", "hbMean_log10", "hbSD_log10")
elList_SD <- c(elList_general, c("bwMean_log10", "bwSD_log10", "zwMean_log10", "zwSD_log10"))
elList_IT <- c(elList_general, c("mwMean_log10", "mwSD_log10", "betaMin_log10", "betaMax_log10"))
# elList_PROPER <- c(elList_general, c("bwMean_log10", "bwSD_log10", "mwMean_log10", "mwSD_log10", "betaMin_log10", "betaMax_log10"))
priorsList <- switch(modelType,
IT = priorsList[elList_IT],
SD = priorsList[elList_SD])#,
                         # PROPER = priorsList[elList_PROPER])
return(list(priorsList = priorsList,
priorsMinMax = priorsMinMax))
}
# internal --------------------------------------------------------------------
# Compute priors Mean and SD for lognormal distribution
.priorMean <- function(xMin, xMax){
(log10(xMax) + log10(xMin)) / 2
}
.priorSD <- function(xMin, xMax){
(log10(xMax) - log10(xMin)) / 4
}
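# Illustrative sketch (not executed; hypothetical design values). The lognormal
# priors are parameterised on the log10 scale so that the plausible [min, max]
# range spans roughly +/- 2 SD around the prior mean; e.g. for kd with
# timeMin = 1 day:
#   kdMin <- 1e-6; kdMax <- -log(0.001) / 1   # kdMax ~ 6.91
#   .priorMean(kdMin, kdMax)                  # (log10(6.91) - 6) / 2 ~ -2.58
#   .priorSD(kdMin, kdMax)                    # (log10(6.91) + 6) / 4 ~  1.71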
|
/scratch/gouwar.j/cran-all/cranData/BeeGUTS/R/dataFitStan.R
|
#' Fit a GUTS model for bees survival analysis using Bayesian Inference (stan)
#'
#' @description The function \code{fitBeeGUTS} estimates the parameters of a GUTS model
#' for the stochastic death (SD) or individual tolerance (IT) death mechanisms for
#' survival analysis using Bayesian inference.
#'
#' @param data An object of class \code{beeSurvData}
#' @param modelType A model type between \code{"SD"} for Stochastic Death and
#' \code{"IT"} for Individual Tolerance.
#' @param distribution A distribution for the IT death mechanism. To be chosen between
#' \code{"loglogistic"} and \code{"lognormal"}. Default is \code{"loglogistic"}
#' @param priorsList A list containing the prior distribution for the parameter considered.
#' By default, when no priors are provided (default is \code{NULL}), priors are set automatically
#' based on the experimental design (adapted from Delignette-Muller et al 2017)
#' @param parallel Logical indicating whether parallel computing should be used or not. Default is \code{TRUE}
#' @param nCores A positive integer specifying the number of cores to use.
#' Default is one core less than maximum number of cores available
#' @param nChains A positive integer specifying the number of MCMC chains to run. Default is 3.
#' @param nIter A positive integer specifying the number of iteration to monitor for each MCMC chain. Default is 2000
#' @param nWarmup A positive integer specifying the number of warmup iteration per chain. Default is half the number of iteration
#' @param thin A positive integer specifying the interval between the iterations to monitor. Default is 1 (all iterations are monitored)
#' @param adaptDelta A double, bounded between 0 and 1 and controlling part of the sampling algorithms.
#' See the \code{control} in the function \code{stan} [rstan::stan()] of the package \code{rstan}. The default is 0.95.
#' @param odeIntegrator A string specifying the integrator used to solve the system of
#' differential equations (ODE) in the \code{stan} module. To be chosen between
#' \code{"rk45"} and \code{"bdf"}. Default is \code{"rk45"}.
#' @param relTol A double, bounded between 0 and 1 and controlling the relative tolerance
#' of the accuracy of the solutions generated by the integrator. A smaller tolerance produces
#' more accurate solutions at the expense of computing time. Default is 1e-8
#' @param absTol A double, bounded between 0 and 1 and controlling the absolute tolerance
#' of the accuracy of the solutions generated by the integrator. A smaller tolerance produces
#' more accurate solutions at the expense of computing time. Default is 1e-8
#' @param maxSteps A double controlling the maximum number of steps that can be
#' taken before stopping a runaway simulation. Default is 1000
#' @param ... Additional parameters to be passed to \code{sampling} from \code{stan}
#'
#' @details The automated prior determination is modified from Delignette-Muller et al.
#' by considering that the minimal concentration for the prior can be close to 0 (1e-6)
#' whereas the original paper considered the lowest non-zero concentration.
#' Similarly, the minimal kd considered for the prior calculation was reduced to allow
#' a better chance of capturing slow kinetics.
#'
#' @return The function \code{fitBeeGUTS} returns the parameter estimates
#' of the General Unified Threshold model of Survival (GUTS) in an object
#' of class \code{beeSurvFit}. This object is a list composed of the following:
#' \item{stanFit}{An object of S4 class \code{stanfit}. More information is available
#' in the package \code{rstan}. }
#' \item{data}{The data object provided as argument of the function}
#' \item{dataFit}{A list of data passed to the Stan model object}
#' \item{setupMCMC}{A list containing the setup used for the MCMC chains}
#' \item{modelType}{A character vector specifying the type of GUTS model used between
#' \code{SD} and \code{IT}}
#' \item{distribution}{A character vector specifying the type of distribution used in case \code{IT} was used;
#' \code{NA} otherwise}
#' \item{messages}{A character vector containing warning messages}
#'
#' @references
#' Delignette-Muller, M.L., Ruiz P. and Veber P. (2017).
#' Robust fit of toxicokinetic-toxicodynamic models using prior knowledge contained in the design of survival toxicity tests.
#' \doi{10.1021/acs.est.6b05326}
#'
#' @export
#'
#' @examples
#' \donttest{
#' data(betacyfluthrinChronic)
#' fit <- fitBeeGUTS(betacyfluthrinChronic, modelType = "SD", nIter = 1000, nCores = 2)
#' }
fitBeeGUTS <- function(data, # CHECK CORRECT DATA OBJECT IS USED !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
modelType = NULL,
distribution = "loglogistic",
priorsList = NULL,
parallel = TRUE,
nCores = parallel::detectCores()-1L,
nChains = 3,
nIter = 2000,
nWarmup = floor(nIter / 2),
thin = 1,
adaptDelta = 0.95,
odeIntegrator = "rk45",
relTol = 1e-8,
absTol = 1e-8,
maxSteps = 1000,
...) {
# Check correct user inputs
if (!is(data,"beeSurvData")) {
stop("fitBeeGUTS: a 'data' object of class 'beeSurvData' is expected")
}
if (is.null(modelType) || !(modelType %in% c("SD", "IT"))) {
stop("You need to specifiy a correct 'modelType' amongst 'SD' and 'IT'.
'PROPER' is not yet implemented. When selecting 'IT' please also
provide a 'distribution' parameter amongst 'loglogistic' and 'lognormal'.")
}
if (!(distribution %in% c("loglogistic", "lognormal"))) {
stop("You need to specifiy a correct 'distribution' amongst 'loglogistic' and 'lognormal'.")
}
if (!(odeIntegrator %in% c("rk45"))) {
stop("You need to specifiy a correct 'odeIntegrator' amongst 'rk45'.
'bdf' is not yet implemented")
}
# Regroup control for the ode solver
odeControl <- list(relTol = relTol, absTol = absTol, maxSteps = maxSteps)
# Prepare data for inference with stan
lsFullData <- dataFitStan(data, modelType, odeControl, priorsList)
lsStanData <- lsFullData
lsStanData$replicateConc <- NULL # NECESSARY?????????????????????????????????????????????????????????
lsStanData$replicateNsurv <- NULL # NECESSARY?????????????????????????????????????????????????????????
lsStanData$Ninit <- NULL # NECESSARY?????????????????????????????????????????????????????????
if (modelType == "SD") {
modelObject <- stanmodels$GUTS_SD
}
if (modelType == "IT") {
modelObject <- stanmodels$GUTS_IT
lsStanData$distribution <- switch(distribution, loglogistic = 1, lognormal = 2)
}
# Set options for parallel computing
if (parallel == TRUE) {
op <- options()
options(mc.cores = as.integer(nCores))
on.exit(options(op))
}
# Sample MCMC chains
fit <- rstan::sampling(
object = modelObject,
data = lsStanData,
chains = nChains,
iter = nIter,
warmup = nWarmup,
thin = thin,
control = list(adapt_delta = adaptDelta),
...)
# cleanup parallel computing options
if (parallel == TRUE) {
options(op)
}
# Infos on MCMC chains
setupMCMC <- data.frame(nIter = nIter,
nChains = nChains,
thinInterval = thin,
nWarmup = nWarmup)
## Warnings on fit quality
outRhat <- rstan::summary(fit)$summary[, "Rhat"]
if (!all(outRhat < 1.1, na.rm = TRUE)){
msg <- "
    *** Markov chains did not converge! Do not analyze results! ***
    Plot the MCMC chains and try the following options:
    (1) if one or more chains are a simple stable line, increase 'adapt_delta' (default is 0.95).
    (2) if the variability between chains is large, increase the number of iterations (default is 2000 iterations).
    (3) if 'Conditional_Psurv_hat' is greater than 1, the ODE integration is wrong; reduce the tolerance of the ODE integrator."
warning(msg, call. = FALSE)
print(outRhat)
} else {
msg <- "NA"
}
# Return
lsOut <- list(stanFit = fit,
data = data,
dataFit = lsFullData,
setupMCMC = setupMCMC,
modelType = modelType,
distribution = ifelse(modelType == "IT", distribution, "NA"),
messages = msg)
class(lsOut) <- "beeSurvFit"
return(lsOut)
}
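# Illustrative workflow sketch (commented out; relies on the example dataset
# shipped with the package and on helpers defined elsewhere in this package):
#   data(betacyfluthrinChronic)
#   fit <- fitBeeGUTS(betacyfluthrinChronic, modelType = "SD", nIter = 1000)
#   traceplot(fit)   # check chain mixing before interpreting the fit
#   plot(ppc(fit))   # posterior predictive check of the calibration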
|
/scratch/gouwar.j/cran-all/cranData/BeeGUTS/R/fitBeeGUTS.R
|
# nocov start
.onAttach <- function(...) {
BeeGUTS_lib <- dirname(system.file(package = "BeeGUTS"))
pkgdesc <- suppressWarnings(utils::packageDescription("BeeGUTS", lib.loc = BeeGUTS_lib))
if (length(pkgdesc) > 1) {
builddate <- gsub(';.*$', '', pkgdesc$Packaged)
packageStartupMessage(paste("BeeGUTS (Version ", pkgdesc$Version, ", packaged on the: ", builddate, ")", sep = ""))
}
packageStartupMessage("- For execution on a local, multicore CPU with excess RAM we recommend calling")
packageStartupMessage(" options(mc.cores = parallel::detectCores()-1)")
packageStartupMessage("- In addition to the functions provided by 'BeeGUTS', we recommend using the packages:")
packageStartupMessage(" - 'bayesplot' for posterior analysis, model checking, and MCMC diagnostics.")
packageStartupMessage(" - 'loo' for leave-one-out cross-validation (LOO) using Pareto smoothed")
packageStartupMessage(" importance sampling (PSIS), comparison of predictive errors between models, and")
packageStartupMessage(" widely applicable information criterion (WAIC).")
}
.onUnload <- function (libpath) { library.dynam.unload("BeeGUTS", libpath)}
# nocov end
|
/scratch/gouwar.j/cran-all/cranData/BeeGUTS/R/onAttach.R
|
#' Plotting method for \code{beeSurvData} objects
#'
#' @description This is the generic \code{plot} S3 method for the \code{beeSurvData}
#' class. It plots the number of survivors as a function of time as well as the reconstructed
#' concentrations for \code{"Acute_Oral"} and \code{"Acute_Contact"} test types.
#'
#' @param x An object of class \code{beeSurvData}
#' @param xlab A character string for the label of the x-axis
#' @param ylab1 A character string for the label of the y-axis of the survivor plots
#' @param ylab2 A character string for the label of the y-axis of the concentration plots
#' @param main A character string for the title label plot
#' @param ... Additional parameters to generic plot function (not used)
#'
#' @return A graphic with the input data
#'
#' @import ggplot2
#'
#' @export
#'
#' @examples
#' data(betacyfluthrinChronic)
#' plot(betacyfluthrinChronic)
plot.beeSurvData <- function(x,
...,
xlab = "Time [d]",
ylab1 = "Number of survivors",
ylab2 = "Concentration",
main = paste("Data from a", x$typeData, "test on",
x$beeSpecies)) {
# Check for correct class
if (!is(x,"beeSurvData")) {
stop("plot.beeSurvData: an object of class 'beeSurvData' is expected")
}
plotlist <- list() # list of plots to be returned
for (i in 1:x$nDatasets){
main = paste("Data from a", x$typeData[i], "test on",
x$beeSpecies)
# Extract data
dfDataSurv_long <- as.data.frame(x$survData_long[[i]])
dfDataConc_long <- as.data.frame(x$concData_long[[i]])
dfModelConc_long <- as.data.frame(x$concModel_long[[i]])
ggSurv <- ggplot(data = dfDataSurv_long, aes(x=SurvivalTime, y = NSurv)) +
geom_point() +
xlab(xlab) +
ylab(ylab1) +
facet_grid(~Treatment) +
theme(
strip.background = element_blank(),
strip.text.x = element_blank()
)
ggConc <- ggplot(data = dfModelConc_long, aes(x=SurvivalTime, y = Conc)) +
geom_line() +
geom_point(data = dfDataConc_long) +
xlab(xlab) +
ylab(paste0(ylab2,"\n", x$unitData[[i]])) +
ggtitle(main) +
facet_grid(~Treatment) +
theme(axis.title.x=element_blank(),
axis.text.x=element_blank())
ggOut <- cowplot::plot_grid(ggConc, ggSurv, align = "v", nrow = 2)
plotlist <- append(plotlist, list(ggOut))
}
return(plotlist)
}
#' Plotting method for \code{beeSurvFit} objects
#'
#' @description This is the generic \code{plot} S3 method for the \code{beeSurvFit}
#' class. It plots the number of survivors as a function of time as well as the reconstructed
#' concentrations for \code{"Acute_Oral"} and \code{"Acute_Contact"} test types.
#'
#' @param x An object of class \code{beeSurvFit}
#' @param xlab A character string for the label of the x-axis
#' @param ylab1 A character string for the label of the y-axis of the survivor plots
#' @param ylab2 A character string for the label of the y-axis of the concentration plots
#' @param main A character string for the title label plot
#' @param ... Additional parameters to generic plot functions (not used)
#'
#' @return A graphic with the results of the fit
#'
#' @import ggplot2
#' @importFrom stats quantile
#'
#' @export
#'
#' @examples
#' data(fitBetacyfluthrin_Chronic)
#' plot(fitBetacyfluthrin_Chronic)
plot.beeSurvFit <- function(x,
...,
xlab = "Time [d]",
ylab1 = "Number of survivors",
ylab2 = "Concentration",
main = paste("Calibration results for a", x$data$typeData, "test on",
x$data$beeSpecies) ) {
# Check for correct class
if (!is(x,"beeSurvFit")) {
stop("plot.beeSurvFit: an object of class 'beeSurvFit' is expected")
}
dfDataSurv_long_full <- dplyr::bind_rows(x$data$survData_long)
# extract the indices of the various datasets from the global table
dfDataSurv_index = dfDataSurv_long_full %>%
dplyr::mutate(idAll = dplyr::row_number() ) %>%
dplyr::group_by(Dataset) %>%
dplyr::summarise(idS_lw = min(idAll),
idS_up = max(idAll))
#dfModelSurv <- as.data.frame(x$survModel)
lsNsurv_sim <- rstan::extract(x$stanFit, pars = 'Nsurv_sim')
plotlist <- list() # lists to add the plots for each dataset
for (i in 1:x$data$nDatasets) {
# Extract data
dfDataSurv_long <- as.data.frame(x$data$survData_long[[i]])
dfDataConc_long <- as.data.frame(x$data$concData_long[[i]])
dfModelConc_long <- as.data.frame(x$data$concModel_long[[i]])
#Extract the right simulated values for each dataset
lsNsurv_sim_dataset <- lsNsurv_sim[[1]][,dfDataSurv_index$idS_lw[i]:dfDataSurv_index$idS_up[i]]
# Compute quantiles of simulations and compile all in a dataframe
dfDataSurv_long$simQ50 <- apply(lsNsurv_sim_dataset, 2, quantile, 0.5)
dfDataSurv_long$simQinf95 <- apply(lsNsurv_sim_dataset, 2, quantile, 0.025)
dfDataSurv_long$simQsup95 <- apply(lsNsurv_sim_dataset, 2, quantile, 0.975)
yLimits <- c(0, max(dfDataSurv_long$NSurv, dfDataSurv_long$simQsup95))
ggSurv <- ggplot(data = dfDataSurv_long, aes(x=SurvivalTime, y = NSurv)) +
geom_point() +
# geom_pointrange( aes(x = SurvivalTime, y = q50, ymin = qinf95, ymax = qsup95, group = Treatment), color = "blue", size = 0.2) +
geom_line( aes(x = SurvivalTime, y = simQ50, group = Treatment), color = "blue") +
geom_ribbon( aes(x= SurvivalTime, ymin = simQinf95, ymax = simQsup95, group = Treatment), fill = "blue", alpha = 0.2)+
scale_y_continuous(limits = yLimits) +
xlab(xlab) +
ylab(ylab1) +
facet_grid(~Treatment) +
theme(
strip.background = element_blank(),
strip.text.x = element_blank()
)
ggConc <- ggplot(data = dfModelConc_long, aes(x=SurvivalTime, y = Conc)) +
geom_line() +
geom_point(data = dfDataConc_long) +
xlab(xlab) +
ylab(paste0(ylab2,"\n", x$data$unitData[[i]])) +
ggtitle(main[i]) +
facet_grid(~Treatment) +
theme(axis.title.x=element_blank(),
axis.text.x=element_blank())
ggOut <- cowplot::plot_grid(ggConc, ggSurv, align = "v", nrow = 2)
plotlist <- append(plotlist, list(ggOut))
}
return(plotlist)
}
#' Plotting method for \code{beeSurvValidation} objects
#'
#' @description This is the generic \code{plot} S3 method for the \code{beeSurvValid}
#' class. It plots the number of survivors as a function of time as well as the reconstructed
#' concentrations for \code{"Acute_Oral"} and \code{"Acute_Contact"} test types.
#'
#' @param x An object of class \code{beeSurvValid}
#' @param xlab A character string for the label of the x-axis
#' @param ylab1 A character string for the label of the y-axis of the survivor plots
#' @param ylab2 A character string for the label of the y-axis of the concentration plots
#' @param main A character string for the title label plot
#' @param ... Additional parameters to generic plot functions (not used)
#'
#' @return A graphic with the results of the validation
#'
#' @import ggplot2
#' @importFrom stats quantile
#'
#' @export
#'
#' @examples
#' \donttest{
#' data(betacyfluthrinChronic) # Load dataset for validation
#' data(fitBetacyfluthrin_Chronic)
#' validation <- validate(fitBetacyfluthrin_Chronic, betacyfluthrinChronic)
#' plot(validation)
#' }
plot.beeSurvValidation <- function(x,
...,
xlab = "Time [d]",
ylab1 = "Number of survivors",
ylab2 = "Concentration",
main = paste("Validation of a BeeGUTS model calibrated for",
x$beeSpecies, "on a ", x$typeData, "for", x$beeSpeciesVal) ) {
# Check for correct class
if (!is(x,"beeSurvValidation")) {
stop("plot.beeSurvValidation: an object of class 'beeSurvValidation' is expected")
}
yLimits <- c(0, max(x$sim$NSurv, x$sim$Nsurv_qsup95_check))
EFSA_criteria <- x$EFSA$Percent_PPC
EFSA_criteria$PPC <- round(EFSA_criteria$PPC, digits = 2)
EFSA_criteria$PPC_global <- ""
EFSA_criteria$PPC_global[1] <- round(x$EFSA$Percent_PPC_global, digits = 2)
EFSA_criteria$NRMSE <- round(x$EFSA$Percent_NRMSE$NRMSE, digits = 2)
EFSA_criteria$NRMSE_global <- ""
EFSA_criteria$NRMSE_global[1] <- round(x$EFSA$Percent_NRMSE_global, digits = 2)
EFSA_criteria$SPPE <- round(x$EFSA$Percent_SPPE$SPPE, digits = 2)
###############################################
colnames(x$sim)[3] <- "Treatment" # Rename column name for plotting purposes
ggSurv <- ggplot(data = x$sim, aes(x = time, y = Nsurv_q50_valid, group = Treatment)) +
geom_line(color = "blue") +
geom_point(data = x$sim, aes(x=time, y=Nsurv, group = Treatment)) +
geom_ribbon( aes(x= time, ymin = Nsurv_qinf95_valid, ymax = Nsurv_qsup95_valid, group = Treatment), fill = "blue", alpha = 0.2)+
scale_y_continuous(limits = yLimits) +
xlab(xlab) +
ylab(ylab1) +
facet_grid(~Treatment) +
theme(
strip.background = element_blank(),
strip.text.x = element_blank()
)
ggConc <- ggplot(data = x$dataModel, aes(x=SurvivalTime, y = Conc)) +
geom_line() +
geom_point(data = x$data) +
xlab(xlab) +
ylab(paste0(ylab2,"\n", x$data$unitData)) +
ggtitle(main) +
facet_grid(~Treatment) +
theme(axis.title.x=element_blank(),
axis.text.x=element_blank())
table <- gridExtra::tableGrob(EFSA_criteria, rows=NULL)
ggOut1 <- cowplot::plot_grid(ggConc, ggSurv, align = "v",nrow = 2)
ggOut <- cowplot::plot_grid(ggOut1, table, nrow = 2)
#############################################################################
return(ggOut)
}
#' Plotting method for \code{beeSurvPred} objects
#'
#' @description This is the generic \code{plot} S3 method for the \code{beeSurvPred}
#' class. It plots the predicted number of survivors for the exposure concentration entered by the user.
#'
#' @param x An object of class \code{beeSurvPred}
#' @param xlab A character string for the label of the x-axis
#' @param ylab1 A character string for the label of the y-axis of the survivor plots
#' @param ylab2 A character string for the label of the y-axis of the concentration plots
#' @param main A character string for the title label plot
#' @param ... Additional parameters to generic plot functions (not used)
#'
#' @return A graphic with results of the forward prediction
#'
#' @import ggplot2
#' @importFrom stats quantile
#'
#' @export
#'
#' @examples
#' \donttest{
#' dataPredict <- data.frame(time = c(1:10, 1:10, 1:10),
#' conc = c(rep(5, 10), rep(10, 10), rep(15, 10)),
#' replicate = c(rep("rep1", 10), rep("rep2", 10), rep("rep3", 10)),
#' NSurv = c(rep(5, 10), rep(10, 10), rep(15, 10)))
#' data(fitBetacyfluthrin_Chronic)
#' prediction <- predict(fitBetacyfluthrin_Chronic, dataPredict)
#' plot(prediction)
#' }
plot.beeSurvPred <- function(x,
...,
xlab = "Time [d]",
ylab1 = "Survival probability",
ylab2 = "Concentration",
main = paste("Predictions results for a BeeGUTS", x$modelType,"calibrated for",
x$beeSpecies) ) {
# Check for correct class
if (!is(x,"beeSurvPred")) {
stop("plot.beeSurvPred: an object of class 'beeSurvPred' is expected")
}
ggSurv <- ggplot(data = x$sim, aes(x = time, y = q50, group = replicate)) +
geom_line(color = "blue") +
geom_ribbon( aes(x= time, ymin = qinf95, ymax = qsup95, group = replicate), fill = "blue", alpha = 0.2)+
scale_y_continuous(limits = c(0,1)) +
xlab(xlab) +
ylab(ylab1) +
facet_grid(~replicate) +
theme(
strip.background = element_blank(),
strip.text.x = element_blank()
)
ggConc <- ggplot(data = x$sim, aes(x = time, y = conc)) +
geom_line() +
xlab(xlab) +
ylab(paste0(ylab2,"\n", x$unitData)) +
ggtitle(main) +
facet_grid(~replicate) +
theme(axis.title.x=element_blank(),
axis.text.x=element_blank())
ggOut <- cowplot::plot_grid(ggConc, ggSurv, align = "v", nrow = 2)
return(ggOut)
}
#' Plotting method for traces and densities for \code{beeSurvFit} objects
#'
#' @description This is the generic \code{traceplot} S3 method for the \code{beeSurvFit}
#' class. It plots the traces with as well as the densities for the parameters of
#' the GUTS IT or GUTS SD. The traceplot includes by default the warmup iterations,
#' the density plot does not include them
#'
#' @param object An object of class \code{beeSurvFit} to be plotted
#' @param ... Additional parameters to be parsed to generic \code{rstan} plot functions
#' @param incWarmup_trace A logical indicating whether the warmup iterations should be plotted
#' in the traceplot (default TRUE)
#' @param incWarmup_dens A logical indicating whether the warmup iterations should be plotted
#' in the density plot (default FALSE)
#'
#' @return A graphic with the traceplots and densities of the fit
#' @export
#'
#' @examples
#' data(fitBetacyfluthrin_Chronic)
#' traceplot(fitBetacyfluthrin_Chronic)
traceplot <- function(object, ..., incWarmup_trace = TRUE, incWarmup_dens = FALSE){
UseMethod("traceplot")
}
#' @rdname traceplot
#' @export
traceplot.beeSurvFit <- function(object, ..., incWarmup_trace = TRUE, incWarmup_dens = FALSE) {
if (object$modelType == "SD") {
ggTrace <- rstan::stan_trace(object$stanFit,
pars = c("hb_log10", "kd_log10", "zw_log10", "bw_log10"),
inc_warmup = incWarmup_trace,
nrow = 4,
...) +
ggplot2::ggtitle("Traces")
ggplot2::theme(legend.position = "none")
ggDens <- rstan::stan_dens(object$stanFit,
pars = c("hb_log10", "kd_log10", "zw_log10", "bw_log10"),
inc_warmup = incWarmup_dens,
nrow = 4,
separate_chains = TRUE,
...) +
ggplot2::ggtitle("Densities")
ggOut <- cowplot::plot_grid(ggTrace, ggDens, ncol = 2)
}
if (object$modelType == "IT") {
ggTrace <- rstan::stan_trace(object$stanFit,
pars = c("hb_log10", "kd_log10", "mw_log10", "beta_log10"),
inc_warmup = incWarmup_trace,
nrow = 4,
...) +
ggplot2::ggtitle("Traces")
ggplot2::theme(legend.position = "none")
ggDens <- rstan::stan_dens(object$stanFit,
pars = c("hb_log10", "kd_log10", "mw_log10", "beta_log10"),
inc_warmup = incWarmup_dens,
nrow = 4,
separate_chains = TRUE,
...) +
ggplot2::ggtitle("Densities")
ggOut <- cowplot::plot_grid(ggTrace, ggDens, ncol = 2)
}
return(ggOut)
}
#' Plotting method for \code{ppc} objects
#'
#' @param x An object of class \code{ppc}.
#' @param \dots Further arguments to be passed to generic methods.
#'
#' @return an object of class \code{ggplot}.
#'
#' @examples
#' data(fitBetacyfluthrin_Chronic)
#' out <- ppc(fitBetacyfluthrin_Chronic)
#' plot(out)
#'
#' @export
plot.ppc <- function(x, ...) {
Nsurv_ppc <- x
ppc_pct<- round(nrow(Nsurv_ppc[Nsurv_ppc$col=="green",])/nrow(Nsurv_ppc)*100, digits = 2)
nrmse<- round(sqrt(sum((Nsurv_ppc$value-Nsurv_ppc$median)^2, na.rm = TRUE)/nrow(Nsurv_ppc))/mean(Nsurv_ppc$value,na.rm = TRUE)*100, digits=2)
ggOut <-ggplot() +
geom_segment(aes(x = value, xend = value,
y =q_0.025 , yend =q_0.975 ), data = Nsurv_ppc,
color = Nsurv_ppc$col)+
geom_point(aes(x = value, y = median), Nsurv_ppc)+
geom_abline(intercept = 0, slope = 1, size=0.7)+
expand_limits(y = 0) +
expand_limits(x = 0) +
theme_minimal()+
coord_fixed(ratio=1)+
labs(x = "Observed number of survivors",
y= "Predicted number of survivors") +
theme(axis.title = element_text(size=7))+
ggtitle(paste0("Survival \nPPC= ",ppc_pct,"%", "\nNRMSE= ", nrmse,"%"))
return(ggOut)
}
|
/scratch/gouwar.j/cran-all/cranData/BeeGUTS/R/plotBeeGUTS.R
|
#' Generates an object to be used in posterior predictive check for \code{beeSurvFit}, \code{beeSurvPred}
#'
#' @param x an object used to select a method \code{ppc}
#'
#' @return a \code{data.frame} of class \code{ppc}
#' @export
#'
ppc <- function(x){
UseMethod("ppc")
}
#' Posterior predictive check method for \code{beeSurvFit} objects
#'
#' @param x an object of class \code{beeSurvFit}
#'
#'
#' @return a \code{data.frame} of class \code{ppc}
#'
#' @examples
#' data(fitBetacyfluthrin_Chronic)
#' out <- ppc(fitBetacyfluthrin_Chronic)
#'
#' @export
#'
ppc.beeSurvFit <- function(x){
NsurvPred_all<- as.data.frame(x$stanFit, pars = "Nsurv_ppc")
NsurvPred_quantiles<- NsurvPred_all%>%
tidyr::pivot_longer(cols = tidyr::starts_with('Nsurv'),
names_to = "ppc",
values_to = "value")%>%
dplyr::group_by(ppc)%>%
dplyr::summarise(median = stats::quantile(value, 0.5, na.rm = TRUE),
q_0.025=stats::quantile(value, 0.025, na.rm = TRUE),
q_0.975=stats::quantile(value, 0.975, na.rm = TRUE))
NsurvData_all<- data.frame(value=x$dataFit$Nsurv, id=seq(1,x$dataFit$nData_Nsurv, 1))%>%
dplyr::mutate(ppc=paste0("Nsurv_ppc[",id, "]"))
Nsurv_ppc<- dplyr::full_join( NsurvPred_quantiles, NsurvData_all, by="ppc")%>%
dplyr::mutate(col=ifelse(value<q_0.025|value>q_0.975, "red", "green")) %>%
dplyr::arrange(id)
Nsurv_ppc$data<-"Survival"
class(Nsurv_ppc) <- c("ppc", class(Nsurv_ppc))
return(Nsurv_ppc)
}
|
/scratch/gouwar.j/cran-all/cranData/BeeGUTS/R/ppc.R
|