#'@title Filters the edges
#'@name filtering
#'
#'@description A function that filters the edges after the maximum entropy is
#' obtained
#'
#'@param edgestoselect The selected edges
#'@param edgestofilter The edges used to filter
#'
#'@return Returns the filtered edges
#'@author Murilo Montanini Breve
#'@import igraph
filtering <- function(edgestoselect, edgestofilter) {
filtered <- NULL
for (t in seq_along(edgestofilter)) {
net <- graph(edges = edgestofilter[[t]], directed = FALSE)
adjacency <- as_adjacency_matrix(net)
data <- as.matrix(adjacency)
filtered[[t]] <- matrixmultiplication(data, edgestoselect)
}
return(filtered)
}
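# Usage sketch (illustrative toy data, not part of the package): filtering()
# takes a list of edge vectors (one per sequence) and a 0/1 edge-selection
# matrix such as the one produced by selectingEdges(); it relies on
# matrixmultiplication(), defined in the next file.
library(igraph)
kmers <- c("AA", "AC", "CA")
sel <- matrix(1, 3, 3, dimnames = list(kmers, kmers))  # keep every edge
edg <- list(c("AA", "AC", "AC", "CA"))                 # one sequence, two edges
filtering(edgestoselect = sel, edgestofilter = edg)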
# file: /scratch/gouwar.j/cran-all/cranData/BASiNETEntropy/R/filtering.R
#'@title Compares the matrices
#'@name matrixmultiplication
#'
#'@description A function that compares the matrices 'trainingResult' and
#'the adjacency matrix to produce a filtered adjacency matrix.
#'
#'@param data Adjacency matrix
#'@param histodata 'trainingResult' data
#'
#'@return Returns the filtered adjacency matrix
#'@author Murilo Montanini Breve
matrixmultiplication <- function(data, histodata) {
ordereddata <- data[order(rownames(data)),
order(colnames(data))]
orderedhistogram <- histodata[order(rownames(histodata)),
order(colnames(histodata))]
datanames <- rownames(ordereddata)
histonames <- rownames(orderedhistogram)
selectednames <- intersect(histonames, datanames)
ordereddata <- ordereddata[selectednames, selectednames]
selectedmatrix <- matrix(nrow = length(ordereddata[, 1]),
ncol = length(ordereddata[, 1]))
orderedhistogram <- orderedhistogram[selectednames, selectednames]
for (i in seq_along(ordereddata[, 1])) {
for (j in seq_along(ordereddata[, 1])) {
selectedmatrix[i, j] <- ordereddata[i, j] * orderedhistogram[i, j]
}
}
rownames(selectedmatrix) <- rownames(ordereddata)
colnames(selectedmatrix) <- rownames(ordereddata)
return(selectedmatrix)
}
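# Usage sketch (illustrative toy data): element-wise masking of an adjacency
# matrix by a 0/1 'trainingResult' matrix, after aligning row/column names.
spp <- c("A", "B")
adj  <- matrix(c(0, 1, 1, 0), 2, 2, dimnames = list(spp, spp))  # one edge A-B
mask <- matrix(c(0, 1, 1, 0), 2, 2, dimnames = list(spp, spp))  # keep edge A-B
matrixmultiplication(adj, mask)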
# file: /scratch/gouwar.j/cran-all/cranData/BASiNETEntropy/R/matrixmultiplication.R
#'@title Calculates the maximum entropy
#'@name maxentropy
#'
#'@description A function that calculates the maximum entropy
#'
#'@param histogram The histogram (used in 'training' function)
#'
#'@return Returns the maximum entropy
#'@author Murilo Montanini Breve
maxentropy <- function(histogram) {
totalofpixels <- sum(histogram)
maximum_entropy <- 0
threshold <- NULL
edgesname <- names(histogram)
histogram <- unname(histogram)
descendinghistogram <- sort(histogram, decreasing = TRUE)
curveofentropy <- NULL
for (t in seq_len(4095)) {
# P0 and P1: cumulative probabilities below and above the candidate threshold t
P0 <- 0
P1 <- 0
for (i in seq_len(t)) {
P0 <- P0 + descendinghistogram[i] / totalofpixels
}
for (i in (t + 1):4096) {
P1 <- P1 + descendinghistogram[i] / totalofpixels
}
H0 <- entropy(P0)
H1 <- entropy(P1)
if (is.nan(H1))
H1 <- 0
if (is.nan(H0))
H0 <- 0
HT <- H0 + H1
curveofentropy <- c(curveofentropy, HT)
if (HT > maximum_entropy) {
maximum_entropy <- HT
threshold <- t
}
}
frequency <- descendinghistogram[threshold]
res <- list(maximum_entropy, threshold, frequency, curveofentropy)
names(res) <- c("Max Entropy",
"Threshold",
"Edge frequency",
"Curve of Entropy")
return(res)
}
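# Usage sketch (illustrative): maxentropy() expects a histogram with 4096
# entries and relies on the package's internal entropy() helper, which is not
# shown here; a plausible Shannon-style stand-in is assumed below. The run
# takes a few seconds because of the nested loops.
entropy <- function(p) ifelse(p > 0, -p * log2(p), 0)  # assumed stand-in
set.seed(1)
toyhist <- rpois(4096, lambda = 2)  # toy edge-frequency histogram
res <- maxentropy(toyhist)
res$Threshold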
# file: /scratch/gouwar.j/cran-all/cranData/BASiNETEntropy/R/maxentropy.R
#'@title Rescales the results between values from 0 to 1
#'@name preprocessing
#'
#'@description Given the results, the data are rescaled to values between
#'0 and 1, so that the length of the sequences does not influence the results.
#'Each sequence class is rescaled separately.
#'
#'@param datah Numeric array with the results
#'@param tamM Integer number of mRNA sequences
#'@param tamLNC Integer number of lncRNA sequences
#'@param tamSNC Integer number of sncRNA sequences
#'
#'@return Returns the array with the rescaled values
#'@author Murilo Montanini Breve
preprocessing <- function(datah, tamM, tamLNC, tamSNC) {
range1 <- c()
range2 <- c()
range3 <- c()
mini1 <- c()
maxi1 <- c()
mini2 <- c()
mini3 <- c()
maxi2 <- c()
maxi3 <- c()
if (missing(tamSNC)) {
for (u in seq_len(10)) {
range1 <- c(range1, range(datah[1:tamM, u]))
}
for (u in seq_len(10)) {
range2 <- c(range2, range(datah[(tamM + 1):(tamM + tamLNC), u]))
}
for (j in seq_len(20)) {
if (j %% 2 == 0) {
maxi1 <- c(maxi1, range1[j])
}
if (j %% 2 != 0) {
mini1 <- c(mini1, range1[j])
}
}
for (j in seq_len(20)) {
if (j %% 2 == 0) {
maxi2 <- c(maxi2, range2[j])
}
if (j %% 2 != 0) {
mini2 <- c(mini2, range2[j])
}
}
for (r in seq_len(10)) {
for (i in 1:(tamM + tamLNC)) {
if (i <= tamM)
datah[i, r] = (datah[i, r] - mini1[r]) / (maxi1[r] - mini1[r])
if (i > tamM && i <= tamM + tamLNC)
datah[i, r] = (datah[i, r] - mini2[r]) / (maxi2[r] - mini2[r])
}
}
} else{
for (u in seq_len(10)) {
range1 <- c(range1, range(datah[1:tamM, u]))
}
for (u in seq_len(10)) {
range2 <- c(range2, range(datah[(tamM + 1):(tamM + tamLNC), u]))
}
for (u in seq_len(10)) {
range3 <-
c(range3, range(datah[(tamM + tamLNC + 1):(tamM + tamLNC + tamSNC), u]))
}
for (j in seq_len(20)) {
if (j %% 2 == 0) {
maxi1 <- c(maxi1, range1[j])
}
if (j %% 2 != 0) {
mini1 <- c(mini1, range1[j])
}
}
for (j in seq_len(20)) {
if (j %% 2 == 0) {
maxi2 <- c(maxi2, range2[j])
}
if (j %% 2 != 0) {
mini2 <- c(mini2, range2[j])
}
}
for (j in seq_len(20)) {
if (j %% 2 == 0) {
maxi3 <- c(maxi3, range3[j])
}
if (j %% 2 != 0) {
mini3 <- c(mini3, range3[j])
}
}
for (r in seq_len(10)) {
for (i in 1:(tamM + tamLNC + tamSNC)) {
if (i <= tamM)
datah[i, r] = (datah[i, r] - mini1[r]) / (maxi1[r] - mini1[r])
if (i > tamM && i <= tamM + tamLNC)
datah[i, r] = (datah[i, r] - mini2[r]) / (maxi2[r] - mini2[r])
if (i > tamM + tamLNC)
datah[i, r] = (datah[i, r] - mini3[r]) / (maxi3[r] - mini3[r])
}
}
}
return(datah)
}
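# Usage sketch (illustrative random data): rescale 10 network measures for a
# set of 4 mRNA and 5 lncRNA sequences; each class is rescaled on its own.
set.seed(1)
datah <- matrix(runif(90, 0, 50), nrow = 9, ncol = 10)
scaled <- preprocessing(datah, tamM = 4, tamLNC = 5)
range(scaled)  # all values now fall within [0, 1]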
# file: /scratch/gouwar.j/cran-all/cranData/BASiNETEntropy/R/preprocessing.R
#'@title Selects the edges of the adjacency matrix
#'@name selectingEdges
#'
#'@description A function that selects the edges of the adjacency matrix
#'
#'@param MAX The maximum entropy
#'@param data The adjacency matrix
#'
#'@return Returns the selected edges of the adjacency matrix
#'@author Murilo Montanini Breve
selectingEdges <- function(MAX, data) {
for (u in seq_along(data)) {
if (data[u] > MAX[[3]]) {
data[u] <- 1
} else
data[u] <- 0
}
return(data)
}
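# Usage sketch (illustrative): binarize an adjacency matrix at the edge
# frequency stored in the third element of a maxentropy() result.
MAX <- list(NA, NA, 2)  # only element 3 ("Edge frequency") is used here
data <- matrix(c(0, 1, 3, 5), 2, 2)
selectingEdges(MAX, data)  # edges with frequency > 2 become 1, others 0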
# file: /scratch/gouwar.j/cran-all/cranData/BASiNETEntropy/R/selectingEdges.R
#'@title Trains the algorithm to select the edges that maximize the entropy
#'@name training
#'
#'@description A function that trains the algorithm to select the edges that
#'maximize the entropy
#'
#'@param mRNA Path to the .FASTA file containing the mRNA sequences
#'@param lncRNA Path to the .FASTA file containing the lncRNA sequences
#'@param sncRNA Path to the .FASTA file containing the sncRNA sequences
#' (optional)
#'
#'@return Returns the edge lists and the 'curveofentropy' function inputs
#'@author Murilo Montanini Breve
#'
#'@importFrom Biostrings readBStringSet
#'@import igraph
#'@import randomForest
#'@importFrom graphics abline axis legend text
#'@importFrom stats sd
#'@importFrom utils write.csv2
#'@export
training <- function(mRNA, lncRNA, sncRNA = NULL) {
MRNA <- readBStringSet(mRNA)
LNCRNA <- readBStringSet(lncRNA)
if (length(sncRNA)) {
SNCRNA <- readBStringSet(sncRNA)
}
vectorm <- NULL
vectorlnc <- NULL
vectorsnc <- NULL
edgeslistmrna <- NULL
edgeslistlncrna <- NULL
edgeslistsncrna <- NULL
mRNAmatrixEdges <- NULL
lncRNAmatrixEdges <- NULL
sncRNAmatrixEdges <- NULL
MAXsnc <- NULL
for (t in seq_len(3)) {
if (t == 1) {
message("[INFO] Analyzing mRNA:")
seq <- c(MRNA)
}
if (t == 2) {
message("[INFO] Analyzing lncRNA:")
seq <- c(LNCRNA)
}
if (t == 3) {
if (length(sncRNA)) {
message("[INFO] Analyzing sncRNA:")
seq <- c(SNCRNA)
} else
break
}
for (u in seq_along(seq)) {
sequence <- strsplit(toString(seq[u]), split = '')
sequence <- sequence[[1]]
aux <- ""
index <- 1
position <- 0
cont <- length(sequence)
comma <- 0
x <- 0
k <- 1
vector <- c()
if (t == 1)
edgeslistmrna[[u]] <- createedges(sequence)
if (t == 2)
edgeslistlncrna[[u]] <- createedges(sequence)
if (t == 3)
edgeslistsncrna[[u]] <- createedges(sequence)
message(u)
}
}
for (f in seq_along(edgeslistmrna)) {
vectorm <- c(vectorm, edgeslistmrna[[f]])
}
for (f in seq_along(edgeslistlncrna)) {
vectorlnc <- c(vectorlnc, edgeslistlncrna[[f]])
}
if (length(sncRNA)) {
for (f in seq_along(edgeslistsncrna)) {
vectorsnc <- c(vectorsnc, edgeslistsncrna[[f]])
}
}
netm <- graph(edges = vectorm, directed = FALSE)
netl <- graph(edges = vectorlnc, directed = FALSE)
matrizm <- as_adjacency_matrix(netm)
datam <- as.matrix(matrizm)
matrizl <- as_adjacency_matrix(netl)
datal <- as.matrix(matrizl)
datam <- datam[order(rownames(datam)), order(colnames(datam))]
datal <- datal[order(rownames(datal)), order(colnames(datal))]
message("[INFO] Analyzing entropy")
MAXm <- maxentropy(datam)
MAXlnc <- maxentropy(datal)
if (length(sncRNA)) {
nets <- graph(edges = vectorsnc, directed = FALSE)
matrizs <- as_adjacency_matrix(nets)
datas <- as.matrix(matrizs)
MAXsnc <- maxentropy(datas)
sncRNAmatrixEdges <- selectingEdges(MAXsnc, datas)
}
message("[INFO] Selecting the edges by the maximum entropy method")
mRNAmatrixEdges <- selectingEdges(MAXm, datam)
lncRNAmatrixEdges <- selectingEdges(MAXlnc, datal)
listMatrix <- list(mRNAmatrixEdges,
lncRNAmatrixEdges,
sncRNAmatrixEdges,
MAXm,
MAXlnc,
MAXsnc)
return(listMatrix)
}
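# Usage sketch (assumes BASiNETEntropy and its dependencies are installed;
# the example FASTA files ship with the package, as shown in the vignette):
library(BASiNETEntropy)
mRNA <- system.file("extdata", "mRNA.fasta", package = "BASiNETEntropy")
lncRNA <- system.file("extdata", "ncRNA.fasta", package = "BASiNETEntropy")
trainingResult <- training(mRNA = mRNA, lncRNA = lncRNA)
str(trainingResult, max.level = 1)  # edge matrices and maxentropy outputs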
# file: /scratch/gouwar.j/cran-all/cranData/BASiNETEntropy/R/training.R
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ---- out.width = "400px"-----------------------------------------------------
knitr::include_graphics("mrna.jpg")
# file: /scratch/gouwar.j/cran-all/cranData/BASiNETEntropy/inst/doc/BASiNETEntropy.R
---
title: "BASiNETEntropy"
author: "BREVE, M., PIMENTA-ZANON, M. and LOPES, F."
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Classification of RNA sequences}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
## Introduction
<div style="text-align: justify">
The BASiNETEntropy package classifies messenger RNA and long non-coding RNA; optionally, a third class such as small non-coding RNA may be included. The classification is based on measurements drawn from complex networks: a complex network is created for each RNA sequence. The networks are formed of vertices and edges; the vertices are words whose size can be defined by the parameter 'word'. Instead of the threshold approach, this version of BASiNET uses a maximum entropy approach to select the edges. The training step is necessary to obtain the entropy curves and hence the list of edges to be cut. Finally, all measurements taken from the networks are used for classification with the Random Forest algorithm. Two example data files are included in the 'BASiNETEntropy' package, "mRNA.fasta" and "ncRNA.fasta", each with 10 sequences. These sequences were taken from the data set used in the article (LI, Aimin; ZHANG, Junying; ZHOU, Zhongyin. PLEK: a tool for predicting long non-coding RNAs and messenger RNAs based on an improved k-mer scheme. BMC Bioinformatics, BioMed Central, 2014) and are used to run the examples.
</div>
## Installation
<div style="text-align: justify">
To install BASiNETEntropy correctly it is necessary to install its dependencies: igraph, randomForest, and Biostrings. The Biostrings package is in the Bioconductor repository; the other packages are available on CRAN. The following commands must be executed in R to install these dependencies.
```
install.packages("igraph")
install.packages("randomForest")
if (!requireNamespace("BiocManager", quietly = TRUE))
install.packages("BiocManager")
BiocManager::install("Biostrings")
```
</div>
## Classification
<div style="text-align: justify">
The function "classify" applies an RNA classification methodology, at the end of the execution of the function is exposed the result for classification algorithm Random Forest.
</div>
Parameters:
<div style="text-align: justify">
**mRNA** - Directory of a FASTA file containing mRNA sequences.
**lncRNA** - Directory of a FASTA file containing lncRNA sequences.
**sncRNA** - Directory of a FASTA file containing sncRNA sequences; this parameter is optional.
**trainingResult** - The result of the training (two or three matrices).
**save_dataframe** - Boolean. This parameter saves a .csv file with the features in the current directory. No file is created by default.
**save_model** - Boolean. This parameter saves a .rds file with the model in the current directory. No file is created by default.
</div>
<div style="text-align: justify">
The BASiNETEntropy package includes two sample files, one with mRNA sequences and one with ncRNA sequences. The example below uses these two files.
</div>
Defining parameters:
```
mRNA <- system.file("extdata", "mRNA.fasta", package = "BASiNETEntropy")
lncRNA <- system.file("extdata", "ncRNA.fasta", package = "BASiNETEntropy")
library(BASiNETEntropy)
result <- classify(mRNA=mRNA, lncRNA=lncRNA)
```
<div style="text-align: justify">
After the function completes, the results for the J48 and Random Forest algorithms are shown. For the example data the results are J48 = 95.2381% accuracy and Random Forest = 4.76% error.
To obtain the entropy sum curve:
```
trainingResult <- training(mRNA = mRNA, lncRNA = lncRNA)
entropymeasures <- trainingResult[[4]][4]   # curve of entropy for the mRNA class
entropythreshold <- trainingResult[[4]][2]  # threshold for the mRNA class
BASiNETEntropy::curveofentropy(entropymeasures, entropythreshold)
```
The entropy sum curve shown refers to the mRNA class, for which the threshold is found at point 986. Therefore, the edges corresponding to points 1 to 986 are selected and the rest are discarded.
</div>
Example of generated entropy curves:
```{r, out.width = "400px"}
knitr::include_graphics("mrna.jpg")
```
# file: /scratch/gouwar.j/cran-all/cranData/BASiNETEntropy/inst/doc/BASiNETEntropy.Rmd (an identical copy ships as vignettes/BASiNETEntropy.Rmd)
#####BAT - Biodiversity Assessment Tools
#####Version 2.9.6 (2024-02-16)
#####By Pedro Cardoso, Stefano Mammola, Francois Rigal, Jose Carlos Carvalho
#####Maintainer: [email protected]
#####Reference: Cardoso, P., Rigal, F. & Carvalho, J.C. (2015) BAT - Biodiversity Assessment Tools, an R package for the measurement and estimation of alpha and beta taxon, phylogenetic and functional diversity. Methods in Ecology and Evolution, 6: 232-236.
#####Reference: Mammola, S. & Cardoso, P. (2020) Functional diversity metrics using kernel density n-dimensional hypervolumes. Methods in Ecology and Evolution, 11: 986-995.
#####Changed from v2.9.5:
#####Corrected optim.alpha and optim.beta for sequential analyses
library("ape")
library("geometry")
library("graphics")
library("hypervolume")
library("MASS")
library("methods")
library("nls2")
library("parallel")
library("phytools")
library("stats")
library("terra")
library("utils")
library("vegan")
#' @import ape
#' @import geometry
#' @import graphics
#' @import hypervolume
#' @import methods
#' @import nls2
#' @import parallel
#' @import stats
#' @import utils
#' @import vegan
#' @importFrom MASS stepAIC
#' @importFrom phytools midpoint.root
#' @importFrom terra adjacent cells extract global rast rasterize
#####auxiliary functions
prep <- function(comm, xtree, abund = TRUE){
len <- xtree[[1]] ## length of each branch
A <- xtree[[2]] ## matrix species X branches
minBranch <- min(len[colSums(A)==1]) ## minimum branch length of terminal branches
if(is.data.frame(comm))
comm = as.matrix(comm)
BA <- comm%*%A ## matrix samples X branches
if (!abund) BA = ifelse(BA >= 1, 1, 0)
return (list(lenBranch = len, sampleBranch = BA, speciesBranch = A, minBranch = minBranch))
}
#function to prepare data for all tree analyses, not implemented yet
prepTree <- function(comm, tree, abund){
#prepare tree if needed
if(missing(tree)){
if(missing(comm))
stop("One of comm OR tree must be provided")
tree = hclust(as.dist(matrix(1,ncol(comm),ncol(comm))))
tree$labels = colnames(comm)
} else {
if(is.matrix(tree) || is.data.frame(tree))
tree = tree.build(tree)
else if (is(tree, "dist"))
tree = ape::nj(tree)
}
if(is(tree, "hclust")){
tree = ape::as.phylo(tree)
}
#prepare comm
if(missing(comm))
comm = rep(1, length(tree$tip.label))
if(is.vector(comm))
comm <- matrix(comm, nrow = 1)
if(!abund)
comm <- ifelse(comm > 0, 1, 0)
comm[is.na(comm)] = 0
comm = reorderComm(comm, tree)
return(list(comm = comm, tree = tree))
}
clean <- function(comm, tree = NA){
if(is.vector(comm))
comm <- matrix(comm, nrow = 1)
comm <- as.matrix(comm)
if (!missing(tree)){
comm = reorderComm(comm, tree)
tree <- xTree(tree)
}
return(list(comm, tree))
}
#reorder Spp names in the trait matrix to match comm
reorderTrait <- function(comm, trait){
if(!is.null(colnames(comm)) && !is.null(rownames(trait))){
trait <- trait[match(colnames(comm), rownames(trait)), ]
if (any(colnames(comm) != rownames(trait)))
warning("Species names of comm and trait do not match!")
}
return(trait)
}
reorderComm <- function(comm, tree = NULL){
if(is.vector(comm))
comm = as.matrix(comm, nrow = 1)
if(is(tree, "hclust"))
tree = ape::as.phylo(tree)
if(is(tree, "phylo")){
if(!is.null(tree$tip.label) && !is.null(colnames(comm))){ ##if both tree and comm have species names match and reorder species (columns) in comm
#if some species are missing from comm add 0s
if(length(tree$tip.label) > ncol(comm) && all(colnames(comm) %in% tree$tip.label)){
miss = tree$tip.label[which(!(tree$tip.label %in% colnames(comm)))]
addComm = matrix(0, nrow = nrow(comm), ncol = length(miss))
colnames(addComm) = miss
comm = cbind(comm, addComm)
}
if(length(dim(comm)) == 2)
comm <- comm[,match(tree$tip.label, colnames(comm)), drop = FALSE]
else
comm <- comm[,match(tree$tip.label, colnames(comm)),, drop = FALSE]
if (any(tree$tip.label != colnames(comm)))
warning("Species names of comm and tree do not match!")
}
} else if(is(tree, "dist")){
if(!is.null(colnames(tree)) && !is.null(colnames(comm))){ ##if both tree and comm have species names match and reorder species (columns) in comm
comm <- comm[,match(colnames(tree), colnames(comm))]
if (any(colnames(tree) != colnames(comm)))
warning("Species names of comm and distance do not match!")
}
}
return(comm)
}
nMin <- function(comm){
n <- sum(comm)
for (s in 1:nrow(comm))
n <- min(n, sum(comm[s,]))
return(n)
}
rss <- function(x, y){
return (sum((x-y)^2))
}
logit <- function(x){
return(log(x/(1-x)))
}
revLogit <- function(x){
return(exp(x)/(1+exp(x)))
}
euclid <- function(x, y){
return(sqrt(sum((x - y) ^ 2)))
}
#####xTree function partly adapted from http://owenpetchey.staff.shef.ac.uk/Code/Code/calculatingfd_assets/Xtree.r
#####by Jens Schumacher (described in Petchey & Gaston 2002, 2006)
xTree <- function(tree) {
if(is(tree, "hclust")){
nSpp <- nrow(as.data.frame(tree['order']))
sppEdges <- matrix(0, nSpp, 2 * nSpp - 2)
lenEdges <- vector("numeric", 2 * nSpp - 2)
for(i in 1:(nSpp - 1)) {
if(tree$merge[i, 1] < 0) {
lenEdges[2 * i - 1] <- tree$height[order(tree$height)[i]]
sppEdges[ - tree$merge[i, 1], 2 * i - 1] <- 1
} else {
lenEdges[2 * i - 1] <- tree$height[order(tree$height)[i]] - tree$height[order(tree$height)[tree$merge[i, 1]]]
sppEdges[, 2 * i - 1] <- sppEdges[, 2 * tree$merge[i, 1] - 1] + sppEdges[ , 2 * tree$merge[i, 1]]
}
if(tree$merge[i, 2] < 0) {
lenEdges[2 * i] <- tree$height[order(tree$height)[i]]
sppEdges[ - tree$merge[i, 2], 2 * i] <- 1
} else {
lenEdges[2 * i] <- tree$height[order(tree$height)[i]] - tree$height[order(tree$height)[tree$merge[i, 2]]]
sppEdges[, 2 * i] <- sppEdges[, 2 * tree$merge[i, 2] - 1] + sppEdges[, 2 *tree$merge[i, 2]]
}
}
rownames(sppEdges) <- tree$labels
return(list(lenEdges, sppEdges))
} else if(is(tree, "phylo")){
# old code
# lenEdges <- tree$edge.length
# nSpp <- length(tree$tip.label)
# nEdges <- length(tree$edge.length)
# root <- nSpp + 1
# sppEdges <- matrix(0, nSpp, nEdges)
# for(i in 1:nSpp){
# find = i #start by finding the ith species
# repeat{
# row = which(tree$edge[,2] == find) #locate in which row of the edge table is our species or edge to be found
# sppEdges[i, row] = 1
# find = tree$edge[row,1] #find next edge if any until reaching the root
# if(find == root) break #all edges of this species were found, go to next species
# }
# }
# rownames(sppEdges) <- tree$tip.label
# return(list(lenEdges, sppEdges))
edgeList = tree$edge # edge list
edgeLength = tree$edge.length # edge length
spp = tree$tip.label # species
basal = min(edgeList[,1]) # Basal node
mat = matrix(data = 0, nrow = length(spp), ncol = length(edgeLength)) # empty matrix
for (i in 1:length(spp)) {
x = i
repeat {
mat[i, which(edgeList[,2] == x)] = 1
x = edgeList[which(edgeList[,2] == x), 1]
if (x == basal){
break
}
}
}
return(list(edgeLength, mat))
} else {
cat("Unrecognized tree object!")
}
}
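# Usage sketch (illustrative): decompose a small functional dendrogram into
# branch lengths and a species x branches incidence matrix.
toyTree <- hclust(dist(1:4), method = "average")
toyTree$labels <- paste0("sp", 1:4)
xt <- xTree(toyTree)
xt[[1]]  # length of each branch
xt[[2]]  # which species subtend each branch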
#####observed diversity
sobs <- function(comm, xtree){
if (is.vector(comm))
comm = matrix(comm, nrow = 1)
if (missing(xtree)){
return(length(colSums(comm)[colSums(comm) > 0]))
} else {
data <- prep(comm, xtree)
value <- ifelse (colSums(data$sampleBranch) > 0, 1, 0) # vector of observed branches
return (sum(value*data$lenBranch))
}
}
#####observed abundance
nobs <- function(comm, xtree){
if (is.vector(comm))
comm = matrix(comm, nrow = 1)
if (missing(xtree)){
return(sum(comm))
} else {
data <- prep(comm, xtree)
value <- colSums(data$sampleBranch) # vector of observed branches
return (sum(value*data$lenBranch))
}
}
#####hill numbers
hillobs <- function(comm, q = 0){
#comm must be a vector
comm = comm[comm > 0]
comm = comm / sum(comm) #convert to proportions
if (q == 1)
res = exp(-1*sum(comm * log(comm)))
else
res = (sum(comm^q))^(1/(1-q))
return(res)
}
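# Usage sketch (illustrative): Hill numbers of orders 0 (species richness),
# 1 (exponential Shannon) and 2 (inverse Simpson) for one community vector.
ab <- c(10, 5, 1)
sapply(c(0, 1, 2), function(q) hillobs(ab, q))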
#####rao quadratic entropy
raoobs <- function(comm, distance){
#comm must be a vector
s = length(comm)
comm = comm / sum(comm) #convert to proportions
distance = sqrt(as.matrix(distance))
res = 0
for(i in 1:s){
for(j in 1:s){
res = res + (distance[i,j] * comm[i] * comm[j])
}
}
return(res)
}
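# Usage sketch (illustrative): Rao's quadratic entropy for three species with
# known pairwise distances; note that distances are square-rooted internally.
ab <- c(10, 5, 1)
d <- dist(c(0, 1, 3))  # pairwise distances between the three species
raoobs(ab, d)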
#####diversity of rare species for abundance - singletons, doubletons, tripletons, etc
srare <- function(comm, xtree, n = 1){
if(missing(xtree)){
return(length(colSums(comm)[colSums(comm) == n]))
} else {
data <- prep(comm, xtree)
value <- ifelse (colSums(data$sampleBranch) == n, 1, 0) # vector of branches with given abundance
return (sum(value*data$lenBranch))
}
}
#####diversity of rare species for incidence - uniques, duplicates, triplicates, etc
qrare <- function(comm, xtree, n = 1){
if(missing(xtree)){
comm <- ifelse(comm > 0, 1, 0)
return(length(colSums(comm)[colSums(comm) == n]))
} else {
data <- prep(comm, xtree, FALSE)
value <- ifelse (colSums(data$sampleBranch) == n, 1, 0) # vector of branches with given incidence
return (sum(value*data$lenBranch))
}
}
#####minimum terminal branch length, = 1 in case of TD
minBranch <- function(comm, xtree){
if (missing(xtree)){
return(1)
} else {
data <- prep(comm, xtree)
return(data$minBranch)
}
}
#####non-parametric estimators
chao <- function(obs, s1, s2, mb){
return(obs + (s1*(s1-mb))/(2*(s2+mb)))
}
jack1ab <- function(obs, s1){
return(obs + s1)
}
jack1in <- function(obs, q1, q){
return(obs + q1 * ((q-1)/q))
}
jack2ab <- function(obs, s1, s2){
return(obs + 2*s1 - s2)
}
jack2in <- function(obs, q1, q2, q){
if (q > 1) return(obs + (q1*(2*q-3)/q - q2*(q-2)^2/(q*(q-1))))
else return(obs + 2*q1 - q2)
}
pcorr <- function(obs, s1){
return(1+(s1/obs)^2)
}
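# Usage sketch (illustrative): non-parametric estimates for a community with
# 50 observed species, 10 singletons and 5 doubletons (mb = 1 for TD).
obs <- 50; s1 <- 10; s2 <- 5
chao(obs, s1, s2, mb = 1)              # Chao1
jack1ab(obs, s1)                       # first-order jackknife (abundance)
jack2ab(obs, s1, s2)                   # second-order jackknife (abundance)
chao(obs, s1, s2, 1) * pcorr(obs, s1)  # P-corrected Chao1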
#####observed beta (a = shared species/edges, b/c = species/edges exclusive to either site, comm is a 2sites x species matrix)
betaObs <- function(comm, xtree, func = "jaccard", abund = TRUE, comp = FALSE){
if(sum(comm) == 0) ##if no species on any community return 0
return(list(Btotal = 0, Brepl = 0, Brich = 0))
if (!abund || max(comm) == 1) { ##if incidence data
obs1 <- sobs(comm[1,,drop=FALSE], xtree)
obs2 <- sobs(comm[2,,drop=FALSE], xtree)
obsBoth <- sobs(comm, xtree)
a <- obs1 + obs2 - obsBoth
b <- obsBoth - obs2
c <- obsBoth - obs1
} else if (abund & missing(xtree)){ ##if abundance data
a <- 0
b <- 0
c <- 0
for (i in 1:ncol(comm)){
minComm <- min(comm[1,i], comm[2,i])
a <- a + minComm
b <- b + comm[1,i] - minComm
c <- c + comm[2,i] - minComm
}
} else { ##if abundance and tree
##due to the way Soerensen doubles the weight of the a component, using a tree or not will be the same with abundance data.
data <- prep(comm, xtree)
a = sum(data$lenBranch * apply(data$sampleBranch,2,min))
diff = data$lenBranch * (data$sampleBranch[1,] - data$sampleBranch[2,])
b = sum(replace(diff, diff < 0, 0))
c = sum(replace(diff, diff > 0, 0) * -1)
}
denominator <- a + b + c
if(tolower(substr(func, 1, 1)) == "s")
denominator <- denominator + a
betaValues = (list(Btotal = (b+c)/denominator, Brepl = 2*min(b,c)/denominator, Brich = abs(b-c)/denominator))
if(comp){
betaValues$Shared = a
betaValues$Unique1 = b
betaValues$Unique2 = c
}
return(betaValues)
}
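# Usage sketch (illustrative): partition beta diversity between two sites
# sharing one of five species (incidence data, Jaccard family).
comm2 <- matrix(c(1, 1, 1, 0, 0,
                  0, 0, 1, 1, 1), nrow = 2, byrow = TRUE)
betaObs(comm2, func = "jaccard", abund = FALSE)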
#####Auxiliary function to calculate the mean squared deviation (mSD)
msd <- function(dist1, dist2){
#standardize all distances to 0-1
dist1 = dist1 / max(dist1)
dist2 = dist2 / max(dist2)
#calculate mSD between 0 (min quality) and 1 (max quality)
qual = 0
n = length(dist1)
for(i in 1:n)
qual = qual + (dist1[i] - dist2[i])^2
qual = qual / ((n * (n - 1))/2)
qual = 1 - qual
#calculate mSD for all species with same pairwise distance to rescale the min
dist2[] = 1
minQual = 0
for(i in 1:n)
minQual = minQual + (dist1[i] - dist2[i])^2
minQual = minQual / ((n * (n - 1))/2)
minQual = 1 - minQual
#rescale
qual = (qual - minQual) / (1 - minQual)
return(qual)
}
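# Usage sketch (illustrative): quality of a dendrogram as a representation of
# the original pairwise distances (1 = perfect, 0 = minimum quality).
d1 <- dist(1:5)
toyTree <- hclust(d1, method = "average")
d2 <- cophenetic(toyTree)
msd(d1, d2)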
#####Auxiliary function doing most work for sad.*
sad.core <- function(comm, contr = NULL, octaves = TRUE, scale = FALSE, raref = 0, runs = 100){
#prepare data
if(is.vector(comm))
comm = matrix(comm, 1)
if(is.null(contr))
contr = rep(1, ncol(comm))
if(is.vector(contr))
contr = matrix(contr, 1)
if(raref == 1)
raref = min(rowSums(comm))
#get octaves
if(octaves)
nOctaves = as.integer(log2(max(comm))+1)
else
nOctaves = max(comm)
res = matrix(0, nrow = nrow(comm), ncol = nOctaves)
#the core stuff
for(i in 1:nrow(comm)){
#if rarefying data
if(raref > 0){
r = matrix(0, nrow = runs, ncol = nOctaves)
for(j in 1:runs){
samp = rrarefy(comm[i,], sample = raref)
newSad = sad.core(samp, contr[i,], octaves, raref = 0, runs = 0)
r[j, 1:length(newSad)] = newSad
}
res[i,] = apply(r, 2, mean, na.rm = TRUE)
} else {
thisComm = comm[i, comm[i,] > 0]
thisContr = contr[i, comm[i,] > 0]
for(j in unique(thisComm)){
selected <- ifelse(thisComm == j, thisContr, 0) # vector of contribution of species with given abundance
if(octaves)
res[i, as.integer(log2(j)+1)] = sum(res[i, as.integer(log2(j)+1)], selected)
else
res[i, j] = sum(selected)
}
}
}
if(scale)
res = t(apply(res, 1, function(x) x/sum(x)))
return(res)
}
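# Usage sketch (illustrative): species abundance distribution in octaves for
# a single community of six species.
ab <- c(1, 1, 2, 3, 8, 20)
sad.core(ab, octaves = TRUE)  # abundance classes 1, 2-3, 4-7, 8-15, 16-31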
#auxiliary function to resample from any number of sites in mixture
remix <- function(comm, size, replace){
if(is.vector(comm))
comm = as.matrix(comm, nrow = 1)
if(length(size) == 1)
size = rep(size, nrow(comm))
newComm = comm
newComm[] = 0
for(i in 1:nrow(comm)){
if (replace)
thisComm = sample(colnames(comm), size[i], prob = comm[i, ], replace = TRUE)
else
thisComm = sample(rep(colnames(comm), comm[i, ]), size[i], replace = FALSE)
if(length(thisComm) == 0){
thisComm = rep(0, ncol(newComm))
names(thisComm) = colnames(newComm)
} else {
thisComm = table(thisComm)
}
newComm[i, names(thisComm)] = thisComm
}
return(newComm)
}
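# Usage sketch (illustrative): resample five individuals per site, without
# replacement, keeping the sites x species layout.
comm <- matrix(c(4, 3, 3,
                 6, 2, 2), nrow = 2, byrow = TRUE,
               dimnames = list(c("s1", "s2"), c("spA", "spB", "spC")))
remix(comm, size = 5, replace = FALSE)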
#base function for sampling optimization (optim.alpha or optim.beta)
optim.div <- function(div, comm, tree, methods, base, seq, abund, runs, prog){
##check if data are correct
if(sum(methods[, 2]) != nrow(comm))
stop("Sum of the methods must be the same as nrow(comm).")
if(length(dim(comm)) == 3) ##number of sites
nSites <- dim(comm)[3]
else
nSites <- 1
if(div == "beta" && nSites < 2)
stop("You need more than 1 site to optimize beta-sampling.")
##convert traits to a tree and order comm if needed
if(!missing(tree) && (is.matrix(tree) || is.data.frame(tree) || is.vector(tree)))
tree = tree.build(tree)
if (!missing(tree))
comm = reorderComm(comm, tree)
##preliminary stats
nMethods <- nrow(methods) ##number of methods
if(ncol(methods) == 2)
methods = cbind(methods, fixCost = rep(0, nMethods), varCost = rep(1, nMethods))
lMethods = c() ##method of each sample
for (i in 1:nMethods)
lMethods = c(lMethods, rep(methods[i, 1], methods[i, 2]))
if (missing(base)) ##if no samples to start with for complementarity analysis
base <- rep(0,nMethods)
#basic algorithm, using a grid search to calculate costs and diversity
if(!seq){
#create all combinations
comb = list()
for(i in 1:nMethods)
comb[[i]] = 0:methods[i, 2]
comb = expand.grid(comb)
colnames(comb) = methods[, 1]
#filter to combinations with minimum base
for(i in 1:ncol(comb))
comb = comb[comb[ ,i] >= base[i], ]
#calculate cost of each combination
cost = c()
for(i in 1:nrow(comb))
cost[i] = optim.cost(comb[i, ], methods)
#calculate diversity of each combination
if (prog)
pb <- txtProgressBar(max = nrow(comb), style = 3)
diversity = c()
for(i in 1:nrow(comb)){
if (prog)
setTxtProgressBar(pb, i)
if(div == "alpha")
diversity[i] <- optim.alpha.stats(comm, tree, methods, as.numeric(comb[i,]), runs)
else
diversity[i] <- optim.beta.stats(comm, tree, methods, as.numeric(comb[i,]), abund, runs)
}
#order combinations by diversity (main) and cost
comb = cbind(comb, cost, diversity)
comb = comb[order(comb$cost), ]
comb = comb[order(comb$diversity), ]
rownames(comb) = 1:nrow(comb)
#finalize
if (prog)
close(pb)
#if sequential adding combination with steepest slope at each step
} else {
#prep data
comb <- matrix(base, nrow = 1)
cost <- optim.cost(comb, methods)
if(div == "alpha")
diversity = optim.alpha.stats(comm, tree, methods, comb, runs)
else
diversity = optim.beta.stats(comm, tree, methods, comb, abund, runs)
nMissing <- sum(methods[, 2]) - sum(base)
met = unlist(methods[,2])
if (prog)
pb <- txtProgressBar(max = nMissing, style = 3)
#add one sample at a time
del = c() #combinations that are no longer valid
for(i in 1:nMissing){
if(prog)
setTxtProgressBar(pb, i)
#create all combinations with +1 sample and filter to max number of samples
newComb = matrix(rep(comb[nrow(comb), ], nMethods), nrow = nMethods, byrow = TRUE)
for(i in 1:nMethods)
newComb[i, i] = newComb[i, i] + 1
del = c()
for(i in 1:nrow(newComb)){
if(any(newComb[i, ] > met))
del = c(del, i)
}
if(length(del) > 0)
newComb = newComb[-del, , drop = FALSE]
#calculate cost of each combination
newCost = c()
for(i in 1:nrow(newComb))
newCost[i] = optim.cost(newComb[i, ], methods)
#calculate diversity of each combination
newDiversity = c()
for(i in 1:nrow(newComb)){
if(div == "alpha")
newDiversity[i] <- optim.alpha.stats(comm, tree, methods, as.numeric(newComb[i,]), runs)
else
newDiversity[i] <- optim.beta.stats(comm, tree, methods, as.numeric(newComb[i,]), abund, runs)
}
#choose combination with steepest slope (choose randomly if tie)
slope = c()
for(i in 1:nrow(newComb)){
slope[i] = (newDiversity[i] - diversity[length(diversity)])
slope[i] = slope[i] / (newCost[i] - cost[length(cost)])
}
best = which(slope == max(slope))
if(length(best) > 1)
best = sample(best, 1)
#add new row to comb and new values to cost and diversity
comb = rbind(comb, newComb[best, ])
cost = c(cost, newCost[best])
diversity = c(diversity, newDiversity[best])
}
#finalize
comb = cbind(comb, cost, diversity)
rownames(comb) = 1:nrow(comb)
if (prog)
close(pb)
}
#return results
return(comb)
}
#calculate cost of each combination of samples
optim.cost <- function(samples, methods){
cost = 0
for(i in 1:nrow(methods)){
if(samples[i] > 0){
fixCost = methods[i, 3] #fixed costs
varCost = samples[i] * methods[i, 4] #costs per sample
cost = cost + varCost + fixCost
}
}
return(unlist(cost))
}
##################################################################################
##################################MAIN FUNCTIONS##################################
##################################################################################
#' Alpha diversity (Taxon, Phylogenetic or Functional Diversity - TD, PD, FD).
#' @description Observed richness with possible rarefaction, multiple sites simultaneously.
#' @param comm A sites x species matrix, with either abundance or incidence data.
#' @param tree A phylo or hclust object (used only for PD or FD) or alternatively a species x traits matrix or data.frame to build a functional tree.
#' @param raref An integer specifying the number of individuals for rarefaction (individual based).
#' If raref < 1 no rarefaction is made.
#' If raref = 1 rarefaction is made by the minimum abundance among all sites.
#' If raref > 1 rarefaction is made by the abundance indicated.
#' If not specified, default is 0.
#' @param runs Number of resampling runs for rarefaction. If not specified, default is 100.
#' @details TD is equivalent to species richness. Calculations of PD and FD are based on Faith (1992) and Petchey & Gaston (2002, 2006), which measure PD and FD of a community as the total branch length of a tree linking all species represented in such community.
#' PD and FD are calculated based on a tree (hclust or phylo object, no need to be ultrametric). The path to the root of the tree is always included in calculations of PD and FD.
#' The number and order of species in comm must be the same as in tree.
#' The rarefaction option is useful to compare communities with much different numbers of individuals sampled, which might bias diversity comparisons (Gotelli & Colwell 2001)
#' @return A matrix of sites x diversity values (either "Richness" OR "Mean, Median, Min, LowerCL, UpperCL and Max").
#' @references Faith, D.P. (1992) Conservation evaluation and phylogenetic diversity. Biological Conservation, 61, 1-10.
#' @references Gotelli, N.J. & Colwell, R.K. (2001) Quantifying biodiversity: procedures and pitfalls in the measurement and comparison of species richness. Ecology Letters, 4, 379-391.
#' @references Petchey, O.L. & Gaston, K.J. (2002) Functional diversity (FD), species richness and community composition. Ecology Letters, 5, 402-411.
#' @references Petchey, O.L. & Gaston, K.J. (2006) Functional diversity: back to basics and looking forward. Ecology Letters, 9, 741-758.
#' @examples
#' comm <- matrix(c(0,0,1,1,0,0,2,1,0,0), nrow = 2, ncol = 5, byrow = TRUE)
#' trait = 1:5
#' tree <- tree.build(trait)
#' plot(tree, "u")
#' alpha(comm)
#' alpha(comm, raref = 0)
#' alpha(comm, tree)
#' alpha(comm, tree, 2, 100)
#' @export
alpha <- function(comm, tree, raref = 0, runs = 100){
#convert traits to a tree if needed
if(!missing(tree) && (is.matrix(tree) || is.data.frame(tree) || is.vector(tree)))
tree = tree.build(tree)
#first organize the data
if(!missing(tree)){
cleanData = clean(comm, tree)
comm = cleanData[[1]]
tree = cleanData[[2]]
}
#now let's go for what matters
nComm <- nrow(comm)
if(raref < 1){ # no rarefaction if 0 or negative
results <- matrix(0, nComm, 1)
for (s in 1:nComm){
results[s,1] <- sobs(comm[s,, drop=FALSE], tree)
}
rownames(results) <- rownames(comm)
colnames(results) <- "Richness"
return (results)
}
if (raref == 1)
raref <- nMin(comm) # rarefy by minimum n among all communities
results <- matrix(0, nComm, 6)
for (s in 1:nComm){
res <- c()
for (r in 1:runs){
res <- c(res,sobs(rrarefy(comm[s,], raref), tree))
}
results[s,] <- c(mean(res), quantile(res, 0.5), min(res), quantile(res, 0.025), quantile(res, 0.975), max(res))
}
rownames(results) <- rownames(comm)
colnames(results) <- c("Mean", "Median", "Min", "LowerCL", "UpperCL", "Max")
return (results)
}
#' Alpha diversity accumulation curves (observed and estimated).
#' @description Estimation of alpha diversity of a single site with accumulation of sampling units.
#' @param comm A sampling units x species matrix, with either abundance or incidence data.
#' @param tree A phylo or hclust object (used only for PD or FD) or alternatively a species x traits matrix or data.frame to build a functional tree.
#' @param func The class of estimators to be used:
#' If func is partial match of "curve", TD, PD or FD are based on extrapolating the accumulation curve of observed diversity.
#' If func is partial match of "nonparametric", TD, PD or FD are based on non-parametric estimators.
#' If func is partial match of "completeness", PD or FD estimates are based on the completeness of TD (requires a tree to be used).
#' If not specified, default is "nonparametric.
#'@param target True diversity value used to calculate the accuracy of curves (scaled mean squared error). If not specified (default), accuracy is not calculated; -1 uses the total observed diversity as true diversity and any other value is taken as the true known diversity.
#' @param runs Number of random permutations to be made to the sampling order. If not specified, default is 100.
#' @param prog Present a text progress bar in the R console.
#' @details Observed diversity often is an underestimation of true diversity. Several approaches have been devised to estimate species richness (TD) from incomplete sampling.
#' These include: (1) fitting asymptotic functions to randomised accumulation curves (Soberon & Llorente 1993; Flather 1996; Cardoso et al. in prep.)
#' (2) the use of non-parametric estimators based on the incidence or abundance of rare species (Heltshe & Forrester 1983; Chao 1984, 1987; Colwell & Coddington 1994).
#' A correction to non-parametric estimators has also been recently proposed, based on the proportion of singleton or unique species
#' (species represented by a single individual or in a single sampling unit respectively; Lopez et al. 2012).
#' Cardoso et al. (2014) have proposed a way of adapting these approaches to estimate PD and FD, also adding a third possible approach for
#' these dimensions of diversity: (3) correct PD and FD values based on the completeness of TD, where completeness equals the proportion of estimated true diversity that was observed.
#' Calculations of PD and FD are based on Faith (1992) and Petchey & Gaston (2002, 2006), which measure PD and FD of a community as the total branch length of a tree linking all species represented in such community.
#' PD and FD are calculated based on a tree (hclust or phylo object, no need to be ultrametric). The path to the root of the tree is always included in calculations of PD and FD.
#' The number and order of species in comm must be the same as in tree.
#' @return A matrix of sampling units x diversity values (sampling units, individuals, observed and estimated diversity).
#' The values provided by this function are:
#' @return Sampl - Number of sampling units;
#' @return Ind - Number of individuals;
#' @return Obs - Observed diversity;
#' @return S1 - Singletons;
#' @return S2 - Doubletons;
#' @return Q1 - Uniques;
#' @return Q2 - Duplicates;
#' @return Jack1ab - First order jackknife estimator for abundance data;
#' @return Jack1in - First order jackknife estimator for incidence data;
#' @return Jack2ab - Second order jackknife estimator for abundance data;
#' @return Jack2in - Second order jackknife estimator for incidence data;
#' @return Chao1 - Chao estimator for abundance data;
#' @return Chao2 - Chao estimator for incidence data;
#' @return Clench - Clench or Michaelis-Menten curve;
#' @return Exponential - Exponential curve;
#' @return Rational - Rational function;
#' @return Weibull - Weibull curve;
#' @return The P-corrected version of all non-parametric estimators is also provided.
#' @return Accuracy - if accuracy is to be calculated a list is returned instead, with the second element being the scaled mean squared error of each estimator.
#' @references Cardoso, P., Rigal, F., Borges, P.A.V. & Carvalho, J.C. (2014) A new frontier in biodiversity inventory: a proposal for estimators of phylogenetic and functional diversity. Methods in Ecology and Evolution, 5: 452-461.
#' @references Chao, A. (1984) Nonparametric estimation of the number of classes in a population. Scandinavian Journal of Statistics, 11, 265-270.
#' @references Chao, A. (1987) Estimating the population size for capture-recapture data with unequal catchability. Biometrics 43, 783-791.
#' @references Colwell, R.K. & Coddington, J.A. (1994) Estimating terrestrial biodiversity through extrapolation. Phil. Trans. Roy. Soc. London B 345, 101-118.
#' @references Faith, D.P. (1992) Conservation evaluation and phylogenetic diversity. Biological Conservation, 61, 1-10.
#' @references Flather, C. (1996) Fitting species-accumulation functions and assessing regional land use impacts on avian diversity. Journal of Biogeography, 23, 155-168.
#' @references Heltshe, J. & Forrester, N.E. (1983) Estimating species richness using the jackknife procedure. Biometrics, 39, 1-11.
#' @references Lopez, L.C.S., Fracasso, M.P.A., Mesquita, D.O., Palma, A.R.T. & Riul, P. (2012) The relationship between percentage of singletons and sampling effort: a new approach to reduce the bias of richness estimates. Ecological Indicators, 14, 164-169.
#' @references Petchey, O.L. & Gaston, K.J. (2002) Functional diversity (FD), species richness and community composition. Ecology Letters, 5, 402-411.
#' @references Petchey, O.L. & Gaston, K.J. (2006) Functional diversity: back to basics and looking forward. Ecology Letters, 9, 741-758.
#' @references Soberon, M.J. & Llorente, J. (1993) The use of species accumulation functions for the prediction of species richness. Conservation Biology, 7, 480-488.
#' @examples comm <- matrix(c(1,1,0,0,0,0,2,1,0,0,0,0,2,1,0,0,0,0,2,1), nrow = 4, ncol = 5, byrow = TRUE)
#' tree <- hclust(dist(c(1:5), method="euclidean"), method="average")
#' alpha.accum(comm)
#' alpha.accum(comm, func = "nonparametric")
#' alpha.accum(comm, tree, "completeness")
#' alpha.accum(comm, tree, "curve", runs = 1000)
#' alpha.accum(comm, target = -1)
#' @export
alpha.accum <- function(comm, tree, func = "nonparametric", target = -2, runs = 100, prog = TRUE){
#convert traits to a tree if needed
if(!missing(tree) && (is.matrix(tree) || is.data.frame(tree) || is.vector(tree)))
tree = tree.build(tree)
#first organize the data
if(!missing(tree)){
cleanData = clean(comm, tree)
comm = cleanData[[1]]
tree = cleanData[[2]]
}
#####function options:
#####nonparametric (TD/PD/FD with non-parametric estimators)
#####completeness (PD/FD with TD completeness correction)
#####curve (TD/PD/FD with curve fitting)
func <- match.arg(func, c("nonparametric", "completeness", "curve"))
#####nonparametric (TD/PD/FD with non-parametric estimators)
switch(func, nonparametric = {
resultsArray <- array(0, dim = c(nrow(comm), 19, runs))
if(target > -2){
smse <- matrix(0, runs, 19)
smsew <- smse
}
if (prog) pb <- txtProgressBar(0, runs, style = 3)
for (r in 1:runs){
comm <- comm[sample(nrow(comm)),, drop=FALSE] #shuffle rows (sampling units)
data <- matrix(0,1,ncol(comm))
runData <- matrix(0,nrow(comm),19)
colnames(data) = colnames(comm)
for (q in 1:nrow(comm)){
data <- rbind(data, comm[q,])
n <- sum(rowSums(data))
obs <- sobs(data, tree)
s1 <- srare(data, tree, 1)
s2 <- srare(data, tree, 2)
q1 <- qrare(data, tree, 1)
q2 <- qrare(data, tree, 2)
mb <- minBranch(data, tree)
j1ab <- jack1ab(obs, s1)
j1abP <- j1ab * pcorr(obs, s1)
j1in <- jack1in(obs, q1, q)
j1inP <- j1in * pcorr(obs, q1)
j2ab <- jack2ab(obs, s1, s2)
j2abP <- j2ab * pcorr(obs, s1)
j2in <- jack2in(obs, q1, q2, q)
j2inP <- j2in * pcorr(obs, q1)
c1 <- chao(obs, s1, s2, mb)
c1P <- c1 * pcorr(obs, s1)
c2 <- chao(obs, q1, q2, mb)
c2P <- c2 * pcorr(obs, q1)
runData[q,] <- c(q, n, obs, s1, s2, q1, q2, j1ab, j1abP, j1in, j1inP, j2ab, j2abP, j2in, j2inP, c1, c1P, c2, c2P)
}
resultsArray[,,r] <- runData
if(exists("smse")){ ##if accuracy is to be calculated
if(r == 1){
if(target == -1){
truediv <- runData[nrow(runData),3]
}else{
truediv <- target
}
}
s <- accuracy(runData, truediv)
smse[r,3] <- s[1,1]
smse[r,8:19] <- s[1,-1]
smsew[r,3] <- s[2,1]
smsew[r,8:19] <- s[2,-1]
}
if (prog) setTxtProgressBar(pb, r)
}
if (prog) close(pb)
#####calculate averages or medians of all runs
results <- matrix(0,nrow(comm),19)
v <- array(0, dim = c(runs))
for (i in 1:nrow(comm)){
for (j in 1:19){
for (k in 1:runs){
v[k] <- resultsArray[i,j,k]
}
if (j < 16 || missing(tree))
results[i,j] <- mean(v)
else
results[i,j] <- median(v)
}
}
if(exists("smse")){ ##calculate accuracy
smse <- colMeans(smse)
smsew <- colMeans(smsew)
}
#####completeness (PD/FD with TD completeness correction)
}, completeness = {
if (missing(tree))
stop("Completeness option not available without a tree...")
results <- alpha.accum(comm, runs = runs)
obs <- matrix(0,nrow(comm),1)
for (r in 1:runs){
comm <- comm[sample(nrow(comm)),, drop=FALSE] #shuffle rows (sampling units)
for (s in 1:nrow(comm)){
obs[s,1] <- obs[s,1] + sobs(comm[1:s,], tree)
}
}
obs <- obs / runs
for (i in 8:19)
results[,i] <- obs * (results[,i] / results[,3])
results[,3] <- obs
#####curve (TD/PD/FD with curve fitting)
}, curve = {
results <- matrix(NA,nrow(comm),7)
results[,1] <- seq(1,nrow(comm)) ##fill samples column
results[,2] <- seq(sum(comm)/nrow(comm),sum(comm), sum(comm)/nrow(comm)) ##fill individuals column
runObs <- rep(0,nrow(comm))
if (prog) pb <- txtProgressBar(0, runs, style = 3)
for (r in 1:runs){
comm <- comm[sample(nrow(comm)),, drop=FALSE] #shuffle rows (sampling units)
for (s in 1:nrow(comm)){
runObs[s] <- runObs[s] + sobs(comm[1:s,,drop=FALSE], tree)
}
if (prog) setTxtProgressBar(pb, r)
}
if (prog) close(pb)
results[,3] <- runObs / runs
rich <- results[nrow(comm),3]
for (s in 3:nrow(results)){ ##fit curves only with 3 or more sampling units
## curve fitting
x <- results[1:s,1]
y <- results[1:s,3]
##Clench
stlist <- data.frame(a = rich, b = c(0.1, 0.5, 1))
form <- y ~ (a*x)/(b+x)
mod <- try(nls2(form, start = stlist, algorithm = "random-search"), silent = TRUE)
curve <- try(nls2(form, start = mod, algorithm = "default"), silent = TRUE)
if(!is(curve, "try-error")){
a <- coef(curve)[1]
results[s,4] <- a
}
##Negative exponential
form <- y ~ a*(1-exp(-b*x))
mod <- try(nls2(form, start = stlist, algorithm = "random-search"), silent = TRUE)
curve <- try(nls2(form, start = mod, algorithm = "default"), silent = TRUE)
if(!is(curve, "try-error")){
a <- coef(curve)[1]
results[s,5] <- a
}
##Rational
stlist <- data.frame(a = rich, b = c(0.1, 0.5, 1, 5, 10), c = c(1, 10, 100, 1000, 10000))
form <- y ~ (c+(a*x))/(b+x)
mod <- try(nls2(form, start = stlist, algorithm = "random-search"), silent = TRUE)
curve <- try(nls2(form, start = mod, algorithm = "default"), silent = TRUE)
if(!is(curve, "try-error")){
a <- coef(curve)[1]
results[s,6] <- a
}
##Weibull
stlist <- data.frame(a = rich, b = c(0,1,10), c = c(0,0.1,1))
form <- y ~ a*(1-exp(-b*(x^c)))
mod <- try(nls2(form, start = stlist, algorithm = "random-search"), silent = TRUE)
curve <- try(nls2(form, start = mod, algorithm = "default"), silent = TRUE)
if(!is(curve, "try-error")){
a <- coef(curve)[1]
results[s,7] <- a
}
}
colnames(results) <- c("Sampl", "Ind", "Obs", "Clench", "Exponential", "Rational", "Weibull")
return (results)
})
colnames(results) <- c("Sampl", "Ind", "Obs", "S1", "S2", "Q1", "Q2", "Jack1ab", "Jack1abP", "Jack1in", "Jack1inP", "Jack2ab", "Jack2abP", "Jack2in", "Jack2inP", "Chao1", "Chao1P", "Chao2", "Chao2P")
if(exists("smse")){
smse <- rbind(smse, smsew)
colnames(smse) <- colnames(results)
rownames(smse) <- c("Raw", "Weighted")
smse <- smse[,-c(1:2,4:7)]
return(list(results, smse))
} else {
return(results)
}
}
#' Alpha diversity estimates.
#' @description Estimation of alpha diversity of multiple sites simultaneously.
#' @param comm A sites x species matrix, with either abundances or number of incidences.
#' @param tree A phylo or hclust object (used only for PD or FD) or alternatively a species x traits matrix or data.frame to build a functional tree.
#' @param func The class of estimators to be used:
#' If func is partial match of "nonparametric", TD, PD or FD are based on non-parametric estimators.
#' If func is partial match of "completeness", PD or FD estimates are based on the completeness of TD (requires a tree to be used).
#' If not specified, default is "nonparametric".
#' @details Observed diversity often is an underestimation of true diversity.
#' Non-parametric estimators based on the incidence or abundance of rare species have been proposed to overcome the problem of undersampling (Heltshe & Forrester 1983; Chao 1984, 1987; Colwell & Coddington 1994).
#' A correction to non-parametric estimators has also been recently proposed, based on the proportion (P) of singleton or unique species
#' (species represented by a single individual or in a single sampling unit respectively; Lopez et al. 2012).
#' Cardoso et al. (2014) have proposed a way of adapting non-parametric species richness estimators to PD and FD. They have also proposed correcting PD and FD values based on the completeness of TD, where completeness equals the proportion of estimated true diversity that was observed.
#' Calculations of PD and FD are based on Faith (1992) and Petchey & Gaston (2002, 2006), which measure PD and FD of a community as the total branch length of a tree linking all species represented in such community.
#' PD and FD are calculated based on a tree (hclust or phylo object, no need to be ultrametric). The path to the root of the tree is always included in calculations of PD and FD.
#' The number and order of species in comm must be the same as in tree.
#' @return A matrix of sites x diversity values (individuals, observed and estimated diversity).
#' The values provided by this function are:
#' @return Ind - Number of individuals;
#' @return Obs - Observed diversity;
#' @return S1 - Singletons;
#' @return S2 - Doubletons;
#' @return Jack1ab - First order jackknife estimator for abundance data;
#' @return Jack2ab - Second order jackknife estimator for abundance data;
#' @return Chao1 - Chao estimator for abundance data.
#' @return The P-corrected version of all estimators is also provided.
#' @references Cardoso, P., Rigal, F., Borges, P.A.V. & Carvalho, J.C. (2014) A new frontier in biodiversity inventory: a proposal for estimators of phylogenetic and functional diversity. Methods in Ecology and Evolution, 5: 452-461.
#' @references Chao, A. (1984) Nonparametric estimation of the number of classes in a population. Scandinavian Journal of Statistics, 11, 265-270.
#' @references Chao, A. (1987) Estimating the population size for capture-recapture data with unequal catchability. Biometrics 43, 783-791.
#' @references Colwell, R.K. & Coddington, J.A. (1994) Estimating terrestrial biodiversity through extrapolation. Phil. Trans. Roy. Soc. London B 345, 101-118.
#' @references Faith, D.P. (1992) Conservation evaluation and phylogenetic diversity. Biological Conservation, 61, 1-10.
#' @references Heltshe, J. & Forrester, N.E. (1983) Estimating species richness using the jackknife procedure. Biometrics, 39, 1-11.
#' @references Lopez, L.C.S., Fracasso, M.P.A., Mesquita, D.O., Palma, A.R.T. & Riul, P. (2012) The relationship between percentage of singletons and sampling effort: a new approach to reduce the bias of richness estimates. Ecological Indicators, 14, 164-169.
#' @references Petchey, O.L. & Gaston, K.J. (2002) Functional diversity (FD), species richness and community composition. Ecology Letters, 5, 402-411.
#' @references Petchey, O.L. & Gaston, K.J. (2006) Functional diversity: back to basics and looking forward. Ecology Letters, 9, 741-758.
#' @examples comm <- matrix(c(1,1,0,0,0,0,2,1,0,0,0,0,2,1,0,0,0,0,2,1), nrow = 4, ncol = 5, byrow = TRUE)
#' tree <- hclust(dist(c(1:5), method="euclidean"), method="average")
#' alpha.estimate(comm)
#' alpha.estimate(comm, tree)
#' alpha.estimate(comm, tree, func = "completeness")
#' @export
alpha.estimate <- function(comm, tree, func = "nonparametric"){
if (max(comm) == 1)
stop("No estimates are possible without abundance or incidence frequency data")
#convert traits to a tree if needed
if(!missing(tree) && (is.matrix(tree) || is.data.frame(tree) || is.vector(tree)))
tree = tree.build(tree)
#####function options:
#####nonparametric (TD/PD/FD with non-parametric estimators)
#####completeness (PD/FD with TD completeness correction)
func <- match.arg(func, c("nonparametric", "completeness"))
#####nonparametric (TD/PD/FD with non-parametric estimators)
switch(func, nonparametric = {
#first organize the data
if(!missing(tree)){
cleanData = clean(comm, tree)
comm = cleanData[[1]]
tree = cleanData[[2]]
}
#now let's go for what matters
results <- matrix(0,0,10)
for (s in 1:nrow(comm)){
data <- comm[s,,drop = FALSE]
obs <- sobs(data, tree)
n <- sum(data)
s1 <- srare(data, tree, 1)
s2 <- srare(data, tree, 2)
mb <- minBranch(data, tree)
j1ab <- jack1ab(obs, s1)
j1abP <- j1ab * pcorr(obs, s1)
j2ab <- jack2ab(obs, s1, s2)
j2abP <- j2ab * pcorr(obs, s1)
c1 <- chao(obs, s1, s2, mb)
c1P <- c1 * pcorr(obs, s1)
results <- rbind(results, c(n, obs, s1, s2, j1ab, j1abP, j2ab, j2abP, c1, c1P))
}
#####completeness (PD/FD with TD completeness correction)
}, completeness = {
if(is.vector(comm))
comm <- matrix(comm, nrow = 1)
comm <- as.matrix(comm)
if (missing(tree))
stop("Completeness option not available without a tree...")
results <- alpha.estimate(comm, tree, "nonparametric")
if(!is.null(tree$labels) && !is.null(colnames(comm))) ##if both tree and comm have species names, match and reorder species (columns) in comm
comm <- comm[,match(tree$labels, colnames(comm))]
tree <- xTree(tree)
obs <- matrix(0,nrow(comm),1)
for (s in 1:nrow(comm))
obs[s,1] <- obs[s,1] + sobs(comm[s,], tree)
for (i in 5:10)
results[,i] <- obs[,1] * (results[,i] / results[,2])
results[,2] <- obs[,1]
})
rownames(results) <- rownames(comm)
colnames(results) <- c("Ind", "Obs", "S1", "S2", "Jack1ab", "Jack1abP", "Jack2ab", "Jack2abP", "Chao1", "Chao1P")
return(results)
}
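#A minimal, illustrative sketch (not part of the package API) of the
#abundance-based estimators used above, assuming the standard formulas;
#the internal helpers (jack1ab, jack2ab, chao, pcorr) may apply further
#corrections, e.g. minBranch for PD/FD.
estimatorSketch <- function(abund){
  obs <- sum(abund > 0) #observed richness
  s1 <- sum(abund == 1) #singletons
  s2 <- sum(abund == 2) #doubletons
  c(Jack1ab = obs + s1,
    Jack2ab = obs + 2 * s1 - s2,
    Chao1 = obs + ifelse(s2 > 0, s1^2 / (2 * s2), s1 * (s1 - 1) / 2))
}
estimatorSketch(c(5, 3, 2, 1, 1)) #toy community with five species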
#' Rao quadratic entropy.
#' @description Rao quadratic entropy for Phylogenetic or Functional richness.
#' @param comm A sites x species matrix, with abundance data.
#' @param tree A phylo or hclust object (used only for PD or FD).
#' @param distance A dist object representing the phylogenetic or functional distance between species or alternatively a species x traits matrix or data.frame to calculate distances.
#' @param raref An integer specifying the number of individuals for rarefaction (individual based).
#' If raref < 1 no rarefaction is made.
#' If raref = 1 rarefaction is made by the minimum abundance among all sites.
#' If raref > 1 rarefaction is made by the abundance indicated.
#' If not specified, default is 0.
#' @param runs Number of resampling runs for rarefaction. If not specified, default is 100.
#' @details Rao quadratic entropy (Rao, 1982) measures diversity based on the abundance of species and the dissimilarity between them.
#' @return A matrix of sites x diversity values (either "Rao" OR "Mean, Median, Min, LowerCL, UpperCL and Max").
#' @references Rao, C.R. (1982). Diversity and dissimilarity coefficients: a unified approach. Theoretical Population Biology, 21: 24-43.
#' @examples
#' comm <- matrix(c(1,1,1,1,1,0,100,1,2,0), nrow = 2, ncol = 5, byrow = TRUE)
#' distance = dist(1:5)
#' rao(comm)
#' rao(comm, , distance)
#' rao(comm, hclust(distance), raref = 1)
#' @export
rao <- function(comm, tree, distance, raref = 0, runs = 100){
#convert traits to distance if needed
if(!missing(distance) && (is.matrix(distance) || is.data.frame(distance) || is.vector(distance)))
distance = gower(distance)
if(!missing(tree))
distance = cophenetic(tree)
else if(missing(distance))
distance = as.dist(matrix(1, ncol(comm), ncol(comm)))
#distance with max value of 1
distance = distance/max(distance)
nComm <- nrow(comm)
if(raref < 1){ # no rarefaction if 0 or negative
results <- matrix(0, nComm, 1)
for (s in 1:nComm){
results[s,1] <- raoobs(comm[s,], distance)
}
rownames(results) <- rownames(comm)
colnames(results) <- "Rao"
return (results)
}
if (raref == 1)
raref <- nMin(comm) # rarefy by minimum n among all communities
results <- matrix(0, nComm, 6)
for (s in 1:nComm){
res <- c()
for (r in 1:runs){
rarefiedComm = rrarefy(comm[s,], raref)
res <- c(res, raoobs(rarefiedComm, distance))
}
results[s,] <- c(mean(res), quantile(res, 0.5), min(res), quantile(res, 0.025), quantile(res, 0.975), max(res))
}
rownames(results) <- rownames(comm)
colnames(results) <- c("Mean", "Median", "Min", "LowerCL", "UpperCL", "Max")
return (results)
}
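#A minimal, illustrative sketch (not part of the package API) of Rao's
#quadratic entropy as computed by the internal raoobs:
#Q = sum_ij d_ij * p_i * p_j, with p the relative abundances and d
#rescaled to a maximum of 1, as done above.
raoSketch <- function(abund, d){
  d <- as.matrix(d) / max(d)
  p <- abund / sum(abund)
  as.numeric(t(p) %*% d %*% p)
}
raoSketch(c(1, 1, 1, 1, 1), dist(1:5))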
#' Hill numbers.
#' @description Hill numbers with possible rarefaction, multiple sites simultaneously.
#' @param comm A sites x species matrix, with abundance data.
#' @param q Hill number order: q(0) = species richness, q(1) ~ Shannon diversity, q(2) ~ Simpson diversity, and so on...
#' @param raref An integer specifying the number of individuals for rarefaction (individual based).
#' If raref < 1 no rarefaction is made.
#' If raref = 1 rarefaction is made by the minimum abundance among all sites.
#' If raref > 1 rarefaction is made by the abundance indicated.
#' If not specified, default is 0.
#' @param runs Number of resampling runs for rarefaction. If not specified, default is 100.
#' @details Hill numbers are based on the number of equally abundant species that would match the current diversity.
#' Depending on the single parameter q they give more or less weight to rare species (Jost 2002).
#' @return A matrix of sites x diversity values (either "Hill q" OR "Mean, Median, Min, LowerCL, UpperCL and Max").
#' @references Hill, M.O. (1973). Diversity and evenness: a unifying notation and its consequences. Ecology, 54: 427-432.
#' @examples comm <- matrix(c(0,0,1,1,0,0,100,1,0,0), nrow = 2, ncol = 5, byrow = TRUE)
#' hill(comm)
#' hill(comm, q = 1)
#' hill(comm, q = 4, 1)
#' @export
hill <- function(comm, q = 0, raref = 0, runs = 100){
nComm <- nrow(comm)
if(raref < 1){ # no rarefaction if 0 or negative
results <- matrix(0, nComm, 1)
for (s in 1:nComm){
results[s,1] <- hillobs(comm[s,], q)
}
rownames(results) <- rownames(comm)
colnames(results) <- paste("Hill", q)
return (results)
}
if (raref == 1)
raref <- nMin(comm) # rarefy by minimum n among all communities
results <- matrix(0, nComm, 6)
for (s in 1:nComm){
res <- c()
for (r in 1:runs){
rarefiedComm = rrarefy(comm[s,], raref)
res <- c(res, hillobs(rarefiedComm, q))
}
results[s,] <- c(mean(res), quantile(res, 0.5), min(res), quantile(res, 0.025), quantile(res, 0.975), max(res))
}
rownames(results) <- rownames(comm)
colnames(results) <- c("Mean", "Median", "Min", "LowerCL", "UpperCL", "Max")
return (results)
}
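#A minimal, illustrative sketch (not part of the package API) of the Hill
#number computed by the internal hillobs: qD = (sum_i p_i^q)^(1/(1-q)),
#with the q = 1 case taken as the limit exp(Shannon entropy).
hillSketch <- function(abund, q = 0){
  p <- abund[abund > 0] / sum(abund)
  if(q == 1)
    exp(-sum(p * log(p)))
  else
    sum(p^q)^(1 / (1 - q))
}
hillSketch(c(100, 1, 2), q = 2) #inverse Simpson for a toy community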
#' Mixture model.
#' @description Mixture model by Hilario et al. (subm.).
#' @param comm A sites x species matrix, with abundance data.
#' @param tree A phylo or hclust object (used only for PD or FD) or alternatively a species x traits matrix or data.frame to build a functional tree. Will only be used if q = 0, in which case phylogenetic or functional richness are calculated instead of species richness.
#' @param q Hill number order: q(0) = species richness, q(1) ~ Shannon diversity, q(2) ~ Simpson diversity.
#' @param precision Precision of the proportion of each habitat type to be tested.
#' @param replace Boolean indicating whether simulations should be with or without (default) replacement.
#' @param alpha alpha value for significance level.
#' @param param Boolean (T/F) indicating whether values are calculated with a parametric (TRUE) or non-parametric (FALSE) method. The latter is preferable when the distribution of estimated values is not normal.
#' @param runs Number of runs for the bootstrap providing confidence limits.
#' @details A tool to assess biodiversity in landscapes containing varying proportions of n environments.
#' @return A matrix with expected diversity at each proportion of different habitats in a landscape.
#' @references Chao et al. (2019) Proportional mixture of two rarefaction/extrapolation curves to forecast biodiversity changes under landscape transformation. Ecology Letters, 22: 1913-1922. https://doi.org/10.1111/ele.13322
#' @references Hilario et al. (subm.) Function ‘mixture’: A new tool to quantify biodiversity change under landscape transformation.
#' @author Renato Hilario & Pedro Cardoso
#' @examples comm <- matrix(c(20,20,20,20,20,9,1,0,0,0,1,1,1,1,1), nrow = 3, ncol = 5, byrow = TRUE)
#' tree = hclust(dist(1:5))
#'
#' hill(comm)
#' alpha(comm, tree)
#'
#' mixture(comm, runs = 10)
#' mixture(comm, tree, replace = TRUE, runs = 10)
#' @export
mixture <- function(comm, tree, q = 0, precision = 0.1, replace = TRUE, alpha = 0.05, param = TRUE, runs = 1000){
if(is.null(colnames(comm)))
colnames(comm) = paste0("Sp", 1:ncol(comm))
if(is.null(rownames(comm)))
rownames(comm) = paste0("Hab", 1:nrow(comm))
#convert traits to a tree if needed
if(!missing(tree) && (is.matrix(tree) || is.data.frame(tree) || is.vector(tree)))
tree = tree.build(tree)
#create matrix of different habitat proportions
prop = expand.grid(as.list(as.data.frame(matrix(rep(seq(0, 1, precision), nrow(comm)), nrow = (1/precision) + 1, ncol = nrow(comm)))))
prop = prop[rowSums(prop) == 1, ]
colnames(prop) = rownames(comm)
rownames(prop) = 1:nrow(prop)
res = matrix(NA, nrow = nrow(prop), ncol = runs)
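#for each combination of habitat proportions, resample individuals from every habitat and compute the diversity of the pooled community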
for(i in 1:nrow(prop)){
for(j in 1:runs){
abund = remix(comm, round(rowSums(comm) * unlist(prop[i, ])), replace = replace)
abund = matrix(colSums(abund), nrow = 1)
if(missing(tree))
res[i, j] = BAT::hill(abund, q)
else
res[i, j] = alpha(abund, tree)
}
}
cl = c(alpha / 2, 0.5, 1 - alpha / 2)
if(param){
res = t(apply(res, 1, function(x) c(mean(x) + sd(x) * qnorm(cl))))
colnames(res) = cl
} else {
res = t(apply(res, 1, quantile, cl))
}
return(cbind(prop, res))
}
#' Beta diversity (Taxon, Phylogenetic or Functional Diversity - TD, PD, FD).
#' @description Beta diversity with possible rarefaction, multiple sites simultaneously.
#' @param comm A sites x species matrix, with either abundance or incidence data.
#' @param tree A phylo or hclust object (used only for PD or FD) or alternatively a species x traits matrix or data.frame to build a functional tree.
#' @param func Partial match indicating whether the Jaccard or Soerensen family of beta diversity measures should be used. If not specified, default is Jaccard.
#' @param abund A boolean (T/F) indicating whether abundance data should be used or converted to incidence before analysis.
#' @param raref An integer specifying the number of individuals for rarefaction (individual based).
#' If raref < 1 no rarefaction is made.
#' If raref = 1 rarefaction is made by the minimum abundance among all sites.
#' If raref > 1 rarefaction is made by the abundance indicated.
#' If not specified, default is 0.
#' @param runs Number of resampling runs for rarefaction. If not specified, default is 100.
#' @param comp Boolean indicating whether beta diversity components (shared and unique fractions) should be returned.
#' @details The beta diversity measures used here follow the partitioning framework independently developed by Podani & Schmera (2011) and Carvalho et al. (2012)
#' and later expanded to PD and FD by Cardoso et al. (2014), where Btotal = Brepl + Brich.
#' Btotal = total beta diversity, reflecting both species replacement and loss/gain;
#' Brepl = beta diversity explained by replacement of species alone; Brich = beta diversity explained by species loss/gain (richness differences) alone.
#' PD and FD are calculated based on a tree (hclust or phylo object, no need to be ultrametric). The path to the root of the tree is always included in calculations of PD and FD.
#' The number and order of species in comm must be the same as in tree.
#' The rarefaction option is useful to compare communities with much different numbers of individuals sampled, which might bias diversity comparisons (Gotelli & Colwell 2001).
#' @return Three distance matrices between sites, one for each of the three beta diversity measures (either "Obs" OR "Mean, Median, Min, LowerCL, UpperCL and Max").
#' @references Cardoso, P., Rigal, F., Carvalho, J.C., Fortelius, M., Borges, P.A.V., Podani, J. & Schmera, D. (2014) Partitioning taxon, phylogenetic and functional beta diversity into replacement and richness difference components. Journal of Biogeography, 41, 749-761.
#' @references Carvalho, J.C., Cardoso, P. & Gomes, P. (2012) Determining the relative roles of species replacement and species richness differences in generating beta-diversity patterns. Global Ecology and Biogeography, 21, 760-771.
#' @references Gotelli, N.J. & Colwell, R.K. (2001) Quantifying biodiversity: procedures and pitfalls in the measurement and comparison of species richness. Ecology Letters, 4, 379-391.
#' @references Podani, J. & Schmera, D. (2011) A new conceptual and methodological framework for exploring and explaining pattern in presence-absence data. Oikos, 120, 1625-1638.
#' @examples comm <- matrix(c(2,2,0,0,0,1,1,0,0,0,0,2,2,0,0,0,0,1,2,2), nrow = 4, ncol = 5, byrow = TRUE)
#' tree <- hclust(dist(c(1:5), method="euclidean"), method="average")
#' beta(comm)
#' beta(comm, abund = FALSE, comp = TRUE)
#' beta(comm, tree)
#' beta(comm, raref = 1)
#' beta(comm, tree, "s", abund = FALSE, raref = 2)
#' @export
beta <- function(comm, tree, func = "jaccard", abund = TRUE, raref = 0, runs = 100, comp = FALSE){
#convert traits to a tree if needed
if(!missing(tree) && (is.matrix(tree) || is.data.frame(tree) || is.vector(tree)))
tree = tree.build(tree)
#first organize the data
if(!missing(tree)){
cleanData = clean(comm, tree)
comm = cleanData[[1]]
tree = cleanData[[2]]
}
#now let's go for what matters
nComm <- nrow(comm)
if(raref < 1){ # no rarefaction if 0 or negative
results <- array(0, dim=c(nComm, nComm, 3))
rownames(results) = colnames(results) = rownames(comm)
comps <- results
for (i in 1:(nComm-1)){
for (j in (i+1):nComm){
commBoth <- as.matrix(rbind(comm[i,], comm[j,]))
betaValues <- betaObs(commBoth, tree, func, abund, comp)
results[j,i,] <- unlist(betaValues)[1:3]
if(comp)
comps[j,i,] <- unlist(betaValues)[4:6]
}
}
results <- list(Btotal = as.dist(results[,,1]),Brepl = as.dist(results[,,2]),Brich = as.dist(results[,,3]))
if (comp){
results$Shared = round(as.dist(comps[,,1]),3)
results$Unique_to_Cols = round(as.dist(comps[,,2]),3)
results$Unique_to_Rows = round(as.dist(comps[,,3]),3)
}
return(results)
}
if (raref == 1)
raref <- nMin(comm) # rarefy by minimum n among all communities
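#results array: site x site x 3 beta measures (Btotal, Brepl, Brich) x 6 summary statistics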
results <- array(0, dim=c(nComm, nComm, 3, 6))
for (i in 1:(nComm-1)){
for (j in (i+1):nComm){
run <- matrix(0, runs, 3)
for (r in 1:runs){
commBoth <- as.matrix(rbind(rrarefy(comm[i,], raref), rrarefy(comm[j,], raref)))
betaValues <- betaObs(commBoth, tree, func, abund)
run[r,1] <- betaValues$Btotal
run[r,2] <- betaValues$Brepl
run[r,3] <- betaValues$Brich
}
for (b in 1:3){
results[j,i,b,1] <- mean(run[,b])
results[j,i,b,2] <- quantile(run[,b], 0.5)
results[j,i,b,3] <- min(run[,b])
results[j,i,b,4] <- quantile(run[,b], 0.025)
results[j,i,b,5] <- quantile(run[,b], 0.975)
results[j,i,b,6] <- max(run[,b])
}
}
}
results.total <- list(Btotal.mean = as.dist(results[,,1,1]), Btotal.median = as.dist(results[,,1,2]), Btotal.min = as.dist(results[,,1,3]), Btotal.lowCL = as.dist(results[,,1,4]), Btotal.upCL = as.dist(results[,,1,5]), Btotal.max = as.dist(results[,,1,6]))
results.repl <- list(Brepl.mean = as.dist(results[,,2,1]), Brepl.median = as.dist(results[,,2,2]), Brepl.min = as.dist(results[,,2,3]), Brepl.lowCL = as.dist(results[,,2,4]), Brepl.upCL = as.dist(results[,,2,5]), Brepl.max = as.dist(results[,,2,6]))
results.rich <- list(Brich.mean = as.dist(results[,,3,1]), Brich.median = as.dist(results[,,3,2]), Brich.min = as.dist(results[,,3,3]), Brich.lowCL = as.dist(results[,,3,4]), Brich.upCL = as.dist(results[,,3,5]), Brich.max = as.dist(results[,,3,6]))
results <- c(results.total, results.repl, results.rich)
return (results)
}
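#A minimal, illustrative sketch (not part of the package API) of the
#Jaccard-family partitioning used above, for incidence data and in the
#notation of Carvalho et al. (2012): a = shared species, u1 and u2 =
#species unique to each of the two sites.
betaSketch <- function(x, y){
  a <- sum(x > 0 & y > 0)
  u1 <- sum(x > 0 & y == 0)
  u2 <- sum(x == 0 & y > 0)
  tot <- a + u1 + u2 #Jaccard denominator; the Soerensen family uses 2a + u1 + u2
  c(Btotal = (u1 + u2) / tot,
    Brepl = 2 * min(u1, u2) / tot,
    Brich = abs(u1 - u2) / tot)
}
betaSketch(c(2, 2, 0, 0, 0), c(0, 0, 1, 2, 2)) #note Btotal = Brepl + Brich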
#' Beta diversity accumulation curves.
#' @description Beta diversity between two sites with accumulation of sampling units.
#' @param comm1 A sampling units x species matrix for the first site, with either abundance or incidence data.
#' @param comm2 A sampling units x species matrix for the second site, with either abundance or incidence data.
#' @param tree A phylo or hclust object (used only for PD or FD) or alternatively a species x traits matrix or data.frame to build a functional tree.
#' @param func Partial match indicating whether the Jaccard or Soerensen family of beta diversity measures should be used. If not specified, default is jaccard.
#' @param abund A boolean (T/F) indicating whether abundance data should be used or converted to incidence before analysis.
#' @param runs Number of random permutations to be made to the sampling order. If not specified, default is 100.
#' @param prog Present a text progress bar in the R console.
#' @details As widely recognized for species richness, beta diversity is also biased when communities are undersampled.
#' Beta diversity accumulation curves have been proposed by Cardoso et al. (2009) to test if beta diversity has approached an asymptote when comparing two undersampled sites.
#' The beta diversity measures used here follow the partitioning framework independently developed by Podani & Schmera (2011) and Carvalho et al. (2012)
#' and later expanded to PD and FD by Cardoso et al. (2014), where Btotal = Brepl + Brich.
#' Btotal = total beta diversity, reflecting both species replacement and loss/gain;
#' Brepl = beta diversity explained by replacement of species alone;
#' Brich = beta diversity explained by species loss/gain (richness differences) alone.
#' PD and FD are calculated based on a tree (hclust or phylo object, no need to be ultrametric). The path to the root of the tree is always included in calculations of PD and FD.
#' The number and order of species in comm1 and comm2 must be the same as in tree. Also, the number of sampling units should be similar in both sites.
#' @return A matrix of sampling units x beta diversity values (Sampl, Btotal, Brepl and Brich), with values averaged across the randomization runs.
#' @references Cardoso, P., Borges, P.A.V. & Veech, J.A. (2009) Testing the performance of beta diversity measures based on incidence data: the robustness to undersampling. Diversity and Distributions, 15, 1081-1090.
#' @references Cardoso, P., Rigal, F., Carvalho, J.C., Fortelius, M., Borges, P.A.V., Podani, J. & Schmera, D. (2014) Partitioning taxon, phylogenetic and functional beta diversity into replacement and richness difference components. Journal of Biogeography, 41, 749-761.
#' @references Carvalho, J.C., Cardoso, P. & Gomes, P. (2012) Determining the relative roles of species replacement and species richness differences in generating beta-diversity patterns. Global Ecology and Biogeography, 21, 760-771.
#' @references Podani, J. & Schmera, D. (2011) A new conceptual and methodological framework for exploring and explaining pattern in presence-absence data. Oikos, 120, 1625-1638.
#' @examples comm1 <- matrix(c(2,2,0,0,0,1,1,0,0,0,0,2,2,0,0,0,0,0,2,2), nrow = 4, byrow = TRUE)
#' comm2 <- matrix(c(1,1,0,0,0,0,2,1,0,0,0,0,2,1,0,0,0,0,2,1), nrow = 4, byrow = TRUE)
#' tree <- hclust(dist(c(1:5), method="euclidean"), method="average")
#' beta.accum(comm1, comm2)
#' beta.accum(comm1, comm2, func = "Soerensen")
#' beta.accum(comm1, comm2, tree)
#' beta.accum(comm1, comm2, abund = FALSE)
#' beta.accum(comm1, comm2, tree,, FALSE)
#' @export
beta.accum <- function(comm1, comm2, tree, func = "jaccard", abund = TRUE, runs = 100, prog = TRUE){
if(nrow(comm1) < 2 || nrow(comm1) != nrow(comm2))
stop("Both communities should have multiple and the same number of sampling units")
comm1 <- as.matrix(comm1)
comm2 <- as.matrix(comm2)
#convert traits to a tree if needed
if(!missing(tree) && (is.matrix(tree) || is.data.frame(tree) || is.vector(tree)))
tree = tree.build(tree)
#first organize the data
if(!missing(tree)){
cleanData = clean(comm1, tree)
comm1 = cleanData[[1]]
cleanData = clean(comm2, tree)
comm2 = cleanData[[1]]
tree = cleanData[[2]]
}
#now let's go for what matters
nSamples <- nrow(comm1)
results <- matrix(0,nSamples, 4)
colnames(results) <- c("Sampl", "Btotal", "Brepl", "Brich")
if (prog) pb <- txtProgressBar(0, runs, style = 3)
for (r in 1:runs){
comm1 <- comm1[sample(nSamples),, drop=FALSE] #shuffle sampling units of first community
comm2 <- comm2[sample(nSamples),, drop=FALSE] #shuffle sampling units of second community
for (q in 1:nSamples){
commBoth <- as.matrix(rbind(colSums(comm1[1:q,,drop=FALSE]),colSums(comm2[1:q,,drop=FALSE])))
results[q,1] <- results[q,1] + q
betaValues <- betaObs(commBoth, tree, func, abund)
results[q,2] <- results[q,2] + betaValues$Btotal
results[q,3] <- results[q,3] + betaValues$Brepl
results[q,4] <- results[q,4] + betaValues$Brich
}
if (prog) setTxtProgressBar(pb, r)
}
if (prog) close(pb)
results <- results/runs
return(results)
}
#' Beta diversity among multiple communities.
#' @description Beta diversity with possible rarefaction - multiple sites measure calculated as the average or variance of all pairwise values.
#' @param comm A sites x species matrix, with either abundance or incidence data.
#' @param tree A phylo or hclust object (used only for PD or FD) or alternatively a species x traits matrix or data.frame to build a functional tree.
#' @param func Partial match indicating whether the Jaccard or Soerensen family of beta diversity measures should be used. If not specified, default is Jaccard.
#' @param abund A boolean (T/F) indicating whether abundance data should be used (TRUE) or converted to incidence (FALSE) before analysis.
#' @param raref An integer specifying the number of individuals for rarefaction (individual based).
#' If raref < 1 no rarefaction is made.
#' If raref = 1 rarefaction is made by the minimum abundance among all sites.
#' If raref > 1 rarefaction is made by the abundance indicated.
#' If not specified, default is 0.
#' @param runs Number of resampling runs for rarefaction. If not specified, default is 100.
#' @details Beta diversity of multiple sites simultaneously is calculated as either the average or the variance among all pairwise comparisons (Legendre, 2014).
#' The beta diversity measures used here follow the partitioning framework independently developed by Podani & Schmera (2011) and Carvalho et al. (2012)
#' and later expanded to PD and FD by Cardoso et al. (2014), where Btotal = Brepl + Brich.
#' Btotal = total beta diversity, reflecting both species replacement and loss/gain;
#' Brepl = beta diversity explained by replacement of species alone;
#' Brich = beta diversity explained by species loss/gain (richness differences) alone.
#' PD and FD are calculated based on a tree (hclust or phylo object, no need to be ultrametric). The path to the root of the tree is always included in calculations of PD and FD.
#' The number and order of species in comm must be the same as in tree.
#' @return A matrix of beta measures x diversity values (average and variance).
#' @references Cardoso, P., Rigal, F., Carvalho, J.C., Fortelius, M., Borges, P.A.V., Podani, J. & Schmera, D. (2014) Partitioning taxon, phylogenetic and functional beta diversity into replacement and richness difference components. Journal of Biogeography, 41, 749-761.
#' @references Carvalho, J.C., Cardoso, P. & Gomes, P. (2012) Determining the relative roles of species replacement and species richness differences in generating beta-diversity patterns. Global Ecology and Biogeography, 21, 760-771.
#' @references Legendre, P. (2014) Interpreting the replacement and richness difference components of beta diversity. Global Ecology and Biogeography, 23: 1324-1334.
#' @references Podani, J. & Schmera, D. (2011) A new conceptual and methodological framework for exploring and explaining pattern in presence-absence data. Oikos, 120, 1625-1638.
#' @examples comm <- matrix(c(2,2,0,0,0,1,1,0,0,0,0,2,2,0,0,0,0,0,2,2), nrow = 4, ncol = 5, byrow = TRUE)
#' tree <- hclust(dist(c(1:5), method="euclidean"), method="average")
#' beta.multi(comm)
#' beta.multi(comm, func = "Soerensen")
#' beta.multi(comm, tree)
#' beta.multi(comm, raref = 1)
#' beta.multi(comm, tree, "s", FALSE, raref = 2)
#' @export
beta.multi <- function(comm, tree, func = "jaccard", abund = TRUE, raref = 0, runs = 100){
pairwise <- beta(comm, tree, func, abund, raref, runs)
Btotal.avg <- mean(pairwise$Btotal)
Brepl.avg <- mean(pairwise$Brepl)
Brich.avg <- mean(pairwise$Brich)
Btotal.var <- sum(pairwise$Btotal)/(nrow(comm)*(nrow(comm)-1)) #n = number of sites (Legendre 2014)
Brepl.var <- sum(pairwise$Brepl)/(nrow(comm)*(nrow(comm)-1))
Brich.var <- sum(pairwise$Brich)/(nrow(comm)*(nrow(comm)-1))
results <- matrix(c(Btotal.avg, Brepl.avg, Brich.avg, Btotal.var, Brepl.var, Brich.var), nrow = 3, ncol = 2)
colnames(results) <- c("Average", "Variance")
rownames(results) <- c("Btotal", "Brepl", "Brich")
return(results)
}
#' Beta diversity evenness (Taxon, Phylogenetic or Functional Diversity - TD, PD, FD).
#' @description Difference of evenness between pairs of sites.
#' @param comm A sites x species matrix, with either abundance or incidence data.
#' @param tree A phylo or hclust object (used only for PD or FD) or alternatively a species x traits matrix or data.frame to build a functional tree.
#' @param distance A dist or matrix object representing the phylogenetic or functional distance between species. If both tree and distance are missing, taxonomic evenness is calculated.
#' @param method Calculate evenness using "expected" values (default) or values based on "contribution" of species to the tree.
#' @param func Calculate evenness using "Camargo" (default) or "Bulla" index.
#' @param abund A boolean (T/F) indicating whether evenness should be calculated using abundance data.
#' @details This measure is simply the pairwise difference of evenness calculated based on the index of Camargo (1993) or Bulla (1994) using the values of both species abundances and edge lengths in the tree (if PD/FD).
#' @details If no tree or distance is provided the result is the original index.
#' @return Distance matrix between sites.
#' @references Bulla, L. (1994) An index of evenness and its associated diversity measure. Oikos, 70: 167-171.
#' @references Camargo, J.A. (1993) Must dominance increase with the number of subordinate species in competitive interactions? Journal of Theoretical Biology, 161: 537-542.
#' @examples comm <- matrix(c(1,2,0,0,0,1,1,0,0,0,0,2,2,0,0,1,1,1,1,100), nrow = 4, byrow = TRUE)
#' distance <- dist(c(1:5), method = "euclidean")
#' tree <- hclust(distance, method = "average")
#' beta.evenness(comm)
#' beta.evenness(comm, tree)
#' beta.evenness(comm, tree, method = "contribution")
#' beta.evenness(comm, tree, abund = FALSE)
#' @export
beta.evenness <- function(comm, tree, distance, method = "expected", func = "camargo", abund = TRUE){
return(dist(evenness(comm, tree, distance, method, func, abund)))
}
#' Phylogenetic/functional originality of species or individuals.
#' @description Average dissimilarity between a species or individual and all others in a community.
#' @param comm A sites x species matrix, with either abundance or incidence data. If missing, the originality using the full tree or distance matrix is calculated.
#' @param tree A phylo or hclust object (used only for PD or FD) or alternatively a species x traits matrix or data.frame to build a functional tree.
#' @param distance A dist object representing the phylogenetic or functional distance between species.
#' @param abund A boolean (T/F) indicating whether originality should be calculated per individual (T) or species (F).
#' @param relative A boolean (T/F) indicating whether originality should be relative to the maximum distance between any two species in the tree or distance matrix.
#' @details This is the originality measure of Pavoine et al. (2005) without replacement.
#' @return A matrix of sites x species values.
#' @references Pavoine, S., Ollier, S. & Dufour, A.-B. (2005) Is the originality of a species measurable? Ecology Letters, 8: 579-586.
#' @examples comm <- matrix(c(1,2,0,0,0,1,1,0,0,0,0,2,2,0,0,0,0,1,1,1), nrow = 4, byrow = TRUE)
#' distance <- dist(c(1:5), method="euclidean")
#' tree = hclust(distance)
#'
#' originality(tree = tree)
#' originality(distance = distance)
#' originality(comm, tree)
#' originality(comm, tree, abund = TRUE)
#' originality(comm, tree, relative = TRUE)
#' @export
originality <- function(comm, tree, distance, abund = FALSE, relative = FALSE){
#convert traits to a tree if needed
if(!missing(tree) && (is.matrix(tree) || is.data.frame(tree) || is.vector(tree)))
tree = tree.build(tree)
if(missing(comm)){
if(!missing(distance))
comm = rep(1, attributes(distance)$Size)
if(!missing(tree))
comm = rep(1, length(tree$order))
}
if(is.vector(comm))
comm <- matrix(comm, nrow = 1)
if(!missing(tree)){
comm = reorderComm(comm, tree)
distance <- cophenetic(tree) #cophenetic distances of species
}else if(missing(distance)){
return(warning("Need one of tree or distance!"))
}
distance <- as.matrix(distance) #convert distance to matrix
if(!abund){
comm <- ifelse(comm > 0, 1, 0)
}
original <- matrix(NA, nrow(comm), ncol(comm))
colnames(original) <- colnames(comm)
rownames(original) <- rownames(comm)
for (r in 1:nrow(comm)){ #cycle through all sites/samples
present <- which(comm[r,] > 0) #which species exist in this site
if(abund)
n <- sum(comm[r, present]) #how many individuals are present in this site
else
n <- length(present) #how many species are present in this site
proportion <- comm[r,present]/sum(comm[r,present]) #proportion incidence/abundance of species in this site
for (c in present){
original[r,c] <- sum(distance[present,c] * proportion, na.rm = TRUE)
original[r,c] <- original[r,c] * n / (n - 1) #correct not to take distance to self into account
}
}
if(relative)
original <- original / max(distance, na.rm=T)
return(original)
}
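#A minimal, illustrative sketch (not part of the package API) of originality
#for a single community with incidence data: the average distance from each
#species present to all other species present.
originalitySketch <- function(present, d){
  d <- as.matrix(d)[present, present]
  rowSums(d) / (length(present) - 1)
}
originalitySketch(c(1, 3, 5), dist(1:5))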
#' Phylogenetic/functional uniqueness of species.
#' @description Dissimilarity between each species and the single closest in a community.
#' @param comm A sites x species matrix, with either abundance or incidence data. If missing, the uniqueness using the full tree or distance matrix is calculated.
#' @param tree A phylo or hclust object (used only for PD or FD) or alternatively a species x traits matrix or data.frame to build a functional tree.
#' @param distance A dist object representing the phylogenetic or functional distance between species.
#' @param relative A boolean (T/F) indicating whether uniqueness should be relative to the maximum distance between any two species in the tree or distance matrix.
#' @details This is equivalent to the originality measure of Mouillot et al. (2013).
#' @return A matrix of sites x species values.
#' @references Mouillot, D., Graham, N.A., Villeger, S., Mason, N.W. & Bellwood, D.R. (2013) A functional approach reveals community responses to disturbances. Trends in Ecology and Evolution, 28: 167-177.
#' @examples comm <- matrix(c(1,2,0,0,0,1,1,0,0,0,0,2,2,0,0,0,0,1,0,1), nrow = 4, byrow = TRUE)
#' distance <- dist(c(1:5), method="euclidean")
#' tree <- hclust(distance, method="average")
#'
#' uniqueness(tree = tree)
#' uniqueness(distance = distance)
#' uniqueness(comm, tree)
#' @export
uniqueness <- function(comm, tree, distance, relative = FALSE){
#convert traits to a tree if needed
if(!missing(tree) && (is.matrix(tree) || is.data.frame(tree) || is.vector(tree)))
tree = tree.build(tree)
if(missing(comm)){
if(!missing(distance))
comm = rep(1, attributes(distance)$Size)
if(!missing(tree))
comm = rep(1, length(tree$order))
}
if(is.vector(comm))
comm <- matrix(comm, nrow = 1)
if(!missing(tree)){
comm = reorderComm(comm, tree)
distance <- cophenetic(tree) #cophenetic distances of species
}else if(missing(distance)){
return(warning("Need one of tree or distance!"))
}
distance <- as.matrix(distance) #convert distance to matrix
comm <- ifelse(comm > 0, 1, 0)
for(i in 1:nrow(distance))
distance[i,i] = NA
unique <- matrix(NA, nrow(comm), ncol(comm))
colnames(unique) <- colnames(comm)
rownames(unique) <- rownames(comm)
for (r in 1:nrow(comm)){ #cycle through all sites/samples
present <- which(comm[r,]>0) #which species exist in this site
for (c in present){
unique[r,c] <- min(distance[present,c], na.rm=T)
}
}
if(relative){
unique <- unique / max(distance, na.rm = T)
}
return(unique)
}
#' Contribution of species or individuals to total phylogenetic/functional diversity.
#' @description Contribution of each species or individual to the total PD or FD of a number of communities.
#' @param comm A sites x species matrix, with either abundance or incidence data. If missing, the contribution of all species to the full tree is calculated.
#' @param tree A phylo or hclust object (used only for PD or FD) or alternatively a species x traits matrix or data.frame to build a functional tree.
#' @param abund A boolean (T/F) indicating whether contribution should be weighted by abundance of each species.
#' @param relative A boolean (T/F) indicating whether contribution should be relative to total PD or FD (proportional contribution per individual or species). If FALSE, the sum of contributions for each site is equal to total PD/FD, if TRUE it is 1.
#' @details Contribution is equivalent to the evolutionary distinctiveness index (ED) of Isaac et al. (2007) if done by species and to the abundance weighted evolutionary distinctiveness (AED) of Cadotte et al. (2010) if done by individual.
#' @return A matrix of sites x species values (or values per species if no comm is given).
#' @references Isaac, N.J.B., Turvey, S.T., Collen, B., Waterman, C. & Baillie, J.E.M. (2007) Mammals on the EDGE: conservation priorities based on threat and phylogeny. PLoS One, 2: e296.
#' @references Cadotte, M.W., Davies, T.J., Regetz, J., Kembel, S.W., Cleland, E. & Oakley, T.H. (2010) Phylogenetic diversity metrics for ecological communities: integrating species richness, abundance and evolutionary history. Ecology Letters, 13: 96-105.
#' @examples comm <- matrix(c(1,2,0,0,0,1,1,0,0,0,0,2,2,0,0,0,0,1,0,1), nrow = 4, byrow = TRUE)
#' tree = tree.build(1:5)
#'
#' contribution(comm, tree)
#' contribution(comm, tree, TRUE)
#' contribution(comm, tree, relative = TRUE)
#' @export
contribution <- function(comm, tree, abund = FALSE, relative = FALSE){
#convert traits to a tree if needed
if(!missing(tree) && (is.matrix(tree) || is.data.frame(tree) || is.vector(tree)))
tree = tree.build(tree)
if(missing(comm))
comm = rep(1, length(tree$order))
if(is.vector(comm))
comm <- matrix(comm, nrow = 1)
comm <- as.matrix(comm)
if(!abund)
comm <- ifelse(comm > 0, 1, 0)
if(!missing(tree)){
if(is(tree, "phylo")){
nEdges <- length(tree$edge.length)
if(!is.null(tree$tip.label) && !is.null(colnames(comm))) ##if both tree and comm have species names, match and reorder species (columns) in comm
comm <- comm[,match(tree$tip.label, colnames(comm))]
} else {
nEdges <- length(tree$merge)
if(!is.null(tree$labels) && !is.null(colnames(comm))) ##if both tree and comm have species names, match and reorder species (columns) in comm
comm <- comm[,match(tree$labels, colnames(comm))]
}
} else {
  tree = nj(as.dist(matrix(1,ncol(comm),ncol(comm))))
  nEdges <- length(tree$edge.length)
}
contrib <- matrix(0, nrow(comm), ncol(comm))
colnames(contrib) <- colnames(comm)
rownames(contrib) <- rownames(comm)
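#apportion the length of each branch among the species (or individuals) crossing it, then sum these shares per species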
for (i in 1:nrow(comm)){ #cycle through all sites/samples
dataSample <- prep(comm[i,], xTree(tree), TRUE)
valueBranch <- dataSample$lenBranch / dataSample$sampleBranch
valueBranch <- ifelse(valueBranch == Inf, 0, valueBranch)
valueBranch <- ifelse(is.na(valueBranch), 0, valueBranch)
for (j in 1:ncol(comm)){ #cycle through all species
for (k in 1:nEdges){ #cycle through all branches
contrib[i,j] <- contrib[i,j] + (dataSample$speciesBranch[j,k] * valueBranch[k] * comm[i,j])
}
}
}
if(relative){
for(r in 1:nrow(comm))
contrib[r,] <- contrib[r,] / c(alpha(comm[r,], tree))
}
if(abund){ #contribution weighted by abundance
for (r in 1:nrow(comm)){ #cycle through all sites/samples
relAbund = comm[r,] / sum(comm[r,])
contrib[r,] = (contrib[r,] * relAbund) / sum(contrib[r,] * relAbund)
}
}
contrib[comm[] == 0] = NA
return(contrib)
}
#' Phylogenetic/functional dispersion of species or individuals.
#' @description Average dissimilarity between any two species or individuals randomly chosen in a community.
#' @param comm A sites x species matrix, with either abundance or incidence data. If missing, the dispersion using the full tree or distance matrix is calculated.
#' @param tree A phylo or hclust object (used only for PD or FD) or alternatively a species x traits matrix or data.frame to build a functional tree.
#' @param distance A dist object representing the phylogenetic or functional distance between species.
#' @param func Calculate dispersion using originality (default), uniqueness or contribution.
#' @param abund A boolean (T/F) indicating whether dispersion should be calculated using individuals (T) or species (F).
#' @param relative A boolean (T/F) indicating whether dispersion should be relative to the maximum distance between any two species in the tree or distance matrix.
#' @details If abundance data is used and a tree is given, dispersion is the quadratic entropy of Rao (1982).
#' If abundance data is not used but a tree is given, dispersion is the phylogenetic dispersion measure of Webb et al. (2002).
#' @return A vector of values per site (or a single value if no comm is given).
#' @references Rao, C.R. (1982) Diversity and dissimilarity coefficients: a unified approach. Theoretical Population Biology, 21: 24-43.
#' @references Webb, C.O., Ackerly, D.D., McPeek, M.A. & Donoghue, M.J. (2002) Phylogenies and community ecology. Annual Review of Ecology and Systematics, 33: 475-505.
#' @examples comm <- matrix(c(1,2,0,0,0,1,1,0,0,0,0,2,2,0,0,0,0,1,1,1), nrow = 4, byrow = TRUE)
#' distance <- dist(c(1:5), method="euclidean")
#' tree <- hclust(distance, method="average")
#' dispersion(tree = tree)
#' dispersion(distance = distance)
#' dispersion(comm, tree)
#' dispersion(comm, tree, abund = FALSE)
#' dispersion(comm, tree, abund = FALSE, relative = FALSE)
#' @export
dispersion <- function(comm, tree, distance, func = "originality", abund = TRUE, relative = TRUE){
#convert traits to a tree if needed
if(!missing(tree) && (is.matrix(tree) || is.data.frame(tree) || is.vector(tree)))
tree = tree.build(tree)
if(missing(comm)){
if(!missing(distance))
comm = rep(1, attributes(distance)$Size)
if(!missing(tree))
comm = rep(1, length(tree$order))
}
if(is.vector(comm))
comm <- matrix(comm, nrow = 1)
if(!abund)
comm = ifelse(comm > 0, 1, 0)
#reorder comm
if(!missing(tree))
comm = reorderComm(comm, tree)
if(func == "originality")
funcValue <- originality(comm, tree, distance, abund, relative)
else if (func == "uniqueness")
funcValue <- uniqueness(comm, tree, distance, relative)
else if (func == "contribution")
funcValue <- contribution(comm, tree, abund, relative)
else
stop(sprintf("Function %s not recognized.", func))
disp <- rep(0,nrow(comm))
for (r in 1:nrow(comm)){ #cycle through all sites/samples
present <- which(comm[r,]>0) #which species exist in this site
proportion <- comm[r,present]/sum(comm[r,present]) #proportion incidence/abundance of species in this site
disp[r] <- sum(funcValue[r,present]*proportion)
}
disp = matrix(disp, ncol = 1)
rownames(disp) <- rownames(comm)
colnames(disp) <- "Dispersion"
return(disp)
}
#' Taxonomic/phylogenetic/functional evenness of species or individuals.
#' @description Regularity of abundances and distances (if PD/FD) between species in a community.
#' @param comm A sites x species matrix, with either abundance or incidence data. If missing, the evenness using the full tree or distance matrix is calculated.
#' @param tree A phylo or hclust object (used only for PD or FD) or alternatively a species x traits matrix or data.frame to build a functional tree.
#' @param distance A dist or matrix object representing the phylogenetic or functional distance between species. If both tree and distance are missing, taxonomic evenness is calculated.
#' @param method Calculate evenness using "expected" values (default) or values based on "contribution" of species to the tree.
#' @param func Calculate evenness using "Camargo" (default) or "Bulla" index.
#' @param abund A boolean (T/F) indicating whether evenness should be calculated using abundance data.
#' @details Evenness is calculated based on the index of Camargo (1993) or Bulla (1994) using the values of both species abundances and edge lengths in the tree (if PD/FD).
#' @details If no tree or distance is provided the result is the original index.
#' @details If any site has < 2 species its value will be NA.
#' @return A vector of values per site (or a single value if no comm is given).
#' @references Bulla, L. (1994) An index of evenness and its associated diversity measure. Oikos, 70: 167-171.
#' @references Camargo, J.A. (1993) Must dominance increase with the number of subordinate species in competitive interactions? Journal of Theoretical Biology, 161: 537-542.
#' @examples
#' comm <- matrix(c(1,2,0,0,0,1,1,0,0,0,0,2,2,0,0,1,1,1,1,100), nrow = 4, byrow = TRUE)
#' distance <- dist(c(1:5), method = "euclidean")
#' tree <- hclust(distance, method = "average")
#' evenness(comm)
#' evenness(tree = tree, func = "bulla")
#' evenness(comm, tree)
#' evenness(comm, tree, method = "contribution")
#' evenness(comm, tree, abund = FALSE)
#' @export
evenness <- function(comm, tree, distance, method = "expected", func = "camargo", abund = TRUE){
#convert traits to a tree if needed
if(!missing(tree) && (is.matrix(tree) || is.data.frame(tree) || is.vector(tree)))
tree = tree.build(tree)
if(missing(comm))
comm = rep(1, length(tree$order))
if(is.vector(comm))
comm <- matrix(comm, nrow = 1)
if(!abund)
comm <- ifelse(comm > 0, 1, 0)
comm[is.na(comm)] = 0
if(!missing(tree)){
comm = reorderComm(comm, tree)
} else if (!missing(distance)){
tree = nj(distance)
} else {
tree = nj(as.dist(matrix(1,ncol(comm),ncol(comm))))
tree$labels = colnames(comm)
}
#calculate evenness
evenness <- rep(0, nrow(comm))
for (i in 1:nrow(comm)){ #cycle through all sites/samples
thisSpp = which(comm[i,] > 0)
thisComm = comm[i, thisSpp] #redo this comm
if(length(thisSpp) < 2){ #if comm has less than 2 species evenness is NA
evenness[i] = NA
next
}
thisTree = ape::as.phylo(tree) #redo tree for thisComm
thisTree = keep.tip(thisTree, thisSpp)
if(method == "expected"){ #if expected
thisTree = prep(thisComm, xTree(thisTree), abund)
thisEdges = which(thisTree$lenBranch > 0 & thisTree$sampleBranch > 0)
thisObs = c()
for(j in thisEdges){ #cycle through all edges of this site/sample
#calculate the observed values as avg abundance per species of edge / length of edge
thisObs = c(thisObs, (thisTree$sampleBranch[j] / sum(thisTree$speciesBranch[,j]) / thisTree$lenBranch[j]))
}
thisObs = thisObs / sum(thisObs)
if(func == "bulla"){
##calculate the expected values as avg length of tree edges
thisExp = 1 / length(thisEdges)
#calculate evenness as the sum of minimum values between observed and expected with correction from Bulla, 1994
evenness[i] = (sum(apply(cbind(thisObs, rep(thisExp, length(thisObs))), 1, min)) - (1/length(thisEdges))) / (1-1/length(thisEdges))
} else if(func == "camargo"){ #if Camargo
nEdges = length(thisObs)
for(j in 1:(nEdges-1)){
for(k in (j+1):nEdges){
evenness[i] = evenness[i] + abs(thisObs[j] - thisObs[k])
}
}
evenness[i] = 1 - (evenness[i] / (nEdges*(nEdges-1)/2))
} else {
stop(sprintf("Function %s not recognized.", func))
}
} else if (method == "contribution") { #if using the contribution of species
contrib = contribution(thisComm, thisTree, abund = abund)
nSp = length(thisComm)
if(func == "bulla"){
##calculate the expected contribution as 1/nSp
thisExp = 1 / nSp
#calculate evenness as the sum of minimum values between observed and expected with correction from Bulla, 1994
evenness[i] = (sum(apply(cbind(contrib, rep(thisExp, nSp)), 1, min)) - (1/nSp)) / (1-1/nSp)
} else if(func == "camargo"){ #if Camargo
for(j in 1:(nSp-1)){
for(k in (j+1):nSp){
evenness[i] = evenness[i] + abs(contrib[j] - contrib[k])
}
}
evenness[i] = 1 - (evenness[i] / (nSp*(nSp-1)/2))
} else {
stop(sprintf("Function %s not recognized.", func))
}
} else {
stop(sprintf("Method %s not recognized.", method))
}
}
evenness = matrix(evenness, ncol = 1)
rownames(evenness) <- rownames(comm)
colnames(evenness) <- "Evenness"
return(evenness)
}
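#A minimal, illustrative sketch (not part of the package API) of the Camargo
#evenness on relative abundances, using the same normalisation as above:
#1 - sum of pairwise |p_i - p_j| divided by n(n-1)/2.
camargoSketch <- function(abund){
  p <- abund[abund > 0] / sum(abund)
  n <- length(p)
  difs <- abs(outer(p, p, "-"))
  1 - sum(difs[lower.tri(difs)]) / (n * (n - 1) / 2)
}
camargoSketch(c(1, 1, 1, 1, 100)) #strongly uneven toy community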
#' Contribution of each species or individual to the total taxonomic/phylogenetic/functional evenness.
#' @description Contribution of each observation to the regularity of abundances and distances (if PD/FD) between species in a community (or individuals in a species).
#' @param comm A sites x species matrix, with either abundance or incidence data. If missing, the evenness using the full tree or distance matrix is calculated.
#' @param tree A phylo or hclust object (used only for PD or FD) or alternatively a species x traits matrix or data.frame to build a functional tree.
#' @param distance A dist or matrix object representing the phylogenetic or functional distance between species. If both tree and distance are missing, taxonomic evenness is calculated.
#' @param method Calculate evenness using "expected" values (default) or values based on "contribution" of species to the tree.
#' @param func Calculate evenness using "Camargo" (1993; default) or "Bulla" (1994) index.
#' @param abund A boolean (T/F) indicating whether evenness should be calculated using abundance data.
#' @details Contribution to evenness is calculated using a leave-one-out approach, whereby the contribution of a single observation is the total evenness minus the evenness calculated without that observation. Evenness is based on the index of Camargo (1993) or Bulla (1994) using the values of both species abundances and edge lengths in the tree (if PD/FD).
#' Note that the contribution of a species or individual can be negative, if the removal of an observation increases the total evenness.
#' @details If no tree or distance is provided the result is calculated for taxonomic evenness using the original index.
#' @return A matrix of sites x species (or a vector if no comm is given).
#' @references Bulla, L. (1994) An index of evenness and its associated diversity measure. Oikos, 70: 167-171.
#' @references Camargo, J.A. (1993) Must dominance increase with the number of subordinate species in competitive interactions? Journal of Theoretical Biology, 161: 537-542.
#' @examples comm <- matrix(c(1,2,0,5,5,1,1,0,0,0,0,2,2,0,0,1,1,1,1,100), nrow = 4, byrow = TRUE)
#' distance <- dist(c(1:5), method = "euclidean")
#' tree <- hclust(distance, method = "average")
#' evenness.contribution(comm)
#' evenness.contribution(tree = tree, func = "bulla")
#' evenness.contribution(comm, tree)
#' evenness.contribution(comm, tree, method = "contribution")
#' evenness.contribution(comm, tree, abund = FALSE)
#' @export
evenness.contribution <- function(comm, tree, distance, method = "expected", func = "camargo", abund = TRUE){
#convert traits to a tree if needed (before comm defaults that read tree$order)
if(!missing(tree) && (is.matrix(tree) || is.data.frame(tree) || is.vector(tree)))
  tree = tree.build(tree)
#check if right data is provided
if(missing(comm))
  comm <- rep(1, length(tree$order))
if(is.vector(comm))
  comm <- matrix(comm, nrow = 1)
if(!abund)
  comm <- ifelse(comm > 0, 1, 0)
comm[is.na(comm)] = 0
if(!missing(tree)){
comm = reorderComm(comm, tree)
} else if (!missing(distance)){
tree = nj(distance)
} else {
tree = nj(as.dist(matrix(1,ncol(comm),ncol(comm))))
tree$labels = colnames(comm)
}
#extract total evenness
tot_evenness <- evenness(comm = comm, tree = tree, distance = distance, method = method, func = func, abund = abund)
#leave-one-out
evenness.contrib <- comm
evenness.contrib[] <- NA
for(i in 1:nrow(evenness.contrib)) {
if(length(comm[i, comm[i,]>0]) < 3) {
warning(paste("Community ", as.character(i), " contains less than 3 species. Cannot evaluate contribution to evenness." ) )
} else {
for(k in 1:length(evenness.contrib[i,])) {
comm2 <- comm[i,]
comm2[k] <- 0 #for each iteration, assign one species to 0
evenness.contrib[i,k] <- (tot_evenness[i] - evenness(comm = comm2, tree = tree, distance = distance, method = method, func = func, abund = abund))
}
}
}
evenness.contrib[comm[] == 0] <- NA
rownames(evenness.contrib) <- rownames(comm)
colnames(evenness.contrib) <- colnames(comm)
return(evenness.contrib)
}
#' Alpha diversity using convex hull hypervolumes.
#' @description Estimation of functional richness of one or multiple sites, based on convex hull hypervolumes.
#' @param comm A 'convhulln' object or list, preferably built with function hull.build.
#' @details Estimates the functional richness (alpha FD) of one or more communities using convex hull hypervolumes.
#' Functional richness is expressed as the total volume of the convex hull.
#' @return One value or a vector of alpha diversity values for each site.
#' @examples comm = rbind(c(1,3,0,5,3), c(3,2,5,0,0))
#' colnames(comm) = c("SpA", "SpB", "SpC", "SpD", "SpE")
#' rownames(comm) = c("Site 1", "Site 2")
#'
#' trait = data.frame(body = c(1,2,3,4,4), beak = c(1,5,4,1,2))
#' rownames(trait) = colnames(comm)
#'
#' hv = hull.build(comm[1,], trait)
#' hull.alpha(hv)
#' hvlist = hull.build(comm, trait, axes = 2)
#' hull.alpha(hvlist)
#' @export
hull.alpha <- function(comm){
#check if right data is provided
if (!inherits(comm, c("list", "convhulln")))
  stop("A convhulln or list is needed as input data.")
#if single comm
if(is(comm, "convhulln"))
return(comm$vol)
#if multiple comm
alphaValues <- c()
for (i in 1:length(comm))
alphaValues <- c(alphaValues, comm[[i]]$vol)
#finalize
names(alphaValues) <- names(comm)
return(alphaValues)
}
#' Beta diversity partitioning using convex hull hypervolumes.
#' @description Pairwise beta diversity partitioning into replacement and net difference in amplitude components of convex hulls.
#' @param comm A list of 'convhulln' objects, preferably built with function hull.build.
#' @param func Partial match indicating whether the Jaccard (default) or Soerensen family of beta diversity measures should be used.
#' @param comp Boolean indicating whether beta diversity components (shared and unique fractions) should be returned.
#' @details Computes a pairwise decomposition of the overall differentiation among kernel hypervolumes into two components: the replacement (shifts) of space between hypervolumes and net differences between the amount of space enclosed by each hypervolume.
#' The beta diversity measures used here follow the FD partitioning framework where Btotal = Breplacement + Brichness. Beta diversity ranges from 0 (when hypervolumes are identical) to 1 (when hypervolumes are fully dissimilar).
#' See Carvalho & Cardoso (2020) and Mammola & Cardoso (2020) for the full formulas of beta diversity used here.
#' @return Three pairwise distance matrices, one for each of the three beta diversity components. If comp = TRUE, three further distance matrices with the shared and unique fractions.
#' @references Carvalho, J.C. & Cardoso, P. (2020) Decomposing the causes for niche differentiation between species using hypervolumes. Frontiers in Ecology and Evolution. https://doi.org/10.3389/fevo.2020.00243
#' @references Mammola, S. & Cardoso, P. (2020) Functional diversity metrics using kernel density n-dimensional hypervolumes. Methods in Ecology and Evolution. https://doi.org/10.1111/2041-210X.13424
#' @examples comm <- rbind(c(1,1,1,1,1), c(1,1,1,1,1), c(0,0,1,1,1),c(0,0,1,1,1))
#' colnames(comm) = c("SpA","SpB","SpC","SpD", "SpE")
#' rownames(comm) = c("Site 1","Site 2","Site 3","Site 4")
#'
#' trait <- cbind(c(2.2,4.4,6.1,8.3,3),c(0.5,1,0.5,0.4,4))
#' colnames(trait) = c("Trait 1","Trait 2")
#' rownames(trait) = colnames(comm)
#'
#' hvlist = hull.build(comm, trait)
#' hull.beta(hvlist)
#' hvlist = hull.build(comm, trait, axes = 2)
#' hull.beta(hvlist, comp = TRUE)
#' @export
hull.beta <- function(comm, func = "jaccard", comp = FALSE){
#check if right data are provided
if (!inherits(comm, "list"))
  stop("A list of convhulln is needed as input data.")
#create matrices to store results
nComm <- length(comm)
Btotal <- Brepl <- Brich <- compA <- compB <- compC <- matrix(NA, nrow = nComm, ncol = nComm)
#calculate beta values
for (i in 1:(nComm-1)){
for(j in (i+1):nComm){
intersection <- geometry::intersectn(comm[[i]]$p, comm[[j]]$p, options = "FA")$ch$vol
unique1 <- comm[[i]]$vol - intersection
unique2 <- comm[[j]]$vol - intersection
union <- unique1 + unique2 + intersection
if(comp){
compA[j,i] = union - unique1 - unique2
compB[j,i] = unique1
compC[j,i] = unique2
}
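#for the Soerensen family the denominator becomes 2a+b+c (a = shared volume, b and c = volumes unique to each hull)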
if(tolower(substr(func, 1, 1)) == "s")
union <- 2 * union - unique1 - unique2
Btotal[j,i] <- (unique1 + unique2) / union
Brepl[j,i] <- 2 * min(unique1, unique2) / union
Brich[j,i] <- abs(unique1 - unique2) / union
}
}
#finalize
rownames(Btotal) <- colnames(Btotal) <- rownames(Brepl) <- colnames(Brepl) <- rownames(Brich) <- colnames(Brich) <- names(comm)
betaValues <- list(Btotal = round(as.dist(Btotal),3), Brepl = round(as.dist(Brepl),3), Brich = round(as.dist(Brich),3))
if (comp){
rownames(compA) <- colnames(compA) <- rownames(compB) <- colnames(compB) <- rownames(compC) <- colnames(compC) <- names(comm)
betaValues$Shared = round(as.dist(compA),3)
betaValues$Unique_to_Cols = round(as.dist(compB),3)
betaValues$Unique_to_Rows = round(as.dist(compC),3)
}
return(betaValues)
}
#' Contribution of each observation to a convex hull hypervolume.
#' @description Contribution of each species or individual to the total volume of one or more convex hulls.
#' @param comm A 'convhulln' object or list, preferably built with function hull.build.
#' @param relative A boolean (T/F) indicating whether contribution should be relative to total PD or FD (proportional contribution per individual or species). If FALSE, the sum of contributions for each site is equal to total PD/FD, if TRUE it is 1.
#' @details The contribution of each observation (species or individual) to the total volume of a convex hull, calculated as the difference in volume between the total convex hull and a second hypervolume lacking this specific observation (i.e., leave-one-out approach; Mammola & Cardoso, 2020).
#' @return A vector or matrix with the contribution values of each species or individual for each site.
#' @references Mammola, S. & Cardoso, P. (2020) Functional diversity metrics using kernel density n-dimensional hypervolumes. Methods in Ecology and Evolution. https://doi.org/10.1111/2041-210X.13424
#' @examples comm = rbind(c(1,3,0,5,3), c(3,2,5,1,0))
#' colnames(comm) = c("SpA", "SpB", "SpC", "SpD", "SpE")
#' rownames(comm) = c("Site 1", "Site 2")
#'
#' trait = data.frame(body = c(1,2,3,4,4), beak = c(1,5,4,1,2))
#' rownames(trait) = colnames(comm)
#'
#' hv = hull.build(comm[1,], trait)
#' hull.contribution(hv)
#' hvlist = hull.build(comm, trait, axes = 2)
#' hull.contribution(hvlist, relative = TRUE)
#' @export
hull.contribution = function(comm, relative = FALSE){
#check if right data is provided
if (!inherits(comm, c("list", "convhulln")))
  stop("A convhulln or list is needed as input data.")
#if a convex hull is provided go for it.
if(is(comm, "convhulln")){
contrib <- c()
for (i in 1:nrow(comm$p))
contrib <- c(contrib, (comm$vol - geometry::convhulln(comm$p[-i,], options = "FA")$vol))
names(contrib) = rownames(comm$p)
if(relative)
contrib = contrib/sum(contrib)
#if a list is provided just call this same function using hypervolumes.
} else {
#get species names from hypervolumes and order them alphabetically
spp = c()
for(i in 1:length(comm)){
spp = c(spp, rownames(comm[[i]]$p))
}
spp = unique(spp)
spp = spp[order(spp)]
#get values calling this same function one hv at a time
contrib <- matrix(NA, nrow = length(comm), ncol = length(spp))
rownames(contrib) = names(comm)
colnames(contrib) = spp
for (i in 1:length(comm)){
contr = hull.contribution(comm[[i]], relative)
contrib[i, names(contr)] = contr #index columns by name so values stay aligned with species
}
}
return(contrib)
}
#' Alpha diversity using kernel density hypervolumes.
#' @description Estimation of functional richness of one or multiple sites, based on n-dimensional hypervolumes.
#' @param comm A 'Hypervolume' or 'HypervolumeList' object, preferably built using function kernel.build.
#' @details Estimates the functional richness (alpha FD) of one or more communities using kernel density hypervolumes, as implemented in Blonder et al. (2014, 2018).
#' Functional richness is expressed as the total volume of the n-dimensional hypervolume (Mammola & Cardoso, 2020). Note that the hypervolume is dimensionless, and that only hypervolumes with the same number of dimensions can be compared in terms of functional richness.
#' Given that the density and positions of stochastic points in the hypervolume are probabilistic, the functional richness of the trait space will intimately depend on the quality of input hypervolumes (details in Mammola & Cardoso, 2020).
#' @return A value or vector of alpha diversity values for each site.
#' @references Blonder, B., Lamanna, C., Violle, C. & Enquist, B.J. (2014) The n-dimensional hypervolume. Global Ecology and Biogeography, 23: 595-609.
#' @references Blonder, B., Morrow, C.B., Maitner, B., Harris, D.J., Lamanna, C., Violle, C., ... & Kerkhoff, A.J. (2018) New approaches for delineating n-dimensional hypervolumes. Methods in Ecology and Evolution, 9: 305-319.
#' @references Mammola, S. & Cardoso, P. (2020) Functional diversity metrics using kernel density n-dimensional hypervolumes. Methods in Ecology and Evolution, 11: 986-995.
#' @examples \dontrun{
#' comm = rbind(c(1,3,0,5,3), c(3,2,5,0,0))
#' colnames(comm) = c("SpA", "SpB", "SpC", "SpD", "SpE")
#' rownames(comm) = c("Site 1", "Site 2")
#'
#' trait = data.frame(body = c(1,2,3,4,4), beak = c(1,5,4,1,2))
#' rownames(trait) = colnames(comm)
#'
#' hv = kernel.build(comm[1,], trait)
#' kernel.alpha(hv)
#' hvlist = kernel.build(comm, trait, axes = 0.8)
#' kernel.alpha(hvlist)
#' }
#' @export
kernel.alpha <- function(comm){
#check if right data is provided
if (!inherits(comm, c("HypervolumeList", "Hypervolume")))
  stop("A Hypervolume or HypervolumeList is needed as input data.")
#convert data if needed
if(is(comm, "Hypervolume")){
alphaValues = get_volume(comm)
names(alphaValues) = "Richness"
return(alphaValues)
}
#calculate alpha values and give them a name
alphaValues <- c()
for (i in 1:length(comm@HVList)){
alphaValues <- c(alphaValues, get_volume(comm@HVList[[i]]))
names(alphaValues)[i] <- comm@HVList[[i]]@Name
}
#return alpha values
return(alphaValues)
}
#' Beta diversity partitioning using kernel density hypervolumes.
#' @description Pairwise beta diversity partitioning into replacement and net difference in amplitude components of n-dimensional hypervolumes.
#' @param comm A 'HypervolumeList' object, preferably built using function kernel.build.
#' @param func Partial match indicating whether the Jaccard or Soerensen family of beta diversity measures should be used. If not specified, default is Jaccard.
#' @param comp Boolean indicating whether beta diversity components (shared and unique fractions) should be returned
#' @details Computes a pairwise decomposition of the overall differentiation among kernel hypervolumes into two components: the replacement (shifts) of space between hypervolumes and net differences between the amount of space enclosed by each hypervolume.
#' The beta diversity measures used here follow the FD partitioning framework developed by Carvalho & Cardoso (2020), where Btotal = Breplacement + Brichness. Beta diversity ranges from 0 (when hypervolumes are identical) to 1 (when hypervolumes are fully dissimilar).
#' See Carvalho & Cardoso (2020) and Mammola & Cardoso (2020) for the full formulas of beta diversity used here.
#' @return Three pairwise distance matrices, one per each of the three beta diversity components. If comp = TRUE also three distance matrices with beta diversity components.
#' @references Carvalho, J.C. & Cardoso, P. (2020) Decomposing the causes for niche differentiation between species using hypervolumes. Frontiers in Ecology and Evolution, 8: 243.
#' @references Mammola, S. & Cardoso, P. (2020) Functional diversity metrics using kernel density n-dimensional hypervolumes. Methods in Ecology and Evolution, 11: 986-995.
#' @examples \dontrun{
#' comm <- rbind(c(1,1,1,1,1), c(1,1,1,1,1), c(0,0,1,1,1),c(0,0,1,1,1))
#' colnames(comm) = c("SpA","SpB","SpC","SpD", "SpE")
#' rownames(comm) = c("Site 1","Site 2","Site 3","Site 4")
#'
#' trait <- cbind(c(2.2,4.4,6.1,8.3,3),c(0.5,1,0.5,0.4,4),c(0.7,1.2,0.5,0.4,5),c(0.7,2.2,0.5,0.3,6))
#' colnames(trait) = c("Trait 1","Trait 2","Trait 3","Trait 4")
#' rownames(trait) = colnames(comm)
#'
#' hvlist = kernel.build(comm, trait)
#' kernel.beta(hvlist)
#' hvlist = kernel.build(comm, trait, axes = 0.9)
#' kernel.beta(hvlist, comp = TRUE)
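#'
#' # A hedged arithmetic sketch of the partitioning itself (Jaccard family),
#' # using hypothetical unique and union volumes:
#' unique1 = 0.2; unique2 = 0.5; union = 1
#' (unique1 + unique2) / union       # Btotal
#' 2 * min(unique1, unique2) / union # Brepl
#' abs(unique1 - unique2) / union    # Brich, and Btotal = Brepl + Brich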
#' }
#' @export
kernel.beta = function(comm, func = "jaccard", comp = FALSE){
#check if right data is provided
if (!(class(comm) %in% c("HypervolumeList")))
stop("A HypervolumeList is needed as input data.")
#create matrices to store results
nComm <- length(comm@HVList)
Btotal <- Brepl <- Brich <- compA <- compB <- compC <- matrix(NA, nrow = nComm, ncol = nComm)
#calculate beta values and give them a name
commNames <- c()
commNames[nComm] <- comm@HVList[[nComm]]@Name
for (i in 1:(nComm-1)){
for(j in (i+1):(nComm)){
hyperSet <- hypervolume_set(comm@HVList[[i]], comm@HVList[[j]], check.memory = FALSE, verbose = FALSE, num.points.max = 10000)
union <- hyperSet[[4]]@Volume
unique1 <- hyperSet[[5]]@Volume
unique2 <- hyperSet[[6]]@Volume
if(comp){
compA[j,i] = union - unique1 - unique2
compB[j,i] = unique1
compC[j,i] = unique2
}
if(tolower(substr(func, 1, 1)) == "s")
union <- 2 * union - unique1 - unique2
Btotal[j,i] <- (unique1 + unique2) / union
Brepl[j,i] <- 2 * min(unique1, unique2) / union
Brich[j,i] <- abs(unique1 - unique2) / union
}
commNames[i] <- comm@HVList[[i]]@Name
}
#finalize
rownames(Btotal) <- colnames(Btotal) <- rownames(Brepl) <- colnames(Brepl) <- rownames(Brich) <- colnames(Brich) <- commNames
betaValues <- list(Btotal = round(as.dist(Btotal),3), Brepl = round(as.dist(Brepl),3), Brich = round(as.dist(Brich),3))
if (comp){
rownames(compA) <- colnames(compA) <- rownames(compB) <- colnames(compB) <- rownames(compC) <- colnames(compC) <- commNames
betaValues$Shared = round(as.dist(compA),3)
betaValues$Unique_to_Cols = round(as.dist(compB),3)
betaValues$Unique_to_Rows = round(as.dist(compC),3)
}
return(betaValues)
}
#' Functional beta diversity evenness using kernel density hypervolumes.
#' @description Difference of evenness between pairs of sites, measuring the regularity of stochastic points distribution within the total functional space.
#' @param comm A 'HypervolumeList' object, preferably built using function kernel.build.
#' @details This measure is simply the pairwise difference of evenness calculated based on the functional evenness (Mason et al., 2005) of a n-dimensional hypervolume, namely the regularity of stochastic points distribution within the total trait space (Mammola & Cardoso, 2020).
#' Evenness is calculated as the overlap between the observed hypervolume and a theoretical hypervolume where traits and abundances are evenly distributed within the range of their values (Carmona et al., 2016, 2019).
#' @return Distance matrix between sites.
#' @references Carmona, C.P., de Bello, F., Mason, N.W.H. & Leps, J. (2016) Traits without borders: integrating functional diversity across scales. Trends in Ecology and Evolution, 31: 382-394.
#' @references Carmona, C.P., de Bello, F., Mason, N.W.H. & Leps, J. (2019) Trait probability density (TPD): measuring functional diversity across scales based on TPD with R. Ecology, 100: e02876.
#' @references Mason, N.W.H., Mouillot, D., Lee, W.G. & Wilson, J.B. (2005) Functional richness, functional evenness and functional divergence: the primary components of functional diversity. Oikos, 111: 112-118.
#' @references Mammola, S. & Cardoso, P. (2020) Functional diversity metrics using kernel density n-dimensional hypervolumes. Methods in Ecology and Evolution, 11: 986-995.
#' @examples \dontrun{
#' comm <- rbind(c(1,1,1,1,1), c(1,1,1,1,1), c(0,0,1,1,1),c(0,0,1,1,1))
#' colnames(comm) = c("SpA","SpB","SpC","SpD", "SpE")
#' rownames(comm) = c("Site 1","Site 2","Site 3","Site 4")
#'
#' trait <- cbind(c(2.2,4.4,6.1,8.3,3),c(0.5,1,0.5,0.4,4),c(0.7,1.2,0.5,0.4,5),c(0.7,2.2,0.5,0.3,6))
#' colnames(trait) = c("Trait 1","Trait 2","Trait 3","Trait 4")
#' rownames(trait) = colnames(comm)
#'
#' hvlist = kernel.build(comm, trait)
#' kernel.beta.evenness(hvlist)
#' hvlist = kernel.build(comm, trait, axes = 0.9)
#' kernel.beta.evenness(hvlist)
#' }
#' @export
kernel.beta.evenness <- function(comm){
if (!(class(comm) %in% c("HypervolumeList")))
stop("A HypervolumeList is needed as input data.")
return(dist(kernel.evenness(comm)))
}
#' Functional originality of observations in kernel density hypervolumes.
#' @description Average dissimilarity between a species or individual and a sample of random points within the boundaries of the n-dimensional hypervolume.
#' @param comm A 'Hypervolume' or 'HypervolumeList' object, preferably built using function kernel.build.
#' @param frac A value between 0.01 and 1, indicating the fraction of random points to be used in the estimation of originality. Default is 0.1.
#' @param relative A boolean (T/F) indicating whether originality should be relative to the most original species in the community.
#' @details A measure of the originality (sensu Pavoine et al., 2005) of each observation (species or individuals) used to construct the n-dimensional hypervolume. In a probabilistic hypervolume, originality is calculated as the average distance between each observation to a sample of stochastic points within the boundaries of the n-dimensional hypervolume (Mammola & Cardoso, 2020).
#' Originality is a measure of functional rarity (sensu Violle et al., 2017; Carmona et al., 2017) that allows to map the contribution of each observation to the divergence components of FD (Mammola & Cardoso, 2020).
#' The number of sample points to be used in the estimation of the originality is controlled by the frac parameter. Increase frac for less deviation in the estimation, but mind that computation time also increases.
#' @return A vector or matrix with the originality values of each species or individual in each site.
#' @references Carmona, C.P., de Bello, F., Sasaki, T., Uchida, K. & Partel, M. (2017) Towards a common toolbox for rarity: A response to Violle et al. Trends in Ecology and Evolution, 32: 889-891.
#' @references Mammola, S. & Cardoso, P. (2020) Functional diversity metrics using kernel density n-dimensional hypervolumes. Methods in Ecology and Evolution, 11: 986-995.
#' @references Pavoine, S., Ollier, S. & Dufour, A.-B. (2005) Is the originality of a species measurable? Ecology Letters, 8: 579-586.
#' @references Violle, C., Thuiller, W., Mouquet, N., Munoz, F., Kraft, N.J.B., Cadotte, M.W., ... & Mouillot, D. (2017) Functional rarity: the ecology of outliers. Trends in Ecology and Evolution, 32: 356-367.
#' @examples \dontrun{
#' comm = rbind(c(1,3,0,5,3), c(3,2,5,1,0))
#' colnames(comm) = c("SpA", "SpB", "SpC", "SpD", "SpE")
#' rownames(comm) = c("Site 1", "Site 2")
#'
#' trait = data.frame(body = c(1,2,3,4,4), beak = c(1,5,4,1,2))
#' rownames(trait) = colnames(comm)
#'
#' hv = kernel.build(comm[1,], trait)
#' kernel.originality(hv)
#' hvlist = kernel.build(comm, trait)
#' kernel.originality(hvlist)
#' kernel.originality(hvlist, relative = TRUE)
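#'
#' # A hedged sketch of the underlying computation for one species: the mean
#' # Euclidean distance from its trait point to sampled stochastic points.
#' pts = hv@RandomPoints[sample(nrow(hv@RandomPoints), 100), ]
#' mean(apply(pts, 1, function(p) sqrt(sum((hv@Data["SpA", ] - p)^2))))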
#' }
#' @export
kernel.originality = function(comm, frac = 0.1, relative = FALSE){
#check if right data is provided
if (!(class(comm) %in% c("Hypervolume", "HypervolumeList")))
stop("A Hypervolume or HypervolumeList is needed as input data.")
#check if right frac parameter is provided
if (frac < 0.01 | frac > 1)
stop("The frac parameter should be a number between 0.01 and 1.")
if(is(comm, "Hypervolume")) {
sample.points <- comm@RandomPoints[sample(1:nrow(comm@RandomPoints), nrow(comm@RandomPoints)*frac), ]
originality <- c()
uniqueNames <- unique(rownames(comm@Data))
for (i in 1:length(uniqueNames)){
originality_run <- c()
subHvData <- comm@Data[uniqueNames[i], ]
#mean Euclidean distance between this observation and each sampled stochastic point
for (r in 1:nrow(sample.points))
originality_run <- c(originality_run, dist(rbind(subHvData, sample.points[r, ])))
originality <- c(originality, mean(originality_run))
}
names(originality) = uniqueNames
} else if (is(comm, "HypervolumeList")){
if (all(rownames(comm@HVList[[1]]@Data) == 1:nrow(comm@HVList[[1]]@Data)))
warning("Species names are probably missing from hypervolumes, consider renaming them.")
spp = c()
commNames = c()
for(i in 1:length(comm@HVList)){
spp = c(spp, rownames(comm@HVList[[i]]@Data))
commNames = c(commNames, comm@HVList[[i]]@Name)
}
spp = unique(spp)
spp = spp[order(spp)]
originality = matrix(NA, nrow = length(comm@HVList), ncol = length(spp))
rownames(originality) = commNames
colnames(originality) = spp
for(i in 1:length(comm@HVList)){
originality[i, which(spp %in% rownames(comm@HVList[[i]]@Data))] <- kernel.originality(comm = comm@HVList[[i]], frac = frac)
}
}
#finalize
if(relative){
if(is.matrix(originality))
originality = t(apply(originality, 1, function(x) x / max(x, na.rm = TRUE)))
else
originality <- originality/max(originality, na.rm = TRUE)
}
return(originality)
}
#' Contribution of each observation to the kernel density hypervolume.
#' @description Contribution of each species or individual to the total volume of one or more kernel hypervolumes.
#' @param comm A 'Hypervolume' or 'HypervolumeList' object, preferably built using function kernel.build.
#' @param func Calculate contribution using either closest "neighbor" or leave "one out" approach.
#' @param relative A boolean (T/F) indicating whether contribution should be relative to total FD (proportional contribution per individual or species). If FALSE, the sum of contributions for each site is equal to total FD, if TRUE it is 1.
#' @details Contribution is a measure of functional rarity (sensu Violle et al., 2017; Carmona et al., 2017) that allows to map the contribution of each observation to the richness components of FD (Mammola & Cardoso, 2020).
#' If using func = "neighbor", each random point will be attributed to the closest species. The contribution of each species will be proportional to the number of its points. The sum of contributions of all species is equal to total richness.
#' Note that the contribution of a species or individual can be negative if leave-one-out approach is taken, if the removal of an observation increases the total volume (see Figure 2d in Mammola & Cardoso 2020).
#' This might happen, although not always, in cases when the presence of a given species decreases the average distance between all the species in the community, i.e., when a given species is close to the "average" species of that community, making that community less diverse in some sense (Mammola & Cardoso, 2020).
#' @return A matrix with the contribution values of each species or individual for each site.
#' @references Carmona, C.P., de Bello, F., Sasaki, T., Uchida, K. & Partel, M. (2017) Towards a common toolbox for rarity: A response to Violle et al. Trends in Ecology and Evolution, 32(12): 889-891.
#' @references Mammola, S. & Cardoso, P. (2020) Functional diversity metrics using kernel density n-dimensional hypervolumes. Methods in Ecology and Evolution, 11: 986-995.
#' @references Violle, C., Thuiller, W., Mouquet, N., Munoz, F., Kraft, N.J.B., Cadotte, M.W., ... & Mouillot, D. (2017) Functional rarity: The ecology of outliers. Trends in Ecology and Evolution, 32: 356-367.
#' @examples \dontrun{
#' comm = rbind(c(1,3,0,5,3), c(3,2,5,1,0))
#' colnames(comm) = c("SpA", "SpB", "SpC", "SpD", "SpE")
#' rownames(comm) = c("Site 1", "Site 2")
#'
#' trait = data.frame(body = c(1,2,3,4,4), beak = c(1,5,4,1,2))
#' rownames(trait) = colnames(comm)
#'
#' hv = kernel.build(comm[1,], trait)
#' kernel.contribution(hv)
#' hvlist = kernel.build(comm, trait, axes = 2)
#' kernel.contribution(hvlist)
#' kernel.contribution(hvlist, relative = TRUE)
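#'
#' # A hedged property check for func = "neighbor": every stochastic point is
#' # assigned to its closest species, so contributions sum to the total volume.
#' sum(kernel.contribution(hv)) # matches kernel.alpha(hv) up to rounding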
#' }
#' @export
kernel.contribution = function(comm, func = "neighbor", relative = FALSE){
#check if right data is provided
if (!(class(comm) %in% c("Hypervolume", "HypervolumeList")))
stop("A Hypervolume or HypervolumeList is needed as input data.")
#if hypervolume is provided go for it.
if (is(comm, "Hypervolume")){
contrib <- c()
if(substring(func, 1, 3) == "nei"){
#check for duplicated data points and map each row to its first occurrence
simPoints = c()
for(i in 1:nrow(comm@Data)){
simPoints[i] = which(paste(comm@Data[i, ], collapse = "") == apply(comm@Data, 1, paste, collapse = ""))[1]
}
#calculate distance between each random point and each data point
distPoints = matrix(NA, ncol = nrow(comm@Data), nrow = nrow(comm@RandomPoints))
for(d in 1:nrow(comm@Data)){
for(r in 1:nrow(comm@RandomPoints)){
distPoints[r, d] = dist(rbind(comm@RandomPoints[r,], comm@Data[d,]))
}
}
#assign each random point to its nearest data point; duplicated rows get 0 points here
distPoints = apply(distPoints, 1, which.min)
##calculate contribution of each data point
for(d in 1:nrow(comm@Data)){
contrib[d] = sum(distPoints == d)
}
contrib = contrib / length(distPoints) * kernel.alpha(comm)
##split the contribution of each duplicated data point evenly among its duplicates
temp = c()
for(i in 1:nrow(comm@Data)){
dups = which(simPoints == simPoints[i])
temp[i] = contrib[dups[1]] / length(dups)
}
contrib = temp
} else if (substring(func, 1, 3) == "one"){
for (i in 1:nrow(comm@Data)){
if (comm@Method == "Box kernel density estimate")
contrib <- c(contrib, comm@Volume - hypervolume_box(comm@Data[-i,], verbose = FALSE)@Volume)
else if (comm@Method == "Gaussian kernel density estimate")
contrib <- c(contrib, comm@Volume - hypervolume_gaussian(comm@Data[-i,], verbose = FALSE)@Volume)
else if (comm@Method == "One-class support vector machine")
contrib <- c(contrib, comm@Volume - hypervolume_svm(comm@Data[-i,], verbose = FALSE)@Volume)
}
} else {
stop("func not recognized, must be 'neighbor' or 'one out'")
}
names(contrib) = rownames(comm@Data)
} else if (is(comm, "HypervolumeList")) {
if (all(rownames(comm@HVList[[1]]@Data) == 1:nrow(comm@HVList[[1]]@Data)))
warning("Species names are probably missing from hypervolumes, consider renaming them.")
spp = c()
commNames = c()
for(i in 1:length(comm@HVList)){
spp = c(spp, rownames(comm@HVList[[i]]@Data))
commNames = c(commNames, comm@HVList[[i]]@Name)
}
spp = unique(spp)
spp = spp[order(spp)]
contrib = matrix(NA, nrow = length(comm@HVList), ncol = length(spp))
rownames(contrib) = commNames
colnames(contrib) = spp
for(i in 1:length(comm@HVList)){
contrib[i, which(spp %in% rownames(comm@HVList[[i]]@Data))] <- kernel.contribution(comm = comm@HVList[[i]], func = func)
}
}
#finalize
if(relative){
if(is.matrix(contrib))
contrib = t(apply(contrib, 1, function(x) x / sum(x, na.rm = TRUE)))
else
contrib <- contrib/sum(contrib, na.rm = TRUE)
}
return(contrib)
}
#' Functional dispersion of kernel density hypervolumes.
#' @description Average distance to centroid or dissimilarity between random points within the boundaries of the kernel density hypervolume.
#' @param comm A 'Hypervolume' or 'HypervolumeList' object, preferably built using function kernel.build.
#' @param func Function for calculating dispersion. One of 'divergence', 'dissimilarity' or 'regression'.
#' @param frac A value between 0.01 and 1, indicating the fraction of random points to be used. Default is 0.1.
#' @details This function calculates dispersion either: i) as the average distance between stochastic points within the kernel density hypervolume and the centroid of these points (divergence; Laliberte & Legendre, 2010; see also Carmona et al., 2019); ii) as the average distance between all points (dissimilarity, see also function BAT::dispersion); or iii) as the average coefficient of determination of pairwise linear regressions fitted through the stochastic points (regression).
#' The number of stochastic points is controlled by the 'frac' parameter (increase this number for less deviation in the estimation).
#' @return A value or vector of dispersion values for each site.
#' @references Carmona, C.P., de Bello, F., Mason, N.W.H. & Leps, J. (2019) Trait probability density (TPD): measuring functional diversity across scales based on TPD with R. Ecology, 100: e02876.
#' @references Laliberte, E. & Legendre, P. (2010) A distance-based framework for measuring functional diversity from multiple traits. Ecology 91: 299-305.
#' @examples \dontrun{
#' comm = rbind(c(1,3,0,5,3), c(3,2,5,1,0))
#' colnames(comm) = c("SpA", "SpB", "SpC", "SpD", "SpE")
#' rownames(comm) = c("Site 1", "Site 2")
#'
#' trait = data.frame(body = c(1,2,3,4,4), beak = c(1,5,4,1,2))
#' rownames(trait) = colnames(comm)
#'
#' hv = kernel.build(comm[1,], trait)
#' kernel.dispersion(hv)
#' hvlist = kernel.build(comm, trait, axes = 2)
#' kernel.dispersion(hvlist)
#' kernel.dispersion(hvlist, func = "divergence")
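#'
#' # A hedged sketch of the 'divergence' option: the mean Euclidean distance
#' # between the stochastic points and the hypervolume centroid.
#' pts = hv@RandomPoints
#' cent = get_centroid(hv)
#' mean(sqrt(rowSums(sweep(pts, 2, cent)^2)))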
#' }
#' @export
kernel.dispersion = function(comm, func = 'dissimilarity', frac = 0.1) {
#check if right data is provided
if (!(class(comm) %in% c("HypervolumeList", "Hypervolume")))
stop("A Hypervolume or HypervolumeList is needed as input data.")
#check if right frac parameter is provided
if (frac < 0.01 | frac > 1)
stop("Frac parameter should be a number between 0.01 and 1.")
if (is(comm, "Hypervolume")){
random_points = comm@RandomPoints[sample(1:nrow(comm@RandomPoints), nrow(comm@RandomPoints)*frac), ]
if (func == "dissimilarity"){
disp <- mean(dist(random_points))
} else if (func == "divergence"){
cent <- get_centroid(comm)
disp <- c()
for (k in 1:comm@Dimensionality){
disp <- cbind(disp, (cent[k] - random_points[, k])^2)
}
disp <- mean(rowSums(disp)^0.5)
} else if (func == "regression"){
disp <- c()
for(m in 1:(ncol(random_points)-1)){ # build all bivariate predictor combinations
for(n in (m+1):ncol(random_points)){
disp <- append(disp,summary(lm(random_points[,m]~random_points[,n]))$r.squared) #get the R^2
}
}
disp <- mean(disp)
} else {
stop(sprintf("Function %s not recognized.", func))
}
names(disp) <- comm@Name
} else if (is(comm, "HypervolumeList")){
disp <- c()
for (i in 1:length(comm@HVList))
disp <- c(disp, kernel.dispersion(comm@HVList[[i]], func = func, frac = frac))
}
return(disp)
}
#' Functional evenness of kernel density hypervolumes.
#' @description Functional evenness of a community, measuring the regularity of stochastic points distribution within the total functional space.
#' @param comm A 'Hypervolume' or 'HypervolumeList' object, preferably built using function kernel.build.
#' @details This function measures the functional evenness (Mason et al., 2005) of a n-dimensional hypervolume, namely the regularity of stochastic points distribution within the total trait space (Mammola & Cardoso, 2020).
#' Evenness is calculated as the overlap between the observed hypervolume and a theoretical hypervolume where traits and abundances are evenly distributed within the range of their values (Carmona et al., 2016, 2019).
#' @return A value or vector of evenness values for each site.
#' @references Carmona, C.P., de Bello, F., Mason, N.W.H. & Leps, J. (2016) Traits without borders: integrating functional diversity across scales. Trends in Ecology and Evolution, 31: 382-394.
#' @references Carmona, C.P., de Bello, F., Mason, N.W.H. & Leps, J. (2019) Trait probability density (TPD): measuring functional diversity across scales based on TPD with R. Ecology, 100: e02876.
#' @references Mason, N.W.H., Mouillot, D., Lee, W.G. & Wilson, J.B. (2005) Functional richness, functional evenness and functional divergence: the primary components of functional diversity. Oikos, 111: 112-118.
#' @references Mammola, S. & Cardoso, P. (2020) Functional diversity metrics using kernel density n-dimensional hypervolumes. Methods in Ecology and Evolution, 11: 986-995.
#' @examples \dontrun{
#' comm = rbind(c(100,3,0,5,3), c(3,2,5,1,0))
#' colnames(comm) = c("SpA", "SpB", "SpC", "SpD", "SpE")
#' rownames(comm) = c("Site 1", "Site 2")
#'
#' trait = data.frame(body = c(1,2,3,4,4), beak = c(1,5,4,1,2))
#' rownames(trait) = colnames(comm)
#'
#' hv = kernel.build(comm[1,], trait)
#' kernel.evenness(hv)
#' hv = kernel.build(comm[1,], trait, abund = FALSE)
#' kernel.evenness(hv)
#' hvlist = kernel.build(comm, trait, axes = 2)
#' kernel.evenness(hvlist)
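#'
#' # A hedged sketch of the internal reference: a perfectly even counterpart
#' # spaces each trait regularly within its observed range.
#' apply(trait, 2, function(x) seq(min(x), max(x), length.out = length(x)))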
#' }
#' @export
kernel.evenness = function(comm) {
#check if right data is provided
if (!(class(comm) %in% c("HypervolumeList", "Hypervolume")))
stop("A Hypervolume or HypervolumeList is needed as input data.")
#if hypervolume is provided go for it
if (is(comm, "Hypervolume")) {
#creating a perfectly even hypervolume within the distribution of traits
ref_even <- comm@Data
ref_even[] <- NA
for(j in 1:ncol(comm@Data)){
#evenly spaced trait values; length.out avoids floating point length mismatches
ref_even[, j] <- seq(from = min(comm@Data[, j]), to = max(comm@Data[, j]), length.out = nrow(comm@Data))
}
#get parameters to build even hypervolumes
if (comm@Method == "Box kernel density estimate")
hv_ref <- hypervolume_box(ref_even, verbose = FALSE,
samples.per.point = comm@Parameters$samples.per.point,
kde.bandwidth = comm@Parameters$kde.bandwidth)
else if (comm@Method == "Gaussian kernel density estimate")
hv_ref <- hypervolume_gaussian(ref_even, verbose = FALSE,
weight = NULL,
samples.per.point = comm@Parameters$samples.per.point,
kde.bandwidth = comm@Parameters$kde.bandwidth,
sd.count = comm@Parameters$sd.count,
quantile.requested = comm@Parameters$quantile.requested,
quantile.requested.type = comm@Parameters$quantile.requested.type)
else if (comm@Method == "One-class support vector machine")
hv_ref <- hypervolume_svm(ref_even, verbose = FALSE,
samples.per.point = comm@Parameters$samples.per.point,
svm.nu = comm@Parameters$svm.nu,
svm.gamma = comm@Parameters$svm.gamma)
#Checking the overlap between the hypervolume and the even hypervolume
set <- hypervolume_set(hv_ref, comm, check.memory = FALSE, distance.factor = 1, verbose = FALSE)
even <- hypervolume_overlap_statistics(set)[1]
names(even) <- comm@Name
} else if (is(comm, "HypervolumeList")){
even <- c()
for(i in 1:length(comm@HVList)){
even <- c(even, kernel.evenness(comm@HVList[[i]]))
message(paste("Evenness of hypervolume", as.character(i), "out of", as.character(length(comm@HVList)), "has been estimated.\n"))
}
}
return(even)
}
#' Contribution of each observation to the evenness of a kernel density hypervolume.
#' @description Contribution of each species or individual to the evenness of one or more kernel hypervolumes.
#' @param comm A 'Hypervolume' or 'HypervolumeList' object, preferably built using function kernel.build.
#' @details The contribution of each observation (species or individual) to the total evenness of a kernel hypervolume. Contribution to evenness is calculated as the difference in evenness between the total hypervolume and a second hypervolume lacking this specific observation (i.e., leave-one-out approach; Mammola & Cardoso, 2020).
#' Note that the contribution of a species or individual can be negative, if the removal of an observation increases the total evenness.
#' @return A vector or matrix with the contribution values of each species or individual for each community or species respectively.
#' @references Mammola, S. & Cardoso, P. (2020) Functional diversity metrics using kernel density n-dimensional hypervolumes. Methods in Ecology and Evolution, 11: 986-995.
#' @examples \dontrun{
#' comm = rbind(c(100,3,0,5,3), c(3,2,5,1,0))
#' colnames(comm) = c("SpA", "SpB", "SpC", "SpD", "SpE")
#' rownames(comm) = c("Site 1", "Site 2")
#'
#' trait = data.frame(body = c(1,2,3,4,4), beak = c(1,5,4,1,2))
#' rownames(trait) = colnames(comm)
#'
#' hv = kernel.build(comm[1,], trait)
#' kernel.evenness.contribution(hv)
#' hvlist = kernel.build(comm, trait)
#' kernel.evenness.contribution(hvlist)
#' hvlist = kernel.build(comm, trait, axes = 0.8)
#' kernel.evenness.contribution(hvlist)
#' }
#' @export
kernel.evenness.contribution = function(comm){
#check if right data is provided
if (!(class(comm) %in% c("Hypervolume", "HypervolumeList")))
stop("A Hypervolume or HypervolumeList is needed as input data.")
#if hypervolume is provided go for it.
if (is(comm, "Hypervolume")){
#extract total evenness:
comm.evenness <- kernel.evenness(comm)
#leave-one-out:
contrib <- c()
for (i in 1:nrow(comm@Data)){
if (comm@Method == "Box kernel density estimate")
contrib <- c(contrib, comm.evenness - kernel.evenness(hypervolume_box(comm@Data[-i, ], verbose = FALSE)))
else if (comm@Method == "Gaussian kernel density estimate")
contrib <- c(contrib, comm.evenness - kernel.evenness(hypervolume_gaussian(comm@Data[-i, ], verbose = FALSE)))
else if (comm@Method == "One-class support vector machine")
contrib <- c(contrib, comm.evenness - kernel.evenness(hypervolume_svm(comm@Data[-i, ], verbose = FALSE)))
}
names(contrib) = rownames(comm@Data)
} else if (is(comm, "HypervolumeList")){
if (all(rownames(comm@HVList[[1]]@Data) == 1:nrow(comm@HVList[[1]]@Data)))
warning("Species names are probably missing from hypervolumes, consider renaming them.")
spp = c()
commNames = c()
for(i in 1:length(comm@HVList)){
spp = c(spp, rownames(comm@HVList[[i]]@Data))
commNames = c(commNames, comm@HVList[[i]]@Name)
}
spp = unique(spp)
spp = spp[order(spp)]
contrib = matrix(NA, nrow = length(comm@HVList), ncol = length(spp))
rownames(contrib) = commNames
colnames(contrib) = spp
for(i in 1:length(comm@HVList)){
contrib[i, which(spp %in% rownames(comm@HVList[[i]]@Data))] <- kernel.evenness.contribution(comm = comm@HVList[[i]])
}
}
return(contrib)
}
#' Pairwise similarity among kernel density hypervolumes.
#' @description Calculate pairwise distance metrics (centroid and minimum distance) and similarity indices (Intersection, Jaccard, Soerensen-Dice) among n-dimensional hypervolumes.
#' @param comm A 'HypervolumeList' object, preferably built using function kernel.build.
#' @details Computes a pairwise comparison between kernel density hypervolumes of multiple species or communities, based on the distance and similarity metrics implemented in hypervolume R package (Blonder et al., 2014, 2018).
#' See Mammola (2019) for a description of the different indices, and a comparison between their performance. Note that computation time largely depends on the number of 'Hypervolume' objects in the list, and scales almost exponentially with the number of hypervolume axes.
#' @return Five pairwise distance matrices, one per each of the distance and similarity indices (in order: distance between centroids, minimum distance, Jaccard overlap, Soerensen-Dice overlap, and Intersection among hypervolumes).
#' @references Blonder, B., Lamanna, C., Violle, C. & Enquist, B.J. (2014) The n-dimensional hypervolume. Global Ecology and Biogeography, 23: 595-609.
#' @references Blonder, B., Morrow, C.B., Maitner, B., Harris, D.J., Lamanna, C., Violle, C., ... & Kerkhoff, A.J. (2018) New approaches for delineating n-dimensional hypervolumes. Methods in Ecology and Evolution, 9: 305-319.
#' @references Mammola, S. (2019) Assessing similarity of n-dimensional hypervolumes: Which metric to use?. Journal of Biogeography, 46: 2012-2023.
#' @examples \dontrun{
#' comm <- rbind(c(1,1,1,1,1), c(1,1,1,1,1), c(0,0,1,1,1),c(0,0,1,1,1))
#' colnames(comm) = c("SpA","SpB","SpC","SpD", "SpE")
#' rownames(comm) = c("Site 1","Site 2","Site 3","Site 4")
#'
#' trait <- cbind(c(2.2,4.4,6.1,8.3,3),c(0.5,1,0.5,0.4,4),c(0.7,1.2,0.5,0.4,5),c(0.7,2.2,0.5,0.3,6))
#' colnames(trait) = c("Trait 1","Trait 2","Trait 3","Trait 4")
#' rownames(trait) = colnames(comm)
#'
#' hvlist = kernel.build(comm, trait)
#' kernel.similarity(hvlist)
#' hvlist = kernel.build(comm, trait, axes = 0.9)
#' kernel.similarity(hvlist)
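#'
#' # A hedged cross-check of the centroid distance between the first two sites:
#' dist(rbind(get_centroid(hvlist@HVList[[1]]), get_centroid(hvlist@HVList[[2]])))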
#' }
#' @export
kernel.similarity <- function(comm) {
#check if right data is provided
if (!(class(comm) %in% c("HypervolumeList")))
stop("A HypervolumeList is needed as input data.")
#create matrices to store results
nComm <- length(comm@HVList)
dist_c <- matrix(nrow = nComm, ncol = nComm, NA)
dist_m <- matrix(nrow = nComm, ncol = nComm, NA)
int <- matrix(nrow = nComm, ncol = nComm, NA)
jac <- matrix(nrow = nComm, ncol = nComm, NA)
sor <- matrix(nrow = nComm, ncol = nComm, NA)
#calculate similarity values and give them a name
commNames <- c()
for (i in 1:nComm){
for(j in i:nComm){
dst_cent <- hypervolume_distance(comm@HVList[[i]], comm@HVList[[j]], type = "centroid", num.points.max = 1000, check.memory = FALSE)
dst_min <- hypervolume_distance(comm@HVList[[i]], comm@HVList[[j]], type = "minimum", check.memory = FALSE)
set <- hypervolume_set(comm@HVList[[i]], comm@HVList[[j]], check.memory = FALSE, verbose = FALSE)
dist_c[j,i] <- dst_cent
dist_m[j,i] <- dst_min
int[j,i] <- get_volume(set)[[3]]
jac[j,i] <- hypervolume_overlap_statistics(set)[1]
sor[j,i] <- hypervolume_overlap_statistics(set)[2]
}
commNames <- c(commNames, comm@HVList[[i]]@Name)
message(paste("Similarity values of the hypervolume", as.character(i), "out of", as.character(nComm), "have been calculated.\n"))
}
#finalize
rownames(dist_c) <- colnames(dist_c) <- rownames(dist_m) <- colnames(dist_m) <- rownames(jac) <- colnames(jac) <- rownames(sor) <- colnames(sor) <- rownames(int) <- colnames(int) <- commNames
similarity <- list(Distance_centroids = as.dist(dist_c), Minimum_distance = as.dist(dist_m), Intersection = as.dist(int), Jaccard = as.dist(jac), Sorensen = as.dist(sor))
return(similarity)
}
#' Hotspots in hypervolumes.
#' @description Identify hotspots in kernel density hypervolumes based on minimum volume needed to cover a given proportion of random points.
#' @param comm A 'Hypervolume' or 'HypervolumeList' object, preferably built using function kernel.build.
#' @param prop Proportion of random points to be included.
#' @details Estimates the hotspots of one or more communities using kernel density hypervolumes as in Carmona et al. (2021).
#' @return A 'Hypervolume' or 'HypervolumeList' with the hotspots of each site.
#' @references Carmona, C.P., et al. (2021) Erosion of global functional diversity across the tree of life. Science Advances, 7: eabf2675. DOI: 10.1126/sciadv.abf2675
#' @examples \dontrun{
#' comm = rbind(c(1,3,0,5,3), c(3,2,5,0,0))
#' colnames(comm) = c("SpA", "SpB", "SpC", "SpD", "SpE")
#' rownames(comm) = c("Site 1", "Site 2")
#'
#' trait = data.frame(body = c(1,2,3,4,4), beak = c(1,5,4,1,2))
#' rownames(trait) = colnames(comm)
#'
#' hv = kernel.build(comm[1,], trait)
#' plot(hv)
#' kernel.alpha(hv)
#'
#' hot = kernel.hotspots(hv, 0.5)
#' plot(hot)
#' kernel.alpha(hot)
#'
#' hvlist = kernel.build(comm, trait)
#' hot = kernel.hotspots(hvlist, 0.1)
#' kernel.alpha(hot)
#' }
#' @export
kernel.hotspots <- function(comm, prop = 0.5){
#check if right data is provided
if (!(class(comm) %in% c("HypervolumeList", "Hypervolume")))
stop("A Hypervolume or HypervolumeList is needed as input data.")
#single volume
if (is(comm, "Hypervolume")){
hot = hypervolume_threshold(comm, num.thresholds = 1000, quantile.requested = prop, quantile.requested.type = "probability", plot = FALSE, verbose = FALSE)
hot = hot$HypervolumesThresholded
hot@Name = comm@Name
} else if (is(comm, "HypervolumeList")) {
#if list call this same function
for (i in 1:length(comm@HVList)){
newHot <- kernel.hotspots(comm@HVList[[i]], prop)
message(paste("Hotspot", as.character(i), "out of", as.character(length(comm@HVList)), "has been calculated.\n"))
if(i == 1){
hot <- hypervolume_join(newHot)
} else {
hot <- hypervolume_join(hot,newHot)
}
}
}
#return hot
return(hot)
}
#' Gamma diversity (Taxon, Phylogenetic or Functional Diversity - TD, PD, FD).
#' @description Observed richness among multiple sites.
#' @param comm A sites x species matrix, with either abundance or incidence data.
#' @param tree A phylo or hclust object (used only for PD or FD) or alternatively a species x traits matrix or data.frame to build a functional tree.
#' @details TD is equivalent to species richness. Calculations of PD and FD are based on Faith (1992) and Petchey & Gaston (2002, 2006), which measure PD and FD of a community as the total branch length of a tree linking all species represented in such community.
#' PD and FD are calculated based on a tree (hclust or phylo object, no need to be ultrametric). The path to the root of the tree is always included in calculations of PD and FD.
#' The number and order of species in comm must be the same as in tree.
#' @return A single value of gamma.
#' @references Faith, D.P. (1992) Conservation evaluation and phylogenetic diversity. Biological Conservation, 61, 1-10.
#' @references Petchey, O.L. & Gaston, K.J. (2002) Functional diversity (FD), species richness and community composition. Ecology Letters, 5, 402-411.
#' @references Petchey, O.L. & Gaston, K.J. (2006) Functional diversity: back to basics and looking forward. Ecology Letters, 9, 741-758.
#' @examples comm <- matrix(c(0,0,1,1,0,0,2,1,0,0), nrow = 2, ncol = 5, byrow = TRUE)
#' trait = 1:5
#' tree <- hclust(dist(c(1:5), method = "euclidean"), method = "average")
#' alpha(comm)
#' gamma(comm)
#' gamma(comm, trait)
#' gamma(comm, tree)
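#'
#' # gamma() is alpha() computed on the pooled community; a hedged equivalence:
#' alpha(matrix(colSums(comm), nrow = 1))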
#' @export
gamma <- function(comm, tree){
comm = matrix(colSums(comm), nrow = 1, dimnames = list(NULL, colnames(comm)))
return(alpha(comm, tree))
}
#' Gamma diversity using convex hull hypervolumes.
#' @description Estimation of functional richness of multiple sites, based on convex hull hypervolumes.
#' @param comm A 'convhulln' object or list, preferably built with function hull.build.
#' @details Estimates the functional richness (gamma FD) of multiple communities using convex hull hypervolumes.
#' Functional richness is expressed as the total volume of the convex hull.
#' @return A single value of gamma.
#' @examples comm = rbind(c(1,3,0,5,3), c(3,2,5,0,0))
#' colnames(comm) = c("SpA", "SpB", "SpC", "SpD", "SpE")
#' rownames(comm) = c("Site 1", "Site 2")
#'
#' trait = data.frame(body = c(1,2,3,4,4), beak = c(1,5,4,1,2))
#' rownames(trait) = colnames(comm)
#'
#' hv = hull.build(comm[1,], trait)
#' hull.alpha(hv)
#' hull.gamma(hv)
#' hvlist = hull.build(comm, trait, axes = 2)
#' hull.alpha(hvlist)
#' hull.gamma(hvlist)
#' @export
hull.gamma <- function(comm){
if(is(comm, "convhulln"))
return(hull.alpha(comm))
if(is.list(comm)){
traits = c()
for(i in 1:length(comm))
traits = rbind(traits, comm[[i]]$p)
traits = unique(traits)
comm = hull.build(comm = rep(1, nrow(traits)), trait = traits)
return(hull.alpha(comm))
}
}
#' Gamma diversity using kernel density hypervolumes.
#' @description Estimation of functional richness of multiple sites, based on n-dimensional hypervolumes.
#' @param comm A 'Hypervolume' or 'HypervolumeList' object, preferably built using function kernel.build.
#' @details Estimates the functional richness (gamma FD) of multiple communities using kernel density hypervolumes, as implemented in Blonder et al. (2014, 2018).
#' Functional richness is expressed as the total volume of the n-dimensional hypervolume (Mammola & Cardoso, 2020). Note that the hypervolume is dimensionless, and that only hypervolumes with the same number of dimensions can be compared in terms of functional richness.
#' Given that the density and positions of stochastic points in the hypervolume are probabilistic, the functional richness of the trait space will intimately depend on the quality of input hypervolumes (details in Mammola & Cardoso, 2020).
#' @return A single value of gamma.
#' @references Blonder, B., Lamanna, C., Violle, C. & Enquist, B.J. (2014) The n-dimensional hypervolume. Global Ecology and Biogeography, 23: 595-609.
#' @references Blonder, B., Morrow, C.B., Maitner, B., Harris, D.J., Lamanna, C., Violle, C., ... & Kerkhoff, A.J. (2018) New approaches for delineating n-dimensional hypervolumes. Methods in Ecology and Evolution, 9: 305-319.
#' @references Mammola, S. & Cardoso, P. (2020) Functional diversity metrics using kernel density n-dimensional hypervolumes. Methods in Ecology and Evolution, 11: 986-995.
#' @examples \dontrun{
#' comm = rbind(c(1,3,2,2,2), c(0,0,0,2,2))
#' colnames(comm) = c("SpA", "SpB", "SpC", "SpD", "SpE")
#' rownames(comm) = c("Site 1", "Site 2")
#'
#' trait = data.frame(body = c(1,2,3,4,5), beak = c(1,2,3,4,5))
#' rownames(trait) = colnames(comm)
#'
#' hv = kernel.build(comm[1,], trait)
#' kernel.alpha(hv)
#' kernel.gamma(hv)
#' hvlist = kernel.build(comm, trait)
#' kernel.alpha(hvlist)
#' kernel.gamma(hvlist)
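#'
#' # Gamma pools the stochastic points of all sites before re-thresholding,
#' # so it is typically at least as large as any single-site alpha:
#' kernel.gamma(hvlist) >= max(kernel.alpha(hvlist))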
#' }
#' @export
kernel.gamma <- function(comm){
if(is(comm, "Hypervolume"))
return(kernel.alpha(comm))
if(is(comm, "HypervolumeList")){
for(i in 2:length(comm@HVList))
comm@HVList[[1]]@RandomPoints = rbind(comm@HVList[[1]]@RandomPoints, comm@HVList[[i]]@RandomPoints)
comm = hypervolume_threshold(comm@HVList[[1]], num.thresholds = 1000, quantile.requested = comm@HVList[[1]]@Parameters$quantile.requested, quantile.requested.type = comm@HVList[[1]]@Parameters$quantile.requested.type, plot = FALSE, verbose = FALSE)
return(comm$HypervolumesThresholded@Volume)
}
}
#' Community Weighted Mean.
#' @description Average value of each of a series of traits in multiple communities.
#' @param comm A sites x species matrix, with incidence or abundance data about the species in the community.
#' @param trait A species x traits matrix, with trait values for each species in comm.
#' @param abund A boolean (T/F) indicating whether abundance data should be used (TRUE) or converted to incidence (FALSE) before analysis. If not specified, default is TRUE.
#' @param na.rm Remove NA values before calculating cwm.
#' @details Community weighted mean is used to compare communities in terms of their "typical" trait values.
#' @return A sites x trait matrix with mean value per site and trait.
#' @examples comm <- matrix(c(2,5,0,0,0,1,1,0,0,0,0,1,2,0,0,0,0,0,10,1), nrow = 4, ncol = 5, byrow = TRUE)
#' rownames(comm) = c("Site1","Site2","Site3","Site4")
#' colnames(comm) = c("Sp1","Sp2","Sp3","Sp4","Sp5")
#' trait <- data.frame(Trait1 = c(1,0,0,2,0), Trait2 = c(rep("A",2), rep("B",3)))
#' rownames(trait) = colnames(comm)
#' cwm(comm, trait)
#' cwm(comm, trait, FALSE)
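#'
#' # A hedged cross-check for a numeric trait: cwm is the abundance-weighted
#' # mean of the trait values (first site, first trait).
#' weighted.mean(trait$Trait1, comm[1, ])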
#' @export
cwm <- function(comm, trait, abund = TRUE, na.rm = FALSE){
trait = dummy(trait)
if(!abund)
comm[comm > 1] = 1
nSites = nrow(comm)
nTraits = ncol(trait)
nSp = rowSums(comm)
results = matrix(NA, nrow = nSites, ncol = nTraits)
rownames(results) = rownames(comm)
colnames(results) = colnames(trait)
for (s in 1:nSites)
for (t in 1:nTraits)
results[s, t] = sum(comm[s,] * trait[,t], na.rm = na.rm) / nSp[s]
return(results)
}
#' Community Weighted Dispersion.
#' @description Standard deviation value of each of a series of traits in multiple communities.
#' @param comm A sites x species matrix, with incidence or abundance data about the species in the community.
#' @param trait A species x traits matrix, with trait values for each species in comm.
#' @param abund A boolean (T/F) indicating whether abundance data should be used (TRUE) or converted to incidence (FALSE) before analysis. If not specified, default is TRUE.
#' @param na.rm Remove NA values before calculating cwd.
#' @details Community weighted dispersion is used to compare communities in terms of their dispersion of trait values around a mean, reflecting individual trait variability or diversity.
#' @return A sites x trait matrix with sd value per site and trait.
#' @examples comm <- matrix(c(2,5,0,0,0,1,1,0,0,0,0,1,2,0,0,0,0,0,10,1), nrow = 4, ncol = 5, byrow = TRUE)
#' rownames(comm) = c("Site1","Site2","Site3","Site4")
#' colnames(comm) = c("Sp1","Sp2","Sp3","Sp4","Sp5")
#' trait <- matrix(c(1,1,0,0,0,0,2,1,0,0,0,0,2,1,0,0,0,0,2,1), nrow = 5, ncol = 4, byrow = TRUE)
#' rownames(trait) = colnames(comm)
#' colnames(trait) = c("Trait1","Trait2","Trait3","Trait4")
#' cwd(comm, trait)
#' cwd(comm, trait, FALSE)
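#'
#' # A hedged cross-check: cwd is the abundance-weighted standard deviation
#' # around the community weighted mean (first site, first trait).
#' m = weighted.mean(trait[, 1], comm[1, ])
#' sqrt(weighted.mean((trait[, 1] - m)^2, comm[1, ]))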
#' @export
cwd <- function(comm, trait, abund = TRUE, na.rm = FALSE){
trait = dummy(trait)
if(!abund)
comm[comm > 1] = 1
nSites = nrow(comm)
nTraits = ncol(trait)
nSp = rowSums(comm)
results = matrix(NA, nrow = nSites, ncol = nTraits)
rownames(results) = rownames(comm)
colnames(results) = colnames(trait)
cwmean = cwm(comm, trait, abund, na.rm)
for (s in 1:nSites)
for (t in 1:nTraits)
results[s, t] = (sum(comm[s,] * (trait[,t] - cwmean[s,t])^2, na.rm = na.rm) / nSp[s])^0.5
return(results)
}
#' Community Weighted Evenness.
#' @description Evenness value of each of a series of traits in multiple communities.
#' @param comm A sites x species matrix, with incidence or abundance data about the species in the community.
#' @param trait A species x traits matrix, with trait values for each species in comm.
#' @param func Calculate evenness using Camargo (1993; default) or Bulla (1994) index.
#' @param abund A boolean (T/F) indicating whether abundance data should be used (TRUE) or converted to incidence (FALSE) before analysis. If not specified, default is TRUE.
#' @param na.rm Remove NA values before calculating cwe.
#' @details Community weighted evenness is used to compare communities in terms of their evenness of trait values, reflecting trait abundance and distances between values.
#' @return A sites x trait matrix with evenness value per site and trait.
#' @references Bulla, L. (1994) An index of evenness and its associated diversity measure. Oikos, 70: 167-171.
#' @references Camargo, J.A. (1993) Must dominance increase with the number of subordinate species in competitive interactions? Journal of Theoretical Biology, 161: 537-542.
#' @examples comm <- matrix(c(1,1,1,1,0,1,1,0,0,0,0,1,2,0,0,0,0,0,10,1), nrow = 4, ncol = 5, byrow = TRUE)
#' rownames(comm) = c("Site1","Site2","Site3","Site4")
#' colnames(comm) = c("Sp1","Sp2","Sp3","Sp4","Sp5")
#' trait <- matrix(c(4,1,3,4,2,2,2,1,3,3,2,0,1,4,0,0,5,5,2,1), nrow = 5, ncol = 4, byrow = TRUE)
#' rownames(trait) = colnames(comm)
#' colnames(trait) = c("Trait1","Trait2","Trait3","Trait4")
#' cwe(comm, trait)
#' cwe(comm, trait, abund = FALSE)
#' cwe(comm, trait, "bulla")
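#'
#' # A hedged sketch of the Camargo index on plain proportions (cwe additionally
#' # weights abundances by the distances between neighbouring trait values):
#' p = c(0.5, 0.3, 0.2)
#' d = abs(outer(p, p, "-"))
#' 1 - sum(d[lower.tri(d)]) / (length(p) * (length(p) - 1) / 2)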
#' @export
cwe <- function(comm, trait, func = "camargo", abund = TRUE, na.rm = FALSE){
trait = dummy(trait)
if(!abund)
comm[comm > 1] = 1
nSites = nrow(comm)
nTraits = ncol(trait)
results = matrix(NA, nrow = nSites, ncol = nTraits)
rownames(results) = rownames(comm)
colnames(results) = colnames(trait)
for (s in 1:nSites){
for (t in 1:nTraits){
#clean stuff for this run
thisComm = comm[s, comm[s,] > 0] #filter comm
thisTrait = trait[comm[s,] > 0, t] #filter trait values
thisComm = thisComm[order(thisTrait)] #order comm by trait values
thisTrait = thisTrait[order(thisTrait)] #order trait by trait values
#if any trait values are similar, merge in same functional unit
i = 1
while(i < length(thisTrait)){
if(thisTrait[i] == thisTrait[i+1]){
thisComm[i+1] = thisComm[i] + thisComm[i+1]
thisComm = thisComm[-i]
thisTrait = thisTrait[-i]
} else {
i = i + 1
}
}
#if only 1 functional category skip, as evenness does not make sense
nDist = length(thisComm) - 1 #number of links
if(nDist == 0) next
#if only 2 categories use regular evenness without the functional part
if(nDist == 1){
#calculate the observed values as proportional abundance per species
thisObs = thisComm / sum(thisComm, na.rm = na.rm)
if(func == "bulla"){
thisExp = 1 / length(thisComm)
results[s,t] = as.numeric((sum(apply(cbind(thisObs, rep(thisExp, length(thisObs))), 1, min), na.rm = na.rm) - thisExp) / (1 - thisExp))
} else if(func == "camargo"){
results[s,t] = as.numeric(1 - (abs(thisObs[1] - thisObs[2])))
}
next
}
#if more than 2 categories proceed with the functional part
#calculate distances between trait values
disTraits = c()
for(i in 1:nDist)
disTraits[i] = thisTrait[i+1] - thisTrait[i]
#calculate the observed values as proportional abundance per species / distance
thisObs = c()
for(i in 1:nDist) #cycle through all distances of this site/sample
thisObs[i] = mean(as.numeric(thisComm[c(i, i+1)]), na.rm = na.rm) / disTraits[i]
thisObs = thisObs / sum(thisObs, na.rm = na.rm) #sum all observations to 1
if(func == "bulla"){
##calculate the expected values as average length of distances between observations
thisExp = 1 / nDist
#calculate evenness as the sum of minimum values between observed and expected with correction from Bulla, 1994
results[s,t] = (sum(apply(cbind(thisObs, rep(thisExp, length(thisObs))), 1, min), na.rm = na.rm) - thisExp) / (1 - thisExp)
} else if(func == "camargo"){
results[s,t] = 0
for(j in 1:(nDist - 1)){
for(k in (j + 1):nDist){
results[s,t] = results[s,t] + abs(thisObs[j] - thisObs[k])
}
}
results[s,t] = 1 - (results[s,t] / (nDist * (nDist - 1) / 2))
} else {
stop(sprintf("Function %s not recognized.", func))
}
}
}
return(results)
}
#' Scaled mean squared error of accumulation curves.
#' @description Accuracy (scaled mean squared error) of accumulation curves compared with a known true diversity value (target).
#' @param accum A matrix resulting from the alpha.accum or beta.accum functions (sampling units x diversity values).
#' @param target The true known diversity value, with which the curve will be compared. If not specified, default is the diversity observed with all sampling units.
#' @details Among multiple measures of accuracy (Walther & Moore 2005) the SMSE presents several advantages, as it is (Cardoso et al. 2014):
#' (i) scaled to true diversity, so that similar absolute differences are weighted according to how much they represent of the real value;
#' (ii) scaled to the number of sampling units, so that values are independent of sample size;
#' (iii) squared, so that small, mostly meaningless fluctuations around the true value are down-weighted; and
#' (iv) independent of positive or negative deviation from the real value, as such differentiation is usually not necessary.
#' For alpha diversity, accuracy may also be weighted according to how complete the data are predicted to be. The weight of each point in the curve is proportional to its sampling intensity (i.e. n/Sobs).
#' @return Accuracy values (both raw and weighted) for all observed and estimated curves.
#' @references Cardoso, P., Rigal, F., Borges, P.A.V. & Carvalho, J.C. (2014) A new frontier in biodiversity inventory: a proposal for estimators of phylogenetic and functional diversity. Methods in Ecology and Evolution, 5: 452-461.
#' @references Walther, B.A. & Moore, J.L. (2005) The concepts of bias, precision and accuracy, and their use in testing the performance of species richness estimators, with a literature review of estimator performance. Ecography, 28: 815-829.
#' @examples comm1 <- matrix(c(2,2,0,0,0,1,1,0,0,0,0,2,2,0,0,0,0,0,2,2), nrow = 4, ncol = 5, byrow = TRUE)
#' comm2 <- matrix(c(1,1,0,0,0,0,2,1,0,0,0,0,2,1,0,0,0,0,2,1), nrow = 4, ncol = 5, byrow = TRUE)
#' tree <- hclust(dist(c(1:5), method="euclidean"), method="average")
#' acc.alpha = alpha.accum(comm1)
#' accuracy(acc.alpha)
#' accuracy(acc.alpha, 10)
#' acc.beta = beta.accum(comm1, comm2, tree)
#' accuracy(acc.beta)
#' accuracy(acc.beta, c(1,1,0))
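#'
#' # A hedged sketch of the raw SMSE formula: squared deviations scaled by the
#' # squared target and by the number of sampling units, for hypothetical values.
#' est = c(6, 8, 9, 10); target = 10
#' sum((est - target)^2 / (target^2 * length(est)))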
#' @export
accuracy <- function(accum, target = -1){
if(ncol(accum) > 5 || accum[nrow(accum), 3] > 1){ #if alpha
if (target == -1)
target <- accum[nrow(accum), 3]
intensTotal = accum[nrow(accum), 2] / accum[nrow(accum), 3] #sampling intensity = final n / final S
if(ncol(accum) > 10){ #if non-parametric
smse <- matrix(0, nrow = 2, ncol = 13)
for (i in 1:nrow(accum)){
intensity = accum[i, 2] / accum[i, 3] / intensTotal
error = (accum[i,3] - target)^2 / (target^2 * nrow(accum))
smse[1,1] <- smse[1,1] + error
smse[2,1] <- smse[2,1] + error * intensity
for (j in 2:13){
error = (accum[i,j+6] - target)^2 / (target^2 * nrow(accum))
smse[1,j] <- smse[1,j] + error
smse[2,j] <- smse[2,j] + error * intensity
}
}
rownames(smse) <- c("Raw", "Weighted")
colnames(smse) <- c("Obs", "Jack1ab", "Jack1abP", "Jack1in", "Jack1inP", "Jack2ab", "Jack2abP", "Jack2in", "Jack2inP", "Chao1", "Chao1P", "Chao2", "Chao2P")
}
else{ #if curve
smse <- matrix(0, nrow = 2, ncol = 5)
for (i in 3:nrow(accum)){
intensity = accum[i, 2] / accum[i, 3] / intensTotal
for (j in 1:5){
if (!is.na(accum[i,j+2])){
error = (accum[i,j+2] - target)^2 / (target^2 * nrow(accum))
smse[1,j] <- smse[1,j] + error
smse[2,j] <- smse[2,j] + error * intensity
}
}
}
rownames(smse) <- c("Raw", "Weighted")
colnames(smse) <- c("Obs", "Clench", "Exponential", "Rational", "Weibull")
}
} else { #if beta
if (target[1] == -1)
target <- accum[nrow(accum), 2:4]
smse <- rep(0, 3)
for (i in 1:nrow(accum)){
for (j in 1:3)
smse[j] <- smse[j] + (accum[i,j+1] - target[j])^2
}
smse <- smse / nrow(accum)
smse <- list(Btotal=smse[1], Brepl=smse[2], Brich=smse[3])
smse <- c(unlist(smse))
}
return(smse)
}
#' Slope of accumulation curves.
#' @description This is similar to the first derivative of the curves at each of its points.
#' @param accum A matrix resulting from the alpha.accum or beta.accum functions (sampling units x diversity values).
#' @details Slope is the expected gain in diversity when sampling a new individual. The slope of an accumulation curve, of either observed or estimated diversity, allows verifying if the asymptote has been reached (Cardoso et al. 2011).
#' This is an indication of either the completeness of the inventory (low final slopes of the observed curve indicate high completeness) or reliability of the estimators (stability of the slope around a value of 0 along the curve indicates reliability).
#' @return A matrix of sampling units x slope values.
#' @references Cardoso, P., Pekar, S., Jocque, R. & Coddington, J.A. (2011) Global patterns of guild composition and functional diversity of spiders. PLoS One, 6, e21710.
#' @examples comm1 <- matrix(c(2,2,0,0,0,1,1,0,0,0,0,2,2,0,0,0,0,0,2,2), nrow = 4, ncol = 5, byrow = TRUE)
#' comm2 <- matrix(c(1,1,0,0,0,0,2,1,0,0,0,0,2,1,0,0,0,0,2,1), nrow = 4, ncol = 5, byrow = TRUE)
#' tree <- hclust(dist(c(1:5), method="euclidean"), method="average")
#' acc.alpha = alpha.accum(comm1)
#' slope(acc.alpha)
#' acc.beta = beta.accum(comm1, comm2, tree)
#' slope(acc.beta)
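#'
#' # A hedged sketch for alpha curves: the slope approximates the gain in
#' # observed diversity per individual added between consecutive samples.
#' diff(acc.alpha[, 3]) / diff(acc.alpha[, 2])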
#' @export
slope <- function(accum){
if(ncol(accum) > 5 || accum[nrow(accum), 3] > 1){ #if alpha
sl <- accum[,-2]
accum <- rbind(rep(0,ncol(accum)), accum)
for (i in 1:nrow(sl)){
sl[i,1] <- i
for (j in 2:ncol(sl)){
sl[i,j] <- (accum[i+1,j+1]-accum[i,j+1])/(accum[i+1,2]-accum[i,2])
}
}
} else { #if beta
sl <- accum
sl[1,] <- 0
sl[1,1] <- 1
for (i in 2:nrow(sl)){
for (j in 2:ncol(sl)){
sl[i,j] <- (accum[i,j]-accum[i-1,j])
}
}
}
return(sl)
}
#' Coverage of datasets.
#' @description Coverage is a measure of completeness of a dataset.
#' @param comm A matrix of sites x species with abundance values.
#' @param tree A phylo or hclust object (used only for PD or FD) or alternatively a species x traits matrix or data.frame to build a functional tree.
#' @details Calculated as the estimated proportion of individuals that belong to the species (or phylogenetic, or functional diversity) already collected (Chao and Jost 2012).
#' @return A vector with coverage values per site.
#' @references Chao, A. & Jost, L. (2012). Coverage-based rarefaction and extrapolation: standardizing samples by completeness rather than size. Ecology, 93: 2533-2547.
#' @examples comm <- matrix(c(2,1,0,0,100,1,2,0,0,3,1,2,4,0,0,0,0,0,2,2), nrow = 4, ncol = 5, byrow = TRUE)
#' tree <- hclust(dist(c(1:5), method="euclidean"), method="average")
#' coverage(comm)
#' coverage(comm, tree)
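#'
#' # A hedged sketch of the estimator for a site with n individuals, s1
#' # singletons and s2 doubletons:
#' n = 105; s1 = 2; s2 = 1
#' 1 - (s1 / n) * (((n - 1) * s1) / ((n - 1) * s1 + 2 * s2))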
#' @export
coverage <- function(comm, tree){
#convert traits to a tree if needed
if(!missing(tree) && (is.matrix(tree) || is.data.frame(tree) || is.vector(tree)))
tree = tree.build(tree)
if(!missing(tree))
tree = xTree(tree)
cover = comm[,1,drop = FALSE]
for(r in 1:nrow(comm)){
data <- comm[r,,drop = FALSE]
n <- nobs(data, tree)
s1 <- srare(data, tree, 1)
s2 <- srare(data, tree, 2)
cover[r,1] = 1 - (s1/n)*(((n-1)*s1)/((n-1)*s1+2*s2))
}
return(cover)
}
#' Optimization of alpha diversity sampling protocols.
#' @description Optimization of alpha diversity sampling protocols when different methods and multiple samples per method are available.
#' @param comm A samples x species x sites array, with either abundance or incidence data.
#' @param tree A phylo or hclust object (used only for PD or FD) or alternatively a species x traits matrix or data.frame to build a functional tree.
#' @param methods A data.frame with the method names (1st column), number of samples per method (2nd column), base cost per method (3rd column, those costs that are fixed once a method is decided), and sample cost per method (those costs that add with each sample of the method, 4th column). If the last two columns are not provided base = 0 and sample = 1. The order of methods must be the same as in comm and the sum of the samples must be the same as nrow(comm).
#' @param base A vector defining a base protocol from which to build upon (complementarity analysis) (length must be equal to number of methods).
#' @param seq By default all combinations will be tested. If TRUE, a sequential approach will be taken, where methods are added based on the previous step. The method added will be the one providing the highest efficiency as quantified by the slope of the accumulation curve.
#' @param runs Number of random permutations to be made to the sample order. Default is 1000.
#' @param prog Present a text progress bar in the R console.
#' @details Often a combination of methods allows sampling maximum plot diversity with minimum effort, as it allows sampling different sub-communities, contrary to using single methods.
#' Cardoso (2009) proposed a way to optimize the number of samples per method when the target is to maximize sampled alpha diversity. It is applied here for TD, PD and FD, and for one or multiple sites simultaneously.
#' PD and FD are calculated based on a tree (hclust or phylo object, no need to be ultrametric).
#' @return A matrix of samples x methods (values being optimum number of samples per method). The last column is the average alpha diversity value, rescaled to 0-1 if made for several sites, where 1 is the true diversity of each site.
#' @references Cardoso, P. (2009) Standardization and optimization of arthropod inventories - the case of Iberian spiders. Biodiversity and Conservation, 18, 3949-3962.
#' @examples comm1 <- matrix(c(1,1,0,2,4,0,0,1,2,0,0,3), nrow = 4, ncol = 3, byrow = TRUE)
#' comm2 <- matrix(c(2,2,0,3,1,0,0,0,5,0,0,2), nrow = 4, ncol = 3, byrow = TRUE)
#' comm <- array(c(comm1, comm2), c(4,3,2))
#' colnames(comm) <- c("Sp1","Sp2","Sp3")
#'
#' methods <- data.frame(method = c("Met1","Met2","Met3"),
#' nSamples = c(1,2,1), fixcost = c(1,1,2), varCost = c(1,1,1))
#' tree <- hclust(dist(c(1:3), method="euclidean"), method="average")
#' tree$labels <- colnames(comm)
#'
#' \dontrun{
#' optim.alpha(comm,,methods)
#' optim.alpha(comm,,methods, seq = TRUE)
#' optim.alpha(comm, tree, methods)
#' optim.alpha(comm,, methods = methods, seq = TRUE, base = c(0,1,1))
#' }
#' @export
optim.alpha <- function(comm, tree, methods, base, seq = FALSE, runs = 1000, prog = TRUE){
return(optim.div("alpha", comm, tree, methods, base, seq, abund = FALSE, runs, prog))
}
#' Efficiency statistics for alpha-sampling.
#' @description Average alpha diversity observed with a given number of samples per method.
#' @param comm A samples x species x sites array, with either abundance or incidence data.
#' @param tree A phylo or hclust object (used only for PD or FD) or alternatively a species x traits matrix or data.frame to build a functional tree.
#' @param methods A data.frame with the method names (1st column) and number of samples per method (2nd column). The order of methods must be the same as in comm and the sum of the samples must be the same as nrow(comm).
#' @param samples A vector with the number of samples per method to test.
#' @param runs Number of random permutations to be made to the sample order. Default is 1000.
#' @details Different combinations of samples per method allow sampling different sub-communities.
#' This function allows knowing the average TD, PD or FD values for a given combination, for one or multiple sites simultaneously.
#' PD and FD are calculated based on a tree (hclust or phylo object, no need to be ultrametric).
#' @return A single average alpha diversity value. Rescaled to 0-1 if made for several sites, where 1 is the true diversity of each site.
#' @examples comm1 <- matrix(c(1,1,0,2,4,0,0,1,2,0,0,3), nrow = 4, ncol = 3, byrow = TRUE)
#' comm2 <- matrix(c(2,2,0,3,1,0,0,0,5,0,0,2), nrow = 4, ncol = 3, byrow = TRUE)
#' comm <- array(c(comm1, comm2), c(4,3,2))
#' colnames(comm) <- c("Sp1","Sp2","Sp3")
#'
#' tree <- hclust(dist(c(1:3), method="euclidean"), method="average")
#' tree$labels <- colnames(comm)
#'
#' methods <- data.frame(method = c("Met1","Met2","Met3"), nSamples = c(1,2,1))
#'
#' optim.alpha.stats(comm,, methods, c(0,0,1))
#' optim.alpha.stats(comm, tree, methods, c(0,1,1), runs = 100)
#' @export
optim.alpha.stats <- function(comm, tree, methods, samples, runs = 1000){
#convert traits to a tree if needed
if(!missing(tree) && (is.matrix(tree) || is.data.frame(tree) || is.vector(tree)))
tree = tree.build(tree)
if(sum(methods[, 2]) != nrow(comm))
stop("sum of the methods must be the same as nrow(comm)")
##preliminary stats
if (!missing(tree)){
comm = reorderComm(comm, tree)
tree <- xTree(tree)
}
if(length(dim(comm)) == 3) ##number of sites
nSites <- dim(comm)[3]
else
nSites <- 1
nMethods <- nrow(methods) ##number of methods
lMethods = c() ##method of each sample
for (i in 1:nMethods)
lMethods = c(lMethods, rep(methods[i, 1], methods[i, 2]))
div <- 0 ##average diversity obtained using this particular combination of samples per method
for (i in 1:nSites){
if (nSites > 1){
site <- as.matrix(comm[,,i])
true <- sobs(site, tree) ##true diversity of each site
} else {
site <- as.matrix(comm)
true <- 1
}
for (r in 1:runs){
addSample <- rep(0, ncol(comm))
for (m in 1:nMethods){
if (samples[m] > 0){
filterList <- site[which(lMethods == methods[m, 1]),, drop = F] ##filter by method m
filterList <- filterList[sample(nrow(filterList), samples[m]),, drop = F] ##randomly select rows
addSample <- rbind(addSample, filterList) ##add random samples
}
}
div <- div + sobs(addSample, tree) / runs / nSites / true
}
}
return(div)
}
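#A base-R sketch of the resampling idea implemented above: draw a fixed number
#of samples per method at random and average the observed richness; all objects
#below are invented toys, not package data.
commToy <- rbind(c(1,0,1), c(0,2,0), c(0,1,1), c(3,0,0)) #4 samples x 3 species
methodToy <- c("m1", "m2", "m2", "m3")                   #method of each sample (row)
samplesToy <- c(m1 = 0, m2 = 1, m3 = 1)                  #combination to evaluate
richnessToy <- replicate(100, {
keep <- unlist(lapply(names(samplesToy), function(m) {
idx <- which(methodToy == m)
idx[sample.int(length(idx), samplesToy[m])] #avoids the sample() length-1 trap
}))
sum(colSums(commToy[keep, , drop = FALSE]) > 0) #observed richness
})
mean(richnessToy) #average alpha diversity for this combination of samples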
#' Optimization of beta diversity sampling protocols.
#' @description Optimization of beta diversity sampling protocols when different methods and multiple samples per method are available.
#' @param comm A samples x species x sites array, with either abundance or incidence data.
#' @param tree A phylo or hclust object (used only for PD or FD) or alternatively a species x traits matrix or data.frame to build a functional tree.
#' @param methods A data.frame with the method names (1st column), number of samples per method (2nd column), base cost per method (3rd column, those costs that are fixed once a method is decided), and sample cost per method (4th column, those costs that add with each sample of the method). If the last two columns are not provided, base = 0 and sample = 1. The order of methods must be the same as in comm and the sum of the samples must equal nrow(comm).
#' @param base A vector defining a mandatory base protocol to build upon (complementarity analysis); its length must equal the number of methods.
#' @param seq By default all combinations will be tested. If TRUE, a sequential approach will be taken, where methods are added based on the previous step. The method added will be the one providing the highest efficiency as quantified by the slope of the accumulation curve.
#' @param abund A boolean (T/F) indicating whether abundance data should be used (TRUE) or converted to incidence (FALSE) before analysis.
#' @param runs Number of random permutations to be made to the sample order. Default is 1000.
#' @param prog Present a text progress bar in the R console.
#' @details Often, when comparing different sites or the same site along time (i.e. measuring beta diversity), it is not necessary to sample exhaustively. A minimum combination of samples targeting different sub-communities (that may behave differently) may be enough to perceive such differences, for example for monitoring purposes.
#' Cardoso et al. (in prep.) introduce and differentiate the concepts of alpha-sampling and beta-sampling. While alpha-sampling optimization implies maximizing local diversity sampled (Cardoso 2009), beta-sampling optimization implies minimizing differences in beta diversity values between partially and completely sampled communities.
#' This function uses as beta diversity measures the Btotal, Brepl and Brich partitioning framework (Carvalho et al. 2012) and respective generalizations to PD and FD (Cardoso et al. 2014).
#' PD and FD are calculated based on a tree (hclust or phylo object, no need to be ultrametric).
#' @return A matrix of samples x methods (values being optimum number of samples per method). The last column is precision = (1 - average absolute difference from real beta).
#' @references Cardoso, P. (2009) Standardization and optimization of arthropod inventories - the case of Iberian spiders. Biodiversity and Conservation, 18, 3949-3962.
#' @references Cardoso, P., Rigal, F., Carvalho, J.C., Fortelius, M., Borges, P.A.V., Podani, J. & Schmera, D. (2014) Partitioning taxon, phylogenetic and functional beta diversity into replacement and richness difference components. Journal of Biogeography, 41, 749-761.
#' @references Cardoso, P., et al. (in prep.) Optimal inventorying and monitoring of taxon, phylogenetic and functional diversity.
#' @references Carvalho, J.C., Cardoso, P. & Gomes, P. (2012) Determining the relative roles of species replacement and species richness differences in generating beta-diversity patterns. Global Ecology and Biogeography, 21, 760-771.
#' @examples comm1 <- matrix(c(1,1,0,2,4,0,0,1,2,0,0,3), nrow = 4, ncol = 3, byrow = TRUE)
#' comm2 <- matrix(c(2,2,0,3,1,0,0,0,5,0,0,2), nrow = 4, ncol = 3, byrow = TRUE)
#' comm3 <- matrix(c(2,0,0,3,1,0,0,0,5,0,0,2), nrow = 4, ncol = 3, byrow = TRUE)
#' comm <- array(c(comm1, comm2, comm3), c(4,3,3))
#' colnames(comm) <- c("sp1","sp2","sp3")
#'
#' methods <- data.frame(method = c("Met1","Met2","Met3"),
#' nSamples = c(1,2,1), fixcost = c(1,1,2), varCost = c(1,1,1))
#' tree <- hclust(dist(c(1:3), method="euclidean"), method="average")
#' tree$labels <- colnames(comm)
#'
#' \dontrun{
#' optim.beta(comm,,methods)
#' optim.beta(comm,,methods, seq = TRUE)
#' optim.beta(comm, tree, methods)
#' optim.beta(comm,, methods = methods, seq = TRUE, base = c(0,1,1))
#' }
#' @export
optim.beta <- function(comm, tree, methods, base, seq = FALSE, abund = TRUE, runs = 1000, prog = TRUE){
return(optim.div("beta", comm, tree, methods, base, seq, abund, runs, prog))
}
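#The precision score used by the beta-sampling functions is one minus the mean
#absolute deviation between sampled and true beta values; a toy illustration of
#that arithmetic (all values invented):
trueBeta <- c(Btotal = 0.50, Brepl = 0.30, Brich = 0.20)    #fully sampled sites
sampledBeta <- c(Btotal = 0.55, Brepl = 0.25, Brich = 0.30) #reduced protocol
1 - mean(abs(sampledBeta - trueBeta)) #precision, 1 = perfect match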
#' Efficiency statistics for beta-sampling.
#' @description Average absolute difference between sampled and real beta diversity when using a given number of samples per method.
#' @param comm A samples x species x sites array, with either abundance or incidence data.
#' @param tree A phylo or hclust object (used only for PD or FD) or alternatively a species x traits matrix or data.frame to build a functional tree.
#' @param methods A data.frame with the method names (1st column) and number of samples per method (2nd column). The order of methods must be the same as in comm and the sum of the samples must be the same as nrow(comm).
#' @param samples A vector with the number of samples per method to test.
#' @param abund A boolean (T/F) indicating whether abundance data should be used (TRUE) or converted to incidence (FALSE) before analysis.
#' @param runs Number of random permutations to be made to the sample order. Default is 1000.
#' @details Different combinations of samples per method allow sampling different sub-communities.
#' This function allows knowing the average absolute difference between sampled and real beta diversity for a given combination, for one or multiple sites simultaneously.
#' PD and FD are calculated based on a tree (hclust or phylo object, no need to be ultrametric).
#' @return A single precision value = (1 - average absolute beta diversity difference value).
#' @examples comm1 <- matrix(c(1,1,0,2,4,0,0,1,2,0,0,3), nrow = 4, ncol = 3, byrow = TRUE)
#' comm2 <- matrix(c(2,2,0,3,1,0,0,0,5,0,0,2), nrow = 4, ncol = 3, byrow = TRUE)
#' comm3 <- matrix(c(2,0,0,3,1,0,0,0,5,0,0,2), nrow = 4, ncol = 3, byrow = TRUE)
#' comm <- array(c(comm1, comm2, comm3), c(4,3,3))
#' colnames(comm) <- c("sp1","sp2","sp3")
#'
#' tree <- hclust(dist(c(1:3), method="euclidean"), method="average")
#' tree$labels <- colnames(comm)
#'
#' methods <- data.frame(method = c("Met1","Met2","Met3"), nSamples = c(1,2,1))
#'
#' optim.beta.stats(comm,,methods, c(1,2,1)) #a complete sample will have 0 difference
#' optim.beta.stats(comm, tree, methods = methods, samples = c(0,1,1), runs = 100)
#' @export
optim.beta.stats <- function(comm, tree, methods, samples, abund = TRUE, runs = 1000){
#convert traits to a tree if needed
if(!missing(tree) && (is.matrix(tree) || is.data.frame(tree) || is.vector(tree)))
tree = tree.build(tree)
if(sum(methods[, 2]) != nrow(comm))
stop("Sum of the methods must be the same as nrow(comm).")
##preliminary stats
if(length(dim(comm)) == 3){ ##number of sites
nSites <- dim(comm)[3]
}else{
return(message("Need sample data from at least two sites to perform analyses."))
}
nMethods <- nrow(methods) ##number of methods
lMethods = c() ##method of each sample
for (i in 1:nMethods)
lMethods = c(lMethods, rep(methods[i, 1], methods[i, 2]))
diff <- 0 ##average absolute difference between observed and true diversity obtained using this particular combination of samples per method
if(!missing(tree))
comm = reorderComm(comm, tree)
##calculate true beta values
sumComm <- matrix(0, nrow = nSites, ncol = ncol(comm))
for (i in 1:nSites){
sumComm[i,] <- colSums(comm[,,i])
}
true <- beta(sumComm, tree, abund)
##calculate absolute difference between sampled and true beta values
for (r in 1:runs){
sumComm <- matrix(0, nrow = nSites, ncol = ncol(comm))
for (m in 1:nMethods){
if (samples[m] > 0){
filterList <- comm[which(lMethods == methods[m, 1]),,, drop=F] ##filter by method m
filterList <- filterList[sample(nrow(filterList), samples[m]),,, drop=F] ##randomly select rows
for (i in 1:nSites){
sumComm[i,] <- sumComm[i,] + colSums(filterList[,,i, drop=F])
}
}
}
sampleBeta <- beta(sumComm, tree, abund)
for(i in 1:3){
diff <- diff + mean(abs(sampleBeta[[i]] - true[[i]])) / 3 / runs
}
}
return(1 - diff)
}
#' Optimization of spatial sampling.
#' @description Optimization of sampling site distribution in space based on environmental (or other) variables.
#' @param layers A SpatRaster object from package terra.
#' @param n The number of intended sampling sites (clusters).
#' @param latlong Boolean indicating whether latitude and longitude should be taken into account when clustering.
#' @param clusterMap Boolean indicating whether to build a new raster with clusters.
#' @details Optimizing the selection of sampling sites often requires maximizing the environmental diversity covered by them.
#' One possible solution to this problem, here adopted, is performing a k-means clustering using environmental data and choosing the sites closest to the multidimensional environmental centroid of each cluster for sampling (Jimenez-Valverde & Lobo 2004).
#' @return Either a matrix of cells x variables (cluster membership, distance to centroid, latitude and longitude of each cell) or a list with such matrix plus the clusterMap.
#' @references Jimenez-Valverde, A., & Lobo, J. M. (2004) Un metodo sencillo para seleccionar puntos de muestreo con el objetivo de inventariar taxones hiperdiversos: el caso practico de las familias Araneidae y Thomisidae (Araneae) en la comunidad de Madrid, Espana. Ecologia, 18: 297-305.
#' @export
optim.spatial <- function(layers, n, latlong = TRUE, clusterMap = TRUE){
for(i in 1:length(layers)){ ##transform all layers to a scale [0,1]
globalMin <- terra::global(layers[[i]], min)[1,1]
globalMax <- terra::global(layers[[i]], max)[1,1]
layers[[i]] <- (layers[[i]] - globalMin)/(globalMax - globalMin)
}
dataMat <- as.matrix(layers)
dataMat <- dataMat[complete.cases(dataMat),]
dataMat <- cbind(dataMat, terra::extract(layers, terra::cells(layers), xy = TRUE)[,1:2]) ##add latlong
if (latlong)
res <- kmeans(dataMat, n) ##do k-means
else
res <- kmeans(dataMat[,-c((ncol(dataMat)-1),ncol(dataMat))], n) ##do k-means
cl = c()
for(c in 1:n){
cData <- dataMat[res$cluster==c,] #filter to cluster c
cCenter <- res$centers[c,]
dist2centroid <- c()
for(r in 1:nrow(cData))
dist2centroid[r] = dist(rbind(cData[r,], cCenter))
cData <- cbind(rep(c, nrow(cData)), dist2centroid, cData[,ncol(cData)], cData[,(ncol(cData)-1)])
colnames(cData) <- c("cluster", "dist2centroid", "lat", "long")
cData <- cData[sort.list(cData[,2]), ]
cl <- rbind(cl, cData)
}
#output raster with clusters
if(clusterMap){
map <- terra::rasterize(terra::extract(layers, terra::cells(layers), xy = TRUE)[,1:2], layers[[1]], res$cluster)
names(map) <- "clusters"
cl <- list(cl, map)
}
return(cl)
}
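#A standalone sketch of the site-selection logic above (k-means plus
#nearest-to-centroid), using an invented environmental matrix instead of
#rasters; all names below are illustrative:
set.seed(1)
envToy <- cbind(temp = runif(50), prec = runif(50)) #already on a [0,1] scale
kmToy <- kmeans(envToy, centers = 4)
picked <- sapply(1:4, function(cl) {
rows <- which(kmToy$cluster == cl)
d <- sqrt(rowSums(sweep(envToy[rows, , drop = FALSE], 2, kmToy$centers[cl, ])^2))
rows[which.min(d)] #observation closest to the centroid of cluster cl
})
picked #candidate sampling sites, one per cluster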
#' Maps of alpha diversity (Taxon, Phylogenetic or Functional Diversity - TD, PD, FD).
#' @description Observed alpha diversity using rasters of species distributions (presence/absence).
#' @param layers A SpatRaster object of species distributions from package terra.
#' @param tree A phylo or hclust object (used only for PD or FD) or alternatively a species x traits matrix or data.frame to build a functional tree.
#' @details TD is equivalent to species richness. Calculations of PD and FD are based on Faith (1992) and Petchey & Gaston (2002, 2006), which measure PD and FD of a community as the total branch length of a tree linking all species represented in such community.
#' PD and FD are calculated based on a tree (hclust or phylo object, no need to be ultrametric). The path to the root of the tree is always included in calculations of PD and FD.
#' The number and order of species in layers must be the same as in tree.
#' @return A SpatRaster object representing richness in space.
#' @references Faith, D.P. (1992) Conservation evaluation and phylogenetic diversity. Biological Conservation, 61, 1-10.
#' @references Petchey, O.L. & Gaston, K.J. (2002) Functional diversity (FD), species richness and community composition. Ecology Letters, 5, 402-411.
#' @references Petchey, O.L. & Gaston, K.J. (2006) Functional diversity: back to basics and looking forward. Ecology Letters, 9, 741-758.
#' @examples sp1 <- terra::rast(matrix(c(NA,1,1,1,1,0,0,0,0), nrow = 3, ncol = 3, byrow = TRUE))
#' sp2 <- terra::rast(matrix(c(0,0,0,0,1,1,1,1,1), nrow = 3, ncol = 3, byrow = TRUE))
#' sp3 <- terra::rast(matrix(c(0,0,0,1,1,1,0,0,0), nrow = 3, ncol = 3, byrow = TRUE))
#' spp <- c(sp1, sp2, sp3)
#' tree <- hclust(dist(c(1:3), method="euclidean"), method="average")
#' tree$labels = c("Sp1", "Sp2", "Sp3")
#' names(spp) = tree$labels
#' raster.alpha(spp)
#' raster.alpha(spp, tree)
#' @export
raster.alpha <- function(layers, tree){
res = terra::rast(matrix(NA, nrow = nrow(layers), ncol = ncol(layers)))
names(res) = "alpha"
for(r in 1:nrow(layers)){
for(c in 1:ncol(layers)){
if(is.na(sum(layers[r,c])))
res[r,c] = NA
else
res[r,c] = alpha(layers[r,c], tree)
}
}
return(res)
}
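#For plain TD with presence/absence layers and no tree, the per-cell loop above
#reduces to summing the layers; a minimal terra-based sketch with toy rasters
#(illustrative only):
sp1Toy <- terra::rast(matrix(c(NA,1,1,1), nrow = 2))
sp2Toy <- terra::rast(matrix(c(0,1,0,1), nrow = 2))
terra::app(c(sp1Toy, sp2Toy), fun = sum) #cell-wise richness; NA cells stay NA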
#' Maps of beta diversity (Taxon, Phylogenetic or Functional Diversity - TD, PD, FD).
#' @description Observed beta diversity using rasters of species distributions (presence/absence or abundance).
#' @param layers A SpatRaster object of species distributions from package terra.
#' @param tree A phylo or hclust object (used only for PD or FD) or alternatively a species x traits matrix or data.frame to build a functional tree.
#' @param func Partial match indicating whether the Jaccard or Soerensen family of beta diversity measures should be used. If not specified, default is Jaccard.
#' @param neighbour Either 8 (default) or 4 neighbouring cells considered to calculate the beta diversity of each focal cell.
#' @param abund A boolean (T/F) indicating whether abundance data should be used (TRUE) or converted to incidence (FALSE) before analysis.
#' @details The beta diversity measures used here follow the partitioning framework independently developed by Podani & Schmera (2011) and Carvalho et al. (2012)
#' and later expanded to PD and FD by Cardoso et al. (2014), where Btotal = Brepl + Brich.
#' Btotal = total beta diversity, reflecting both species replacement and loss/gain;
#' Brepl = beta diversity explained by replacement of species alone; Brich = beta diversity explained by species loss/gain (richness differences) alone.
#' PD and FD are calculated based on a tree (hclust or phylo object, no need to be ultrametric). The path to the root of the tree is always included in calculations of PD and FD.
#' The number and order of species in layers must be the same as in tree.
#' @return A SpatRaster object with three layers representing Btotal, Brepl and Brich in space.
#' @references Cardoso, P., Rigal, F., Carvalho, J.C., Fortelius, M., Borges, P.A.V., Podani, J. & Schmera, D. (2014) Partitioning taxon, phylogenetic and functional beta diversity into replacement and richness difference components. Journal of Biogeography, 41, 749-761.
#' @references Carvalho, J.C., Cardoso, P. & Gomes, P. (2012) Determining the relative roles of species replacement and species richness differences in generating beta-diversity patterns. Global Ecology and Biogeography, 21, 760-771.
#' @references Gotelli, N.J. & Colwell, R.K. (2001) Quantifying biodiversity: procedures and pitfalls in the measurement and comparison of species richness. Ecology Letters, 4, 379-391.
#' @references Podani, J. & Schmera, D. (2011) A new conceptual and methodological framework for exploring and explaining pattern in presence-absence data. Oikos, 120, 1625-1638.
#' @examples sp1 <- terra::rast(matrix(c(NA,1,1,1,1,0,1,1,0), nrow = 3, ncol = 3, byrow = TRUE))
#' sp2 <- terra::rast(matrix(c(0,0,0,1,1,1,1,1,1), nrow = 3, ncol = 3, byrow = TRUE))
#' sp3 <- terra::rast(matrix(c(0,0,0,1,1,1,1,1,0), nrow = 3, ncol = 3, byrow = TRUE))
#' spp <- c(sp1, sp2, sp3)
#' tree <- hclust(dist(c(1:3), method="euclidean"), method="average")
#' tree$labels = c("Sp1", "Sp2", "Sp3")
#' names(spp) = tree$labels
#' raster.beta(spp)
#' raster.beta(spp, tree)
#' @export
raster.beta <- function(layers, tree, func = "jaccard", neighbour = 8, abund = FALSE){
resTotal = terra::rast(matrix(NA, nrow = nrow(layers), ncol = ncol(layers)))
resRepl = resTotal
resRich = resTotal
for(c in 1:(terra::ncell(layers))){
if(is.na(sum(layers[c]))){
resTotal[c] = NA
resRepl[c] = NA
resRich[c] = NA
} else {
betaValue = matrix(ncol = 3)
adj = terra::adjacent(layers, c, neighbour)
adj = adj[!is.na(adj)]
for(a in adj)
if(!is.na(sum(layers[a])))
betaValue = rbind(betaValue, beta(rbind(layers[c], layers[a]), tree, func = func, abund = abund))
betaValue = betaValue[-1, ,drop = FALSE]
resTotal[c] = mean(unlist(betaValue[, 1]))
resRepl[c] = mean(unlist(betaValue[, 2]))
resRich[c] = mean(unlist(betaValue[, 3]))
}
}
res = c(resTotal, resRepl, resRich)
names(res) = c("Btotal", "Brepl", "Brich")
return(res)
}
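#A worked numeric check of the partitioning used above, for the Jaccard family
#sensu Carvalho et al. (2012); with aSh = number of shared species and bEx, cEx
#= numbers of species exclusive to each of two cells (toy counts, illustrative):
aSh <- 2; bEx <- 3; cEx <- 1
(bEx + cEx) / (aSh + bEx + cEx)       #Btotal
2 * min(bEx, cEx) / (aSh + bEx + cEx) #Brepl
abs(bEx - cEx) / (aSh + bEx + cEx)    #Brich; note Btotal = Brepl + Brich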
#' Maps of phylogenetic/functional dispersion of species or individuals.
#' @description Average dissimilarity between any two species or individuals randomly chosen in a community using rasters of species distributions (presence/absence or abundance).
#' @param layers A SpatRaster object of species distributions from package terra.
#' @param tree A phylo or hclust object or alternatively a species x traits matrix or data.frame to build a functional tree.
#' @param distance A dist object representing the phylogenetic or functional distance between species.
#' @param func Calculate dispersion using originality (default), uniqueness or contribution.
#' @param abund A boolean (T/F) indicating whether dispersion should be calculated using individuals (T) or species (F).
#' @param relative A boolean (T/F) indicating whether dispersion should be relative to the maximum distance between any two species in the tree or distance matrix.
#' @details If abundance data is used and a tree is given, dispersion is the quadratic entropy of Rao (1982).
#' If abundance data is not used but a tree is given, dispersion is the phylogenetic dispersion measure of Webb et al. (2002).
#' Note that cells with fewer than two species cannot have dispersion values.
#' @return A SpatRaster object representing dispersion in space.
#' @references Rao, C.R. (1982) Diversity and dissimilarity coefficients: a unified approach. Theoretical Population Biology, 21: 24-43.
#' @references Webb, C.O., Ackerly, D.D., McPeek, M.A. & Donoghue, M.J. (2002) Phylogenies and community ecology. Annual Review of Ecology and Systematics, 33: 475-505.
#' @examples sp1 <- terra::rast(matrix(c(NA,1,1,1,1,0,0,0,0), nrow = 3, ncol = 3, byrow = TRUE))
#' sp2 <- terra::rast(matrix(c(0,0,0,0,1,1,1,1,1), nrow = 3, ncol = 3, byrow = TRUE))
#' sp3 <- terra::rast(matrix(c(0,0,0,1,1,1,0,0,0), nrow = 3, ncol = 3, byrow = TRUE))
#' spp <- c(sp1, sp2, sp3)
#' tree <- hclust(dist(c(1:3), method="euclidean"), method="average")
#' tree$labels = c("Sp1", "Sp2", "Sp3")
#' names(spp) = tree$labels
#' raster.dispersion(spp, tree)
#' @export
raster.dispersion <- function(layers, tree, distance, func = "originality", abund = FALSE, relative = FALSE){
res = terra::rast(matrix(NA, nrow = nrow(layers), ncol = ncol(layers)))
names(res) = "dispersion"
for(r in 1:nrow(layers)){
for(c in 1:ncol(layers)){
if(is.na(sum(layers[r,c])))
res[r,c] = NA
else
res[r,c] = dispersion(layers[r,c], tree, distance, func, abund, relative)
}
}
return(res)
}
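#Rao's quadratic entropy mentioned above can be written out directly as
#Q = p' D p, with D the pairwise distance matrix and p the relative abundances;
#a toy check (values invented):
dToy <- matrix(c(0, 1, 2, 1, 0, 1.5, 2, 1.5, 0), nrow = 3) #pairwise distances
pToy <- c(0.5, 0.3, 0.2) #relative abundances summing to 1
as.numeric(t(pToy) %*% dToy %*% pToy) #Rao's Q for this toy community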
#' Maps of phylogenetic/functional evenness of species or individuals.
#' @description Regularity of distance and abundance between any two species in a community using rasters of species distributions (presence/absence or abundance).
#' @param layers A SpatRaster object of species distributions from package terra.
#' @param tree A phylo or hclust object or alternatively a species x traits matrix or data.frame to build a functional tree.
#' @param distance A dist object representing the phylogenetic or functional distance between species.
#' @param method Calculate evenness using "expected" values (default) or values based on "contribution" of species to the tree.
#' @param func Calculate evenness using the "Camargo" (1993; default) or "Bulla" (1994) index.
#' @param abund A boolean (T/F) indicating whether evenness should be calculated using abundance data.
#' @details If no tree or distance is provided the result is the original index of Bulla with correction.
#' Note that cells with fewer than two species cannot have evenness values.
#' @return A SpatRaster object representing evenness in space.
#' @references Bulla, L. (1994) An index of evenness and its associated diversity measure. Oikos, 70: 167-171.
#' @references Camargo, J.A. (1993) Must dominance increase with the number of subordinate species in competitive interactions? Journal of Theoretical Biology, 161: 537-542.
#' @examples sp1 <- terra::rast(matrix(c(NA,1,1,1,1,0,0,0,0), nrow = 3, ncol = 3, byrow = TRUE))
#' sp2 <- terra::rast(matrix(c(0,0,0,0,1,1,1,1,1), nrow = 3, ncol = 3, byrow = TRUE))
#' sp3 <- terra::rast(matrix(c(0,0,0,1,1,1,0,0,0), nrow = 3, ncol = 3, byrow = TRUE))
#' spp <- c(sp1, sp2, sp3)
#' tree <- hclust(dist(c(1:3), method="euclidean"), method="average")
#' tree$labels = c("Sp1", "Sp2", "Sp3")
#' names(spp) = tree$labels
#' raster.evenness(spp)
#' raster.evenness(spp, tree)
#' @export
raster.evenness <- function(layers, tree, distance, method = "expected", func = "camargo", abund = TRUE){
res = terra::rast(matrix(NA, nrow = nrow(layers), ncol = ncol(layers)))
names(res) = "evenness"
for(r in 1:nrow(layers)){
for(c in 1:ncol(layers)){
if(is.na(sum(layers[r,c])) || sum(ifelse(layers[r,c] > 0, 1, 0)) < 2)
res[r,c] = NA
else
res[r,c] = evenness(layers[r,c], tree, distance, method, func, abund)
}
}
return(res)
}
#' Species-abundance distribution (SAD).
#' @description Fits the SAD to community abundance data, also using trees and with possible rarefaction.
#' @param comm Either a vector with the abundance per species, or a sites x species matrix.
#' @param tree A phylo or hclust object (used only for PD or FD) or alternatively a species x traits matrix or data.frame to build a functional tree.
#' @param octaves a boolean indicating whether octaves should be calculated.
#' @param scale scale y-axis to sum 1.
#' @param raref An integer specifying the number of individuals for rarefaction (individual based).
#' If raref < 1 no rarefaction is made.
#' If raref = 1 rarefaction is made by the minimum abundance among all sites.
#' If raref > 1 rarefaction is made by the abundance indicated.
#' If not specified, default is 0.
#' @param runs Number of resampling runs for rarefaction. If not specified, default is 100.
#' @details The Species Abundance Distribution describes the commonness and rarity of species in ecological systems. It was recently expanded to accommodate phylogenetic and functional differences between species (Matthews et al., subm.). Classes are defined as n = 1, 2-3, 4-7, 8-15, .... Rarefaction allows the comparison of sites with different total abundances.
#' @return A vector or matrix with the different values per class per community.
#' @references Matthews et al. (subm.) Phylogenetic and functional dimensions of the species abundance distribution.
#' @examples comm1 <- c(20,1,3,100,30)
#' comm2 <- c(1,2,12,0,45)
#' comm <- rbind(comm1, comm2)
#' tree <- hclust(dist(c(1:5), method="euclidean"), method="average")
#' sad(comm1)
#' sad(comm)
#' sad(comm, octaves = FALSE)
#' sad(comm, tree, scale = TRUE)
#' sad(comm, raref = 1)
#' @export
sad <- function(comm, tree, octaves = TRUE, scale = FALSE, raref = 0, runs = 100){
if(is.vector(comm))
comm <- matrix(comm, nrow = 1)
#SAD with no trees
if(missing(tree)){
contr = comm
contr = ifelse(contr > 0, 1, 0)
#SAD using trees
} else {
if(is(tree, "phylo")){
if(!is.null(tree$tip.label) && !is.null(colnames(comm))) ##if both tree and comm have species names match and reorder species (columns) in comm
comm <- comm[,match(tree$tip.label, colnames(comm))]
} else {
if(!is.null(tree$labels) && !is.null(colnames(comm))) ##if both tree and comm have species names match and reorder species (columns) in comm
comm <- comm[,match(tree$labels, colnames(comm))]
}
contr = contribution(comm, tree, abund = FALSE, relative = FALSE)
contr[is.na(contr)] = 0
}
return(sad.core(comm, contr, octaves, scale, raref, runs))
}
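#The abundance classes mentioned above (n = 1, 2-3, 4-7, 8-15, ...) are log2
#octaves; a small base-R sketch of the binning (illustrative only):
abOct <- c(1, 2, 3, 4, 7, 8, 100)
octToy <- floor(log2(abOct)) + 1 #class 1 holds n = 1, class 2 holds n = 2-3, ...
table(octToy)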
#' Species-abundance distribution (SAD) using convex hulls.
#' @description Fits the SAD to community abundance data using convex hulls.
#' @param comm A 'convhulln' object or list, preferably built with function hull.build.
#' @param octaves a boolean indicating whether octaves should be calculated.
#' @param scale scale y-axis to sum 1.
#' @param raref An integer specifying the number of individuals for rarefaction (individual based).
#' If raref < 1 no rarefaction is made.
#' If raref = 1 rarefaction is made by the minimum abundance among all sites.
#' If raref > 1 rarefaction is made by the abundance indicated.
#' If not specified, default is 0.
#' @param runs Number of resampling runs for rarefaction. If not specified, default is 100.
#' @details The Species Abundance Distribution describes the commonness and rarity of species in ecological systems. It was recently expanded to accommodate phylogenetic and functional differences between species (Matthews et al., subm.). Classes are defined as n = 1, 2-3, 4-7, 8-15, .... Rarefaction allows the comparison of sites with different total abundances.
#' @return A vector or matrix with the different values per class per community.
#' @references Matthews et al. (subm.) Phylogenetic and functional dimensions of the species abundance distribution.
#' @examples comm = rbind(c(1,3,0,5,3), c(3,2,5,1,0))
#' colnames(comm) = c("SpA", "SpB", "SpC", "SpD", "SpE")
#' rownames(comm) = c("Site 1", "Site 2")
#'
#' trait = data.frame(body = c(1,2,3,4,4), beak = c(1,5,4,1,2))
#' rownames(trait) = colnames(comm)
#'
#' hv = hull.build(comm, trait)
#' hull.sad(hv, scale = TRUE)
#' hull.sad(hv, octaves = FALSE)
#' hull.sad(hv, raref = TRUE)
#' @export
hull.sad <- function(comm, octaves = TRUE, scale = FALSE, raref = 0, runs = 100){
#check if right data is provided
if (!(class(comm) %in% c("list", "convhulln")))
stop("A convhulln or list is needed as input data.")
#if single comm
if(is(comm, "convhulln"))
comm = list(comm)
#get contribution of each species from the convex hulls
contr = hull.contribution(comm)
contr[is.na(contr)] = 0
#get abundance of each species from the convex hulls
ab = c()
for(i in 1:length(comm)){
ab = rbind(ab, comm[[i]]$comm)
}
comm = ab
return(sad.core(comm, contr, octaves, scale, raref, runs))
}
#' Species-abundance distribution (SAD) using kernel density hypervolumes.
#' @description Fits the SAD to community abundance data based on n-dimensional hypervolumes.
#' @param comm A 'Hypervolume' or 'HypervolumeList' object necessarily built using function kernel.build.
#' @param octaves a boolean indicating whether octaves should be calculated.
#' @param scale scale y-axis to sum 1.
#' @param raref An integer specifying the number of individuals for rarefaction (individual based).
#' If raref < 1 no rarefaction is made.
#' If raref = 1 rarefaction is made by the minimum abundance among all sites.
#' If raref > 1 rarefaction is made by the abundance indicated.
#' If not specified, default is 0.
#' @param runs Number of resampling runs for rarefaction. If not specified, default is 100.
#' @details The Species Abundance Distribution describes the commonness and rarity of species in ecological systems. It was recently expanded to accommodate phylogenetic and functional differences between species (Matthews et al., subm.). Classes are defined as n = 1, 2-3, 4-7, 8-15, .... Rarefaction allows the comparison of sites with different total abundances.
#' @return A vector or matrix with the different values per class per community.
#' @references Matthews et al. (subm.) Phylogenetic and functional dimensions of the species abundance distribution.
#' @examples \dontrun{
#' comm = rbind(c(1,3,0,5,3), c(3,2,5,1,0))
#' colnames(comm) = c("SpA", "SpB", "SpC", "SpD", "SpE")
#' rownames(comm) = c("Site 1", "Site 2")
#'
#' trait = data.frame(body = c(1,2,3,4,4), beak = c(1,5,4,1,2))
#' rownames(trait) = colnames(comm)
#'
#' hv = kernel.build(comm, trait)
#' kernel.sad(hv, scale = TRUE)
#' kernel.sad(hv, octaves = FALSE)
#' kernel.sad(hv, raref = TRUE)
#' }
#' @export
kernel.sad <- function(comm, octaves = TRUE, scale = FALSE, raref = 0, runs = 100){
#check if right data is provided
if (!(class(comm) %in% c("Hypervolume", "HypervolumeList")))
stop("A Hypervolume or HypervolumeList is needed as input data.")
#if single comm
if(is(comm, "Hypervolume")){
abund = attributes(comm)$comm
comm = hypervolume_join(comm)
attributes(comm)$comm = abund
}
#get contribution of each species from the kernel hypervolumes
contr = kernel.contribution(comm)
contr[is.na(contr)] = 0
#get abundance of each species from the kernel hypervolumes
comm = attributes(comm)$comm
return(sad.core(comm, contr, octaves, scale, raref, runs))
}
#' Species-area relationship (SAR).
#' @description Fits and compares several of the most supported models for the species (or PD, or FD) -area relationship.
#' @param comm Either a vector with the diversity values per site, or a sites x species matrix.
#' @param tree A phylo or hclust object (used only for PD or FD) or alternatively a species x traits matrix or data.frame to build a functional tree (used only to fit the PD or FD-area relationships, requires comm to be a sites x species matrix).
#' @param area A vector with the area per site.
#' @details Larger areas (often islands) usually carry more species. Several formulas were proposed in the past to describe this relationship (Arrhenius 1920, 1921; Gleason 1922).
#' Recently, the same approach began to be used for other measures of diversity, namely phylogenetic (PD) and functional (FD) diversity (Whittaker et al. 2014).
#' The function compares some of the most commonly used and theoretically or empirically supported models.
#' The relationships for PD and FD are calculated based on a tree (hclust or phylo object, no need to be ultrametric).
#' @return A matrix with the different model parameters and explanatory power.
#' @references Arrhenius, O. (1920) Distribution of the species over the area. Meddelanden fran Vetenskapsakadmiens Nobelinstitut, 4: 1-6.
#' @references Arrhenius, O. (1921) Species and area. Journal of Ecology, 9: 95-99.
#' @references Gleason, H.A. (1922) On the relation between species and area. Ecology, 3: 158-162.
#' @references Whittaker, R.J., Rigal, F., Borges, P.A.V., Cardoso, P., Terzopoulou, S., Casanoves, F., Pla, L., Guilhaumon, F., Ladle, R. & Triantis, K.A. (2014) Functional biogeography of oceanic islands and the scaling of functional diversity in the Azores. Proceedings of the National Academy of Sciences USA, 111: 13709-13714.
#' @examples div <- c(1,2,3,4,4)
#' comm <- matrix(c(2,0,0,0,3,1,0,0,2,4,5,0,1,3,2,5,1,1,1,1), nrow = 5, ncol = 4, byrow = TRUE)
#' tree <- hclust(dist(c(1:4), method="euclidean"), method="average")
#' area <- c(10,40,80,160,160)
#' sar(div,,area)
#' sar(comm,,area)
#' sar(comm,tree,area)
#' @export
sar <- function(comm, tree, area){
#convert traits to a tree if needed
if(!missing(tree) && (is.matrix(tree) || is.data.frame(tree) || is.vector(tree)))
tree = tree.build(tree)
if(is.vector(comm)){
div = comm
} else if (missing(tree)){
div = alpha(comm)
} else {
div = alpha(comm, tree)
}
div = as.vector(div)
if (!missing(tree)){
comm = reorderComm(comm, tree)
tree <- xTree(tree)
}
results <- matrix(NA, 6, 7)
colnames(results) <- c("c", "z", "r2", "AIC", "\U0394 AIC", "AICc", "\U0394 AICc")
rownames(results) <- c("Linear", "Linear (origin)", "Exponential", "Exponential (origin)", "Power", "Power (origin)")
k <- c(3,2,3,2,3,2)
model <- list()
model[[1]] <- try(nls(div ~ c + z*area, start = data.frame(c = 0, z = 1)))
model[[2]] <- try(nls(div ~ z*area, start = data.frame(z = 1)))
model[[3]] <- try(nls(div ~ c + z*log(area), start = data.frame(c = 0, z = 1)))
model[[4]] <- try(nls(div ~ z*log(area), start = data.frame(z = 1)))
model[[5]] <- try(nls(div ~ c * area^z, start = data.frame(c = 1, z = 1))) #power model S = c*A^z; multiplicative form, consistent with the origin variant below
model[[6]] <- try(nls(div ~ area^z, start = data.frame(z = 1)))
for(m in 1:length(model)){
if(k[m] == 3){
results[m,1] <- coef(summary(model[[m]]))[1,1]
results[m,2] <- coef(summary(model[[m]]))[2,1]
} else {
results[m,2] <- coef(summary(model[[m]]))[1,1]
}
est <- predict(model[[m]], area=area)
results[m,3] <- r2(div, est)
results[m,4] <- aic(div, est, k[m])
results[m,6] <- aic(div, est, k[m], correct = TRUE)
}
for(m in 1:length(model)){
results[m,5] <- results[m,4] - min(results[,4])
results[m,7] <- results[m,6] - min(results[,6])
}
return(results)
}
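#A minimal sketch of fitting the classical power SAR (S = c * A^z) with nls,
#outside the wrapper above; 'areaToy' and 'divToy' are invented data:
areaToy <- c(1, 10, 100, 1000)
divToy <- c(5, 12, 30, 70)
powerFit <- try(nls(divToy ~ c * areaToy^z, start = list(c = 5, z = 0.3)))
if(!inherits(powerFit, "try-error")) coef(powerFit) #estimated c and z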
#' General dynamic model of oceanic island biogeography (GDM).
#' @description Fits and compares several of the most supported models for the GDM (using TD, PD or FD).
#' @param comm Either a vector with the diversity values per island, or an island x species matrix.
#' @param tree A phylo or hclust object (used only for PD or FD) or alternatively a species x traits matrix or data.frame to build a functional tree (used only to fit the PD or FD GDM, requires comm to be a sites x species matrix).
#' @param area A vector with the area of islands.
#' @param time A vector with the age of islands. If not given, the species-area relationship is returned instead.
#' @details The general dynamic model of oceanic island biogeography was proposed to account for diversity patterns within and across oceanic archipelagos as a function of area and age of the islands (Whittaker et al. 2008).
#' Several different equations have been found to describe the GDM, extending the different SAR models with the addition of a polynomial term using island age and its square (TT2), depicting the island ontogeny.
#' The first to be proposed was an extension of the exponential model (Whittaker et al. 2008), the power model extensions following shortly after (Fattorini 2009; Steinbauer et al. 2013), as was the linear model (Cardoso et al. 2020).
#' The relationships for PD and FD are calculated based on a tree (hclust or phylo object, no need to be ultrametric).
#' @return A matrix with the different model parameters and explanatory power.
#' @references Cardoso, P., Branco, V.V., Borges, P.A.V., Carvalho, J.C., Rigal, F., Gabriel, R., Mammola, S., Cascalho, J. & Correia, L. (2020) Automated discovery of relationships, models and principles in ecology. Frontiers in Ecology and Evolution, 8: 530135.
#' @references Fattorini, S. (2009) On the general dynamic model of oceanic island biogeography. Journal of Biogeography, 36: 1100-1110.
#' @references Steinbauer, M.J, Klara, D., Field, R., Reineking, B. & Beierkuhnlein, C. (2013) Re-evaluating the general dynamic theory of oceanic island biogeography. Frontiers of Biogeography, 5: 185-194.
#' @references Whittaker, R.J., Triantis, K.A. & Ladle, R.J. (2008) A general dynamic theory of oceanic island biogeography. Journal of Biogeography, 35: 977-994.
#' @examples div <- c(1,3,5,8,10)
#' comm <- matrix(c(2,0,0,0,3,1,0,0,2,4,5,0,1,3,2,5,1,1,1,1), nrow = 5, ncol = 4, byrow = TRUE)
#' tree <- hclust(dist(c(1:4), method="euclidean"), method="average")
#' area <- c(10,40,80,160,160)
#' time <- c(1,2,3,4,5)
#' gdm(div,,area,time)
#' gdm(comm,tree,area,time)
#' gdm(div,,area)
#' @export
gdm <- function(comm, tree, area, time){
#convert traits to a tree if needed
if(!missing(tree) && (is.matrix(tree) || is.data.frame(tree) || is.vector(tree)))
tree = tree.build(tree)
if(missing(time))
return(sar(comm,tree,area))
if(is.vector(comm)){
div = comm
} else if (missing(tree)){
div = alpha(comm)
} else {
div = alpha(comm, tree)
}
div = as.vector(div)
if (!missing(tree)){
comm = reorderComm(comm, tree)
tree <- xTree(tree)
}
results <- matrix(NA, 4, 9)
colnames(results) <- c("c", "z", "x", "y", "r2", "AIC", "\U0394 AIC", "AICc", "\U0394 AICc")
rownames(results) <- c("Linear", "Exponential", "Power (area)", "Power (area, time)")
k <- 5
model <- list()
model[[1]] <- try(nls(div ~ c + z*area + x*time + y*time^2, start = data.frame(c=1, z=1, x=1, y=0)))
model[[2]] <- try(nls(div ~ c + z*log(area) + x*time + y*time^2, start = data.frame(c=1, z=1, x=1, y=0)))
model[[3]] <- try(nls(div ~ exp(c + z*log(area) + x*time + y*time^2), start = data.frame(c=1, z=1, x=1, y=0)))
model[[4]] <- try(nls(div ~ exp(c + z*log(area) + x*log(time) + y*log(time)^2), start = data.frame(c=1, z=1, x=1, y=0)))
for(m in 1:length(model)){
results[m,1] <- coef(summary(model[[m]]))[1,1]
results[m,2] <- coef(summary(model[[m]]))[2,1]
results[m,3] <- coef(summary(model[[m]]))[3,1]
results[m,4] <- coef(summary(model[[m]]))[4,1]
est <- predict(model[[m]], area=area, time=time)
results[m,5] <- r2(div, est)
results[m,6] <- aic(div, est, k)
results[m,8] <- aic(div, est, k, correct = TRUE)
}
for(m in 1:length(model)){
results[m,7] <- results[m,6] - min(results[,6])
results[m,9] <- results[m,8] - min(results[,8])
}
return(results)
}
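#The GDM adds an ontogeny term in island age (x*T + y*T^2); with y < 0 the
#expected diversity peaks at T = -x/(2y), the hump of the trajectory. A quick
#numerical check with invented coefficients:
xGDM <- 3; yGDM <- -0.3 #toy linear and quadratic time coefficients
-xGDM / (2 * yGDM) #island age at which expected diversity peaks (here 5)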
#' Interspecific abundance-occupancy relationship (IAOR).
#' @description Fits and compares several of the most supported models for the IAOR.
#' @param comm A sites x species matrix with abundance values.
#' @details Locally abundant species tend to be widespread while locally rare species tend to be narrowly distributed.
#' That is, for a given species assemblage, there is a positive interspecific abundance-occupancy relationship (Brown 1984).
#' This function compares some of the most commonly used and theoretically or empirically supported models (Nachman 1981; He & Gaston 2000; Cardoso et al. 2020).
#' @return A matrix with the different model parameters and explanatory power.
#' @references Brown, J.H. (1984) On the relationship between abundance and distribution of species. American Naturalist, 124: 255-279.
#' @references Cardoso, P., Branco, V.V., Borges, P.A.V., Carvalho, J.C., Rigal, F., Gabriel, R., Mammola, S., Cascalho, J. & Correia, L. (2020) Automated discovery of relationships, models and principles in ecology. Frontiers in Ecology and Evolution, 8: 530135.
#' @references He, F.L. & Gaston, K.J. (2000) Estimating species abundance from occurrence. American Naturalist, 156: 553-559.
#' @references Nachman, G. (1981) A mathematical model of the functional relationship between density and spatial distribution of a population. Journal of Animal Ecology, 50: 453-460.
#' @examples comm <- matrix(c(4,3,2,1,5,4,3,2,3,2,1,0,6,3,0,0,0,0,0,0), nrow = 5, ncol = 4, byrow = TRUE)
#' iaor(comm)
#' @export
iaor <- function(comm){
results <- matrix(NA, 4, 7)
colnames(results) <- c("a", "b", "r2", "AIC", "\U0394 AIC", "AICc", "\U0394 AICc")
rownames(results) <- c("Linear", "Exponential", "Negative Binomial", "SR")
k <- c(3,3,2,2)
abund <- colMeans(comm) #mean abundance per species (including sites with 0 individuals)
occup <- colMeans(ifelse(comm>0,1,0)) #proportion occupancy per species
model <- list()
model[[1]] <- try(nls(logit(occup) ~ a+b*log(abund), start = data.frame(a = 1, b = 1))) #linear
model[[2]] <- try(nls(occup ~ 1-exp(a*abund^b), start = data.frame(a = -1, b = 1))) #exponential
model[[3]] <- try(nls(occup ~ 1-(1+(abund/a))^(0-a), start = data.frame(a = 0))) #negative binomial
model[[4]] <- try(nls(occup ~ abund/(a+abund), start = data.frame(a = 0))) #SR = Clench with asymptote 1
for(m in 1:length(model)){
if(m < 3){
results[m,1] <- coef(summary(model[[m]]))[1,1]
results[m,2] <- coef(summary(model[[m]]))[2,1]
} else {
results[m,1] <- coef(summary(model[[m]]))[1,1]
}
est <- predict(model[[m]], abund=abund)
if(m==1) est = revLogit(est)
results[m,3] <- r2(occup, est)
results[m,4] <- aic(occup, est, k[m])
results[m,6] <- aic(occup, est, k[m], correct = TRUE)
}
for(m in 1:length(model)){
results[m,5] <- results[m,4] - min(results[,4])
results[m,7] <- results[m,6] - min(results[,6])
}
return(results)
}
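#A quick numerical look at the He & Gaston (2000) negative binomial model
#fitted above: predicted occupancy rises with mean abundance for a fixed
#aggregation parameter (toy values, illustrative only):
kNB <- 1
meanAbund <- c(0.1, 1, 10)
1 - (1 + meanAbund / kNB)^(-kNB) #predicted proportion of occupied sites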
#' Create Linnean tree.
#' @description Creates a Linnean tree from taxonomic hierarchy.
#' @param taxa A taxonomic matrix with columns ordered according to linnean hierarchy starting with the highest.
#' @param distance A vector with distances between levels starting with the highest. If not provided distances will be evenly distributed from 1 to 0.
#' @return An hclust with all species.
#' @examples family <- c("Nemesiidae", "Nemesiidae", "Zodariidae", "Zodariidae")
#' genus <- c("Iberesia", "Nemesia", "Zodarion", "Zodarion")
#' species <- c("Imachadoi", "Nungoliant", "Zatlanticum", "Zlusitanicum")
#' taxa <- cbind(family, genus, species)
#' par(mfrow = c(1, 2))
#' plot(linnean(taxa))
#' plot(linnean(taxa, c(2, 0.5, 0.3)))
#' @export
linnean <- function(taxa, distance = NULL){
if(is.null(distance))
distance = seq(from = 1, to = 1/ncol(taxa), by = -1*1/ncol(taxa))
nspp = nrow(taxa)
distTable = matrix(NA, nrow = nspp, ncol = nspp)
colnames(distTable) = rownames(distTable) = taxa[,ncol(taxa)]
for(i in 1:nspp){
for(j in 1:nspp){
level = 0
for(k in 1:ncol(taxa))
if(taxa[i,k] != taxa[j,k])
level = level + 1
if(level == 0)
distTable[i,j] = 0
else
distTable[i,j] = distance[length(distance) - level + 1]
}
}
tree = hclust(as.dist(distTable))
return(tree)
}
#' Dummify variables.
#' @description Convert factor variables to dummy variables.
#' @param trait A species x traits matrix or data.frame.
#' @param convert A vector of column numbers, usually categorical variables, to be converted to dummy variables.
#' @param weight Indicates whether weights of variables should be returned (TRUE/FALSE) or a vector with weights per variable.
#' @details If convert is given the algorithm will convert these column numbers to dummy variables. Otherwise it will convert all columns with factors or characters.
#' @return A matrix with variables converted or, if weight == TRUE or a vector, a list also with weights.
#' @examples trait = data.frame(length = c(2,4,6,3,1), wing = c("A", "B", "A", "A", "B"))
#' dummy(trait)
#' dummy(trait, weight = TRUE)
#' dummy(trait, convert = 2, weight = c(0.9, 0.1))
#' @export
dummy <- function(trait, convert = NULL, weight = FALSE){
traitNames = colnames(trait)
if (is.matrix(trait))
trait <- as.data.frame(trait)
if (length(weight) > 1){
if(length(weight) != ncol(trait))
stop("Length of 'weight' must be equal to the number of columns in 'trait'.")
vecWeight = weight
} else {
vecWeight = rep(1, ncol(trait))
}
newTrait = c()
newWeight = c()
for(i in 1:length(traitNames)){
if(is.null(convert) || (i %in% convert)){
if(is.numeric(trait[,i]) && !(i %in% convert)){
newTrait = cbind(newTrait, trait[,i])
colnames(newTrait)[ncol(newTrait)] = traitNames[i] #name the column just added (position i may be shifted by earlier dummy expansion)
newWeight = c(newWeight, vecWeight[i])
}
if(is.logical(trait[,i]) && !(i %in% convert)){
newTrait = cbind(newTrait, ifelse(trait[,i] == T, 1, 0))
colnames(newTrait)[ncol(newTrait)] = traitNames[i] #name the column just added
newWeight = c(newWeight, vecWeight[i])
}
if(is.factor(trait[,i]) || is.character(trait[,i]) || (i %in% convert)){
colTrait <- factor(trait[,i])
nValues <- length(colTrait)
nLevels <- length(levels(colTrait))
y <- matrix(0L, nrow = nValues, ncol = nLevels)
colnames(y) <- paste(rep(colnames(trait)[i], nLevels), levels(colTrait), sep = ".")
y[cbind(seq_len(nValues), as.integer(colTrait))] <- 1L
colTrait = y
newTrait = cbind(newTrait, colTrait)
newWeight = c(newWeight, rep((1/nLevels), nLevels) * vecWeight[i])
}
} else {
newTrait = cbind(newTrait, trait[,i])
colnames(newTrait)[ncol(newTrait)] = colnames(trait)[i]
newWeight = c(newWeight, vecWeight[i])
}
}
if(length(weight) > 1 || weight == TRUE)
return(list(trait = newTrait, weight = newWeight/sum(newWeight)))
else
return(newTrait)
}
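#The one-hot expansion performed in the loop above can be cross-checked against
#base R's model.matrix on a toy factor (illustrative; removing the intercept
#keeps one 0/1 column per level):
wingToy <- factor(c("A", "B", "A"))
model.matrix(~ wingToy - 1) #columns wingToyA and wingToyB with 0/1 entries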
#' Filling missing data.
#' @description Estimation of missing trait values (NA) based on different methods.
#' @param trait A species x traits matrix (a species or individual for each row and traits as columns).
#' @param method Method for imputing missing data. One of "mean" (mean value of the trait), "median" (median value of the trait), "similar" (input from closest species), "regression" (linear regression), "w_regression" (regression weighted by species distance), or "PCA" (Principal Component Analysis).
#' @param group A vector (string of characters, factorial, etc.) whose values indicate which species belong to the same group as the missing and should be used in the estimation of missing data. If NULL all species will be used.
#' @param weight A hclust, phylo or dist object to calculate the distance between species and use as weights.
#' Note that the order of tip labels in trees or of species in the distance matrix should be the same as the order of species in trait.
#' @param step A boolean (T/F) indicating if a stepwise regression model based on AIC should be performed. Ignored if regression is not used.
#' @details Imputes missing data in the trait matrix based on different methods (see Taugourdeau et al. 2014; Johnson et al. 2021 for comparisons of the performance of different methods).
#' The simplest approach is the average imputation ("mean" or "median"), calculating the mean/median of the values for that trait based on all the observations that are non-missing. It has the advantage of keeping the same mean and the same sample size, but many disadvantages.
#' The "similar" method inputs a systematically chosen value from the closest species who has similar values on other variables.
#' The default method is linear regression ("regression"), where the predicted value is obtained by regressing the missing variable on other variables. This preserves relationships among variables involved in the imputation model, but not variability around predicted values (i.e., may lead to extrapolations).
#' The "w_regression" takes into account the relative distance among species in the imputation of missing traits, based on the phylogenetic or functional distance between missing and non-missing species.
#' The "PCA" method performs PCA with incomplete data sensu Podani et al. (2021).
#' Note that for PCA and regressions methods the performance of the prediction increases as the number of collinear traits increase.
#' @return A trait matrix with missing data (NA) filled with predicted values.
#' If method = "PCA" the function returns the standard output of a principal component analysis as a list with:
#' Eigenvalues
#' Positive eigenvalues
#' Positive eigenvalues as percent
#' Square root of eigenvalues
#' Eigenvectors
#' Component scores
#' Variable scores
#' Object scores in a biplot
#' Variable scores in a biplot
#' @references Johnson, T.F., Isaac, N.J., Paviolo, A. & Gonzalez-Suarez, M. (2021). Handling missing values in trait data. Global Ecology and Biogeography, 30: 51-62.
#' @references Podani, J., Kalapos, T., Barta, B. & Schmera, D. (2021). Principal component analysis of incomplete data. A simple solution to an old problem. Ecological Informatics, 101235.
#' @references Taugourdeau, S., Villerd, J., Plantureux, S., Huguenin-Elie, O. & Amiaud, B. (2014). Filling the gap in functional trait databases: use of ecological hypotheses to replace missing data. Ecology and Evolution, 4: 944-958.
#' @examples \dontrun{
#' trait <- iris[,-5]
#' group <- iris[,5]
#'
#' #Generating some random missing data
#' for (i in 1:10)
#' trait[sample(nrow(trait), 1), sample(ncol(trait), 1)] <- NA
#'
#' #Estimating the missing data with different methods
#' fill(trait, "mean")
#' fill(trait, "mean", group)
#' fill(trait, "median")
#' fill(trait, "median", group)
#' fill(trait, "similar")
#' fill(trait, "similar", group)
#' fill(trait, "regression", step = FALSE)
#' fill(trait, "regression", group, step = TRUE)
#' fill(trait, "w_regression", step = TRUE)
#' fill(trait, "w_regression", weight = dist(trait), step = TRUE)
#' fill(trait, "PCA")
#' }
#' @export
fill <- function(trait, method = "regression", group = NULL, weight = NULL, step = TRUE) {
n_sp <- nrow(trait)
#Initial checking of group
if(is.null(group) == TRUE){
group <- as.factor(rep("group", n_sp))
} else {
if (!(is.character(group) || is.factor(group) || is.numeric(group)))
stop("Group should be a vector, a string of characters, or a factor.")
if(length(group) != n_sp)
stop("The number of elements in the trait matrix should be equal to the number of elements in the grouping vector.")
if(any(is.na(group) == TRUE))
stop("The group cannot contain missing data.")
group <- as.factor(group)
}
#Convert characters to factors if needed
trait[sapply(trait, is.character)] <- lapply(trait[sapply(trait, is.character)], as.factor)
#Convert <NA> to NA if needed
trait[] <- lapply(trait, function(x) {
is.na(levels(x)) <- levels(x) == "NA"
x
})
#Check if there are rows with only missing data
trait <- trait[rowSums(is.na(trait)) != ncol(trait),]
if(nrow(trait) != n_sp)
stop("There are rows in the dataset that contain exclusively missing data. Please omit those and re-try.")
#Provide % of missing data
for(i in 1:ncol(trait))
message(paste("-- Column ", colnames(trait)[i], " contains ", round((sum(is.na(trait[,i]) == TRUE) / nrow(trait) )*100,2), "% of missing data.", sep=''))
#Missing Data Imputation with mean
if(method == "mean") {
for(i in 1:nlevels(group))
for(j in 1:ncol(trait))
trait[is.na(trait[group==levels(group)[i], j]), j] <- mean(trait[group==levels(group)[i], j], na.rm = TRUE)
#Missing Data Imputation with median
} else if(method == "median") {
for(i in 1:nlevels(group))
for(j in 1:ncol(trait))
trait[is.na(trait[group==levels(group)[i], j]), j] <- median(trait[group==levels(group)[i], j], na.rm = TRUE)
#Missing Data Imputation based on the closest species
} else if(method == "similar") {
trait_total <- data.frame()
for (m in 1 : nlevels(group)) {
trait_m <- trait[group == levels(group)[m], ]
for (i in 1 : ncol(trait)){
NA_df <- subset(trait_m, is.na(trait_m[,i]) == TRUE)
if(nrow(NA_df) > 0) {
missing_value <- c()
for (j in 1 : nrow(NA_df)){
close <- which.min(gower(rbind(NA_df[j,], trait_m[-j,]))[1:nrow(trait_m)])
missing_value <- append(missing_value, trait_m[ close, i ] )
}
trait_m[c(rownames(NA_df)), i] <- missing_value
}
}
trait_total <- rbind(trait_total,trait_m)
}
trait <- trait_total
#Missing Data Imputation with linear regression
} else if(method == "regression") {
#Initial checking
if(ncol(trait) < 2)
stop("A minimum of two traits are needed for predicting missing values using regression.")
#With group
if(nlevels(group) > 1) { #If a grouping factor is provided
trait2 <- data.frame(trait, group)
for (i in 1:ncol(trait)){
NA_df <- subset(trait2, is.na(trait2[,i]) == TRUE)
#if the column has missing data, make prediction for NA:
if(nrow(NA_df) > 0) {
missing_value <- c()
for (j in 1:nrow(NA_df)) {
data_j <- NA_df[j,]
#column without missing data
column_names <- names(which(colSums(is.na(NA_df[j,])) == 0))
formula.lm <- as.formula(paste(colnames(trait2)[i], " ~ ", paste(column_names, collapse=" + "), sep=""))
#model fit
model <- lm(formula.lm, data = data.frame(na.omit(trait2)))
#stepwise selection
if(step)
model <- MASS::stepAIC(model, direction = "both", trace = FALSE)
#make the prediction
pred_data <- data.frame(NA_df[j,column_names]) ; colnames(pred_data) <- column_names
missing_value <- append( missing_value , predict(model, newdata = pred_data))
}
trait[c(rownames(NA_df)),i] <- missing_value
}
}
#Without group
} else {
for (i in 1:ncol(trait)){
NA_df <- subset(trait, is.na(trait[,i]) == TRUE)
if(nrow(NA_df) > 0) {
#if the column has missing data, make prediction for NA:
missing_value <- c()
for (j in 1:nrow(NA_df)) {
#column without missing data
column_names <- names(which(colSums(is.na(NA_df[j,])) == 0))
#model formula
formula.lm <- as.formula(paste(colnames(trait)[i], " ~ ", paste(column_names, collapse=" + "), sep=""))
#model fit
model <- lm(formula.lm, data = data.frame(na.omit(trait)))
#stepwise selection
if(step)
model <- MASS::stepAIC(model, direction = "both", trace = FALSE)
#make the prediction
pred_data <- data.frame(NA_df[j,column_names]) ; colnames(pred_data) <- column_names
missing_value <- append(missing_value , predict(model, newdata = pred_data))
}
trait[c(rownames(NA_df)),i] <- missing_value
}
}
}
#Missing Data Imputation with weighted linear regression
} else if(method == "w_regression") {
# Initial checking
if(ncol(trait) < 2)
stop("A minimum of two traits are needed for predicting missing values using weighted regression.")
trait <- data.frame(trait)
for (i in 1:ncol(trait)){
NA_df <- subset(trait, is.na(trait[,i]) == TRUE)
#if the column has missing data, make prediction for NA:
if(nrow(NA_df) > 0) {
missing_value <- c()
for(j in rownames(NA_df)){
j <- as.numeric(j)
#column without missing data
column_names <- names(which(colSums(is.na(trait[j,])) == 0))
#model formula
formula.lm <- as.formula(paste(colnames(trait)[i], " ~ ", paste(column_names, collapse=" + "), sep=""))
#preparing the weight (kept in a local vector so 'weight' retains its class across iterations)
if(is(weight, "dist"))
w <- (1 / as.matrix(weight)[j,])
else if(is(weight, "phylo") || is(weight, "hclust"))
w <- (1 / as.matrix(cophenetic(weight))[j,])
else
w <- (1 - as.matrix(gower(trait))[j,])
w[is.infinite(w)] <- max(w[which(!is.infinite(w))])
trait2 <- na.omit(data.frame(trait, weight = w))
#model fit
model <- lm(formula.lm, data = trait2, weights = weight)
#stepwise selection
if(step)
model <- MASS::stepAIC(model, direction = "both", trace = FALSE)
#make the prediction
pred_data <- data.frame(trait[j,column_names]) ; colnames(pred_data) <- column_names
missing_value <- append(missing_value , predict(model, newdata = pred_data))
}
trait[c(rownames(NA_df)),i] <- missing_value
}
}
#Missing Data Imputation based on a PCA
} else if(method == "PCA") {
# Note that this chunk of code for PCA is adapted from Podani et al. (2021)
# Initial checking
if(ncol(trait) < 2)
stop("A minimum of two traits are needed for predicting missing values using PCA")
#scaling
X <- scale(trait, center = TRUE, scale = TRUE)
#correlation
C <- cor(X , use="pairwise.complete.obs")
#Eigenvalue
Eigenvalues<-eigen(C)$values
Eigenvalues.pos<-Eigenvalues[Eigenvalues>0]
Eigenvalues.pos.as.percent<-100*Eigenvalues.pos/sum(Eigenvalues.pos)
#Eigenvectors
V <- eigen(C)$vectors
#Principal components
X2<-X
X2[is.na(X2)] <- 0
PC <- as.matrix(X2) %*% V
#object.standardized
PCstand1 <- PC[,Eigenvalues>0]/sqrt(Eigenvalues.pos)[col(PC[,Eigenvalues>0])]
PCstand2 <- PCstand1 / sqrt(nrow(PC) - 1)
#loadings
loadings<-cor(X,PC,use="pairwise.complete.obs")
#arrows for biplot
arrows<-cor(X,PC,use="pairwise.complete.obs")*sqrt(nrow(X) - 1)
#output
PCA <- list()
PCA$Correlation.matrix<-C
PCA$Eigenvalues<-Eigenvalues
PCA$Positive.Eigenvalues<-Eigenvalues.pos
PCA$Positive.Eigenvalues.as.percent<-100*Eigenvalues.pos/sum(Eigenvalues.pos)
PCA$Square.root.of.eigenvalues <- sqrt(Eigenvalues.pos)
PCA$Eigenvectors<-V
PCA$Component.scores<-PC
PCA$Variable.scores<-loadings
PCA$Biplot.objects<-PCstand2
PCA$Biplot.variables<-arrows
#Final output
trait <- PCA
} else {
stop(sprintf("Method %s not recognized.", method))
}
return(trait)
}
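## A minimal standalone sketch of the PCA imputation idea used above
## (adapted from Podani et al. 2021): after scaling, missing entries are set
## to zero (the column mean), so component scores can still be computed from
## pairwise-complete correlations. The toy data below are illustrative.
if (FALSE) {
  trait <- data.frame(body = c(20, 40, 60, 30, 10), beak = c(NA, 4, 6, 3, 1))
  X <- scale(trait, center = TRUE, scale = TRUE)  #standardize the traits
  C <- cor(X, use = "pairwise.complete.obs")      #pairwise correlations
  X2 <- X
  X2[is.na(X2)] <- 0                              #zeros = column means
  PC <- as.matrix(X2) %*% eigen(C)$vectors        #component scores
  PC
}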
#' Standardize variables.
#' @description Standardize (or normalize) variables in different ways.
#' @param trait A species x traits matrix or data.frame.
#' @param method One of "standard" (standardize to mean = 0 and sd = 1, i.e., use z-score), "range" (rescale with range 0-1), or "rank" (rescale with range 0-1 after ranking).
#' @param convert A vector of column numbers to be standardized. If NULL all will be standardized.
#' @details Standardizing values allows the direct comparison of variables of interest with inherently different ranges, avoiding artificial distortions of distances between observations.
#' @return A matrix with variables standardized.
#' @examples trait = data.frame(body = c(20,40,60,30,10), beak = c(NA,4,6,3,1))
#' standard(trait)
#' standard(trait, method = "range")
#' standard(trait, method = "rank")
#' @export
standard <- function(trait, method = "standard", convert = NULL){
if(is.vector(trait))
trait = matrix(trait, ncol = 1)
if(is.null(convert))
convert = 1:ncol(trait)
for(i in convert){
#if standardization with mean 0 and sd 1
if(method == "standard"){
trait[,i] = (trait[,i] - mean(trait[,i], na.rm = TRUE)) / sd(trait[,i], na.rm = TRUE)
#if standardization with range 0-1
} else if(method == "range"){
trait[,i] = (trait[,i] - min(trait[,i], na.rm = TRUE)) / (max(trait[,i], na.rm = TRUE) - min(trait[,i], na.rm = TRUE))
#if standardization with ranking of values
} else if(method == "rank"){
trait[,i] = rank(trait[,i], ties.method = "average", na.last = "keep")
trait[,i] = standard(trait[,i], method = "range")
} else {
stop("Method not recognized, must be one of 'standard', 'range' or 'rank'.")
}
}
return(trait)
}
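## A quick illustrative check: for method = "standard" the result matches
## base R's scale() (both use the sample standard deviation).
if (FALSE) {
  trait = data.frame(body = c(20, 40, 60, 30, 10))
  all.equal(as.vector(standard(trait)$body), as.vector(scale(trait$body)))
}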
#' Gower distance.
#' @description Calculates Gower distances between observations.
#' @param trait A species x traits matrix or data.frame.
#' @param convert A vector of column numbers, usually categorical variables, to be converted to dummy variables.
#' @param weight A vector of column numbers with weights for each variable. Its length must be equal to the number of columns in trait.
#' @details The Gower distance allows continuous, ordinal, categorical or binary variables, with possible weighting (Pavoine et al. 2009).
#' NAs are allowed as long as each pair of species has at least one trait value in common.
#' If convert is given the algorithm will convert these column numbers to dummy variables. Otherwise it will convert all columns with factors or characters as values.
#' @return A dist object with pairwise distances between species.
#' @references Pavoine et al. (2009) On the challenge of treating various types of variables: application for improving the measurement of functional diversity. Oikos, 118: 391-402.
#' @examples trait = data.frame(body = c(NA,2,3,4,4), beak = c(1,1,1,1,2))
#' gower(trait)
#' gower(trait, weight = c(1, 0))
#' @export
gower <- function(trait, convert = NULL, weight = NULL){
#prepare data
if(is.vector(trait))
trait = matrix(trait, ncol = 1)
if(is.null(weight))
weight = rep(1, ncol(trait))
spNames = rownames(trait)
trait = as.data.frame(trait)
#dummify, standardize, and get weights
trait = dummy(trait, convert, weight = weight)
weight = trait$weight
trait = standard(trait$trait, method = "range")
#calculate gower
nSp = nrow(trait)
res = matrix(0, nrow = nSp, ncol = nSp)
for(i in 1:(nSp-1)){
for(j in (i+1):nSp){
sumWeights = 0
for(t in 1:ncol(trait)){
value1 = trait[i,t]
value2 = trait[j,t]
if(!any(is.na(c(value1, value2)))){
res[j,i] = res[j,i] + ((trait[i,t] - trait[j,t])^2 * weight[t])
sumWeights = sumWeights + weight[t]
}
}
res[j,i] = (res[j,i] / sumWeights)^0.5
}
}
rownames(res) = colnames(res) = spNames
return(as.dist(res))
}
#' Model R2.
#' @description Calculates R2 from the summed squared differences between observed and estimated values.
#' @param obs Either a model or a vector with observed values.
#' @param est A vector with estimated values. Only used if obs is not a model.
#' @param param Number of parameters in the model, used to calculate the adjusted R2 if > 0. If obs is a model and param > 0, the number of parameters will be extracted from the model itself.
#' @details Useful for models or functions that do not provide r2 values.
#' @return The r2 value.
#' @examples obs = c(1,4,5,6)
#' est = c(0,1,4,7)
#'
#' #example using values
#' r2(obs, est)
#' r2(obs, est, param = 1)
#'
#' #example using model
#' mod = lm(obs ~ est)
#' r2(mod)
#' summary(mod)$r.squared
#' r2(mod, param = 1)
#' summary(mod)$adj.r.squared
#'
#' @export
r2 <- function(obs, est = NULL, param = 0){
if(!is.vector(obs)){
if(param > 0)
param = length(obs$coefficients) - 1
est = predict(obs)
obs = obs$model[,1]
}
SSn <- rss(obs, est)
SSd <- sum((obs-mean(obs))^2)
res <- 1-(SSn/SSd)
if(param > 0){
n = length(obs)
res <- 1-(1-res)*((n-1)/(n-param-1))
}
return(res)
}
#' Akaike Information Criterion.
#' @description Calculates the Akaike Information Criterion (AIC) of any model based on observed and estimated values.
#' @param obs Either a model or a vector with observed values.
#' @param est A vector with estimated values. Only used if obs is not a model.
#' @param param Number of parameters in the model. If obs is a model, param will be ignored and the number of parameters will be calculated from the model.
#' @param correct Boolean indicating whether the corrected version of AIC (AICc) should be calculated, mostly for models with few observations.
#' @details Useful for models or functions that do not provide logLik values.
#' @return The AIC or AICc value.
#' @examples obs = c(1,4,5,6)
#' est = c(0,1,4,7)
#'
#' #example using values
#' aic(obs, est)
#' aic(obs, est, param = 1)
#' aic(obs, est, param = 1, correct = TRUE)
#'
#' #example using model
#' mod = lm(obs ~ est)
#' aic(mod)
#' extractAIC(mod)[2]
#' aic(mod, correct = TRUE)
#'
#' @export
aic <- function(obs, est = NULL, param = 0, correct = FALSE){
if(!is.vector(obs)){
param = length(obs$coefficients) - 1
est = predict(obs)
obs = obs$model[,1]
}
k = param + 1
n = length(obs)
res = n * log(rss(obs, est)/n) + 2 * k
if(correct)
res = res + (2*k*(k+1))/(n-k-1)
return(res)
}
#' Standard Effect Size.
#' @description Calculates the standard effect size from observed and estimated values.
#' @param obs A single observed value.
#' @param est A vector with estimated values.
#' @param param A boolean (TRUE/FALSE) indicating whether the value is calculated with the parametric (TRUE) or non-parametric (FALSE) method. Because standardized effect sizes may lead to biased conclusions if null values show an asymmetric distribution or deviate from normality, non-parametric effect sizes use probit transformed p-values (Lhotsky et al., 2016).
#' @param p Boolean indicating whether the p-value should be returned.
#' @return The ses value or a vector with ses and p-value.
#' @references Lhotsky et al. (2016) Changes in assembly rules along a stress gradient from open dry grasslands to wetlands. Journal of Ecology, 104: 507-517.
#' @examples est = rnorm(1000, 500, 100)
#'
#' ses(100, est)
#' ses(100, est, param = FALSE)
#' ses(500, est)
#' ses(500, est, param = FALSE)
#' ses(900, est, p = TRUE)
#' ses(900, est, param = FALSE, p = TRUE)
#' @export
ses <- function(obs, est, param = TRUE, p = TRUE){
if(param){
res = (obs - mean(est, na.rm = TRUE)) / sd(est, na.rm = TRUE)
pval = pnorm(res) #converts ses to p
} else {
est = c(obs, est)
pval = (sum(est < obs) + sum(est == obs)/2) / length(est)
res = qnorm(pval) #converts p to ses
}
if(p){
#convert from one-tailed to two-tailed test
pval = pval * 2
if(pval > 1)
pval = 1 - (pval - 1)
res = c(res, pval)
names(res) = c("ses", "p-value")
}
return(res)
}
#' Build functional tree.
#' @description Builds a functional tree from trait or distance data.
#' @param trait A species x traits matrix or data.frame or, alternatively, a dist object.
#' @param distance One of "gower" or "euclidean". Not used if trait is already a dist object.
#' @param func One of "upgma", "mst", "nj", "bionj" or "best".
#' @param fs Only used for func = "nj" or "bionj". Argument s of the agglomerative criterion: it is coerced to an integer and must be at least equal to one.
#' @param convert A vector of column numbers, usually categorical variables, to be converted to dummy variables. Not used if trait is already a dist object.
#' @param weight A vector of column numbers with weights for each variable. Its length must be equal to the number of columns in trait. Not used if trait is already a dist object.
#' @param root A numeric or character specifying the functional outgroup to root the tree.
#' @details The tree will be built using one of four algorithms after traits are dummified (if needed) and standardized (always):
#' If func = "upgma" uses average linkage clustering (UPGMA, Cardoso et al. 2014).
#' If func = "mst" uses minimum spanning trees, equivalent to single linkage clustering (Gower & Ross 1969).
#' If func = "nj" uses the original neighbor-joining algorithm of Saitou & Nei (1987).
#' If func = "bionj" uses the modified neighbor-joining algorithm of Gascuel (1997).
#' Any of the neighbor-joining options is usually preferred, as they preserve distances between species better than UPGMA or MST (Cardoso et al. subm.).
#' If func = "best", chooses the best of the options above based on maximum tree.quality values.
#' If NJ trees are built, the root will be set at the node closest to the midpoint between the two most dissimilar species in the tree or, if root not NULL, at the node provided in parameter root (Podani et al. 2000).
#' Gower distance (Pavoine et al. 2009) allows continuous, ordinal, categorical or binary variables, with possible weighting.
#' NAs are allowed as long as each pair of species has at least one trait value in common. For fs > 0 even if this condition is not met the Q* criterion by Criscuolo & Gascuel (2008) is used to fill missing data.
#' If convert is given the algorithm will convert these column numbers to dummy variables. Otherwise it will convert all columns with factors or characters as values.
#' @return A phylo object representing a functional tree.
#' @references Cardoso et al. (2014) Partitioning taxon, phylogenetic and functional beta diversity into replacement and richness difference components. Journal of Biogeography, 41: 749-761.
#' @references Cardoso et al. (subm.) Using neighbor-joining trees for functional diversity analyses.
#' @references Criscuolo & Gascuel (2008) Fast NJ-like algorithms to deal with incomplete distance matrices. BMC Bioinformatics, 9: 166.
#' @references Gascuel (1997) BIONJ: an improved version of the NJ algorithm based on a simple model of sequence data. Molecular Biology and Evolution, 14: 685–695.
#' @references Gower & Ross (1969) Minimum spanning trees and single linkage cluster analysis. Journal of the Royal Statistical Society, 18: 54-64.
#' @references Pavoine et al. (2009) On the challenge of treating various types of variables: application for improving the measurement of functional diversity. Oikos, 118: 391-402.
#' @references Podani et al. (2000) Additive trees in the analysis of community data. Community Ecology, 1, 33–41.
#' @references Saitou & Nei (1987) The neighbor-joining method: a new method for reconstructing phylogenetic trees. Molecular Biology and Evolution, 4, 406–425.
#' @examples trait = data.frame(body = c(NA,2,3,4,4), beak = c(1,1,1,1,2))
#' plot(tree.build(trait))
#' plot(tree.build(trait, func = "bionj", fs = 1, weight = c(1, 0)), "u")
#' plot(tree.build(trait, func = "best", root = 4))
#' @export
tree.build <- function(trait, distance = "gower", func = "nj", fs = 0, convert = NULL, weight = NULL, root = NULL){
#get distance matrix
if(is(trait, "dist")){
distmatrix = trait
} else {
if (distance == "gower"){
distmatrix = gower(trait, convert, weight)
} else if (distance == "euclidean"){
distmatrix = dist(trait, method = "euclidean")
} else {
stop("Distance should be one of gower or euclidean")
}
}
#build tree
if(func == "upgma"){
tree = hclust(distmatrix, method = "average")
} else if(func == "mst"){
tree = hclust(distmatrix, method = "single")
} else if(func == "nj"){
if(fs < 1){
tree = nj(distmatrix)
} else {
tree = njs(distmatrix, fs)
}
} else if(func == "bionj"){
if(fs < 1){
tree = bionj(distmatrix)
} else {
tree = bionjs(distmatrix, fs)
}
} else if(func == "best"){
#build trees
trees = list()
methods = c("upgma", "mst", "nj", "bionj")
qual = c()
for(i in 1:4){
trees[[i]] = tree.build(trait, distance, func = methods[i], fs, convert, weight, root = root)
qual[i] = tree.quality(distmatrix, trees[[i]])
}
best_tree = which.max(qual)
cat("The best tree is given by", methods[best_tree],"with a quality of", qual[best_tree])
tree = trees[[best_tree]]
} else {
stop("func not recognized!")
}
if(is(tree, "phylo") && !is.rooted(tree)){
if(is.null(root))
tree <- phytools::midpoint.root(tree)
else
tree <- ape::root(tree, outgroup = root)
}
return(tree)
}
#' Convert negative branches of tree.
#' @description Converts negative branch lengths of any tree to zero.
#' @param tree A phylo object.
#' @details Converts branches with negative values to zero, shortening the two branches immediately below by the same absolute amount, so that the tips remain at the same distances and no polytomies are created.
#' @return A phylo object.
#' @examples par(mfrow = c(1,2))
#' tree <- ape::read.tree(text='(((A:3, B:3):1,
#' (G:6, (H:5, I:5):1):-2):3, ((C:1, D:1):2, (E:4, F:4):-1):4);')
#' plot(tree)
#'
#' tree = tree.zero(tree)
#' plot(tree)
#' @export
tree.zero <- function (tree){
#identify which branches are negative
neg = which(tree$edge.length < 0)
#go one by one negative branches, do it until there are no negative
while (length(neg) > 0){
for(i in neg){
#get branch length
len = tree$edge.length[i]
#identify which branches are immediately under branch i
under = tree$edge[i,2]
under = which(tree$edge[,1] == under)
      #set the negative branch length to zero
tree$edge.length[i] = 0
#decrease length of branches under
tree$edge.length[under] = tree$edge.length[under] + len
}
neg = which(tree$edge.length < 0)
}
return(tree)
}
#' Quality of tree.
#' @description Assess the quality of a functional tree.
#' @param distance A dist object representing the initial distances between species.
#' @param tree A phylo or hclust object.
#' @details The algorithm calculates the inverse of mean squared deviation between initial and cophenetic distances (Maire et al. 2015) after standardization of all values between 0 and 1 for simplicity of interpretation.
#' A value of 1 corresponds to maximum quality of the functional representation. A value of 0 corresponds to the expected value for a star tree, where all pairwise distances are 1.
#' @return A single value of quality.
#' @references Maire et al. (2015) How many dimensions are needed to accurately assess functional diversity? A pragmatic approach for assessing the quality of functional spaces. Global Ecology and Biogeography, 24: 728:740.
#' @examples trait = data.frame(body = c(1,2,3,4,4), beak = c(1,1,1,1,2))
#' distance = gower(trait)
#'
#' tree = tree.build(trait)
#' tree.quality(distance, tree)
#'
#' tree = tree.build(trait, func = "bionj")
#' tree.quality(distance, tree)
#'
#' tree = tree.build(trait, func = "upgma")
#' tree.quality(distance, tree)
#'
#' tree = tree.build(trait, func = "mst")
#' tree.quality(distance, tree)
#'
#' tree = tree.build(trait, func = "best")
#'
#' distance1 = distance
#' distance1[] = 1
#' tree = hclust(distance1)
#' tree.quality(distance, tree)
#' @export
tree.quality <- function(distance, tree){
tree = as.dist(cophenetic(tree))
return(msd(distance, tree))
}
#' Build hyperspace.
#' @description Builds hyperspace by transforming trait or distance data to use with either hull.build or kernel.build.
#' @param trait A species x traits matrix or data.frame or, alternatively, a dist object.
#' @param distance One of "gower" or "euclidean". Not used if trait is a dist object.
#' @param weight A vector of column numbers with weights for each variable. Its length must be equal to the number of columns in trait. Only used if axes > 0 and if trait is not a dist object.
#' @param axes If 0, no transformation of data is done.
#' If 0 < axes <= 1 a PCoA is done with Gower/euclidean distances and as many axes as needed to achieve this proportion of variance explained are selected.
#' If axes > 1 these many axes are selected.
#' @param convert A vector of column numbers, usually categorical variables, to be converted to dummy variables. Only used if axes > 0 and if trait is not a dist object.
#' @details The hyperspace can be constructed with the given data or data can be transformed using PCoA after traits are dummified (if needed) and standardized (always).
#' Gower distance (Pavoine et al. 2009) allows continuous, ordinal, categorical or binary variables, with possible weighting.
#' NAs are allowed as long as each pair of species has at least one trait value in common.
#' If convert is given the algorithm will convert these column numbers to dummy variables. Otherwise it will convert all columns with factors or characters as values.
#' Note that each community should have at least 3 species and more species than traits or axes (if axes > 0) to build convex hull hypervolumes.
#' Transformation of traits is recommended if (Carvalho & Cardoso, 2020):
#' 1) Some traits are not continuous;
#' 2) Some traits are correlated; or
#' 3) There are fewer species than traits + 1, in which case the number of axes should be smaller.
#' @return A matrix with the coordinates of each species in hyperspace.
#' @references Carvalho, J.C. & Cardoso, P. (2020) Decomposing the causes for niche differentiation between species using hypervolumes. Frontiers in Ecology and Evolution, 8: 243.
#' @references Pavoine et al. (2009) On the challenge of treating various types of variables: application for improving the measurement of functional diversity. Oikos, 118: 391-402.
#' @examples
#' trait = data.frame(body = c(1,2,3,4,4), beak = c(1,5,4,1,2))
#' rownames(trait) = c("SpA", "SpB", "SpC", "SpD", "SpE")
#'
#' hs = hyper.build(trait, weight = c(1,2), axes = 2)
#' plot(hs)
#' @export
hyper.build <- function(trait, distance = "gower", weight = NULL, axes = 1, convert = NULL){
#do gower/euclidean and pcoa if axes is larger than 0
if(is(trait, "dist") || axes > 0){
#get distance matrix
if(!is(trait, "dist")){
if (distance == "gower"){
trait = gower(trait, convert, weight)
} else if (distance == "euclidean"){
trait = dist(trait, method = "euclidean")
} else {
stop("Distance should be one of gower or euclidean")
}
}
trait = ape::pcoa(trait)
if(axes <= 1){
selAxes = cumVar = 0
while(cumVar < axes){
selAxes = selAxes + 1
cumVar = cumVar + trait$values$Relative_eig[selAxes]
}
axes = selAxes
} else {
axes = min(axes, ncol(trait$vectors))
}
    trait = trait$vectors[, 1:axes, drop = FALSE] #keep matrix structure even with a single axis
}
colnames(trait) = paste("Axis", 1:ncol(trait), sep = "")
if(is.null(rownames(trait)))
rownames(trait) = paste("Sp", 1:nrow(trait), sep = "")
return(trait)
}
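## An illustrative call complementing the example above: with 0 < axes <= 1
## the function keeps as many PCoA axes as needed to reach that proportion of
## explained variance, rather than a fixed number of axes.
if (FALSE) {
  trait = data.frame(body = c(1,2,3,4,4), beak = c(1,5,4,1,2))
  hyper.build(trait, axes = 0.9) #keep axes explaining at least 90% of variance
}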
#' Quality of hyperspace.
#' @description Assess the quality of a functional hyperspace.
#' @param distance A dist object representing the initial distances between species.
#' @param hyper A matrix with coordinates data from function hyper.build.
#' @details This is used for any representation using hyperspaces, including convex hull and kernel-density hypervolumes. The algorithm calculates the inverse of the mean squared deviation between initial and euclidean distances (Maire et al. 2015) after standardization of all values between 0 and 1 for simplicity of interpretation. A value of 1 corresponds to maximum quality of the functional representation. A value of 0 corresponds to the expected value for a hyperspace where all distances between species are 1.
#' @return A single value of quality.
#' @references Maire et al. (2015) How many dimensions are needed to accurately assess functional diversity? A pragmatic approach for assessing the quality of functional spaces. Global Ecology and Biogeography, 24: 728:740.
#' @examples trait = data.frame(body = c(1,2,3,4,4), beak = c(1,1,1,1,2))
#' distance = gower(trait)
#'
#' hyper = hyper.build(trait, axes = 2)
#' hyper.quality(distance, hyper)
#'
#' hyper = hyper.build(trait, axes = 0)
#' hyper.quality(distance, hyper)
#' @export
hyper.quality <- function(distance, hyper){
hyper = dist(hyper)
return(msd(distance, hyper))
}
#' Build convex hull hypervolumes.
#' @description Builds convex hull hypervolumes for each community from incidence and trait data.
#' @param comm A sites x species matrix, data.frame or vector, with incidence data about the species in the community.
#' @param trait A species x traits or axes matrix or data.frame (often from hyper.build) or, alternatively, a dist object.
#' @param distance One of "gower" or "euclidean". Not used if trait is a dist object.
#' @param weight A vector of column numbers with weights for each variable. Its length must be equal to the number of columns in trait. Only used if axes > 0 and if trait is not a dist object.
#' @param axes If 0, no transformation of data is done.
#' If 0 < axes <= 1 a PCoA is done with Gower/euclidean distances and as many axes as needed to achieve this proportion of variance explained are selected.
#' If axes > 1 these many axes are selected.
#' @param convert A vector of column numbers, usually categorical variables, to be converted to dummy variables. Only used if axes > 0 and if trait is not a dist object.
#' @details The hypervolumes can be constructed with the given data or data can be transformed using PCoA after traits are dummified (if needed) and standardized (always).
#' Beware that if transformations are required, all communities to be compared should be built simultaneously to guarantee comparability. In such case, one might want to first run hyper.build and use the resulting data in different runs of hull.build.
#' See function hyper.build for more details.
#' @return A 'convhulln' object or a list, representing the hypervolumes of each community.
#' @examples comm = rbind(c(1,3,0,5,3), c(3,2,5,0,0))
#' colnames(comm) = c("SpA", "SpB", "SpC", "SpD", "SpE")
#' rownames(comm) = c("Site 1", "Site 2")
#'
#' trait = data.frame(body = c(1,2,3,4,4), beak = c(1,5,4,1,2))
#' rownames(trait) = colnames(comm)
#'
#' hv = hull.build(comm[1,], trait)
#' plot(hv)
#' hvlist = hull.build(comm, trait)
#' plot(hvlist[[2]])
#' hvlist = hull.build(comm, trait, axes = 2, weight = c(1,2))
#' plot(hvlist[[1]])
#' @export
hull.build <- function(comm, trait, distance = "gower", weight = NULL, axes = 0, convert = NULL){
trait = hyper.build(trait, distance, weight, axes, convert)
if(is.vector(comm))
comm = matrix(comm, nrow = 1)
trait = reorderTrait(comm, trait)
#check if there are communities with less species than traits + 1 and remove them
fewSpp = as.vector(which(rowSums(ifelse(comm > 0, 1, 0)) <= ncol(trait)))
if(length(fewSpp) > 0){
if(nrow(comm) == length(fewSpp))
stop(paste("There are no communities with enough species for convex hull delineation.\n"))
comm2 = comm[-(fewSpp), , drop = FALSE]
for(i in fewSpp)
warning(paste("Site", fewSpp, "does not contain enough species for convex hull delineation and has been removed prior to convex hull estimation.\n"))
comm <- comm2
}
if(is.null(colnames(comm)))
colnames(comm) = rownames(trait)
if(is.null(rownames(comm)))
rownames(comm) = paste("Comm", 1:nrow(comm), sep = "")
#build the convex hulls for each community
hull_list <- list()
for (s in 1:nrow(comm)) {
subComm <- trait[comm[s,] > 0, ]
hull_list[[s]] <- geometry::convhulln(subComm, options = "FA")
rownames(hull_list[[s]]$p) = rownames(subComm)
hull_list[[s]]$comm = comm[s,]
}
names(hull_list) = rownames(comm)
#return
if(length(hull_list) > 1){
return(hull_list)
} else {
return(hull_list[[1]])
}
}
#' Build kernel hypervolumes.
#' @description Builds kernel density hypervolumes from trait data.
#' @param comm A sites x species matrix, data.frame or vector, with incidence or abundance data about the species in the community.
#' @param trait A species x traits or axes matrix or data.frame (often from hyper.build) or, alternatively, a dist object.
#' @param distance One of "gower" or "euclidean". Not used if trait is a dist object.
#' @param method.hv Method for constructing the 'Hypervolume' object. One of "gaussian" (Gaussian kernel density estimation, default), "box" (box kernel density estimation), or "svm" (one-class support vector machine). See respective functions of the hypervolume R package for details.
#' @param abund A boolean (T/F) indicating whether abundance data should be used as weights in hypervolume construction. Only works if method.hv = "gaussian".
#' @param weight A vector of column numbers with weights for each variable. Its length must be equal to the number of columns in trait. Only used if axes > 0 and if trait is not a dist object.
#' @param axes If 0, no transformation of data is done.
#' If 0 < axes <= 1 a PCoA is done with Gower/euclidean distances and as many axes as needed to achieve this proportion of variance explained are selected.
#' If axes > 1 these many axes are selected.
#' @param convert A vector of column numbers, usually categorical variables, to be converted to dummy variables. Only used if axes > 0 and if trait is not a dist object.
#' @param cores Number of cores to be used in parallel processing. If = 0 all available cores are used. Beware that multicore for Windows is not optimized yet and it often takes longer than single core.
#' @param ... further arguments to be passed to hypervolume::hypervolume
#' @details The hypervolumes can be constructed with the given data or data can be transformed using PCoA after traits are dummified (if needed) and standardized (always).
#' Beware that if transformations are required, all communities to be compared should be built simultaneously to guarantee comparability. In such case, one might want to first run hyper.build and use the resulting data in different runs of kernel.build.
#' See function hyper.build for more details.
#' @return A 'Hypervolume' or 'HypervolumeList', representing the hypervolumes of each community.
#' @examples \dontrun{
#' comm = rbind(c(1,1,0,5,1), c(3,2,5,0,0))
#' colnames(comm) = c("SpA", "SpB", "SpC", "SpD", "SpE")
#' rownames(comm) = c("Site1", "Site2")
#'
#' trait = data.frame(body = c(1,2,3,1,2), beak = c(1,2,3,2,1))
#' rownames(trait) = colnames(comm)
#'
#' hv = kernel.build(comm[1,], trait)
#' plot(hv)
#' hvlist = kernel.build(comm, trait, abund = FALSE, cores = 2)
#' plot(hvlist)
#' hvlist = kernel.build(comm, trait, method.hv = "box", weight = c(1,2), axes = 2)
#' plot(hvlist)
#' }
#' @export
kernel.build <- function(comm, trait, distance = "gower", method.hv = "gaussian", abund = TRUE, weight = NULL, axes = 0, convert = NULL, cores = 1, ... ){
trait = hyper.build(trait, distance, weight, axes, convert)
if(is.vector(comm))
comm = matrix(comm, nrow = 1)
trait = reorderTrait(comm, trait)
#check if there are communities with no species
fewSpp = as.vector(which(rowSums(comm) == 0))
if(length(fewSpp) > 0){
if(nrow(comm) == length(fewSpp))
stop(paste("There are no communities with species.\n"))
comm2 = comm[-(fewSpp), , drop = FALSE]
for(i in fewSpp)
warning(paste("Site", fewSpp, "does not contain any species and has been removed prior to hypervolume estimation.\n"))
comm <- comm2
}
#general function for lapply (serial), mcapply (Mac/Linux) or parLapply (Win)
parbuild <- function(i, commList, trait, method.hv, abund, ... ){
subComm = commList[[i]]
subTrait <- trait[subComm > 0, ] ##Select traits
subComm <- subComm[subComm > 0]
cat("Building hypervolume", i, "of", length(commList), "\n")
if (method.hv == "box"){
newHv <- hypervolume_box(subTrait, verbose = FALSE, ... )
} else if (method.hv == "svm"){
newHv <- hypervolume_svm(subTrait, verbose = FALSE, ... )
} else if (method.hv == "gaussian"){
if(abund){
weight = subComm / sum(subComm)
newHv <- hypervolume_gaussian(subTrait, weight = weight, verbose = FALSE, ... )
} else {
newHv <- hypervolume_gaussian(subTrait, verbose = FALSE, ... )
}
} else {
stop(sprintf("method.hv %s not recognized.", method.hv))
}
return(newHv)
}
#get number of cores, if cores = 0 uses all available
if(cores == 0){
cores = detectCores()
} else {
cores = min(cores, detectCores())
}
#make list to go for parallel if required
commList = as.list(as.data.frame(t(comm)))
if(cores == 1 || Sys.info()[['sysname']] == 'Windows'){
hv = lapply(seq(commList), parbuild, commList = commList, trait = trait, method.hv = method.hv, abund = abund, ... )
#} else if (Sys.info()[['sysname']] == 'Windows'){
# cl = makeCluster(cores)
# func = paste("hypervolume", method, sep = "_")
# clusterExport(cl, varlist = c("trait", "method", "abund", func))
# hv = parLapply(cl, seq(commList), parbuild, commList = commList, trait = trait, method = method, abund = abund, ... )
# stopCluster(cl)
} else {
hv = mclapply(seq(commList), parbuild, commList = commList, trait = trait, method.hv = method.hv, abund = abund, mc.cores = cores, ... )
}
#name hypervolumes and convert list to HypervolumeList
if(length(hv) > 1){
for(i in 1:length(hv)){
hv[[i]]@Name = rownames(comm)[i]
}
hv = hypervolume_join(hv)
} else {
hv = hv[[1]]
}
#add abundance data to hypervolumes (required for sad)
attributes(hv)$comm = comm
return(hv)
}
#' Simulation of species abundance distributions (SAD).
#' @description Creates artificial communities following given SADs.
#' @param n total number of individuals.
#' @param s number of species.
#' @param sad The SAD distribution type (lognormal, uniform, broken stick or geometric). Default is lognormal.
#' @param sd The standard deviation of lognormal distributions. Default is 1.
#' @details Species Abundance Distributions may take a number of forms. A lognormal SAD is probably the best supported by empirical data, but we include other common types useful for testing multiple algorithms, including several of the functions in BAT.
#' @return A matrix of species x abundance per species.
#' @examples comm1 <- sim.sad(10000, 100)
#' comm2 <- sim.sad(10000, 100, sd = 2)
#' comm3 <- sim.sad(10000, 100, sad = "uniform")
#' par(mfrow=c(1,3))
#' hist(log(comm1$Freq))
#' hist(log(comm2$Freq))
#' hist(log(comm3$Freq))
#' @export
sim.sad <- function(n, s, sad = "lognormal", sd = 1) {
if (s > n)
stop("Number of species can't be larger than number of individuals")
sppnames = paste("Sp", 1:s, sep="") ##species names
sad <- match.arg(sad, c("lognormal", "uniform", "broken", "geometric"))
##lognormal distribution
switch(sad, lognormal = {
comm = sample(sppnames, size = n, replace = T, prob = c(rlnorm(s, sdlog = sd)))
##uniform distribution
}, uniform = {
comm = sample(sppnames, size = n, replace = T)
##broken stick distribution
}, broken = {
broken.stick <- function(p){
result = NULL
for(j in 1:p) {
E = 0
for(x in j:p)
E = E+(1/x)
result[j] = E/p
}
return(result)
}
broken.prob = broken.stick(s)
comm = sample(sppnames, size = n, replace = TRUE, prob = c(broken.prob))
}, geometric = {
geo.ser <- function(s, k = 0.3){
result = NULL
for (x in 1:s) {
result[x] = k*(1-k)^(x-1)/(1-(1-k)^s)
}
return(result)
}
geo.prob = geo.ser(s)
comm = sample(sppnames, size = n, replace = TRUE, prob = c(geo.prob))
})
return(as.data.frame(table(comm)))
}
#' Simulation of species spatial distributions.
#' @description Creates artificial communities with given SAD and spatial clustering.
#' @param n total number of individuals.
#' @param s number of species.
#' @param sad The SAD distribution type (lognormal, uniform, broken stick or geometric). Default is lognormal.
#' @param sd The standard deviation of lognormal distributions. Default is 1.
#' @param distribution The spatial distribution of individual species populations (aggregated, random, uniform or gradient). Default is aggregated.
#' @param clust The clustering parameter if distribution is either aggregated or gradient (higher values create more clustered populations). Default is 1.
#' @details The spatial distribution of individuals of given species may take a number of forms.
#' Competitive exclusion may cause overdispersion, specific habitat needs or cooperation may cause aggregation and environmental gradients may cause abundance gradients.
#' @return A matrix of individuals x (species, x coords and y coords).
#' @examples par(mfrow = c(3 ,3))
#' comm = sim.spatial(100, 9, distribution = "uniform")
#' for(i in 1:9){
#' sp <- comm[comm[1] == paste("Sp", i, sep = ""), ]
#' plot(sp$x, sp$y, main = paste("Sp", i), xlim = c(0,1), ylim = c(0,1))
#' }
#' @export
sim.spatial <- function(n, s, sad = "lognormal", sd = 1, distribution = "aggregated", clust = 1){
repeat{
simsad <- sim.sad(n, s, sad, sd)
coords <- matrix(ncol = 2)
for (j in 1:nrow(simsad)){ #species by species
spCoords <- matrix(c(runif(1),runif(1)), ncol=2)
if(simsad[j,2] > 1){
for(i in 2:simsad[j,2]){
repeat{
newcoords <- c(runif(1),runif(1))
##aggregated distribution
if(distribution == "aggregated"){
mindist = 1
for(r in 1:nrow(spCoords)){
mindist <- min(mindist, euclid(newcoords, spCoords[r,]))
}
thres = abs(rnorm(1, sd = 1/clust))/10
if(mindist < thres){
spCoords <- rbind(spCoords, newcoords)
break
}
##random distribution
} else if (distribution == "random"){
spCoords <- rbind(spCoords, newcoords)
break
##uniform distribution
} else if (distribution == "uniform"){
mindist = 1
for(r in 1:nrow(spCoords)){
mindist <- min(mindist, euclid(newcoords, spCoords[r,]))
}
thres = runif(1)
if(mindist > thres){
spCoords <- rbind(spCoords, newcoords)
break
}
##gradient distribution
} else if (distribution == "gradient") {
thres = runif(1)^(1/clust)
if(newcoords[2] > thres){
spCoords <- rbind(spCoords, newcoords)
break
}
} else {
            stop("distribution not recognized")
}
}
}
}
coords <- rbind(coords, spCoords)
}
spp <- rep(as.character(simsad[,1]), simsad[,2])
comm <- data.frame(Spp = spp, x = coords[-1,1], y = coords[-1,2])
if(nrow(comm) == n && length(which(is.na(comm$x))) == 0){
break
}
}
return(comm)
}
#' Plots of simulated species spatial distributions.
#' @description Plots individuals from artificial communities with given SAD and spatial clustering.
#' @param comm artificial community data from function sim.spatial.
#' @param sad boolean indicating if the SAD plot should also be shown. Default is FALSE.
#' @param s number of species to plot simultaneously. Default is the number of species in comm.
#' @details Function useful for visualizing the results of sim.spatial.
#' @examples comm <- sim.spatial(1000, 24)
#' sim.plot(comm)
#' sim.plot(comm, sad = TRUE)
#' sim.plot(comm, s = 9)
#' @export
sim.plot <- function(comm, sad = FALSE, s = 0){
spp <- length(unique(comm$Spp))
if(s < 1){
if(!sad)
side = ceiling(sqrt(spp))
else
side = ceiling(sqrt(spp+1))
} else{
side = ceiling(sqrt(s))
}
par(mfrow = c(side,side), mar = c(1,1,2,1))
if(sad)
hist(log(table(comm[,1])), main = "All species", xlab = "Abundance (log)", xaxt = "n")
for(i in 1:spp){
sp <- comm[comm[1] == paste("Sp", i, sep=""), ]
plot(sp$x, sp$y, main = paste("Sp", i), xlim = c(0,1), ylim = c(0,1), xlab="", ylab="", xaxt="n", yaxt="n")
}
}
#' Simulation of sampling from artificial communities.
#' @description Simulates a sampling process from artificial communities.
#' @param comm simulated community data from function sim.spatial.
#' @param cells number of cells to divide the simulated space into. Default is 100.
#' @param samples number of samples (cells) to randomly extract. Default is the number of cells (the entire community).
#' @details The space will be divided in both dimensions by sqrt(cells).
#' @details Function useful for simulating sampling processes from the results of sim.spatial.
#' @details May be used as direct input to other functions (e.g. alpha, alpha.accum, beta, beta.accum) to test the behavior of multiple descriptors and estimators.
#' @return A matrix of samples x species (values are abundance per species per sample).
#' @examples comm <- sim.spatial(1000, 10)
#' sim.sample(comm)
#' sim.sample(comm, cells = 10, samples = 5)
#' @export
sim.sample <- function(comm, cells = 100, samples = 0){
side <- round(sqrt(cells),0)
cells = side^2
comm$ind <- 0
xv <- cut(comm$x, seq(0, 1, 1/side))
yv <- cut(comm$y, seq(0, 1, 1/side))
grid1 <- data.frame(table(xv, yv))
grid1 <- grid1[,-3]
s <- 1:cells
for (i in 1:cells){
id <- NULL
id <- which (xv == grid1$xv[s[i]] & yv == grid1$yv[s[i]])
comm$ind[id] <- paste("Sample", i, sep="")
}
comm <- table(comm$ind, comm$Spp) ##entire community
comm <- comm[rownames(comm) != "0", ]
if (samples < 1 || samples > nrow(comm))
samples = nrow(comm)
##number of samples to take
samp <- comm[sample(nrow(comm), samples, replace = FALSE),] ## sampled community
return(samp)
}
#' Simulation of phylogenetic or functional tree.
#' @description Simulates a random tree.
#' @param s number of species.
#' @param m a structural parameter defining the average difference between species. Default is 100. Lower numbers create trees dominated by increasingly similar species, higher numbers by increasingly dissimilar species.
#' @details A very simple tree based on random genes/traits.
#' @return An hclust object.
#' @examples tree <- sim.tree(10)
#' plot(as.dendrogram(tree))
#' tree <- sim.tree(100,10)
#' plot(as.dendrogram(tree))
#' tree <- sim.tree(100,1000)
#' plot(as.dendrogram(tree))
#' @export
sim.tree <- function(s, m = 100){
sim.matrix <- matrix(sample(0:m, ceiling(s*m/50), replace = TRUE), nrow = s, ncol = m)
tree <- hclust(dist(sim.matrix), method = "average")
tree$height <- tree$height / max(tree$height)
return(tree)
}
#' Sample data of spiders in Arrabida (Portugal)
#'
#' A dataset containing the abundance of 338 spider species in each of 320 sampling units. Details are described in:
#' Cardoso, P., Gaspar, C., Pereira, L.C., Silva, I., Henriques, S.S., Silva, R.R. & Sousa, P. (2008) Assessing spider species richness and composition in Mediterranean cork oak forests. Acta Oecologica, 33: 114-127.
#'
#' @docType data
#' @keywords datasets
#' @name arrabida
#' @usage data(arrabida)
#' @format A data frame with 320 sampling units (rows) and 338 species (variables).
NULL
#' Sample data of spiders in Geres (Portugal)
#'
#' A dataset containing the abundance of 338 spider species in each of 320 sampling units. Details are described in:
#' Cardoso, P., Scharff, N., Gaspar, C., Henriques, S.S., Carvalho, R., Castro, P.H., Schmidt, J.B., Silva, I., Szuts, T., Castro, A. & Crespo, L.C. (2008) Rapid biodiversity assessment of spiders (Araneae) using semi-quantitative sampling: a case study in a Mediterranean forest. Insect Conservation and Diversity, 1: 71-84.
#'
#' @docType data
#' @keywords datasets
#' @name geres
#' @usage data(geres)
#' @format A data frame with 320 sampling units (rows) and 338 species (variables).
NULL
#' Sample data of spiders in Guadiana (Portugal)
#'
#' A dataset containing the abundance of 338 spider species in each of 320 sampling units. Details are described in:
#' Cardoso, P., Henriques, S.S., Gaspar, C., Crespo, L.C., Carvalho, R., Schmidt, J.B., Sousa, P. & Szuts, T. (2009) Species richness and composition assessment of spiders in a Mediterranean scrubland. Journal of Insect Conservation, 13: 45-55.
#'
#' @docType data
#' @keywords datasets
#' @name guadiana
#' @usage data(guadiana)
#' @format A data frame with 192 sampling units (rows) and 338 species (variables).
NULL
#' Functional tree for 338 species of spiders
#'
#' A dataset representing the functional tree for 338 species of spiders captured in Portugal.
#' For each species were recorded: average size, type of web, type of hunting, stenophagy, vertical stratification in vegetation and circadial activity. Details are described in:
#' Cardoso, P., Pekar, S., Jocque, R. & Coddington, J.A. (2011) Global patterns of guild composition and functional diversity of spiders. PLoS One, 6: e21710.
#'
#' @docType data
#' @keywords datasets
#' @name functree
#' @usage data(functree)
#' @format An hclust object with 338 species.
NULL
#' Taxonomic tree for 338 species of spiders (surrogate for phylogeny)
#'
#' A dataset representing an approximation to the phylogenetic tree for 338 species of spiders captured in Portugal.
#' The tree is based on the Linnean hierarchy, with different suborders separated by 1 unit, families by 0.75, genera by 0.5 and species by 0.25.
#'
#' @docType data
#' @keywords datasets
#' @name phylotree
#' @usage data(phylotree)
#' @format An hclust object with 338 species.
NULL | /scratch/gouwar.j/cran-all/cranData/BAT/R/BAT.R |
BAYSTAR<-function(x,lagp1,lagp2,Iteration,Burnin,constant,d0,step.thv,thresVar,mu01,v01,mu02,v02,v0,lambda0,refresh,tplot) {
##Time.initial<-Sys.time()
## Initialize
if (missing(constant)){
constant<- 1}
else{
if (!is.vector(constant) || length(constant) != 1)
stop ("'constant' must be a scalar")
if (constant!=0 && constant!=1)
stop ("'constant' must be 1 or 0")
}
if (missing(d0)){
d0<- 3}
else{
if (!is.vector(d0) || length(d0) != 1)
stop ("'d0' must be a scalar")
if (d0 < 0)
stop ("'d0' must be positive")
}
if (missing(step.thv)){
stop ("'step.thv' is missing")
}
if (missing(refresh)){
if(Iteration < 1000){
refresh <- Iteration /2
}
else{
refresh <- 1000
}}
else{
if (!is.vector(refresh) || length(refresh) != 1)
stop ("'refresh' must be a scalar")
if (refresh < 0)
stop ("'refresh' must be positive")
if (refresh > Iteration)
stop ("'refresh' must be less than 'Iteration'")
}
if (missing(tplot)){
tplot ="FALSE"
}
p1<- length(lagp1); p2<- length(lagp2) ## No. of covariates in the two regimes
nx<- length(x)
#if (differ ==1){
#yt<-x[2:nx]-x[2:nx-1] }
#else
yt<- x
nob<- length(yt)
if (!missing(thresVar)){
if (length(thresVar) >= nob ){
zt <- thresVar[1:nob]}
else {
stop ("Data for the threshold variable are not enough")}
}
else zt<- yt
## Set initial values
phi.1 <- rep(0.05, p1 + constant)
phi.2 <- rep(0.05, p2 + constant)
sigma.1<- 0.2
sigma.2<- 0.2
lagd<- 1
thres<- median(zt)
accept.r<- 0
sum.r<- 0
## MSE of fitting an AR(p1) model
ar.mse<- ar(yt,aic=FALSE, order.max=p1)
## Set the hyper-parameters
if (missing(mu01)){
mu01<- matrix(0,nrow=p1+constant,ncol=1)}
else{
if(!is.matrix(mu01)){
if (!is.vector(mu01) || length(mu01) != 1){
stop("'mu01' must be a scalar or a matrix")}
else{
mu01<- matrix(mu01,nrow=p1+constant,ncol=1)}
}
else{
if (dim(mu01)[1]!=p1+constant || dim(mu01)[2]!=1){
stop("error: The dimensions of 'mu02' are worng!") }
}
}
if (missing(v01)){
v01<- diag(0.1,p1+constant)}
else{
if(!is.matrix(v01)){
if (!is.vector(v01) || length(v01) != 1){
stop("'v01' must be a scalar or a matrix")}
else{
v01<- diag(v01,p1+constant)}
}
else{
if (dim(v01)[1]!=p1+constant || dim(v01)[2]!=p1+constant){
stop("error: The dimensions of 'v01' are worng!") }
}
}
if (missing(mu02)){
mu02<- matrix(0,nrow=p2+constant,ncol=1)}
else{
if(!is.matrix(mu02)){
if (!is.vector(mu02) || length(mu02) != 1){
stop("'mu02' must be a scalar or a matrix")}
else{
mu02<- matrix(mu02,nrow=p2+constant,ncol=1)}
}
else{
if (dim(mu02)[1]!=p2+constant || dim(mu02)[2]!=1){
stop("error: The dimensions of 'mu02' are worng!") }
}
}
if (missing(v02)){
v02<- diag(0.1,p2+constant)}
else{
if(!is.matrix(v02)){
if (!is.vector(v02) || length(v02) != 1){
stop("'v02' must be a scalar or a matrix")}
else{
v02<- diag(v02,p2+constant)}
}
else{
if (dim(v02)[1]!=p2+constant || dim(v02)[2]!=p2+constant){
stop("error: The dimensions of 'v02' are worng!") }
}
}
if (missing(v0)){
v0<- 3}
else{
if (!is.vector(v0) || length(v0) != 1)
stop ("'v0' must be a scalar")
if (v0 < 0)
stop ("'v0' must be positive")
}
if (missing(lambda0)){
lambda0<- ar.mse$var.pred/3}
else{
if (!is.vector(lambda0) || length(lambda0) != 1)
stop ("'lambda0' must be a scalar")
if (lambda0 < 0)
stop ("'lambda0' must be positive")
}
bound.thv<- c(quantile(zt,0.25),quantile(zt,0.75))
## Initialize a matrix for saving all iterative estimates
if(constant==1){
par.set<- matrix(NA,nrow=Iteration,ncol=(length(c(phi.1,phi.2,sigma.1,sigma.2,lagd,thres))+2))}
else{
par.set<- matrix(NA,nrow=Iteration,ncol=length(c(phi.1,phi.2,sigma.1,sigma.2,lagd,thres)))}
loglik.1<-loglik.2<-DIC<-NA ## to calculate DIC
## Start of MCMC sampling
for (igb in 1:Iteration){
if (!missing(thresVar)){
phi.1<- TAR.coeff(1,yt,p1,p2,sigma.1,lagd,thres,mu01,v01,lagp1,lagp2,constant=constant,zt) ## Draw phi.1 from a multivariate normal distribution
phi.2<- TAR.coeff(2,yt,p1,p2,sigma.2,lagd,thres,mu02,v02,lagp1,lagp2,constant=constant,zt) ## Draw phi.2 from a multivariate normal distribution
sigma.1<- TAR.sigma(1,yt,thres,lagd,p1,p2,phi.1,v0,lambda0,lagp1,lagp2,constant=constant,zt) ## Draw sigma.1 from an Inverse-Gamma distribution ## v and lambda are the hyper-parameters of the Gamma prior
sigma.2<- TAR.sigma(2,yt,thres,lagd,p1,p2,phi.2,v0,lambda0,lagp1,lagp2,constant=constant,zt) ## Draw sigma.2 from a Inverse-Gamma distribution
lagd<- TAR.lagd(yt,p1,p2,phi.1,phi.2,sigma.1,sigma.2,thres,lagp1,lagp2,constant=constant,d0,zt) ## Draw lagd from a multinomial distribution
thresholdt<- TAR.thres(yt,p1,p2,phi.1,phi.2,sigma.1,sigma.2,lagd,thres,step.r=step.thv,bound.thv,lagp1,lagp2,constant=constant,zt) ## Draw thresholdt by the MH algorithm
}
else{
phi.1<- TAR.coeff(1,yt,p1,p2,sigma.1,lagd,thres,mu01,v01,lagp1,lagp2,constant=constant) ## Draw phi.1 from a multivariate normal distribution
phi.2<- TAR.coeff(2,yt,p1,p2,sigma.2,lagd,thres,mu02,v02,lagp1,lagp2,constant=constant) ## Draw phi.2 from a multivariate normal distribution
sigma.1<- TAR.sigma(1,yt,thres,lagd,p1,p2,phi.1,v0,lambda0,lagp1,lagp2,constant=constant) ## Draw sigma.1 from an Inverse-Gamma distribution ## v and lambda are the hyper-parameters of the Gamma prior
sigma.2<- TAR.sigma(2,yt,thres,lagd,p1,p2,phi.2,v0,lambda0,lagp1,lagp2,constant=constant) ## Draw sigma.2 from a Inverse-Gamma distribution
lagd<- TAR.lagd(yt,p1,p2,phi.1,phi.2,sigma.1,sigma.2,thres,lagp1,lagp2,constant=constant,d0) ## Draw lagd from a multinomial distribution
thresholdt<- TAR.thres(yt,p1,p2,phi.1,phi.2,sigma.1,sigma.2,lagd,thres,step.r=step.thv,bound.thv,lagp1,lagp2,constant=constant) ## Draw thresholdt by the MH algorithm
}
sum.r<- sum.r+thresholdt[1] ## Count the number of acceptance
thres<- thresholdt[2] ## Save i-th iterated threshold value
## Compute the unconditional means for each regime
if(constant==1){
c.mean<- c(phi.1[1]/(1-sum(phi.1)+phi.1[1]),phi.2[1]/(1-sum(phi.2)+phi.2[1]))
par.set[igb,]<-c(phi.1,phi.2,sigma.1,sigma.2,thres,c.mean,lagd)
}
else {par.set[igb,]<-c(phi.1,phi.2,sigma.1,sigma.2,thres,lagd)
}
if (!missing(thresVar)){
loglik.1[igb]<-TAR.lik(yt,p1,p2,phi.1,phi.2,sigma.1,sigma.2,lagd,thres,lagp1,lagp2,constant=constant,thresVar)}
else{
loglik.1[igb]<-TAR.lik(yt,p1,p2,phi.1,phi.2,sigma.1,sigma.2,lagd,thres,lagp1,lagp2,constant=constant)
}
## Save all iterated estimates of parameters
ncol0<-ncol(par.set)
## Print out for monitoring the estimations of every refresh (1000) iterate
if(igb%%refresh==0){
cat("iteration = ",igb,"\n")
cat("regime 1 = ",round(phi.1,4),"\n")
cat("regime 2 = ",round(phi.2,4),"\n")
cat("sigma^2 1 = ",round(sigma.1,4),"\n")
cat("sigma^2 2 = ",round(sigma.2,4),"\n")
cat("r = ",round(thres,4),"\n")
accept.r<- (sum.r/igb)*100
cat("acceptance rate of r = ", round(accept.r,4),"%", "\n")
## Make a frequency table of delay lag
lag.freq<- rep(0,d0)
for(i in 1:d0){
lag.freq[i]<- sum(par.set[1:igb,ncol0]==i)
}
#lag.freq[1:length(table(par.set[,ncol0]))]<- table(par.set[,ncol0]) ## Frequency table for delay lag
lag.freq<- t(matrix(lag.freq,dimnames=list(c(as.character(1:d0)),c("Freq"))))
cat("Lag choice : ", "\n")
print(lag.freq)
cat("------------","\n")
}
} ## End of MCMC sampling
## Summarize the collected MCMC estimates
mcmc.stat<- TAR.summary(par.set[(Burnin+1):Iteration,1:(ncol0-1)],lagp1,lagp2,constant=constant)
print(round(mcmc.stat,4))
## Calculate the highest posterior probability of delay lag
lag.y<- c(1:d0)
lag.d<- lag.y[lag.freq==max(lag.freq)]
cat("Lag choice : ", "\n")
print(lag.freq)
cat("------------","\n")
cat("The highest posterior prob. of lag is at : ",lag.d,"\n")
## calculate D(E[theta])
if (!missing(thresVar)){
loglik.2<-TAR.lik(yt,p1,p2,mcmc.stat[1:(p1+constant),1],mcmc.stat[(p1+constant+1):(p1+constant+p2+constant),1],mcmc.stat[(p1+constant+p2+constant+1),1],mcmc.stat[(p1+constant+p2+constant+2),1],lag.d,mcmc.stat[(p1+constant+p2+constant+3),1],lagp1,lagp2,constant=constant,thresVar)}
else{
loglik.2<-TAR.lik(yt,p1,p2,mcmc.stat[1:(p1+constant),1],mcmc.stat[(p1+constant+1):(p1+constant+p2+constant),1],mcmc.stat[(p1+constant+p2+constant+1),1],mcmc.stat[(p1+constant+p2+constant+2),1],lag.d,mcmc.stat[(p1+constant+p2+constant+3),1],lagp1,lagp2,constant=constant)
}
DIC<-(2*(-2*sum(loglik.1[(Burnin+1):Iteration]))/length(loglik.1[(Burnin+1):Iteration]))-(-2*loglik.2)
cat(" DIC = ",DIC,"\n")
##################################################
## Trace plots and ACF for all parameter estimates
if(tplot =="TRUE"){
dev.new()
ts.plot(yt)
title("Trend plot of data.")
nnp<- 2*constant+p1+p2+3
kk<- ceiling(nnp/3)
pword<- NULL
if(constant==1){
pword[1:(nnp-3)]<- c(paste("phi1",c(0,lagp1),sep="."),paste("phi2",c(0,lagp2),sep="."))
}
else{
pword[1:(nnp-3)]<- c(paste("phi1",lagp1,sep="."),paste("phi2",lagp2,sep="."))
}
pword[(nnp-2):nnp]<- expression(sigma[1]^2,sigma[2]^2,r)
#pword[(p1+p2+1):(p1+p2+3)]<- expression()
#expression(phi[c(0,lagp1)]^(1),phi[c(0,lagp2)]^(2),sigma[1]^2,sigma[2]^2,r)
dev.new()
par(mfrow=c(kk,3),cex=.6,cex.axis=0.8,lwd=0.1,las=1,ps=12,pch=0.5)
## Trace plots of all MCMC iterations for all estimates
for (i in 1:nnp){
all.t<-length(par.set[,i])
plot.ts(par.set[,i],main=pword[i],xlab="",ylab="",col="blue")
#lines(1:all.t,rep(real.par[i],all.t),col="red")
lines(1:all.t,rep(mcmc.stat[i,"mean"],all.t),col="yellow")
lines(1:all.t,rep(mcmc.stat[i,"lower"],all.t),col="green")
lines(1:all.t,rep(mcmc.stat[i,"upper"],all.t),col="green")
}
dev.new()
par(mfrow=c(kk,3),cex=.6,cex.axis=0.8,lwd=0.1,las=1,ps=12,pch=0.5)
## ACF of collected iterations for all estimates
for (i in 1:nnp){
acf(par.set[(Burnin+1):Iteration,i],main=pword[i],xlab="",ylab="",lag.max=100)}
}
  ## Calculate the residuals for the TAR model
  maxd<-max(lagp1,lagp2,lag.d) ## leave enough initial observations for both the AR lags and the delay lag
if (constant == 1){
con.1<-mcmc.stat[1,1]
par.1<-mcmc.stat[2:(p1+1),1]
con.2<-mcmc.stat[p1+2,1]
par.2<-mcmc.stat[(p1+2+1):(p1+p2+2),1]
thv <-mcmc.stat[p1+p2+2+3,1]
}else{par.1<-mcmc.stat[1:p1,1]
par.2<-mcmc.stat[(p1+1):(p1+p2),1]
thv <-mcmc.stat[p1+p2+2+1,1]
}
residual<-rep(NA,nob-maxd)
for (t in (maxd+1):nob){
if (constant == 1){
      if ( zt[t-lag.d] <= thv){ ## zt is the threshold variable (zt = yt when none is supplied)
residual[t-maxd]<- yt[t] - sum(con.1,(par.1 * yt[t-lagp1]))
}
else{
residual[t-maxd]<- yt[t] - sum(con.2,(par.2 * yt[t-lagp2]))
}
}
else{
      if ( zt[t-lag.d] <= thv){
residual[t-maxd]<- yt[t] - sum(par.1 * yt[t-lagp1])
}
else{
residual[t-maxd]<- yt[t] - sum(par.2 * yt[t-lagp2])
}
}
}
tar<-list(mcmc=par.set,posterior=par.set[(Burnin+1):Iteration,1:(ncol0-1)],coef=round(mcmc.stat,4),residual=residual,lagd=lag.d,DIC=DIC)
return(tar)
##Sys.time()-Time.initial
}
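## A minimal usage sketch (the parameter values are illustrative, not from
## the original authors): simulate a two-regime SETAR series with TAR.simu
## (defined later in this package) and fit it with BAYSTAR.
if (FALSE) {
  set.seed(1)
  yt <- TAR.simu(nob = 1000, p1 = 2, p2 = 2,
                 ph.1 = c(0.1, -0.4, 0.3), ph.2 = c(0.2, 0.3, 0.3),
                 sig.1 = 0.8, sig.2 = 0.5, lagd = 1, thres = 0.2,
                 lagp1 = c(1, 2), lagp2 = c(1, 2))
  fit <- BAYSTAR(x = yt, lagp1 = c(1, 2), lagp2 = c(1, 2),
                 Iteration = 2000, Burnin = 1000, constant = 1,
                 step.thv = 0.05, refresh = 500)
  fit$coef     #posterior summaries
  fit$lagd     #delay lag with the highest posterior probability
}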
| /scratch/gouwar.j/cran-all/cranData/BAYSTAR/R/BAYSTAR.R |
TAR.coeff<-function(reg,ay,p1,p2,sig,lagd,thres,mu0,v0,lagp1,lagp2,constant=1,thresVar){
p<-max(max(lagp1),max(lagp2))+constant
n<- length(ay)
if (!missing(thresVar)){
if (length(thresVar) > n ){
zt <- thresVar[1:n]
cat("Using only first", n, "elements of threshold Variable\n")
}
else zt<-thresVar
lag.y<- zt[(p+1-lagd):(n-lagd)]
}
else lag.y<- ay[(p+1-lagd):(n-lagd)]
yt<- ay[(p+1):n]
if (reg==1){
ph<-rep(0.01,p1)
y.1<-matrix(yt[lag.y<=thres],ncol=1) ## Arrange vector y of regime 1
x.1<-matrix(NA,nrow=p1,ncol=n-p) ## Arrange matrix X of regime 1
for (i in 1:p1){
x.1[i,]<-ay[(p-lagp1[i]+1):(n-lagp1[i])]}
if(p1>1){
if (constant==1){
tx<-cbind(1,t(x.1[,lag.y<=thres]))
}
else {
tx<-t(x.1[,lag.y<=thres])
}
}
if(p1 == 1){
if (constant==1){
tx<-cbind(1,t(t(x.1[,lag.y<=thres])))
}
else {
tx<-t(t(x.1[,lag.y<=thres]))
}
}
yt<- matrix(yt[lag.y<=thres],ncol=1)
    sigma<- (t(tx)%*%tx)/sig+v0 ## Precision (inverse covariance) of the conditional posterior
## Mean of conditional posterior distribution
mu<- solve(sigma,((t(tx)%*%tx)/sig)%*%(solve((t(tx)%*%tx),t(tx)%*%yt))+v0%*%mu0)
ph<- rmvnorm(n = 1, mu, solve(sigma),method="chol") ## Draw ph from the multivariate normal distribution
}
else {
ph<-rep(0.01,p2)
y.2<-matrix(yt[lag.y>thres],ncol=1) ## Arrange vector y of regime 2
x.2<-matrix(NA,nrow=p2,ncol=n-p) ## Arrange matrix X of regime 2
for ( i in 1:p2){
x.2[i,]<-ay[(p-lagp2[i]+1):(n-lagp2[i])]}
if(p2 > 1) {
if (constant==1){
tx<-cbind(1,t(x.2[,lag.y>thres]))
}
else {
tx<-t(x.2[,lag.y>thres])
}
}
if(p2 == 1){
if (constant==1){
tx<-cbind(1,t(t(x.2[,lag.y>thres])))
}
else {
tx<-t(t(x.2[,lag.y>thres]))
}
}
yt<- matrix(yt[lag.y>thres],ncol=1)
    sigma<- (t(tx)%*%tx)/sig+v0 ## Precision (inverse covariance) of the conditional posterior
## Mean of conditional posterior distribution
mu<- solve(sigma,((t(tx)%*%tx)/sig)%*%(solve((t(tx)%*%tx),t(tx)%*%yt))+v0%*%mu0)
ph<- rmvnorm(n=1,mu,solve(sigma),method="chol") ## Draw ph from the multivariate normal distribution
}
return(ph)
}
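## The draw above is the standard conjugate normal update for regression
## coefficients: posterior precision S = X'X/sigma^2 + V0 and posterior mean
## m = S^{-1}(X'y/sigma^2 + V0 mu0). The coded term
## (X'X/sigma^2) (X'X)^{-1} X'y simplifies to X'y/sigma^2; a toy numerical
## check of that identity (with made-up data):
if (FALSE) {
  set.seed(2)
  tx <- cbind(1, rnorm(50)); yt <- matrix(rnorm(50), ncol = 1); sig <- 0.5
  a <- ((t(tx) %*% tx) / sig) %*% solve(t(tx) %*% tx, t(tx) %*% yt)
  b <- (t(tx) %*% yt) / sig
  all.equal(a, b)
}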
| /scratch/gouwar.j/cran-all/cranData/BAYSTAR/R/TAR.coeff.R |
TAR.lagd<-function(ay,p1,p2,ph.1,ph.2,sig.1,sig.2,thres,lagp1,lagp2,constant=1,d0,thresVar){
loglik<-lik<-pr<-NULL
if (!missing(thresVar)){
for (i in 1:d0){ ## Calculate log-likelihood from d=1 to d=d0
loglik[i]<- TAR.lik(ay,p1,p2,ph.1,ph.2,sig.1,sig.2,i,thres,lagp1,lagp2,constant=constant,thresVar)}}
else {
for (i in 1:d0){ ## Calculate log-likelihood from d=1 to d=d0
loglik[i]<- TAR.lik(ay,p1,p2,ph.1,ph.2,sig.1,sig.2,i,thres,lagp1,lagp2,constant=constant)}}
  lik<- (exp(loglik-max(loglik)))*(rev(c(1:d0))/sum(1:d0)) #weight the likelihoods by a decreasing prior on the delay lag
## Comparing the cdf of delay lag with a random probability
## to determine a new delay lag.
lagd<- (sum((cumsum(lik)/sum(lik))<runif(1, min=0, max=1)))+1
return(lagd)
}
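## The draw above is an inverse-CDF sample from the discrete posterior of the
## delay lag: exp(loglik - max(loglik)) rescales the likelihoods for numerical
## stability, and rev(1:d0)/sum(1:d0) is a prior that down-weights longer
## delays. A toy illustration with made-up log-likelihood values:
if (FALSE) {
  d0 <- 3
  loglik <- c(-100, -98, -101)
  lik <- exp(loglik - max(loglik)) * (rev(1:d0) / sum(1:d0))
  sum((cumsum(lik) / sum(lik)) < runif(1)) + 1  #sampled delay lag
}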
| /scratch/gouwar.j/cran-all/cranData/BAYSTAR/R/TAR.lagd.R |
TAR.lik<-function(ay,p1,p2,ph.1,ph.2,sig.1,sig.2,lagd,thres,lagp1,lagp2,constant=1,thresVar){
n<-length(ay)
p<-max(max(lagp1),max(lagp2))+constant
p.1<-matrix(ph.1,nrow=p1+constant,ncol=1) ## Build a matrix p.1 for ph.1
p.2<-matrix(ph.2,nrow=p2+constant,ncol=1) ## Build a matrix p.2 for ph.2
if (!missing(thresVar)){
if (length(thresVar) > n ){
zt <- thresVar[1:n]
cat("Using only first", n, "elements of threshold Variable\n")
}
else {zt<-thresVar}
lag.y<- zt[(p+1-lagd):(n-lagd)]
}
else lag.y<-ay[(p+1-lagd):(n-lagd)]
yt<-ay[(p+1):n]
n1<-sum(lag.y<=thres); n2<-sum(lag.y>thres) ## Count no. of observations for each regime
y.1<-matrix(yt[lag.y<=thres],ncol=1) ## Arrange vector y and matrix X for each regime
y.2<-matrix(yt[lag.y>thres],ncol=1)
x.1<-matrix(NA,nrow=p1,ncol=n-p) ## Arrange matrix X of regime 1
for (i in 1:p1){
x.1[i,]<-ay[(p-lagp1[i]+1):(n-lagp1[i])]}
if(p1 > 1) {
if (constant==1){
tx.1<-cbind(1,t(x.1[,lag.y<=thres]))}
else {tx.1<-t(x.1[,lag.y<=thres])}
}
if(p1 == 1) {
if (constant==1){
tx.1<-cbind(1,t(t(x.1[,lag.y<=thres])))}
else {tx.1<-t(t(x.1[,lag.y<=thres]))}
}
x.2<-matrix(NA,nrow=p2,ncol=n-p) ## Arrange matrix X of regime 2
for ( i in 1:p2){
x.2[i,]<-ay[(p-lagp2[i]+1):(n-lagp2[i])]}
if(p2 > 1){
if (constant==1){
tx.2<-cbind(1,t(x.2[,lag.y>thres]))}
else {tx.2<-t(x.2[,lag.y>thres])}
}
if(p2 == 1){
if (constant==1){
tx.2<-cbind(1,t(t(x.2[,lag.y>thres])))}
else {tx.2<-t(t(x.2[,lag.y>thres]))}
}
ln.li<--((t(y.1-tx.1%*%p.1)%*%(y.1-tx.1%*%p.1))/(2*sig.1))- ## The model log-likelihood function is
((t(y.2-tx.2%*%p.2)%*%(y.2-tx.2%*%p.2))/(2*sig.2))- ## the sum of two normal distributions
((n1/2)*log(sig.1))-((n2/2)*log(sig.2))
return(ln.li)
}
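## In formula form, the value returned above is, up to an additive constant,
##   log L = -SSR1/(2*sigma1^2) - SSR2/(2*sigma2^2)
##           - (n1/2)*log(sigma1^2) - (n2/2)*log(sigma2^2),
## where SSRk and nk are the sum of squared residuals and the number of
## observations in regime k. The -(n/2)*log(2*pi) term is omitted; this
## cancels in the MH acceptance ratio and in the lag posterior, but should be
## kept in mind when comparing this value with other log-likelihoods.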
| /scratch/gouwar.j/cran-all/cranData/BAYSTAR/R/TAR.lik.R |
TAR.sigma <-function(reg,ay,thres,lagd,p1,p2,ph,v,lambda,lagp1,lagp2,constant=1,thresVar){
n<-length(ay) ## Total no. of observations
p<-max(max(lagp1),max(lagp2))+constant
yt<- ay[(p+1):n]
if (!missing(thresVar)){
if (length(thresVar) > n ){
zt <- thresVar[1:n]
cat("Using only first", n, "elements of threshold Variable\n")
}
else zt<-thresVar
lag.y<- zt[(p+1-lagd):(n-lagd)]
}
else lag.y<- ay[(p+1-lagd):(n-lagd)]
if (reg==1){
m<- sum(lag.y<=thres)
y<- matrix(yt[lag.y<=thres],ncol=1)
x.1<-matrix(NA,nrow=p1,ncol=n-p) ## Arrange matrix X of regime 1
for (i in 1:p1){
x.1[i,]<-ay[(p-lagp1[i]+1):(n-lagp1[i])]}
if(p1 > 1){
if (constant==1){
tx<-cbind(1,t(x.1[,lag.y<=thres]))}
else {tx<-t(x.1[,lag.y<=thres])}
}
if(p1 == 1){
if (constant==1){
tx<-cbind(1,t(t(x.1[,lag.y<=thres])))}
else {tx<-t(t(x.1[,lag.y<=thres]))}
}
phi<- matrix(ph,nrow=p1+constant,ncol=1)
s2<- (t(y-tx%*%phi)%*%(y-tx%*%phi))/m ## Compute sample variance
}
else{
m<- sum(lag.y>thres)
phi<- matrix(ph,nrow=p2+constant,ncol=1)
y<- matrix(yt[lag.y>thres],ncol=1)
x.2<-matrix(NA,nrow=p2,ncol=n-p) ## Arrange matrix X of regime 2
for ( i in 1:p2){
x.2[i,]<-ay[(p-lagp2[i]+1):(n-lagp2[i])]}
if(p2 > 1){
if (constant==1){
tx<-cbind(1,t(x.2[,lag.y>thres]))}
else {tx<-t(x.2[,lag.y>thres])}
}
if(p2 == 1){
if (constant==1){
tx<-cbind(1,t(t(x.2[,lag.y>thres])))}
else {tx<-t(t(x.2[,lag.y>thres]))}
}
s2<- (t(y-tx%*%phi)%*%(y-tx%*%phi))/m ## Compute sample variance
}
shape<- (v+m)/2 ## Set shape parameter of Gamma distribution
rate<- (v*lambda+m*s2)/2 ## Set rate parameter of Gamma distribution
sigma<- 1/rgamma(1, shape=shape, rate=rate) ## Draw sigma^2 from an Inverse-Gamma distribution.
return(sigma)
}
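
## The draw above is the standard conjugate update: sigma^2 follows an
## Inverse-Gamma((v + m)/2, (v*lambda + m*s2)/2) posterior. For example, with
## illustrative values v = 3, lambda = 1, m = 50 and s2 = 0.8:
##   1 / rgamma(1, shape = (3 + 50) / 2, rate = (3 * 1 + 50 * 0.8) / 2)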
| /scratch/gouwar.j/cran-all/cranData/BAYSTAR/R/TAR.sigma.R |
TAR.simu<-function(nob,p1,p2,ph.1,ph.2,sig.1,sig.2,lagd,thres,lagp1,lagp2){
## Simulate nmax observations and discard first
## 1000 observations.
nmax<-nob+1000 ## No. of simulated observations
p<-max(p1,p2)+1 ## Set initial period
y<-rep(0,nmax) ## Generate nmax zeros to y
at<-rnorm(nmax,mean=0,sd=1) ## Simulate nmax normal random variables
for (i in p:nmax){ ## Generate data y recursively from time p to nmax
if (y[i-lagd] <= thres) ## Determine the location of the lagged y
## Simulate data from Regime 1
{y[i]=sum(ph.1[1],y[i-lagp1]*ph.1[2:(p1+1)],at[i]*sqrt(sig.1))}
else
## Simulate data from Regime 2
{y[i]=sum(ph.2[1],y[i-lagp2]*ph.2[2:(p2+1)],at[i]*sqrt(sig.2))}
}
yt<-y[1001:nmax] ## Discard first 1000 observations and
## save the observations from 1001 to nmax.
return(yt)
}
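
## Example call with illustrative parameter values (a two-regime model with
## AR order 1 in each regime, delay 1 and threshold 0):
##   yt <- TAR.simu(nob = 200, p1 = 1, p2 = 1,
##                  ph.1 = c(0.1, 0.6), ph.2 = c(0.2, -0.4),
##                  sig.1 = 1, sig.2 = 1, lagd = 1, thres = 0,
##                  lagp1 = 1, lagp2 = 1)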
| /scratch/gouwar.j/cran-all/cranData/BAYSTAR/R/TAR.simu.R |
TAR.summary <-function(x,lagp1,lagp2,constant=1)
{
n<-ncol(x)
temp<-matrix(NA,n,5)
for(i in 1:n){
temp[i,1]<-mean(x[,i])
temp[i,2]<-quantile(x[,i],0.5)                ## Median of estimates
temp[i,3]<-sd(x[,i])
temp[i,4:5]<-quantile(x[,i],c(0.025,0.975))   ## 95% Bayes interval of estimates
}
## Name the summary columns and rows once, outside the loop:
colnames(temp)<-c("mean","median","s.d.","lower","upper")
if(constant==1)
{
rownames(temp)<-c(paste("phi1",c(0,lagp1),sep="."),paste("phi2",c(0,lagp2),sep="."),"sigma^2 1","sigma^2 2","r","mean1","mean2")
}
else
{
rownames(temp)<-c(paste("phi1",lagp1,sep="."),paste("phi2",lagp2,sep="."),"sigma^2 1","sigma^2 2","r")
}
return(temp)
}
| /scratch/gouwar.j/cran-all/cranData/BAYSTAR/R/TAR.summary.R |
TAR.thres<-function(ay,p1,p2,ph.1,ph.2,sig.1,sig.2,lagd,thres,step.r=0.02,bound,lagp1,lagp2,constant=1,thresVar){ ## step.r is the step size of the MH sampling.
                                                                         ## bound gives the limits (a,b) of the uniform prior U(a,b) for the threshold.
new.r<- thres+step.r*rnorm(1,mean=0,sd=1) ## Sampling a candidate threshold value
repeat{ ## Check whether the new threshold value is located on U(a,b).
if((new.r< bound[1])|(new.r> bound[2])){ ## If not, repeat to sample a new value.
new.r<- thres+step.r*rnorm(1,mean=0,sd=1)}
else break
}
## The random walk M-H algorithm
if (!missing(thresVar)){
old.lik<- TAR.lik(ay,p1,p2,ph.1,ph.2,sig.1,sig.2,lagd,thres,lagp1,lagp2,constant=constant,thresVar)
new.lik<- TAR.lik(ay,p1,p2,ph.1,ph.2,sig.1,sig.2,lagd,new.r,lagp1,lagp2,constant=constant,thresVar)
}
else{
old.lik<- TAR.lik(ay,p1,p2,ph.1,ph.2,sig.1,sig.2,lagd,thres,lagp1,lagp2,constant=constant)
new.lik<- TAR.lik(ay,p1,p2,ph.1,ph.2,sig.1,sig.2,lagd,new.r,lagp1,lagp2,constant=constant)
}
  if((new.lik-old.lik)>log(runif(1))){r.count=1}   ## Determine whether to accept the candidate value or not.
else{ new.r<- thres; r.count<- 0 }
return(c(r.count,new.r))
}
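
## The acceptance test above is the usual log-scale Metropolis rule. As a
## standalone sketch with illustrative log-likelihood values:
##   old.lik <- -99.7; new.lik <- -100.2
##   (new.lik - old.lik) > log(runif(1))          ## TRUE with prob. exp(-0.5) ~ 0.61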
| /scratch/gouwar.j/cran-all/cranData/BAYSTAR/R/TAR.thres.R |
.onAttach <- function(lib,pkg)
{
# figure out year automatically (probably could be done more elegantly)
date <- date()
x <- regexpr("[0-9]{4}", date)
this.year <- substr(date, x[1], x[1] + attr(x, "match.length") - 1)
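    # (format(Sys.Date(), "%Y") would give the same year in one step)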
# echo output to screen
packageStartupMessage("##\n## On Bayesian analysis of Threshold autoregressive model (BAYSTAR)\n")
packageStartupMessage("## Copyright (C) 2007-", this.year,
" Cathy W. S. Chen, Edward M.H. Lin, F.C. Liu, and Richard Gerlach\n", sep="")
ver <- read.dcf(file.path(lib, pkg, "DESCRIPTION"), "Version")
ver <- as.character(ver)
packageStartupMessage("Version: ", ver, "loaded\n")
# require(mvtnorm, quietly=TRUE)
# require(coda, quietly=TRUE)
} | /scratch/gouwar.j/cran-all/cranData/BAYSTAR/R/zzz.R |
#' Capitalize two-word strings
#'
#' @aliases capit_two_words
#'
#' @description
#' Ancillary function to capitalize the first letter of both words in a
#' two-word string. This can be used for example to capitalize the teams
#' names for the plots title.
#'
#' @usage
#' capit_two_words(two_word_string)
#'
#' @param two_word_string Two-word string.
#'
#' @return
#' Vector with the two words capitalized.
#'
#' @author
#' Guillermo Vinue
#'
#' @examples
#' capit_two_words("valencia basket")
#'
#' @export
capit_two_words <- function(two_word_string){
two_words <- strsplit(two_word_string, " ")[[1]]
 two_words_cap <- paste(toupper(substring(two_words, 1, 1)),
                        substring(two_words, 2),
                        sep = "", collapse = " ")
return(two_words_cap)
}
| /scratch/gouwar.j/cran-all/cranData/BAwiR/R/capit_two_words.R |
#' Efficient Points Scored (EPS)
#'
#' @aliases do_EPS
#'
#' @description
#' A limitation of \code{\link{do_OE}} is that it does not take into account
#' the quantity of the player's offensive production, that is to say, whether
#' the player provides a lot of offense or not. In addition, it does not give
#' credit for free throws. An extension of \code{\link{do_OE}} has been defined:
#' the Efficient Points Scored (EPS), which is the product of OE and points
#' scored. Points scored counts free throws, two-point and three-point field
#' goals. A factor \emph{F} is also added to put the adjusted total points on
#' a points-scored scale. With the factor \emph{F}, the sum of the EPS scores
#' for all players in a given season is equal to the league total points
#' scored in that season.
#'
#' @usage
#' do_EPS(df)
#'
#' @param df Data frame with the games and the players info.
#'
#' @return
#' EPS values.
#'
#' @references
#' Shea, S., Baker, C., (2013). Basketball Analytics:
#' Objective and Efficient Strategies for Understanding
#' How Teams Win. Lake St. Louis, MO: Advanced Metrics, LLC.
#'
#' @author
#' Guillermo Vinue
#'
#' @seealso
#' \code{\link{do_OE}}, \code{\link{do_add_adv_stats}}
#'
#' @examples
#' df <- do_join_games_bio("ACB", acb_games_1718, acb_players_1718)
#' df1 <- do_add_adv_stats(df)
#' do_EPS(df1)[1]
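#' # By construction, sum(do_EPS(df1)) equals sum(df1$PTS) up to rounding.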
#'
#' @export
do_EPS <- function(df){
  oe_num <- do_OE(df)
  den <- oe_num * df$PTS
  # Normalizing factor F, so that the EPS values sum to the league total
  # points (named f_fact to avoid masking base R's F shorthand for FALSE):
  f_fact <- sum(df$PTS) / sum(den)
  eps_num <- round(f_fact * den, 1)
  return(eps_num)
}
#' Offensive Efficiency (OE)
#'
#' @aliases do_OE
#'
#' @description
#' Offensive Efficiency (OE) is a measure to evaluate the quality of
#' offense produced. OE counts the total number of successful offensive
#' possessions the player was involved in, regarding the player's total
#' number of potential ends of possession.
#'
#' This measure is used in the definition of \code{\link{do_EPS}}.
#'
#' @usage
#' do_OE(df)
#'
#' @param df Data frame with the games and the players info.
#'
#' @note
#' When either both the numerator and denominator of the OE expression
#' are 0 or just the denominator is 0, the function returns a 0.
#'
#' @return
#' OE values.
#'
#' @references
#' Shea, S., Baker, C., (2013). Basketball Analytics:
#' Objective and Efficient Strategies for Understanding
#' How Teams Win. Lake St. Louis, MO: Advanced Metrics, LLC.
#'
#' @author
#' Guillermo Vinue
#'
#' @seealso
#' \code{\link{do_EPS}}, \code{\link{do_add_adv_stats}}
#'
#' @examples
#' df <- do_join_games_bio("ACB", acb_games_1718, acb_players_1718)
#' df1 <- do_add_adv_stats(df)
#' # Players with OE = 0:
#' # df1[55, c("Player.x", "FG", "AST", "FGA", "ORB", "TOV")]
#' # Player.x FG AST FGA ORB TOV
#' # Triguero, J. 0 0 0 0 0
#' # OE can be greater than 1, for example:
#' # df1[17, c("Player.x", "FG", "AST", "FGA", "ORB", "TOV")]
#' # Player.x FG AST FGA ORB TOV
#' # Diagne, Moussa 3 0 3 1 0
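#' # Hand check for Diagne: OE = (3 + 0) / (3 - 1 + 0 + 0) = 1.5.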
#' do_OE(df1[1,])
#'
#' @export
do_OE <- function(df){
numer <- df$FG + df$AST
denom <- df$FGA - df$ORB + df$AST + df$TOV
  # Vectorized: return 0 whenever the numerator or the denominator is 0
  # (see the note above); otherwise the rounded ratio.
  oe_num <- ifelse(numer == 0 | denom == 0, 0, round(numer / denom, 1))
return(oe_num)
}
| /scratch/gouwar.j/cran-all/cranData/BAwiR/R/do_OE.R |
#' Advanced statistics
#'
#' @aliases do_add_adv_stats
#'
#' @description
#' This function adds to the whole data frame the advanced statistics
#' for every player in every game.
#'
#' @usage
#' do_add_adv_stats(df)
#'
#' @param df Data frame with the games and the players info.
#'
#' @return
#' Data frame.
#'
#' @details
#' The advanced statistics computed are as follows:
#' \itemize{
#' \item GameSc: Game Score.
#' \item PIE: Player Impact Estimate.
#' \item EFGPerc: Effective Field Goal Percentage.
#' \item ThreeRate: Three points attempted regarding the total field goals attempted.
#' \item FRate: Free Throws made regarding the total field goals attempted.
#' \item STL_TOV: Steal to Turnover Ratio.
#' \item AST_TOV: Assist to Turnover Ratio.
#' \item PPS: Points Per Shot.
#' \item OE: Offensive Efficiency.
#' \item EPS: Efficient Points Scored.
#' }
#'
#' The detailed definition of some of these stats can be found at
#' \url{https://www.basketball-reference.com/about/glossary.html} and
#' \url{https://www.nba.com/stats/help/glossary/}.
#'
#' @author
#' Guillermo Vinue
#'
#' @seealso
#' \code{\link{do_OE}}, \code{\link{do_EPS}}
#'
#' @examples
#' df <- do_join_games_bio("ACB", acb_games_1718, acb_players_1718)
#' df1 <- do_add_adv_stats(df)
#'
#' @importFrom magrittr %>%
#' @importFrom dplyr group_by mutate select
#'
#' @export
do_add_adv_stats <- function(df) {
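  # NULL bindings to avoid the 'no visible binding for global variable'
  # notes in R CMD check (see also do_join_games_bio):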
AST <- BLKfv <- DRB <- EFGPerc <- FG <- FGA <- FT <- FTA <- Game <- NULL
GameRes <- GameSc <- GmAST <- GmBLKfv <- GmDRB <- GmFG <- GmFGA <- NULL
GmFT <- GmFTA <- GmORB <- GmPF <- GmPTS <- GmSTL <- GmTOV <- ORB <- NULL
PF <- PTS <- Player.x <- STL <- TOV <- ThreeP <- ThreePA <- TwoP <- NULL
Day <- MP <- TwoPA <- denom_pie <- numer_pie <- NULL
# Total game stats:
df1 <- df %>%
filter(MP != 0) %>%
filter(!is.na(MP)) %>%
filter(GameRes != "NA-NA") %>%
group_by(Game) %>%
mutate(GmPTS = sum(as.numeric(strsplit(as.character(unique(GameRes)), "-")[[1]]))) %>%
mutate(GmFG = sum(TwoP) + sum(ThreeP)) %>%
mutate(GmFGA = sum(TwoPA) + sum(ThreePA)) %>%
mutate(GmFT = sum(FT)) %>%
mutate(GmFTA = sum(FTA)) %>%
mutate(GmDRB = sum(DRB)) %>%
mutate(GmORB = sum(ORB)) %>%
mutate(GmAST = sum(AST)) %>%
mutate(GmSTL = sum(STL)) %>%
mutate(GmBLKfv = sum(BLKfv)) %>%
mutate(GmPF = sum(PF)) %>%
mutate(GmTOV = sum(TOV))
# Game Score and PIE:
df2 <- df1 %>%
group_by(Player.x) %>%
mutate(FG = TwoP + ThreeP) %>%
mutate(FGA = TwoPA + ThreePA) %>%
mutate(FGPerc = ifelse(FGA == 0, 0, round((FG / FGA) * 100))) %>%
mutate(GameSc = PTS + 0.4 * FG - 0.7 * FGA - 0.4 * (FTA - FT) + 0.7 * ORB + 0.3 * DRB +
STL + 0.7 * AST + 0.7 * BLKfv - 0.4 * PF - TOV) %>%
mutate(GameSc = round(GameSc)) %>%
mutate(numer_pie = PTS + FG + FT - FGA - FTA + DRB + (0.5 * ORB) + AST + STL + (0.5 * BLKfv) - PF - TOV) %>%
mutate(denom_pie = GmPTS + GmFG + GmFT - GmFGA - GmFTA + GmDRB + (0.5 * GmORB) +
GmAST + GmSTL + (0.5 * GmBLKfv) - GmPF - GmTOV) %>%
mutate(PIE = round((numer_pie / denom_pie) * 100)) %>%
select(-numer_pie, -denom_pie) %>%
# More stats:
#mutate(TSP = PTS / (2 * (FGA + 0.44 * FTA))) %>% # True Shooting Percentage.
#mutate(TSP = round(TSP * 100)) %>% # For Causeur (first row), this percentage is greater than 100.
mutate(EFGPerc = ifelse(FGA == 0, 0, (FG + 0.5 * ThreeP) / FGA)) %>% # Effective Field Goal Percentage.
mutate(EFGPerc = round(EFGPerc * 100)) %>%
mutate(EFGPerc = ifelse(EFGPerc > 100, 100, EFGPerc)) %>% # FG = 1 ; Three = 1 --> (1 + 0.5 * 1) / 1 > 1
mutate(ThreeRate = ifelse(FGA == 0, 0, round(ThreePA / FGA, 1))) %>% # 3-Point Attempt Rate.
mutate(FRate = ifelse(FGA == 0, 0, round(FT / FGA, 1))) %>% # Free Throw Attempt Rate.
mutate(STL_TOV = ifelse(TOV == 0, STL, round(STL / TOV, 1))) %>% # Steal to Turnover Ratio.
mutate(AST_TOV = ifelse(TOV == 0, AST, round(AST / TOV, 1))) %>% # Assist to Turnover Ratio.
mutate(PPS = ifelse(FGA == 0, 0, round(PTS / FGA, 1))) # Points per Shot.
df2$OE <- do_OE(df2) # Offensive Efficiency.
df2$EPS <- do_EPS(df2) # Efficient Points Scored.
return(df2)
} | /scratch/gouwar.j/cran-all/cranData/BAwiR/R/do_add_adv_stats.R |
#' Get games with clutch time
#'
#' @aliases do_clutch_time
#'
#' @description
#' Obtain the games that have clutch time. The clutch time is the game situation when the
#' scoring margin is within 5 points with five or fewer minutes remaining in a game.
#'
#' @usage
#' do_clutch_time(data)
#'
#' @param data Source play-by-play data.
#'
#' @return
#' Data frame of the game that has clutch time.
#'
#' @author
#' Guillermo Vinue
#'
#' @examples
#' df0 <- do_clutch_time(acb_vbc_cz_pbp_2223)
#' #df0 # If no rows, that means that the game did not have clutch time.
#'
#' @export
do_clutch_time <- function(data) {
period <- time_point <- local_score <- visitor_score <- NULL
# Check if there were overtimes after the fourth period:
per_type <- unique(data$period)
per_sel <- per_type[grep("4C|PR", per_type)]
data_cl <- data.frame()
for (k in per_sel) {
# Clutch time:
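    # time_point is a zero-padded "mm:ss" string, so this lexicographic
    # comparison against "05:00" agrees with the numeric one.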
df1 <- data %>%
filter(grepl(k, period), time_point < "05:00") %>%
mutate(local_score = as.numeric(local_score),
visitor_score = as.numeric(visitor_score))
diff_score <- abs(df1$local_score[1] - df1$visitor_score[1])
if (diff_score > 5) {
next
}else{
data_cl <- bind_rows(data_cl, df1)
}
}
return(data_cl)
}
| /scratch/gouwar.j/cran-all/cranData/BAwiR/R/do_clutch_time.R |
#' Four factors data frame
#'
#' @aliases do_four_factors_df
#'
#' @description
#' This function computes team's offense and defense four factors.
#' The four factors are Effective Field Goal Percentage (EFGP),
#' Turnover Percentage (TOVP), Offensive Rebound Percentage (ORBP) and
#' Free Throws Rate (FTRate). They are well defined at
#' \url{http://www.rawbw.com/~deano/articles/20040601_roboscout.htm} and
#' \url{https://www.basketball-reference.com/about/factors.html}.
#'
#' As a summary, EFGP is a measure of shooting efficiency; TOVP is
#' the percentage of possessions where the team missed the ball, see
#' \url{https://www.nba.com/thunder/news/stats101.html} to read about
#' the 0.44 coefficient; ORBP measures how many rebounds were offensive
#' from the total of available rebounds; Finally, FTRate is a measure of both
#' how often a team gets to the line and how often they make them.
#'
#' @usage do_four_factors_df(df_games, teams)
#'
#' @param df_games Data frame with the games, players info, advanced stats and
#' eventually recoded teams names.
#' @param teams Teams names.
#'
#' @details
#' Instead of defining the Offensive and Defensive Rebound Percentage
#' as mentioned in the previous links, I have computed just the Offensive
#' Rebound Percentage for the team and for its rivals. This makes easier
#' to have four facets, one per factor, in the ggplot.
#'
#' In order to establish the team rankings, we have to consider these facts:
#' In defense (accumulated statistics of the opponent teams to the team of interest),
#' the best team in each factor is the one that allows the smallest EFGP, the biggest TOVP,
#' the smallest ORBP and the smallest FTRate, respectively.
#'
#' In offense (accumulated statistics of the team of interest), the best team in each factor
#' is the one that has the biggest EFGP, the smallest TOVP,
#' the biggest ORBP and the biggest FTRate, respectively.
#'
#' @return
#' A list with two data frames, \code{df_rank} and \code{df_no_rank}.
#' Both have the same columns:
#' \itemize{
#' \item Team: Team name.
#' \item Type: Either Defense or Offense.
#' \item EFGP, ORBP, TOVP and FTRate.
#' }
#'
#' The \code{df_rank} data frame contains the team ranking label for
#' each statistic between parentheses. Therefore, \code{df_no_rank} is used
#' to create the ggplot with the numerical values and \code{df_rank} is
#' used to add the ranking labels.
#'
#' @author
#' Guillermo Vinue
#'
#' @seealso
#' \code{\link{get_four_factors_plot}}
#'
#' @examples
#' df <- do_join_games_bio("ACB", acb_games_1718, acb_players_1718)
#' df1 <- do_add_adv_stats(df)
#' # When only one team is selected the rankings between parentheses
#' # do not reflect the real rankings regarding all the league teams.
#' # The rankings are computed with respect to the number of teams
#' # passed as an argument.
#' df_four_factors <- do_four_factors_df(df1, "Valencia")
#'
#' @importFrom dplyr summarise bind_rows
#'
#' @export
do_four_factors_df <- function(df_games, teams) {
GameID <- Day <- Game <- Team <- Player.x <- FG <- FGA <- ThreeP <- FT <- FTA <- NULL
DRB <- ORB <- TOV <- Type <- EFGP <- TOVP <- ORBP <- FTRate <- NULL
df5 <- data.frame()
for (i in teams) {
#team_GameID <- unique(df_games$GameID[df_games$Team == i])
team_Game <- unique(df_games$Game[df_games$Team == i])
df2 <- df_games %>%
#ungroup() %>%
#filter(grepl(i, Game)) %>%
#filter(GameID %in% team_GameID) %>%
filter(Game %in% team_Game) %>%
select(Day, Game, Team, Player.x, FG, FGA, ThreeP, FT, FTA, DRB, ORB, TOV) %>%
group_by(Team) %>%
mutate(Type = ifelse(Team == i, "Offense", "Defense")) %>%
ungroup()
df3 <- df2 %>%
group_by(Type) %>%
summarise(EFGP = (sum(FG) + 0.5 * sum(ThreeP)) / sum(FGA),
TOVP = sum(TOV) / (sum(FGA) + 0.44 * sum(FTA) + sum(TOV)),
ORB = sum(ORB),
DRB = sum(DRB),
ORBP = NA,
FTRate = sum(FT) / sum(FGA)) %>%
ungroup()
df3$ORBP[1] <- df3$ORB[1] / (df3$ORB[1] + df3$DRB[2])
df3$ORBP[2] <- df3$ORB[2] / (df3$ORB[2] + df3$DRB[1])
df4 <- df3 %>%
select(-ORB, -DRB) %>%
mutate(EFGP = round(EFGP * 100, 2),
TOVP = round(TOVP * 100, 2),
ORBP = round(ORBP * 100, 2),
FTRate = round(FTRate, 2)) %>%
mutate(Team = i) %>%
select(Team, everything())
# Data frame with the four factors for each team, both defense and offense:
df5 <- bind_rows(df5, df4)
}
# The next steps are to add the ranking label for each team in the corresponding factor.
df6 <- df5 %>%
filter(Type == "Defense") %>%
# The best team is the one that allows the worst (smallest) field percentage:
mutate(order_EFGP = Team[order(EFGP)]) %>%
# The best team is the one that allows the biggest turnover percentage:
mutate(order_TOVP = Team[order(TOVP, decreasing = TRUE)]) %>%
# The best team is the one that allows the worst (smallest) offensive rebounding percentage:
mutate(order_ORBP = Team[order(ORBP)]) %>%
# The best team is the one that allows the worst (smallest) free throw rate:
mutate(order_FTRate = Team[order(FTRate)])
df6 <- as.data.frame(df6)
for (i in teams) {
# Find the position of the team in each of the order columns.
orders_cols <- apply(df6[,7:10], 2, function(x){grep(i, x)})
df6[df6$Team == i, 3:6] <- paste(df6[df6$Team == i, 3:6],
" (", orders_cols, ")", sep = "")
}
df7 <- df5 %>%
filter(Type == "Offense") %>%
# The best team is the one that has the best (biggest) field percentage:
mutate(order_EFGP = Team[order(EFGP, decreasing = TRUE)]) %>%
# The best team is the one that has the smallest turnover percentage:
mutate(order_TOVP = Team[order(TOVP)]) %>%
# The best team is the one that has the best (biggest) offensive rebounding percentage:
mutate(order_ORBP = Team[order(ORBP, decreasing = TRUE)]) %>%
# The best team is the one that has the best (biggest) free throw rate:
mutate(order_FTRate = Team[order(FTRate, decreasing = TRUE)])
df7 <- as.data.frame(df7)
for (i in teams) {
orders_cols <- apply(df7[,7:10], 2, function(x){grep(i, x)})
df7[df7$Team == i, 3:6] <- paste(df7[df7$Team == i, 3:6],
" (", orders_cols, ")", sep = "")
}
# Data frame with the four factors for each team, both defense and offense and the ranking label:
df8 <- bind_rows(df6, df7) %>%
select(-contains("order")) %>%
arrange(rev(Team))
return(list(df_rank = df8, df_no_rank = df5))
}
| /scratch/gouwar.j/cran-all/cranData/BAwiR/R/do_four_factors_df.R |
#' Compute free throw fouls
#'
#' @aliases do_ft_fouls
#'
#' @description
#' Compute how many 1-,2- and 3-free throw fouls has committed or
#' received every player.
#'
#' @usage
#' do_ft_fouls(data, type)
#'
#' @param data Play-by-play data.
#' @param type Either 'comm' (for committed) or 'rec' (for received).
#'
#' @return
#' Data frame with the following columns:
#' \itemize{
#' \strong{team}: Name of the team.
#' \strong{player}: Name of the player.
#' \strong{n_ft_fouls_x}: Number of free throw fouls committed or received.
#' \strong{n_ft_x}: Number of free throws given or got.
#' \strong{n_ft_char}: Type of free throw. Options can be 1TL, 2TL and 3TL.
#' \strong{n}: Number of free throws of each type.
#' }
#'
#' @author
#' Guillermo Vinue
#'
#' @examples
#' df01 <- do_ft_fouls(acb_vbc_cz_pbp_2223, "comm")
#' #df01
#'
#' df02 <- do_ft_fouls(acb_vbc_cz_pbp_2223, "rec")
#' #df02
#'
#' @importFrom dplyr slice
#'
#' @export
do_ft_fouls <- function(data, type) {
action <- n_ft_char <- team <- player <- n_ft_given <- n_ft_fouls_given <- NULL
n_ft_got <- n_ft_fouls_got <- NULL
data <- data %>%
# I have to remove both the Scrubb brothers and the
# Quintela brothers, because they are not differentiated,
# so we would be summing statistics for two different players.
filter(!player %in% c("Scrubb", "Quintela"))
if (type == "comm") {
df0 <- data %>%
filter(grepl("TL)", action)) %>%
filter(!grepl("T\\u00e9cnica", action)) %>%
mutate(n_ft_char = gsub("\\)", "", gsub(".*\\(", "", action)), .after = action) %>%
mutate(n_ft_given = case_when(
grepl("1TL", action) ~ 1,
grepl("2TL", action) ~ 2,
grepl("3TL", action) ~ 3), .after = n_ft_char)
df1_points_given <- df0 %>%
group_by(team, player) %>%
summarise(n_ft_fouls_given = n(),
n_ft_given = sum(n_ft_given)) %>%
ungroup() %>%
arrange(desc(n_ft_fouls_given))
df1_type_given <- df0 %>%
group_by(team, player, n_ft_char) %>%
summarise(n = n()) %>%
ungroup()
data_res <- left_join(df1_points_given, df1_type_given, by = c("team", "player"))
}
if (type == "rec") {
data <- data %>%
filter(!grepl("T\\u00e9cnica", action))
df2 <- data.frame()
for (i in paste0("(", 1:3, "TL)")) {
pos_ft <- grep(i, data$action)
df0 <- data %>%
slice(pos_ft + 1)
#unique(df0$action) #It must only appear 'Falta Recibida'
#table(df0$action)
df1 <- df0 %>%
group_by(team, player) %>%
summarise(n = n()) %>%
ungroup() %>%
mutate(n_ft_char = i, .before = n)
df2 <- bind_rows(df2, df1)
}
df1_type_got <- df2 %>%
arrange(team, player)
df1_points_got <- df1_type_got %>%
mutate(n_ft_char = gsub("\\(|\\)", "", n_ft_char)) %>%
mutate(n_ft_got = case_when(
grepl("1TL", n_ft_char) ~ 1,
grepl("2TL", n_ft_char) ~ 2,
grepl("3TL", n_ft_char) ~ 3), .after = n_ft_char) %>%
group_by(team, player) %>%
summarise(n_ft_fouls_got = sum(n),
n_ft_got = sum(n_ft_got * n)) %>%
ungroup()
data_res <- left_join(df1_points_got, df1_type_got, by = c("team", "player")) %>%
mutate(n_ft_char = gsub("\\(|\\)", "", n_ft_char)) %>%
arrange(desc(n_ft_fouls_got))
}
return(data_res)
}
| /scratch/gouwar.j/cran-all/cranData/BAwiR/R/do_ft_fouls.R |
#' Join games and players' info
#'
#' @aliases do_join_games_bio
#'
#' @description
#' This function calls the needed ancillary functions to join the games played
#' by all the players in the desired competition (currently ACB, Euroleague
#' and Eurocup) with their personal details.
#'
#' @usage
#' do_join_games_bio(competition, df_games, df_rosters)
#'
#' @param competition String. Options are "ACB", "Euroleague" and "Eurocup".
#' @param df_games Data frame with the games.
#' @param df_rosters Data frame with the biography of the roster players.
#'
#' @return
#' Data frame.
#'
#' @author
#' Guillermo Vinue
#'
#' @seealso
#' \code{\link{join_players_bio_age_acb}}, \code{\link{join_players_bio_age_euro}}
#'
#' @examples
#' df <- do_join_games_bio("ACB", acb_games_1718, acb_players_1718)
#'
#' @importFrom dplyr filter
#' @importFrom purrr map_if
#' @importFrom tibble as_tibble
#'
#' @export
do_join_games_bio <- function(competition, df_games, df_rosters) {
CombinID <- NULL # This is needed to avoid the issue:
# 'no visible binding for global variable CombinID' when R CMD check
# Pre-processing:
# Filter by CombinID != 0 and CombinID != "NA":
# CombinID == 0 refers to the players who didn't play any minute in the corresponding game.
# CombinID == NA refers to the rows of "Equipo".
df2 <- df_games %>%
filter(CombinID != 0, CombinID != "NA") %>%
droplevels() %>% # To drop unused levels after filtering by factor.
map_if(is.factor, as.character) %>%
#as_data_frame()
as_tibble()
if (competition == "ACB") {
# Add players bio and age:
df2_1 <- join_players_bio_age_acb(df2, df_rosters)
}
if (competition %in% c("Euroleague", "Eurocup") ) {
# Add players bio and age:
df2_1 <- join_players_bio_age_euro(df2, df_rosters)
}
return(df_all = df2_1)
} | /scratch/gouwar.j/cran-all/cranData/BAwiR/R/do_join_games_bio.R |
#' Compute ACB lineups
#'
#' @aliases do_lineup
#'
#' @description
#' Compute all the lineups that a given team shows during a game.
#'
#' @usage
#' do_lineup(data, day_num, game_code, team_sel, verbose)
#'
#' @param data Play-by-play prepared data from a given game.
#' @param day_num Day number.
#' @param game_code Game code.
#' @param team_sel One of the teams' names involved in the game.
#' @param verbose Logical. Decide if information of the computations
#' must be provided or not.
#'
#' @return
#' Data frame. Each row is a different lineup. This is the meaning of the
#' columns that might not be explanatory by themselves:
#' \itemize{
#' \strong{time_in}: Time point when that lineup starts playing together.
#' \strong{time_out}: Time point when that lineup stops playing together
#' (because there is a substitution).
#' \strong{num_players}: Number of players forming the lineup (must be 5 in this case).
#' \strong{time_seconds}: Total of seconds that the lineup played.
#' \strong{diff_points}: Game score in the time that the lineup played.
#' \strong{plus_minus}: Plus/minus achieved by the lineup. This is the difference
#' between the game score of the current lineup and that of the previous one.
#' \strong{plus_minus_poss}: Plus/minus per possession.
#' }
#'
#' @note
#' A possession lasts 24 seconds in the ACB league.
#'
#' @author
#' Guillermo Vinue
#'
#' @examples
#' library(dplyr)
#' df0 <- acb_vbc_cz_pbp_2223
#'
#' day_num <- unique(acb_vbc_cz_pbp_2223$day)
#' game_code <- unique(acb_vbc_cz_pbp_2223$game_code)
#'
#' acb_games_2223_sl <- acb_vbc_cz_sl_2223 %>%
#' filter(period == "1C")
#'
#' df1 <- do_prepare_data(df0, day_num,
#' acb_games_2223_sl, acb_games_2223_info,
#' game_code)
#'
#' df2 <- do_lineup(df1, day_num, game_code, "Valencia Basket", FALSE)
#' #df2
#'
#' @importFrom lubridate period_to_seconds
#' @importFrom stringr str_count
#'
#' @export
do_lineup <- function(data, day_num, game_code, team_sel, verbose) {
team <- action <- player <- period <- time_in <- time_out <- plus_minus <- NULL
local_team <- unique(data$local)
data1 <- data %>%
filter(team == team_sel)
# Add the last row of games' data to have the real final game score
# in case it is not available:
last_row_game <- data[nrow(data),]
last_row_game$time_point <- "00:00"
last_row_game$player <- NA
last_row_game$action <- NA
last_row_game$team <- team_sel
data1 <- bind_rows(data1, last_row_game)
# Get players out:
pl_out <- c(1, which(data1$action == "Sale de la pista"), nrow(data1))
data_res <- data.frame()
for (i in 1:(length(pl_out) - 1)) {
if (verbose) {
cat("ITERATION:", i, "\n")
cat("VALUES:", pl_out, "\n")
}
if (i == 1) {
data2 <- data1 %>%
slice(pl_out[i]:pl_out[i + 1])
lineup <- data2 %>%
filter(action != "Sale de la pista") %>%
filter(player != team_sel) %>%
distinct(player) %>%
pull()
# For cases where brothers receive the same label.
if (length(lineup) == 4) {
if (team_sel == "Monbus Obradoiro") {
lineup <- c(lineup, "Scrubb")
}else if (team_sel == "R\\u00edo Breog\\u00e1n") {
lineup <- c(lineup, "Quintela")
}
}
nr <- nrow(data2)
time_seconds <- period_to_seconds(ms(data2$time_point[1])) - period_to_seconds(ms(data2$time_point[nr]))
diff_points <- ifelse(local_team == team_sel,
data2$local_score[nr] - data2$visitor_score[nr],
data2$visitor_score[nr] - data2$local_score[nr])
data_save <- data.frame(period = data2$period[1],
time_in = data2$time_point[1],
time_out = data2$time_point[nr],
lineup = paste(sort(lineup), collapse = ", "),
time_seconds = time_seconds,
diff_points = diff_points) %>%
mutate(lineup = as.character(lineup))
}else{
if ((pl_out[i] - 1) == pl_out[i - 1]) {
next
}
out_index <- pl_out[i]
if (verbose) {
cat("OUT INDEX:", out_index, "\n")
}
next_index <- NA
# Case of just one replacement:
if ((pl_out[i] + 1) == pl_out[i + 1]) {
out_index <- pl_out[i] + 0:1
next_index <- pl_out[i + 2]
}
# Case of two replacements at the same time:
if (!is.na(pl_out[i + 2]) & (pl_out[i] + 2) == pl_out[i + 2]) {
out_index <- pl_out[i] + 0:2
next_index <- pl_out[i + 3]
}
# Case of three replacements at the same time:
if (!is.na(pl_out[i + 3]) & (pl_out[i] + 3) == pl_out[i + 3]) {
out_index <- pl_out[i] + 0:3
next_index <- pl_out[i + 4]
}
# Case of four replacements at the same time:
if (!is.na(pl_out[i + 4]) & (pl_out[i] + 4) == pl_out[i + 4]) {
out_index <- pl_out[i] + 0:4
next_index <- pl_out[i + 5]
}
# Case of five replacements at the same time:
if (!is.na(pl_out[i + 5]) & (pl_out[i] + 5) == pl_out[i + 5]) {
out_index <- pl_out[i] + 0:5
next_index <- pl_out[i + 6]
}
# No replacement:
if (is.na(next_index)) {
next_index <- pl_out[i + 1]
}
player_out <- data1 %>%
slice(out_index) %>%
pull(player)
data2 <- data1 %>%
slice(out_index[1]:next_index)
player_in <- data2 %>%
filter(action == "Entra a pista") %>%
pull(player)
lineup <- c(setdiff(lineup, player_out), player_in)
# For cases where brothers receive the same label.
if (length(lineup) == 4) {
if (team_sel == "Monbus Obradoiro") {
lineup <- c(lineup, "Scrubb")
}else if (team_sel == "R\\u00edo Breog\\u00e1n") {
lineup <- c(lineup, "Quintela")
}
}
nr <- nrow(data2)
nper <- unique(data2$period)
if (grepl("PR", data2$period[nr]) & data2$period[nr] != data2$period[nr - 1]) {
data2$time_point[nr] <- "00:00"
}
if (length(nper) == 1) {
time_seconds <- period_to_seconds(ms(data2$time_point[1])) - period_to_seconds(ms(data2$time_point[nr]))
}else{
# For the case when the replacement has been done in the next period.
# See for example i=13 of Real Madrid in 103350.
# The same lineup is between "04:25" of 3C and "08:05" of 4C.
if (grepl("PR", data2$period[nr]) & data2$period[nr] != data2$period[nr - 1]) {
time_seconds <- period_to_seconds(ms(data2$time_point[1])) - period_to_seconds(ms(data2$time_point[nr]))
}else{
aux <- period_to_seconds(ms("10:00") - ms(data2$time_point[nr]))
time_seconds <- period_to_seconds(ms(data2$time_point[1])) + aux
}
}
diff_points <- ifelse(local_team == team_sel,
data2$local_score[nr] - data2$visitor_score[nr],
data2$visitor_score[nr] - data2$local_score[nr])
data_save <- data.frame(period = data2$period[1],
time_in = data2$time_point[1],
time_out = data2$time_point[nr],
lineup = paste(sort(lineup), collapse = ", "),
time_seconds = time_seconds,
diff_points = diff_points) %>%
mutate(lineup = as.character(lineup))
} # End of i>1 iteration.
if (verbose) {
cat("LINEUP:", data_save$lineup, "\n")
}
data_res <- bind_rows(data_res, data_save)
} # End of i iteration.
data_res <- data_res %>%
mutate(team = team_sel, .before = period) %>%
mutate(game_code = game_code, .before = team) %>%
mutate(day = day_num, .before = game_code) %>%
mutate(num_players = str_count(lineup, ",") + 1, .after = lineup) %>%
mutate(plus_minus = diff_points - lag(diff_points)) %>%
mutate(lineup_type = "quintet")
data_res$plus_minus[1] <- data_res$diff_points[1]
data_res$time_out[which(data_res$time_out == "10:00")] <- "00:00"
data_res <- data_res %>%
filter(!(time_in == "00:00" & time_out == "00:00")) %>%
filter(!(time_in == "00:00" & time_out == "05:00"))
# Plus/Minus per possession.
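  # (The plus/minus is rescaled to a 24-second possession; when time_seconds
  # is 0, the raw plus/minus is kept to avoid dividing by zero.)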
data_res1 <- data_res %>%
mutate(plus_minus_poss = ifelse(time_seconds == 0,
plus_minus,
round((24 * plus_minus) / time_seconds, 2)),
.after = plus_minus)
return(data_res1)
}
| /scratch/gouwar.j/cran-all/cranData/BAwiR/R/do_lineup.R |
#' Data frame for the nationalities map
#'
#' @aliases do_map_nats
#'
#' @description
#' This function prepares the data frame with the nationalities
#' to be mapped with \code{\link{get_map_nats}}. It is used inside it.
#
#' @usage
#' do_map_nats(df_stats)
#'
#' @param df_stats Data frame with the statistics and the
#' corrected nationalities.
#'
#' @return
#' List with the following elements:
#' \itemize{
#' \item df_all: Data frame with each country, its latitudes and
#' longitudes and whether it must be coloured or not (depending on
#' if there are players from that country).
#' \item countr_num: Vector with the countries from where there are
#' players and the number of them.
#' \item leng: Number of countries in the world.
#' }
#'
#' @author
#' Guillermo Vinue
#'
#' @seealso
#' \code{\link{get_map_nats}}
#'
#' @importFrom rworldmap getMap
#'
#' @export
do_map_nats <- function(df_stats){
countr_num <- table(df_stats$Nationality)
countr_num <- countr_num[countr_num != 0]
worldMap <- getMap()
leng <- length(worldMap$NAME)
df_all <- data.frame()
for (i in 1:leng) {
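    # Coordinates of the first ring of each country polygon (sp S4 slots):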
df <- data.frame(worldMap@polygons[[i]]@Polygons[[1]]@coords)
name_reg <- as.character(worldMap$NAME[i])
df$region <- name_reg
if (name_reg %in% unique(df_stats$Nationality)) {
df$color_region <- "Yes"
}else{
df$color_region <- "No"
}
colnames(df) <- list("long", "lat", "region", "color_region")
df_all <- rbind(df_all, df)
}
return(list(df_all = df_all, countr_num = countr_num, leng = leng))
} | /scratch/gouwar.j/cran-all/cranData/BAwiR/R/do_map_nats.R |
#' Compute offensive fouls
#'
#' @aliases do_offensive_fouls
#'
#' @description
#' Compute how many offensive fouls has committed or received every player.
#'
#' @usage
#' do_offensive_fouls(data, type)
#'
#' @param data Play-by-play data.
#' @param type Either 'comm' (for committed) or 'rec' (for received).
#'
#' @return
#' Data frame with the following columns:
#' \itemize{
#' \strong{team}: Name of the team.
#' \strong{player}: Name of the player.
#' \strong{n_offensive_fouls_x}: Number of offensive fouls.
#' }
#'
#' @author
#' Guillermo Vinue
#'
#' @examples
#' df01 <- do_offensive_fouls(acb_vbc_cz_pbp_2223, "comm")
#' #df01
#'
#' df02 <- do_offensive_fouls(acb_vbc_cz_pbp_2223, "rec")
#' #df02
#'
#' @export
do_offensive_fouls <- function(data, type) {
action <- team <- player <- n_offensive_fouls_given <- NULL
if (type == "comm") {
df1 <- data %>%
filter(action == "Falta en Ataque") %>%
group_by(team, player) %>%
summarise(n_offensive_fouls_given = n()) %>%
ungroup() %>%
arrange(desc(n_offensive_fouls_given))
}
if (type == "rec") {
pos_att <- grep("Falta en Ataque", data$action)
#df0 <- data %>%
# slice(pos_att + 1)
#unique(df0$action) # It must only appear 'Pérdida'
df0 <- data %>%
slice(pos_att + 2)
#unique(df0$action) # It must only appear 'Falta Recibida'
df1 <- df0 %>%
group_by(team, player) %>%
summarise(n = n()) %>%
ungroup() %>%
arrange(desc(n)) %>%
rename(n_offensive_fouls_got = n)
}
return(df1)
}
| /scratch/gouwar.j/cran-all/cranData/BAwiR/R/do_offensive_fouls.R |
#' Compute when possessions start
#'
#' @aliases do_possession
#'
#' @description
#' Compute when the possession starts for each team during each period of a game.
#'
#' @usage
#' do_possession(data, period_sel)
#'
#' @param data Play-by-play prepared data from a given game.
#' @param period_sel Period of interest. Options can be "xC", where x=1,2,3,4.
#'
#' @return
#' Data frame. This is the meaning of the columns that might not be
#' explanatory by themselves:
#' \itemize{
#' \strong{time_start}: Time point when the action starts.
#' \strong{time_end}: Time point when the action ends.
#' \strong{poss_time}: Duration of the possession.
#' \strong{possession}: Indicates when the possession starts. This is encoded
#' with the Spanish word \emph{inicio} (\emph{start}, in English).
#' \strong{points}: Number of points scored from a given action.
#' }
#'
#' @note
#' 1. A possession lasts 24 seconds in the ACB league.
#'
#' 2. Actions are given in Spanish. A bilingual basketball vocabulary (Spanish/English)
#' is provided in \url{https://www.uv.es/vivigui/docs/basketball_dictionary.xlsx}.
#'
#' 3. The \strong{game_code} column allows us to detect the source website, for example,
#' \url{https://jv.acb.com/es/103389/jugadas}.
#'
#' @author
#' Guillermo Vinue
#'
#' @examples
#' library(dplyr)
#' df0 <- acb_vbc_cz_pbp_2223
#'
#' day_num <- unique(acb_vbc_cz_pbp_2223$day)
#' game_code <- unique(acb_vbc_cz_pbp_2223$game_code)
#'
#' acb_games_2223_sl <- acb_vbc_cz_sl_2223 %>%
#' dplyr::filter(period == "1C")
#'
#' df1 <- do_prepare_data(df0, day_num,
#' acb_games_2223_sl, acb_games_2223_info,
#' game_code)
#'
#' df2 <- do_possession(df1, "1C")
#' #df2
#'
#' @importFrom dplyr lag lead summarize
#'
#' @export
do_possession <- function(data, period_sel) {
team <- action <- player <- period <- time_point <- block <- NULL
time_start <- time_end <- poss_time <- possession <- NULL
# Two main situations with start possession:
# First one:
data1 <- data %>%
filter(!action %in% c("Quinteto inicial", "Salto perdido", "Tiempo Muerto",
"Sale de la pista", "Entra a pista")) %>%
filter(period == period_sel) %>%
filter(!(player == team & action != "Rebote Defensivo")) %>%
mutate(possession = ifelse(action %in% c("Salto ganado", "Rebote Defensivo", "Recuperaci\\u00f3n"),
"inicio", NA),
.after = action)
if (period_sel != "1C") {
data1$possession[1] <- "inicio"
}
# Reverse situations where some type of Falta Personal is before Falta Recibida.
# This causes errors in the computation of the time possession.
wh_fo <- which(grepl("Falta Personal", data1$action) & data1$team == lag(data1$team))
while (length(wh_fo) != 0) {
for (i in 1:length(wh_fo)) {
# 24 103158 1C wh_fo[4] is 97 (the penultimate row), so wh_fo[i] + 2 does not exist:
if ((wh_fo[i] + 2) > nrow(data1)) {
data1 <- data1[c(1:(wh_fo[i] - 1), wh_fo[i] + 1, wh_fo[i]), ]
}else{
data1 <- data1[c(1:(wh_fo[i] - 1), wh_fo[i] + 1, wh_fo[i], (wh_fo[i] + 2):nrow(data1)), ]
}
}
wh_fo <- which(grepl("Falta Personal", data1$action) & data1$team == lag(data1$team))
}
# Second one:
wh <- which(data1$action == "Asistencia" & data1$team != lead(data1$team))
data1$possession[wh + 1] <- "inicio"
# Other situations with start possession:
# si1 : Check if any dunk came after no assist, for example,
# when finishing an offensive rebound with a direct dunk.
si1 <- which(data1$action == "Mate" & lead(data1$action) != "Asistencia") + 1
si2 <- which(grepl("Tiro de", data1$action) & data1$team != lag(data1$team) & lag(data1$action) != "Falta Personal")
si3 <- which(grepl("Triple", data1$action) & data1$team != lag(data1$team) & lag(data1$action) != "Falta Personal")
si4 <- which(grepl("Tiro Libre", data1$action) & data1$team != lead(data1$team)) + 1
si5 <- which(data1$action == "P\\u00e9rdida" & lead(data1$action) != "Recuperaci\\u00f3n") + 1
si6 <- which(data1$action == "P\\u00e9rdida" & data1$team != lag(data1$team))
si7 <- which(data1$action == "Falta Personal (2TL)" & data1$team == lag(data1$team)) + 1
si8 <- which(data1$action == "Falta en Ataque" & data1$team != lag(data1$team))
si9 <- which(data1$action == "Falta Recibida" & !grepl("Falta Personal", lag(data1$action)))
data1$possession[c(si1, si2, si3, si4, si5, si6, si7, si8, si9)] <- "inicio"
# Correct some inaccuracies, when they are not in the first row:
data1$possession[which(data1$action == "Tiro Libre fallado" & data1$possession == "inicio")] <- NA
data1$possession[which(data1$action == "Falta Personal (1TL)" & data1$possession == "inicio")] <- NA
if (is.na(data1$possession[1])) {
data1$possession[1] <- "inicio"
}
# Create time end and start to compute the possession time:
data2 <- data1 %>%
mutate(time_start = lag(time_point), .before = time_point) %>%
rename(time_end = time_point)
data2$time_start[1] <- "10:00"
data2$time_end[nrow(data2)] <- "00:00"
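  # (Quarters run from 10:00 down to 00:00, so both edges are pinned explicitly.)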
# Add for each possession a label block to be able to compute the possession time:
ini <- which(data2$possession == "inicio")
block_v <- c()
for (i in 1:(length(ini) - 1)) {
block_v <- c(block_v, rep(ini[i], ini[i + 1] - ini[i]))
}
# If 'inicio' is in the last row of data2:
if (ini[length(ini)] == nrow(data2)) {
block_v <- c(block_v, ini[length(ini)])
}else{
# If not, repeat the needed value as many times as needed. For example, if
# 'inicio' is in the row 84 and data2 has 85 rows, we need to create two 84,
# as 85 - 84 + 1:
reps_need <- nrow(data2) - ini[length(ini)] + 1
block_v <- c(block_v, rep(ini[length(ini)], reps_need))
}
# Note: The block numbers refer to the rows where 'inicio' were located.
# For example, if the second 'inicio' label was in the fourth row,
# the second block will be labelled with a 4.
data3 <- data2 %>%
mutate(block = block_v, .after = period)
# Compute the possession times:
data3_time <- data3 %>%
group_by(block) %>%
summarize(poss_time = period_to_seconds(ms(time_start[1])) -
period_to_seconds(ms(time_end[n()]))) %>%
ungroup()
data4 <- left_join(data3, data3_time, by = "block") %>%
select(period, block, time_start, time_end, poss_time, everything())
# In data4, poss_time goes beyond 24 either because offensive rebounds
# or because fouls received or because transcription typos.
# 103389 1C: In data1 between data1$time_point[84] and data1$time_point[85]
# goes 38 seconds! --> "02:14" and "01:36" ; Also 2C: 08:00 and 07:33
# Add points:
data5 <- data4 %>%
mutate(points = case_when(
action == "Tiro Libre anotado" ~ 1,
action == "Mate" ~ 2,
action == "Tiro de 2 anotado" ~ 2,
action == "Triple anotado" ~ 3),
.after = possession)
return(data5)
}
| /scratch/gouwar.j/cran-all/cranData/BAwiR/R/do_possession.R |
#' Prepare ACB play-by-play data
#'
#' @aliases do_prepare_data
#'
#' @description
#' Prepare the ACB play-by-play data to be analyzed in further steps.
#' It involves correcting some inconsistencies and filtering some
#' unnecessary information.
#'
#' @usage
#' do_prepare_data(data, day_num, data_gsl, data_ginfo, game_code_excel)
#'
#' @param data Source play-by-play data from a given game.
#' @param day_num Day number.
#' @param data_gsl Games' starting lineups.
#' @param data_ginfo Games' basic information.
#' @param game_code_excel Game code.
#'
#' @return
#' Data frame. Each row represents the action happened in the game. It has
#' associated a player, a time point and the game score. The \strong{team}
#' column refers to the team to which the player belongs.
#'
#' @note
#' 1. Actions are given in Spanish. A bilingual basketball vocabulary (Spanish/English)
#' is provided in \url{https://www.uv.es/vivigui/docs/basketball_dictionary.xlsx}.
#'
#' 2. The \strong{game_code} column allows us to detect the source website, for example,
#' \url{https://jv.acb.com/es/103389/jugadas}.
#'
#' @author
#' Guillermo Vinue
#'
#' @examples
#' library(dplyr)
#' df0 <- acb_vbc_cz_pbp_2223
#'
#' day_num <- unique(acb_vbc_cz_pbp_2223$day)
#' game_code <- unique(acb_vbc_cz_pbp_2223$game_code)
#'
#' acb_games_2223_sl <- acb_vbc_cz_sl_2223 %>%
#' filter(period == "1C")
#'
#' df1 <- do_prepare_data(df0, day_num,
#' acb_games_2223_sl, acb_games_2223_info,
#' game_code)
#' #df1
#'
#' @export
do_prepare_data <- function(data, day_num, data_gsl, data_ginfo, game_code_excel) {
local_score <- visitor_score <- day <- game_code <- game <- action <- NULL
# Correct names:
data$player[which(data$player == "Fern\\u00e1ndez_Juan")] <- "Fern\\u00e1ndez"
data$player[which(data$player == "Fern\\u00e1ndez_J")] <- "Fern\\u00e1ndez"
data$player[which(data$player == "Garc\\u00eda_J")] <- "Garc\\u00eda"
data$player[which(data$player == "Rodr\\u00edguez_S")] <- "Rodr\\u00edguez"
data$player[which(data$player == "Garc\\u00eda_S")] <- "Garc\\u00eda"
data$player[which(data$player == "D\\u00edaz_A")] <- "D\\u00edaz"
data$player[which(data$player == "Diop_I")] <- "Diop"
data$player[which(data$player == "Diop_K")] <- "Diop"
# ----
data1 <- data %>%
mutate(local_score = as.numeric(local_score),
visitor_score = as.numeric(visitor_score))
data1_gsl <- data_gsl %>%
filter(day == day_num, game_code == game_code_excel)
data2 <- bind_rows(data1_gsl, data1)
# Join game information:
data3 <- left_join(data2, data_ginfo, by = c("game_code", "day"))
# Process data:
data4 <- data3 %>%
mutate(local = gsub("-.*", "", game)) %>%
mutate(visitor = gsub(".*-", "", game)) %>%
select(-game) %>%
filter(!action %in% c("Instant Replay", "Tiempo Muerto de TV",
"IR - Challenge entrenador local", "IR - Challenge entrenador visitante",
"IR - Revisi\\u00f3n del tipo de falta", "IR - Revisi\\u00f3n reloj de posesi\\u00f3n",
"IR - Revisi\\u00f3n acci\\u00f3n jugador",
"IR - Revisi\\u00f3n \\u00faltimo jugador en tocar bal\\u00f3n",
"IR - Revisi\\u00f3n por enfrentamiento", "IR - Revisi\\u00f3n de una violaci\\u00f3n",
"IR - Revisi\\u00f3n del reloj de partido", "IR - Revisi\\u00f3n de la validez de una canasta",
"IR - Comprobaci\\u00f3n del tipo de tiro convertido"))
return(data4)
}
| /scratch/gouwar.j/cran-all/cranData/BAwiR/R/do_prepare_data.R |
#' Prepare data for the offensive rebounds computation
#'
#' @aliases do_prepare_data_or
#'
#' @description
#' The computation of the scoring after offensive rebounds requires a
#' specifical data preparation. This function does this data processing.
#'
#' @usage
#' do_prepare_data_or(data, rm_overtime, data_ginfo)
#'
#' @param data Source play-by-play data from a given game.
#' @param rm_overtime Logical. Decide to remove overtimes or not.
#' @param data_ginfo Games' basic information.
#'
#' @return
#' Data frame. Each row represents the action happened in the game.
#' The \strong{points} column is added to transform the action
#' that finished in scoring into numbers.
#'
#' @note
#' 1. Actions are given in Spanish. A bilingual basketball vocabulary (Spanish/English)
#' is provided in \url{https://www.uv.es/vivigui/docs/basketball_dictionary.xlsx}.
#'
#' 2. The \strong{game_code} column allows us to detect the source website, for example,
#' \url{https://jv.acb.com/es/103389/jugadas}.
#'
#' @author
#' Guillermo Vinue
#'
#' @seealso
#' \code{\link{do_reb_off_success}}
#'
#' @examples
#' df0 <- acb_vbc_cz_pbp_2223
#'
#' df1 <- do_prepare_data_or(df0, TRUE, acb_games_2223_info)
#' #df1
#'
#' @importFrom dplyr case_when
#'
#' @export
do_prepare_data_or <- function(data, rm_overtime, data_ginfo) {
period <- game <- action <- NULL
# Correct names:
data$player[which(data$player == "Fern\\u00e1ndez_Juan")] <- "Fern\\u00e1ndez"
data$player[which(data$player == "Fern\\u00e1ndez_J")] <- "Fern\\u00e1ndez"
data$player[which(data$player == "Garc\\u00eda_J")] <- "Garc\\u00eda"
data$player[which(data$player == "Rodr\\u00edguez_S")] <- "Rodr\\u00edguez"
data$player[which(data$player == "Garc\\u00eda_S")] <- "Garc\\u00eda"
data$player[which(data$player == "D\\u00edaz_A")] <- "D\\u00edaz"
data$player[which(data$player == "Diop_I")] <- "Diop"
data$player[which(data$player == "Diop_K")] <- "Diop"
# ----
if (rm_overtime) {
data <- data %>%
filter(!grepl("PR", period)) %>%
mutate(period = as.character(period))
}
# Join with games' information to find out later on which team was local
# and which team was visitor:
data1 <- left_join(data, data_ginfo, by = c("game_code", "day"))
# Process data:
data2 <- data1 %>%
mutate(local = gsub("-.*", "", game)) %>%
mutate(visitor = gsub(".*-", "", game)) %>%
select(-game) %>%
filter(!action %in% c("Entra a pista", "Sale de la pista", "Instant Replay",
"Tiempo Muerto", "Tiempo Muerto de TV",
"IR - Challenge entrenador local", "IR - Challenge entrenador visitante",
"IR - Revisi\\u00f3n del tipo de falta", "IR - Revisi\\u00f3n reloj de posesi\\u00f3n",
"IR - Revisi\\u00f3n acci\\u00f3n jugador",
"IR - Revisi\\u00f3n \\u00faltimo jugador en tocar bal\\u00f3n",
"IR - Revisi\\u00f3n por enfrentamiento", "IR - Revisi\\u00f3n de una violaci\\u00f3n",
"IR - Revisi\\u00f3n del reloj de partido", "IR - Revisi\\u00f3n de la validez de una canasta",
"IR - Comprobaci\\u00f3n del tipo de tiro convertido")) %>%
mutate(points = case_when(
action == "Tiro Libre anotado" ~ 1,
action == "Mate" ~ 2,
action == "Tiro de 2 anotado" ~ 2,
action == "Triple anotado" ~ 3))
return(data2)
}
| /scratch/gouwar.j/cran-all/cranData/BAwiR/R/do_prepare_data_or.R |
#' Prepare data for the timeouts computation
#'
#' @aliases do_prepare_data_to
#'
#' @description
#' The computation of the successful timeouts requires a specific data
#' preparation. This function does this data processing.
#'
#' @usage
#' do_prepare_data_to(data, rm_overtime, data_ginfo, data_gcoach)
#'
#' @param data Source play-by-play data from a given game.
#' @param rm_overtime Logical. Decide to remove overtimes or not.
#' @param data_ginfo Games' basic information.
#' @param data_gcoach Coach of each team in each day.
#'
#' @return
#' Data frame. Each row represents the action happened in the game.
#' The \strong{team} column refers in this case both to the team to
#' which the player belongs and the coach of that team. In addition,
#' a \strong{points} column is added to transform the action
#' that finished in scoring into numbers.
#'
#' @note
#' 1. Actions are given in Spanish. A bilingual basketball vocabulary (Spanish/English)
#' is provided in \url{https://www.uv.es/vivigui/docs/basketball_dictionary.xlsx}.
#'
#' 2. The \strong{game_code} column allows us to detect the source website, for example,
#' \url{https://jv.acb.com/es/103389/jugadas}.
#'
#' @author
#' Guillermo Vinue
#'
#' @seealso
#' \code{\link{do_time_out_success}}
#'
#' @examples
#' df0 <- acb_vbc_cz_pbp_2223
#'
#' df1 <- do_prepare_data_to(df0, TRUE, acb_games_2223_info, acb_games_2223_coach)
#' #df1
#'
#' @importFrom tidyr unite
#'
#' @export
do_prepare_data_to <- function(data, rm_overtime, data_ginfo, data_gcoach) {
period <- game <- action <- team <- player <- coach <- NULL
if (rm_overtime) {
data <- data %>%
filter(!grepl("PR", period)) %>%
mutate(period = as.character(period))
}
# Join with games' information to find out later on which team was local
# and which team was visitor:
data1 <- left_join(data, data_ginfo, by = c("game_code", "day"))
# Extract all unique actions:
#sort(unique(data1$action))
#data1 %>%
# distinct(action) %>%
# arrange(action) %>%
# pull()
# "Mate" indicates the same as "Tiro de 2 anotado", i.e., two points scored.
# Process data:
data2 <- data1 %>%
mutate(local = gsub("-.*", "", game)) %>%
mutate(visitor = gsub(".*-", "", game)) %>%
select(-game) %>%
filter(!action %in% c("Entra a pista", "Sale de la pista", "Instant Replay", "Tiempo Muerto de TV",
"IR - Challenge entrenador local", "IR - Challenge entrenador visitante",
"IR - Revisi\\u00f3n del tipo de falta", "IR - Revisi\\u00f3n reloj de posesi\\u00f3n",
"IR - Revisi\\u00f3n acci\\u00f3n jugador",
"IR - Revisi\\u00f3n \\u00faltimo jugador en tocar bal\\u00f3n",
"IR - Revisi\\u00f3n por enfrentamiento", "IR - Revisi\\u00f3n de una violaci\\u00f3n",
"IR - Revisi\\u00f3n del reloj de partido", "IR - Revisi\\u00f3n de la validez de una canasta",
"IR - Comprobaci\\u00f3n del tipo de tiro convertido")) %>%
mutate(points = case_when(
action == "Tiro Libre anotado" ~ 1,
action == "Mate" ~ 2,
action == "Tiro de 2 anotado" ~ 2,
action == "Triple anotado" ~ 3))
# Check that points indeed sum the final scores.
#data2 %>%
# group_by(game_code, team) %>%
# summarise(team_points = sum(points, na.rm = TRUE)) %>%
# ungroup()
# Join the coach's name for each team:
data3 <- left_join(data2, data_gcoach, by = c("game_code", "day", "team"))
data4 <- data3 %>%
unite(team, c("team", "coach"), sep = "_", remove = FALSE) %>%
mutate(player = ifelse(action == "Tiempo Muerto",
paste0(player, "_", coach),
player)) %>%
select(-coach)
return(data4)
}
| /scratch/gouwar.j/cran-all/cranData/BAwiR/R/do_prepare_data_to.R |
#' Processing of the ACB website play-by-play data
#'
#' @aliases
#' do_process_acb_pbp
#'
#' @description
#' This function disentangles the play-by-play data coming from the ACB website and
#' creates a common data structure in R.
#'
#' @usage
#' do_process_acb_pbp(game_elem, day, game_code, period, acb_shields, verbose)
#'
#' @param game_elem Character with the tangled play-by-play data.
#' @param day Day of the game.
#' @param game_code Game code.
#' @param period Period of the game.
#' @param acb_shields Data frame with the links to the shields of the ACB teams.
#' @param verbose Logical to display processing information.
#'
#' @return
#' Data frame with eight columns:
#' \itemize{
#' \item period: Period of the game.
#' \item time_point: Time point when the basketball action happens.
#' \item player: Player who performs the action.
#' \item action: Basketball action.
#' \item local_score: Local score at that time point.
#' \item visitor_score: Visitor score at that time point.
#' \item day: Day of the game.
#' \item game_code: Game code.
#' }
#'
#' @note
#' 1. Actions are given in Spanish. A bilingual basketball vocabulary (Spanish/English)
#' is provided in \url{https://www.uv.es/vivigui/docs/basketball_dictionary.xlsx}.
#'
#' 2. The \strong{game_code} column allows us to detect the source website, for example,
#' \url{https://jv.acb.com/es/103389/jugadas}.
#'
#' @author
#' Guillermo Vinue
#'
#' @examples
#' \dontrun{
#' # Load packages required:
#' library(RSelenium)
#'
#' # Provide the day and game code:
#' day <- "24"
#' game_code <- "103170"
#'
#' # Open an Internet server:
#' rD <- rsDriver(browser = "firefox", chromever = NULL)
#'
#' # Follow this procedure on the server:
#' # 1. Copy and paste the game link https://jv.acb.com/es/103170/jugadas
#' # 2. Click on each period, starting with 1C.
#' # 3. Scroll down to the first row of data.
#' # 4. Go back to R and run the following code:
#'
#' # Set the remote driver:
#' remDr <- rD$client
#'
#' # Get the play-by-play data:
#' game_elem <- remDr$getPageSource()[[1]]
#'
#' # Close the client and the server:
#' remDr$close()
#' rD$server$stop()
#'
#' period <- "1C"
#' data_game <- do_process_acb_pbp(game_elem, day, game_code,
#' period, acb_shields, FALSE)
#' }
#'
#' @importFrom qdapRegex ex_between
#' @importFrom stringr str_match_all str_split
#'
#' @export
do_process_acb_pbp <- function(game_elem, day, game_code, period, acb_shields, verbose) {
# "Final del Partido", "Inicio del Partido"
game_data <- ex_between(game_elem, "Final de Periodo", "Inicio de Periodo")[[1]]
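  # ex_between() extracts the chunks of page source lying between the
  # period-end ("Final de Periodo") and period-start ("Inicio de Periodo") markers.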
  remove_obs <- if (period != "4C") 1 else c(1, 2)   # ifelse() would drop the second element of c(1, 2)
game_data_per <- str_split(game_data, period)[[1]][-remove_obs]
# -1 because the last observation is the one indicating the start of the period.
df0 <- data.frame(matrix(NA, nrow = length(game_data_per) - 1, ncol = 8))
colnames(df0) <- c("period", "time_point", "player", "action", "local_score", "visitor_score", "day", "game_code")
# Get the data for each action:
for (i in 1:nrow(df0)) {
if (verbose) {
cat("ITERATION:", i, "\n")
}
game_data_per_i <- str_match_all(game_data_per[i], ">\\s*(.*?)\\s*</span>")[[1]][,1]
game_data_per_ij <- c()
for (j in 1:length(game_data_per_i)) {
game_data_per_ij <- c(game_data_per_ij,
gsub(">", "", gsub("<.*", "", gsub(".*\\\">", "", game_data_per_i[j]))))
}
if (grepl("\\(", game_data_per_ij[4])) {
game_data_per_ij <- game_data_per_ij[-4]
}
if (length(game_data_per_ij) < 5) {
if (game_data_per_ij[2] %in% c("Instant Replay", "Tiempo Muerto de TV",
"IR - Challenge entrenador local", "IR - Challenge entrenador visitante",
"IR - Revisi\\u00f3n del tipo de falta", "IR - Revisi\\u00f3n reloj de posesi\\u00f3n",
"IR - Revisi\\u00f3n acci\\u00f3n jugador",
"IR - Revisi\\u00f3n \\u00faltimo jugador en tocar bal\\u00f3n",
"IR - Revisi\\u00f3n por enfrentamiento", "IR - Revisi\\u00f3n de una violaci\\u00f3n",
"IR - Revisi\\u00f3n del reloj de partido", "IR - Revisi\\u00f3n de la validez de una canasta",
"IR - Comprobaci\\u00f3n del tipo de tiro convertido")) {
if (i == 1) {
# For the case when the first play of the quarter is an instant replay, see for example the second
# quarter of http://jv.acb.com/es/103363/jugadas
game_data_per_ij <- c(game_data_per_ij[1], NA, game_data_per_ij[2], NA, NA)
}else{
game_data_per_ij <- c(game_data_per_ij[1], NA, game_data_per_ij[2],
df0[i - 1, "local_score"], df0[i - 1, "visitor_score"])
}
}else{
link_team <- which(sapply(acb_shields$team_link, grepl, game_data_per_i[2]))
game_data_per_ij <- c(game_data_per_ij[1], acb_shields$team[link_team], game_data_per_ij[2:4])
}
}
df0[i,] <- c(period, game_data_per_ij, day, game_code)
}
df1 <- df0[nrow(df0):1, ]
df1$action <- gsub("1TL", "(1TL)", df1$action)
df1$action <- gsub("2TL", "(2TL)", df1$action)
df1$action <- gsub("3TL", "(3TL)", df1$action)
return(df1)
}
| /scratch/gouwar.j/cran-all/cranData/BAwiR/R/do_process_acb_pbp.R |
#' Check if scoring after offensive rebounds
#'
#' @aliases do_reb_off_success
#'
#' @description
#' For each team and player, locate the position of offensive rebounds
#' and check if they resulted in scoring points.
#'
#' @usage
#' do_reb_off_success(data, day_num, game_code, team_sel, verbose)
#'
#' @param data Play-by-play prepared data from a given game.
#' @param day_num Day number.
#' @param game_code Game code.
#' @param team_sel One of the teams' names involved in the game.
#' @param verbose Logical. Should information about the computations
#' be printed?
#'
#' @return
#' List with two data frames, one for the results for the team (\strong{stats_team})
#' and the other for the players (\strong{stats_player}).
#' The team data frame shows the number of offensive rebounds, the number of those
#' that finished in scoring (and the percentage associated) and the total of points
#' scored.
#' The player data frame shows the player who grabbed the offensive rebound, the
#' player who scored and how many points.
#'
#' @author
#' Guillermo Vinue
#'
#' @seealso
#' \code{\link{do_prepare_data_or}}
#'
#' @examples
#' df0 <- acb_vbc_cz_pbp_2223
#'
#' day_num <- unique(acb_vbc_cz_pbp_2223$day)
#' game_code <- unique(acb_vbc_cz_pbp_2223$game_code)
#'
#' df1 <- do_prepare_data_or(df0, TRUE, acb_games_2223_info)
#'
#' df2 <- do_reb_off_success(df1, day_num, game_code, "Valencia Basket", FALSE)
#' #df2
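#'
#' # The analogous call for the other team involved in this game:
#' # df3 <- do_reb_off_success(df1, day_num, game_code, "Casademont Zaragoza", FALSE)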
#'
#' @importFrom tibble tibble
#'
#' @export
do_reb_off_success <- function(data, day_num, game_code, team_sel, verbose) {
  # Locate the position of offensive rebounds:
to_pos <- which(data$team == team_sel & data$action == "Rebote Ofensivo")
# For teams:
# To accumulate the total points scored after an offensive rebound:
points_scored <- 0
# To accumulate the times that an offensive rebound generated some points, i.e., was successful:
times_succ <- 0
# For players:
# To accumulate the total points scored after an offensive rebound:
points_scored_pl <- 0
stats_player <- data.frame()
  for (i in seq_along(to_pos)) {
to_pos_after <- c()
# For the case when Rebote Ofensivo is in the last row, for example 8/103319 Girona
if ((to_pos[i] + 1) > nrow(data)) {
next
}else{
# In the following loop, :nrow(data) is just for closing the set of rows to consider.
for (j in (to_pos[i] + 1):nrow(data)) {
if (verbose) cat("ROW: ", j, "\n")
# This first "if" is to avoid cases where actions are from different periods.
if (data$period[j] == data$period[j - 1]) {
if (data$team[j] == team_sel) {
to_pos_after <- c(to_pos_after, j)
}else if (data$team[j] != team_sel & grepl("Falta Personal|T\\u00e9cnica",
data$action[j])) {
to_pos_after <- c(to_pos_after, j)
}else{
break()
}
}else{
break()
}
}
# TEAM STATS:
to_pos_after_points <- data[to_pos_after, "points"]$points
to_pos_after_points_nona <- to_pos_after_points[!is.na(to_pos_after_points)]
if (length(to_pos_after_points_nona) != 0) {
        # sum() just in case there was more than one action with points, such as a foul plus a free throw.
points_scored <- points_scored + sum(to_pos_after_points_nona)
times_succ <- times_succ + 1
}
# PLAYER STATS:
name_player_reb_off <- data$player[to_pos[i]]
to_pos_after_points_pl <- data[to_pos_after, "points"]$points
      to_pos_after_points_pl_nona <- to_pos_after_points_pl[!is.na(to_pos_after_points_pl)]
if (length(to_pos_after_points_pl_nona) != 0) {
points_scored_pl <- sum(to_pos_after_points_pl_nona)
}
name_player_reb_off_succ <- unique(data$player[to_pos_after[!is.na(to_pos_after_points_pl)]])
stats_player_i <- tibble(day = day_num,
game_code = game_code,
team = team_sel,
player_reb_off = name_player_reb_off,
player_reb_off_succ = name_player_reb_off_succ,
points_scored = points_scored_pl)
stats_player <- bind_rows(stats_player, stats_player_i)
}
}
  # In 'stats_player', when 'player_reb_off' coincides with 'player_reb_off_succ',
  # this means that the player who grabbed the rebound also scored.
times_succ_perc <- round((times_succ / length(to_pos)) * 100, 2)
stats_team <- tibble(day = day_num,
game_code = game_code,
team = team_sel,
times_reb_off = length(to_pos),
times_reb_off_succ = times_succ,
times_reb_off_succ_perc = times_succ_perc,
points_scored = points_scored)
return(list(stats_team = stats_team, stats_player = stats_player))
}
| /scratch/gouwar.j/cran-all/cranData/BAwiR/R/do_reb_off_success.R |
#' Player game finder data
#'
#' @aliases do_scraping_games
#'
#' @description
#' This function calls the needed ancillary functions to scrape the player game
#' finder data for the desired competition (currently, ACB, Euroleague and Eurocup).
#'
#' @usage
#' do_scraping_games(competition, type_league, nums, year, verbose, accents, r_user)
#'
#' @param competition String. Options are "ACB", "Euroleague" and "Eurocup".
#' @param type_league String. If \code{competition} is ACB, to scrape
#' ACB league games ("ACB"), Copa del Rey games ("CREY") or Supercopa games ("SCOPA").
#' @param nums Numbers corresponding to the website from which scraping.
#' @param year If \code{competition} is either Euroleague or Eurocup, the year
#' when the season starts is needed. 2017 refers to 2017-2018 and so on.
#' @param verbose Should R report information on progress? Default TRUE.
#' @param accents If \code{competition} is ACB, should we keep the Spanish accents?
#' The recommended option is to remove them, so default FALSE.
#' @param r_user Email to identify the user when doing web scraping.
#' This is a polite way to do web scraping and to certify that the user
#' is working as transparently as possible with a research purpose.
#'
#' @return
#' A data frame with the player game finder data for the competition selected.
#'
#' @author
#' Guillermo Vinue
#'
#' @seealso
#' \code{\link{scraping_games_acb}}, \code{\link{scraping_games_euro}}
#'
#' @examples
#' \dontrun{
#' # Not needed to scrape every time the package is checked, built and installed.
#' df1 <- do_scraping_games(competition = "ACB", type_league = "ACB", nums = 62001,
#' year = "2017-2018", verbose = TRUE, accents = FALSE,
#' r_user = "[email protected]")
#'
#' df1_eur <- do_scraping_games(competition = "Euroleague", nums = 1,
#' year = "2017", verbose = TRUE,
#' r_user = "[email protected]")
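#'
#' # An analogous call for the Eurocup; the game number is only illustrative:
#' df1_eurc <- do_scraping_games(competition = "Eurocup", nums = 1,
#'                               year = "2017", verbose = TRUE,
#'                               r_user = "[email protected]")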
#' }
#'
#' @export
do_scraping_games <- function(competition, type_league, nums, year, verbose, accents, r_user) {
  if (competition == "ACB") {
    df <- scraping_games_acb(type_league, nums, year, verbose, accents, r_user)
  }else if (competition %in% c("Euroleague", "Eurocup")) {
    df <- scraping_games_euro(competition, nums, year, verbose, r_user)
  }else{
    # stop() instead of print(): otherwise 'df' would not exist and return(df) would fail.
    stop("This competition is not available.")
  }
return(df)
} | /scratch/gouwar.j/cran-all/cranData/BAwiR/R/do_scraping_games.R |
#' Players profile data
#'
#' @aliases do_scraping_rosters
#'
#' @description
#' This function calls the needed ancillary functions to scrape the players' profile
#' data for the desired competition (currently, ACB, Euroleague and Eurocup).
#'
#' @usage
#' do_scraping_rosters(competition, pcode, verbose, accents, year, r_user)
#'
#' @param competition String. Options are "ACB", "Euroleague" and "Eurocup".
#' @param pcode Code corresponding to the player's website to scrape.
#' @param verbose Should R report information on progress? Default TRUE.
#' @param accents If \code{competition} is ACB, should we keep the Spanish accents?
#' The recommended option is to remove them, so default FALSE.
#' @param year If \code{competition} is either Euroleague or Eurocup, the year
#' when the season starts is needed. 2017 refers to 2017-2018 and so on.
#' @param r_user Email to identify the user when doing web scraping.
#' This is a polite way to do web scraping and to certify that the user
#' is working as transparently as possible with a research purpose.
#'
#' @return
#' A data frame with the players' information.
#'
#' @author
#' Guillermo Vinue
#'
#' @seealso
#' \code{\link{scraping_rosters_acb}}, \code{\link{scraping_rosters_euro}}
#'
#' @examples
#' \dontrun{
#' # Not needed to scrape every time the package is checked, built and installed.
#' df_bio <- do_scraping_rosters(competition = "ACB", pcode = "56C",
#' verbose = TRUE, accents = FALSE,
#' r_user = "[email protected]")
#'
#' df_bio_eur <- do_scraping_rosters(competition = "Euroleague", pcode = "007969",
#' year = "2017", verbose = TRUE,
#' r_user = "[email protected]")
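#'
#' # An analogous call for the Eurocup; the player code is hypothetical:
#' df_bio_eurc <- do_scraping_rosters(competition = "Eurocup", pcode = "007969",
#'                                    year = "2017", verbose = TRUE,
#'                                    r_user = "[email protected]")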
#' }
#'
#' @export
do_scraping_rosters <- function(competition, pcode, verbose, accents, year, r_user) {
  if (competition == "ACB") {
    df <- scraping_rosters_acb(pcode, verbose, accents, r_user)
  }else if (competition %in% c("Euroleague", "Eurocup")) {
    df <- scraping_rosters_euro(competition, pcode, year, verbose, r_user)
  }else{
    # Guard against unknown competitions, since 'df' would otherwise not exist.
    stop("This competition is not available.")
  }
return(df)
} | /scratch/gouwar.j/cran-all/cranData/BAwiR/R/do_scraping_rosters.R |
#' Accumulated or average statistics
#'
#' @aliases do_stats
#'
#' @description
#' This function computes either the total or the average statistics.
#'
#' @usage
#' do_stats(df_games, type_stats = "Total", season, competition, type_season)
#'
#' @param df_games Data frame with the games, players info, advanced stats and
#' possibly recoded team names.
#' @param type_stats String. In English, the options are "Total" and "Average" and in
#' Spanish, the options are "Totales" and "Promedio".
#' @param season String indicating the season, for example, 2017-2018.
#' @param competition String. Options are "ACB", "Euroleague" and "Eurocup".
#' @param type_season String with the round of competition, for example regular season
#' or playoffs and so on.
#'
#' @return
#' Data frame.
#'
#' @author
#' Guillermo Vinue
#'
#' @examples
#' compet <- "ACB"
#' df <- do_join_games_bio(compet, acb_games_1718, acb_players_1718)
#' df1 <- do_add_adv_stats(df)
#' df2 <- do_stats(df1, "Total", "2017-2018", compet, "Regular Season")
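#' # The same call with "Average" gives the per-game averages:
#' df2_avg <- do_stats(df1, "Average", "2017-2018", compet, "Regular Season")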
#'
#' @importFrom dplyr contains rename n everything summarise_all distinct
#' @importFrom lubridate as.period ms
#'
#' @export
do_stats <- function(df_games, type_stats = "Total", season, competition, type_season){
Number <- TwoPPerc <- ThreePPerc <- FTPerc <- Day <- Date <- NULL
Game <- GameRes <- GameID <- Website <- Player.y <- Height <- NULL
Date_birth <- Licence <- Website_player <- Age <- Month <- Compet <- NULL
FGPerc <- EFGPerc <- ThreeRate <- FRate <- STL_TOV <- AST_TOV <- NULL
PPS <- OE <- EPS <- Season <- Type_season <- MP <- Player.x <- NULL
  Name <- Team <- GS <- CombinID <- Nationality <- GP <- NULL
MP_oper <- MP_oper_def <- TwoPA <- TwoP <- ThreePA <- NULL
ThreeP <- FTA <- FT <- FGA <- FG <- TOV <- STL <- AST <- PTS <- NULL
# Remove columns related to strings and also related to percentages:
df2 <- df_games %>%
select(-c(Number, TwoPPerc, ThreePPerc, FTPerc, Season, Type_season,
Day, Date, Game, GameRes, GameID,
Website, Player.y, Height, Date_birth, Website_player, Age, Month, Compet,
contains("Gm"), FGPerc, EFGPerc, ThreeRate, FRate, STL_TOV, AST_TOV, PPS, OE, EPS))
df2_1 <- df2 %>%
    filter(MP != 0) %>% # Without filtering by MP != 0, the averages would be wrong. For example,
    # Josep Puerto was on the game roster for four games but only played in two,
    # so his averages must be computed over those two games.
rename(Name = Player.x) %>%
group_by(Name, Team) %>% # Group by Name and Team because for example Llompart played 3 games for
# Valencia and 2 for Tenerife. If I don't group by Team, his total games played for both teams is 5.
mutate(GP = n()) %>%
mutate(GS = sum(GS))
df2_2 <- df2_1 %>%
select(Name, Team, CombinID, Position, Nationality, GP, GS, everything())
df3 <- df2_2 %>%
select(-MP) %>%
group_by(Name, Team, CombinID, Position, Nationality, GP, GS)
# Sum or average all numeric variables:
if (type_stats == "Total" | type_stats == "Totales") {
df3 <- df3 %>%
summarise_all(sum, na.rm = TRUE) # gv <- c(NA, NA) ; sum(gv) is NA but sum(gv, na.rm = TRUE) is 0.
# Sum minutes:
# See do_sum_MP.R to sum the MP:
df3_mp <- df2_2 %>%
group_by(Name, Team, CombinID, Position, Nationality, GP, GS) %>%
mutate(MP_oper = ifelse(all(MP == "0"),
0,
sum(as.numeric(as.period(ms(MP), unit = "sec")), na.rm = TRUE))) %>%
mutate(MP_oper_def = sprintf("%02d:%02d", MP_oper %/% 60, MP_oper %% 60)) %>%
distinct(Name, Team, CombinID, Position, Nationality, GP, GS, MP_oper_def)
}else if (type_stats == "Average" | type_stats == "Promedio") {
df3 <- df3 %>%
summarise_all(mean, na.rm = TRUE)
df3[, 8:ncol(df3)] <- round(df3[, 8:ncol(df3)], 1)
# Average minutes:
# See do_sum_MP.R to sum the MP:
df3_mp <- df2_2 %>%
group_by(Name, Team, CombinID, Position, Nationality, GP, GS) %>%
mutate(MP_oper = ifelse(all(MP == "0"),
0,
floor(mean(as.numeric(as.period(ms(MP), unit = "sec")), na.rm = TRUE)))) %>%
mutate(MP_oper_def = sprintf("%02d:%02d", MP_oper %/% 60, MP_oper %% 60)) %>%
distinct(Name, Team, CombinID, Position, Nationality, GP, GS, MP_oper_def)
}else{
stop("Wrong option.")
}
df3_def <- left_join(df3, df3_mp) %>%
rename(MP = MP_oper_def) %>%
select(Name, Team, CombinID, Position, Nationality, GP, GS, MP, everything())
# Add now the percentages and other variables related to accumulated statistics:
df4 <- df3_def %>%
mutate(TwoPPerc = ifelse(TwoPA == 0, 0, round((TwoP / TwoPA) * 100))) %>%
mutate(ThreePPerc = ifelse(ThreePA == 0, 0, round((ThreeP / ThreePA) * 100))) %>%
mutate(FTPerc = ifelse(FTA == 0, 0, round((FT / FTA) * 100))) %>%
mutate(FGPerc = ifelse(FGA == 0, 0, round((FG / FGA) * 100))) %>% # Field Goal Percentage.
mutate(EFGPerc = ifelse(FGA == 0, 0, (FG + 0.5 * ThreeP) / FGA)) %>% # Effective Field Goal Percentage.
mutate(EFGPerc = round(EFGPerc * 100)) %>%
mutate(EFGPerc = ifelse(EFGPerc > 100, 100, EFGPerc)) %>% # FG = 1 ; Three = 1 --> (1 + 0.5 * 1) / 1 > 1
mutate(ThreeRate = ifelse(FGA == 0, 0, round(ThreePA / FGA, 1))) %>% # 3-Point Attempt Rate.
mutate(FRate = ifelse(FGA == 0, 0, round(FT / FGA, 1))) %>% # Free Throw Attempt Rate.
mutate(STL_TOV = ifelse(TOV == 0, STL, round(STL / TOV, 1))) %>% # Steal to Turnover Ratio.
mutate(AST_TOV = ifelse(TOV == 0, AST, round(AST / TOV, 1))) %>% # Assist to Turnover Ratio.
mutate(PPS = ifelse(FGA == 0, 0, round(PTS / FGA, 1))) # Points per Shot.
df4$OE <- do_OE(df4) # Offensive Efficiency.
df4$EPS <- do_EPS(df4) # Efficient Points Scored.
# Matrix with all the accumulated statistics in the suitable order:
df5 <- df4 %>%
select(1:9, FG, FGA, FGPerc, 10:11, TwoPPerc, 12:13, ThreePPerc, 14:15, FTPerc, everything()) %>%
# To convert the minutes into numeric:
mutate(MP = round(as.numeric(ms(MP), unit = "mins")))
df5$Season <- season
df5$Compet <- competition
if (length(type_season) > 1) { # In some months, more than one round is played.
# In February, there are games from Copa del Rey and ACB regular season.
df5$Type_season <- "All"
}else{
df5$Type_season <- type_season
}
df5$Type_stats <- type_stats
return(df5)
} | /scratch/gouwar.j/cran-all/cranData/BAwiR/R/do_stats.R |
#' Compute stats per period
#'
#' @aliases do_stats_per_period
#'
#' @description
#' Compute time played and points scored for a player of interest in any period
#' of the game.
#'
#' @usage
#' do_stats_per_period(data, day_num, game_code, team_sel, period_sel, player_sel)
#'
#' @param data Prepared data from a given game.
#' @param day_num Day number.
#' @param game_code Game code.
#' @param team_sel One of the teams' names involved in the game.
#' @param period_sel Period of interest. Options can be "xC", where x=1,2,3,4.
#' @param player_sel Player of interest.
#'
#' @return
#' Data frame with one row and mainly time played (seconds and minutes) and points
#' scored by the player of interest in the period of interest.
#'
#' @note
#' The \strong{game_code} column allows us to detect the source website, for example,
#' \url{https://jv.acb.com/es/103389/jugadas}.
#'
#' @author
#' Guillermo Vinue
#'
#' @examples
#' library(dplyr)
#' df0 <- acb_vbc_cz_pbp_2223
#'
#' day_num <- unique(acb_vbc_cz_pbp_2223$day)
#' game_code <- unique(acb_vbc_cz_pbp_2223$game_code)
#'
#' # Remove overtimes:
#' rm_overtime <- TRUE
#' if (rm_overtime) {
#' df0 <- df0 %>%
#' filter(!grepl("PR", period)) %>%
#' mutate(period = as.character(period))
#' }
#'
#' team_sel <- "Valencia Basket" # "Casademont Zaragoza"
#' period_sel <- "1C" # "4C"
#' player_sel <- "Webb" # "Mara"
#'
#' df1 <- df0 %>%
#' filter(team == team_sel) %>%
#' filter(!action %in% c("D - Descalificante - No TL", "Altercado no TL"))
#'
#' df2 <- df1 %>%
#' filter(period == period_sel)
#'
#' df0_inli_team <- acb_vbc_cz_sl_2223 %>%
#' filter(team == team_sel, period == period_sel)
#'
#' df3 <- do_prepare_data(df2, day_num,
#' df0_inli_team, acb_games_2223_info,
#' game_code)
#'
#' df4 <- do_stats_per_period(df3, day_num, game_code, team_sel, period_sel, player_sel)
#' #df4
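#'
#' # A sketch to repeat the computation over the four regular periods; it
#' # simply wraps the preparation steps above for each period:
#' # df5 <- do.call(rbind, lapply(paste0(1:4, "C"), function(per) {
#' #   df2_per <- df1 %>% filter(period == per)
#' #   sl_per <- acb_vbc_cz_sl_2223 %>% filter(team == team_sel, period == per)
#' #   df3_per <- do_prepare_data(df2_per, day_num, sl_per,
#' #                              acb_games_2223_info, game_code)
#' #   do_stats_per_period(df3_per, day_num, game_code, team_sel, per, player_sel)
#' # }))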
#'
#' @importFrom tibble is_tibble
#' @importFrom lubridate seconds_to_period
#'
#' @export
do_stats_per_period <- function(data, day_num, game_code, team_sel, period_sel, player_sel) {
action <- player <- points <- time_point <- NULL
data0 <- data %>%
filter(!(time_point == "10:00" & action %in% c("Sale de la pista", "Entra a pista"))) %>%
mutate(action = ifelse(action == "Quinteto inicial", "Entra a pista", action)) %>%
mutate(points = case_when(
action == "Tiro Libre anotado" ~ 1,
action == "Mate" ~ 2,
action == "Tiro de 2 anotado" ~ 2,
action == "Triple anotado" ~ 3))
data1 <- data0 %>%
filter(player == player_sel)
if (nrow(data1) == 0) {
data_res <- NULL
}else{
# Case when there are two consecutive "Entra a pista", for example,
# Hollatz 3C 9/103312
if (nrow(data1) > 1 & all(data1$action[1:2] == "Entra a pista")) {
data1 <- data1[-2,]
}
    # Add a sentinel last row with time 00:00 so that the end of the period
    # is available as a time point when the player finishes it on court:
last_row_game <- data.frame(matrix(NA, nrow = 1, ncol = ncol(data1)))
colnames(last_row_game) <- colnames(data1)
last_row_game$time_point <- "00:00"
data2 <- bind_rows(data1, last_row_game) %>%
mutate(time_point = period_to_seconds(ms(time_point)))
row_in <- which(data2$action == "Entra a pista")
row_out <- which(data2$action == "Sale de la pista")
# For the case when the period finishes with the player:
if (length(row_out) < length(row_in)) {
row_out <- c(row_out, nrow(data2))
}
# For the case when there is neither "Entra a pista" nor "Sale de la pista",
# for example Bamforth 3C 29/103282
if (length(row_in) == 0 & length(row_out) == 0) {
row_in <- 1
row_out <- nrow(data2)
}
total_min <- c()
total_pts <- c()
for (i in 1:length(row_in)) {
tp_time <- data2[c(row_in[i], row_out[i]), "time_point"]
if (is_tibble(tp_time)) {
tp_time <- tp_time %>% pull()
}
total_min <- c(total_min, diff(rev(tp_time)))
tp_pt <- data2[row_in[i]:row_out[i], ] %>%
filter(player == player_sel) %>%
pull(points)
total_pts <- c(total_pts, tp_pt)
}
data_res <- data.frame(day = day_num,
game_code = game_code,
team = team_sel,
player = player_sel,
period = period_sel,
seconds = sum(total_min),
minutes = seconds_to_period(sum(total_min)),
points = sum(total_pts, na.rm = TRUE))
}
return(data_res)
}
| /scratch/gouwar.j/cran-all/cranData/BAwiR/R/do_stats_per_period.R |
#' Accumulated and average statistics for teams
#'
#' @aliases do_stats_teams
#'
#' @description
#' This function computes the total and average statistics for every team.
#'
#' @usage
#' do_stats_teams(df_games, season, competition, type_season)
#'
#' @param df_games Data frame with the games, players info, advanced stats and
#' possibly recoded team names.
#' @param season String indicating the season, for example, 2017-2018.
#' @param competition String. Options are "ACB", "Euroleague" and "Eurocup".
#' @param type_season String with the round of competition, for example regular season
#' or playoffs and so on.
#'
#' @return
#' A list with two elements:
#' \itemize{
#' \item df_team_total: Data frame with the total statistics for every team.
#' \item df_team_mean: Data frame with the average statistics for every team.
#' }
#'
#' @author
#' Guillermo Vinue
#'
#' @examples
#' compet <- "ACB"
#' df <- do_join_games_bio(compet, acb_games_1718, acb_players_1718)
#' df$Compet <- compet
#' df_teams <- do_stats_teams(df, "2017-2018", "ACB", "Regular Season")
#' # Total statistics:
#' #df_teams$df_team_total
#' # Average statistics:
#' #df_teams$df_team_mean
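#' # A quick look at the average points scored (PTS) and received (PTSrv):
#' #df_teams$df_team_mean[, c("Team", "GP", "PTS", "PTSrv")]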
#'
#' @importFrom dplyr funs
#'
#' @export
do_stats_teams <- function(df_games, season, competition, type_season){
Compet <- Season <- Type_season <- Name <- CombinID <- NULL
  Position <- Nationality <- Type_stats <- NULL
GP <- GS <- MP <- Team <- Game <- Type <- PTSrv <- PTSrv_mean <- NULL
FGPerc <- FGA <- FG <- TwoPPerc <- TwoPA <- TwoP <- NULL
ThreePPerc <- ThreePA <- ThreeP <- FTPerc <- FTA <- NULL
FT <- EFGPerc <- PTS <- NULL
# Get the total statistics for every player:
if (type_season == "All") {
df_games1 <- df_games %>%
filter(Compet == competition,
Season == season)
}else{
df_games1 <- df_games %>%
filter(Compet == competition,
Season == season,
Type_season == type_season)
}
df_games1 <- do_add_adv_stats(df_games1)
# Games played by every team:
games_played <- df_games1 %>%
group_by(Team) %>%
distinct(Game) %>%
count()
if (type_season == "All") {
df_games2 <- do_stats(df_games1,
"Total",
unique(df_games1$Season),
unique(df_games1$Compet),
"All")
}else{
df_games2 <- do_stats(df_games1,
"Total",
unique(df_games1$Season),
unique(df_games1$Compet),
unique(df_games1$Type_season))
}
# Once we have the total statistics for every player, we can get
# the teams' statistics by summing their players' statistics:
df_team <- df_games2 %>%
ungroup() %>%
select(-c(Name, CombinID, Position, Nationality,
Season, Compet, Type_season, Type_stats)) %>%
select(-GP, -GS, -MP, -contains("Perc")) %>%
group_by(Team) %>%
    summarise_all(sum)
df_team1 <- left_join(df_team, games_played) %>%
rename(GP = n)
df_team2 <- df_team1 %>%
select(Team, GP, everything()) %>%
mutate(FGPerc = ifelse(FGA == 0, 0, round((FG / FGA) * 100))) %>%
mutate(TwoPPerc = ifelse(TwoPA == 0, 0, round((TwoP / TwoPA) * 100))) %>%
mutate(ThreePPerc = ifelse(ThreePA == 0, 0, round((ThreeP / ThreePA) * 100))) %>%
mutate(FTPerc = ifelse(FTA == 0, 0, round((FT / FTA) * 100))) %>%
mutate(EFGPerc = ifelse(FGA == 0, 0, (FG + 0.5 * ThreeP) / FGA)) %>% # Effective Field Goal Percentage.
mutate(EFGPerc = round(EFGPerc * 100)) %>%
mutate(EFGPerc = ifelse(EFGPerc > 100, 100, EFGPerc)) %>% # FG = 1 ; Three = 1 --> (1 + 0.5 * 1) / 1 > 1
select(1:5, FGPerc, 6:7, TwoPPerc, 8:9, ThreePPerc, 10:11, FTPerc, 12:27, EFGPerc, everything())
# In order to get the average statistics, we have just to divide by the
# number of games played so far:
df_team_mean <- df_team2 %>%
select(-contains("Perc"))
# -c(1,2) is to discard the team's name and games played:
df_team_mean_aux <- apply(df_team_mean[,-c(1,2)], 2, "/", df_team_mean$GP)
df_team_mean_aux1 <- as.data.frame(df_team_mean_aux)
df_team_mean1 <- cbind(df_team_mean[,1:2], df_team_mean_aux1)
df_team_mean2 <- df_team_mean1 %>%
mutate(FGPerc = ifelse(FGA == 0, 0, round((FG / FGA) * 100))) %>%
mutate(TwoPPerc = ifelse(TwoPA == 0, 0, round((TwoP / TwoPA) * 100))) %>%
mutate(ThreePPerc = ifelse(ThreePA == 0, 0, round((ThreeP / ThreePA) * 100))) %>%
mutate(FTPerc = ifelse(FTA == 0, 0, round((FT / FTA) * 100))) %>%
mutate(EFGPerc = ifelse(FGA == 0, 0, (FG + 0.5 * ThreeP) / FGA)) %>% # Effective Field Goal Percentage.
mutate(EFGPerc = round(EFGPerc * 100)) %>%
mutate(EFGPerc = ifelse(EFGPerc > 100, 100, EFGPerc)) %>% # FG = 1 ; Three = 1 --> (1 + 0.5 * 1) / 1 > 1
select(1:5, FGPerc, 6:7, TwoPPerc, 8:9, ThreePPerc, 10:11, FTPerc, 12:27, EFGPerc, everything())
df_team_mean2[, 3:ncol(df_team_mean2)] <- round(df_team_mean2[, 3:ncol(df_team_mean2)], 1)
# Finally, this is to compute the total and average points received by every team:
teams <- df_team_mean2$Team
df_defense <- data.frame()
for (i in teams) {
team_Game <- unique(df_games1$Game[df_games1$Team == i])
df_defense_team <- df_games1 %>%
filter(Game %in% team_Game) %>%
group_by(Team) %>%
      mutate(Type = ifelse(Team == i, "Offense", "Defense"))
df_defense_team1 <- df_defense_team %>%
filter(Type == "Defense") %>%
ungroup() %>%
summarise(PTSrv = sum(PTS)) %>%
mutate(Team = i) %>%
mutate(PTSrv_mean = round(PTSrv / games_played$n[games_played$Team == i], 1))
df_defense <- bind_rows(df_defense, df_defense_team1)
}
df_team3 <- left_join(df_team2, df_defense) %>%
select(-PTSrv_mean) %>%
select(Team, GP, PTS, PTSrv, everything())
df_team_mean3 <- left_join(df_team_mean2, df_defense) %>%
select(-PTSrv) %>%
select(Team, GP, PTS, PTSrv_mean, everything()) %>%
rename(PTSrv = PTSrv_mean)
return(list(df_team_total = df_team3, df_team_mean = df_team_mean3))
} | /scratch/gouwar.j/cran-all/cranData/BAwiR/R/do_stats_teams.R |
#' Compute ACB sub-lineups
#'
#' @aliases do_sub_lineup
#'
#' @description
#' Compute all the sub-lineups that a given team shows during a game. They can
#' be made up of four, three or two players.
#'
#' @usage
#' do_sub_lineup(data, elem_choose)
#'
#' @param data Data frame with the lineups (quintets).
#' @param elem_choose Numeric: 4, 3 or 2.
#'
#' @return
#' Data frame. Each row is a different sub-lineup. This is the meaning of the
#' columns that might not be self-explanatory:
#' \itemize{
#' \item \strong{team_in}: Time point when that sub-lineup starts playing together.
#' \item \strong{team_out}: Time point when that sub-lineup stops playing together
#' (because there is a substitution).
#' \item \strong{time_seconds}: Total of seconds that the sub-lineup played.
#' \item \strong{plus_minus}: Plus/minus achieved by the sub-lineup. This is the difference
#' between the game score of the previous lineup and of the current one.
#' \item \strong{plus_minus_poss}: Plus/minus per possession.
#' }
#'
#' @note
#' A possession lasts 24 seconds in the ACB league.
#'
#' @author
#' Guillermo Vinue
#'
#' @examples
#' library(dplyr)
#' df0 <- acb_vbc_cz_pbp_2223
#'
#' day_num <- unique(acb_vbc_cz_pbp_2223$day)
#' game_code <- unique(acb_vbc_cz_pbp_2223$game_code)
#'
#' acb_games_2223_sl <- acb_vbc_cz_sl_2223 %>%
#' filter(period == "1C")
#'
#' df1 <- do_prepare_data(df0, day_num,
#' acb_games_2223_sl, acb_games_2223_info,
#' game_code)
#'
#' df2 <- do_lineup(df1, day_num, game_code, "Valencia Basket", FALSE)
#'
#' df3 <- do_sub_lineup(df2, 4)
#' #df3
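#'
#' # Trios and duos are obtained analogously:
#' # df3_trios <- do_sub_lineup(df2, 3)
#' # df3_duos <- do_sub_lineup(df2, 2)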
#'
#' @importFrom stringr str_split
#' @importFrom utils combn
#'
#' @export
do_sub_lineup <- function(data, elem_choose) {
num_players <- diff_points <- lineup_type <- NULL
if (elem_choose == 4) {
tsl <- "quartet"
}else if (elem_choose == 3) {
tsl <- "trio"
}else if (elem_choose == 2) {
tsl <- "duo"
}
data_res <- data.frame()
for (i in 1:nrow(data)) {
lineup_i <- unlist(str_split(data$lineup[i], ", "))
lineup_sub <- combn(lineup_i, elem_choose, simplify = FALSE)
for (j in 1:length(lineup_sub)) {
data_save <- data[i,]
data_save$lineup <- paste(lineup_sub[[j]], collapse = ", ")
data_save$lineup_type <- tsl
data_save <- data_save %>%
select(-num_players, -diff_points) %>%
mutate(lineup_type = as.character(lineup_type))
data_res <- bind_rows(data_res, data_save)
}
}
return(data_res)
}
| /scratch/gouwar.j/cran-all/cranData/BAwiR/R/do_sub_lineup.R |
#' Check if timeouts resulted in scoring
#'
#' @aliases do_time_out_success
#'
#' @description
#' For each team, locate the position of timeouts and check if they
#' resulted in scoring points.
#'
#' @usage
#' do_time_out_success(data, day_num, game_code, team_sel, verbose)
#'
#' @param data Prepared data from a given game.
#' @param day_num Day number.
#' @param game_code Game code.
#' @param team_sel One of the teams' names involved in the game.
#' @param verbose Logical. Should information about the computations
#' be printed?
#'
#' @return
#' Data frame. This is the meaning of the columns:
#' \itemize{
#' \item \strong{day}: Day number.
#' \item \strong{game_code}: Game code.
#' \item \strong{team}: Name of the corresponding team and coach.
#' \item \strong{times_out_requested}: Number of timeouts requested in the game.
#' \item \strong{times_out_successful}: Number of timeouts that resulted in scoring.
#' \item \strong{times_out_successful_perc}: Percentage of successful timeouts.
#' \item \strong{points_scored}: Total of points achieved after the timeouts.
#' }
#'
#' @author
#' Guillermo Vinue
#'
#' @seealso
#' \code{\link{do_prepare_data_to}}
#'
#' @examples
#' df0 <- acb_vbc_cz_pbp_2223
#'
#' day_num <- unique(acb_vbc_cz_pbp_2223$day)
#' game_code <- unique(acb_vbc_cz_pbp_2223$game_code)
#'
#' df1 <- do_prepare_data_to(df0, TRUE, acb_games_2223_info, acb_games_2223_coach)
#'
#' # sort(unique(df1$team))
#' # "Casademont Zaragoza_Porfirio Fisac" "Valencia Basket_Alex Mumbru"
#'
#' df2 <- do_time_out_success(df1, day_num, game_code,
#' "Casademont Zaragoza_Porfirio Fisac", FALSE)
#' #df2
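#'
#' # The analogous call for the other team and coach in this game:
#' # df3 <- do_time_out_success(df1, day_num, game_code,
#' #                            "Valencia Basket_Alex Mumbru", FALSE)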
#'
#' @importFrom dplyr pull
#'
#' @export
do_time_out_success <- function(data, day_num, game_code, team_sel, verbose) {
period <- time_point <- NULL
# Locate the position of time outs:
to_pos_aux <- which(data$player == team_sel & data$action == "Tiempo Muerto")
# Case when the time out is at the last row of the data, for example Joventut in 8/103315:
lr <- which(to_pos_aux == nrow(data))
if (length(lr) != 0) {
to_pos_aux <- to_pos_aux[-which(to_pos_aux == nrow(data))]
}
# Case of no time out requested, for example Unicaja in 6/103297:
if (length(to_pos_aux) == 0) {
data_res <- tibble(day = day_num,
game_code = game_code,
team = team_sel,
times_out_requested = 0,
times_out_successful = NA,
times_out_successful_perc = NA,
points_scored = NA)
}else{
# To delete duplicated time outs, see for example the fourth period 00:08 from 103346.
to_pos_dup <- data[to_pos_aux,] %>%
mutate(to_pos_aux = to_pos_aux) %>%
arrange(desc(to_pos_aux)) %>%
distinct(period, time_point, .keep_all = TRUE) %>%
pull(to_pos_aux)
if (length(to_pos_dup) < length(to_pos_aux)) {
to_pos <- rev(to_pos_dup)
}else{
to_pos <- to_pos_aux
}
points_scored <- 0 # To accumulate the total points scored after asking for a time out.
times_succ <- 0 # To accumulate the times that a time out generated some points, i.e., was successful.
for (i in 1:length(to_pos)) {
to_pos_after <- c()
# In the following loop, :nrow(data) is just for closing the set of rows to consider.
# This especially works for the case when the time out is close to the end, see for
# example row 527 of 14/103384.
for (j in (to_pos[i] + 1):nrow(data)) {
if (verbose) cat("ROW: ", j, "\n")
# This first "if" is to avoid cases where actions are from different periods
if (data$period[j] == data$period[j - 1]) {
if (data$team[j] == team_sel) {
to_pos_after <- c(to_pos_after, j)
}else if (data$team[j] != team_sel & grepl("Falta Personal|T\\u00e9cnica",
data$action[j])) {
to_pos_after <- c(to_pos_after, j)
}else{
break()
}
}else{
break()
}
}
to_pos_after_points <- data[to_pos_after, "points"]$points
to_pos_after_points_nona <- to_pos_after_points[!is.na(to_pos_after_points)]
if (length(to_pos_after_points_nona) != 0) {
        # sum() just in case there was more than one action with points, such as a foul plus a free throw.
points_scored <- points_scored + sum(to_pos_after_points_nona)
times_succ <- times_succ + 1
}
}
times_succ_perc <- round((times_succ / length(to_pos)) * 100, 2)
data_res <- tibble(day = day_num,
game_code = game_code,
team = team_sel,
times_out_requested = length(to_pos),
times_out_successful = times_succ,
times_out_successful_perc = times_succ_perc,
points_scored = points_scored)
}
return(data_res)
}
| /scratch/gouwar.j/cran-all/cranData/BAwiR/R/do_time_out_success.R |
#' Barplots with monthly stats
#'
#' @aliases get_barplot_monthly_stats
#'
#' @description
#' On all the available basketball websites, the stats are presented aggregated
#' over all the games played. This function represents a barplot with the players'
#' stats for each month, which is very useful to analyse the players' evolution.
#'
#' @usage
#' get_barplot_monthly_stats(df_stats, title, size_text = 2.5)
#'
#' @param df_stats Data frame with the statistics.
#' @param title Plot title.
#' @param size_text Label size for each bar. Default 2.5.
#'
#' @return
#' Graphical device.
#'
#' @author
#' Guillermo Vinue
#'
#' @seealso
#' \code{\link{capit_two_words}}
#'
#' @examples
#' \dontrun{
#' library(dplyr)
#' compet <- "ACB"
#' df <- do_join_games_bio(compet, acb_games_1718, acb_players_1718)
#' df1 <- do_add_adv_stats(df)
#'
#' months <- c(df %>% distinct(Month))$Month
#' months_order <- c("September", "October", "November", "December",
#' "January", "February", "March", "April", "May", "June")
#' months_plot <- match(months_order, months)
#' months_plot1 <- months_plot[!is.na(months_plot)]
#' months_plot2 <- months[months_plot1]
#'
#' df3_m <- df1 %>%
#' filter(Team == "Real_Madrid",
#' Player.x == "Doncic, Luka") %>%
#' group_by(Month) %>%
#' do(do_stats(., "Average", "2017-2018", "ACB", "Regular Season")) %>%
#' ungroup() %>%
#' mutate(Month = factor(Month, levels = months_plot2)) %>%
#' arrange(Month)
#'
#' stats <- c("GP", "MP", "PTS", "FGA", "FGPerc", "ThreePA",
#' "ThreePPerc", "FTA", "FTPerc",
#' "TRB", "ORB", "AST", "TOV", "STL")
#'
#' df3_m1 <- df3_m %>%
#' select(1:5, stats, 46:50)
#' get_barplot_monthly_stats(df3_m1, paste("; ACB", "2017-2018", "Average", sep = " ; "),
#' 2.5)
#'
#' # For all teams and players:
#' teams <- as.character(sort(unique(df1$Team)))
#' df3_m <- df1 %>%
#' filter(Team == teams[13]) %>%
#' group_by(Month) %>%
#' do(do_stats(., "Average", "2017-2018", "ACB", "Regular Season")) %>%
#' ungroup() %>%
#' mutate(Month = factor(Month, levels = months_plot2)) %>%
#' arrange(Month)
#'
#' df3_m1 <- df3_m %>%
#' select(1:5, stats, 46:50)
#'
#' for (i in unique(df3_m1$Name)) {
#' print(i)
#' print(get_barplot_monthly_stats(df3_m1 %>% filter(Name == i),
#' paste(" ; ACB", "2017-2018", "Average", sep = " ; "),
#' 2.5))
#' }
#' }
#'
#' @importFrom ggplot2 geom_bar facet_grid label_wrap_gen ylim coord_flip
#' @importFrom stats reformulate
#'
#' @export
get_barplot_monthly_stats <- function(df_stats, title, size_text = 2.5){
Team <- Name <- CombinID <- Season <- Compet <- NULL
Type_season <- Type_stats <- variable <- value <- NULL
df_stats1 <- df_stats %>%
#filter(Team %in% team, Name == player) %>% # Team %in% team instead Team == team because some players
# have played for different teams in the same season, e.g., Pedro Llompart played for Valencia and Tenerife
# in season 2017-2018.
select(-c(CombinID, Position, Season, Compet, Type_season, Type_stats))
# Order the months:
#df_stats1$Month <- factor(df_stats1$Month,
# levels = c("September", "October", "November", "December",
# "January", "February", "March", "April", "May"))
df_stats2 <- melt(df_stats1)
# Order the stats:
df_stats2$variable <- factor(df_stats2$variable,
levels = rev(levels(df_stats2$variable)))
gg <- ggplot(df_stats2, aes(variable, value)) +
geom_bar(stat = "identity", color = "black", fill = "white") +
facet_grid(Month~Name+Team, labeller = label_wrap_gen(width = 1)) +
geom_text(aes(label = value), hjust = -0.2, size = size_text, color = "red") +
coord_flip() +
ggtitle(title) +
theme(axis.title.x = element_blank(),
axis.title.y = element_blank(),
axis.text.x = element_text(size = 7),
strip.text = element_text(size = 20))
return(gg)
}
| /scratch/gouwar.j/cran-all/cranData/BAwiR/R/get_barplot_monthly_stats.R |
#' Basketball bubble plot
#'
#' @aliases get_bubble_plot
#'
#' @description
#' This plot is a representation of the percentiles of all statistics
#' for a particular player. The figure shows four cells. The first box
#' contains the percentiles between 0 and 24. The second, between 25 and 49.
#' The third, between 50 and 74 and the fourth, between 75 and 100. The
#' percentiles are computed with the function
#' \code{\link[Anthropometry]{percentilsArchetypoid}}.
#' Boxes of the same percentile category are in the same color in the interests
#' of easy understanding.
#'
#' This type of visualization allows the user to analyze each player in a very
#' simple way, since a general idea of those aspects of the game in which the
#' player excels can be obtained.
#'
#' @usage get_bubble_plot(df_stats, player, descr_stats, size_text, size_text_x, size_legend)
#'
#' @param df_stats Data frame with the statistics.
#' @param player Player.
#' @param descr_stats Description of the statistics for the legend.
#' @param size_text Text size inside each box.
#' @param size_text_x Stats labels size.
#' @param size_legend Legend size.
#'
#' @return
#' Graphical device.
#'
#' @author
#' This function has been created using the code from this website:
#' \url{https://www.r-bloggers.com/2017/01/visualizing-the-best/}.
#'
#' @details
#' In the example shown below, it can be seen that Alberto Abalde has a percentile of
#' x in free throws percentage. This means that x percent of the league players have a
#' lower percentage than him, while the other (100-x) percent have a higher one.
#'
#' @seealso
#' \code{\link[Anthropometry]{percentilsArchetypoid}}
#'
#' @examples
#' \dontrun{
#' compet <- "ACB"
#' df <- do_join_games_bio(compet, acb_games_1718, acb_players_1718)
#' df1 <- do_add_adv_stats(df)
#' df2 <- do_stats(df1, "Total", "2017-2018", compet, "Regular Season")
#' # When choosing a subset of stats, follow the order in which they appear
#' # in the data frame.
#' stats <- c("GP", "MP", "PTS", "FGA", "FGPerc", "ThreePA", "ThreePPerc",
#' "FTA", "FTPerc", "TRB", "ORB", "AST", "STL", "TOV")
#' df2_1 <- df2[, c(1:5, which(colnames(df2) %in% stats), 46:49)]
#' descr_stats <- c("Games played", "Minutes played", "Points",
#' "Field goals attempted", "Field goals percentage",
#' "3-point field goals attempted", "3-point percentage",
#' "FTA: Free throws attempted", "Free throws percentage",
#' "Total rebounds", "Offensive rebounds",
#' "Assists", "Steals", "Turnovers")
#' get_bubble_plot(df2_1, "Abalde, Alberto", descr_stats, 6, 10, 12)
#' }
#'
#' @importFrom Anthropometry percentilsArchetypoid
#' @importFrom ggplot2 geom_col geom_vline geom_hline coord_polar scale_color_grey
#' @importFrom ggplot2 scale_y_continuous element_rect scale_fill_manual guides
#' @importFrom grid unit
#'
#' @export
get_bubble_plot <- function(df_stats, player, descr_stats, size_text, size_text_x, size_legend){
Name <- Team <- CombinID <- Position <- Nationality <- NULL
Season <- Compet <- Type_season <- Type_stats <- NULL
stat <- outof4 <- percentile <- descr <- outof4_f <- NULL
df_stats1 <- df_stats %>%
ungroup() %>%
select(-c(Name, Team, CombinID, Position, Nationality,
Season, Compet, Type_season, Type_stats))
percs <- sapply(1:dim(df_stats1)[2], percentilsArchetypoid,
which(df_stats$Name == player), df_stats1, 0)
df_cp <- data.frame(player = player,
percentile = percs,
stat = colnames(df_stats1),
outof4 = ifelse(percs == 100, 4, percs %/% 25 + 1))
df_cp$descr <- descr_stats
# Order is to get the stats legend in the same order as they are displayed in the circular plot.
#df_cp <- df_cp[order(df_cp$stat),]
df_cp <- df_cp[do.call("order", c(df_cp["stat"], list(decreasing = FALSE))),]
labs <- with(df_cp, paste(stat, descr, sep = ": "))
percs_player <- sort(unique(df_cp$outof4))
labels_plot <- c("0-24", "25-49", "50-74", "75-100")
df_cp$outof4_f <- factor(df_cp$outof4, labels = labels_plot[percs_player])
cols_ggplot <- c("#F8766D", "#7CAE00", "#00BFC4", "#C77CFF")
gg <- ggplot(df_cp, aes(x = stat, y = outof4, col = descr, fill = outof4_f)) +
geom_col(alpha = 0.5, width = 1, color = "white") +
scale_color_grey(labels = labs, end = 0) +
scale_fill_manual(values = cols_ggplot[percs_player]) +
geom_hline(yintercept = seq(0, 4, by = 1),
colour = "#949494", size = 0.5, lty = 3) + #949494 is dark grey.
geom_vline(xintercept = seq(0.5, nrow(df_cp), 1),
colour = "#949494", size = 0.4, lty = 1) +
facet_wrap(~player) +
coord_polar() +
scale_y_continuous(limits = c(0, 4), breaks = c(1, 2, 3, 4)) +
labs(x = "", y = "", fill = "Percentiles", col = "") +#, col = "Statistics") +
geom_text(aes(label = percentile), nudge_y = -0.2, size = size_text) +
geom_point(size = 0) +
guides(colour = guide_legend(override.aes = list(size = 5))) +
theme(panel.background = element_rect(fill = "#FFFFFF"), # plot with white (#FFFFFF) background.
strip.background = element_rect(fill = "#FFFFFF"), # title with white background.
#strip.text = element_text(size = 18), # text label
axis.ticks = element_blank(),
axis.text.y = element_blank(),
axis.text.x = element_text(size = size_text_x),
panel.spacing = unit(20, "lines"),
legend.text = element_text(size = size_legend))
return(gg)
}
| /scratch/gouwar.j/cran-all/cranData/BAwiR/R/get_bubble_plot.R |
#' Four factors plot
#'
#' @aliases get_four_factors_plot
#'
#' @description
#' Once computed the team's factors and its rankings with
#' \code{\link{do_four_factors_df}}, this function represents them.
#'
#' @usage get_four_factors_plot(df_rank, df_no_rank, team, language)
#'
#' @param df_rank Data frame with the team's offense and
#' defense four factors and its ranking labels.
#' @param df_no_rank Data frame with the team's offense and
#' defense four factors.
#' @param team Team name. Multiple teams can be chosen.
#' @param language Language labels. Current options are 'en' for English
#' and 'es' for Spanish.
#'
#' @return
#' Graphical device.
#'
#' @author
#' Guillermo Vinue
#'
#' @seealso
#' \code{\link{do_four_factors_df}}
#'
#' @examples
#' \dontrun{
#' df <- do_join_games_bio("ACB", acb_games_1718, acb_players_1718)
#' df1 <- do_add_adv_stats(df)
#' team <- "Valencia"
#' df_four_factors <- do_four_factors_df(df1, team)
#' # If only one team is represented the ranking between parentheses is just one.
#' get_four_factors_plot(df_four_factors$df_rank,
#' df_four_factors$df_no_rank, team, "en")
#' }
#'
#' @importFrom ggplot2 facet_wrap labs scale_color_grey guides
#'
#' @export
get_four_factors_plot <- function(df_rank, df_no_rank, team, language) {
Team <- Type <- value <- variable <- descr <- NULL
# Set the same order by teams' name:
df_rank <- df_rank %>%
arrange(Team)
df_no_rank <- df_no_rank %>%
arrange(Team)
# Data frame with ranking:
df_rank1 <- df_rank %>%
filter(Team %in% team) #%>%
#select(-Team)
df_rank2 <- melt(df_rank1, id = c("Team", "Type"))
levels(df_rank2$variable) <- c("EFG%", "TOV%", "ORB%", "FTR")
# Data frame without ranking:
df_no_rank1 <- df_no_rank %>%
filter(Team %in% team) #%>%
#select(-Team)
df_no_rank2 <- melt(df_no_rank1, id = c("Team", "Type"))
levels(df_no_rank2$variable) <- c("EFG%", "TOV%", "ORB%", "FTR")
#df_no_rank21 <- df_no_rank2[order(df_no_rank2$Type),]
#df_rank21 <- df_rank2[order(df_rank2$Type),]
df_no_rank21 <- df_no_rank2[do.call("order", c(df_no_rank2["Type"], list(decreasing = FALSE))),]
df_rank21 <- df_rank2[do.call("order", c(df_rank2["Type"], list(decreasing = FALSE))),]
if (language == "en") {
descr_stats <- rep(c(rep("Effective field goal percentage", length(team)),
rep("Turnover percentage", length(team)),
rep("Offensive rebound percentage", length(team)),
rep("Free throws per field goal attempted", length(team))), 2)
subtitle_plot <- "Team ranking for each factor between parentheses"
}else if (language == "es") {
descr_stats <- rep(c(rep("Porcentaje efectivo en tiros de campo", length(team)),
rep("Porcentaje de balones perdidos", length(team)),
rep("Porcentaje de rebotes ofensivos", length(team)),
rep("Tiros libres anotados por cada tiro de campo intentado", length(team))), 2)
subtitle_plot <- "Ranking del equipo en cada factor entre parentesis"
df_no_rank21$Type <- factor(df_no_rank21$Type)
levels(df_no_rank21$Type) <- c("Defensa", "Ataque")
}
df_no_rank21$descr <- descr_stats
labs <- with(df_no_rank21, paste(variable, descr, sep = ": "))
if (length(team) <= 2) {
size_text <- 7
axis_text <- 15
}else if (length(team) == 3){
size_text <- 4
axis_text <- 15
}else if (length(team) == 4){
size_text <- 3
axis_text <- 12
}else{
size_text <- 3
axis_text <- 10
}
gg <- ggplot(df_no_rank21, aes(x = variable, y = value, col = descr, fill = Type)) +
facet_grid(Type~Team, scales = "free_y") +
scale_color_grey(labels = unique(labs), end = 0) +
geom_bar(stat = "identity") +
geom_point(size = 0) +
guides(colour = guide_legend(override.aes = list(size = 10))) +
labs(x = "", y = "", fill = "", col = "", subtitle = subtitle_plot) +
ylim(c(0, max(df_no_rank21$value) + 10)) +
geom_text(aes(label = df_rank21$value), vjust = -1, size = size_text) +
theme(legend.title = element_blank(),
legend.text = element_text(size = 12),
strip.text = element_text(size = 18),
axis.text = element_text(size = axis_text))
return(gg)
}
| /scratch/gouwar.j/cran-all/cranData/BAwiR/R/get_four_factors_plot.R |
#' Get all games and rosters
#'
#' @aliases get_games_rosters
#'
#' @description
#' This function is to get all the games and rosters of the
#' competition selected.
#'
#' @usage get_games_rosters(competition, type_league, nums, verbose = TRUE,
#' accents = FALSE, r_user, df0, df_bio0)
#'
#' @param competition String. Options are "ACB", "Euroleague" and "Eurocup".
#' @param type_league String. If \code{competition} is ACB, to scrape
#' ACB league games ("ACB"), Copa del Rey games ("CREY") or Supercopa games ("SCOPA").
#' @param nums Numbers corresponding to the website from which scraping.
#' @param verbose Should R report information on progress? Default TRUE.
#' @param accents If \code{competition} is ACB, should we keep the Spanish accents?
#' The recommended option is to remove them, so default FALSE.
#' @param r_user Email to identify the user when doing web scraping.
#' This is a polite way to do web scraping and to certify that the user
#' is working as transparently as possible with a research purpose.
#' @param df0 Data frame to save the games data.
#' @param df_bio0 Data frame to save the rosters data.
#'
#' @return
#' Data frame.
#'
#' @author
#' Guillermo Vinue
#'
#' @examples
#' \dontrun{
#' library(readr)
#' # 1. The first time, all the historical data until the last games played can be
#' # directly scraped.
#'
#' # ACB seasons available and corresponding games numbers:
#' acb_nums <- list(30001:30257, 31001:31262, 32001:32264, 33001:33492, 34001:34487,
#' 35001:35494, 36001:36498, 37001:37401, 38001:38347, 39001:39417,
#' 40001:40415, 41001:41351, 42001:42350, 43001:43339, 44001:44341,
#' 45001:45339, 46001:46339, 47001:47339, 48001:48341, 49001:49341,
#' 50001:50339, 51001:51340, 52001:52327, 53001:53294, 54001:54331,
#' 55001:55331, 56001:56333, 57001:57333, 58001:58332, 59001:59331,
#' 60001:60332, 61001:61298,
#' 62001:62135)
#' names(acb_nums) <- paste(as.character(1985:2017), as.character(1986:2018), sep = "-")
#'
#' df0 <- data.frame()
#' df_bio0 <- data.frame(CombinID = NA, Player = NA, Position = NA,
#' Height = NA, Date_birth = NA,
#' Nationality = NA, Licence = NA, Website_player = NA)
#'
#' # All the games and players:
#' get_data <- get_games_rosters(competition = "ACB", type_league = "ACB",
#' nums = acb_nums, verbose = TRUE, accents = FALSE,
#' r_user = "[email protected]",
#' df0 = df0, df_bio0 = df_bio0)
#' acb_games <- get_data$df0
#' acb_players <- get_data$df_bio0
#' write_csv(acb_games, path = "acb_games.csv")
#' write_csv(acb_players, path = "acb_players.csv")
#'
#' # 2. Then, in order to scrape new games as they are played, the df0 and df_bio0 objects are
#' # the historical games and rosters:
#' acb_nums <- list(62136:62153)
#' names(acb_nums) <- "2017-2018"
#' df0 <- read_csv("acb_games.csv", guess_max = 1e5)
#' df_bio0 <- read_csv("acb_players.csv", guess_max = 1e3)
#' get_data <- get_games_rosters(competition = "ACB", type_league = "ACB",
#' nums = acb_nums, verbose = TRUE, accents = FALSE,
#' r_user = "[email protected]",
#' df0 = df0, df_bio0 = df_bio0)
#'
#' # -----
#'
#' # ACB Copa del Rey seasons available and corresponding games numbers (rosters were
#' # already downloaded with the ACB league):
#' acb_crey_nums <- list(50001:50004, 51001:51007, 52001:52007, 53033:53039,
#' 54033:54039, 55033:55040, 56033:56040, 57029:57036,
#' 58025:58032, 59038:59045, 60001:60008, 61001:61007,
#' 62001:62007, 63001:63007, 64001:64007, 65001:65007,
#' 66001:66007, 67001:67007, 68001:68007, 69001:69007,
#' 70001:70007, 71001:71007, 72001:72007, 73001:73007,
#' 74001:74007, 75001:75007, 76001:76007, 77001:77007,
#' 78001:78007, 79001:79007, 80001:80007, 81001:81007)
#' names(acb_crey_nums) <- paste(as.character(1985:2016), as.character(1986:2017), sep = "-")
#'
#' df0 <- data.frame()
#' get_data <- get_games_rosters(competition = "ACB", type_league = "CREY",
#' nums = acb_crey_nums, verbose = TRUE, accents = FALSE,
#' r_user = "[email protected]",
#' df0 = df0, df_bio0 = NULL)
#' acb_crey_games <- get_data$df0
#' write_csv(acb_crey_games, path = "acb_crey_games.csv")
#'
#' # -----
#'
#' # ACB Supercopa seasons available and corresponding games numbers (rosters were
#' # already downloaded with the ACB league):
#' acb_scopa_nums <- list(1001, 2001, 3001, 4001, 5001:5004, 6001:6004,
#' 7001:7003, 9001:9003, 10001:10003, 11001:11003,
#' 12001:12003, 13001:13003, 14001:14003, 15001:15003,
#' 16001:16003, 17001:17003, 18001:18003, 19001:19003)
#' # I haven't found the data for the supercopa in Bilbao 2007 ; 8001:8003
#' # http://www.acb.com/fichas/SCOPA8001.php
#' names(acb_scopa_nums) <- c(paste(as.character(1984:1987), as.character(1985:1988), sep = "-"),
#' paste(as.character(2004:2006), as.character(2005:2007), sep = "-"),
#' paste(as.character(2008:2018), as.character(2009:2019), sep = "-"))
#'
#' df0 <- data.frame()
#' get_data <- get_games_rosters(competition = "ACB", type_league = "SCOPA",
#' nums = acb_scopa_nums, verbose = TRUE, accents = FALSE,
#' r_user = "[email protected]",
#' df0 = df0, df_bio0 = NULL)
#' acb_scopa_games <- get_data$df0
#' write_csv(acb_scopa_games, path = "acb_scopa_games.csv")
#'
#' # -----
#'
#' # Euroleague seasons available and corresponding games numbers:
#' euroleague_nums <- list(1:128,
#' 1:263, 1:250, 1:251, 1:253, 1:253, 1:188, 1:189,
#' 1:188, 1:188, 1:231, 1:231, 1:231, 1:229, 1:220,
#' 1:220, 1:275, 1:169)
#' names(euroleague_nums) <- 2017:2000
#'
#' df0 <- data.frame()
#' df_bio0 <- data.frame(CombinID = NA, Player = NA, Position = NA,
#' Height = NA, Date_birth = NA,
#' Nationality = NA, Website_player = NA)
#' get_data <- get_games_rosters(competition = "Euroleague", nums = euroleague_nums,
#' verbose = TRUE, r_user = "[email protected]",
#' df0 = df0, df_bio0 = df_bio0)
#' euroleague_games <- get_data$df0
#' euroleague_players <- get_data$df_bio0
#' write_csv(euroleague_games, path = "euroleague_games.csv")
#' write_csv(euroleague_players, path = "euroleague_players.csv")
#'
#' # -----
#'
#' # Eurocup seasons available and corresponding games numbers:
#' eurocup_nums <- list(1:128,
#' 2:186, 1:306, 1:306, 1:366, 1:157, 1:156, 1:156, 1:156,
#' 1:151, 1:326, 1:149, 1:149, 1:239, 1:209, 1:150)
#' names(eurocup_nums) <- 2017:2002
#'
#' df0 <- data.frame()
#' df_bio0 <- data.frame(CombinID = NA, Player = NA, Position = NA,
#' Height = NA, Date_birth = NA,
#' Nationality = NA, Website_player = NA)
#' get_data <- get_games_rosters(competition = "Eurocup", nums = eurocup_nums,
#' verbose = TRUE, r_user = "[email protected]",
#' df0 = df0, df_bio0 = df_bio0)
#' eurocup_games <- get_data$df0
#' eurocup_players <- get_data$df_bio0
#' write_csv(eurocup_games, path = "eurocup_games.csv")
#' write_csv(eurocup_players, path = "eurocup_players.csv")
#'
#' }
#'
#' @export
get_games_rosters <- function(competition, type_league, nums, verbose = TRUE,
accents = FALSE, r_user, df0, df_bio0){
if (competition == "ACB") {
for (i in 1:length(nums)) {
if (verbose){
print(names(nums)[i])
}
df1 <- do_scraping_games(competition = competition, type_league = type_league,
nums = nums[[i]], year = names(nums)[i],
verbose = verbose, accents = accents, r_user = r_user)
df0 <- rbind(df0, df1)
#if (type_league == "ACB") {
pcode <- setdiff(df0$CombinID, df_bio0$CombinID)
pcode1 <- pcode[pcode != 0 & !is.na(pcode)]
if (verbose) {
print(length(pcode1))
}
df_bio1 <- do_scraping_rosters(competition = competition, pcode = pcode1,
verbose = verbose, accents = accents, r_user = r_user)
df_bio0 <- rbind(df_bio0, df_bio1)
#}
if (verbose) {
print("Done!")
}
}
}
if (competition %in% c("Euroleague", "Eurocup") ) {
for (i in 1:length(nums)) {
if (verbose){
print(names(nums)[i])
}
df1 <- do_scraping_games(competition = competition, nums = nums[[i]],
year = names(nums)[i],
verbose = verbose, r_user = r_user)
df0 <- rbind(df0, df1)
pcode <- setdiff(df0$CombinID, df_bio0$CombinID)
pcode1 <- pcode[pcode != 0 & !is.na(pcode)]
      if (verbose) {
        print(length(pcode1))
      }
df_bio1 <- do_scraping_rosters(competition = competition, pcode = pcode1,
year = names(nums)[i],
verbose = verbose, r_user = r_user)
df_bio0 <- rbind(df_bio0, df_bio1)
if (verbose) {
print("Done!")
}
}
}
if (!competition %in% c("ACB", "Euroleague", "Eurocup")) {
print("This competition is not available.")
}
return(list(df0 = df0, df_bio0 = df_bio0))
}
| /scratch/gouwar.j/cran-all/cranData/BAwiR/R/get_games_rosters.R |
#' Basketball heatmap
#'
#' @aliases get_heatmap_bb
#'
#' @description
#' The heatmap created with this function allows the user to easily represent
#' the stats for each player. The more intense the color, the more the player
#' highlights in the statistic considered. The plot can be ordered by any
#' statistic. If all the statistics are represented, the offensive statistics are
#' grouped in red, the defensive in green, the rest in blue and the advanced in pink.
#' Otherwise, the default color is red.
#'
#' @usage
#' get_heatmap_bb(df_stats, team, levels_stats = NULL, stat_ord, base_size = 9, title)
#'
#' @param df_stats Data frame with the statistics.
#' @param team Team.
#' @param levels_stats Statistics classified in several categories to plot.
#' If this is NULL, all the statistics are included in the data frame. Otherwise,
#' the user can define a vector with the variables to represent.
#' @param stat_ord To sort the heatmap on one particular statistic.
#' @param base_size Sets the font size in the theme used. Default 9.
#' @param title Plot title.
#'
#' @return
#' Graphical device.
#'
#' @author
#' This function has been created using the code from these websites:
#' \url{https://learnr.wordpress.com/2010/01/26/ggplot2-quick-heatmap-plotting/} and
#' \url{https://stackoverflow.com/questions/13016022/ggplot2-heatmaps-using-different-gradients-for-categories/13016912}
#'
#' @examples
#' \dontrun{
#' compet <- "ACB"
#' df <- do_join_games_bio(compet, acb_games_1718, acb_players_1718)
#' df1 <- do_add_adv_stats(df)
#' df2 <- do_stats(df1, "Total", "2017-2018", compet, "Regular Season")
#' teams <- as.character(rev(sort(unique(df2$Team))))
#' get_heatmap_bb(df2, teams[6], NULL, "MP", 9, paste(compet, "2017-2018", "Total", sep = " "))
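#'
#' # A sketch with a user-defined subset of statistics (a single category,
#' # so the default red gradient is used); the chosen stats are illustrative:
#' # get_heatmap_bb(df2, teams[6], list("Scoring" = c("PTS", "FG", "FGA")),
#' #                "PTS", 9, paste(compet, "2017-2018", "Total", sep = " "))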
#' }
#'
#' @importFrom plyr ddply
#' @importFrom dplyr select_if ungroup arrange desc
#' @importFrom stats reorder
#' @importFrom reshape2 melt
#' @importFrom ggplot2 ggplot aes geom_tile scale_fill_gradientn scale_x_discrete scale_y_discrete
#' @importFrom ggplot2 element_blank element_text theme_grey theme geom_text ggtitle
#' @importFrom scales rescale
#'
#' @export
get_heatmap_bb <- function(df_stats, team, levels_stats = NULL, stat_ord, base_size = 9, title){
Team <- CombinID <- Nationality <- Season <- Compet <- NULL
Type_season <- Type_stats <- Month <- MP <- NULL
value <- Variable <- Name <- rescaleoffset <- NULL
df <- df_stats %>%
filter(Team == team) %>%
ungroup(CombinID) %>%
select(-c(Team, CombinID, Position, Nationality, Season, Compet, Type_season, Type_stats)) #%>%
#arrange(desc(MP))
df_order <- data.frame(df)
#df_order1 <- df_order[order(df_order[, stat_ord], decreasing = TRUE), ]
df_order1 <- df_order[do.call("order", c(df_order[stat_ord], list(decreasing = TRUE))), ]
if (is.null(levels_stats)) {
levels_stats <- list("Offensive" = c("PTS", "FG", "FGA", "FGPerc",
"TwoP", "TwoPA", "TwoPPerc",
"ThreeP", "ThreePA", "ThreePPerc",
"FT", "FTA", "FTPerc",
"ORB", "AST", "TOV", "Counteratt",
"BLKag", "Dunks", "PFrv"),
"Defensive" = c("DRB", "STL", "BLKfv", "PF"),
"Other" = c("GP", "GS", "MP", "TRB", "PlusMinus", "PIR"),
"Advanced" = c("GameSc", "PIE", "EFGPerc", "ThreeRate", "FRate",
"STL_TOV", "AST_TOV", "PPS", "OE"))#, "EPS"))
}else{
levels_stats <- levels_stats
df_order1 <- df_order1[, c("Name", unlist(levels_stats))]
}
  # TRUE for columns that contain some non-zero value, so all-zero columns are dropped:
  not_all_zero <- function(x) !all(x == 0)
  df1 <- df_order1 %>%
    select_if(not_all_zero) #%>%
#select(Name, unlist(levels_stats))
df.m <- melt(df1)
df.s <- ddply(df.m, ~variable, transform, rescale = scale(value))
# This is needed when some column has the same value for all the players,
# so the column rescale has NaN values.
nas_rescale <- which(is.na(df.s$rescale))
if (length(nas_rescale ) != 0) {
df.s$rescale[nas_rescale] <- 0
}
df.s$Category <- df.s$variable
levels(df.s$Category) <- levels_stats
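  # Shift each category into its own band of width 100 so that a single fill
  # scale can apply a separate white-to-color gradient per category.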
df.s$rescaleoffset <- df.s$rescale + 100 * (as.numeric(df.s$Category) - 1)
scalerange <- range(df.s$rescale)
if (length(levels_stats) == 1) {
gradientends <- scalerange
colorends <- c("white", "red")
}else{
gradientends <- scalerange + rep(c(0, 100, 200, 300), each = 2)
colorends <- c("white", "red", "white", "green", "white", "blue", "white", "pink")
}
df.s$Variable <- reorder(df.s$variable, as.numeric(df.s$Category))
df.s$Name <- factor(df.s$Name, levels = rev(unique(df.s$Name)))
gg <- ggplot(df.s, aes(Variable, Name)) +
geom_tile(aes(fill = rescaleoffset), colour = "white") +
scale_fill_gradientn(colours = colorends, values = rescale(gradientends)) +
scale_x_discrete("", expand = c(0, 0)) +
scale_y_discrete("", expand = c(0, 0)) +
theme_grey(base_size = base_size) +
# To add the values to each tile:
geom_text(aes(label = value), size = 2.8) +
ggtitle(paste(capit_two_words(team), title, sep = " "))
if ("Offensive" %in% names(levels_stats)) {
gg <- gg +
geom_vline(xintercept = c(1.5, 4.5, 7.5, 10.5, 13.5)) +
# This is to box the columns related to all shots.
geom_segment(aes(x = 1.5, xend = 13.5, y = 0.5, yend = 0.5)) +
geom_segment(aes(x = 1.5, xend = 13.5, y = nrow(df1) + 0.5, yend = nrow(df1) + 0.5))
}
  if (length(levels_stats) == 1 && any(names(levels_stats) != "Offensive")) {
gg <- gg +
theme(legend.position = "none",
axis.ticks = element_blank(),
axis.text.x = element_text(hjust = 0, size = 12))
}else{
gg <- gg +
theme(legend.position = "none",
axis.ticks = element_blank(),
axis.text.x = element_text(angle = 300, hjust = 0, size = 6))
}
return(gg)
}
| /scratch/gouwar.j/cran-all/cranData/BAwiR/R/get_heatmap_bb.R |
#' Nationalities map
#'
#' @aliases get_map_nats
#'
#' @description
#' A world map is represented. The countries that provide players
#' to the selected competition are shown in green.
#'
#' @usage
#' get_map_nats(df_stats)
#'
#' @param df_stats Data frame with the statistics and the corrected nationalities.
#'
#' @return
#' Graphical device.
#'
#' @author
#' Guillermo Vinue
#'
#' @seealso
#' \code{\link{do_map_nats}}
#'
#' @examples
#' \dontrun{
#' compet <- "ACB"
#' df <- do_join_games_bio(compet, acb_games_1718, acb_players_1718)
#' df1 <- do_add_adv_stats(df)
#' df2 <- do_stats(df1, "Total", "2017-2018", compet, "Regular Season")
#' get_map_nats(df2)
#' }
#'
#' @importFrom ggplot2 geom_polygon
#'
#' @export
get_map_nats <- function(df_stats){
long <- lat <- region <- color_region <- NULL
df_all <- do_map_nats(df_stats)$df_all
P <- ggplot() +
geom_polygon(data = df_all, aes(x = long, y = lat, group = region,
fill = color_region), colour = "white") +
theme(axis.text.x = element_blank(),
axis.text.y = element_blank(),
axis.ticks.x = element_blank(),
axis.ticks.y = element_blank(),
axis.title = element_blank(),
legend.position = "none")
return(P)
}
| /scratch/gouwar.j/cran-all/cranData/BAwiR/R/get_map_nats.R |
#' Population pyramid
#'
#' @aliases get_pop_pyramid
#'
#' @description
#' This function builds a population pyramid with the number of both Spanish
#' and foreign players across the seasons of the ACB league.
#' This aids in discussing the nationality imbalance.
#'
#' @usage get_pop_pyramid(df, title, language)
#'
#' @param df Data frame that contains the ACB players' nationality.
#' @param title Title of the plot.
#' @param language String, "eng" for English labels; "esp" for Spanish labels.
#'
#' @return
#' Graphical device.
#'
#' @author
#' Guillermo Vinue
#'
#' @examples
#' \dontrun{
#' # Load the data_app_acb file with the ACB games
#' # from seasons 1985-1986 to 2017-2018:
#' load(url("http://www.uv.es/vivigui/softw/data_app_acb.RData"))
#' title <- " Number of Spanish and foreign players along the ACB seasons \n Data from www.acb.com"
#' get_pop_pyramid(data_app_acb, title, "eng")
#' }
#'
#' @importFrom ggplot2 guide_legend
#' @importFrom dplyr count
#'
#' @export
get_pop_pyramid <- function(df, title, language) {
Nationality <- Nationality_1 <- NULL
  Number <- Player.x <- Season <- n <- NULL
if (language == "eng") {
cat1 <- "Spanish"
cat2 <- "Foreigner"
xlab <- "Season\n"
ylab <- "Number of players"
}else if (language == "esp") {
cat1 <- "Espana"
cat2 <- "Extranjero"
xlab <- "Temporada\n"
ylab <- "Numero de jugadores"
}else{
stop("Language must be 'eng' for English and 'esp' for Spanish.")
}
df1 <- df %>%
mutate(Nationality_1 = ifelse(Nationality == "Spain", cat1, cat2)) %>%
distinct(Player.x, Season, Nationality_1) %>%
group_by(Season) %>%
count(Nationality_1) %>%
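    # Foreign counts are negated so their bars extend to the opposite side of the pyramid.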
mutate(Number = ifelse(Nationality_1 == cat2, -n, n)) %>%
#rename(Number = n) %>%
select(Season, Nationality_1, Number)
df1$Season <- as.factor(df1$Season)
g1 <- ggplot(data = df1, aes(x = Season, fill = Nationality_1, group = Nationality_1)) +
geom_bar(aes(y = Number), stat = "identity") + #, subset(df1, df1$Nationality_1 == "Spanish")) +
#geom_bar(aes(y = Number), stat = "identity", subset(df1, df1$Nationality_1 == "Foreigner")) +
geom_text(aes(y = Number, label = Number),
subset(df1, df1$Nationality_1 == cat1),
size = 4, hjust = -0.1) +
geom_text(aes(y = Number, label = Number * (-1)),
subset(df1, df1$Nationality_1 == cat2),
size = 4, hjust = -0.1) +
coord_flip() +
labs(x = xlab, y = ylab,
title = title,
fill = guide_legend(title = "")) +
scale_y_continuous(breaks = seq(-270, 270, 30), labels = abs(seq(-270, 270, 30))) +
scale_x_discrete(limits = rev(levels(df1$Season))) #+
#theme_few(base_size = 14) # @importFrom ggthemes theme_few
return(g1)
} | /scratch/gouwar.j/cran-all/cranData/BAwiR/R/get_pop_pyramid.R |
#' Shooting plot
#'
#' @aliases get_shooting_plot
#'
#' @description
#' This plot represents the number of shots attempted and scored by every
#' player of the same team, together with the scoring percentage.
#' The players are sortered by percentage.
#'
#' @usage get_shooting_plot(df_stats, team, type_shot, min_att, title, language)
#'
#' @param df_stats Data frame with the statistics.
#' @param team Team.
#' @param type_shot Numeric with values 1-2-3: 1 refers to free throws,
#' 2 refers to two point shots and 3 refers to three points shots.
#' @param min_att Minimum number of attempts by the player to
#' be represented in the plot.
#' @param title Plot title.
#' @param language Language labels. Current options are 'en' for English
#' and 'es' for Spanish.
#'
#' @return
#' Graphical device.
#'
#' @author
#' Guillermo Vinue
#'
#' @examples
#' \dontrun{
#' compet <- "ACB"
#' df <- do_join_games_bio(compet, acb_games_1718, acb_players_1718)
#' df1 <- do_add_adv_stats(df)
#' df2 <- do_stats(df1, "Total", "2017-2018", compet, "Regular Season")
#' get_shooting_plot(df2, "Valencia", 3, 1,
#' paste("Valencia", compet, "2017-2018", sep = " "), "en")
#' }
#'
#' @importFrom ggplot2 geom_segment theme_minimal scale_x_continuous
#'
#' @export
get_shooting_plot <- function(df_stats, team, type_shot, min_att, title, language){
Team <- Name <- FT <- FTA <- TwoP <- TwoPA <- ThreeP <- ThreePA <- NULL
  total_att <- total_sco <- perc_sco <- perc_no_sco <- total_no_sco <- NULL
  if (!language %in% c("en", "es")) {
    stop("Language must be 'en' for English and 'es' for Spanish.")
  }
if (type_shot == 1) {
df1 <- df_stats %>%
ungroup() %>%
filter(Team == team) %>%
select(Name, FT, FTA) %>%
group_by(Name) %>%
summarise(total_att = sum(FTA), total_sco = sum(FT))
if (language == "en") {
color1 <- "Free throws scored"
color2 <- "Free throws missed"
}else if (language == "es") {
color1 <- "Tiros libres anotados"
color2 <- "Tiros libres fallados"
}
}else if (type_shot == 2) {
df1 <- df_stats %>%
ungroup() %>%
filter(Team == team) %>%
select(Name, TwoP, TwoPA) %>%
group_by(Name) %>%
summarise(total_att = sum(TwoPA), total_sco = sum(TwoP))
if (language == "en") {
color1 <- "Two-points scored"
color2 <- "Two-points missed"
}else if (language == "es") {
color1 <- "Tiros de dos anotados"
color2 <- "Tiros de dos fallados"
}
}else if (type_shot == 3) {
df1 <- df_stats %>%
ungroup() %>%
filter(Team == team) %>%
select(Name, ThreeP, ThreePA) %>%
group_by(Name) %>%
summarise(total_att = sum(ThreePA), total_sco = sum(ThreeP))
if (language == "en") {
color1 <- "Three-points scored"
color2 <- "Three-points missed"
}else if (language == "es") {
color1 <- "Tiros de tres anotados"
color2 <- "Tiros de tres fallados"
}
}
df_tm <- df1 %>%
mutate(total_no_sco = total_att - total_sco) %>%
mutate(perc_sco = round(ifelse(total_att == 0, 0, (total_sco / total_att) * 100), 1)) %>%
mutate(perc_no_sco = round(100 - perc_sco, 1)) %>%
filter(total_att >= min_att) %>%
arrange(perc_sco)
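  # Lock the factor levels to the arranged order so players appear sorted by percentage in the plot.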
df_tm$Name <- factor(df_tm$Name, levels = df_tm$Name)
gg <- ggplot(df_tm) +
geom_segment(aes(0, Name, xend = perc_sco, yend = Name, color = color1),
size = 13) +
geom_segment(aes(perc_sco, Name, xend = perc_sco + perc_no_sco, yend = Name,
color = color2), size = 13) +
geom_text(aes(x = 1, y = Name, label = total_sco), hjust = 0,
nudge_x = 0.01, size = 7) +
geom_text(aes(x = 99, y = Name, label = total_no_sco), hjust = 1,
nudge_x = -0.01, size = 7) +
geom_text(aes(x = 104, y = Name, label = total_att), hjust = 1,
nudge_x = -0.01, nudge_y = 0.14, size = 5) +
geom_text(aes(x = 110, y = Name, label = perc_sco), hjust = 1,
nudge_x = -0.01, nudge_y = 0.14, size = 5) +
labs(x = NULL, y = NULL) +
scale_x_continuous(breaks = seq(0, 100, 25), labels = c("0%", "25%", "50%", "75%", "100%")) +
#@importFrom hrbrthemes scale_x_percent scale_color_ipsum
#scale_x_percent(breaks = seq(0,100,25), labels = c("0%", "25%", "50%", "75%", "100%")) +
#scale_color_ipsum(name = NULL) +
theme_minimal(base_size = 17) +
theme(#axis.text.x = element_text(hjust = c(0, 0.5, 0.5, 0.5, 1)),
legend.position = c(0.7, 1.025),
legend.direction = "horizontal",
legend.title = element_blank()) +
ggtitle(paste(capit_two_words(team), title, sep = " "))
return(gg)
} | /scratch/gouwar.j/cran-all/cranData/BAwiR/R/get_shooting_plot.R |
#' Similar players to archetypoids
#'
#' @aliases get_similar_players
#'
#' @description
#' Similar players to the archetypoids computed with
#' \code{\link[Anthropometry]{archetypoids}} according to a similarity threshold.
#'
#' @usage get_similar_players(atype, threshold, alphas, cases, data, variables, compet, season)
#'
#' @param atype Number assigned to the archetypoid (1:length(\code{cases})) from which
#' to search for the players who most resemble it.
#' @param threshold Similarity threshold.
#' @param alphas Alpha values of all the players.
#' @param cases Archetypoids.
#' @param data Data frame with the statistics.
#' @param variables Statistics used to compute the archetypoids.
#' @param compet Competition.
#' @param season Season.
#'
#' @return
#' Data frame with the features of the similar players.
#'
#' @author
#' Guillermo Vinue
#'
#' @seealso
#' \code{\link[Anthropometry]{archetypoids}}
#'
#' @examples
#' (s0 <- Sys.time())
#' # Turn off temporarily some negligible warnings from the
#' # archetypes package to avoid misunderstandings. The code works well.
#' library(Anthropometry)
#' df <- do_join_games_bio("ACB", acb_games_1718, acb_players_1718)
#' df1 <- do_add_adv_stats(df)
#' df2 <- do_stats(df1, "Total", "2017-2018", "ACB", "Regular Season")
#' df3 <- df2[which(df2$Position == "Guard")[1:31], c("MP", "PTS", "Name")]
#' preproc <- preprocessing(df3[,1:2], stand = TRUE, percAccomm = 1)
#' set.seed(4321)
#' suppressWarnings(lass <- stepArchetypesRawData(preproc$data, 1:2,
#' numRep = 20, verbose = FALSE))
#' res <- archetypoids(2, preproc$data, huge = 200, step = FALSE, ArchObj = lass,
#' nearest = "cand_ns", sequ = TRUE)
#' # The S3 class of anthrCases from Anthropometry has been updated.
#' cases <- anthrCases(res)
#' df3[cases,] # https://github.com/r-quantities/units/issues/225
#' alphas <- round(res$alphas, 4)
#' df3_aux <- df2[which(df2$Position == "Guard")[1:31], ]
#' get_similar_players(1, 0.99, alphas, cases, df3_aux, c("MP", "PTS"),
#' unique(df3_aux$Compet), unique(df3_aux$Season))
#' s1 <- Sys.time() - s0
#' s1
#'
#' @export
get_similar_players <- function(atype, threshold, alphas, cases, data, variables, compet, season) {
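  # Keep the players whose alpha coefficient for this archetypoid exceeds the
  # threshold; the archetypoid itself is excluded next.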
vec <- which(alphas[atype,] > threshold)
vec <- vec[!vec == cases[atype]]
good_coef <- t(alphas[,vec])
good_dat <- data[vec, c("Name", "Position", "Team", variables, "CombinID")]
#good_dat1 <- cbind(good_dat, good_coef)
good_dat1 <- cbind(as.data.frame(good_dat), good_coef)
#good_dat2 <- good_dat1[order(good_dat1[, colnames(good_dat1) == atype], decreasing = TRUE),]
cols <- as.character(atype)
good_dat2 <- good_dat1[do.call("order", c(good_dat1[cols], list(decreasing = TRUE))),]
# I have also added as.data.frame here:
good_dat3 <- rbind(as.data.frame(data[cases[atype], c("Name", "Position", "Team",
variables, "CombinID")]),
good_dat2[, c("Name", "Position", "Team", variables, "CombinID")])
if (compet == "ACB") {
good_dat3$CombinID <- paste("http://www.acb.com/jugador.php?id=", good_dat3$CombinID, sep = "")
}else if (compet == "Euroleague") {
good_dat3$CombinID <- paste("http://www.euroleague.net/competition/players/showplayer?pcode=",
good_dat3$CombinID, "&seasoncode=E", substr(season, 1, 4), sep = "")
}else if (compet == "Eurocup") {
good_dat3$CombinID <- paste("http://www.eurocupbasketball.com/eurocup/competition/players/showplayer?pcode=",
good_dat3$CombinID, "&seasoncode=U", substr(season, 1, 4), sep = "")
}
  colnames(good_dat3)[colnames(good_dat3) == "CombinID"] <- "Web_info"
return(good_dat3)
} | /scratch/gouwar.j/cran-all/cranData/BAwiR/R/get_similar_players.R |
#' Similar teams to archetypoids
#'
#' @aliases get_similar_teams
#'
#' @description
#' Similar teams to the archetypoids computed with
#' \code{\link[Anthropometry]{archetypoids}} according to a similarity threshold.
#'
#' @usage get_similar_teams(atype, threshold, alphas, cases, data, variables)
#'
#' @param atype Number assigned to the archetypoid (1:length(\code{cases})) from which
#' to search for the teams that most resemble it.
#' @param threshold Similarity threshold.
#' @param alphas Alpha values of all the players.
#' @param cases Archetypoids.
#' @param data Data frame with the statistics.
#' @param variables Statistics used to compute the archetypoids.
#'
#' @return
#' Data frame with the features of the similar teams.
#'
#' @author
#' Guillermo Vinue
#'
#' @seealso
#' \code{\link[Anthropometry]{archetypoids}}
#'
#' @examples
#' \dontrun{
#' (s0 <- Sys.time())
#' library(Anthropometry)
#' df <- do_join_games_bio("ACB", acb_games_1718, acb_players_1718)
#' df$Compet <- "ACB"
#' df_teams <- do_stats_teams(df, "2017-2018", "ACB", "Regular Season")
#' df_team_total <- df_teams$df_team_total
#'
#' df3 <- df_team_total[, c("PTS", "PTSrv", "Team")]
#' preproc <- preprocessing(df3[,1:2], stand = TRUE, percAccomm = 1)
#' set.seed(4321)
#' lass <- stepArchetypesRawData(preproc$data, 1:2, numRep = 20, verbose = FALSE)
#' res <- archetypoids(2, preproc$data, huge = 200, step = FALSE, ArchObj = lass,
#' nearest = "cand_ns", sequ = TRUE)
#' cases <- anthrCases(res)
#' df3[cases,]
#' alphas <- round(res$alphas, 4)
#'
#' get_similar_teams(1, 0.95, alphas, cases, df_team_total, c("PTS", "PTSrv"))
#' s1 <- Sys.time() - s0
#' s1
#' }
#'
#' @export
get_similar_teams <- function(atype, threshold, alphas, cases, data, variables) {
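  # Keep the teams whose alpha coefficient for this archetypoid exceeds the
  # threshold; the archetypoid itself is excluded next.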
vec <- which(alphas[atype,] > threshold)
vec <- vec[!vec == cases[atype]]
good_coef <- t(alphas[,vec])
good_dat <- data[vec, c("Team", variables)]
#good_dat1 <- cbind(good_dat, good_coef)
good_dat1 <- cbind(as.data.frame(good_dat), good_coef)
#good_dat2 <- good_dat1[order(good_dat1[, colnames(good_dat1) == atype], decreasing = TRUE),]
cols <- as.character(atype)
good_dat2 <- good_dat1[do.call("order", c(good_dat1[cols], list(decreasing = TRUE))),]
good_dat3 <- rbind(as.data.frame(data[cases[atype], c("Team", variables)]),
good_dat2[, c("Team", variables)])
return(good_dat3)
} | /scratch/gouwar.j/cran-all/cranData/BAwiR/R/get_similar_teams.R |
#' Season-by-season stats
#'
#' @aliases get_stats_seasons
#'
#' @description
#' This function represents the average values of a set of statistics
#' for the chosen players in every season in which they played. It gives
#' an idea of their season-by-season performance.
#'
#' @usage get_stats_seasons(df, competition, player, variabs, type_season, add_text, show_x_axis)
#'
#' @param df Data frame with the games and the players info.
#' @param competition Competition.
#' @param player Players's names.
#' @param variabs Vector with the statistics to plot.
#' @param type_season String with the round of competition, for example regular season
#' or playoffs and so on.
#' @param add_text Boolean. Should text be added to the plot points?
#' @param show_x_axis Boolean. Should x-axis labels be shown in the plot?
#'
#' @return
#' List with two elements:
#' \itemize{
#' \item gg Graphical device.
#' \item df_gg Data frame associated with the plot.
#' }
#'
#' @author
#' Guillermo Vinue
#'
#' @examples
#' \dontrun{
#' competition <- "ACB"
#' df <- do_join_games_bio("ACB", acb_games_1718, acb_players_1718)
#' df$Compet <- competition
#' player <- "Carroll, Jaycee"
#' variabs <- c("GP", "MP", "PTS", "EFGPerc", "TRB", "AST", "TOV", "PIR")
#' plot_yearly <- get_stats_seasons(df, competition, player, variabs, "All", TRUE, TRUE)
#' plot_yearly$gg
#' # There are only games from the regular season in this demo data frame.
#' plot_yearly1 <- get_stats_seasons(df, competition, player, variabs, "Regular Season",
#' TRUE, TRUE)
#' plot_yearly1$gg
#' }
#'
#' @importFrom ggplot2 geom_point geom_line
#' @importFrom dplyr do
#'
#' @export
get_stats_seasons <- function(df, competition, player, variabs, type_season,
add_text, show_x_axis){
Compet <- Player.x <- Season <- Name <- Team <- NULL
Type_season <- Age <- value <- variable <- NULL
if (type_season == "All") {
df1 <- df %>%
filter(Compet == competition,
Player.x %in% player)
}else{
df1 <- df %>%
filter(Compet == competition,
Player.x %in% player,
Type_season == type_season)
}
  # It might happen that the player hasn't played in the competition.
if (nrow(df1) == 0) {
return(NA)
}
df2 <- df1 %>%
group_by(Player.x) %>%
do(do_add_adv_stats(.))
df_gg <- data.frame()
df_all <- data.frame()
  for (i in seq_along(player)) {
df3 <- data.frame()
df21 <- df2 %>%
filter(Player.x == player[i])
for (j in rev(unique(df21$Season))) {
df2_loop <- df21 %>%
filter(Season == j)
if (type_season == "All") {
df3_loop <- do_stats(df2_loop,
"Average",
unique(df2_loop$Season),
unique(df2_loop$Compet),
"All")
}else{
df3_loop <- do_stats(df2_loop,
"Average",
unique(df2_loop$Season),
unique(df2_loop$Compet),
unique(df2_loop$Type_season))
}
df3 <- bind_rows(df3, df3_loop)
}
df4 <- df3 %>%
select(Name, Team, Season, variabs)
Date_birth <- unique(df21$Date_birth)
# Age of Player at the start of October 1st of that season.
# For instance, season 2013-2014, age at October 1st 2013.
#df4$Age <- round((as.Date(paste("1/10/", substr(df4$Season, 1, 4), sep = ""), "%d/%m/%Y") -
# as.Date(Date_birth, "%d/%m/%Y")) / 365.25, 1)
# Age of Player at the start of February 1st of that season.
# For instance, season 2013-2014, age at February 1st 2014.
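    # ACB seasons are coded as "YYYY-YYYY", whereas Euroleague/Eurocup seasons
    # are coded as "YYYY-YY", so the century prefix has to be re-added below.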
if (competition == "ACB") {
year_season <- sapply(strsplit(df4$Season, "-"), `[`, 2)
}else{
year_season <- sapply(strsplit(df4$Season, "-"), `[`, 2)
year_season <- as.numeric(paste(20, year_season, sep = ""))
}
df4$Age <- trunc((as.Date(paste("1/02/", year_season, sep = ""), "%d/%m/%Y") -
as.Date(Date_birth, "%d/%m/%Y")) / 365.25)
df_gg <- rbind(df_gg, df4)
df5 <- melt(df4 %>% select(-Age))
df_all <- rbind(df_all, df5)
}
  if (length(player) == 1) {
size_x_axis <- 13
size_x_strip <- 17
}else{
size_x_axis <- 5
size_x_strip <- 7
}
gg1 <- ggplot(df_all, aes(x = Season, y = value, group = variable, color = variable)) +
geom_point() +
geom_line() +
facet_wrap(~Name, scales = "free") +
labs(x = "", y = "") +
theme(legend.title = element_blank(),
strip.text.x = element_text(size = size_x_strip),
axis.text.x = element_blank(),
axis.text.y = element_text(size = 15))
if (add_text) {
gg1 <- gg1 + geom_text(aes(label = value), hjust = -0.5, color = "black")
}
if (show_x_axis) {
gg1 <- gg1 + theme(axis.text.x = element_text(angle = 30, size = size_x_axis))
}
return(list(gg = gg1, df_gg = df_gg))
}
| /scratch/gouwar.j/cran-all/cranData/BAwiR/R/get_stats_seasons.R |
#' League cross table
#'
#' @aliases get_table_results
#'
#' @description
#' The league results are represented with a cross table.
#'
#' @usage get_table_results(df, competition, season)
#'
#' @param df Data frame with the games and the players info.
#' @param competition Competition.
#' @param season Season.
#'
#' @return
#' List with these two elements:
#' \itemize{
#' \item plot_teams Graphical device with the cross table.
#' \item wins_teams Vector with the team wins.
#' }
#'
#' @author
#' Guillermo Vinue
#'
#' @examples
#' \dontrun{
#' df <- do_join_games_bio("ACB", acb_games_1718, acb_players_1718)
#' df$Compet <- "ACB"
#'
#' gg <- get_table_results(df, "ACB", "2017-2018")
#'
#' gg$wins_teams
#' gg$plot_teams
#' }
#'
#' @importFrom ggplot2 geom_tile
#' @importFrom tidyr separate
#'
#' @export
get_table_results <- function(df, competition, season){
Compet <- Type_season <- Season <- Game <- GameRes <- Win_num <- NULL
Team <- Local <- Visitor <- Local_points <- Visitor_points <- Win <- NULL
if (competition == "DIA") {
df1 <- df %>%
filter(Type_season == "Regular_Season",
Season == season) %>%
distinct(Game, GameRes, Team)
}else{
df1 <- df %>%
filter(Compet == competition,
Type_season == "Regular Season",
Season == season) %>%
distinct(Game, GameRes, Team)
}
df11 <- df1 %>%
group_by(Game) %>%
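    # The scraped rows list the local team first, so Team[1]/Team[2] give local/visitor.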
mutate(Local = Team[1], Visitor = Team[2]) %>%
distinct(Game, GameRes, Local, Visitor)
if (competition == "ACB") {
df2 <- df11 %>%
ungroup() %>%
separate(GameRes, c("Local_points", "Visitor_points"), " - ") %>%
select(-Game)
}else{
df2 <- df11 %>%
ungroup() %>%
separate(GameRes, c("Local_points", "Visitor_points"), "-") %>%
select(-Game)
}
df2$GameRes <- df11$GameRes
df2$Local <- as.character(df2$Local)
df2$Visitor <- as.character(df2$Visitor)
df3 <- df2 %>%
mutate(Local_points = as.numeric(Local_points)) %>%
mutate(Visitor_points = as.numeric(Visitor_points)) %>%
mutate(Win = ifelse(Local_points > Visitor_points, Local, Visitor))
wins_teams <- sort(table(df3$Win), decreasing = TRUE)
df4 <- df3 %>%
select(-Local_points, -Visitor_points) %>%
arrange(Local, Visitor) %>%
mutate(Win_num = ifelse(Local == Win, 1, 2)) %>%
select(-Win)
df41 <- df4 %>%
mutate(Win_num = as.character(Win_num))
gg <- ggplot(data = df41, aes(x = Visitor, y = Local, fill = Win_num)) +
    geom_tile(colour = "white", size = 0.2) +
labs(title = paste("Regular Season", competition, season, sep = " ")) +
theme(axis.ticks = element_blank(),
legend.title = element_blank(),
axis.text.x = element_text(angle = 30)) +
geom_text(aes(label = GameRes), size = 4)
return(list(plot_teams = gg, wins_teams = wins_teams))
} | /scratch/gouwar.j/cran-all/cranData/BAwiR/R/get_table_results.R |
#' Join ACB games and players' info
#'
#' @aliases join_players_bio_age_acb
#'
#' @description
#' This function joins the ACB games with the players' bio
#' and computes the players' age at each game.
#'
#' @usage
#' join_players_bio_age_acb(df_games, df_rosters)
#'
#' @param df_games Data frame with the games.
#' @param df_rosters Data frame with the biography of the roster players.
#'
#' @return
#' Data frame.
#'
#' @author
#' Guillermo Vinue
#'
#' @seealso
#' \code{\link{do_join_games_bio}}
#'
#' @examples
#' df <- join_players_bio_age_acb(acb_games_1718, acb_players_1718)
#'
#' @importFrom dplyr filter left_join mutate
#' @importFrom purrr map_if
#' @importFrom tidyr drop_na
#' @importFrom lubridate month
#'
#' @export
join_players_bio_age_acb <- function(df_games, df_rosters){
  Date_birth <- Player.x <- CombinID <- Date <- Age <- Position <- NULL
df_bio_updated1 <- df_rosters %>%
filter(Date_birth != "-</td>") %>%
filter(Position != "|") %>%
droplevels() %>% # To drop unused levels after filtering by factor
map_if(is.factor, as.character) %>%
#as_data_frame()
as_tibble()
# Merge both data frames using column CombinID:
df_merg <- left_join(df_games, df_bio_updated1, by = "CombinID")
# Remove rows with NA (Some players such as Tavares had two CombinID in the games scraping
# but only one exists as a website, so there will be no biography data for these players
# in the games with the wrong duplicated CombinID):
df_merg1 <- df_merg %>% drop_na(Position)
# Compute player's age the day of the game:
df_merg_ages <- df_merg1 %>%
# Dividing by 365.25 is accurate enough.
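    # For example, a player born on 01/01/1990 playing on 01/01/2018:
    # 10227 days / 365.25 = 27.99863 years.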
mutate(Age = (as.Date(Date, "%d/%m/%Y") - as.Date(Date_birth, "%d/%m/%Y")) / 365.25) %>%
mutate(Age = round(Age, 5))
# Add month:
df_merg_ages$Month <- month(df_merg_ages$Date, label = TRUE, abbr = FALSE)
return(df_merg_ages)
} | /scratch/gouwar.j/cran-all/cranData/BAwiR/R/join_players_bio_age_acb.R |
#' Join Euroleague and Eurocup games and players' info
#'
#' @aliases join_players_bio_age_euro
#'
#' @description
#' This function joins the Euroleague/Eurocup games with the players' bio
#' and computes the players' age at each game.
#'
#' @usage
#' join_players_bio_age_euro(df_games, df_rosters)
#'
#' @param df_games Data frame with the games.
#' @param df_rosters Data frame with the biography of the roster players.
#'
#' @return
#' Data frame.
#'
#' @author
#' Guillermo Vinue
#'
#' @seealso
#' \code{\link{do_join_games_bio}}
#'
#' @examples
#' df <- join_players_bio_age_euro(euroleague_games_1718, euroleague_players_1718)
#'
#' @importFrom dplyr filter left_join mutate
#' @importFrom purrr map_if
#' @importFrom lubridate month
#'
#' @export
join_players_bio_age_euro <- function(df_games, df_rosters){
Nationality <- Date_birth <- Date <- Age <- NULL
df_bio_updated1 <- df_rosters %>%
filter(Nationality != "</div>") %>%
droplevels() %>% # To drop unused levels after filtering by factor
map_if(is.factor, as.character) %>%
#as_data_frame()
as_tibble()
# Merge both data frames using column CombinID
df_merg <- left_join(df_games, df_bio_updated1, by = "CombinID")
df_merg$Position[which(is.na(df_merg$Position))] <- "Unknown"
df_merg1 <- df_merg %>%
filter(!is.na(Date_birth)) %>%
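    # "01/01/1753" appears to be used on the website as a placeholder for
    # unknown birth dates, so those rows are dropped too.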
filter(Date_birth != "01/01/1753")
# Compute player's age the day of the game:
df_merg_ages <- df_merg1 %>%
# Dividing by 365.25 is accurate enough.
mutate(Age = (as.Date(Date, "%d/%m/%Y") - as.Date(Date_birth, "%d/%m/%Y")) / 365.25) %>%
mutate(Age = round(Age, 5))
# Add month:
df_merg_ages$Month <- month(df_merg_ages$Date, label = TRUE, abbr = FALSE)
return(df_merg_ages)
}
| /scratch/gouwar.j/cran-all/cranData/BAwiR/R/join_players_bio_age_euro.R |
#' ACB player game finder data
#'
#' @aliases scraping_games_acb
#'
#' @description
#' This is the new function to obtain the ACB box score data.
#'
#' @usage
#' scraping_games_acb(code, game_id, season = "2020-2021",
#' type_season = "Regular Season",
#' user_email, user_agent_goo)
#'
#' @param code Game code.
#' @param game_id Game id.
#' @param season Season, e.g. 2022-2023.
#' @param type_season Type of season, e.g. 'Regular Season'.
#' @param user_email Email's user to identify the user when doing web scraping.
#' This is a polite way to do web scraping and to certify that the user
#' is working as transparently as possible with a research purpose.
#' @param user_agent_goo User-agent to identify the user when doing web scraping.
#' This is a polite way to do web scraping and to certify that the user
#' is working as transparently as possible with a research purpose.
#'
#' @return
#' A data frame with the player game finder data (box score data).
#'
#' @author
#' Guillermo Vinue
#'
#' @seealso
#' \code{\link{scraping_games_acb_old}}
#'
#' @examples
#' \dontrun{
#' # Not needed to scrape every time the package is checked, built and installed.
#' user_email <- "yours"
#' user_agent_goo <- "yours"
#' df1 <- scraping_games_acb("103350", 1, "2022-2023", "Regular Season",
#' user_email, user_agent_goo)
#' }
#'
#' @importFrom rvest html_nodes html_text
#' @importFrom readr parse_number
#' @importFrom stringi stri_replace_all_charclass
#' @importFrom dplyr across mutate_at vars relocate
#'
#' @export
scraping_games_acb <- function(code, game_id, season = "2020-2021", type_season = "Regular Season",
user_email, user_agent_goo) {
Number <- Player <- TwoP <- ThreeP <- FT <- RB <- Team <- GameRes <- NULL
Coach <- Website <- NULL
col_stats <- c("Number", "Player", "MP", "PTS", "TwoP", "TwoPPerc",
"ThreeP", "ThreePPerc", "FT", "FTPerc", "TRB", "RB",
"AST", "STL", "TOV", "Counteratt", "BLKfv", "BLKag",
"Dunks", "PF", "PFrv", "PlusMinus", "PIR")
url_base <- "http://www.acb.com/partido/estadisticas/id/"
url_link <- paste(url_base, code, sep = "")
link_content <- GET(url_link,
user_agent(str_c(user_agent_goo, R.version$version.string, sep = ", ")),
add_headers(from = user_email))
  if (link_content$status_code == 404) {
stop("URL not found. Please check if it exists.")
}else{
url_html <- read_html(url_link)
# Basic games' data:
url_data <- url_html %>%
html_nodes(xpath = './/div[@class="datos_fecha roboto_bold colorweb_4 float-left bg_principal"]') %>%
html_text()
url_data1 <- trimws(strsplit(url_data, "\\|")[[1]])
# Referees:
url_refs <- url_html %>%
html_nodes(xpath = './/div[@class="datos_arbitros bg_gris_claro colorweb_2 float-left roboto_light"]') %>%
html_text()
url_refs1 <- gsub(".*: ", "", url_refs)
# Result and teams involved:
url_res <- url_html %>%
html_nodes(xpath = './/h6[@class="colorweb_4 bg_azul_oscuro roboto_bold"]') %>%
html_text()
res <- paste(parse_number(url_res), collapse = " - ")
teams <- paste(gsub(paste(parse_number(url_res), collapse = "|"), "", url_res), collapse = "-")
teams1 <- stri_replace_all_charclass(teams, "\\p{WHITE_SPACE}", "")
teams_sep <- strsplit(teams1, "-")[[1]]
# GAME:
url_tab <- url_html %>%
html_table(fill = TRUE)
score <- url_tab[[1]]
score1 <- score[,-c(2,ncol(score))]
score2 <- sapply(score1, function(x) paste(x[1], x[2], sep = "-"))[-1]
# HOME:
stats_home <- url_tab[[2]]
stats_home1 <- stats_home[-1,]
colnames(stats_home1) <- stats_home[1,]
colnames(stats_home1) <- col_stats
coach_home <- stats_home1$Player[stats_home1$Number == "E"]
stats_home2 <- stats_home1 %>%
filter(!Number %in% c("E", "5f")) %>% # "",
filter(Player != "Total")
# Change the blank cells to 0:
stats_home3 <- stats_home2 %>%
mutate(across(everything(), ~ifelse(.== "", 0, as.character(.)))) %>%
mutate(Team = teams_sep[1],
Coach = coach_home)
# AWAY:
stats_away <- url_tab[[3]]
stats_away1 <- stats_away[-1,]
colnames(stats_away1) <- stats_away[1,]
colnames(stats_away1) <- col_stats
coach_away <- stats_away1$Player[stats_away1$Number == "E"]
stats_away2 <- stats_away1 %>%
filter(!Number %in% c("E", "5f")) %>% # "",
filter(Player != "Total")
# Change the blank cells to 0:
stats_away3 <- stats_away2 %>%
mutate(across(everything(), ~ifelse(.== "", 0, as.character(.)))) %>%
mutate(Team = teams_sep[2],
Coach = coach_away)
# Join home and away
stats_game <- rbind(stats_home3, stats_away3) %>%
mutate(GS = ifelse(grepl("\\*", Number), 1, 0), .after = Number) %>%
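    # Shots arrive as "made/attempted" and rebounds as "defensive+offensive";
    # split them into separate columns.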
separate(TwoP, c("TwoP", "TwoPA"), sep = "/", fill = "left") %>%
separate(ThreeP, c("ThreeP", "ThreePA"), sep = "/", fill = "left") %>%
separate(FT, c("FT", "FTA"), sep = "/", fill = "left") %>%
separate(RB, c("DRB", "ORB"), sep = "\\+", fill = "left") %>%
mutate_at(vars(contains("Perc")), ~gsub("%", "", .)) %>%
mutate(Season = season,
Type_season = type_season,
Day = gsub("JORNADA ", "", url_data1[1]),
Date = url_data1[2],
Game = tolower(gsub("-", " - ", teams1)),
GameRes = res,
GameID = game_id,
Website = url_link) %>%
relocate(Team, .after = GameRes) %>%
mutate(Periods = paste(score2, collapse = " ; "),
Time = url_data1[3],
Place = url_data1[4],
Audience = url_data1[5],
Referees = url_refs1) %>%
relocate(Coach, .after = Website)
stats_game[is.na(stats_game)] <- "0"
# PLAYERS:
  # Reuse the page already parsed above instead of requesting it again:
  url_player <- url_html %>%
    html_nodes(xpath = './/td[@class="nombre jugador ellipsis"]') %>%
as.character()
url_player1 <- sapply(strsplit(url_player, '\\/ver*.'), `[`, 2)
url_player2 <- gsub("-.*", "", url_player1)
#url_player3 <- url_player2[!is.na(url_player2)]
stats_game1 <- stats_game %>%
mutate(CombinID = url_player2, .after = Website)
return(data_game = stats_game1)
}
}
| /scratch/gouwar.j/cran-all/cranData/BAwiR/R/scraping_games_acb.R |
#' Old ACB player game finder data
#'
#' @aliases scraping_games_acb_old
#'
#' @description
#' This function allowed us to get all the player game finder data for
#' all the desired ACB seasons available from:
#' \url{https://www.acb.com}. It was an old version that worked before the
#' internal structure of the ACB website changed. The updated function is
#' now \code{\link{scraping_games_acb}}.
#'
#' @usage
#' scraping_games_acb_old(type_league, nums, year, verbose = TRUE,
#' accents = FALSE, r_user = "[email protected]")
#'
#' @param type_league String. If \code{competition} is ACB, to scrape
#' ACB league games ("ACB"), Copa del Rey games ("CREY") or Supercopa games ("SCOPA").
#' @param nums Numbers corresponding to the website to scrape.
#' @param year Season, e.g. 2017-2018.
#' @param verbose Should R report information on progress? Default TRUE.
#' @param accents Should we keep the Spanish accents? The recommended
#' option is to remove them, so default FALSE.
#' @param r_user Email to identify the user when doing web scraping.
#' This is a polite way to do web scraping and to certify that the user
#' is working as transparently as possible with a research purpose.
#'
#' @details
#' The official website of the Spanish basketball league ACB used to present the
#' statistics of each game on a PHP page, such as:
#' https://www.acb.com/fichas/LACB62090.php.
#'
#' In some cases, a page such as https://www.acb.com/fichas/LACB60315.php
#' didn't exist; the \code{httr} package is used to detect
#' those missing pages.
#'
#' @return
#' A data frame with the player game finder data.
#'
#' @note
#' In addition to use the email address to stay identifiable, the function also
#' contains two headers regarding the R platform and version used.
#'
#' Furthermore, even though in the robots.txt file at
#' \url{https://www.acb.com/robots.txt}, there is no information about scraping
#' limitations and all robots are allowed to have complete access,
#' the function also includes the command \code{Sys.sleep(2)}
#' to pause between requests for 2 seconds. In this way, we don't bother the server
#' with multiple requests and we do carry out a friendly scraping.
#'
#' @author
#' Guillermo Vinue
#'
#' @seealso
#' \code{\link{do_scraping_games}}
#'
#' @examples
#' \dontrun{
#' # Not needed to scrape every time the package is checked, built and installed.
#' df1 <- scraping_games_acb_old(type_league = "ACB", nums = 62001:62002, year = "2017-2018",
#' verbose = TRUE, accents = FALSE,
#' r_user = "[email protected]")
#' }
#'
#' @importFrom stringr word str_sub str_extract str_replace str_c
#' @importFrom httr GET user_agent add_headers
#' @importFrom xml2 read_html
#' @importFrom rvest html_table
#' @importFrom plyr .
#' @importFrom stringi stri_trans_general stri_extract_all_regex
#'
#' @export
scraping_games_acb_old <- function(type_league, nums, year, verbose = TRUE, accents = FALSE,
r_user = "[email protected]"){
#Auxiliar matrix to save the statistics in the same file.
#stats1 <- matrix(0, nrow = 1, ncol = 34)
stats1 <- c()
for (jorn in 1:length(nums)) {
if (verbose) {
print(paste("Day", jorn))
}
#To go through all the websites:
if (type_league == "CREY") {
website <- paste("http://www.acb.com/fichas/CREY", nums[jorn], ".php", sep = "")
}else if (type_league == "SCOPA"){
website <- paste("http://www.acb.com/fichas/SCOPA", nums[jorn], ".php", sep = "")
}else if (type_league == "ACB"){
website <- paste("http://www.acb.com/fichas/LACB", nums[jorn], ".php", sep = "")
}else{
print("Valid options are ACB, CREY or SCOPA")
}
if (verbose) {
print(website)
}
# This is just to check that the website exists, because with readLines the website
# can be directly scraped.
#get_website <- GET(website)
get_website <- GET(website,
user_agent(str_c(R.version$platform, R.version$version.string, sep = ", ")),
add_headers(from = r_user))
if (get_website$status_code == 404) { # The status code 404 is for
# the websites that cannot be found, i.e., the websites that
# don't exist.
print("Web doesn't exist")
next
}
if (verbose) {
print("Ready to scrape")
}
#pl_page <- readLines(website, encoding = "utf8")
pl_page <- readLines(website)
# We identify 'naranjaclaro' because is the color which is only once
# for all the player's statistics. Then, from this line, we can get them.
orange <- grep('"naranjaclaro"', pl_page)
if (type_league == "CREY") {
      # These are the seasons where there is no Plus/Minus column, so they must have 35 columns:
if (nums[jorn] %in% c(50001:50004, 51001:51007, 52001:52007, 53033:53039,
54033:54039, 55033:55040, 56033:56040, 57029:57036,
58025:58032, 59038:59045, 60001:60008, 61001:61007,
62001:62007, 63001:63007, 64001:64007, 65001:65007,
66001:66007, 67001:67007, 68001:68007, 69001:69007,
70001:70007, 71001:71007, 72001:72007, 73001:73007,
74001:74007, 75001:75007)) {
numCols <- 35
}else{
numCols <- 36
}
}else if (type_league == "SCOPA"){
      # These are the seasons where there is no Plus/Minus column, so they must have 35 columns:
if (nums[jorn] %in% c(3001, 4001, 5001:5004, 6001:6004, 7001:7003,
9001:9003, 10001:10003, 11001:11003, 12001, 12003)) {
# http://www.acb.com/fichas/SCOPA12002.php # There is Plus/Minus.
# http://www.acb.com/fichas/SCOPA12003.php # There is not Plus/Minus.
numCols <- 35
}else{
numCols <- 36
}
}else{
      # These are the seasons where there is no Plus/Minus column:
      # Warning: in the season 1999-2000, there is a Plus/Minus column only in the regular season,
# but not in the playoffs, thus added 44307:44341.
if (nums[jorn] %in% c(35001:35494, 36001:36498, 37001:37401, 38001:38347, 39001:39417,
40001:40415, 41001:41351, 42001:42350, 43001:43339, 44307:44341,
45001:45339, 46001:46339, 47001:47339, 48001:48341, 49001:49341,
50001:50339, 51001:51340, 52001:52327, 53001:53294, 54001:54331,
55001:55331)) {
numCols <- 35
}else{
numCols <- 36
}
}
# Matrix with the data of each website:
stats <- matrix(0, nrow = length(orange), ncol = numCols)
if (numCols == 35) {
colnames(stats) <- c("Number", "GS", "Player", "MP", "PTS", "TwoP", "TwoPA", "TwoPPerc", "ThreeP",
"ThreePA", "ThreePPerc", "FT", "FTA", "FTPerc", "TRB", "DRB", "ORB", "AST",
"STL", "TOV", "Counteratt", "BLKfv", "BLKag", "Dunks", "PF", "PFrv",
"PIR", "Day", "Date", "Game", "GameRes", "Team", "GameID", "Website",
"CombinID")
last_cols <- 23
}else{
colnames(stats) <- c("Number", "GS", "Player", "MP", "PTS", "TwoP", "TwoPA", "TwoPPerc", "ThreeP",
"ThreePA", "ThreePPerc", "FT", "FTA", "FTPerc", "TRB", "DRB", "ORB", "AST",
"STL", "TOV", "Counteratt", "BLKfv", "BLKag", "Dunks", "PF", "PFrv",
"PlusMinus", "PIR", "Day", "Date", "Game", "GameRes",
"Team", "GameID", "Website", "CombinID")
last_cols <- 24
}
# BLKfv are blocks in favor ; BLKag are blocks against.
# PF are personal fouls commited ; PFrv are personal fouls received.
# PIR is Performace Index Rating.
# CombinID is the unique ID of the players and allows us to univocally identify each player.
# This is especially very useful to distinguish the players with the same name, see
# scraping_acb_rosters_from_acb.R
equip <- -1 # This is an auxiliary value to put the corresponding row
# of "Equipo" in the right place. I have to do this because I cannot be
# sure that "Equipo" goes in the rows 13 and 26 (that would happen if each
# team played with 12 players, but this doesn't always happen. It may
# happen that a team has only 10 ready players for a game).
players <- list()
for (i in seq_along(orange)) {
players[[i]] <- pl_page[c(orange[i] - 1, orange[i]:(orange[i] + 21))] # + 21 because there are 21 rows
# after naranjaclaro with values to fill.
# To put the player's name (and for "Equipo"):
aux5 <- strsplit(players[[i]][2], ">")[[1]][3]
aux5_1 <- unlist(strsplit(aux5, "</a"))
if (is.na(aux5_1)) {
stats[i,3] <- "Equipo"
if (i != length(orange)) {
equip <- i
}
}else{
stats[i,3] <- aux5_1
}
# This is for the players who didn't play and all
# their statistics are empty:
auxNA <- gsub(" <td class=\"grisclaro\" width=\"| <td class=\"blanco\" width=\"|</td>", "", players[[i]][4])
auxNA1 <- gsub("\">", "", auxNA)
auxNA2 <- str_sub(auxNA1, 3)
if (auxNA2 == " ") {
auxNA3 <- strsplit(players[[i]][2], ">")[[1]][3]
        stats[i,3] <- unlist(strsplit(auxNA3, "</a")) # Name of the player who didn't play.
stats[i,seq(1,length(orange))[-3]] <- 0 # We put a 0 for all those empty statistics.
next
}
# This is to put the number of each player's T-Shirt.
# For "Equipo" we put a 0.
aux1 <- strsplit(players[[i]][1], "<td class=\"")
aux2 <- word(aux1[[1]], 2)
aux3 <- gsub("width=\"|\">|</td>", "", aux2[2])
if (i == equip || i == length(orange)) {
stats[i,1] <- 0
}else{
stats[i,1] <- as.numeric(str_sub(aux3, 3))
}
# This is to say if the player started the game or not.
aux4 <- word(aux1[[1]], 1)
if (strsplit(aux4, "\"")[[2]] == "gristit") {
stats[i,2] <- 1 # started the game.
}else{
stats[i,2] <- 0 # Didn't start the game.
}
for (j in c(5,7,9)) { # This is to divide the 2 and 3-field
# shots scored/ attempted and free throws.
# 5,7,9 are the columns that correspond with '/'.
if (j == 5) {
index1 <- j + 1
index2 <- j + 2
index3 <- j + 3
}else if (j == 7) {
index1 <- j + 2
index2 <- j + 3
index3 <- j + 4
}else if (j == 9) {
index1 <- j + 3
index2 <- j + 4
index3 <- j + 5
}
aux7 <- gsub(" <td class=\"grisclaro\" width=\"| <td class=\"blanco\" width=\"|</td>", "", players[[i]][j])
aux8 <- gsub("\">", "", aux7)
aux9 <- str_sub(aux8, 3)
aux10 <- as.numeric(strsplit(aux9, "/")[[1]])
stats[i,index1] <- aux10[1]
stats[i,index2] <- aux10[2]
aux11 <- gsub(" <td class=\"grisclaro\" width=\"| <td class=\"blanco\" width=\"|</td>", "", players[[i]][j+1])
aux12 <- gsub("\">", "", aux11)
aux13 <- str_sub(aux12, 3)
aux14 <- as.numeric(strsplit(aux13, "%")[[1]])
stats[i,index3] <- aux14
} # End of loop j in c(5,7,9)
# This is for the total rebounds:
aux15 <- gsub(" <td class=\"grisclaro\" width=\"| <td class=\"blanco\" width=\"|</td>", "", players[[i]][11])
aux16 <- gsub("\">", "", aux15)
aux17 <- str_sub(aux16, 3)
stats[i,15] <- as.numeric(aux17)
# This is to divide the offensive and defensive rebounds.
aux18 <- gsub(" <td class=\"grisclaro\" width=\"| <td class=\"blanco\" width=\"|</td>", "", players[[i]][12])
aux19 <- gsub("\">", "", aux18)
aux20 <- str_sub(aux19, 3)
aux21 <- strsplit(aux20, "+")[[1]]
if (length(aux21) == 4) { # This is needed if the number of offensive or defensive rebounds (and on consequence the
# total rebounds) is 10 or more than 10.
nums_rebs <- as.numeric(stri_extract_all_regex(aux20, "[0-9]+")[[1]])
if (nums_rebs[1] > nums_rebs[2]) { # For example 10+2
stats[i,16] <- as.numeric(paste(aux21[1], aux21[2], sep = "")) # 10 or more defensive rebounds.
stats[i,17] <- as.numeric(aux21[4]) # Less than 10 offensive rebounds.
}else{# For example 4+13, see Hopkins in 45274.php
stats[i,16] <- as.numeric(aux21[1]) # Less than 10 defensive rebounds.
stats[i,17] <- as.numeric(paste(aux21[3], aux21[4], sep = "")) # 10 or more offensive rebounds.
}
}else if (length(aux21) == 5) { # It can be the case, see 30031.php player King, Winfred where
# we have 10+10 rebounds, so aux21 is "1" "0" "+" "1" "0"
# so the length in this case is 5, not 4.
stats[i,16] <- as.numeric(paste(aux21[1], aux21[2], sep = "")) # 10 or more defensive rebounds.
stats[i,17] <- as.numeric(paste(aux21[4], aux21[5], sep = "")) # 10 or more offensive rebounds.
}else{# Less than 10 total rebounds (for example 4+3)
stats[i,16] <- as.numeric(aux21[1])
stats[i,17] <- as.numeric(aux21[3])
}
# This is to remove the two values that appear before
# the minutes and points of each player.
for (k in c(4:5)) {
aux22 <- gsub(" <td class=\"grisclaro\" width=\"| <td class=\"blanco\" width=\"|</td>", "", players[[i]][k - 1])
aux23 <- gsub("\">", "", aux22)
if (k == 4 & (i == equip || i == length(orange))) {
stats[i,k] <- 0 # For "Equipo".
}else{
stats[i,k] <- substring(aux23, 3, nchar(aux23))
}
}
# For the statistics from assists until rating (PIR).
for (l in c(14:last_cols)) {
aux24 <- gsub(" <td class=\"grisclaro\" width=\"| <td class=\"blanco\" width=\"|</td>", "", players[[i]][l - 1])
aux25 <- gsub("\">", "", aux24)
stats[i,l + 4] <- substring(aux25, 3, nchar(aux25))
}
aux_comb <- strsplit(players[[i]][2], ">")[[1]][2]
aux_comb1 <- strsplit(aux_comb , "id=")[[1]][2]
aux_comb2 <- gsub("\"", "", aux_comb1)
stats[i,numCols] <- aux_comb2
} # End of loop i.
# For "Day", "Date", "Game", "GameRes", "Tem", "GameID"
date_ref <- grep('class="estnegro"', pl_page)
gv <- gsub(" <td width=\"| |</td>", "", pl_page[date_ref + 1])
stats[,numCols - 7] <- as.numeric(strsplit(gv, " ")[[1]][3]) # Day.
stats[,numCols - 6] <- as.character(strsplit(gv, " ")[[1]][5]) # Date.
date_ref <- grep('class="estverdel"', pl_page)
gv1 <- gsub("<td colspan=\"10\" class=\"estverdel\">" , "", pl_page[date_ref])
gv2 <- strsplit(gv1, "</td>")
gv3 <- gsub(" ", "", gv2[[1]])
len_gv3 <- sapply(strsplit(gv3, " "), length)
gv4 <- word(gv3, 1, len_gv3 - 1)
gv5 <- gsub(" ", "", gv2[[2]])
len_gv5 <- sapply(strsplit(gv5, " "), length)
gv6 <- word(gv5, 1, len_gv5 - 1)
stats[,numCols - 5] <- paste(gv4, gv6, sep = " - ") # Game.
stats[,numCols - 5] <- tolower(stats[,numCols - 5])
gv7 <- word(gv3, -1)
gv8 <- word(gv5, -1)
stats[,numCols - 4] <- paste(gv7, gv8, sep = " - ") # GameRes.
# To put the teams' names:
stats[1:(equip - 1),numCols - 3] <- gv4
stats[(equip + 1):(length(orange) - 1),numCols - 3] <- gv6
stats[,numCols - 3] <- tolower(stats[,numCols - 3])
stats[,numCols - 2] <- rep(jorn, length(orange)) # GameID
stats[,numCols - 1] <- website
# Right Spanish accents in R:
# Scrape lookup table of accented char html codes,
# from the 4th table on this page:
ref_url <- 'http://www.w3schools.com/charsets/ref_html_8859.asp'
#html is deprecated.
char_table <- read_html(ref_url) %>% html_table %>% `[[`(4)
# 4 means that the table of interest in this website is the fourth.
# Fix names:
names(char_table) <- names(char_table) %>% tolower %>% gsub(' ', '_', .)
# Names with the rights accents
#stats1[,3] <- mgsub(char_table$entity_name, char_table$character, stats1[,3])
# WARNING: SEE THIS BEHAVIOUR BOTH IN WINDOWS AND LINUX:
##stats1[,3] <- gsub("", "u", stats1[,3]) # This is because the accented u is
# not replaced rightly with mgsub.
#stats1[,31] <- mgsub(char_table$entity_name, char_table$character, stats1[,31])
#stats1[,33] <- mgsub(char_table$entity_name, char_table$character, stats1[,33])
for (row in 1:nrow(stats)) {
# Players' names:
enti <- str_extract(pattern = char_table$entity_name, stats[row,3])
repl <- char_table$character[!is.na(enti)]
if (length(repl) != 0) {
if (length(repl) > 1) { # The player's name may contain several special accents, such as Mumbru, Alex.
aux_pl <- stats[row,3]
aux1_pl <- c()
for (re in 1:length(repl)) {
aux1_pl <- str_replace(aux_pl, enti[!is.na(enti)][re], repl[re])
aux_pl <- aux1_pl
}
rm(aux_pl)
stats[row,3] <- aux1_pl
}else{
#stats[row,3] <- str_replace(stats[row,3], enti[!is.na(enti)], repl) # The problem
# with this sentence is that if there is the same accent in both the name and surname,
# for example, Nogues, Jose I. (accented e in Nogues and Jose) in
# https://www.acb.com/fichas/LACB60017.php, only the first accent is rightly replaced,
# so we have to use:
stats[row,3] <- as.character(gsub(enti[!is.na(enti)], repl, stats[row,3]))
}
}
# Teams that played the game:
enti <- str_extract(pattern = char_table$entity_name, stats[row,numCols - 5])
repl <- char_table$character[!is.na(enti)]
if (length(repl) != 0) {
if (length(repl) > 1) {
aux_pl <- stats[row,numCols - 5]
aux1_pl <- c()
for (re in 1:length(repl)) {
aux1_pl <- str_replace(aux_pl, enti[!is.na(enti)][re], repl[re])
aux_pl <- aux1_pl
}
rm(aux_pl)
stats[row,numCols - 5] <- aux1_pl
}else{
stats[row,numCols - 5] <- str_replace(stats[row,numCols - 5], enti[!is.na(enti)], repl)
}
}
# Teams that played the game separately:
enti <- str_extract(pattern = char_table$entity_name, stats[row,numCols - 3])
repl <- char_table$character[!is.na(enti)]
if (length(repl) != 0) {
if (length(repl) > 1) {
aux_pl <- stats[row,numCols - 3]
aux1_pl <- c()
for (re in 1:length(repl)) {
aux1_pl <- str_replace(aux_pl, enti[!is.na(enti)][re], repl[re])
aux_pl <- aux1_pl
}
rm(aux_pl)
stats[row,numCols - 3] <- aux1_pl
}else{
stats[row,numCols - 3] <- str_replace(stats[row,numCols - 3], enti[!is.na(enti)], repl)
}
}
} # End of loop row.
if (!accents) {
stats[,3] <- stri_trans_general(stats[,3], "Latin-ASCII")
stats[,numCols - 5] <- stri_trans_general(stats[,numCols - 5], "Latin-ASCII")
stats[,numCols - 3] <- stri_trans_general(stats[,numCols - 3], "Latin-ASCII")
}
if (numCols == 35) { # This is because in order to merge all the data frames for all the seasons,
# the data frames must have the same number of columns, so I have to add the Plus/Minus column:
stats <- cbind(stats, PlusMinus = NA) # Add in the new column.
stats <- stats[,c(1:26, 36, 27:35)] # Reorder columns.
}
if (verbose) {
print("Done")
}
if (type_league == "CREY") {
type_season <- "Copa del Rey"
}else if (type_league == "SCOPA"){
type_season <- "Supercopa"
}else{
if (nums[jorn] %in% c(30225:30257, 31225:31262, 32225:32264, 33433:33492, 34433:34487,
35409:35494, 36409:36498, 37342:37401, 38281:38347, 39381:39417,
40381:40415, 41307:41351, 42307:42350, 43307:43339, 44307:44341,
45307:45339, 46307:46339, 47307:47339, 48307:48341, 49307:49341,
50307:50339, 51307:51340, 52307:52337, 53273:53294, 54307:54331,
55307:55331, 56307:56333, 57307:57333, 58307:58333, 59307:59331,
60307:60332, 61273:61298, 62307:62332, 63307:63332)) {
type_season <- "Playoffs"
}else{
type_season <- "Regular Season"
}
}
stats <- cbind(stats[, 1:28], Type_season = type_season, stats[, 29:36]) # Add the season.
stats1 <- rbind(stats1, stats)
Sys.sleep(2)
} # End loop jorn.
#stats1 <- stats1[-1,]
stats1 <- cbind(stats1[, 1:28], Season = year, stats1[, 29:37]) # Add the season.
#stats1 <- as.data.frame(stats1)
return(stats1)
} | /scratch/gouwar.j/cran-all/cranData/BAwiR/R/scraping_games_acb_old.R |
#' Euroleague and Eurocup player game finder data
#'
#' @aliases scraping_games_euro
#'
#' @description
#' This function should allow us to get all the player game finder data for
#' all the desired Euroleague and Eurocup seasons available from
#' \url{https://www.euroleaguebasketball.net/euroleague/game-center/} and
#' \url{https://www.euroleaguebasketball.net/eurocup/game-center/}, respectively.
#'
#' NOTE (2023): The Euroleague and Eurocup websites have changed their format, so
#' this function will need to be updated.
#'
#' @usage
#' scraping_games_euro(competition, nums, year, verbose = TRUE,
#' r_user = "[email protected]")
#'
#' @param competition String. Options are "Euroleague" and "Eurocup".
#' @param nums Numbers corresponding to the website from which scraping.
#' @param year Year when the season starts. 2017 refers to 2017-2018 and so on.
#' @param verbose Should R report information on progress? Default TRUE.
#' @param r_user Email to identify the user when doing web scraping.
#' This is a polite way to do web scraping and to certify that the user
#' is working as transparently as possible with a research purpose.
#'
#' @details
#' See the examples in \code{\link{get_games_rosters}} to see the game numbers
#' to scrape in each season.
#'
#'
#' A data frame with the player game finder data.
#'
#' @note
#' In addition to use the email address to stay identifiable, the function also
#' contains two headers regarding the R platform and version used.
#'
#' Furthermore, in the robots.txt file located at
#' \url{https://www.euroleaguebasketball.net/robots.txt}
#' there is no Crawl-delay field. However, we assume crawlers to pause between
#' requests for 15 seconds. This is done by adding to the function the command
#' \code{Sys.sleep(15)}.
#'
#' @author
#' Guillermo Vinue
#'
#' @seealso
#' \code{\link{do_scraping_games}}
#'
#' @examples
#' \dontrun{
#' # Not needed to scrape every time the package is checked, built and installed.
#' # It takes 15 seconds.
#' df1 <- do_scraping_games(competition = "Euroleague", nums = 1:2,
#' year = "2017", verbose = TRUE, r_user =
#' "[email protected]")
#' }
#'
#' @importFrom stringr word str_sub str_extract str_replace str_c
#' @importFrom httr GET user_agent add_headers
#' @importFrom xml2 read_html
#' @importFrom stringi stri_trans_general stri_extract_all_regex
#'
#' @export
scraping_games_euro <- function(competition, nums, year, verbose = TRUE,
r_user = "[email protected]"){
  # Auxiliary matrix to save the statistics in the same file.
stats1 <- c()
for (jorn in 1:length(nums)) {
if (verbose) {
print(jorn)
print(nums[jorn])
}
#To go through all the websites:
if (competition == "Euroleague") {
website <- paste("http://www.euroleague.net/main/results/showgame?gamecode=",
nums[jorn], "&seasoncode=E", year, sep = "")
}else if (competition == "Eurocup") {
website <- paste("http://www.eurocupbasketball.com/eurocup/games/results/showgame?gamecode=",
nums[jorn], "&seasoncode=U", year, sep = "")
}
if (verbose) {
print(website)
}
# This is just to check that the website exists, because with readLines the website
# can be directly scraped.
#get_website <- GET(website)
get_website <- GET(website,
user_agent(str_c(R.version$platform, R.version$version.string, sep = ", ")),
add_headers(from = r_user))
if (get_website$status_code == 404 | get_website$status_code == 500) {
# The status code 404 is for the websites that cannot be found, i.e., the websites that don't exist.
# The status code 500 is for the websites under maintenance, so the websites don't exist either.
print("Web doesn't exist")
next
}
if (verbose) {
print("Ready to scrape")
}
# https://stat.ethz.ch/pipermail/r-help/2006-July/108654.html
pl_page <- readLines(website, warn = FALSE)
# We identify "PlayerContainer" because is the word which is only once
# for all the player's statistics. Then, from this line, we can get them.
playCont <- grep("PlayerContainer", pl_page)
numCols <- 38
# Matrix with the data of each website:
stats <- matrix(0, nrow = length(playCont), ncol = numCols)
colnames(stats) <- c("Number", "GS", "Player", "MP", "PTS", "TwoP", "TwoPA", "TwoPPerc", "ThreeP",
"ThreePA", "ThreePPerc", "FT", "FTA", "FTPerc", "TRB", "DRB", "ORB", "AST",
"STL", "TOV", "Counteratt", "BLKfv", "BLKag", "Dunks", "PF", "PFrv",
"PlusMinus", "PIR", "Season", "Type_season", "Day", "Date", "Game", "GameRes",
"Team", "GameID", "Website", "CombinID")
# CombinID is the unique ID of the players and allows us to univocally identify each player.
players <- list()
equip <- c()
for (i in seq_along(playCont)) {
players[[i]] <- pl_page[c(playCont[i] - 1, playCont[i]:(playCont[i] + 16))]
# + 16 because there are 16 rows after PlayerContainer with values to fill.
# CombinID
pcode <- strsplit(strsplit(players[[i]][2], "pcode=")[[1]][2], "&seasoncode")[[1]][1]
stats[i, numCols] <- pcode
# To put the player's name (and for "Equipo"):
aux_n <- strsplit(players[[i]][2], ">")[[1]][3]
aux_n_1 <- unlist(strsplit(aux_n, "</a"))
if (aux_n_1 == "Team") {
stats[i,3] <- "Team"
# This is to know the number of players of each team to put in the corresponding rows their team name.
equip[i] <- i # If the non-NA values are 13 and 26, this means that there are 12 players in each team
}else{
# https://stackoverflow.com/questions/6364783/capitalize-the-first-letter-of-both-words-in-a-two-word-string
stats[i,3] <- tools::toTitleCase(tolower(aux_n_1))
}
# This is to put the number of each player's T-Shirt.
# For "Equipo" we put a 0.
aux1 <- gsub("\t<td>", "", players[[i]][1])
aux2 <- gsub("</td>", "", aux1)
if (aux2 == "") {
stats[i,1] <- 0 # For the row Team.
}else{
stats[i,1] <- as.numeric(aux2)
}
# This is to say if the player started the game or not.
if (grepl("PlayerStartFive", players[[i]][2])) {
stats[i,2] <- 1 # started the game.
}else{
stats[i,2] <- 0 # Didn't start the game.
}
# Minutes played:
aux7 <- gsub("\t<td>", "", players[[i]][3])
aux8 <- gsub("</td>", "", aux7)
if (aux8 == "DNP") {
next
}else if (aux8 == " ") {
stats[i,4] <- 0 # For the row Team.
}else{
stats[i,4] <- aux8
}
# Points:
aux9 <- gsub("\t<td>", "", players[[i]][4])
aux10 <- gsub("</td>", "", aux9)
if (aux10 == " ") {
stats[i,5] <- 0
}else{
stats[i,5] <- aux10
}
for (j in c(5,6,7)) { # This is to divide the 2 and 3-field
# shots scored/ attempted and free throws.
# 5,6,7 are the columns that correspond with '/'.
if (j == 5) {
index1 <- j + 1
index2 <- j + 2
index3 <- j + 3
}else if (j == 6) {
index1 <- j + 3
index2 <- j + 4
index3 <- j + 5
}else if (j == 7) {
index1 <- j + 5
index2 <- j + 6
index3 <- j + 7
}
aux11 <- gsub("\t<td>", "", players[[i]][j])
aux12 <- gsub("</td>", "", aux11)
        if (aux12 == " ") { # This means that the player didn't shoot from this distance,
# so their values remain zero.
stats[i,index1] <- 0
stats[i,index2] <- 0
stats[i,index3] <- 0 # percentage
}else{
aux13 <- as.numeric(strsplit(aux12, "/")[[1]])
stats[i,index1] <- aux13[1]
stats[i,index2] <- aux13[2]
stats[i,index3] <- round(aux13[1] / aux13[2], 2) # percentage
}
} # End of loop j in c(5,6,7)
# This is for the rest of statistics:
rest <- 8:18
indexes <- c(17, 16, 15, 18:20, 22, 23, 25, 26, 28)
      for (j in seq_along(rest)) {
aux14 <- gsub("\t<td>", "", players[[i]][rest[j]])
aux15 <- gsub("</td>", "", aux14)
if (aux15 == " ") {
stats[i, indexes[j]] <- 0
}else{
stats[i, indexes[j]] <- as.numeric(aux15)
}
}
} # End of loop i.
# For "Season", "Type_season", "Day", "Date", "Game", "GameRes", "Tem", "GameID":
# Season, Type_season and Day:
ref <- grep("gc-title", pl_page)
ref1 <- pl_page[ref + 1]
ref2 <- strsplit(ref1, "span")[[1]]
stats[,numCols - 9] <- gsub("</", "", gsub(">", "", ref2[2])) # Season.
stats[,numCols - 8] <- gsub("</", "", gsub(">", "", ref2[4])) # Type_season.
stats[,numCols - 7] <- gsub("</", "", gsub(">", "", ref2[6])) # Day.
# Date: All the following is needed to put the date in the same format as ACB tables.
date_ref <- grep("date", pl_page)
date_ref1 <- pl_page[date_ref[2]]
date_ref2 <- gsub(" <div class=\"date cet\">| </div>", "", date_ref1)
date_ref3 <- strsplit(date_ref2, "CET")[[1]][1]
date_ref4 <- gsub(" ", "", date_ref3)
date_ref5 <- gsub(",", "", date_ref4)
# https://www.r-bloggers.com/date-formats-in-r/
date_ref6 <- as.Date(date_ref5, format = "%B %d %Y")
date_ref7 <- gsub("-", "/", date_ref6)
date_ref8 <- strsplit(date_ref7, "/")[[1]]
date_ref9 <- paste(date_ref8[3], date_ref8[2], date_ref8[1], sep = "/")
stats[,numCols - 6] <- date_ref9 # Date.
# Game, GameRes, Team, GameID:
# Game:
game_ref <- grep("game-score", pl_page)
game_ref1_local <- pl_page[game_ref + 5]
game_ref2_local <- strsplit(game_ref1_local, "<span class=\"name\">")[[1]]
game_ref1_road <- pl_page[game_ref + 12]
game_ref2_road <- strsplit(game_ref1_road, "<span class=\"name\">")[[1]]
game_local <- tolower(gsub("</", "", gsub("</span>", "", game_ref2_local[2]))) # Local team of the game.
game_road <- tolower(gsub("</", "", gsub("</span>", "", game_ref2_road[2]))) # Road team of the game
stats[,numCols - 5] <- paste(game_local, game_road, sep = "-") # Game
# GameRes:
res_ref1_local <- pl_page[game_ref + 7]
res_ref2_local <- strsplit(res_ref1_local, "<span class=\"score\">")[[1]]
res_ref1_road <- pl_page[game_ref + 14]
res_ref2_road <- strsplit(res_ref1_road, "<span class=\"score\">")[[1]]
res_local <- gsub("</", "", gsub("</span>", "", res_ref2_local[2]))
res_road <- gsub("</", "", gsub("</span>", "", res_ref2_road[2]))
stats[,numCols - 4] <- paste(res_local, res_road, sep = "-") # GameRes
# Team: To put the teams' names:
equip1 <- which(!is.na(equip))
if (length(equip1) > 1) {
stats[1:(equip1[1] - 1), numCols - 3] <- game_local
stats[(equip1[1] + 1):equip1[2], numCols - 3] <- game_road
# It can happen like in http://www.euroleague.net/main/results/showgame?gamecode=14&seasoncode=E2006
# that there is no row Team for one team:
}else{
stats[1:(equip1[1] - 1), numCols - 3] <- game_local
stats[(equip1[1] + 1):nrow(stats), numCols - 3] <- game_road
}
# GameID:
stats[,numCols - 2] <- rep(jorn, length(playCont))
stats[,numCols - 1] <- website
stats1 <- rbind(stats1, stats)
if (verbose) {
print("Done")
}
# Crawl-delay to pause between requests for 15 seconds.
Sys.sleep(15)
} # End loop jorn.
#stats1 <- as.data.frame(stats1)
#stats1$GS <- as.numeric(stats1$GS)
return(stats1)
} | /scratch/gouwar.j/cran-all/cranData/BAwiR/R/scraping_games_euro.R |
#' ACB players' profile
#'
#' @aliases
#' scraping_rosters_acb
#'
#' @description
#' This function allows us to obtain the basic information of each player,
#' including his birth date. Then, we will be able to compute the age that
#' each player had on the date that he played each game.
#' The website used to collect information is \url{https://www.acb.com}.
#'
#' @usage
#' scraping_rosters_acb(pcode, verbose = TRUE, accents = FALSE,
#' r_user = "[email protected]")
#'
#' @param pcode Code corresponding to the player's website to scrape.
#' @param verbose Should R report information on progress? Default TRUE.
#' @param accents Should we keep the Spanish accents? The recommended
#' option is to remove them, so default FALSE.
#' @param r_user Email user to identify the user when doing web scraping.
#' This is a polite way to do web scraping and to certify that the user
#' is working as transparently as possible with a research purpose.
#'
#' @return
#' Data frame with eight columns:
#' \itemize{
#' \item CombinID: Unique ID to identify the players.
#' \item Player: Player's name.
#' \item Position: Player's position on the court.
#' \item Height: Player's height.
#' \item Date_birth: Player's birth date.
#' \item Nationality: Player's nationality.
#' \item Licence: Player's licence.
#' \item Website_player: Website.
#' }
#'
#' @details
#' Some players have a particular licence, which does not necessarily match their
#' nationality, so that they are not considered foreign players under the
#' current ACB rules.
#'
#' @note
#' In addition to using the email address to stay identifiable, the function also
#' contains two headers regarding the R platform and version used.
#'
#' Furthermore, even though in the robots.txt file at
#' \url{https://www.acb.com/robots.txt}, there is no information about scraping
#' limitations and all robots are allowed to have complete access,
#' the function also includes the command \code{Sys.sleep(2)}
#' to pause between requests for 2 seconds. In this way, we don't bother the server
#' with multiple requests and we do carry out a friendly scraping.
#'
#' @author
#' Guillermo Vinue
#'
#' @seealso
#' \code{\link{do_scraping_rosters}}
#'
#' @examples
#' \dontrun{
#' # Not needed to scrape every time the package is checked, built and installed.
#' df_bio <- scraping_rosters_acb("56C", verbose = TRUE, accents = FALSE,
#' r_user = "[email protected]")
#' }
#'
#' @importFrom stringr str_extract str_replace str_c
#' @importFrom httr GET user_agent add_headers
#' @importFrom xml2 read_html
#' @importFrom rvest html_table
#' @importFrom plyr .
#' @importFrom stringi stri_trans_general
#'
#' @export
scraping_rosters_acb <- function(pcode, verbose = TRUE,
accents = FALSE, r_user = "[email protected]"){
df <- NULL
#for (i in 1:length(pcode))
len_pcode <- length(pcode)
# Instead of using 1:len_pcode, we can use seq_len(len_pcode) to avoid the backwards sequence bug.
for (i in seq_len(len_pcode)) {
if (verbose) {
print(pcode[i])
}
website <- paste("http://www.acb.com/jugador.php?id=", pcode[i], sep = "")
if (verbose) {
print(website)
}
# This is just to check that the website exists, because with readLines the website
# can be directly scraped.
#get_website <- GET(website)
get_website <- GET(website,
user_agent(str_c(R.version$platform, R.version$version.string, sep = ", ")),
add_headers(from = r_user))
if (get_website$status_code == 404) { # The status code 404 is for
# the websites that cannot be found, i.e., the websites that
# don't exist.
print("Web doesn't exist")
next
}
if (verbose) {
print("Ready to scrape")
}
html_pl <- readLines(website, warn = FALSE) # There are some minor unimportant warnings.
# Date of birth:
born <- grep("fecha de nac", html_pl)
if (length(born) == 0) { # This means that web doesn't exist.
print("Web doesn't exist")
next
}
born1 <- html_pl[born + 1]
born2 <- gsub(" <td class=\"datojug\">", "", born1)
born3 <- sub(".*,", "", born2)
born4 <- substr(gsub(" ", "", born3), 1, 10)
# Position:
posit <- grep("posic", html_pl)
posit1 <- html_pl[posit + 1]
posit2 <- gsub(" <td class=\"datojug\">", "", posit1)
if (length(posit2) == 1) {
posit3 <- gsub(" ", "", substr(posit2, 1, 2))[1]
}else if (length(posit2) == 2) { # This is because for some players, there are
# two elements in posit2, but in some cases the player's position is in
# the second element and in other cases it is in the first element.
posit3 <- gsub(" ", "", substr(posit2[2], 1, 2))[1]
if (posit3 == "" | posit3 == "19") { # See for example Bodiroga AYG for "" and
# Keith Robinson AS4 for 19.
posit3 <- gsub(" ", "", substr(posit2[1], 1, 2))[1]
}
}
# Name:
name <- grep('"portadader"', html_pl)
name1 <- html_pl[name + 1]
name2 <- gsub(" <div id=\"portadadertop\">", "", name1)
name3 <- gsub("</div><br>", "", name2)
# Right Spanish accents in R:
# Scrape lookup table of accented char html codes, from the 4th table on this page:
ref_url <- 'http://www.w3schools.com/charsets/ref_html_8859.asp'
  # Note: rvest's old html() function is deprecated, so read_html() is used.
char_table <- read_html(ref_url) %>% html_table %>% `[[`(4)
# 4 means that the table of interest in this website is the fourth.
# Fix names:
names(char_table) <- names(char_table) %>% tolower %>% gsub(' ', '_', .)
# Names with the rights accents
#name3 <- mgsub(char_table$entity_name, char_table$character, name3)
# WARNING: SEE THIS BEHAVIOUR BOTH IN WINDOWS AND LINUX:
#stats1[,3] <- gsub("", "u", stats1[,3]) # This is because the accented u is
# not replaced rightly with mgsub.
enti <- str_extract(pattern = char_table$entity_name, name3)
repl <- char_table$character[!is.na(enti)]
if (length(repl) != 0) {
if (length(repl) > 1) { # The player's name may contain several special accents, such as Alex Mumbru.
aux_pl <- name3
aux1_pl <- c()
        for (re in seq_along(repl)) {
aux1_pl <- str_replace(aux_pl, enti[!is.na(enti)][re], repl[re])
aux_pl <- aux1_pl
}
rm(aux_pl)
name3 <- aux1_pl
}else{
name3 <- str_replace(name3, enti[!is.na(enti)], repl)
}
enti <- str_extract(pattern = char_table$entity_name, name3)
repl <- char_table$character[!is.na(enti)]
if (length(repl) != 0) { # It can happen that the name and the surname has the same special character,
# such as Andres Jimenez, so we have to repeat the procedure.
name3 <- str_replace(name3, enti[!is.na(enti)], repl)
}
}
if (!accents) {
name3 <- stri_trans_general(name3, "Latin-ASCII")
}
# Nationality and licence:
nac_lic <- grep("nacionalidad | licencia", html_pl)
nac_lic1 <- html_pl[nac_lic + 1]
nac_lic2 <- gsub(" <td class=\"datojug\">", "", nac_lic1)
nac_lic3 <- sub("</td>", "", nac_lic2)
aux_nac_lic <- strsplit(nac_lic3, " ")[[1]]
nac <- aux_nac_lic[1]
if (length(aux_nac_lic) == 3) {
lic <- aux_nac_lic[3]
}else{
lic <- "NA"
}
# Height:
heig <- gsub(" ", "", substr(posit2, 4, 8))[1]
df1 <- data.frame(pcode[i], name3, posit3, heig, born4, nac, lic, website)
df <- rbind.data.frame(df, df1)
Sys.sleep(2)
}
if (!is.null(df)) {
colnames(df) <- c("CombinID", "Player", "Position", "Height",
"Date_birth", "Nationality", "Licence", "Website_player")
}
return(df)
} | /scratch/gouwar.j/cran-all/cranData/BAwiR/R/scraping_rosters_acb.R |
#' Euroleague and Eurocup players' profile
#'
#' @aliases
#' scraping_rosters_euro
#'
#' @description
#' This function allows us to obtain the basic information of each
#' Euroleague/Eurocup player, including his birth date. Then, we will
#' be able to compute the age that each player had on the date that he
#' played each game. The websites used to collect information are
#' \url{https://www.euroleaguebasketball.net/euroleague/} and
#' \url{https://www.euroleaguebasketball.net/eurocup/}.
#'
#' @usage
#' scraping_rosters_euro(competition, pcode, year, verbose = TRUE,
#' r_user = "[email protected]")
#'
#' @param competition String. Options are "Euroleague" and "Eurocup".
#' @param pcode Code corresponding to the player's website to scrape.
#' @param year Year when the season starts. 2017 refers to 2017-2018 and so on.
#' @param verbose Should R report information on progress? Default TRUE.
#' @param r_user Email user to identify the user when doing web scraping.
#' This is a polite way to do web scraping and to certify that the user
#' is working as transparently as possible with a research purpose.
#'
#' @return
#' Data frame with seven columns:
#' \itemize{
#' \item CombinID: Unique ID to identify the players.
#' \item Player: Player's name.
#' \item Position: Player's position on the court.
#' \item Height: Player's height.
#' \item Date_birth: Player's birth date.
#' \item Nationality Player's nationality.
#' \item Website_player: Website.
#' }
#'
#' @author
#' Guillermo Vinue
#'
#' @note
#' In addition to using the email address to stay identifiable, the function also
#' contains two headers regarding the R platform and version used.
#'
#' In \url{https://www.euroleaguebasketball.net/robots.txt}
#' there is no Crawl-delay field. Nevertheless, the function pauses between
#' requests for 15 seconds. This is done with the command
#' \code{Sys.sleep(15)}.
#'
#' @seealso
#' \code{\link{do_scraping_rosters}}
#'
#' @examples
#' \dontrun{
#' # Not needed to scrape every time the package is checked, built and installed.
#' # It takes 15 seconds.
#' df_bio <- scraping_rosters_euro("Euroleague", "005791", "2017", verbose = TRUE,
#' r_user = "[email protected]")
#' }
#'
#' @importFrom httr GET user_agent add_headers
#' @importFrom stringr str_extract str_replace str_c
#'
#' @export
scraping_rosters_euro <- function(competition, pcode, year, verbose = TRUE,
r_user = "[email protected]"){
df <- NULL
#for (i in 1:length(pcode))
len_pcode <- length(pcode)
# Instead of using 1:len_pcode, we can use seq_len(len_pcode) to avoid the backwards sequence bug.
for (i in seq_len(len_pcode)) {
if (verbose) {
print(i)
print(pcode[i])
}
if (competition == "Euroleague") {
website <- paste("http://www.euroleague.net/competition/players/showplayer?pcode=",
pcode[i], "&seasoncode=E", year, sep = "")
}else if (competition == "Eurocup") {
website <- paste("http://www.eurocupbasketball.com/eurocup/competition/players/showplayer?pcode=",
pcode[i], "&seasoncode=U", year, sep = "")
}
if (verbose) {
print(website)
}
# This is just to check that the website exists, because with readLines the website
# can be directly scraped.
#get_website <- GET(website)
get_website <- GET(website,
user_agent(str_c(R.version$platform, R.version$version.string, sep = ", ")),
add_headers(from = r_user))
if (get_website$status_code == 404) { # The status code 404 is for
# the websites that cannot be found, i.e., the websites that
# don't exist.
print("Web doesn't exist")
next
}
if (verbose) {
print("Ready to scrape")
}
html_pl <- readLines(website, warn = FALSE) # There are some minor unimportant warnings.
# Name:
playerdata <- grep("player-data", html_pl)
name1 <- html_pl[playerdata + 2]
name2 <- gsub("<div class=\"name\">", "", name1)
name3 <- gsub("</div>", "", name2)
name4 <- gsub(" ", "", name3)
name5 <- tools::toTitleCase(tolower(gsub(",", ", ", name4)))
# Date of birth:
born <- grep("summary-second", html_pl)
born1 <- html_pl[born + 2]
# There are players with no height such as:
# http://www.eurocupbasketball.com/eurocup/competition/players/showplayer?pcode=BMO&seasoncode=U2002
if (unique(grepl("Nationality", born1))) {
born1 <- html_pl[born + 1]
heig4 <- NA
}else{
# Height:
heig1 <- html_pl[born + 1]
heig2 <- gsub("<span>Height:", "", heig1)
heig3 <- gsub("</span>", "", heig2)
heig4 <- gsub(" ", "", heig3)
}
born2 <- gsub("<span>Born:", "", born1)
born3 <- gsub("</span>", "", born2)
if (length(born3) > 1) {
born3 <- born3[1]
born4 <- gsub(",", "",gsub(" ", "", born3))
}else{
born4 <- gsub(",", "", born3)
}
born5 <- as.Date(born4, format = "%d %B %Y")
born6 <- gsub("-", "/", born5)
born7 <- strsplit(born6, "/")[[1]]
born8 <- paste(born7[3], born7[2], born7[1], sep = "/")
# Position:
pos <- grep("summary-first", html_pl)
pos1 <- html_pl[pos + 4]
pos2 <- gsub("<span>", "", pos1)
pos3 <- gsub("</span>", "", pos2)
pos4 <- gsub(" ", "", pos3)
# Some players don't have the t-shirt number, e.g.
# http://www.euroleague.net/competition/players/showplayer?pcode=ABN&seasoncode=E2000
if (pos4 == "") {
pos1 <- html_pl[pos + 3]
pos2 <- gsub("<span>", "", pos1)
pos3 <- gsub("</span>", "", pos2)
pos4 <- gsub(" ", "", pos3)
}
# There are many websites where the position is not available, e.g.,
# http://www.euroleague.net/competition/players/showplayer?pcode=BXN&seasoncode=E2003
if (!(pos4 %in% c("Guard", "Forward", "Center"))) {
# "Guard", "Forward", "Center" are the three positions available from the euroleague.
pos4 <- NA
}
# There are some players who played the eurocup who don't have the position, but others do!
# These are some scraping issues because of the websites structure.
# Sergio Rodriguez:
# http://www.eurocupbasketball.com/eurocup/competition/players/showplayer?pcode=CVM&seasoncode=U2005
# http://www.euroleague.net/competition/players/showplayer?pcode=CVM&seasoncode=E2004
# http://www.euroleague.net/competition/players/showplayer?pcode=CVM&seasoncode=E2017
# Felipe Reyes:
# http://www.eurocupbasketball.com/eurocup/competition/players/showplayer?pcode=AAX&seasoncode=U2002
# http://www.euroleague.net/competition/players/showplayer?pcode=AAX&seasoncode=E2017
# Nationality:
nat <- grep("summary-second", html_pl)
nat1 <- html_pl[nat + 3]
# There are players with no height such as:
# http://www.eurocupbasketball.com/eurocup/competition/players/showplayer?pcode=BMO
if (nat1 == " </div>") {
nat1 <- html_pl[born + 2]
}
nat2 <- gsub("<span>Nationality:", "", nat1)
nat3 <- gsub("</span>", "", nat2)
nat4 <- trimws(nat3)
df1 <- data.frame(pcode[i], name5, pos4, heig4, born8, nat4, website)
if (verbose) {
print(df1)
}
df <- rbind.data.frame(df, df1)
# Crawl-delay asks to pause between requests for 15 seconds.
Sys.sleep(15)
}
if (!is.null(df)) {
colnames(df) <- c("CombinID", "Player", "Position", "Height",
"Date_birth", "Nationality", "Website_player")
}
return(df)
} | /scratch/gouwar.j/cran-all/cranData/BAwiR/R/scraping_rosters_euro.R |
## ----setup, include=FALSE-----------------------------------------------------
knitr::opts_chunk$set(echo = TRUE)
## ----packages, message=FALSE, eval=FALSE--------------------------------------
# # Firstly, load BAwiR and other packages that will be used in the paper:
# library(BAwiR) # 1.3
# library(tidyverse) # 1.3.2
# library(FSA) # 0.8.22
# library(gridExtra) # 2.3
## ----figure 1, eval=FALSE-----------------------------------------------------
# # Code for Figure 1:
# # Load the data_app_acb file with the ACB games from the 1985-1986 season to the 2017-2018 season:
# load(url("http://www.uv.es/vivigui/softw/data_app_acb.RData"))
# title <- " Number of Spanish and foreign players along the ACB seasons \n Data from www.acb.com"
# get_pop_pyramid(data_app_acb, title, "eng")
## ----data, message=FALSE, eval=FALSE------------------------------------------
# # Create the data with games and players' info, add the advanced stats and compute the total numbers:
# df0 <- do_join_games_bio("ACB", acb_games_1718, acb_players_1718)
# df1 <- do_add_adv_stats(df0)
# df2 <- do_stats(df1, "Total", "2017-2018", "ACB", "Regular Season")
## ----figure 2, eval=FALSE-----------------------------------------------------
# # Code for Figure 2:
# df3 <- df2[which(df2$Position == "Center"), c("MP", "PTS", "Name", "CombinID")]
# df3 <- df3[df3$MP > 100,]
# ggplot(df3, aes(x = c(df3[,1])[[1]], y = c(df3[,2])[[1]], group = Name)) +
# geom_point() +
# geom_text(aes(label = Name), size = 2, vjust = -0.8) +
# labs(x = colnames(df3)[1], y = colnames(df3)[2],
# title = "ACB 2017-2018, Regular Season. Total stats. Centers.")
## ----table 2, eval=FALSE------------------------------------------------------
# # Code for Table 2:
# df4 <- df3 %>%
# mutate(Player_info = paste("http://www.acb.com/jugador.php?id=", CombinID, sep = "")) %>%
# select(-CombinID)
# df5 <- df4[order(df4[,1][[1]], decreasing = TRUE),]
# headtail(df5, 3)
## ----figure 3, eval=FALSE-----------------------------------------------------
# # Code for Figure 3:
# stats <- c("GP", "MP", "PTS", "FGPerc", "FTPerc", "TRB", "AST", "TOV", "PlusMinus", "PIR")
# descr_stats <- c("Games played", "Minutes played", "Points", "Field goals percentage",
# "Free throws percentage", "Total rebounds", "Assists", "Turnovers",
# "Plus/minus", "Performance index rating")
# df2_1 <- df2 %>%
# select(1:5, stats, 46:49)
#
# perc_plot_doncid <- get_bubble_plot(df2_1, "Doncic, Luka", descr_stats, 3, 7, 8) +
# theme(strip.text.x = element_blank()) +
# ggtitle(label = "Doncic, Luka",
# subtitle = "ACB 2017-2018, Regular Season. Total stats.") +
# theme(plot.title = element_text(size = 20))
#
# perc_plot_abalde <- get_bubble_plot(df2_1, "Abalde, Alberto", descr_stats, 3, 7, 8) +
# theme(strip.text.x = element_blank()) +
# ggtitle(label = "Abalde, Alberto",
# subtitle = "ACB 2017-2018, Regular Season. Total stats.") +
# theme(plot.title = element_text(size = 20))
#
# grid.arrange(perc_plot_doncid, perc_plot_abalde, ncol = 2)
## ----figure 4, message=FALSE, eval=FALSE--------------------------------------
# # Code for Figure 4:
# months <- c(df0 %>% distinct(Month))$Month
# months_order <- c("September", "October", "November", "December", "January",
# "February", "March", "April", "May", "June")
# months_plot <- match(months_order, months)
# months_plot1 <- months_plot[!is.na(months_plot)]
# months_plot2 <- months[months_plot1]
#
# df1_m <- df1 %>%
# filter(Player.x %in% c("Doncic, Luka", "Abalde, Alberto")) %>%
# group_by(Month) %>%
# do(do_stats(., "Average", "2017-2018", "ACB", "Regular Season")) %>%
# ungroup() %>%
# mutate(Month = factor(Month, levels = months_plot2)) %>%
# arrange(Month)
#
# df1_m1 <- df1_m %>%
# select(1:5, stats, 46:50) %>%
# select(-EPS)
# max_val <- max(df1_m1[,colnames(df1_m1) %in% stats])
# min_val <- min(df1_m1[,colnames(df1_m1) %in% stats])
# get_barplot_monthly_stats(df1_m1, "ACB 2017-2018, Regular Season. Monthly average stats.", 3) +
# scale_y_continuous(limits = c(min_val - 10, max_val + 10))
## ----figure 5, message=FALSE, eval=FALSE--------------------------------------
# # Code for Figure 5:
# df0$Compet <- "ACB"
# plot_yearly <- get_stats_seasons(df0, "ACB", c("Doncic, Luka", "Abalde, Alberto"),
# stats[1:4], "Regular Season", TRUE, FALSE)
# plot_yearly$gg +
# labs(title = "ACB 2017-2018, Regular Season. Yearly average stats.") +
# theme(strip.text.x = element_text(size = 15))
## ----figure 6, message=FALSE, eval=FALSE--------------------------------------
# # Code for Figure 6:
# levels_stats <- list("Offensive" = c("PTS", "FG", "FGA", "FGPerc",
# "TwoP", "TwoPA", "TwoPPerc",
# "ThreeP", "ThreePA", "ThreePPerc",
# "FT", "FTA", "FTPerc", "ORB", "AST"),
# "Defensive" = c("DRB", "STL", "PF"),
# "Other" = c("GP", "MP", "TRB", "PlusMinus", "PIR"),
# "Advanced" = c("EFGPerc", "PPS"))
# get_heatmap_bb(df2, "Real_Madrid", levels_stats, "PlusMinus", 9,
# paste("ACB", "2017-2018, Regular Season.", "Total stats.", sep = " "))
## ----figure 7, eval=FALSE-----------------------------------------------------
# # Code for Figure 7:
# get_shooting_plot(df2, "Real_Madrid", 3, 1, "ACB 2017-2018, Regular Season.", "en") +
# theme(plot.title = element_text(size = 15))
## ----figure 8, eval=FALSE-----------------------------------------------------
# # Code for Figure 8:
# df1_10 <- df1 %>%
# filter(Day <= 10)
# teams <- as.character(rev(sort(unique(df2$Team))))
# df_four_factors <- do_four_factors_df(df1_10, teams)
# get_four_factors_plot(df_four_factors$df_rank, df_four_factors$df_no_rank,
# c("Real_Madrid", "Valencia"), "en") +
# ggtitle("ACB 2017-2018, Regular Season.")
## ----figure 9, eval=FALSE-----------------------------------------------------
# # Code for Figure 9:
# df0$Compet <- "ACB"
# gg <- get_table_results(df0, "ACB", "2017-2018")
# gg$plot_teams
## ----figure 10, eval=FALSE----------------------------------------------------
# # Code for Figure 10:
# get_map_nats(df2) +
# ggtitle("ACB 2017-2018, Regular Season.")
## ----session info-------------------------------------------------------------
sessionInfo()
| /scratch/gouwar.j/cran-all/cranData/BAwiR/inst/doc/BAwiR.R |
---
title: "Visualization of European basketball data"
author: "Guillermo Vinue"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Visualization of European basketball data}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
This document contains all the needed R code to reproduce the results described in the paper *A Web Application for Interactive Visualization of European Basketball Data* ([https://doi.org/10.1089/big.2018.0124](https://doi.org/10.1089/big.2018.0124){target="_blank"}), which presents the dashboard available at [https://www.uv.es/vivigui/AppEuroACB.html](https://www.uv.es/vivigui/AppEuroACB.html){target="_blank"}. This dashboard belongs to the platform available at [https://www.uv.es/vivigui/basketball_platform.html](https://www.uv.es/vivigui/basketball_platform.html){target="_blank"}.
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
```
```{r packages, message=FALSE, eval=FALSE}
# Firstly, load BAwiR and other packages that will be used in the paper:
library(BAwiR) # 1.3
library(tidyverse) # 1.3.2
library(FSA) # 0.8.22
library(gridExtra) # 2.3
```
```{r figure 1, eval=FALSE}
# Code for Figure 1:
# Load the data_app_acb file with the ACB games from the 1985-1986 season to the 2017-2018 season:
load(url("http://www.uv.es/vivigui/softw/data_app_acb.RData"))
title <- " Number of Spanish and foreign players along the ACB seasons \n Data from www.acb.com"
get_pop_pyramid(data_app_acb, title, "eng")
```
```{r data, message=FALSE, eval=FALSE}
# Create the data with games and players' info, add the advanced stats and compute the total numbers:
df0 <- do_join_games_bio("ACB", acb_games_1718, acb_players_1718)
df1 <- do_add_adv_stats(df0)
df2 <- do_stats(df1, "Total", "2017-2018", "ACB", "Regular Season")
```
```{r figure 2, eval=FALSE}
# Code for Figure 2:
df3 <- df2[which(df2$Position == "Center"), c("MP", "PTS", "Name", "CombinID")]
df3 <- df3[df3$MP > 100,]
ggplot(df3, aes(x = c(df3[,1])[[1]], y = c(df3[,2])[[1]], group = Name)) +
geom_point() +
geom_text(aes(label = Name), size = 2, vjust = -0.8) +
labs(x = colnames(df3)[1], y = colnames(df3)[2],
title = "ACB 2017-2018, Regular Season. Total stats. Centers.")
```
```{r table 2, eval=FALSE}
# Code for Table 2:
df4 <- df3 %>%
mutate(Player_info = paste("http://www.acb.com/jugador.php?id=", CombinID, sep = "")) %>%
select(-CombinID)
df5 <- df4[order(df4[,1][[1]], decreasing = TRUE),]
headtail(df5, 3)
```
```{r figure 3, eval=FALSE}
# Code for Figure 3:
stats <- c("GP", "MP", "PTS", "FGPerc", "FTPerc", "TRB", "AST", "TOV", "PlusMinus", "PIR")
descr_stats <- c("Games played", "Minutes played", "Points", "Field goals percentage",
"Free throws percentage", "Total rebounds", "Assists", "Turnovers",
"Plus/minus", "Performance index rating")
df2_1 <- df2 %>%
select(1:5, stats, 46:49)
perc_plot_doncid <- get_bubble_plot(df2_1, "Doncic, Luka", descr_stats, 3, 7, 8) +
theme(strip.text.x = element_blank()) +
ggtitle(label = "Doncic, Luka",
subtitle = "ACB 2017-2018, Regular Season. Total stats.") +
theme(plot.title = element_text(size = 20))
perc_plot_abalde <- get_bubble_plot(df2_1, "Abalde, Alberto", descr_stats, 3, 7, 8) +
theme(strip.text.x = element_blank()) +
ggtitle(label = "Abalde, Alberto",
subtitle = "ACB 2017-2018, Regular Season. Total stats.") +
theme(plot.title = element_text(size = 20))
grid.arrange(perc_plot_doncid, perc_plot_abalde, ncol = 2)
```
```{r figure 4, message=FALSE, eval=FALSE}
# Code for Figure 4:
months <- c(df0 %>% distinct(Month))$Month
months_order <- c("September", "October", "November", "December", "January",
"February", "March", "April", "May", "June")
months_plot <- match(months_order, months)
months_plot1 <- months_plot[!is.na(months_plot)]
months_plot2 <- months[months_plot1]
df1_m <- df1 %>%
filter(Player.x %in% c("Doncic, Luka", "Abalde, Alberto")) %>%
group_by(Month) %>%
do(do_stats(., "Average", "2017-2018", "ACB", "Regular Season")) %>%
ungroup() %>%
mutate(Month = factor(Month, levels = months_plot2)) %>%
arrange(Month)
df1_m1 <- df1_m %>%
select(1:5, stats, 46:50) %>%
select(-EPS)
max_val <- max(df1_m1[,colnames(df1_m1) %in% stats])
min_val <- min(df1_m1[,colnames(df1_m1) %in% stats])
get_barplot_monthly_stats(df1_m1, "ACB 2017-2018, Regular Season. Monthly average stats.", 3) +
scale_y_continuous(limits = c(min_val - 10, max_val + 10))
```
```{r figure 5, message=FALSE, eval=FALSE}
# Code for Figure 5:
df0$Compet <- "ACB"
plot_yearly <- get_stats_seasons(df0, "ACB", c("Doncic, Luka", "Abalde, Alberto"),
stats[1:4], "Regular Season", TRUE, FALSE)
plot_yearly$gg +
labs(title = "ACB 2017-2018, Regular Season. Yearly average stats.") +
theme(strip.text.x = element_text(size = 15))
```
```{r figure 6, message=FALSE, eval=FALSE}
# Code for Figure 6:
levels_stats <- list("Offensive" = c("PTS", "FG", "FGA", "FGPerc",
"TwoP", "TwoPA", "TwoPPerc",
"ThreeP", "ThreePA", "ThreePPerc",
"FT", "FTA", "FTPerc", "ORB", "AST"),
"Defensive" = c("DRB", "STL", "PF"),
"Other" = c("GP", "MP", "TRB", "PlusMinus", "PIR"),
"Advanced" = c("EFGPerc", "PPS"))
get_heatmap_bb(df2, "Real_Madrid", levels_stats, "PlusMinus", 9,
paste("ACB", "2017-2018, Regular Season.", "Total stats.", sep = " "))
```
```{r figure 7, eval=FALSE}
# Code for Figure 7:
get_shooting_plot(df2, "Real_Madrid", 3, 1, "ACB 2017-2018, Regular Season.", "en") +
theme(plot.title = element_text(size = 15))
```
```{r figure 8, eval=FALSE}
# Code for Figure 8:
df1_10 <- df1 %>%
filter(Day <= 10)
teams <- as.character(rev(sort(unique(df2$Team))))
df_four_factors <- do_four_factors_df(df1_10, teams)
get_four_factors_plot(df_four_factors$df_rank, df_four_factors$df_no_rank,
c("Real_Madrid", "Valencia"), "en") +
ggtitle("ACB 2017-2018, Regular Season.")
```
```{r figure 9, eval=FALSE}
# Code for Figure 9:
df0$Compet <- "ACB"
gg <- get_table_results(df0, "ACB", "2017-2018")
gg$plot_teams
```
```{r figure 10, eval=FALSE}
# Code for Figure 10:
get_map_nats(df2) +
ggtitle("ACB 2017-2018, Regular Season.")
```
```{r session info}
sessionInfo()
``` | /scratch/gouwar.j/cran-all/cranData/BAwiR/inst/doc/BAwiR.Rmd |
## ----setup, include=FALSE-----------------------------------------------------
knitr::opts_chunk$set(echo = TRUE)
## ----packages, message=FALSE, eval=FALSE--------------------------------------
# # Firstly, load BAwiR and other packages that will be used in the paper:
# library(BAwiR) # 1.3
# library(tidyverse) # 1.3.2
## ----data, eval=FALSE---------------------------------------------------------
# df0 <- acb_vbc_cz_pbp_2223
#
# day_num <- unique(acb_vbc_cz_pbp_2223$day)
# game_code <- unique(acb_vbc_cz_pbp_2223$game_code)
## ----processing, eval=FALSE---------------------------------------------------
# acb_games_2223_sl <- acb_vbc_cz_sl_2223 %>%
# filter(period == "1C")
#
# df1 <- do_prepare_data(df0, day_num,
# acb_games_2223_sl, acb_games_2223_info,
# game_code)
## ----lineups, eval=FALSE------------------------------------------------------
# # Lineups and sub-lineups:
# data_li <- do_lineup(df1, day_num, game_code, "Valencia Basket", FALSE)
# data_subli <- do_sub_lineup(data_li, 4)
## ----possessions, eval=FALSE--------------------------------------------------
# # Possessions:
# data_poss <- do_possession(df1, "1C")
## ----timeouts, eval=FALSE-----------------------------------------------------
# # Timeouts:
# df1_to <- do_prepare_data_to(df0, TRUE, acb_games_2223_info, acb_games_2223_coach)
# data_to <- do_time_out_success(df1_to, day_num, game_code,
# "Casademont Zaragoza_Porfirio Fisac", FALSE)
## ----periods, eval=FALSE------------------------------------------------------
# # Periods:
# df0_per <- df0
#
# rm_overtime <- TRUE # Decide whether to remove overtimes.
# if (rm_overtime) {
# df0 <- df0 %>%
# filter(!grepl("PR", period)) %>%
# mutate(period = as.character(period))
# }
#
# team_sel <- "Valencia Basket" # "Casademont Zaragoza"
# period_sel <- "1C" # "4C"
# player_sel <- "Webb" # "Mara"
#
# df1 <- df0 %>%
# filter(team == team_sel) %>%
# filter(!action %in% c("D - Descalificante - No TL", "Altercado no TL"))
#
# df2 <- df1 %>%
# filter(period == period_sel)
#
# df0_inli_team <- acb_vbc_cz_sl_2223 %>%
# filter(team == team_sel, period == period_sel)
#
# df3 <- do_prepare_data(df2, day_num,
# df0_inli_team, acb_games_2223_info,
# game_code)
#
# data_per <- do_stats_per_period(df3, day_num, game_code, team_sel, period_sel, player_sel)
#
# # Clutch time:
# data_clutch <- do_clutch_time(acb_vbc_cz_pbp_2223)
## ----fouls, eval=FALSE--------------------------------------------------------
# # Free throw fouls:
# data_ft_comm <- do_ft_fouls(df0, "comm")
# data_ft_rec <- do_ft_fouls(df0, "rec")
#
# # Offensive fouls:
# data_off_comm <- do_offensive_fouls(df0, "comm")
# data_off_rec <- do_offensive_fouls(df0, "rec")
## ----rebounds, eval=FALSE-----------------------------------------------------
# # Offensive rebounds:
# df1_or <- do_prepare_data_or(df0, TRUE, acb_games_2223_info)
# data_or <- do_reb_off_success(df1_or, day_num, game_code, "Valencia Basket", FALSE)
## ----session info-------------------------------------------------------------
sessionInfo()
| /scratch/gouwar.j/cran-all/cranData/BAwiR/inst/doc/BAwiR_pbp.R |
---
title: "Analysis of Spanish play-by-play data"
author: "Guillermo Vinue"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Analysis of Spanish play-by-play data}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
This document contains all the needed R code to reproduce the results described in the paper *A Basketball Big Data Platform for Box Score and Play-by-Play Data*, that has been submitted for publication. It presents the dashboard available at [https://www.uv.es/vivigui/AppPBP.html](https://www.uv.es/vivigui/AppPBP.html){target="_blank"}. This dashboard belongs to the platform available at [https://www.uv.es/vivigui/basketball_platform.html](https://www.uv.es/vivigui/basketball_platform.html){target="_blank"}.
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
```
```{r packages, message=FALSE, eval=FALSE}
# Firstly, load BAwiR and other packages that will be used in the paper:
library(BAwiR) # 1.3
library(tidyverse) # 1.3.2
```
The following data file is an illustration of the type of play-by-play data available from the Spanish ACB league.
```{r data, eval=FALSE}
df0 <- acb_vbc_cz_pbp_2223
day_num <- unique(acb_vbc_cz_pbp_2223$day)
game_code <- unique(acb_vbc_cz_pbp_2223$game_code)
```
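A quick look at the first rows illustrates the structure of these data: one row per game action, with columns such as the period, the team and the action itself. As in the rest of the vignette, the chunk below is not evaluated:
```{r glimpse, eval=FALSE}
head(df0)
```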
Do some first data processing:
```{r processing, eval=FALSE}
acb_games_2223_sl <- acb_vbc_cz_sl_2223 %>%
filter(period == "1C")
df1 <- do_prepare_data(df0, day_num,
acb_games_2223_sl, acb_games_2223_info,
game_code)
```
```{r lineups, eval=FALSE}
# Lineups and sub-lineups:
data_li <- do_lineup(df1, day_num, game_code, "Valencia Basket", FALSE)
data_subli <- do_sub_lineup(data_li, 4)
```
```{r possessions, eval=FALSE}
# Possessions:
data_poss <- do_possession(df1, "1C")
```
```{r timeouts, eval=FALSE}
# Timeouts:
df1_to <- do_prepare_data_to(df0, TRUE, acb_games_2223_info, acb_games_2223_coach)
data_to <- do_time_out_success(df1_to, day_num, game_code,
"Casademont Zaragoza_Porfirio Fisac", FALSE)
```
```{r periods, eval=FALSE}
# Periods:
df0_per <- df0
rm_overtime <- TRUE # Decide whether to remove overtimes.
if (rm_overtime) {
df0 <- df0 %>%
filter(!grepl("PR", period)) %>%
mutate(period = as.character(period))
}
team_sel <- "Valencia Basket" # "Casademont Zaragoza"
period_sel <- "1C" # "4C"
player_sel <- "Webb" # "Mara"
df1 <- df0 %>%
filter(team == team_sel) %>%
filter(!action %in% c("D - Descalificante - No TL", "Altercado no TL"))
df2 <- df1 %>%
filter(period == period_sel)
df0_inli_team <- acb_vbc_cz_sl_2223 %>%
filter(team == team_sel, period == period_sel)
df3 <- do_prepare_data(df2, day_num,
df0_inli_team, acb_games_2223_info,
game_code)
data_per <- do_stats_per_period(df3, day_num, game_code, team_sel, period_sel, player_sel)
# Clutch time:
data_clutch <- do_clutch_time(acb_vbc_cz_pbp_2223)
```
```{r fouls, eval=FALSE}
# Free throw fouls:
data_ft_comm <- do_ft_fouls(df0, "comm")
data_ft_rec <- do_ft_fouls(df0, "rec")
# Offensive fouls:
data_off_comm <- do_offensive_fouls(df0, "comm")
data_off_rec <- do_offensive_fouls(df0, "rec")
```
```{r rebounds, eval=FALSE}
# Offensive rebounds:
df1_or <- do_prepare_data_or(df0, TRUE, acb_games_2223_info)
data_or <- do_reb_off_success(df1_or, day_num, game_code, "Valencia Basket", FALSE)
```
```{r session info}
sessionInfo()
``` | /scratch/gouwar.j/cran-all/cranData/BAwiR/inst/doc/BAwiR_pbp.Rmd |
---
title: "Visualization of European basketball data"
author: "Guillermo Vinue"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Visualization of European basketball data}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
This document contains all the needed R code to reproduce the results described in the paper *A Web Application for Interactive Visualization of European Basketball Data* ([https://doi.org/10.1089/big.2018.0124](https://doi.org/10.1089/big.2018.0124){target="_blank"}), which presents the dashboard available at [https://www.uv.es/vivigui/AppEuroACB.html](https://www.uv.es/vivigui/AppEuroACB.html){target="_blank"}. This dashboard belongs to the platform available at [https://www.uv.es/vivigui/basketball_platform.html](https://www.uv.es/vivigui/basketball_platform.html){target="_blank"}.
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
```
```{r packages, message=FALSE, eval=FALSE}
# Firstly, load BAwiR and other packages that will be used in the paper:
library(BAwiR) # 1.3
library(tidyverse) # 1.3.2
library(FSA) # 0.8.22
library(gridExtra) # 2.3
```
```{r figure 1, eval=FALSE}
# Code for Figure 1:
# Load the data_app_acb file with the ACB games from the 1985-1986 season to the 2017-2018 season:
load(url("http://www.uv.es/vivigui/softw/data_app_acb.RData"))
title <- " Number of Spanish and foreign players along the ACB seasons \n Data from www.acb.com"
get_pop_pyramid(data_app_acb, title, "eng")
```
```{r data, message=FALSE, eval=FALSE}
# Create the data with games and players' info, add the advanced stats and compute the total numbers:
df0 <- do_join_games_bio("ACB", acb_games_1718, acb_players_1718)
df1 <- do_add_adv_stats(df0)
df2 <- do_stats(df1, "Total", "2017-2018", "ACB", "Regular Season")
```
```{r figure 2, eval=FALSE}
# Code for Figure 2:
df3 <- df2[which(df2$Position == "Center"), c("MP", "PTS", "Name", "CombinID")]
df3 <- df3[df3$MP > 100,]
ggplot(df3, aes(x = c(df3[,1])[[1]], y = c(df3[,2])[[1]], group = Name)) +
geom_point() +
geom_text(aes(label = Name), size = 2, vjust = -0.8) +
labs(x = colnames(df3)[1], y = colnames(df3)[2],
title = "ACB 2017-2018, Regular Season. Total stats. Centers.")
```
```{r table 2, eval=FALSE}
# Code for Table 2:
df4 <- df3 %>%
mutate(Player_info = paste("http://www.acb.com/jugador.php?id=", CombinID, sep = "")) %>%
select(-CombinID)
df5 <- df4[order(df4[,1][[1]], decreasing = TRUE),]
headtail(df5, 3)
```
```{r figure 3, eval=FALSE}
# Code for Figure 3:
stats <- c("GP", "MP", "PTS", "FGPerc", "FTPerc", "TRB", "AST", "TOV", "PlusMinus", "PIR")
descr_stats <- c("Games played", "Minutes played", "Points", "Field goals percentage",
"Free throws percentage", "Total rebounds", "Assists", "Turnovers",
"Plus/minus", "Performance index rating")
df2_1 <- df2 %>%
select(1:5, stats, 46:49)
perc_plot_doncid <- get_bubble_plot(df2_1, "Doncic, Luka", descr_stats, 3, 7, 8) +
theme(strip.text.x = element_blank()) +
ggtitle(label = "Doncic, Luka",
subtitle = "ACB 2017-2018, Regular Season. Total stats.") +
theme(plot.title = element_text(size = 20))
perc_plot_abalde <- get_bubble_plot(df2_1, "Abalde, Alberto", descr_stats, 3, 7, 8) +
theme(strip.text.x = element_blank()) +
ggtitle(label = "Abalde, Alberto",
subtitle = "ACB 2017-2018, Regular Season. Total stats.") +
theme(plot.title = element_text(size = 20))
grid.arrange(perc_plot_doncid, perc_plot_abalde, ncol = 2)
```
```{r figure 4, message=FALSE, eval=FALSE}
# Code for Figure 4:
months <- c(df0 %>% distinct(Month))$Month
months_order <- c("September", "October", "November", "December", "January",
"February", "March", "April", "May", "June")
months_plot <- match(months_order, months)
months_plot1 <- months_plot[!is.na(months_plot)]
months_plot2 <- months[months_plot1]
df1_m <- df1 %>%
filter(Player.x %in% c("Doncic, Luka", "Abalde, Alberto")) %>%
group_by(Month) %>%
do(do_stats(., "Average", "2017-2018", "ACB", "Regular Season")) %>%
ungroup() %>%
mutate(Month = factor(Month, levels = months_plot2)) %>%
arrange(Month)
df1_m1 <- df1_m %>%
select(1:5, stats, 46:50) %>%
select(-EPS)
max_val <- max(df1_m1[,colnames(df1_m1) %in% stats])
min_val <- min(df1_m1[,colnames(df1_m1) %in% stats])
get_barplot_monthly_stats(df1_m1, "ACB 2017-2018, Regular Season. Monthly average stats.", 3) +
scale_y_continuous(limits = c(min_val - 10, max_val + 10))
```
```{r figure 5, message=FALSE, eval=FALSE}
# Code for Figure 5:
df0$Compet <- "ACB"
plot_yearly <- get_stats_seasons(df0, "ACB", c("Doncic, Luka", "Abalde, Alberto"),
stats[1:4], "Regular Season", TRUE, FALSE)
plot_yearly$gg +
labs(title = "ACB 2017-2018, Regular Season. Yearly average stats.") +
theme(strip.text.x = element_text(size = 15))
```
```{r figure 6, message=FALSE, eval=FALSE}
# Code for Figure 6:
levels_stats <- list("Offensive" = c("PTS", "FG", "FGA", "FGPerc",
"TwoP", "TwoPA", "TwoPPerc",
"ThreeP", "ThreePA", "ThreePPerc",
"FT", "FTA", "FTPerc", "ORB", "AST"),
"Defensive" = c("DRB", "STL", "PF"),
"Other" = c("GP", "MP", "TRB", "PlusMinus", "PIR"),
"Advanced" = c("EFGPerc", "PPS"))
get_heatmap_bb(df2, "Real_Madrid", levels_stats, "PlusMinus", 9,
paste("ACB", "2017-2018, Regular Season.", "Total stats.", sep = " "))
```
```{r figure 7, eval=FALSE}
# Code for Figure 7:
get_shooting_plot(df2, "Real_Madrid", 3, 1, "ACB 2017-2018, Regular Season.", "en") +
theme(plot.title = element_text(size = 15))
```
```{r figure 8, eval=FALSE}
# Code for Figure 8:
df1_10 <- df1 %>%
filter(Day <= 10)
teams <- as.character(rev(sort(unique(df2$Team))))
df_four_factors <- do_four_factors_df(df1_10, teams)
get_four_factors_plot(df_four_factors$df_rank, df_four_factors$df_no_rank,
c("Real_Madrid", "Valencia"), "en") +
ggtitle("ACB 2017-2018, Regular Season.")
```
```{r figure 9, eval=FALSE}
# Code for Figure 9:
df0$Compet <- "ACB"
gg <- get_table_results(df0, "ACB", "2017-2018")
gg$plot_teams
```
```{r figure 10, eval=FALSE}
# Code for Figure 10:
get_map_nats(df2) +
ggtitle("ACB 2017-2018, Regular Season.")
```
```{r session info}
sessionInfo()
``` | /scratch/gouwar.j/cran-all/cranData/BAwiR/vignettes/BAwiR.Rmd |
---
title: "Analysis of Spanish play-by-play data"
author: "Guillermo Vinue"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Analysis of Spanish play-by-play data}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
This document contains all the needed R code to reproduce the results described in the paper *A Basketball Big Data Platform for Box Score and Play-by-Play Data*, that has been submitted for publication. It presents the dashboard available at [https://www.uv.es/vivigui/AppPBP.html](https://www.uv.es/vivigui/AppPBP.html){target="_blank"}. This dashboard belongs to the platform available at [https://www.uv.es/vivigui/basketball_platform.html](https://www.uv.es/vivigui/basketball_platform.html){target="_blank"}.
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
```
```{r packages, message=FALSE, eval=FALSE}
# Firstly, load BAwiR and other packages that will be used in the paper:
library(BAwiR) # 1.3
library(tidyverse) # 1.3.2
```
The following data file is an illustration of the type of play-by-play data available from the Spanish ACB league.
```{r data, eval=FALSE}
df0 <- acb_vbc_cz_pbp_2223
day_num <- unique(acb_vbc_cz_pbp_2223$day)
game_code <- unique(acb_vbc_cz_pbp_2223$game_code)
```
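A quick look at the first rows illustrates the structure of these data: one row per game action, with columns such as the period, the team and the action itself. As in the rest of the vignette, the chunk below is not evaluated:
```{r glimpse, eval=FALSE}
head(df0)
```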
Do some first data processing:
```{r processing, eval=FALSE}
acb_games_2223_sl <- acb_vbc_cz_sl_2223 %>%
filter(period == "1C")
df1 <- do_prepare_data(df0, day_num,
acb_games_2223_sl, acb_games_2223_info,
game_code)
```
```{r lineups, eval=FALSE}
# Lineups and sub-lineups:
data_li <- do_lineup(df1, day_num, game_code, "Valencia Basket", FALSE)
data_subli <- do_sub_lineup(data_li, 4)
```
```{r possessions, eval=FALSE}
# Possessions:
data_poss <- do_possession(df1, "1C")
```
```{r timeouts, eval=FALSE}
# Timeouts:
df1_to <- do_prepare_data_to(df0, TRUE, acb_games_2223_info, acb_games_2223_coach)
data_to <- do_time_out_success(df1_to, day_num, game_code,
"Casademont Zaragoza_Porfirio Fisac", FALSE)
```
```{r periods, eval=FALSE}
# Periods:
df0_per <- df0
rm_overtime <- TRUE # Decide whether to remove overtimes.
if (rm_overtime) {
df0 <- df0 %>%
filter(!grepl("PR", period)) %>%
mutate(period = as.character(period))
}
team_sel <- "Valencia Basket" # "Casademont Zaragoza"
period_sel <- "1C" # "4C"
player_sel <- "Webb" # "Mara"
df1 <- df0 %>%
filter(team == team_sel) %>%
filter(!action %in% c("D - Descalificante - No TL", "Altercado no TL"))
df2 <- df1 %>%
filter(period == period_sel)
df0_inli_team <- acb_vbc_cz_sl_2223 %>%
filter(team == team_sel, period == period_sel)
df3 <- do_prepare_data(df2, day_num,
df0_inli_team, acb_games_2223_info,
game_code)
data_per <- do_stats_per_period(df3, day_num, game_code, team_sel, period_sel, player_sel)
# Clutch time:
data_clutch <- do_clutch_time(acb_vbc_cz_pbp_2223)
```
```{r fouls, eval=FALSE}
# Free throw fouls:
data_ft_comm <- do_ft_fouls(df0, "comm")
data_ft_rec <- do_ft_fouls(df0, "rec")
# Offensive fouls:
data_off_comm <- do_offensive_fouls(df0, "comm")
data_off_rec <- do_offensive_fouls(df0, "rec")
```
```{r rebounds, eval=FALSE}
# Offensive rebounds:
df1_or <- do_prepare_data_or(df0, TRUE, acb_games_2223_info)
data_or <- do_reb_off_success(df1_or, day_num, game_code, "Valencia Basket", FALSE)
```
```{r session info}
sessionInfo()
``` | /scratch/gouwar.j/cran-all/cranData/BAwiR/vignettes/BAwiR_pbp.Rmd |
BBoptim <- function(par, fn, gr=NULL, method=c(2,3,1),
lower=-Inf, upper=Inf, project=NULL, projectArgs=NULL,
control=list(), quiet=FALSE, ...)
{
ctrl <- list(maxit = 1500, M = c(50, 10), ftol=1.e-10, gtol = 1e-05, maxfeval = 10000,
maximize = FALSE, trace = TRUE, triter = 10, eps = 1e-07, checkGrad=NULL)
namc <- names(control)
if (!all(namc %in% names(ctrl)))
stop("unknown names in control: ", namc[!(namc %in% names(ctrl))])
if(is.matrix(par)) stop("argument par should not be a matrix in BBoptim.")
ctrl[namc] <- control
M <- ctrl$M
maxit <- ctrl$maxit
ftol <- ctrl$ftol
gtol <- ctrl$gtol
maxfeval <- ctrl$maxfeval
maximize <- ctrl$maximize
trace <- ctrl$trace
triter <- ctrl$triter
eps <- ctrl$eps
checkGrad <- ctrl$checkGrad
control.pars <- expand.grid(method=method, M=M)
feval <- iter <- 0
ans.best <- NULL
ans.best.value <- Inf
  for (i in seq_len(nrow(control.pars))) {
cpars <- unlist(control.pars[i, ])
temp <- try(spg(par=par, fn=fn, gr=gr, method=cpars[1], project=project,
lower=lower, upper=upper, projectArgs=projectArgs,
control=list(M=as.numeric(cpars[2]), maxit=maxit,
maximize=maximize, trace=trace, triter=triter,
maxfeval=maxfeval, eps=eps, gtol=gtol, ftol=ftol,
checkGrad=checkGrad),
quiet=quiet, alertConvergence=FALSE, ...), silent=TRUE)
if (!inherits(temp, "try-error")) {
feval <- feval + temp$feval
iter <- iter + temp$iter
if (temp$convergence == 0) {
ans.best <- temp
ans.best$feval <- feval
ans.best$iter <- iter
ans.best$cpar <- cpars
break
}
else if (temp$value < ans.best.value) {
ans.best <- temp
ans.best.value <- ans.best$value
ans.best$feval <- feval
ans.best$iter <- iter
ans.best$cpar <- cpars
}
}
} # "i" loop completed
if (is.null(ans.best)) stop("All calls to spg failed. Last error: ", temp)
if(!quiet) {if (ans.best$convergence != 0)
cat (" Unsuccessful convergence.\n")
else cat (" Successful convergence.\n")
}
ans.best
}
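
# A minimal usage sketch, kept inside `if (FALSE)` so that nothing runs at
# package load time.  The objective `rosbk` is a hypothetical helper written
# only for this illustration: the two-dimensional Rosenbrock function, whose
# minimum is at c(1, 1) with value 0.
if (FALSE) {
  rosbk <- function(x) {
    # Rosenbrock banana function.
    100 * (x[2] - x[1]^2)^2 + (1 - x[1])^2
  }
  ans <- BBoptim(par = c(-1.2, 1), fn = rosbk)
  ans$par    # should be close to c(1, 1)
  ans$value  # should be close to 0
}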
| /scratch/gouwar.j/cran-all/cranData/BB/R/BBoptim.R |
BBsolve <- function(par, fn, method=c(2,3,1), control=list(), quiet=FALSE, ...)
{
ctrl <- list(maxit = 1500, M = c(50, 10), tol = 1e-07, trace = FALSE,
triter = 10, noimp = 100, NM = c(TRUE, FALSE))
namc <- names(control)
if (!all(namc %in% names(ctrl)))
stop("unknown names in control: ", namc[!(namc %in% names(ctrl))])
if(is.matrix(par)) stop("argument par should not be a matrix in BBsolve.")
ctrl[namc] <- control
M <- ctrl$M
maxit <- ctrl$maxit
tol <- ctrl$tol
trace <- ctrl$trace
triter <- ctrl$triter
noimp <- ctrl$noimp
NM <- if (length(par) > 1 & length(par) <= 20) ctrl$NM else FALSE
control.pars <- expand.grid(method=method, M=M, NM=NM)
  feval <- iter <- 0
  ans.best <- NULL
  ans.best.value <- Inf
  for (i in seq_len(nrow(control.pars))) {
cpars <- unlist(control.pars[i, ])
#cat("Try : ", i, "Method = ", cpars[1], "M = ", cpars[2], "Nelder-Mead = ", cpars[3], "\n")
temp <- try(dfsane(par=par, fn, method=cpars[1],
control=list(M=as.numeric(cpars[2]), NM=cpars[3],
maxit=maxit, tol=tol, trace=trace, triter=triter,
noimp=noimp),
quiet=quiet, alertConvergence=FALSE, ...), silent=TRUE)
if (!inherits(temp, "try-error")) {
feval <- feval + temp$feval
iter <- iter + temp$iter
if (temp$convergence == 0) {
ans.best <- temp
ans.best$feval <- feval
ans.best$iter <- iter
ans.best$cpar <- cpars
break
}
else if (temp$residual < ans.best.value) {
ans.best <- temp
ans.best.value <- ans.best$residual
ans.best$feval <- feval
ans.best$iter <- iter
ans.best$cpar <- cpars
}
}
} # "i" loop completed
  if (is.null(ans.best)) stop("All calls to dfsane failed. Last error: ", temp)
  if(!quiet) {if (ans.best$convergence != 0)  
cat (" Unsuccessful convergence.\n")
else cat (" Successful convergence.\n")
}
ans.best
}
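
# A minimal usage sketch, wrapped in `if (FALSE)` so it is never executed at
# package load time.  `fsys` is a hypothetical residual function defined only
# for this illustration; the system has a root at c(1, 1), which BBsolve
# should recover from a nearby starting value.
if (FALSE) {
  fsys <- function(x) {
    c(x[1]^2 + x[2]^2 - 2,
      exp(x[1] - 1) + x[2]^2 - 2)
  }
  ans <- BBsolve(par = c(2, 0.5), fn = fsys)
  ans$par       # should be close to c(1, 1)
  ans$residual  # norm of F at the solution, close to 0
}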
| /scratch/gouwar.j/cran-all/cranData/BB/R/BBsolve.R |
dfsane <- function (par, fn, method = 2, control = list(),
quiet=FALSE, alertConvergence=TRUE, ...)
{
ctrl <- list(maxit = 1500, M = 10, tol = 1e-07, trace = !quiet,
triter = 10, noimp = 100, NM=FALSE, BFGS=FALSE)
namc <- names(control)
if (!all(namc %in% names(ctrl)))
stop("unknown names in control: ", namc[!(namc %in% names(ctrl))])
ctrl[namc] <- control
maxit <- ctrl$maxit
M <- ctrl$M
tol <- ctrl$tol
trace <- ctrl$trace
triter <- ctrl$triter
noimp <- ctrl$noimp
NM <- ctrl$NM
BFGS <- ctrl$BFGS
fargs <- list(...)
####
lsm <- function(x, fn, F, fval, alfa, M, lastfv, eta, fcnt,
bl, fargs) {
maxbl <- 100
gamma <- 1e-04
sigma1 <- 0.1
sigma2 <- 0.5
lam1 <- lam2 <- 1
cbl <- 0
fmax <- max(lastfv)
while (cbl < maxbl) {
d <- -alfa * F
xnew <- x + lam1 * d
Fnew <- try(do.call(fn, append(list(xnew), fargs)))
fcnt = fcnt + 1
            if (inherits(Fnew, "try-error") || any(is.nan(Fnew))) 
return(list(xnew = NA, Fnew = NA, fcnt = fcnt,
bl = bl, lsflag = 1, fune = NA))
fune1 <- sum(Fnew * Fnew)
if (fune1 <= (fmax + eta - (lam1^2 * gamma * fval))) {
if (cbl >= 1)
bl <- bl + 1
return(list(xnew = xnew, Fnew = Fnew, fcnt = fcnt,
bl = bl, lsflag = 0, fune = fune1))
}
xnew <- x - lam2 * d
Fnew <- try(do.call(fn, append(list(xnew), fargs)))
fcnt = fcnt + 1
            if (inherits(Fnew, "try-error") || any(is.nan(Fnew))) 
return(list(xnew = NA, Fnew = NA, fcnt = fcnt,
bl = bl, lsflag = 1, fune = NA))
fune2 <- sum(Fnew * Fnew)
if (fune2 <= (fmax + eta - (lam2^2 * gamma * fval))) {
if (cbl >= 1)
bl <- bl + 1
return(list(xnew = xnew, Fnew = Fnew, fcnt = fcnt,
bl = bl, lsflag = 0, fune = fune2))
}
lamc <- (2 * fval * lam1^2)/(2 * (fune1 + (2 * lam1 -
1) * fval))
c1 <- sigma1 * lam1
c2 <- sigma2 * lam1
lam1 <- if (lamc < c1)
c1
else if (lamc > c2)
c2
else lamc
lamc <- (2 * fval * lam2^2)/(2 * (fune2 + (2 * lam2 -
1) * fval))
c1 <- sigma1 * lam2
c2 <- sigma2 * lam2
lam2 <- if (lamc < c1)
c1
else if (lamc > c2)
c2
else lamc
cbl <- cbl + 1
}
        # Maximum number of backtracking steps exceeded; `fune' is not defined
        # on this path, so NA is returned as in the other failure branches.
        return(list(xnew = xnew, Fnew = Fnew, fcnt = fcnt, bl = bl, 
            lsflag = 2, fune = NA))
}
n <- length(par)
fcnt <- iter <- bl <- 0
alfa <- eta <- 1
eps <- 1e-10
lastfv <- rep(0, M)
    # Merit function: the squared Euclidean norm of the residuals, used by the
    # optional Nelder-Mead start-up and the L-BFGS-B finishing step.
    U <- function(x, ...) drop(crossprod(fn(x, ...)))
## We do initial Nelder-Mead start-up
if (NM) {
res <- try(optim(par=par, fn=U, method="Nelder-Mead", control=list(maxit=100), ...), silent=TRUE)
    if (inherits(res, "try-error")) {
cat(res)
stop("\nFailure in Nelder-Mead Start. Try another starting value \n")
}
else if (any(is.nan(res$par)))
stop("Failure in Nelder-Mead Start (NaN value). Try another starting value \n")
par <- res$par
fcnt <- as.numeric(res$counts[1])
}
F <- try(fn(par, ...))
fcnt <- fcnt + 1
    if (inherits(F, "try-error")) 
stop("Failure in initial functional evaluation. \n" )
else if (!is.numeric(F) || !is.vector(F))
stop("Function must return a vector numeric value.")
else if (any(is.nan(F), is.infinite(F), is.na(F)))
stop ("Failure in initial functional evaluation. \n" )
else if (length(F) == 1) if (!quiet)
warning("Function returns a scalar. Function BBoptim or spg is better.")
F0 <- normF <- sqrt(sum(F * F))
if (trace)
cat("Iteration: ", 0, " ||F(x0)||: ", F0/sqrt(n), "\n")
pbest <- par
normF.best <- normF
lastfv[1] <- normF^2
flag <- 0
knoimp <- 0
while (normF/sqrt(n) > tol & iter <= maxit) {
if ( (abs(alfa) <= eps) | (abs(alfa) >= 1/eps) )
alfa <- if (normF > 1)
1
else if (normF >= 1e-05 & normF <= 1)
1/normF
else if (normF < 1e-05)
1e+05
## Steplength for first iteration is scaled properly
##
if (iter==0) {
alfa <- min(1/normF, 1)
alfa1 <- alfa2 <- alfa
}
temp <- alfa2
alfa2 <- alfa
        if (normF <= 0.01) alfa <- alfa1 # retard scheme: near a solution, reuse the steplength from two iterations back
alfa1 <- temp
ls.ret <- lsm(x = par, fn = fn, F = F, fval = normF^2,
alfa, M = M, lastfv = lastfv, eta, fcnt, bl, fargs)
fcnt <- ls.ret$fcnt
bl <- ls.ret$bl
flag <- ls.ret$lsflag
if (flag > 0)
break
Fnew <- ls.ret$Fnew
pnew <- ls.ret$xnew
fune <- ls.ret$fune
pF <- sum((pnew - par) * (Fnew - F))
pp <- sum((pnew - par)^2)
FF <- sum((Fnew - F)^2)
alfa <- if (method == 1)
pp/pF
else if (method == 2)
pF/FF
else if (method == 3)
sign(pF) * sqrt(pp/FF)
if (is.nan(alfa))
alfa <- eps
par <- pnew
F <- Fnew
fun <- fune
normF <- sqrt(fun)
if (normF < normF.best) {
pbest <- par
normF.best <- normF
knoimp <- 0
} else knoimp <- knoimp + 1
iter <- iter + 1
lastfv[1 + iter%%M] <- fun
eta <- F0/(iter + 1)^2
if (trace && (iter%%triter == 0))
cat("iteration: ", iter, " ||F(xn)|| = ", normF,
"\n")
        ## Iterations are stopped when there is no decrease in ||F(x_n)|| over `noimp' consecutive iterations
        ##
if (knoimp == noimp) {
flag <- 3
break
}
} # main loop complete
conv <- if (flag == 0) {
if (normF.best/sqrt(n) <= tol)
list(type = 0, message = "Successful convergence")
else if (iter > maxit)
list(type = 1, message = "Maximum limit for iterations exceeded")
else
list(type = 2, message = "Method stagnated")
}
else if (flag == 1)
list(type = 3, message = "Failure: Error in function evaluation")
else if (flag == 2)
list(type = 4, message = "Failure: Maximum limit on steplength reductions exceeded")
else if (flag == 3)
list(type = 5, message = "Lack of improvement in objective function")
## We do final "optim" iterations using "L-BFGS-B" when type=2 or 5
if (BFGS & (conv$type==2 |conv$type==5) ) {
if (!quiet) cat(" Calling `L-BFGS-B' in `optim' \n")
res <- try(optim(par=pbest, fn=U, method="L-BFGS-B",
control=list(pgtol=1.e-08, factr=1000, maxit=200), ...),
silent=TRUE)
    if (!inherits(res, "try-error") && !any(is.nan(res$par)) ) {
 	normF.new <- sqrt(res$value)
	if (normF.new < normF.best) {
        normF.best <- normF.new
        pbest <- res$par
	}
	# Only count the extra function evaluations when the call succeeded;
	# a "try-error" object has no `counts' component.
	fcnt <- fcnt + as.numeric(res$counts[1])
	}
if (normF.best/sqrt(length(par)) <= tol)
conv <- list(type = 0, message = "Successful convergence")
}
if(alertConvergence && ( 0 != conv$type))
warning("Unsuccessful convergence.")
## We return "pbest" and "normF.best" #####
return(list(par = pbest, residual = normF.best/sqrt(length(par)),
fn.reduction = F0 - normF.best, feval = fcnt, iter = iter,
convergence = conv$type, message = conv$message))
}
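
# A minimal usage sketch, inside `if (FALSE)` so it never runs at package
# load time: calling dfsane directly with a non-default memory size M.  The
# residual function `expf` is hypothetical, written for this illustration;
# its root is rep(1, n).
if (FALSE) {
  expf <- function(x) {
    n <- length(x)
    f <- rep(NA, n)
    f[1] <- exp(x[1] - 1) - 1
    f[2:n] <- (2:n) * (exp(x[2:n] - 1) - x[2:n])
    f
  }
  p0 <- runif(10, 0.5, 1.5)
  ans <- dfsane(par = p0, fn = expf, method = 2,
                control = list(M = 20, trace = FALSE))
  ans$residual  # should be below the default tolerance
}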
| /scratch/gouwar.j/cran-all/cranData/BB/R/dfsane.R |
multiStart <- function(par, fn, gr = NULL, action = c("solve", "optimize"),
method = c(2, 3, 1), lower=-Inf, upper=Inf,
project=NULL, projectArgs=NULL,
control = list(), quiet=FALSE, details = FALSE, ...)
{
if (is.null(dim(par)))
par <- matrix(par, nrow = 1, ncol = length(par))
dtls <- list()
cvg <- vector("logical", length = nrow(par))
values <- rep(NA, length = nrow(par))
action <- match.arg(action)
feval <- iter <- 0
pmat <- matrix(NA, nrow(par), ncol(par))
  for (k in seq_len(nrow(par))) {
if (!quiet) cat("Parameter set : ", k, "... \n")
if (action == "solve")
ans <- try(BBsolve(par[k, ], fn = fn, method = method,
control = control, quiet=quiet, ...), silent=TRUE)
if (action == "optimize")
ans <- try(BBoptim(par[k, ], fn = fn, gr = gr,
method = method, lower=lower, upper=upper,
project=project, projectArgs=projectArgs,
control = control, quiet=quiet, ...), silent=TRUE)
if (inherits(ans, "try-error")) next
cvg[k] <- (ans$convergence == 0)
values[k] <- if (action == "solve") ans$residual else ans$value
pmat[k, ] <- ans$par
        if (details) dtls <- append(dtls, list(ans))  # keep each run's full result as one element
}
ans.ret <- list(par = pmat, fvalue = values, converged = cvg)
if (details) attr(ans.ret, "details") <- dtls
ans.ret
}
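
## A brief usage sketch (illustration only, not original source): root-finding
## from several random starting values. `demo_fn` and `p0mat` are hypothetical.
demo_fn <- function(x) c(x[1]^2 + x[2]^2 - 1, x[1] - x[2])
p0mat <- matrix(rnorm(10), 5, 2)             # 5 starting values, each of length 2
ms <- multiStart(par = p0mat, fn = demo_fn, action = "solve", quiet = TRUE)
ms$par[ms$converged, ]                        # roots near +/- c(1, 1)/sqrt(2)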
## ---- end of file: BB/R/multiStart.R ----
projectLinear <- function(par, A, b, meq) {
    # A projection function to incorporate linear equality and inequality
    # constraints in nonlinear optimization using `spg'
    #
    # The inequalities are defined such that:  A %*% x - b >= 0
n <- length(par)
if (meq > 0 | any(b - c(A %*% par) > 0)){
par <- par + quadprog::solve.QP(Dmat=diag(1,n), dvec=rep(0, n), Amat=t(A),
bvec = b - c(A %*% par), meq=meq, factorized=TRUE)$solution
}
par
}
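
## A hedged usage sketch (not part of the original source): minimize a quadratic
## over the probability simplex {x : sum(x) = 1, x >= 0} by passing projectLinear
## to spg(). `fn_quad`, `Amat` and `bvec` are illustrative names; the quadprog
## package must be installed.
fn_quad <- function(x) sum((x - c(0.1, 0.5, 0.9))^2)
Amat <- rbind(rep(1, 3), diag(3))   # row 1: equality sum(x) = 1; rows 2-4: x >= 0
bvec <- c(1, rep(0, 3))
ans_prj <- spg(par = rep(1/3, 3), fn = fn_quad, project = projectLinear,
               projectArgs = list(A = Amat, b = bvec, meq = 1), quiet = TRUE)
ans_prj$par   # feasible minimizer, summing to 1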
## ---- end of file: BB/R/project.R ----
sane <- function (par, fn, method = 2, control = list(),
quiet=FALSE, alertConvergence=TRUE, ...) {
ctrl <- list(maxit = 1500, M = 10, tol = 1e-07, trace = !quiet,
triter = 10, noimp = 100, NM=FALSE, BFGS=FALSE)
namc <- names(control)
if (!all(namc %in% names(ctrl)))
stop("unknown names in control: ", namc[!(namc %in% names(ctrl))])
ctrl[namc] <- control
maxit <- ctrl$maxit
M <- ctrl$M
tol <- ctrl$tol
trace <- ctrl$trace
triter <- ctrl$triter
noimp <- ctrl$noimp
NM <- ctrl$NM
BFGS <- ctrl$BFGS
fargs <- list(...)
lineSearch <- function(x, fn, F, fval, dg, M, lastfv, sgn,
lambda, fcnt, bl, fargs) {
maxbl <- 100
gamma <- 1e-04
sigma1 <- 0.1
sigma2 <- 0.5
cbl <- 0
fmax <- max(lastfv)
gpd <- -2 * abs(dg)
while (cbl < maxbl) {
xnew <- x + lambda * sgn * F
Fnew <- try(do.call(fn, append(list(xnew), fargs)))
fcnt = fcnt + 1
            if (inherits(Fnew, "try-error") || any(is.nan(Fnew)))
return(list(xnew = NA, Fnew = NA, fcnt = fcnt,
bl = bl, lsflag = 1, fune = NA))
else fune <- sum(Fnew * Fnew)
if (fune <= (fmax + lambda * gpd * gamma)) {
if (cbl >= 1)
bl <- bl + 1
return(list(xnew = xnew, Fnew = Fnew, fcnt = fcnt,
lambda = lambda, bl = bl, lsflag = 0, fune = fune))
}
else {
lamc <- -(gpd * lambda^2)/(2 * (fune - fval -
lambda * gpd))
c1 <- sigma1 * lambda
c2 <- sigma2 * lambda
if (lamc < c1)
lambda <- c1
else if (lamc > c2)
lambda <- c2
else lambda <- lamc
cbl <- cbl + 1
}
}
return(list(xnew = NA, Fnew = NA, fcnt = fcnt, lambda = NA,
bl = bl, lsflag = 2, fune = NA))
}
n <- length(par)
fcnt <- iter <- bl <- 0
alfa <- 1
eps <- 1e-10
h <- 1.e-07
lastfv <- rep(0, M)
U <- function(x, ...) drop(crossprod(fn(x, ...)))
## We do initial Nelder-Mead start-up
if (NM) {
res <- try(optim(par=par, fn=U, method="Nelder-Mead", control=list(maxit=100), ...), silent=TRUE)
        if (inherits(res, "try-error")) {
cat(res)
stop("\nFailure in Nelder-Mead Start. Try another starting value \n")
}
else if (any(is.nan(res$par)))
stop("Failure in Nelder-Mead Start (NaN value). Try another starting value \n")
par <- res$par
fcnt <- as.numeric(res$counts[1])
}
F <- try(fn(par, ...))
fcnt <- fcnt + 1
    if (inherits(F, "try-error"))
stop(" Failure in initial functional evaluation.")
else if (!is.numeric(F) || !is.vector(F))
stop("Function must return a vector numeric value.")
else if (any(is.nan(F), is.infinite(F), is.na(F)))
stop(" Failure in initial functional evaluation.")
else if (length(F) == 1) if (!quiet)
warning("Function returns a scalar. Function BBoptim or spg is better.")
F0 <- normF <- sqrt(sum(F * F))
if (trace)
cat("Iteration: ", 0, " ||F(x0)||: ", F0/sqrt(n), "\n")
pbest <- par
normF.best <- normF
lastfv[1] <- normF^2
flag <- 0
knoimp <- 0
while (normF/sqrt(n) > tol & iter <= maxit) {
Fa <- try(fn(par + h * F, ...))
fcnt <- fcnt + 1
        if (inherits(Fa, "try-error") || any(is.nan(Fa))) {
flag <- 1
break
}
dg <- (sum(F * Fa) - normF^2)/h
if (abs(dg/normF^2) < eps | is.nan(dg) | is.infinite(dg)) {
flag <- 3
break
}
if ((alfa <= eps) | (alfa >= 1/eps))
alfa <- if (normF > 1)
1
else if (normF >= 1e-05 & normF <= 1)
normF
else if (normF < 1e-05)
1e-05
sgn <- if (dg > 0)
-1
else 1
######## change made on Aug 29, 2008
## Steplength for first iteration is scaled properly
##
if (iter==0) {
alfa <- max(normF, 1)
alfa1 <- alfa2 <- alfa
}
temp <- alfa2
alfa2 <- alfa
        if (normF <= 0.01) alfa <- alfa1   # retard scheme
alfa1 <- temp
lambda <- 1/alfa
ls.ret <- lineSearch(x = par, fn = fn, F = F, fval = normF^2,
dg = dg, M = M, lastfv = lastfv, sgn, lambda, fcnt,
bl, fargs)
fcnt <- ls.ret$fcnt
bl <- ls.ret$bl
flag <- ls.ret$lsflag
if (flag > 0)
break
lambda <- ls.ret$lambda
Fnew <- ls.ret$Fnew
pnew <- ls.ret$xnew
fune <- ls.ret$fune
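        ## Spectral steplength update: with step lambda along +/-F, the quotients
        ## below recover the SANE spectral coefficients of La Cruz & Raydan (2003)
        ## from F, Fnew and lambda.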
alfa <- if (method == 1)
sum(F * (F - Fnew))/(lambda * sum(F * F))
else if (method == 2)
sum((F - Fnew)^2)/(lambda * sum(F * (F - Fnew)))
else if (method == 3)
sqrt(sum((F - Fnew)^2)/(lambda^2 * sum(F * F)))
if (is.nan(alfa))
alfa <- eps
par <- pnew
F <- Fnew
fun <- fune
normF <- sqrt(fun)
if (normF < normF.best) {
pbest <- par
normF.best <- normF
knoimp <- 0
} else knoimp <- knoimp + 1
if (knoimp == noimp) {
flag <- 4
break
}
iter <- iter + 1
lastfv[1 + iter%%M] <- fun
if (trace && (iter%%triter == 0))
cat("\n iteration: ", iter, " ||F(xn)|| = ", normF,
"\n")
}
if (flag == 0) {
if (normF.best/sqrt(n) <= tol)
conv <- list(type = 0, message = "Successful convergence")
if (iter > maxit)
conv <- list(type = 1, message = "Maximum number of iterations exceeded")
}
else if (flag == 1)
conv <- list(type = 2, message = "Error in function evaluation")
else if (flag == 2)
conv <- list(type = 3, message = "Maximum limit on steplength reductions exceeded")
else if (flag == 3)
conv <- list(type = 4, message = "Anomalous iteration")
else if (flag == 4)
conv <- list(type = 5, message = "Lack of improvement in objective function")
## We do final "optim" iterations using "L-BFGS-B" when type=4 or 5
if (BFGS & (conv$type==4 | conv$type==5) ) {
if (!quiet) cat("Calling `L-BFGS-B' in `optim' \n")
res <- try(optim(par=pbest, fn=U, method="L-BFGS-B",
control=list(pgtol=1.e-08, factr=1000, maxit=200), ...),
silent=TRUE)
        if (!inherits(res, "try-error")) {
            fcnt <- fcnt + as.numeric(res$counts[1])
            if (!any(is.nan(res$par))) {
                normF.new <- sqrt(res$value)
                if (normF.new < normF.best) {
                    normF.best <- normF.new
                    pbest <- res$par
                }
            }
        }
if (normF.best/sqrt(length(par)) <= tol)
conv <- list(type = 0, message = "Successful convergence")
}
if(alertConvergence && ( 0 != conv$type))
warning("Unsuccessful convergence.")
return(list(par = pbest, residual = normF.best/sqrt(length(par)),
fn.reduction = F0 - normF.best, feval = fcnt, iter = iter,
convergence = conv$type, message = conv$message))
}
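
## A minimal usage sketch for sane() (illustration only; `trig_demo` and its
## starting value are hypothetical). The system has the root x = rep(1, n).
trig_demo <- function(x) {
    n <- length(x)
    f <- rep(NA, n)
    f[1] <- 3 * x[1]^2 + 2 * x[2] - 5
    f[2:(n-1)] <- -x[1:(n-2)] + 4 * x[2:(n-1)] + 2 * x[3:n] - 5
    f[n] <- -x[n-1] + 4 * x[n] - 3
    f
}
ans_sane <- sane(par = runif(20), fn = trig_demo, control = list(trace = FALSE))
ans_sane$residual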
## ---- end of file: BB/R/sane.R ----
spg <- function(par, fn, gr=NULL, method=3, lower=-Inf, upper=Inf,
project=NULL, projectArgs=NULL,
control=list(), quiet=FALSE, alertConvergence=TRUE, ... ) {
    box <- any(is.finite(upper)) || any(is.finite(lower))
    prj <- box || !is.null(project)
if (is.character(project)) project <- get(project, mode="function")
if (box){
if (is.null(project) ){
# upper and lower for default. Expand if scalar
if(is.null(projectArgs)) projectArgs <- list()
if( (!is.null(projectArgs$lower)) | (!is.null(projectArgs$upper)))
warning("Using lower and upper spg arguments, ",
"not using those specified in projectArgs.")
projectArgs$lower <-
if (length(lower)==1) rep(lower, length(par)) else lower
projectArgs$upper <-
if (length(upper)==1) rep(upper, length(par)) else upper
# default previously called projectBox
project <- function(par, lower, upper) {
# Projecting to ensure that box-constraints are satisfied
par[par < lower] <- lower[par < lower]
par[par > upper] <- upper[par > upper]
return(par)
}
}
if (identical(project, projectLinear)){
if( (!is.null(projectArgs$lower)) | (!is.null(projectArgs$upper)))
warning("Using lower and upper spg arguments, ",
"not using those specified in projectArgs.")
if(is.null(projectArgs$A)) stop(
"projectLinear requires the A matrix to be specified in projectArgs.")
if(is.null(projectArgs$b)) stop(
"projectLinear requires the b vector to be specified in projectArgs.")
# upper and lower. Expand if scalar
if (length(lower)==1) lower <- rep(lower, length(par))
if (any(zi <- is.finite(lower))){
projectArgs$A <- rbind(projectArgs$A, diag(length(par))[zi,])
projectArgs$b <- c(projectArgs$b, lower[zi])
}
if (length(upper)==1) upper <- rep(upper, length(par))
if (any(zi <- is.finite(upper))){
projectArgs$A <- rbind(projectArgs$A, diag(-1, length(par))[zi,])
projectArgs$b <- c(projectArgs$b, -upper[zi])
}
}
}
# control defaults
# Added `ftol' to the control list: RV change on 02-06-2011
ctrl <- list(M=10, maxit=1500, ftol=1.e-10, gtol=1.e-05, maxfeval=10000, maximize=FALSE,
trace=!quiet, triter=10, eps=1e-7, checkGrad=NULL, checkGrad.tol=1.e-06)
namc <- names(control)
if (! all(namc %in% names(ctrl)) )
stop("unknown names in control: ", namc[!(namc %in% names(ctrl))])
ctrl[namc ] <- control
M <- ctrl$M
maxit <- ctrl$maxit
ftol <- ctrl$ftol # RV change on 02-06-2011
gtol <- ctrl$gtol
maxfeval <- ctrl$maxfeval
maximize <- ctrl$maximize
trace <- ctrl$trace
triter <- ctrl$triter
eps <- ctrl$eps
checkGrad <- ctrl$checkGrad
checkGrad.tol <- ctrl$checkGrad.tol
grNULL <- is.null(gr)
fargs <- list(...)
func <- if (maximize) function(par, ...) c(-fn(par, ...))
else function(par, ...) c( fn(par, ...))
# first evaluate the function to be sure the initial guess works
# and use the timing to decide if the analytic gradient should be checked.
# c() in next is for case of a 1x1 matrix value
f.time <- system.time(f <- try(func(par, ...),silent=TRUE), gcFirst=FALSE)
feval <- 1
# set the default for checking the gradient based on how long
# a function evaluation takes, and the number of parameters.
    # 6* is a very crude approximation of how many function evaluations are
# necessary for each dimension.
if (is.null(checkGrad))
if (((f.time[1]+f.time[2]) * 6*length(par)) < 10) {
checkGrad <-TRUE
}
else {
checkGrad <- FALSE
          if(!grNULL) warning(
            "Default checking of gradient turned off because of the time required. ",
            "See the help for spg to enable this.")
}
    if (inherits(f, "try-error"))
stop("Failure in initial function evaluation!", f)
else if ( !is.numeric(f) || 1 != length(f) )
stop("function must return a scalar numeric value!")
else if (is.nan(f) | is.infinite(f) | is.na(f) )
stop("Failure in initial function evaluation!")
f0 <- fbest <- f
################ local function
nmls <- function(p, f, d, gtd, lastfv, feval, func, maxfeval, fargs ){
# Non-monotone line search of Grippo with safe-guarded quadratic interpolation
gamma <- 1.e-04
fmax <- max(lastfv)
alpha <- 1
pnew <- p + alpha*d
fnew <- try(do.call(func, append(list(pnew) , fargs )),silent=TRUE)
feval <- feval + 1
        if (inherits(fnew, "try-error") || is.nan(fnew))
return(list(p=NA, f=NA, feval=NA, lsflag=1))
while(fnew > fmax + gamma*alpha*gtd) {
if (alpha <= 0.1) alpha <- alpha/2
else {
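                ## Safeguarded quadratic interpolation: atemp minimizes the
                ## quadratic matching f(0) = f, f'(0) = gtd and f(alpha) = fnew;
                ## if it falls outside the safeguard region, alpha is simply halved.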
atemp <- -(gtd*alpha^2) / (2*(fnew - f - alpha*gtd))
if (atemp < 0.1 | atemp > 0.9*alpha) atemp <- alpha/2
alpha <- atemp
}
pnew <- p + alpha*d
fnew <- try(do.call(func, append(list(pnew), fargs )), silent=TRUE)
feval <- feval + 1
            if (inherits(fnew, "try-error") || is.nan(fnew))
return(list(p=NA, f=NA, feval=NA, lsflag=1))
if (feval > maxfeval)
return(list(p=NA, f=NA, feval=NA, lsflag=2))
} #while condition loop ends
return(list(p=pnew, f=fnew, feval=feval, lsflag=0))
}
#############################################
if (!grNULL & checkGrad) {
requireNamespace("numDeriv", quietly = TRUE)
grad.num <- numDeriv::grad(x=par, func=fn, ...)
grad.analytic <- gr(par, ...)
max.diff <- max(abs((grad.analytic - grad.num) / (1 + abs(fn(par, ...)))))
if(!max.diff < checkGrad.tol) {
cat("Gradient check details: max. relative difference in gradients= ",
max.diff,
"\n\n analytic gradient:", grad.analytic,
"\n\n numerical gradient:", grad.num
)
stop("Analytic gradient does not seem correct! See comparison above. ",
"Fix it, remove it, or increase checkGrad.tol." )
}
}
################ local function
# Simple gr numerical approximation. Using func, f and eps from calling env.
# used when user does not specify gr.
if (grNULL) gr <-function(par, ...) {
df <- rep(NA,length(par))
for (i in 1:length(par)) {
dx <- par
dx[i] <- dx[i] + eps
df[i] <- (func(dx, ...) - f)/eps
}
df
}
#############################################
# Initialization
lmin <- 1.e-30
lmax <- 1.e30
iter <- 0
lastfv <- rep(-1.e99, M)
fbest <- NA
fchg <- Inf # RV change on 02-06-2011
# this switch is not needed for the numerical grad because the
# sign of func is switched.
grad <- if (maximize & !grNULL) function(par, ...) -gr(par, ...)
else function(par, ...) gr(par, ...)
# Project initial guess
if (prj){
par <- try(do.call(project, append(list(par), projectArgs)), silent=TRUE)
        if (inherits(par, "try-error"))
stop("Failure in projecting initial guess!", par)
}
if (any(is.nan(par), is.na(par)) ) stop("Failure in initial guess!")
pbest <- par
g <- try(grad(par, ...),silent=TRUE)
    if (inherits(g, "try-error"))
stop("Failure in initial gradient evaluation!", g)
else if (any(is.nan(g)) )
stop("Failure in initial gradient evaluation!")
lastfv[1] <- fbest <- f
pg <- par - g
if (prj){
pg <- try(do.call(project, append(list(pg), projectArgs)),silent=TRUE)
        if (inherits(pg, "try-error")) stop("Failure in initial projection!", pg)
}
if (any(is.nan(pg))) stop("Failure in initial projection!")
pg <- pg - par
pg2n <- sqrt(sum(pg*pg))
pginfn <- max(abs(pg))
gbest <- pg2n
if (pginfn != 0) lambda <- min(lmax, max(lmin, 1/pginfn))
if (trace) cat("iter: ",0, " f-value: ", f0 * (-1)^maximize, " pgrad: ",pginfn, "\n")
#######################
# Main iterative loop
#######################
lsflag <- NULL # for case when tol is already ok initially and while loop is skipped
while( pginfn > gtol & iter <= maxit & fchg > ftol) { # RV change on 02-06-2011
iter <- iter + 1
d <- par - lambda * g
if (prj){
d <- try(do.call(project, append(list(d), projectArgs)), silent=TRUE)
            if (inherits(d, "try-error") || any(is.nan(d))) {
lsflag <- 4
break
}
}
d <- d - par
gtd <- sum(g * d)
if(is.infinite(gtd)){
lsflag <- 4
break
}
nmls.ans <- nmls(par, f, d, gtd, lastfv, feval , func, maxfeval, fargs)
lsflag <- nmls.ans$lsflag
if(lsflag != 0) break
fchg <- abs(f - nmls.ans$f) # RV change on 02-06-2011
f <- nmls.ans$f
pnew <- nmls.ans$p
feval <- nmls.ans$feval
lastfv[(iter %% M) + 1] <- f
gnew <- try(grad(pnew, ...),silent=TRUE)
        if (inherits(gnew, "try-error") || any(is.nan(gnew))) {
lsflag <- 3
break
}
s <- pnew - par
y <- gnew - g
sts <- sum(s*s)
yty <- sum(y*y)
sty <- sum(s*y)
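        ## Barzilai-Borwein spectral steplengths, safeguarded to [lmin, lmax]:
        ## method 1 uses <s,s>/<s,y>, method 2 uses <s,y>/<y,y>, and
        ## method 3 uses sqrt(<s,s>/<y,y>).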
if (method==1) lambda <-
if (sts==0 | sty < 0) lmax else min(lmax, max(lmin, sts/sty))
else
if (method==2) lambda <-
if (sty < 0 | yty == 0) lmax else min(lmax, max(lmin, sty/yty))
else
if (method==3) lambda <-
if (sts==0 | yty == 0) lmax else min(lmax, max(lmin, sqrt(sts/yty)))
par <- pnew
g <- gnew
pg <- par - g
if (prj){
pg <- try(do.call(project, append(list(pg), projectArgs)), silent=TRUE)
            if (inherits(pg, "try-error") || any(is.nan(pg))) {
lsflag <- 4
break
}
}
pg <- pg - par
pg2n <- sqrt(sum(pg*pg))
pginfn <- max(abs(pg))
f.rep <- (-1)^maximize * f
if (trace && (iter%%triter == 0))
cat("iter: ",iter, " f-value: ", f.rep, " pgrad: ",pginfn, "\n")
if (f < fbest) {
fbest <- f
pbest <- pnew
gbest <- pginfn
}
} # while condition loop concludes
if (is.null(lsflag)) {
        if (!quiet) warning("convergence tolerance satisfied at initial parameter values.")
lsflag <- 0
}
if (lsflag==0) {
if (pginfn <= gtol | fchg <= ftol) conv <- list(type=0, message="Successful convergence")
if (iter >= maxit) conv <- list(type=1, message="Maximum number of iterations exceeded")
f.rep <- (-1)^maximize * fbest # This bug was fixed by Ravi Varadhan. March 29, 2010.
par <- pbest
} else {
par <- pbest
f.rep <- f <- (-1)^maximize * fbest
pginfn <- gbest
if (lsflag==1) conv <- list(type=3, message="Failure: Error in function evaluation")
if (lsflag==2) conv <- list(type=2, message="Maximum function evals exceeded")
if (lsflag==3) conv <- list(type=4, message="Failure: Error in gradient evaluation")
if (lsflag==4) conv <- list(type=5, message="Failure: Error in projection")
}
if(alertConvergence && ( 0 != conv$type))
warning("Unsuccessful convergence.")
return(list(par=par, value=f.rep, gradient =pginfn,
fn.reduction=(-1)^maximize * (f0 - f),
iter=iter, feval=feval, convergence=conv$type, message=conv$message))
}
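
## A short usage sketch (illustration only): box-constrained minimization of the
## Rosenbrock function. `rosen_demo`, the starting value and the bounds are
## hypothetical inputs.
rosen_demo <- function(x) 100 * (x[2] - x[1]^2)^2 + (1 - x[1])^2
ans_spg <- spg(par = c(-1.2, 1), fn = rosen_demo, lower = 0, upper = 2,
               control = list(trace = FALSE))
ans_spg$par   # close to c(1, 1), the unconstrained optimum, which is feasible here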
## ---- end of file: BB/R/spg.R ----
##############################################################
# A high-degree polynomial system (R.B. Kearfott, ACM 1987)
# There are 12 real roots (and 126 complex roots to this system!)
##############################################################
require("BB")
hdp <- function(x) {
f <- rep(NA, length(x))
f[1] <- 5 * x[1]^9 - 6 * x[1]^5 * x[2]^2 + x[1] * x[2]^4 + 2 * x[1] * x[3]
f[2] <- -2 * x[1]^6 * x[2] + 2 * x[1]^2 * x[2]^3 + 2 * x[2] * x[3]
f[3] <- x[1]^2 + x[2]^2 - 0.265625
f
}
# Multiple starting values
set.seed(123)
p0 <- matrix(runif(600), 200, 3) # 200 starting values, each of length 3
# dfsane() with default parameters
ans.df <- matrix(NA, nrow(p0), ncol(p0))
for (i in 1:nrow(p0)) {
tmp <- dfsane(par=p0[i, ], fn=hdp, control=list(trace=FALSE))
if (tmp$conv == 0) ans.df[i, ] <- tmp$par
}
ans.df <- ans.df[!is.na(ans.df[,1]), ]
dim(ans.df) # note that only 69 successes
# multiStart() (which calls BBsolve() internally)
ans <- multiStart(par=p0, fn=hdp)
sum(ans$conv) # number of successful runs
pmat <- ans$par[ans$conv, ] # selecting only converged solutions
pc <- princomp(pmat)
biplot(pc) # you can see all 12 solutions beautifully like on a clock!
## ---- end of file: BB/demo/multiStart.R ----
# commented out optim examples because of time limitations testing on CRAN
###################################################
if(!require("BB")) stop("this requires package BB.")
if(!require(numDeriv))stop("this requires package numDeriv.")
if(!require("setRNG"))stop("this requires setRNG.")
# This was used for tests conducted on March 25, 2008, using setRNG(test.rng).
# iseed <- 1236
# Replaced April 7, 2008, with setRNG to ensure rng and normal generators are set too.
# Changed from kind="Wichmann-Hill", normal.kind="Box-Muller",
# (back) to "Mersenne-Twister", normal.kind="Inversion", Jan 15, 2009
test.rng <- list(kind="Mersenne-Twister", normal.kind="Inversion", seed=1236)
old.seed <- setRNG(test.rng)
fr <- function(x) { ## Rosenbrock Banana function
x1 <- x[1]
x2 <- x[2]
100 * (x2 - x1 * x1)^2 + (1 - x1)^2
}
rosbkext.f <- function(x){
p <- x
n <- length(p)
sum (100*(p[1:(n-1)]^2 - p[2:n])^2 + (p[1:(n-1)] - 1)^2)
}
sc2.f <- function(x){
nvec <- 1:length(x)
sum(nvec * (exp(x) - x)) / 10
}
sc2.g <- function(x){
nvec <- 1:length(x)
nvec * (exp(x) - 1) / 10
}
trig.f <- function(x){
n <- length(x)
i <- 1:n
f <- n - sum(cos(x)) + i*(1 - cos(x)) - sin(x)
sum(f*f)
}
brown.f <- function(x) {
p <- x
n <- length(p)
odd <- seq(1,n,by=2)
even <- seq(2,n,by=2)
sum((p[odd]^2)^(p[even]^2 + 1) + (p[even]^2)^(p[odd]^2 + 1))
}
froth <- function(p){
# Freudenstein and Roth function (Broyden, Mathematics of Computation 1965, p. 577-593)
f <- rep(NA,length(p))
f[1] <- -13 + p[1] + (p[2]*(5 - p[2]) - 2) * p[2]
f[2] <- -29 + p[1] + (p[2]*(1 + p[2]) - 14) * p[2]
sum (f * f)
}
chen.f <- function(x) {
v <- log(x) + exp(x)
f <- (v - sqrt(v^2 + 5e-04))/2
sum (f * f)
}
valley.f <- function(x) {
c1 <- 1.003344481605351
c2 <- -3.344481605351171e-03
n <- length(x)
f <- rep(NA, n)
j <- 3 * (1:(n/3))
jm2 <- j - 2
jm1 <- j - 1
f[jm2] <- (c2 * x[jm2]^3 + c1 * x[jm2]) * exp(-(x[jm2]^2)/100) - 1
f[jm1] <- 10 * (sin(x[jm2]) - x[jm1])
f[j] <- 10 * (cos(x[jm2]) - x[j])
sum(f*f)
}
broydt.f <- function(x) {
n <- length(x)
f <- rep(NA, n)
f[1] <- ((3 - 0.5*x[1]) * x[1]) - 2*x[2] + 1
tnm1 <- 2:(n-1)
f[tnm1] <- ((3 - 0.5*x[tnm1]) * x[tnm1]) - x[tnm1-1] - 2*x[tnm1+1] + 1
f[n] <- ((3 - 0.5*x[n]) * x[n]) - x[n-1] + 1
sum(f*f)
}
#########################################################################################
p0 <- rnorm(2,sd=2)
system.time(ans.spg <- spg(par=p0, fn=fr))[1]
system.time(ans.spg <- spg(par=p0, fn=fr, method=1))[1]
system.time(ans.spg <- spg(par=p0, fn=fr, method=2))[1]
#system.time(ans.opt <- optim(par=p0, fn=fr, method="L-BFGS-B"))[1]
#########################################################################################
p0 <- rnorm(200,sd=2)
system.time(ans.spg <- spg(par=p0, fn=sc2.f))[1]
system.time(ans.spg <- spg(par=p0, fn=sc2.f, method=1))[1]
system.time(ans.spg <- spg(par=p0, fn=sc2.f, method=2))[1]
#system.time(ans.opt <- optim(par=p0, fn=sc2.f, method="L-BFGS-B"))[1]
# This demonstrates the value of providing "exact" gradient information
# Much faster computation, as well as better convergence
system.time(ans.spg <- spg(par=p0, fn=sc2.f, gr=sc2.g))[1]
##########
p0 <- rnorm(200,sd=2)
system.time(ans.spg <- spg(par=p0, fn=brown.f))[1]
system.time(ans.spg <- spg(par=p0, fn=brown.f, meth=1))[1]
system.time(ans.spg <- spg(par=p0, fn=brown.f, method=2))[1]
#system.time(ans.opt <- optim(par=p0, fn=brown.f, method="L-BFGS-B"))[1]
##########
p0 <- rnorm(200,sd=2)
system.time(ans.spg <- spg(par=p0, fn=rosbkext.f))[1]
#system.time(ans.spg <- spg(par=p0, fn=rosbkext.f, meth=1))[1]
#system.time(ans.spg <- spg(par=p0, fn=rosbkext.f, method=2))[1]
#system.time(ans.opt <- optim(par=p0, fn=rosbkext.f, method="L-BFGS-B"))[1]
##########
p0 <- rnorm(200,sd=5)
system.time(ans.spg <- spg(par=p0, fn=trig.f))[1]
system.time(ans.spg <- spg(par=p0, fn=trig.f, meth=1))[1]
system.time(ans.spg <- spg(par=p0, fn=trig.f, method=2))[1]
#system.time(ans.opt <- optim(par=p0, fn=trig.f, method="L-BFGS-B"))[1]
##########
p0 <- rexp(500)
system.time(ans.spg <- spg(par=p0, fn=chen.f, lower=0))[1]
system.time(ans.spg <- spg(par=p0, fn=chen.f, lower=0, meth=1))[1]
system.time(ans.spg <- spg(par=p0, fn=chen.f, lower=0, method=2))[1]
#system.time(ans.opt <- optim(par=p0, fn=chen.f, lower=0, method="L-BFGS-B"))[1]
##########
p0 <- rnorm(99, sd=2)
system.time(ans.spg <- spg(par=p0, fn=valley.f))[1]
system.time(ans.spg <- spg(par=p0, fn=valley.f, meth=1))[1]
system.time(ans.spg <- spg(par=p0, fn=valley.f, method=2))[1]
#system.time(ans.opt <- optim(par=p0, fn=valley.f, method="L-BFGS-B"))[1]
##########
p0 <- rnorm(500, sd=2)
system.time(ans.spg <- spg(par=p0, fn=broydt.f))[1]
system.time(ans.spg <- spg(par=p0, fn=broydt.f, meth=1))[1]
system.time(ans.spg <- spg(par=p0, fn=broydt.f, method=2))[1]
#system.time(ans.opt <- optim(par=p0, fn=broydt.f, method="L-BFGS-B"))[1]
#########################################
p0 <- rpois(2,10)
ans.spg <- spg(par=p0, fn=froth)
ans.spg
ans.spg <- spg(par=p0, fn=froth, meth=1)
ans.spg
ans.spg <- spg(par=p0, fn=froth, method=2)
ans.spg
#optim(par=p0, fn=froth, method="L-BFGS-B")
###############################################################
poissmix.loglik <- function(p,y) {
i <- 0:(length(y)-1)
loglik <- y*log(p[1]*exp(-p[2])*p[2]^i/exp(lgamma(i+1)) +
(1 - p[1])*exp(-p[3])*p[3]^i/exp(lgamma(i+1)))
return (sum(loglik) )
}
###############################################################
# Real data from Hasselblad (JASA 1969)
poissmix.dat <- data.frame(death=0:9, freq=c(162,267,271,185,111,61,27,8,3,1))
lo <- c(0.001,0,0)
hi <- c(0.999, Inf, Inf)
y <- poissmix.dat$freq
p0 <- runif(3,c(0.2,1,1),c(0.8,5,8)) # randomly generated starting values
t.spg <- system.time(ans.spg <- spg(par=p0, fn=poissmix.loglik, y=y,
projectArgs=list(lower=lo, upper=hi), control=list(maximize=T)))[1]
t.spg <- system.time(ans.spg <- spg(par=p0, fn=poissmix.loglik, y=y,
projectArgs=list(lower=lo, upper=hi), control=list(maximize=T), meth=1))[1]
t.spg <- system.time(ans.spg <- spg(par=p0, fn=poissmix.loglik, y=y,
projectArgs=list(lower=lo, upper=hi), control=list(maximize=T), meth=2))[1]
#ans.opt <- optim(par=p0, fn=poissmix.loglik, y=y, method="L-BFGS-B", lower=lo, upper=hi,
# control=list(fnscale=-1))
grad(ans.spg$par, func=poissmix.loglik, y=y)
#grad(ans.opt$par, func=poissmix.loglik, y=y)
###############################################################
##
dvm <- function (theta, mu, kappa)
{
1/(2 * pi * besselI(x = kappa, nu = 0, expon.scaled = TRUE)) *
(exp(cos(theta - mu) - 1))^kappa
}
##
rmixedvm <- function (n, mu1, mu2, kappa1, kappa2, p) {
temp <- runif(n)
n1 <- sum(temp <= p)
y <- c(rvm(n1,mu1,kappa1),rvm(n-n1,mu2,kappa2))
return(y)
}
##
rvm <- function (n, mean, k)
{
vm <- c(1:n)
a <- 1 + (1 + 4 * (k^2))^0.5
b <- (a - (2 * a)^0.5)/(2 * k)
r <- (1 + b^2)/(2 * b)
obs <- 1
while (obs <= n) {
U1 <- runif(1, 0, 1)
z <- cos(pi * U1)
f <- (1 + r * z)/(r + z)
c <- k * (r - f)
U2 <- runif(1, 0, 1)
if (c * (2 - c) - U2 > 0) {
U3 <- runif(1, 0, 1)
vm[obs] <- sign(U3 - 0.5) * acos(f) + mean
vm[obs] <- vm[obs]%%(2 * pi)
obs <- obs + 1
}
else {
if (log(c/U2) + 1 - c >= 0) {
U3 <- runif(1, 0, 1)
vm[obs] <- sign(U3 - 0.5) * acos(f) + mean
vm[obs] <- vm[obs]%%(2 * pi)
obs <- obs + 1
}
}
}
vm
}
#
vmmix.loglik <- function(x,y){
p <- x
sum(log(p[5]*dvm(y,p[1],p[2])+(1-p[5])*dvm(y,p[3],p[4])))
}
y <- rmixedvm(n=500, mu1=pi/2, mu2=3*pi/2, kappa1=1.9, kappa2=2.2, p=0.67)
p <- c(pi/4,2,pi,1,0.5)
lo <- rep(0.001,5)
hi <- c(Inf, Inf, Inf, Inf, 0.999)
p0 <- c(runif(5,c(0,0.1,0,0.1,0.2),c(2*pi,5,2*pi,5,0.8)))
t.spg <- system.time(ans.spg <- spg(par=p0, fn=vmmix.loglik, y=y,
projectArgs=list(lower=lo, upper=hi), control=list(maximize=T, M=20)))[1]
t.spg <- system.time(ans.spg <- spg(par=p0, fn=vmmix.loglik, y=y,
projectArgs=list(lower=lo, upper=hi), method=1,
control=list(maximize=T, M=20)))[1]
t.spg <- system.time(ans.spg <- spg(par=p0, fn=vmmix.loglik, y=y,
projectArgs=list(lower=lo, upper=hi), method=2,
control=list(maximize=T, M=20)))[1]
#ans.opt <- optim(par=p0, fn=vmmix.loglik, y=y, method="L-BFGS-B", lower=lo, upper=hi,
# control=list(fnscale=-1))
grad(ans.spg$par, func=vmmix.loglik, y=y)
#grad(ans.opt$par, func=vmmix.loglik, y=y)
## ---- end of file: BB/demo/nlmin.R ----
if(!require("BB")) stop("this requires package BB.")
if(!require("setRNG"))stop("this requires setRNG.")
#nsim <- 100
# nsim reduced from 100 to 20 because of testing time constraints on CRAN
nsim <- 20
# This was used for tests conducted on March 25, 2008, using setRNG(test.rng).
# iseed <- 1236
# Replaced with setRNG to ensure rng and normal generators are set too.
# Changed from kind="Wichmann-Hill", normal.kind="Box-Muller",
# (back) to "Mersenne-Twister", normal.kind="Inversion", Jan 15, 2009
test.rng <- list(kind="Mersenne-Twister", normal.kind="Inversion", seed=1236)
old.seed <- setRNG(test.rng)
#
# Some examples illustrating the use of SANE & DFSANE
#
expo1 <- function(x) {
# From La Cruz and Raydan, Optim Methods and Software 2003, 18 (583-599)
n <- length(x)
f <- rep(NA, n)
f[1] <- exp(x[1] - 1) - 1
f[2:n] <- (2:n) * (exp(x[2:n] - 1) - x[2:n])
f
}
p0 <- rnorm(100)
ans1 <- dfsane(par=p0, fn=expo1, method=1)
ans2 <- dfsane(par=p0, fn=expo1, method=2)
ans3 <- dfsane(par=p0, fn=expo1, method=3)
ans4 <- sane(par=p0, fn=expo1)
c(ans1$resid, ans2$resid,ans3$resid, ans4$resid)
c(ans1$feval, ans2$feval,ans3$feval,ans4$feval)
setRNG(test.rng)
dfsane1.expo1 <- dfsane2.expo1 <- dfsane3.expo1 <- sane.expo1 <- matrix(NA, nsim, 4)
for (i in 1:nsim) {
cat("Simulation" , i, "\n")
p0 <- rnorm(100)
ans <- sane(par=p0, fn=expo1, control=list(trace=F))
if (!is.null(ans)) sane.expo1[i, ] <- c(ans$resid, ans$feval, ans$iter, ans$conv)
ans <- dfsane(par=p0, fn=expo1, method=1, control=list(trace=F))
if (!is.null(ans)) dfsane1.expo1[i, ] <- c(ans$resid, ans$feval, ans$iter, ans$conv)
ans <- dfsane(par=p0, fn=expo1, method=2, control=list(trace=F))
if (!is.null(ans)) dfsane2.expo1[i, ] <- c(ans$resid, ans$feval, ans$iter, ans$conv)
ans <- dfsane(par=p0, fn=expo1, method=3, control=list(trace=F))
if (!is.null(ans)) dfsane3.expo1[i, ] <- c(ans$resid, ans$feval, ans$iter, ans$conv)
}
apply(sane.expo1, 2, summary)
apply(dfsane1.expo1, 2, summary)
apply(dfsane2.expo1, 2, summary)
apply(dfsane3.expo1, 2, summary)
#expo1.results <- list(dfsane1=dfsane1.expo1, dfsane2=dfsane2.expo1, dfsane3=dfsane3.expo1, sane=sane.expo1)
#dput(expo1.results, file="e:/bb/package/expo1.results")
# expo1.results <- dget(file="e:/bb/package/expo1.results")
############
expo3 <- function(p) {
# From La Cruz and Raydan, Optim Methods and Software 2003, 18 (583-599)
n <- length(p)
f <- rep(NA, n)
onm1 <- 1:(n-1)
f[onm1] <- onm1/10 * (1 - p[onm1]^2 - exp(-p[onm1]^2))
f[n] <- n/10 * (1 - exp(-p[n]^2))
f
}
n <- 100
p0 <- (1:n)/(4*n^2)
p0 <- rnorm(n, sd=4)
ans1 <- dfsane(par=p0, fn=expo3, method=1)
ans2 <- dfsane(par=p0, fn=expo3, method=2)
ans3 <- dfsane(par=p0, fn=expo3, method=3)
ans4 <- sane(par=p0, fn=expo3)
c(ans1$resid, ans2$resid,ans3$resid, ans4$resid)
c(ans1$feval, ans2$feval,ans3$feval,ans4$feval)
setRNG(test.rng)
dfsane1.expo3 <- dfsane2.expo3 <- dfsane3.expo3 <- sane.expo3 <- matrix(NA, nsim, 4)
for (i in 1:nsim) {
cat("Simulation" , i, "\n")
p0 <- rnorm(100)
ans <- sane(par=p0, fn=expo3, control=list(trace=F))
if (!is.null(ans)) sane.expo3[i, ] <- c(ans$resid, ans$feval, ans$iter, ans$conv)
ans <- dfsane(par=p0, fn=expo3, method=1, control=list(trace=F))
if (!is.null(ans)) dfsane1.expo3[i, ] <- c(ans$resid, ans$feval, ans$iter, ans$conv)
ans <- dfsane(par=p0, fn=expo3, method=2, control=list(trace=F))
if (!is.null(ans)) dfsane2.expo3[i, ] <- c(ans$resid, ans$feval, ans$iter, ans$conv)
ans <- dfsane(par=p0, fn=expo3, method=3, control=list(trace=F))
if (!is.null(ans)) dfsane3.expo3[i, ] <- c(ans$resid, ans$feval, ans$iter, ans$conv)
}
apply(sane.expo3, 2, summary)
apply(dfsane1.expo3, 2, summary)
apply(dfsane2.expo3, 2, summary)
apply(dfsane3.expo3, 2, summary)
#expo3.results <- list(dfsane1=dfsane1.expo3, dfsane2=dfsane2.expo3, dfsane3=dfsane3.expo3, sane=sane.expo3)
#dput(expo3.results, file="e:/bb/package/expo3.results")
####################################################
froth <- function(p){
# Freudenstein and Roth function (Broyden, Mathematics of Computation 1965, p. 577-593)
f <- rep(NA,length(p))
f[1] <- -13 + p[1] + (p[2]*(5 - p[2]) - 2) * p[2]
f[2] <- -29 + p[1] + (p[2]*(1 + p[2]) - 14) * p[2]
f
}
p0 <- c(3,2) # this gives the zero of the system
ans1 <- dfsane(par=p0, fn=froth, method=1)
ans2 <- dfsane(par=p0, fn=froth, method=2)
ans3 <- dfsane(par=p0, fn=froth, method=3)
ans4 <- sane(par=p0, fn=froth)
c(ans1$resid, ans2$resid,ans3$resid, ans4$resid)
c(ans1$feval, ans2$feval,ans3$feval,ans4$feval)
p0 <- c(1,1) # this gives the local minimum that is not the zero of the system
ans1 <- dfsane(par=p0, fn=froth, method=1)
ans2 <- dfsane(par=p0, fn=froth, method=2)
ans3 <- dfsane(par=p0, fn=froth, method=3)
ans4 <- sane(par=p0, fn=froth)
c(ans1$resid, ans2$resid,ans3$resid, ans4$resid)
c(ans1$feval, ans2$feval,ans3$feval,ans4$feval)
p0 <- rpois(2,10) # trying random starts
ans1 <- dfsane(par=p0, fn=froth, method=1)
ans2 <- dfsane(par=p0, fn=froth, method=2)
ans3 <- dfsane(par=p0, fn=froth, method=3)
ans4 <- sane(par=p0, fn=froth)
c(ans1$resid, ans2$resid,ans3$resid, ans4$resid)
c(ans1$feval, ans2$feval,ans3$feval,ans4$feval)
###########################################
trigexp <- function(x) {
n <- length(x)
F <- rep(NA, n)
F[1] <- 3*x[1]^2 + 2*x[2] - 5 + sin(x[1] - x[2]) * sin(x[1] + x[2])
tn1 <- 2:(n-1)
F[tn1] <- -x[tn1-1] * exp(x[tn1-1] - x[tn1]) + x[tn1] * ( 4 + 3*x[tn1]^2) +
2 * x[tn1 + 1] + sin(x[tn1] - x[tn1 + 1]) * sin(x[tn1] + x[tn1 + 1]) - 8
F[n] <- -x[n-1] * exp(x[n-1] - x[n]) + 4*x[n] - 3
F
}
p0 <- rnorm(100, sd=3)
ans1 <- dfsane(par=p0, fn=trigexp, method=1)
ans2 <- dfsane(par=p0, fn=trigexp, method=2)
ans3 <- dfsane(par=p0, fn=trigexp, method=3)
ans4 <- sane(par=p0, fn=trigexp)
c(ans1$resid, ans2$resid,ans3$resid, ans4$resid)
c(ans1$feval, ans2$feval,ans3$feval,ans4$feval)
setRNG(test.rng)
dfsane1.trigexp <- dfsane2.trigexp <- dfsane3.trigexp <- sane.trigexp <- matrix(NA, nsim, 4)
for (i in 1:nsim) {
cat("Simulation" , i, "\n")
p0 <- rnorm(100)
ans <- sane(par=p0, fn=trigexp)
if (!is.null(ans)) sane.trigexp[i, ] <- c(ans$resid, ans$feval, ans$iter, ans$conv)
ans <- dfsane(par=p0, fn=trigexp, method=1)
if (!is.null(ans)) dfsane1.trigexp[i, ] <- c(ans$resid, ans$feval, ans$iter, ans$conv)
ans <- dfsane(par=p0, fn=trigexp, method=2)
if (!is.null(ans)) dfsane2.trigexp[i, ] <- c(ans$resid, ans$feval, ans$iter, ans$conv)
ans <- dfsane(par=p0, fn=trigexp, method=3)
if (!is.null(ans)) dfsane3.trigexp[i, ] <- c(ans$resid, ans$feval, ans$iter, ans$conv)
}
apply(sane.trigexp, 2, summary)
apply(dfsane1.trigexp, 2, summary)
apply(dfsane2.trigexp, 2, summary)
apply(dfsane3.trigexp, 2, summary)
#trigexp.results <- list(dfsane1=dfsane1.trigexp, dfsane2=dfsane2.trigexp, dfsane3=dfsane3.trigexp, sane=sane.trigexp)
#dput(trigexp.results, file="e:/bb/package/trigexp.results")
###########################################
valley <- function(x) {
c1 <- 1.003344481605351
c2 <- -3.344481605351171e-04
n <- length(x)
f <- rep(NA, n)
j <- 3 * (1:(n/3))
jm2 <- j - 2
jm1 <- j - 1
f[jm2] <- (c2 * x[jm2]^3 + c1 * x[jm2]) * exp(-(x[jm2]^2)/100) - 1
f[jm1] <- 10 * (sin(x[jm2]) - x[jm1])
f[j] <- 10 * (cos(x[jm2]) - x[j])
f
}
p0 <- rnorm(102, sd=3) # number of unknowns must be a multiple of 3
ans1 <- dfsane(par=p0, fn=valley, method=1)
ans2 <- dfsane(par=p0, fn=valley, method=2)
ans3 <- dfsane(par=p0, fn=valley, method=3)
ans4 <- sane(par=p0, fn=valley)
c(ans1$resid, ans2$resid,ans3$resid, ans4$resid)
c(ans1$feval, ans2$feval,ans3$feval,ans4$feval)
setRNG(test.rng)
dfsane1.valley <- dfsane2.valley <- dfsane3.valley <- sane.valley <- matrix(NA, nsim, 4)
for (i in 1:nsim) {
cat("Simulation" , i, "\n")
p0 <- rnorm(102)
ans <- sane(par=p0, fn=valley, control=list(trace=F))
if (!is.null(ans)) sane.valley[i, ] <- c(ans$resid, ans$feval, ans$iter, ans$conv)
ans <- dfsane(par=p0, fn=valley, method=1, control=list(trace=F))
if (!is.null(ans)) dfsane1.valley[i, ] <- c(ans$resid, ans$feval, ans$iter, ans$conv)
ans <- dfsane(par=p0, fn=valley, method=2, control=list(trace=F))
if (!is.null(ans)) dfsane2.valley[i, ] <- c(ans$resid, ans$feval, ans$iter, ans$conv)
ans <- dfsane(par=p0, fn=valley, method=3, control=list(trace=F))
if (!is.null(ans)) dfsane3.valley[i, ] <- c(ans$resid, ans$feval, ans$iter, ans$conv)
}
apply(sane.valley, 2, summary)
apply(dfsane1.valley, 2, summary)
apply(dfsane2.valley, 2, summary)
apply(dfsane3.valley, 2, summary)
#valley.results <- list(dfsane1=dfsane1.valley, dfsane2=dfsane2.valley, dfsane3=dfsane3.valley, sane=sane.valley)
#dput(valley.results, file="e:/bb/package/valley.results")
###########################################
broydt <- function(x) {
n <- length(x)
f <- rep(NA, n)
f[1] <- ((3 - 0.5*x[1]) * x[1]) - 2*x[2] + 1
tnm1 <- 2:(n-1)
f[tnm1] <- ((3 - 0.5*x[tnm1]) * x[tnm1]) - x[tnm1-1] - 2*x[tnm1+1] + 1
f[n] <- ((3 - 0.5*x[n]) * x[n]) - x[n-1] + 1
f
}
p0 <- rnorm(500, sd=5)
ans1 <- dfsane(par=p0, fn=broydt, method=1)
ans2 <- dfsane(par=p0, fn=broydt, method=2)
ans3 <- dfsane(par=p0, fn=broydt, method=3)
ans4 <- sane(par=p0, fn=broydt)
c(ans1$resid, ans2$resid,ans3$resid, ans4$resid)
c(ans1$feval, ans2$feval,ans3$feval,ans4$feval)
setRNG(test.rng)
dfsane1.broydt <- dfsane2.broydt <- dfsane3.broydt <- sane.broydt <- matrix(NA, nsim, 4)
for (i in 1:nsim) {
cat("Simulation" , i, "\n")
p0 <- rnorm(100)
ans <- sane(par=p0, fn=broydt, control=list(trace=F))
if (!is.null(ans)) sane.broydt[i, ] <- c(ans$resid, ans$feval, ans$iter, ans$conv)
ans <- dfsane(par=p0, fn=broydt, method=1, control=list(trace=F))
if (!is.null(ans)) dfsane1.broydt[i, ] <- c(ans$resid, ans$feval, ans$iter, ans$conv)
ans <- dfsane(par=p0, fn=broydt, method=2, control=list(trace=F))
if (!is.null(ans)) dfsane2.broydt[i, ] <- c(ans$resid, ans$feval, ans$iter, ans$conv)
ans <- dfsane(par=p0, fn=broydt, method=3, control=list(trace=F))
if (!is.null(ans)) dfsane3.broydt[i, ] <- c(ans$resid, ans$feval, ans$iter, ans$conv)
}
apply(sane.broydt, 2, summary)
apply(dfsane1.broydt, 2, summary)
apply(dfsane2.broydt, 2, summary)
apply(dfsane3.broydt, 2, summary)
#broydt.results <- list(dfsane1=dfsane1.broydt, dfsane2=dfsane2.broydt, dfsane3=dfsane3.broydt, sane=sane.broydt)
#dput(broydt.results, file="e:/bb/package/broydt.results")
######################################
brent <- function(x) {
n <- length(x)
tnm1 <- 2:(n-1)
F <- rep(NA, n)
F[1] <- 3 * x[1] * (x[2] - 2*x[1]) + (x[2]^2)/4
F[tnm1] <- 3 * x[tnm1] * (x[tnm1+1] - 2 * x[tnm1] + x[tnm1-1]) + ((x[tnm1+1] - x[tnm1-1])^2) / 4
F[n] <- 3 * x[n] * (20 - 2 * x[n] + x[n-1]) + ((20 - x[n-1])^2) / 4
F
}
p0 <- rnorm(100)
ans1 <- dfsane(par=p0, fn=brent, method=1)
ans2 <- dfsane(par=p0, fn=brent, method=2)
ans3 <- dfsane(par=p0, fn=brent, method=3)
ans4 <- sane(par=p0, fn=brent)
c(ans1$resid, ans2$resid,ans3$resid, ans4$resid)
c(ans1$feval, ans2$feval,ans3$feval,ans4$feval)
setRNG(test.rng)
dfsane1.brent <- dfsane2.brent <- dfsane3.brent <- sane.brent <- matrix(NA, nsim, 4)
for (i in 1:nsim) {
cat("Simulation" , i, "\n")
p0 <- rnorm(100)
ans <- sane(par=p0, fn=brent, control=list(trace=F))
if (!is.null(ans))sane.brent[i, ] <- c(ans$resid, ans$feval, ans$iter, ans$conv)
ans <- dfsane(par=p0, fn=brent, method=1, control=list(trace=F))
if (!is.null(ans))dfsane1.brent[i, ] <- c(ans$resid, ans$feval, ans$iter, ans$conv)
ans <- dfsane(par=p0, fn=brent, method=2, control=list(trace=F))
if (!is.null(ans)) dfsane2.brent[i, ] <- c(ans$resid, ans$feval, ans$iter, ans$conv)
ans <- dfsane(par=p0, fn=brent, method=3, control=list(trace=F))
if (!is.null(ans)) dfsane3.brent[i, ] <- c(ans$resid, ans$feval, ans$iter, ans$conv)
}
apply(sane.brent, 2, summary)
apply(dfsane1.brent, 2, summary)
apply(dfsane2.brent, 2, summary)
apply(dfsane3.brent, 2, summary)
#brent.results <- list(dfsane1=dfsane1.brent, dfsane2=dfsane2.brent, dfsane3=dfsane3.brent, sane=sane.brent)
#dput(brent.results, file="e:/bb/package/brent.results")
######################################
troesch <- function(x) {
n <- length(x)
tnm1 <- 2:(n-1)
F <- rep(NA, n)
h <- 1 / (n+1)
h2 <- 10 * h^2
F[1] <- 2 * x[1] + h2 * sinh(10 * x[1]) - x[2]
F[tnm1] <- 2 * x[tnm1] + h2 * sinh(10 * x[tnm1]) - x[tnm1-1] - x[tnm1+1]
F[n] <- 2 * x[n] + h2 * sinh(10* x[n]) - x[n-1] - 1
F
}
p0 <- rnorm(100)
ans1 <- dfsane(par=p0, fn=troesch, method=1)
ans2 <- dfsane(par=p0, fn=troesch, method=2)
ans3 <- dfsane(par=p0, fn=troesch, method=3)
ans4 <- sane(par=p0, fn=troesch)
c(ans1$resid, ans2$resid,ans3$resid, ans4$resid)
c(ans1$feval, ans2$feval,ans3$feval,ans4$feval)
setRNG(test.rng)
dfsane1.troesch <- dfsane2.troesch <- dfsane3.troesch <- sane.troesch <- matrix(NA, nsim, 4)
for (i in 1:nsim) {
cat("Simulation" , i, "\n")
#p0 <- rnorm(100) # this doesn't work for "sane"; but works well for "dfsane"
p0 <- runif(100) # this works for all schemes
ans <- sane(par=p0, fn=troesch, control=list(trace=F))
if (!is.null(ans)) sane.troesch[i, ] <- c(ans$resid, ans$feval, ans$iter, ans$conv)
ans <- dfsane(par=p0, fn=troesch, method=1, control=list(trace=F))
if (!is.null(ans)) dfsane1.troesch[i, ] <- c(ans$resid, ans$feval, ans$iter, ans$conv)
ans <- dfsane(par=p0, fn=troesch, method=2, control=list(trace=F))
if (!is.null(ans)) dfsane2.troesch[i, ] <- c(ans$resid, ans$feval, ans$iter, ans$conv)
ans <- dfsane(par=p0, fn=troesch, method=3, control=list(trace=F))
if (!is.null(ans)) dfsane3.troesch[i, ] <- c(ans$resid, ans$feval, ans$iter, ans$conv)
}
apply(sane.troesch, 2, summary)
apply(dfsane1.troesch, 2, summary)
apply(dfsane2.troesch, 2, summary)
apply(dfsane3.troesch, 2, summary)
#troesch.results <- list(dfsane1=dfsane1.troesch, dfsane2=dfsane2.troesch, dfsane3=dfsane3.troesch, sane=sane.troesch)
#dput(troesch.results, file="e:/bb/package/troesch.results")
# troesch.results <- dget(file="e:/bb/package/troesch.results")
######################################
## ---- end of file: BB/demo/nlsolve.R ----
### R code from vignette source 'BB.Stex'
###################################################
### code chunk number 1: BB.Stex:9-10
###################################################
options(continue=" ")
###################################################
### code chunk number 2: BB.Stex:24-25
###################################################
library("BB")
###################################################
### code chunk number 3: BB.Stex:31-32 (eval = FALSE)
###################################################
## help(package=BB)
###################################################
### code chunk number 4: BB.Stex:56-58
###################################################
require("setRNG")
setRNG(list(kind="Wichmann-Hill", normal.kind="Box-Muller", seed=1236))
###################################################
### code chunk number 5: BB.Stex:69-82
###################################################
expo3 <- function(p) {
# From La Cruz and Raydan, Optim Methods and Software 2003, 18 (583-599)
n <- length(p)
f <- rep(NA, n)
onm1 <- 1:(n-1)
f[onm1] <- onm1/10 * (1 - p[onm1]^2 - exp(-p[onm1]^2))
f[n] <- n/10 * (1 - exp(-p[n]^2))
f
}
p0 <- runif(10)
ans <- dfsane(par=p0, fn=expo3)
ans
###################################################
### code chunk number 6: BB.Stex:95-112
###################################################
trigexp <- function(x) {
n <- length(x)
F <- rep(NA, n)
F[1] <- 3*x[1]^2 + 2*x[2] - 5 + sin(x[1] - x[2]) * sin(x[1] + x[2])
tn1 <- 2:(n-1)
F[tn1] <- -x[tn1-1] * exp(x[tn1-1] - x[tn1]) + x[tn1] * ( 4 + 3*x[tn1]^2) +
2 * x[tn1 + 1] + sin(x[tn1] - x[tn1 + 1]) * sin(x[tn1] + x[tn1 + 1]) - 8
F[n] <- -x[n-1] * exp(x[n-1] - x[n]) + 4*x[n] - 3
F
}
n <- 10000
p0 <- runif(n)
ans <- dfsane(par=p0, fn=trigexp, control=list(trace=FALSE))
ans$message
ans$resid
###################################################
### code chunk number 7: BB.Stex:118-124
###################################################
froth <- function(p){
f <- rep(NA,length(p))
f[1] <- -13 + p[1] + (p[2]*(5 - p[2]) - 2) * p[2]
f[2] <- -29 + p[1] + (p[2]*(1 + p[2]) - 14) * p[2]
f
}
###################################################
### code chunk number 8: BB.Stex:130-133
###################################################
p0 <- c(3,2)
dfsane(par=p0, fn=froth, control=list(trace=FALSE))
BBsolve(par=p0, fn=froth)
###################################################
### code chunk number 9: BB.Stex:142-146
###################################################
p0 <- c(1,1)
BBsolve(par=p0, fn=froth)
dfsane(par=p0, fn=froth, control=list(trace=FALSE))
###################################################
### code chunk number 10: BB.Stex:156-161
###################################################
# two values generated independently from a Poisson distribution with mean = 10
p0 <- rpois(2,10)
BBsolve(par=p0, fn=froth)
dfsane(par=p0, fn=froth, control=list(trace=FALSE))
###################################################
### code chunk number 11: BB.Stex:177-188
###################################################
# Example
# A high-degree polynomial system (R.B. Kearfott, ACM 1987)
# There are 12 real roots (and 126 complex roots to this system!)
#
hdp <- function(x) {
f <- rep(NA, length(x))
f[1] <- 5 * x[1]^9 - 6 * x[1]^5 * x[2]^2 + x[1] * x[2]^4 + 2 * x[1] * x[3]
f[2] <- -2 * x[1]^6 * x[2] + 2 * x[1]^2 * x[2]^3 + 2 * x[2] * x[3]
f[3] <- x[1]^2 + x[2]^2 - 0.265625
f
}
###################################################
### code chunk number 12: BB.Stex:195-201
###################################################
setRNG(list(kind="Wichmann-Hill", normal.kind="Box-Muller", seed=123))
p0 <- matrix(runif(300), 100, 3) # 100 starting values, each of length 3
ans <- multiStart(par=p0, fn=hdp, action="solve")
sum(ans$conv) # number of successful runs = 99
pmat <- ans$par[ans$conv, ] # selecting only converged solutions
###################################################
### code chunk number 13: BB.Stex:205-207
###################################################
ans <- round(pmat, 4)
ans[!duplicated(ans), ]
###################################################
### code chunk number 14: BB.Stex:212-214
###################################################
pc <- princomp(pmat)
biplot(pc) # you can see all 12 solutions beautifully like on a clock!
###################################################
### code chunk number 15: BB.Stex:223-234
###################################################
fleishman <- function(x, r1, r2) {
b <- x[1]
c <- x[2]
d <- x[3]
f <- rep(NA, 3)
f[1] <- b^2 + 6 * b * d + 2 * c^2 + 15 * d^2 - 1
f[2] <- 2*c * (b^2 + 24*b*d + 105*d^2 + 2) - r1
f[3] <- b*d + c^2 * (1 + b^2 + 28 * b * d) + d^2 * (12 + 48 * b* d +
141 * c^2 + 225 * d^2) - r2/24
f
}
###################################################
### code chunk number 16: BB.Stex:243-254
###################################################
rmat <- matrix(NA, 10, 2)
rmat[1,] <- c(1.75, 3.75)
rmat[2,] <- c(1.25, 2.00)
rmat[3,] <- c(1.00, 1.75)
rmat[4,] <- c(1.00, 0.50)
rmat[5,] <- c(0.75, 0.25)
rmat[6,] <- c(0.50, 3.00)
rmat[7,] <- c(0.50, -0.50)
rmat[8,] <- c(0.25, -1.00)
rmat[9,] <- c(0.0, -0.75)
rmat[10,] <- c(-0.25, 3.75)
###################################################
### code chunk number 17: BB.Stex:260-301
###################################################
# 1
setRNG(list(kind="Mersenne-Twister", normal.kind="Inversion", seed=13579))
ans1 <- matrix(NA, nrow(rmat), 3)
for (i in 1:nrow(rmat)) {
x0 <- rnorm(3) # random starting value
temp <- BBsolve(par=x0, fn=fleishman, r1=rmat[i,1], r2=rmat[i,2])
if (temp$conv == 0) ans1[i, ] <- temp$par
}
ans1 <- cbind(rmat, ans1)
colnames(ans1) <- c("skew", "kurtosis", "B", "C", "D")
ans1
# 2
setRNG(list(kind="Mersenne-Twister", normal.kind="Inversion", seed=91357))
ans2 <- matrix(NA, nrow(rmat), 3)
for (i in 1:nrow(rmat)) {
x0 <- rnorm(3) # random starting value
temp <- BBsolve(par=x0, fn=fleishman, r1=rmat[i,1], r2=rmat[i,2])
if (temp$conv == 0) ans2[i, ] <- temp$par
}
ans2 <- cbind(rmat, ans2)
colnames(ans2) <- c("skew", "kurtosis", "B", "C", "D")
ans2
# 3
setRNG(list(kind="Mersenne-Twister", normal.kind="Inversion", seed=79135))
ans3 <- matrix(NA, nrow(rmat), 3)
for (i in 1:nrow(rmat)) {
x0 <- rnorm(3) # random starting value
temp <- BBsolve(par=x0, fn=fleishman, r1=rmat[i,1], r2=rmat[i,2])
if (temp$conv == 0) ans3[i, ] <- temp$par
}
ans3 <- cbind(rmat, ans3)
colnames(ans3) <- c("skew", "kurtosis", "B", "C", "D")
ans3
###################################################
### code chunk number 18: BB.Stex:329-339
###################################################
poissmix.loglik <- function(p,y) {
# Log-likelihood for a binary Poisson mixture distribution
i <- 0:(length(y)-1)
loglik <- y * log(p[1] * exp(-p[2]) * p[2]^i / exp(lgamma(i+1)) +
(1 - p[1]) * exp(-p[3]) * p[3]^i / exp(lgamma(i+1)))
return (sum(loglik) )
}
# Data from Hasselblad (JASA 1969)
poissmix.dat <- data.frame(death=0:9,
freq=c(162,267,271,185,111,61,27,8,3,1))
###################################################
### code chunk number 19: BB.Stex:345-347
###################################################
lo <- c(0,0,0) # lower limits for parameters
hi <- c(1, Inf, Inf) # upper limits for parameters
###################################################
### code chunk number 20: BB.Stex:353-362
###################################################
p0 <- runif(3,c(0.2,1,1),c(0.8,5,8)) # a randomly generated vector of length 3
y <- c(162,267,271,185,111,61,27,8,3,1)
ans1 <- spg(par=p0, fn=poissmix.loglik, y=y,
lower=lo, upper=hi, control=list(maximize=TRUE, trace=FALSE))
ans1
ans2 <- BBoptim(par=p0, fn=poissmix.loglik, y=y,
lower=lo, upper=hi, control=list(maximize=TRUE))
ans2
###################################################
### code chunk number 21: BB.Stex:375-381
###################################################
require(numDeriv)
hess <- hessian(x=ans2$par, func=poissmix.loglik, y=y)
# Note that we have to supply the data vector `y'
hess
se <- sqrt(diag(solve(-hess)))
se
###################################################
### code chunk number 22: BB.Stex:389-400
###################################################
# 10 randomly generated starting values, each of length 3
p0 <- matrix(runif(30, c(0.2,1,1), c(0.8,8,8)), 10, 3, byrow=TRUE)
ans <- multiStart(par=p0, fn=poissmix.loglik, action="optimize",
y=y, lower=lo, upper=hi, control=list(maximize=TRUE))
# selecting only converged solutions
pmat <- round(cbind(ans$fvalue[ans$conv], ans$par[ans$conv, ]), 4)
dimnames(pmat) <- list(NULL, c("fvalue","parameter 1","parameter 2","parameter 3"))
pmat[!duplicated(pmat), ]
## ---- end of file: BB/inst/doc/BB.R ----
### R code from vignette source 'BBvignetteJSS.Stex'
###################################################
### code chunk number 1: BBvignetteJSS.Stex:84-85
###################################################
options(prompt="R> ", continue="+ ")
###################################################
### code chunk number 2: BBvignetteJSS.Stex:144-145 (eval = FALSE)
###################################################
## vignette("BB", package = "BB")
###################################################
### code chunk number 3: BBvignetteJSS.Stex:149-150 (eval = FALSE)
###################################################
## vignette("BBvignetteJSS", package = "BB")
###################################################
### code chunk number 4: BBvignetteJSS.Stex:156-158
###################################################
nsim <- 10 # 1000
nboot <- 50 # 500
###################################################
### code chunk number 5: BBvignetteJSS.Stex:381-393
###################################################
require("BB")
froth <- function(p){
r <- rep(NA, length(p))
r[1] <- -13 + p[1] + (p[2] * (5 - p[2]) - 2) * p[2]
r[2] <- -29 + p[1] + (p[2] * (1 + p[2]) - 14) * p[2]
r
}
p0 <- rep(0, 2)
dfsane(par = p0, fn = froth, control = list(trace = FALSE))
sane(par = p0, fn = froth, control = list(trace = FALSE))
BBsolve(par = p0, fn = froth)
###################################################
### code chunk number 6: BBvignetteJSS.Stex:434-438
###################################################
require("setRNG")
test.rng <- list(kind = "Mersenne-Twister", normal.kind = "Inversion",
seed = 1234)
old.seed <- setRNG(test.rng)
###################################################
### code chunk number 7: BBvignetteJSS.Stex:509-856
###################################################
expo3 <- function(p) {
n <- length(p)
r <- rep(NA, n)
onm1 <- 1:(n-1)
r[onm1] <- onm1/10 * (1 - p[onm1]^2 - exp(-p[onm1]^2))
r[n] <- (n/10) * (1 - exp(-p[n]^2))
r
}
dfsane1.expo3 <- dfsane2.expo3 <- sane1.expo3 <- sane2.expo3 <- bbs.expo3 <-
    matrix(NA, nsim, 5,
        dimnames = list(NULL, c("value", "feval", "iter", "conv", "cpu")))
old.seed <- setRNG(test.rng)
cat("Simulation test 1: ")
for (i in 1:nsim) {
cat(i, " ")
p0 <- rnorm(500)
t1 <- system.time(ans <-
sane(par = p0, fn = expo3, method = 1, control = list(trace = FALSE)))[1]
sane1.expo3[i, ] <- c(ans$residual, ans$feval, ans$iter, ans$convergence, t1)
t2 <- system.time(ans <-
sane(par = p0, fn = expo3, method = 2, control = list(trace = FALSE)))[1]
sane2.expo3[i, ] <- c(ans$residual, ans$feval, ans$iter, ans$convergence, t2)
t3 <- system.time(ans <-
dfsane(par = p0, fn = expo3, method = 1, control = list(trace = FALSE)))[1]
dfsane1.expo3[i, ] <- c(ans$residual, ans$feval, ans$iter, ans$convergence, t3)
t4 <- system.time(ans <-
dfsane(par = p0, fn = expo3, method = 2, control = list( trace = FALSE)))[1]
dfsane2.expo3[i, ] <- c(ans$residual, ans$feval, ans$iter, ans$convergence, t4)
t5 <- system.time(ans <-
BBsolve(par = p0, fn = expo3, control = list(trace = FALSE)))[1]
bbs.expo3[i, ] <- c(ans$residual, ans$feval, ans$iter, ans$convergence, t5)
}
cat("\n")
table1.test1 <- rbind(
c(apply( sane1.expo3, 2, summary)[c(4, 2,5), c(3, 2, 5)], sum( sane1.expo3[,4] > 0)),
c(apply(dfsane1.expo3, 2, summary)[c(4, 2,5), c(3, 2, 5)], sum(dfsane1.expo3[,4] > 0)),
c(apply( sane2.expo3, 2, summary)[c(4, 2,5), c(3, 2, 5)], sum( sane2.expo3[,4] > 0)),
c(apply(dfsane2.expo3, 2, summary)[c(4, 2,5), c(3, 2, 5)], sum(dfsane2.expo3[,4] > 0)),
c(apply( bbs.expo3, 2, summary)[c(4, 2,5), c(3, 2, 5)], sum( bbs.expo3[,4] > 0))
)
dimnames(table1.test1) <- list(
c("sane-1", "dfsane-1", "sane-2", "dfsane-2", "BBsolve"), NULL)
table1.test1
###################### test function 2 ######################
trigexp <- function(x) {
n <- length(x)
r <- rep(NA, n)
r[1] <- 3*x[1]^2 + 2*x[2] - 5 + sin(x[1] - x[2]) * sin(x[1] + x[2])
tn1 <- 2:(n-1)
r[tn1] <- -x[tn1-1] * exp(x[tn1-1] - x[tn1]) + x[tn1] * ( 4 + 3*x[tn1]^2) +
2 * x[tn1 + 1] + sin(x[tn1] - x[tn1 + 1]) * sin(x[tn1] + x[tn1 + 1]) - 8
r[n] <- -x[n-1] * exp(x[n-1] - x[n]) + 4*x[n] - 3
r
}
old.seed <- setRNG(test.rng)
dfsane1.trigexp <- dfsane2.trigexp <- sane1.trigexp <- sane2.trigexp <-
matrix(NA, nsim, 5,
dimnames=list(NULL,c("value", "feval", "iter", "conv", "cpu")))
cat("Simulation test 2: ")
for (i in 1:nsim) {
cat(i, " ")
p0 <- rnorm(500)
t1 <- system.time(ans <-
sane(par=p0, fn=trigexp, method=1, control=list( trace=FALSE)))[1]
sane1.trigexp[i, ] <- c(ans$residual, ans$feval, ans$iter, ans$convergence, t1)
t2 <- system.time(ans <-
sane(par=p0, fn=trigexp, method=2, control=list( trace=FALSE)))[1]
sane2.trigexp[i, ] <- c(ans$residual, ans$feval, ans$iter, ans$convergence, t2)
t3 <- system.time(ans <-
dfsane(par=p0, fn=trigexp, method=1, control=list( trace=FALSE)))[1]
dfsane1.trigexp[i, ] <- c(ans$residual, ans$feval, ans$iter, ans$convergence, t3)
t4 <- system.time(ans <-
dfsane(par=p0, fn=trigexp, method=2, control=list( trace=FALSE)))[1]
dfsane2.trigexp[i, ] <- c(ans$residual, ans$feval, ans$iter, ans$convergence, t4)
}
cat("\n")
table1.test2 <- rbind(
c(apply( sane1.trigexp, 2, summary)[c(4, 2,5), c(3, 2, 5)], sum( sane1.trigexp[,4] > 0)),
c(apply(dfsane1.trigexp, 2, summary)[c(4, 2,5), c(3, 2, 5)], sum(dfsane1.trigexp[,4] > 0)),
c(apply( sane2.trigexp, 2, summary)[c(4, 2,5), c(3, 2, 5)], sum( sane2.trigexp[,4] > 0)),
c(apply(dfsane2.trigexp, 2, summary)[c(4, 2,5), c(3, 2, 5)], sum(dfsane2.trigexp[,4] > 0))
)
dimnames(table1.test2) <- list(
c("sane-1", "dfsane-1", "sane-2", "dfsane-2"), NULL)
table1.test2
###################### test function 3 ######################
broydt <- function(x, h=2) {
n <- length(x)
r <- rep(NA, n)
r[1] <- ((3 - h * x[1]) * x[1]) - 2 * x[2] + 1
tnm1 <- 2:(n-1)
r[tnm1] <- ((3 - h * x[tnm1]) * x[tnm1]) - x[tnm1-1] - 2 * x[tnm1+1] + 1
r[n] <- ((3 - h * x[n]) * x[n]) - x[n-1] + 1
r
}
old.seed <- setRNG(test.rng)
dfsane1.broydt <- dfsane2.broydt <- sane1.broydt <- sane2.broydt <-
matrix(NA, nsim, 5,
dimnames=list(NULL,c("value", "feval", "iter", "conv", "cpu")))
cat("Simulation test 3: ")
for (i in 1:nsim) {
cat(i, " ")
p0 <- -runif(500)
t1 <- system.time(ans <-
sane(par=p0, fn=broydt, method=1, control=list(trace=FALSE)))[1]
sane1.broydt[i, ] <- c(ans$residual, ans$feval, ans$iter, ans$convergence, t1)
t2 <- system.time(ans <-
sane(par=p0, fn=broydt, method=2, control=list(trace=FALSE)))[1]
sane2.broydt[i, ] <- c(ans$residual, ans$feval, ans$iter, ans$convergence, t2)
t3 <- system.time(ans <-
dfsane(par=p0, fn=broydt, method=1, control=list(trace=FALSE)))[1]
dfsane1.broydt[i, ] <- c(ans$residual, ans$feval, ans$iter, ans$convergence, t3)
t4 <- system.time(ans <-
dfsane(par=p0, fn=broydt, method=2, control=list(trace=FALSE)))[1]
dfsane2.broydt[i, ] <- c(ans$residual, ans$feval, ans$iter, ans$convergence, t4)
}
cat("\n")
table1.test3 <- rbind(
c(apply( sane1.broydt, 2, summary)[c(4, 2,5), c(3, 2, 5)], sum( sane1.broydt[,4] > 0)),
c(apply(dfsane1.broydt, 2, summary)[c(4, 2,5), c(3, 2, 5)], sum(dfsane1.broydt[,4] > 0)),
c(apply( sane2.broydt, 2, summary)[c(4, 2,5), c(3, 2, 5)], sum( sane2.broydt[,4] > 0)),
c(apply(dfsane2.broydt, 2, summary)[c(4, 2,5), c(3, 2, 5)], sum(dfsane2.broydt[,4] > 0))
)
dimnames(table1.test3) <- list(
c("sane-1", "dfsane-1", "sane-2", "dfsane-2"), NULL)
table1.test3
###################### test function 4 ######################
extrosbk <- function(x) {
n <- length(x)
r <- rep(NA, n)
j <- 2 * (1:(n/2))
jm1 <- j - 1
r[jm1] <- 10 * (x[j] - x[jm1]^2)
r[j] <- 1 - x[jm1]
r
}
old.seed <- setRNG(test.rng)
dfsane1.extrosbk <- dfsane2.extrosbk <- sane1.extrosbk <- sane2.extrosbk <-
bbs.extrosbk <- matrix(NA, nsim, 5,
dimnames = list(NULL,c("value", "feval", "iter", "conv", "cpu")))
cat("Simulation test 4: ")
for (i in 1:nsim) {
cat(i, " ")
p0 <- runif(500)
t1 <- system.time(ans <-
sane(par = p0, fn = extrosbk, method = 1, control = list( M = 10, noimp = 100, trace = FALSE)))[1]
sane1.extrosbk[i, ] <- c(ans$residual, ans$feval, ans$iter, ans$convergence, t1)
t2 <- system.time(ans <-
sane(par = p0, fn = extrosbk, method = 2, control = list( M = 10, noimp = 100, trace = FALSE)))[1]
sane2.extrosbk[i, ] <- c(ans$residual, ans$feval, ans$iter, ans$convergence, t2)
t3 <- system.time(ans <-
dfsane(par = p0, fn = extrosbk, method = 1, control = list( M = 10, noimp = 100, trace = FALSE)))[1]
dfsane1.extrosbk[i, ] <- c(ans$residual, ans$feval, ans$iter, ans$convergence, t3)
t4 <- system.time(ans <-
dfsane(par = p0, fn = extrosbk, method = 2, control = list( M = 10, noimp = 100, trace = FALSE)))[1]
dfsane2.extrosbk[i, ] <- c(ans$residual, ans$feval, ans$iter, ans$convergence, t4)
t5 <- system.time(ans <-
BBsolve(par = p0, fn = extrosbk, control = list(trace = FALSE)))[1]
bbs.extrosbk[i, ] <- c(ans$residual, ans$feval, ans$iter, ans$convergence, t5)
}
cat("\n")
table1.test4 <- rbind(
c(apply( sane1.extrosbk, 2, summary)[c(4, 2,5), c(3, 2, 5)], sum( sane1.extrosbk[,4] > 0)),
c(apply(dfsane1.extrosbk, 2, summary)[c(4, 2,5), c(3, 2, 5)], sum(dfsane1.extrosbk[,4] > 0)),
c(apply( sane2.extrosbk, 2, summary)[c(4, 2,5), c(3, 2, 5)], sum( sane2.extrosbk[,4] > 0)),
c(apply(dfsane2.extrosbk, 2, summary)[c(4, 2,5), c(3, 2, 5)], sum(dfsane2.extrosbk[,4] > 0)),
c(apply( bbs.extrosbk, 2, summary)[c(4, 2,5), c(3, 2, 5)], sum( bbs.extrosbk[,4] > 0))
)
dimnames(table1.test4) <- list(
c("sane-1", "dfsane-1", "sane-2", "dfsane-2", "BBsolve"), NULL)
table1.test4
###################### test function 5 ######################
troesch <- function(x) {
n <- length(x)
tnm1 <- 2:(n-1)
r <- rep(NA, n)
h <- 1 / (n+1)
h2 <- 10 * h^2
r[1] <- 2 * x[1] + h2 * sinh(10 * x[1]) - x[2]
r[tnm1] <- 2 * x[tnm1] + h2 * sinh(10 * x[tnm1]) - x[tnm1-1] - x[tnm1+1]
r[n] <- 2 * x[n] + h2 * sinh(10 * x[n]) - x[n-1] - 1
r
}
old.seed <- setRNG(test.rng)
dfsane1.troesch <- dfsane2.troesch <- sane1.troesch <- sane2.troesch <-
matrix(NA, nsim, 5,
dimnames = list(NULL,c("value", "feval", "iter", "conv", "cpu")))
cat("Simulation test 5: ")
for (i in 1:nsim) {
cat(i, " ")
p0 <- sort(runif(500))
t1 <- system.time(ans <-
sane(par = p0, fn = troesch, method = 1, control = list(trace = FALSE)))[1]
sane1.troesch[i, ] <- c(ans$residual, ans$feval, ans$iter, ans$convergence, t1)
t2 <- system.time(ans <-
sane(par = p0, fn = troesch, method = 2, control = list(trace = FALSE)))[1]
sane2.troesch[i, ] <- c(ans$residual, ans$feval, ans$iter, ans$convergence, t2)
t3 <- system.time(ans <-
dfsane(par = p0, fn = troesch, method = 1, control = list(trace = FALSE)))[1]
dfsane1.troesch[i, ] <- c(ans$residual, ans$feval, ans$iter, ans$convergence, t3)
t4 <- system.time(ans <-
dfsane(par = p0, fn = troesch, method = 2, control = list(trace = FALSE)))[1]
dfsane2.troesch[i, ] <- c(ans$residual, ans$feval, ans$iter, ans$convergence, t4)
}
cat("\n")
table1.test5 <- rbind(
c(apply( sane1.troesch, 2, summary)[c(4, 2,5), c(3, 2, 5)], sum( sane1.troesch[,4] > 0)),
c(apply(dfsane1.troesch, 2, summary)[c(4, 2,5), c(3, 2, 5)], sum(dfsane1.troesch[,4] > 0)),
c(apply( sane2.troesch, 2, summary)[c(4, 2,5), c(3, 2, 5)], sum( sane2.troesch[,4] > 0)),
c(apply(dfsane2.troesch, 2, summary)[c(4, 2,5), c(3, 2, 5)], sum(dfsane2.troesch[,4] > 0))
)
dimnames(table1.test5) <- list(
c("sane-1", "dfsane-1", "sane-2", "dfsane-2"), NULL)
table1.test5
###################### test function 6 ######################
chandraH <- function(x, c=0.9) {
n <- length(x)
k <- 1:n
mu <- (k - 0.5)/n
dterm <- outer(mu, mu, function(x1,x2) x1 / (x1 + x2) )
x - 1 / (1 - c/(2*n) * rowSums(t(t(dterm) * x)))
}
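# Added sanity check (illustrative): with c = 0 the discretized H-equation
# reduces to x - 1 = 0, so x = rep(1, n) is an exact root.
stopifnot(isTRUE(all.equal(chandraH(rep(1, 5), c = 0), rep(0, 5))))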
old.seed <- setRNG(test.rng)
dfsane1.chandraH <- dfsane2.chandraH <- sane1.chandraH <- sane2.chandraH <-
matrix(NA, nsim, 5,
dimnames = list(NULL,c("value", "feval", "iter", "conv", "cpu")))
cat("Simulation test 6: ")
for (i in 1:nsim) {
cat(i, " ")
p0 <- runif(500)
t1 <- system.time(ans <-
sane(par = p0, fn = chandraH, method = 1, control = list(trace = FALSE)))[1]
sane1.chandraH[i, ] <- c(ans$residual, ans$feval, ans$iter, ans$convergence, t1)
t2 <- system.time(ans <-
sane(par = p0, fn = chandraH, method = 2, control = list(trace = FALSE)))[1]
sane2.chandraH[i, ] <- c(ans$residual, ans$feval, ans$iter, ans$convergence, t2)
t3 <- system.time(ans <-
dfsane(par = p0, fn = chandraH, method = 1, control = list(trace = FALSE)))[1]
dfsane1.chandraH[i, ] <- c(ans$residual, ans$feval, ans$iter, ans$convergence, t3)
t4 <- system.time(ans <-
dfsane(par = p0, fn = chandraH, method = 2, control = list(trace = FALSE)))[1]
dfsane2.chandraH[i, ] <- c(ans$residual, ans$feval, ans$iter, ans$convergence, t4)
}
cat("\nSimulations for table 1 complete.\n")
table1.test6 <- rbind(
c(apply( sane1.chandraH, 2, summary)[c(4, 2, 5), c(3, 2, 5)], sum( sane1.chandraH[,4] > 0)),
c(apply(dfsane1.chandraH, 2, summary)[c(4, 2, 5), c(3, 2, 5)], sum(dfsane1.chandraH[,4] > 0)),
c(apply( sane2.chandraH, 2, summary)[c(4, 2, 5), c(3, 2, 5)], sum( sane2.chandraH[,4] > 0)),
c(apply(dfsane2.chandraH, 2, summary)[c(4, 2, 5), c(3, 2, 5)], sum(dfsane2.chandraH[,4] > 0))
)
dimnames(table1.test6) <- list(
c("sane-1", "dfsane-1", "sane-2", "dfsane-2"), NULL)
table1.caption <- paste("Results of numerical experiments for 6 standard test problems.",
nsim, "randomly generated starting values were used for each problem. Means
and inter-quartile ranges (in parentheses) are shown. Default control
parameters were used in all the algorithms.")
table1 <- rbind(table1.test1, table1.test2, table1.test3, table1.test4,
table1.test5, table1.test6)
#dimnames(table1) <- list(dimnames(table1.test1)[[1]],
# c("", "# Iters", "", "", "# Fevals", "", "", "CPU (sec)", "", "# Failures"))
cgroups <- c("# Iters", "# Fevals", "CPU (sec)","# Failures")
rgroups <- c("\\emph{1. Exponential function 3}",
"\\emph{2. Trigexp function}",
"\\emph{3. Broyden's tridiagonal function}",
"\\emph{4. Extended Rosenbrock function}",
"\\emph{5. Troesch function}",
"\\emph{6. Chandrasekhar's H-equation}")
###################################################
### code chunk number 8: BBvignetteJSS.Stex:860-874
###################################################
require("Hmisc")
latex(table1,
file="",
caption=table1.caption, caption.loc='bottom',
#align = "cccccccccc",
#colheads="Methods & \\# Iters & \\# Fevals & CPU (sec) & \\# Failures \\\\",
cgroups = cgroups, n.cgroups= c(3,3,3,1),
rgroups = rgroups, n.rgroups= c(5,4,4,5,4,4),
dec=3,
label="table:stdexpmtsGENERATED",
landscape=FALSE, size="small",
numeric.dollar=TRUE)
###################################################
### code chunk number 9: BBvignetteJSS.Stex:916-926
###################################################
hdp <- function(x) {
r <- rep(NA, length(x))
r[1] <- 5*x[1]^9 - 6*x[1]^5 * x[2]^2 + x[1] * x[2]^4 + 2*x[1] * x[3]
r[2] <- -2 * x[1]^6 * x[2] + 2 * x[1]^2 * x[2]^3 + 2 * x[2] * x[3]
r[3] <- x[1]^2 + x[2]^2 - 0.265625
r
}
old.seed <- setRNG(test.rng)
p0 <- matrix(runif(900), 300, 3)
###################################################
### code chunk number 10: BBvignetteJSS.Stex:929-930
###################################################
ans <- multiStart(par = p0, fn = hdp, action = "solve")
###################################################
### code chunk number 11: BBvignetteJSS.Stex:933-938
###################################################
sum(ans$conv)
pmat <- ans$par[ans$conv, ]
ord1 <- order(pmat[, 1])
ans <- round(pmat[ord1, ], 4)
ans[!duplicated(ans), ]
###################################################
### code chunk number 12: BBvignetteJSS.Stex:992-1026
###################################################
U.eqn <- function(beta) {
  # Poisson (log-link) score equations; X, Y and obs.period are read from
  # the enclosing environment.
Xb <- c(X %*% beta)
c(crossprod(X, Y - (obs.period * exp(Xb))))
}
poisson.sim <- function(beta, X, obs.period) {
Xb <- c(X %*% beta)
mean <- exp(Xb) * obs.period
rpois(nrow(X), lambda = mean)
}
old.seed <- setRNG(test.rng)
n <- 500
X <- matrix(NA, n, 8)
X[,1] <- rep(1, n)
X[,3] <- rbinom(n, 1, prob=0.5)
X[,5] <- rbinom(n, 1, prob=0.4)
X[,7] <- rbinom(n, 1, prob=0.4)
X[,8] <- rbinom(n, 1, prob=0.2)
X[,2] <- rexp(n, rate = 1/10)
X[,4] <- rexp(n, rate = 1/10)
X[,6] <- rnorm(n, mean = 10, sd = 2)
obs.period <- rnorm(n, mean = 100, sd = 30)
beta <- c(-5, 0.04, 0.3, 0.05, 0.3, -0.005, 0.1, -0.4)
Y <- poisson.sim(beta, X, obs.period)
res <- dfsane(par = rep(0,8), fn = U.eqn,
control = list(NM = TRUE, M = 100, trace = FALSE))
res
glm(Y ~ X[,-1], offset = log(obs.period),
family = poisson(link = "log"))
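# Added illustration: the root of the estimating equation found by dfsane
# should lie close to the true beta used to simulate the data.
print(round(res$par - beta, 3))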
###################################################
### code chunk number 13: BBvignetteJSS.Stex:1086-1129
###################################################
aft.eqn <- function (beta, X, Y, delta, weights = "logrank") {
deltaF <- delta == 1
Y.zeta <- Y - c(X %*% beta)
ind <- order(Y.zeta, decreasing = TRUE)
dd <- deltaF[ind]
n <- length(Y.zeta)
tmp <- apply(X[ind, ], 2, function (x) cumsum(x))
if (weights == "logrank") {
c1 <- colSums(X[deltaF, ])
r <- (c1 - colSums(tmp[dd, ] / (1:n)[dd])) / sqrt(n)
}
if (weights == "gehan") {
c1 <- colSums(X[deltaF, ]* ((1:n)[order(ind)][deltaF]))
r <- (c1 - colSums(tmp[dd, ])) / ( n * sqrt(n))
}
r
}
old.seed <- setRNG(test.rng)
n <- 1000
X <- matrix(NA, n, 8)
X[,1] <- rbinom(n, 1, prob=0.5)
X[,2] <- rbinom(n, 1, prob=0.4)
X[,3] <- rbinom(n, 1, prob=0.4)
X[,4] <- rbinom(n, 1, prob=0.3)
temp <- as.factor(sample(c("0", "1", "2"), size=n, rep=TRUE,
prob=c(1/3,1/3,1/3)))
X[,5] <- temp == "1"
X[,6] <- temp == "2"
X[,7] <- rexp(n, rate=1/10)
X[,8] <- rnorm(n)
eta.true <- c(0.5, -0.4, 0.3, -0.2, -0.1, 0.4, 0.1, -0.6)
Xb <- drop(X %*% eta.true)
old.seed <- setRNG(test.rng)
par.lr <- par.gh <- matrix(NA, nsim, 8)
stats.lr <- stats.gh <- matrix(NA, nsim, 5)
sumDelta <- rep(NA, nsim)
t1 <- t2 <-0
###################################################
### code chunk number 14: BBvignetteJSS.Stex:1159-1188
###################################################
cat("Simulation for Table 2: ")
for (i in 1:nsim) {
cat( i, " ")
err <- rlnorm(n, mean=1)
Y.orig <- Xb + err
cutoff <- floor(quantile(Y.orig, prob=0.5))
cens <- runif(n, cutoff, quantile(Y.orig, prob=0.95))
Y <- pmin(cens, Y.orig)
delta <- 1 * (Y.orig <= cens)
sumDelta[i] <- sum(delta)
t1 <- t1 + system.time(ans.eta <-
dfsane(par=rep(0,8), fn=aft.eqn,
control = list(NM = TRUE, trace = FALSE),
X=X, Y=Y, delta = delta, weights = "logrank"))[1]
par.lr[i,] <- ans.eta$par
stats.lr[i, ] <- c(ans.eta$iter, ans.eta$feval, as.numeric(t1),
ans.eta$conv, ans.eta$resid)
t2 <- t2 + system.time(ans.eta <-
dfsane(par=rep(0,8), fn=aft.eqn,
control = list(NM = TRUE, trace = FALSE),
X=X, Y=Y, delta = delta, weights="gehan"))[1]
par.gh[i,] <- ans.eta$par
stats.gh[i, ] <- c(ans.eta$iter, ans.eta$feval, as.numeric(t2),
ans.eta$conv, ans.eta$resid)
invisible({gc(); gc()})
}
cat("\n")
###################################################
### code chunk number 15: BBvignetteJSS.Stex:1191-1209
###################################################
print(t1/nsim)
print(t2/nsim)
print(mean(sumDelta))
mean.lr <- signif(colMeans(par.lr),3)
bias.lr <- mean.lr - eta.true
sd.lr <- signif(apply(par.lr, 2, sd),3)
mean.gh <- signif(colMeans(par.gh),3)
bias.gh <- mean.gh - eta.true
sd.gh <- signif(apply(par.gh, 2, sd),3)
signif(colMeans(stats.lr),3)
signif(colMeans(stats.gh),3)
###################################################
### code chunk number 16: BBvignetteJSS.Stex:1212-1231
###################################################
table2 <- cbind( eta.true, mean.lr, bias.lr, sd.lr, mean.gh, bias.gh, sd.gh)
dimnames(table2) <- list( c("$X_1$", "$X_2$", "$X_3$", "$X_4$",
"$X_5$", "$X_6$", "$X_7$", "$X_8$"), NULL)
table2.caption <- paste("Simulation results for the rank-based regression
in accelerated failure time model (", nsim, "simulations). Estimates were obtained using
the \\code{dfsane} algorithm with \\code{M=100}.")
latex(table2,
caption=table2.caption, caption.loc='bottom',
file="",
colheads=c("", "Log-rank", "Gehan"),
label="table:aftGENERATED",
landscape=FALSE, size="small",
dec=3, numeric.dollar=TRUE,
extracolheads=c( #"Parameter",
"Truth", "Mean", "Bias", "Std. Dev.",
"Mean", "Bias", "Std. Dev."),
double.slash=FALSE)
###################################################
### code chunk number 17: BBvignetteJSS.Stex:1278-1280
###################################################
require("survival")
attach(pbc)
###################################################
### code chunk number 18: BBvignetteJSS.Stex:1283-1313
###################################################
Y <- log(time)
delta <- status == 2
X <- cbind(age, log(albumin), log(bili), edema, log(protime))
missing <- apply(X, 1, function(x) any(is.na(x)))
Y <- Y[!missing]
X <- X[!missing, ]
delta <- delta[!missing]
####### Log-rank estimator #######
t1 <- system.time(ans.lr <-
dfsane(par=rep(0, ncol(X)), fn = aft.eqn,
control=list(NM = TRUE, M = 100, noimp = 500, trace = FALSE),
X=X, Y=Y, delta=delta))[1]
# With maxit=5000 this fails with "Lack of improvement in objective function"
# not with "Maximum limit for iterations exceeded"
t1
ans.lr
####### Gehan estimator #######
t2 <- system.time(ans.gh <-
dfsane(par = rep(0, ncol(X)), fn = aft.eqn,
control = list(NM = TRUE, M = 100, noimp = 500, trace = FALSE),
X=X, Y=Y, delta=delta, weights = "gehan"))[1]
t2
ans.gh
###################################################
### code chunk number 19: BBvignetteJSS.Stex:1321-1334 (eval = FALSE)
###################################################
## # This source defines functions l1fit and aft.fun
## source("http://www.columbia.edu/~zj7/aftsp.R")
## # N.B. aft.fun resets the RNG seed by default to a fixed value,
## # and does not reset it. Beware.
##
##
## require("quantreg")
## t3 <- system.time(ans.jin <-
## aft.fun(x=X, y=Y, delta=delta, mcsize=1))[1]
##
## t3
##
## ans.jin$beta
###################################################
### code chunk number 20: BBvignetteJSS.Stex:1337-1344
###################################################
# without Jin's results
U <- function(x, func, ...) sqrt(mean(func(x, ...)^2))
# result from Jin et al. (2003) gives higher residuals
table3.ResidualNorm <- c(
U(ans.gh$par, func=aft.eqn, X=X, Y=Y, delta=delta,
weights="gehan"),
U(ans.lr$par, func=aft.eqn, X=X, Y=Y, delta=delta))
###################################################
### code chunk number 21: BBvignetteJSS.Stex:1348-1359 (eval = FALSE)
###################################################
## # with Jin's results
## U <- function(x, func, ...) sqrt(mean(func(x, ...)^2))
## # result from Jin et al. (2003) gives higher residuals
## table3.ResidualNorm <- c(
## U(ans.gh$par, func=aft.eqn, X=X, Y=Y, delta=delta,
## weights="gehan"),
## U(ans.jin$beta[1,], func=aft.eqn, X=X, Y=Y, delta=delta,
## weights="gehan"),
## U(ans.lr$par, func=aft.eqn, X=X, Y=Y, delta=delta),
## U(ans.jin$beta[2,], func=aft.eqn, X=X, Y=Y, delta=delta))
##
###################################################
### code chunk number 22: BBvignetteJSS.Stex:1363-1376
###################################################
# Bootstrap to obtain standard errors
Y <- log(time)
delta <- status==2
X <- cbind(age, log(albumin), log(bili), edema, log(protime))
missing <- apply(X, 1, function(x) any(is.na(x)))
Y.orig <- Y[!missing]
X.orig <- X[!missing, ]
delta.orig <- delta[!missing]
old.seed <- setRNG(test.rng)
lr.boot <- gh.boot <- matrix(NA, nboot, ncol(X))
time1 <- time2 <- 0
###################################################
### code chunk number 23: BBvignetteJSS.Stex:1379-1398
###################################################
cat("Bootstrap sample: ")
for (i in 1:nboot) {
cat(i, " ")
select <- sample(1:nrow(X.orig), size=nrow(X.orig), rep=TRUE)
Y <- Y.orig[select]
X <- X.orig[select, ]
delta <- delta.orig[select]
time1 <- time1 + system.time(ans.lr <-
dfsane(par = rep(0, ncol(X)), fn = aft.eqn,
control = list(NM = TRUE, M = 100, noimp = 500, trace = FALSE),
X=X, Y=Y, delta=delta))[1]
time2 <- time2 + system.time(ans.gh <-
dfsane(par = rep(0, ncol(X)), fn = aft.eqn,
control = list(NM = TRUE, M = 100, noimp = 500, trace = FALSE),
X=X, Y=Y, delta=delta, weights = "gehan"))[1]
lr.boot[i,] <- ans.lr$par
gh.boot[i,] <- ans.gh$par
}
cat("\n")
###################################################
### code chunk number 24: BBvignetteJSS.Stex:1401-1449 (eval = FALSE)
###################################################
## time3 <- system.time( ans.jin.boot <-
## aft.fun(x = X.orig, y = Y.orig, delta = delta.orig,
## mcsize = nboot))[1]
##
## time1
##
## time2
##
## time3
##
## colMeans(lr.boot)
## # Results on different systems and versions of R:
## # [1] -0.02744423 1.09871350 -0.59597720 -0.84169498 -0.95067376
## # [1] -0.02718006 1.01484050 -0.60553894 -0.83216296 -0.82671339
## # [1] -0.02746916 1.09371431 -0.59630955 -0.84170621 -0.94147407
##
## sd(lr.boot) * (499/500)
## # Results on different systems and versions of R:
## # [1] 0.005778319 0.497075716 0.064839483 0.306026261 0.690452468
## # [1] 0.006005054 0.579962922 0.068367668 0.307980986 0.665742686
## # [1] 0.005777676 0.504362828 0.064742446 0.309687062 0.695128194
##
## colMeans(gh.boot)
## # Results on different systems and versions of R:
## # [1] -0.0263899 1.4477801 -0.5756074 -0.9990443 -2.0961280
## # [1] -0.02616728 1.41126364 -0.58311902 -1.00953045 -2.01724976
## # [1] -0.02633854 1.45577255 -0.57439183 -0.99630007 -2.12363711
##
## sd(gh.boot) * (499/500)
## # Results on different systems and versions of R:
## # [1] 0.006248941 0.519016144 0.068759981 0.294145730 0.919565487
## # [1] 0.005599693 0.571631837 0.075018323 0.304463597 1.043196254
## # [1] 0.006183826 0.518332233 0.068672881 0.291036025 0.917733660
##
##
## ans.jin.boot$beta
##
## sqrt(diag(ans.jin.boot$betacov[,,2])) # log-rank
## # Results on different systems and versions of R:
## # [1] 0.005304614 0.470080732 0.053191766 0.224331718 0.545344403
## # [1] 0.00517431 0.44904332 0.05632078 0.24613883 0.54826652
## # [1] 0.00517431 0.44904332 0.05632078 0.24613883 0.54826652
##
## sqrt(diag(ans.jin.boot$betacov[,,1])) # Gehan
## # Results on different systems and versions of R:
## # [1] 0.005553049 0.522259799 0.061634483 0.270337048 0.803683570
## # [1] 0.005659013 0.522871858 0.062670939 0.283731999 0.775959845
## # [1] 0.005659013 0.522871858 0.062670939 0.283731999 0.775959845
###################################################
### code chunk number 25: BBvignetteJSS.Stex:1454-1495
###################################################
table3.caption <- paste("Rank-based regression of the accelerated failure time (AFT) model
for the primary biliary cirrhosis (PBC) data set. Point estimates and
standard errors (in parentheses) are provided. Standard errors
for \\code{dfsane} are obtained from", nboot, "bootstrap samples.")
table3.part1 <- cbind(
colMeans(gh.boot), apply(gh.boot, 2, sd) * (499/500),
colMeans(lr.boot), apply(lr.boot, 2, sd) * (499/500)
)
dimnames(table3.part1) <- list(
c("age", "log(albumin)", "log(bili)", "edema", "log(protime)"), NULL)
latex(table3.part1,
file="",
#align = "c|cc||cc",
#halign = "c|cc||cc",
#colheads=c("", "", "Gehan","", "Log-rank"),
#extracolheads=c("Covariate",
# "\\code{dfsane}", "",
# "\\code{dfsane}", ""),
dec=3,
label="table:pbcGENERATEDp1",
landscape=FALSE, size="small",
numeric.dollar=TRUE)
table3.ResidualNorm <- matrix(table3.ResidualNorm, 1,2)
dimnames(table3.ResidualNorm) <- list(
"Residual norm $\\frac{\\|F(x_n)\\|}{\\sqrt{p}}$" , NULL)
latex(table3.ResidualNorm,
file="",
caption=table3.caption, caption.loc='bottom',
align = "c|c||c",
dec=3,
label="table:pbcGENERATEDp2",
landscape=FALSE, size="small",
numeric.dollar=TRUE)
###################################################
### code chunk number 26: BBvignetteJSS.Stex:1498-1544 (eval = FALSE)
###################################################
## # This version of the table requires Jin's code results
##
## table3.caption <- paste("Rank-based regression of the accelerated failure time (AFT) model
## for the primary biliary cirrhosis (PBC) data set. Point estimates and
## standard errors (in parentheses) are provided. Standard errors
## for \\code{dfsane} are obtained from", nboot, "bootstrap samples.")
##
## table3.part1 <- cbind(
## colMeans(gh.boot), sd(gh.boot) * (499/500),
## ans.jin.boot$beta[1,], # Gehan
## sqrt(diag(ans.jin.boot$betacov[,,1])), # Gehan
## colMeans(lr.boot), sd(lr.boot) * (499/500),
## ans.jin.boot$beta[2,], # log-rank
## sqrt(diag(ans.jin.boot$betacov[,,2])) # log-rank
## )
##
## dimnames(table3.part1) <- list(
## c("age", "log(albumin)", "log(bili)", "edema", "log(protime)"), NULL)
##
## latex(table3.part1,
## file="",
## align = "c|cccc||cccc",
## halign = "c|cccc||cccc",
## colheads=c("", "", "Gehan","", "", "","Log-rank", "", ""),
## extracolheads=c("Covariate",
## "\\code{dfsane}", "", "\\citet{JinLinWeiYin03}", "",
## "\\code{dfsane}", "", "\\citet{JinLinWeiYin03}", ""),
## dec=3,
## label="table:pbcGENERATEDp1",
## landscape=FALSE, size="small",
## numeric.dollar=TRUE)
##
## table3.ResidualNorm <- matrix(table3.ResidualNorm, 1,4)
## dimnames(table3.ResidualNorm) <- list(
## "Residual norm $\\frac{\\|F(x_n)\\|}{\\sqrt{p}}$" , NULL)
##
## latex(table3.ResidualNorm,
## file="",
## caption=table3.caption, caption.loc='bottom',
## align = "c|cc||cc",
## dec=3,
## label="table:pbcGENERATEDp2",
## landscape=FALSE, size="small",
## numeric.dollar=TRUE)
##
##
| /scratch/gouwar.j/cran-all/cranData/BB/inst/doc/BBvignetteJSS.R |
require(BB)
##########################
# Gaussian mixture density
dgaussmix <- function (p) {
  # NB: 'nmix' and the data vector 'y' are read from the enclosing
  # environment; the 'y' argument of the anonymous function below shadows
  # the data vector element-wise.
prop <- p[1:nmix]
mu <- p[(nmix+1):(2*nmix)]
sigma <- p[(2*nmix+1)]
sapply(y, function(y)sum(prop*dnorm(y,mean=mu,sd=sqrt(sigma))))
}
# generating random numbers from a Gaussian mixture
rgaussmix <- function (n, prop, mu, sigma) {
nmix <- length(mu)
imix <- sample(1:nmix, size=n, prob=prop, rep=TRUE)
y <- rnorm(n, mean = mu[imix], sd = sqrt(sigma))
return(y)
}
# Gaussian mixture minus log-likelihood
gaussmix.mloglik <- function(p){
- sum(log(dgaussmix(p)))
}
# Gradient of Gaussian mixture log-likelihood
gaussmix.grad <- function(p){
g <- rep(NA, length(p))
f <- dgaussmix(p)
pj <- p[1:nmix]
mu <- p[(nmix+1): (2*nmix)]
sigma <- p[2*nmix + 1]
phi <- outer(y, mu, function(y, mu) dnorm(y,mean=mu,sd=sqrt(sigma)))
g[1:nmix] <- - colSums(phi/f)
phi2 <- outer(y, mu, function(y, mu) (y - mu)/sigma)
fimuj <- t(t(phi * phi2) * pj)
g[(nmix+1): (2*nmix)] <- - colSums(fimuj/f)
phi3 <- outer(y, mu, function(y, mu) (y - mu)^2/sigma)
fisig <- apply(t(t(phi * ( 1 - phi3) ) * pj), 1, sum)
g[2*nmix+1] <- sum(fisig / f) / (2 * sigma)
g
}
heq <- function(x) {
x[1] + x[2] + x[3] + x[4] - 1
}
hin <- function(x) {
h <- rep(NA, 9)
h[1] <- x[1]
h[2] <- x[2]
h[3] <- x[3]
h[4] <- x[4]
h[5] <- 1 - x[1]
h[6] <- 1 - x[2]
h[7] <- 1 - x[3]
h[8] <- 1 - x[4]
h[9] <- x[9]
h
}
Amat <- matrix(0, 10, 9)
Amat[1, 1:4] <- 1 # corresponds to equality
Amat[2,1] <- Amat[3,2] <- Amat[4,3] <- Amat[5,4] <- Amat[10,9] <- 1
Amat[6, 1] <- Amat[7, 2] <- Amat[8, 3] <- Amat[9, 4] <- -1
b <- c(1,0,0,0,0,-1,-1,-1,-1, 0)
meq <- 1
# A data realization
p <- c(0.2,0.4,0.2,0.2)
nmix <- length(p)
mu <- c(0,3,7,11)
sigma <- 2
npts <- 500
set.seed(12345)
y <- rgaussmix(npts, p, mu, sigma)
ymean <- mean(y)
ysd <- sd(y)
p0 <- rep(1/nmix, nmix)
ymean0 <- ymean + ysd * runif(nmix, -1.2, 1.2)
ysd0 <- ysd
par0 <- c(p0,ymean0, ysd0^2)
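# Added sanity check (illustrative): compare the analytic gradient of the
# minus log-likelihood with a central finite-difference approximation at par0.
fd.grad <- sapply(seq_along(par0), function(j) {
  h <- 1e-6 * max(1, abs(par0[j]))
  pp <- pm <- par0
  pp[j] <- pp[j] + h
  pm[j] <- pm[j] - h
  (gaussmix.mloglik(pp) - gaussmix.mloglik(pm)) / (2 * h)
})
cat("max |analytic - numeric| gradient difference:",
    max(abs(fd.grad - gaussmix.grad(par0))), "\n")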
# # The inequalities are defined such that: Amat %*% x - b > 0
# with solve.QP in version 2014.1-1 next does not work with EPS=1e-7
# [1] "Failure: Error in projection"
ans <- spg(par=par0, fn=gaussmix.mloglik, gr=gaussmix.grad,
project="projectLinear", projectArgs=list(A=Amat, b=b, meq=meq))
# # Does work!
# require("quadprog")
#
# projectLinearOld <- function (par, A, b, meq){
# n <- length(par)
# if (meq > 0 | any(b - c(A %*% par) > 0)) {
# ans <- solve.QP(Dmat = diag(1, n), dvec = rep(0, n),
# Amat = t(A), bvec = b - c(A %*% par), meq = meq, factorized = TRUE)
# par <- par + ans$solution
# }
# par
# }
#
# ans <- spg(par=par0, fn=gaussmix.mloglik, gr=gaussmix.grad,
# project="projectLinearOld", projectArgs=list(A=Amat, b=b, meq=meq))
if (0 != ans$convergence) stop("test did not converge!")
fuzz <- 5e-5
if(fuzz < max(abs(ans$par -
c( 0.2103359277577284137, 0.2191738028962620377, 0.2174358494266191433,
0.3530544199193904609, 7.0060291485783237064, 11.2527073428970716407,
-0.0166017473519236673, 2.9360474287487265954, 2.0609328632879644339)))){
#above is Mint 3.11.0-12-generic #19-Ubuntu SMP x86_64
# Windows . using R version 3.1.1 (2014-07-10)
# * using platform: x86_64-w64-mingw32 (64-bit)
# 'i386'
# [1] 0.2103348878325739246 0.2191732399236242523 0.2174403704585151642
# [4] 0.3530515017852868809 7.0060519130170799684 11.2527120792098980218
# [7] -0.0165690026560232316 2.9360806878664402753 2.0609368544772692644
# arch 'x64'
# [1] 0.2103355761431467408 0.2191734750015318922 0.2174374428015042882
# [4] 0.3530535060538171899 7.0060405011166100309 11.2527109132279594661
# [7] -0.0165922248714034798 2.9360604372491612146 2.0609297716047816351
print(ans$par, digits=18)
cat("difference:\n")
print(ans$par -
c( 0.2103359277577284137, 0.2191738028962620377, 0.2174358494266191433,
0.3530544199193904609, 7.0060291485783237064, 11.2527073428970716407,
-0.0166017473519236673, 2.9360474287487265954, 2.0609328632879644339),
digits=18)
stop("converged to different parameter values!")
}
if(fuzz < max(abs(ans$value - 1388.64728677794915))){
print(ans$value, digits=18)
stop("converged to different function value!")
}
ans
# $par
# [1] 0.21033488 0.21917325 0.21744042 0.35305145 7.00605176 11.25271191 # -0.01656842 2.93608090 2.06093738
#
# $value
# [1] 1388.647
#
# $gradient
# [1] 0.0001082927
#
# $fn.reduction
# [1] 64.40326
#
# $iter
# [1] 945
#
# $feval
# [1] 1069
#
# $convergence
# [1] 0
#
# $message
# [1] "Successful convergence"
##########################
# Gaussian mixture density using upper and lower
Amat <- matrix(0, 1, 9)
Amat[1, 1:4] <- 1 # corresponds to equality
b <- 1
meq <- 1
ans2 <- spg(par=par0, fn=gaussmix.mloglik, gr=gaussmix.grad,
lower=c(rep(0,4), rep(-Inf, 4), 0), upper=c(rep(1,4), rep(Inf, 4), Inf),
project="projectLinear", projectArgs=list(A=Amat, b=b, meq=meq))
if(fuzz < max(abs(ans$par - ans2$par))){
print(ans2$par, digits=18)
cat("difference:\n")
print(ans$par - ans2$par, digits=18)
stop("converged to different parameter values with lower and upper!")
}
if(fuzz < max(abs(ans$value - ans2$value))){
print(ans2$value, digits=18)
stop("converged to different function value with lower and upper!")
}
ans2
##########################
| /scratch/gouwar.j/cran-all/cranData/BB/inst/slowTests/LinearInequality_Gaussmix.R |
################################################################
require(BB)
# Note that r0 may not converge with a different seed
set.seed(1234)
p0 <- rnorm(2)
fuzz <- 1e-6
# Extended from example in project.Rd
fn <- function(x) (x[1] - 3/2)^2 + (x[2] - 1/8)^4
gr <- function(x) c(2 * (x[1] - 3/2) , 4 * (x[2] - 1/8)^3)
Amat <- matrix(c(1, 0, 0, 1, -1, 0, 0, -1), 4, 2, byrow=TRUE)
b <- c(0, 0, -0.5, -0.5)
meq <- 0
r0 <- spg(par=p0, fn=fn, gr=gr, project="projectLinear",
projectArgs=list(A=Amat, b=b, meq=meq))
if (0 != r0$convergence) stop("lower-upper test 1 did not converge!")
if(fuzz < max(abs(r0$par - c( 0.500000000000000000, 0.136700820055768862)))){
print(r0$par, digits=18)
stop("lower-upper test 0 converged to different parameter values!")
}
if(fuzz < max(abs(r0$value - 1.00000001874412625 ))){
print(r0$value, digits=18)
stop("lower-upper test 0 converged to different function value!")
}
r0
# $par
# [1] 0.5000000 0.1367008
# $value
# [1] 1
# $gradient
# [1] 6.407799e-06
# $fn.reduction
# [1] 6.328745
# $iter
# [1] 10
# $feval
# [1] 11
# $convergence
# [1] 0
# $message
# [1] "Successful convergence"
#############
# Note that the above should be the same as all the following:
#############
r1 <- spg(par=p0, fn=fn, gr=gr, lower=0, upper=0.5,
project="projectLinear",
projectArgs=list(A=matrix(NA,0,2), b=vector("numeric", 0), meq=0))
if(fuzz < max(abs(r0$par - r1$par))){
print(r1$par, digits=18)
stop("lower-upper test 1 converged to different parameter values!")
}
if(fuzz < max(abs(r0$value - r1$value ))){
print(r1$value, digits=18)
stop("lower-upper test 1 converged to different function value!")
}
#############
r2 <- spg(par=p0, fn=fn, gr=gr, lower=0, upper=0.5)
if(fuzz < max(abs(r0$par - r2$par))){
print(r2$par, digits=18)
stop("lower-upper test 2 converged to different parameter values!")
}
if(fuzz < max(abs(r0$value - r2$value ))){
print(r2$value, digits=18)
stop("lower-upper test 2 converged to different function value!")
}
#############
r3 <- spg(par=p0, fn=fn, gr=gr, lower=c(0,0), upper=c(0.5, 0.5))
if(fuzz < max(abs(r0$par - r3$par))){
print(r3$par, digits=18)
stop("lower-upper test 3 converged to different parameter values!")
}
if(fuzz < max(abs(r0$value - r3$value ))){
print(r3$value, digits=18)
stop("lower-upper test 3 converged to different function value!")
}
#############
r4 <- BBoptim(par=p0, fn=fn, gr=gr, lower= 0, upper= 0.5)
if(fuzz < max(abs(r0$par - r4$par))){
print(r4$par, digits=18)
stop("lower-upper test 4 converged to different parameter values!")
}
if(fuzz < max(abs(r0$value - r4$value ))){
print(r4$value, digits=18)
stop("lower-upper test 4 converged to different function value!")
}
#############
set.seed(12345) # r0 above fails to converge with this seed
pmat <- matrix(rnorm(40), 20, 2) # 20 starting values each of length 2
r5 <- multiStart(par=pmat, fn=fn, gr=gr,
lower=c(0,0), upper=c(0.5, 0.5), action="optimize")
r5$par[r5$converged, ] #converged solutions
unique(r5$par[r5$converged, ] )
# [,1] [,2]
# [1,] 0.5 0.1141247
# [2,] 0.5 0.1146409
# [3,] 0.5 0.1122670
# [4,] 0.5 0.1145620
# [5,] 0.5 0.1135830
# [6,] 0.5 0.1355988
# [7,] 0.5 0.1116386
print(unique(r5$fvalue[r5$converged] ), digits=18)
# [1] 1.00000001398813110 1.00000001151578677 1.00000002628593299
# [4] 1.00000001187043175 1.00000001699065155 1.00000001261909199
# [7] 1.00000003187147524
# unique and choosing [1,] are too sensitive to seed setting, and
# will not be robust (especially on different platforms)
# if(0.01 < max(abs(r0$par - unique(r5$par[r5$converged,])[1,]))){
# print(r5$par[r5$converged,], digits=18)
# stop("lower-upper test 5 converged to different parameter values!")
# }
if(fuzz < max(abs(r0$value - r5$fvalue[r5$converged] ))){
print(r5$fvalue, digits=18)
stop("lower-upper test 5 converged to different function value!")
}
################################################################
## Rosenbrock Banana function from project.Rd with
## additional lower and upper constraint
fr <- function(x) {
x1 <- x[1]
x2 <- x[2]
100 * (x2 - x1 * x1)^2 + (1 - x1)^2
}
# Impose a constraint that sum(x) = 1
p0 <- c(0.4, 0.94) # need feasible starting point
r6 <- spg(par=p0, fn=fr, lower=c(-0.6, -Inf), upper=c(0.6, Inf),
project="projectLinear", projectArgs=list(A=matrix(1, 1, 2), b=1, meq=1),
control=list(maxit=5000))
print(r6$value, digits=18)
print(r6$par, digits=18)
if(fuzz < max(abs(r6$par - c( 0.599999994039535522, 0.400000005960464478)))){
print(r6$par, digits=18)
stop("lower-upper test 6 converged to different parameter values!")
}
if(fuzz < max(abs(r6$value - 0.320000109672563315 ))){
print(r6$value, digits=18)
stop("lower-upper test 6 converged to different function value!")
}
################################################################
| /scratch/gouwar.j/cran-all/cranData/BB/inst/slowTests/lower-upper.R |
require(BB)
set.seed(123)
rosbkext.f <- function(x){
n <- length(x)
sum (100*(x[1:(n-1)]^2 - x[2:n])^2 + (x[1:(n-1)] - 1)^2)
}
n <- 20
p0 <- rnorm(n)
# 2 constraints: parameters sum to 1 and the last parameter is non-negative
# x[1] + ... + x[n] = 1
# x[n] >= 0
Amat <- rbind(rep(1,n), c(rep(0,n-1),1))
b <- c(1, 0)
# with projectLinear as in release 2014.1-1 next gave
# Failure in initial projection!Error in solve.QP(dvec = rep(0, n), Amat = t(A),
# bvec = b - c(A %*% par), :
# Error in projection
ans <- spg(par=p0, fn=rosbkext.f, project="projectLinear",
projectArgs=list(A=Amat, b=b, meq=1),
control=list(maxit=2500, ftol=1e-14, gtol = 1.e-10))
delta <- ans$par - c(
  5.46001058136910467e-01,  3.00133145466337903e-01,  9.30077533280728036e-02,
  1.18680513875347674e-02,  3.38851273609307169e-03,  3.25955271864946869e-03,
  3.25870517822328398e-03,  3.25869904467344929e-03,  3.25870153610197805e-03,
  3.25869504035516781e-03,  3.25869964139831152e-03,  3.25870210072135361e-03,
  3.25869386108257157e-03,  3.25870148633357684e-03,  3.25869879980698884e-03,
  3.25869926113893892e-03,  3.25869852720016805e-03,  3.25856193090499902e-03,
  3.23766981846091914e-03, -6.09863722023096244e-19)
if(1e-5 < max(abs((delta)))){
cat("ans$par:\n")
print(ans$par, digits=18)
cat("difference:\n")
print(delta , digits=18)
stop("converged to different parameter values!")
}
if(1e-12 < max(abs(ans$value - 17.4152583858026482 ))){
cat("ans$value\n")
print(ans$value, digits=18)
stop("converged to different function value!")
}
if(1e-12 < abs(sum(ans$par) - 1.0 )){
cat("ans$par:\n")
print(ans$par, digits=18)
cat("constraint sum to 1.0 not satified. Value:\n")
print(sum(ans$par) , digits=18)
stop("constraint sum to 1.0 not satified!")
}
if((0.0 - 1e-12) > ans$par[length(ans$par)]){
cat("ans$par:\n")
print(ans$par, digits=18)
cat("last parameter positive not satified. Value:\n")
print(ans$par[length(ans$par)] , digits=18)
stop("constraint sum to 1.0 not satified!")
}
ans
# 2014 version and previous gave following (not very different) but was
# indicating convergence when it had not really been obtained.
# Controls ftol and gtol have been tightened.
# $par
# [1] 5.460011e-01 3.001331e-01 9.300775e-02 1.186805e-02 3.388513e-03
# [6] 3.259553e-03 3.258705e-03 3.258699e-03 3.258702e-03 3.258695e-03
# [11] 3.258700e-03 3.258702e-03 3.258694e-03 3.258701e-03 3.258699e-03
# [16] 3.258699e-03 3.258699e-03 3.258562e-03 3.237670e-03 -6.098637e-19
#
# $value
# [1] 17.41526
#
# $gradient
# [1] 0.0001785687
#
# $fn.reduction
# [1] 5362.369
#
# $iter
# [1] 52
#
# $feval
# [1] 53
#
# $convergence
# [1] 0
#
# $message
# [1] "Successful convergence"
| /scratch/gouwar.j/cran-all/cranData/BB/inst/slowTests/spg_constraints.R |
#####################################################################################
#
#
# FUNCTIONS THAT IMPLEMENT VARIOUS BAESIAN MODELS
#
#
#####################################################################################
# get.posterior(par, data, skel, knots.x, Gr, p.bkg)
#
# returns: wrapper to the target function.
# called from DEoptim
# arguments
# par: DEoptim parameter
# data: an object of type data
# skel: DEoptim parameter
# knots.x: the spline knot x-values.
# Gr: low-r G(r) information
# p.bkg: the probability that a single pixel contains "only" background.
get.posterior <- function(par, data, skel=NA, knots.x, Gr=NA, p.bkg=.5){
par <- relist(par, skel)
knots.y <- if(is.null(par$knots.y)) NA else par$knots.y
alpha <- if(is.null(par$alpha)) 1 else par$alpha
ADP <- if(is.null(par$ADP)) NA else par$ADP
knots.n <- length(knots.x)
pars <- if(is.null(par$pars)) NA else par$pars
  if(!is.na(ADP[1])){
    # recalculate coherent baseline
    n.atoms <- data$fitADP$n.atoms
    ADP <- rep(ADP, length.out=length(n.atoms))
scatter.length <- data$fitADP$scatter.length
N_total <- sum(n.atoms)
f.av2 <- (sum(n.atoms*scatter.length)/N_total)^2
f2.av <- sum(n.atoms*scatter.length^2)/N_total
L <- (f.av2-f2.av)/f.av2
expADP <- 0
for(i in 1:length(n.atoms))
expADP <- expADP + n.atoms[i]*scatter.length[i]^2*exp(-ADP[i]*data$x^2)/N_total/f2.av
data$SB <- 1-expADP*(1-L)
}
if(!is.na(pars[1]))
posterior <- logPosteriorAnalyt(data=data, pars=pars, p.bkg=p.bkg)
else{
posterior <- logPosterior(data=data, alpha=alpha, knots.x=knots.x,
knots.y=knots.y, Gr=Gr, p.bkg=p.bkg)
}
return(posterior)
}
####################################################################################
# logPosterior(data, alpha, knots.x, knots.y, Gr, p.bkg)
#
# returns: the optimization function from Fischer et al.: the negative log of
# the posterior.
# arguments
# data: an object of type data. See reference manual.
# alpha: scale parameter.
# knots.x: the spline knot x-values.
# knots.y: the spline knot y-values ('c' in Fischer et al.).
# Gr: low-r G(r) information
# p.bkg: the probability that a single pixel contains "only" background.
logPosterior <- function(data, alpha, knots.x, knots.y, Gr=NA, p.bkg=0.5) {
Phi <- basisMatrix(x=data$x, knots.x=knots.x)
# 1. Prior
psi.prior <- logPriorF(knots.x, knots.y)$prior
# 2. Likelihood:
psi.likelihood.bkg <- logLikelihoodBkg( knots.y=knots.y, y=data$y-data$SB, Phi=Phi, p.bkg=p.bkg, sigma=data$sigma)
psi.likelihood.signal <- logLikelihoodSignal(knots.y=knots.y, y=data$y-data$SB, Phi=Phi, p.bkg=p.bkg, sigma=data$sigma, lambda=data$lambda)
max.log <- pmax(psi.likelihood.bkg, psi.likelihood.signal) # parallel max --> vector of max values
# avoiding inf values:
psi.likelihood <- max.log + log(exp(psi.likelihood.bkg - max.log) + exp(psi.likelihood.signal - max.log))
psi.likelihood <- -sum(psi.likelihood)
# 3. Gr=-4*Pi*rho*r restriction
psi.gr <- 0
if(!is.na(Gr[1]))
psi.gr <- logProbabilityBkgR(y=data$y-data$SB, Gr=Gr, alpha=alpha, Phi=Phi, knots.y=knots.y)
# 4. Together
psi <- psi.prior + psi.likelihood + psi.gr
return(psi)
}
logPosteriorAnalyt <- function(data, pars, p.bkg=0.5) {
# 2. Likelihood:
psi.likelihood.bkg <- logLikelihoodBkgAnalyt(pars=pars, x=data$x, y=data$y-data$SB, p.bkg=p.bkg, sigma=data$sigma)
psi.likelihood.signal <- logLikelihoodSignalAnalyt(pars=pars, x=data$x, y=data$y-data$SB, p.bkg=p.bkg, sigma=data$sigma, lambda=data$lambda)
max.log <- pmax(psi.likelihood.bkg, psi.likelihood.signal) # parallel max --> vector of max values
# avoiding inf values:
psi.likelihood <- max.log + log(exp(psi.likelihood.bkg - max.log) + exp(psi.likelihood.signal - max.log))
psi.likelihood <- -sum(psi.likelihood)
return(psi.likelihood)
}
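# Illustration (comment only, added): the pmax/log construction above is the
# standard log-sum-exp trick for evaluating log(exp(a) + exp(b)) without
# underflow, e.g.
#   a <- -1500; b <- -1501
#   m <- pmax(a, b); m + log(exp(a - m) + exp(b - m))   # approx -1499.69
#   log(exp(a) + exp(b))                                # underflows to -Inf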
####################################################################################
# logProbabilityBkgR(y, knots.y, alpha, Phi, Gr)
#
# returns: the negative log of G(r)-part of the likelihood.
# arguments
# y: signal values.
# knots.y: the spline knot y-values ('c' in Fischer et al.).
# alpha: scale parameter.
# Phi: basis matrix.
# Gr: low-r G(r) information.
logProbabilityBkgR <- function(y, knots.y, alpha, Phi, Gr){
if(is.na(Gr$type1))
psi.gr.r1 <- 0
else if(Gr$type1=="gaussianNoise")
psi.gr.r1 <- logLikelihoodGrGauss(y=y, knots.y=knots.y, alpha=alpha, Phi=Phi, bkg.r=Gr$bkg.r,
sigma.r=Gr$sigma.r, matrix.FT=Gr$matrix.FT1)$likelihood
else if(Gr$type1=="correlatedNoise")
psi.gr.r1 <- logLikelihoodGrCorr(knots.y=knots.y, Phi=Phi, bkg.r=Gr$bkg.r,
KG.inv=Gr$KG.inv, matrix.FT=Gr$matrix.FT1)$likelihood
if(is.na(Gr$type2))
psi.gr.r2 <- 0
else if(Gr$type2=="secondDeriv")
psi.gr.r2 <- logPriorBkgRSmooth(bkg.r=Gr$matrix.FT2 %*% (Phi %*% t(t(knots.y))), D=Gr$D)$likelihood
else if(Gr$type2=="gaussianProcess")
psi.gr.r2 <- logPriorBkgRGP(bkg.r=Gr$matrix.FT2 %*% (Phi %*% t(t(knots.y))), covMatrix=Gr$covMatrix)$likelihood
return(psi.gr.r1 + psi.gr.r2)
}
########################################
# logPriorF(knots.x, knots.y)
#
# returns: the negative log of the prior (due to Fischer et al.)
# arguments:
# knots.x: the spline knot x-values ('xi' in Fischer et al.)
# knots.y: the spline knot y-values ('c' in Fischer et al.)
logPriorF <- function(knots.x, knots.y, Hessian=FALSE){
E <- length(knots.y)
if (length(knots.x) != E) {
stop("Inconsistent lengths of parameters knots.y and knots.x!")
}
D <- DMatrix(knots.x=knots.x)
detD <- D$det
D <- D$matrix
cDc <- as.vector(t(knots.y) %*% D %*% t(t(knots.y)))
# Prior: separate terms according to which variables contribute
prior.E <- (0.5 * E * log(pi) - log(gamma(0.5 * E))) - sum(log(1:E))
# "Depends" on knots.x, but constant as long as knots.x OK.
prior.E.xi <- -0.5 * log(detD)
prior.E.xi.cc <- 0.5 * E * log(cDc)
prior <- prior.E + prior.E.xi + prior.E.xi.cc
hess <- NA
if(Hessian==TRUE)
hess <- E/2 * (2*(D / cDc) - (4 * D %*% t(t(knots.y)) %*% t(knots.y) %*% D) / (cDc ^ 2) )
return (list(prior=prior, grad=(t(knots.y) %*% D) * E / cDc, hess=hess))
}
#####################################################################################
# logPriorBkgRSmooth(bkg.r, D, Hessian, Phi, matrix.FT, knots.y)
#
# returns: prior in the r-space
# arguments
logPriorBkgRSmooth <- function(bkg.r, D, Hessian=FALSE, Phi=NA, matrix.FT=NA, knots.y=NA){
E <- length(bkg.r)
cDc <- as.vector(t(bkg.r) %*% D %*% t(t(bkg.r)))
# Prior: separate terms according to which variables contribute
prior.E <- 0.5 * E * log(pi) - log(gamma(0.5 * E)) - sum(log(1:E))
# "Depends" on knots.x, but constant as long as knots.x OK.
# prior.E.xi <- -0.5 * log(detD)
prior.E.xi <- 0
prior.E.xi.cc <- 0.5 * E * log(cDc)
prior <- prior.E + prior.E.xi + prior.E.xi.cc
hess <- NA
if(Hessian==TRUE){
D <- t(Phi) %*% t(matrix.FT) %*% D %*% matrix.FT %*% Phi
hess <- E/2 * (2*(D / cDc) - (4 * D %*% t(t(knots.y)) %*% t(knots.y) %*% D) / (cDc ^ 2) )
}
return (list(likelihood=prior, hess=hess))
}
#####################################################################################
# logPriorBkgRGP(bkg.r, covMatrix, Hessian)
#
# returns: prior in the r-space due to covariance matrix
# arguments
logPriorBkgRGP <- function(bkg.r, covMatrix, Hessian=FALSE){
N <- length(bkg.r)
f <- covMatrix$factor
term.volume <- 0.5*N*log(2*pi)
term.det <- 0.5*log(covMatrix$det) - 0.5*N*log(f)
term.bkg <- f*0.5*t(bkg.r) %*% covMatrix$inv %*% t(t(bkg.r))
prior <- term.volume + term.det + term.bkg
hess <- NA
if(Hessian==TRUE)
hess <- f*covMatrix$inv
return (list(likelihood=prior, hess=hess))
}
#####################################################################################
# logLikelihoodBkg(knots.y, y, Phi, p.bkg, sigma)
#
# returns: the background-only contribution to the likelihood
# arguments
# knots.y: 'c' in Fischer et al.: vector of spline knot y-values
# y: vector of datapoints
# Phi: spline matrix taking 'c' into 'y'
# p.bkg: the probability that a single pixel contains "only" background.
# sigma: vector of experimental uncertainties
logLikelihoodBkg <- function(knots.y, y, Phi, p.bkg, sigma) {
deviation <- (y - Phi %*% t(t(knots.y))) # 'y-bkg'_i should be eps_i
deviation.norm <- deviation / sigma
likelihood <- log(p.bkg) - 0.5 * log(2 * pi) - log(sigma) - 0.5 * deviation.norm ^ 2
likelihood
}
logLikelihoodBkgAnalyt <- function(pars, x, y, p.bkg, sigma) {
deviation <- (y - bkg.analyt(pars,x)) # 'y-bkg'_i should be eps_i
deviation.norm <- deviation / sigma
likelihood <- log(p.bkg) - 0.5 * log(2 * pi) - log(sigma) - 0.5 * deviation.norm ^ 2
likelihood
}
#####################################################################################
# logLikelihoodSignal(knots.y, y, Phi, p.bkg, sigma, lambda)
#
# returns: the signal-containing contribution to the likelihood
# arguments
# knots.y: 'c' in Fischer et al.: vector of spline knot y-values
# y: vector of datapoints; 'd' at Fisher et al.
# Phi: spline matrix taking 'c' into 'y'
# p.bkg: The probability that a single pixel contains "only" background.
# sigma: vector of experimental uncertainties
# lambda: mean signal magnitude for signal-containing pixels. Either
# vector of length y or a scalar value
logLikelihoodSignal <- function(knots.y, y, Phi, p.bkg, sigma, lambda) {
rho <- sigma / lambda
z <- (y - (Phi %*% t(t(knots.y)))) / lambda
qq <- z / rho - rho
likelihood <- log(1 - p.bkg) - log(lambda) + pnorm(log.p=TRUE, q=qq) - z + 0.5 * (rho ^ 2)
likelihood
}
logLikelihoodSignalAnalyt <- function(pars, x, y, p.bkg, sigma, lambda) {
rho <- sigma / lambda
z <- (y - bkg.analyt(pars,x)) / lambda
qq <- z / rho - rho
likelihood <- log(1 - p.bkg) - log(lambda) + pnorm(log.p=TRUE, q=qq) - z + 0.5 * (rho ^ 2)
likelihood
}
#####################################################################################
# logLikelihoodGrCorr(knots.y, Phi, bkg.r, KG.inv, matrix.FT, Hessian)
#
# returns: the 'G(r)=-4*Pi*rho*r restriction' contribution to the likelihood.
# arguments
logLikelihoodGrCorr <- function(knots.y, Phi, bkg.r, KG.inv, matrix.FT, Hessian=FALSE){
M.Phi <- matrix.FT %*% Phi
KG.inv.M.Phi <- KG.inv %*% M.Phi
mu <- t(KG.inv.M.Phi) %*% bkg.r # KG.inv is a symmetric matrix
J <- t(KG.inv.M.Phi) %*% M.Phi
psi.gr <- 0.5 * (t(knots.y) %*% J) %*% t(t(knots.y)) - t(mu) %*% t(t(knots.y))
psi.gr <- sum(psi.gr)
hess <- NA
if(Hessian==TRUE)
hess <- J
return (list(likelihood=psi.gr, hess=hess))
}
#####################################################################################
#logLikelihoodGrGauss(y, knots.y, alpha, Phi, bkg.r, sigma.r, matrix.FT, Hessian=FALSE)
#
# returns: the 'G(r)=-4*Pi*rho*r restriction' contribution to the likelihood.
# arguments
logLikelihoodGrGauss <- function(y, knots.y, alpha, Phi, bkg.r, sigma.r, matrix.FT, Hessian=FALSE){
############
# y = SIGNAL - SB
deviation <- bkg.r/alpha + matrix.FT %*% ( (1-1/alpha)*y - Phi %*% t(t(knots.y)) )
deviation.norm <- deviation / sigma.r
psi.gr <- 0.5 * log(2 * pi) + log(sigma.r) + 0.5 * deviation.norm^2
psi.gr <- sum(psi.gr)
hess <- NA
if(Hessian==TRUE){
Mprime <- matrix.FT%*%Phi / (sqrt(2)*sigma.r)
hess <- 2*t(Mprime) %*% Mprime
}
return (list(likelihood=psi.gr, hess=hess))
}
#########################################################
# basisSpline(x, knots.x, knots.i, deriv)
#
# returns: the i'th spline basis function (i.e. that is nonzero (=1)
# only in the ith knot) or its derivative at points x.
# arguments
# x: x-values where we evaluate the basis functions.
# knots.x: knot positions
# knots.i: which basis function to return
# deriv: which derivative of the spline function (0 to 3)
basisSpline <- function(x, knots.x, knots.i, deriv=0){
y <- 0*knots.x
y[knots.i] <- 1
basisSpline <- splinefun(x=knots.x, y=y, method="natural")
return(basisSpline(x=x, deriv=deriv))
}
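# Example (comment only, added): the i-th basis function interpolates the
# i-th unit vector, i.e. it equals 1 at knots.x[i] and 0 at all other knots:
#   kx <- seq(0, 10, length = 5)
#   basisSpline(x = kx, knots.x = kx, knots.i = 3)   # c(0, 0, 1, 0, 0)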
#########################################################
# basisMatrix(x, knots.x,deriv)
#
# returns: matrix whose columns are spline basis functions
# values at points x; if an arbitrary function is
# known only at points knots.x, multiplying it by
# basisMatrix results in function spline approximations
# at points x
# arguments
# x: x-values where we evaluate the basis functions.
# knots.x: knot positions
# deriv: which derivative of the spline function?
basisMatrix <- function(x, knots.x, deriv=0) {
E <- length(knots.x)
N <- length(x)
bM <- matrix(nrow=N, ncol=E)
for (i in 1:E) {
bM[, i] <- basisSpline(x=x, knots.x=knots.x, knots.i=i, deriv=deriv)
}
return(bM)
}
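# Example (comment only, added): for a function known only at the knots,
# multiplying its knot values by the basis matrix yields its natural-spline
# interpolant on a finer grid:
#   kx <- seq(0, 2*pi, length = 15)
#   xx <- seq(0, 2*pi, by = 0.01)
#   f.spl <- basisMatrix(x = xx, knots.x = kx) %*% sin(kx)   # close to sin(xx)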
########################################################################################
# DMatrix(knots.x, onlyTrace, robustFactor)
#
# returns: the second derivative overlap matrix ('D' in Fischer et al.
# (1999)) of the spline basis functions with knots at 'knots.x'.
# arguments
# knots.x: spline knot positions
# onlyTrace: if true, returns trace(D) (a single number) instead of D.
# robustFactor: We add a constant to all eigenvalues to promote stability;
# this is the ratio of that constant to the smallest
# eigenvalue which is SUPPOSED to be nonzero.
DMatrix <- function(knots.x, onlyTrace=FALSE, robustFactor=1e-12) {
# cat("Calculating basis matrix...")
N <- length(knots.x)
ddM <- basisMatrix(x=knots.x, knots.x=knots.x, deriv=2)
# cat(" done!\n")
x.mat <- matrix(rep(knots.x, N), nrow=N)
dy <- apply(ddM, 2, diff)
dx <- apply(x.mat, 2, diff)
# slope[i, j] is the slope of the i'th segment of the j'th basis function;
# intercept[i, j] is its y-intercept
slope <- dy / dx
intercept <- ddM[-1, ] - slope * x.mat[-1, ]
d.x3 <- diff(knots.x ^ 3)
d.x2 <- diff(knots.x ^ 2)
# The for-loop code below reduces to this if we only care about diagonal
# elements (which is the case for computing the trace).
if (onlyTrace) {
return (sum((slope ^ 2) * d.x3 / 3.0 + (slope * intercept) * d.x2 +
intercept ^ 2 * diff(knots.x)))
}
# If we're this far, we need to compute the whole matrix.
DD <- matrix(0, nrow=N, ncol=N)
# computing basis function support:
N2 <- floor(N/2)
phi.N2 <- basisSpline(x=knots.x, knots.x=knots.x, knots.i=N2, deriv=2)
max.phi <- max(phi.N2)
x3.factor <- (max(knots.x)^3-min(knots.x)^3)/3
supp <- min(length(which(abs(phi.N2)>1e-9*max.phi/x3.factor)), N-1) # only i-supp to i+supp matters
# cat("Calculating overlap intergals...\n")
for (i in 1:N) {
# if (i %% 100 == 0)
# cat("...x=", knots.x[i], "\n")
relevant <- min(i+supp, N)
phi.i.supp.min <- max(1, i-ceiling(supp/2))
phi.i.supp.max <- min(N-1, i+floor(supp/2))
for (j in i:relevant) {
phi.j.supp.min <- max(1, j-ceiling(supp/2))
phi.j.supp.max <- min(N-1, j+floor(supp/2))
phi.ij.supp <- max(phi.i.supp.min, phi.j.supp.min):min(phi.i.supp.max, phi.j.supp.max)
prod.ss <- slope[phi.ij.supp, i] * slope[phi.ij.supp, j] / 3.0
prod.si <- 0.5 * (slope[phi.ij.supp, i] * intercept[phi.ij.supp, j] + slope[phi.ij.supp, j] * intercept[phi.ij.supp, i])
prod.ii <- intercept[phi.ij.supp, i] * intercept[phi.ij.supp, j]
integrals <- prod.ss * d.x3[phi.ij.supp] + prod.si * d.x2[phi.ij.supp] + prod.ii * diff(knots.x)[phi.ij.supp]
DD[i, j] <- DD[j, i] <- sum(integrals)
}
}
# cat("...done!\n")
# cat("Calculating D-matrix eigenvalues...")
# "modified" determinant (i.e., product of highest (N-2) eigenvalues)
eigenvals <- eigen(x=DD, symmetric=TRUE, only.values=TRUE)$values
# cat(" done!\n")
detD <- prod(rev(eigenvals)[-(1:2)])
# Make sure we avoid negative eigenvalues!
lowest.true.eigenval <- sort(eigenvals)[3]
return (list(matrix=DD + lowest.true.eigenval * robustFactor * diag(nrow(DD)), det=detD))
}
#########################################################
# get.bkg(x, knots.x, knots.y)
#
# returns: natural spline approximation of function knots.y
# arguments
# x: points at which function should be approximated
# knots.x: knot position
# knots.y: knot values
get.bkg <- function(x, knots.x, knots.y){
bM <- basisMatrix(x=x, knots.x=knots.x)
get.bkg <- c(bM %*% t(t(knots.y)))
return(get.bkg)
}
bkg.analyt <- function(pars, x){
  pars[1]*exp(-pars[2]*x)*x^pars[3] + pars[4]/((x-pars[5])^2+pars[6]^2)
}
| /scratch/gouwar.j/cran-all/cranData/BBEST/R/bayesian.R |
#####################################################################################
#
#
# FUNCTIONS TO PERFORM BACKGROUND FIT VIA DIFFERENTIAL EVOLUTION ALGORITHM
#
#
#####################################################################################
# set.control(CR, F, NP, itermax, parallelType)
#
# returns: wrapper to set.DEoptim.control
# arguments
# CR: crossover probability from interval [0,1]
# F: differential weighting factor from interval [0,2]
# NP: number of population members
# itermax: the maximum iteration (population generation) allowed
# parallelType: defines the type of parallelization to employ.
set.control <- function(CR=.85, F=.7, NP=300, itermax=2000, parallelType=1){
control <- list()
control$CR <- CR
control$F <- F
control$NP <- NP
control$itermax <- itermax
control$parallelType <- parallelType
return(control)
}
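# Example (comment only, added): a smaller population and fewer generations
# for a quick trial run:
#   ctrl <- set.control(NP = 100, itermax = 500, parallelType = 0)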
#####################################################################################
# set.DEoptim.control(control, knots.n)
#
# returns: DEoptim control parameters
# arguments
# control: return value of set.control
# knots.n: knots number
set.DEoptim.control <- function(control=list(), knots.n){
if(is.null(control$CR))
CR <- 0.85
else
CR <- control$CR
if(is.null(control$F))
F <- 0.7
else
F <- control$F
if(is.null(control$NP))
NP <- round(2.5*knots.n*10)
else
NP <- control$NP
if(is.null(control$itermax))
itermax <- 2000
else
itermax <- control$itermax
if(is.null(control$parallelType))
parallelType <- 1
else
parallelType <- control$parallelType
DE.control <- DEoptim.control(CR=CR, F=F, NP=NP, itermax=itermax, parallelType=parallelType,
# packages = list("PerformanceAnalytics"),
parVar=list("basisMatrix", "basisSpline",
"logPosterior", "logPriorF", "DMatrix", "logLikelihoodBkg", "logLikelihoodSignal",
"get.bkg", "logProbabilityBkgR", "logLikelihoodGrGauss", "logLikelihoodGrCorr",
"logPriorBkgRSmooth", "logPriorBkgRGP", "Dx", "invert.order", "set.SB",
"logPosteriorAnalyt", "bkg.analyt", "logLikelihoodBkgAnalyt", "logLikelihoodSignalAnalyt"))
DE.control
}
#####################################################################################
# do.fit(data, bounds.lower, bounds.upper, scale, knots.x, knots.n, analytical, stdev, control, p.bkg, save.to)
#
# returns: Performs evolutionary global background optimization via DEoptim
# arguments
# data: object of type data. Contains experimental data and fit parameters
# bounds.lower, lower and upper boundaries for background
# bounds.upper:
# scale: scale factor
# knots.x: the spline knot x-values.
# knots.n: number of knots.
# stdev: Whether to calculate uncertainty in background estimation
# control: the return value of set.control
# p.bkg: the probability that a single pixel contains "only" background
# save.to: the name of the file where the results will be saved
do.fit <- function(data, bounds.lower, bounds.upper, scale=c(1,1), knots.x=NA,
knots.n=NA, analytical=FALSE, stdev=TRUE, control=list(), p.bkg=.5, save.to=""){
# 1. Prepare data to fit...
cat("Preparing data... \n")
if( (scale[1]==1) && (scale[2]==1) ) scale <- NA
cov.r <- ADP <- knots.y <- pars <- NA
alpha <- 1
if(is.null(data$Gr))
Gr <- NA
else{
Gr <- data$Gr
data$Gr <- NA
}
if(is.null(data$SB))
data$SB <- rep(0, length(data$x))
else if (is.na(data$SB[1]))
data$SB <- rep(0, length(data$x))
# 2. prepare fit params...
if(is.na(knots.x[1]) && !is.na(knots.n))
knots.x <- seq(min(data$x), max(data$x), length=knots.n)
if(is.na(knots.n))
knots.n <- length(knots.x)
if(analytical==TRUE){
iter.0 <- as.relistable(list(pars=rep(0, 6)))
bounds.lower <- c(pars=c(-abs(bounds.lower[1]*10), -5, -5, -20, 0, 0))
bounds.upper <- c(pars=c(abs(bounds.upper[1]*10), 10, 5, 2000,
max(data$x), max(data$x)*2))
knots.n <- 6
}
else{
iter.0 <- as.relistable(list(knots.y=rep(0, knots.n)))
bounds.lower <- c(knots.y=rep(bounds.lower[1], knots.n))
bounds.upper <- c(knots.y=rep(bounds.upper[1], knots.n))
}
if(!is.na(scale[1])){
iter.0$alpha <- 0
bounds.lower <- c(bounds.lower, alpha=scale[1])
bounds.upper <- c(bounds.upper, alpha=scale[2])
}
if(!is.null(data$fitADP)){
if(data$fitADP$oneADP==TRUE)
nn <- 1
else
nn <- length(data$fitADP$n.atoms)
iter.0$ADP <- rep(0, nn)
bounds.lower <- c(bounds.lower, ADP=rep(data$fitADP$ADP.lim[1], nn))
bounds.upper <- c(bounds.upper, ADP=rep(data$fitADP$ADP.lim[2], nn))
}
DE.control <- set.DEoptim.control(control, knots.n)
# 3. Starting fit!
#
# initial guess
# cc <- matrix(nrow=DE.control$NP, ncol=length(knots.x))
# Phi <- basisMatrix(data$x, knots.x)
# for(ii in 1:DE.control$NP){
# spar=(0.9-0.5)/DE.control$NP*ii+0.5
# lowpass.spline <- smooth.spline(data$x,data$y-data$SB, spar = spar) ## Control spar for amount of smoothing
# cc[ii,] <- c(solve(t(Phi)%*%Phi) %*% t(Phi) %*% (predict(lowpass.spline, data$x)$y) )
# }
# DE.control$initialpop=cc
cat("Starting DifEv algorithm... \n")
doPbkgIter <- FALSE
if(p.bkg==-1){
doPbkgIter <- TRUE
p.bkg <- 0.02
}
DEoptim.fit <- DEoptim(get.posterior, lower = bounds.lower, upper = bounds.upper,
control= DE.control, skel=iter.0, data=data,
knots.x=knots.x, Gr=Gr, p.bkg=p.bkg)
if(doPbkgIter){
cat("\n\n P.bkg iteration... \n\n")
if(analytical==TRUE){
pars <- DEoptim.fit$optim$bestmem[1:6]
bkg <- bkg.analyt(pars=pars, x=data$x)
}
else{
knots.y <- DEoptim.fit$optim$bestmem[1:knots.n]
bkg <- get.bkg(x=data$x, knots.x=knots.x, knots.y=knots.y)
}
dev.norm <- (data$y-bkg-data$SB)/data$sigma
p.bkg <- 0.5*50^((1-dev.norm^2)/8)
DEoptim.fit <- DEoptim(get.posterior, lower = bounds.lower, upper = bounds.upper,
control= DE.control, skel=iter.0, data=data,
knots.x=knots.x, Gr=Gr, p.bkg=p.bkg)
}
if(analytical==TRUE){
pars <- DEoptim.fit$optim$bestmem[1:6]
bkg <- bkg.analyt(pars=pars, x=data$x)
}
else{
knots.y <- DEoptim.fit$optim$bestmem[1:knots.n]
bkg <- get.bkg(x=data$x, knots.x=knots.x, knots.y=knots.y)
}
if(!is.null(data$fitADP)){
ADP <- tail(DEoptim.fit$optim$bestmem, nn)
data <- set.SB(data, n.atoms=data$fitADP$n.atoms,
scatter.length=data$fitADP$scatter.length, ADP=ADP)
}
data$Gr <- Gr
if(!is.na(scale[1])){ # recalculating signal and params...
alpha <- DEoptim.fit$optim$bestmem[knots.n+1]
data$y <- (data$y-bkg-data$SB)*alpha + data$SB + bkg
data$lambda <- data$lambda*alpha
data$sigma <- data$sigma*alpha
if(!is.na(Gr[1]))
data$Gr$sigma.r <- Gr$sigma.r * alpha
}
cat("Background estimation complete! \n")
if(stdev==TRUE && analytical==FALSE){
cat("Calculating uncertainty in background... \n")
stdev <- get.hess(data, knots.x, knots.y, Gr=data$Gr, r=seq(0, 2, 0.01), p.bkg=p.bkg)
}
curves <- list(y=data$y, bkg=bkg, SB=data$SB)
knots <- list(x=knots.x, y=knots.y)
fit.details <- list(lambda=data$lambda, sigma=data$sigma, knots.n=knots.n,
control=control, Gr=data$Gr, n.atoms=data$fitADP$n.atoms,
scatter.length=data$fitADP$scatter.length, id=data$id,
bounds.lower=bounds.lower['knots.y1'],
bounds.upper=bounds.upper['knots.y1'])
fit.results <- list(x=data$x, curves=curves, uncrt=stdev, knots=knots,
pars=pars, scale=alpha, ADP=ADP, fit.details=fit.details)
if(save.to!=""){
cat("Saving results to file ", save.to, "\n")
save(fit.results, file=save.to)
}
cat("...done! \n")
return(fit.results)
}
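# Typical call (illustrative sketch, added; assumes 'dat' is an object of
# type data that already holds x, y, sigma, lambda and SB):
#   fit <- do.fit(dat, bounds.lower = 0, bounds.upper = 10, knots.n = 20,
#                 control = set.control(itermax = 1000), save.to = "fit.RData")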
#####################################################################################
# do.fit.banks(data, bounds.lower, bounds.upper, knots.n.left, knots.n.right, x.boundary, analytical, control, save.to)
#
# returns: Performs evolutionary global background optimization via DEoptim
# (wrapper to do.fit) for several banks.
# arguments
# data: object of type data. Contains experimental data and fit parameters
# bounds.lower, lower and upper boundaries for background
# bounds.upper:
# knots.n.left, specify knot positions. knots.n.left and knots.n.right knots are created on the
# knots.n.right, left and on the right of x.boundary point, respectively
# x.boundary:
# control: the return value of set.control
# save.to: the name of the file where the results will be saved
do.fit.banks <- function(data, bounds.lower, bounds.upper, knots.n.left=NA,
knots.n.right=NA, x.boundary=NA, analytical=FALSE, control, save.to=""){
N <- length(data)
fit.res <- list()
knots.x <- NA
for(i in 1:N){
cat("\n\n ===================================\n\n")
cat("Fitting bank # ",i,"\n\n")
x <- data[[i]]$x
if(analytical==FALSE){
dx <- ((max(x)-x.boundary)/(knots.n.right-1) + x.boundary/(knots.n.left-1) )/2
knots.x <- seq(0, x.boundary, length=knots.n.left)
knots.x <- c(knots.x, seq(x.boundary+dx, max(x), length=knots.n.right))
}
fit.res[[i]] <- do.fit(data[[i]], bounds.lower, bounds.upper, knots.x=knots.x,
analytical=analytical, stdev=FALSE, control=control, save.to="")
fit.res[[i]]$fit.details$id <- data[[i]]$id
}
if(save.to!=""){
cat("Saving results to file ", save.to, "\n")
save(fit.res, file=save.to)
}
fit.res
}
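# Usage sketch for do.fit.banks (all numeric values are illustrative; the
# variables 'banks' and 'ctrl' are assumed to come from read.sqa /
# prepare.banks.data and set.control, respectively):
# ctrl <- set.control(NP = 300, itermax = 500, parallelType = 1)
# fits <- do.fit.banks(banks, bounds.lower = -1, bounds.upper = 1,
#                      knots.n.left = 10, knots.n.right = 15, x.boundary = 10,
#                      control = ctrl)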
#####################################################################################
# do.iter(fit.results, local, eps, n.iter, save.to)
#
# returns: Corrects the data for the low-r discrepancy between background fits
#          obtained with and without G(r) constraints, re-estimates the
#          background for the corrected data, and returns the updated
#          fit.results object.
do.iter <- function(fit.results, local=TRUE, eps=1e-4, n.iter=10000, save.to=""){
dat <- list(x=fit.results$x, y=fit.results$curves$y, SB=fit.results$curves$SB,
lambda=fit.results$fit.details$lambda, sigma=fit.results$fit.details$sigma)
knots.x <- fit.results$knots$x
knots.y <- fit.results$knots$y
cat("Adjusting baseline...", save.to, "\n")
if(local)
cc <- grad.descent(data=dat, knots.x, knots.y, Gr=NA, p.bkg=0.5, eps=eps, N=n.iter)
else{
control <- fit.results$fit.details$control
bounds.lower <- fit.results$fit.details$bounds.lower
bounds.upper <- fit.results$fit.details$bounds.upper
cat("\n\n Starting DifEv to find bkg with no low-r constraints... \n\n")
ff <- do.fit(dat, bounds.lower, bounds.upper, knots.x=knots.x,
stdev=FALSE, control=control)
cc <- ff$knots$y
}
  if(any(is.na(cc))) stop("NA in the adjusted background; perhaps adjust the fit parameters")
l <- fit.results$curves$bkg
l.no.r <- get.bkg(dat$x, knots.x, cc)
r <- seq(0, 1.0, 0.005)
gr <- sineFT(f.Q=dat$y-l-1, Q=dat$x, r=r)
gr.no.r <- sineFT(f.Q=dat$y-l.no.r-1, Q=dat$x, r=r)
  d <- gr.no.r - gr  # difference between fits with and without G(r) info; expected < 0
l.corr <- 0
dr <- r[2]-r[1]
for(i in 1:length(dat$x)){
l.corr[i] <- sum(d*sin(dat$x[i]*r)*dr/dat$x[i])
}
dat$SB <- dat$SB - l.corr
cat("\n\n Estimating background for the corrected data... \n\n")
if(local)
cc2 <- grad.descent(data=dat, knots.x, cc, Gr=fit.results$fit.details$Gr, p.bkg=0.5, eps=eps, N=n.iter)
else{
Gr <- fit.results$fit.details$Gr
dat <- set.Gr(dat, r1=Gr$r1, r2=Gr$r2, rho.0=Gr$rho.0,
type1=Gr$type1, type2=Gr$type2)
ff2 <- do.fit(dat, bounds.lower, bounds.upper, knots.x=knots.x,
stdev=FALSE, control=control)
cc2 <- ff2$knots$y
}
  if(any(is.na(cc2))) stop("NA in the re-estimated background; perhaps adjust the fit parameters")
l2 <- get.bkg(dat$x, knots.x, cc2)
fit.results$curves$bkg <- l2
fit.results$knots$y <- cc2
fit.results$curves$corr <- l.corr
if(length(fit.results$uncrt)>1){
cat("Calculating uncertainty in background... \n")
stdev <- get.hess(dat, knots.x, knots.y, Gr=fit.results$fit.details$Gr, r=seq(0, 2, 0.01), p.bkg=.5)
}
if(save.to!=""){
cat("Saving results to file ", save.to, "\n")
save(fit.results, file=save.to)
}
return(fit.results)
}
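# Usage sketch for do.iter (assumes 'fit.res' is the return value of do.fit,
# obtained with low-r G(r) constraints set via set.Gr; eps and n.iter keep
# their default values here):
# fit.res <- do.iter(fit.res, local = TRUE, eps = 1e-4, n.iter = 10000)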
# MATMUL
#matmul <- function(A, B){
# if(!is.loaded("matmul"))
# dyn.load("./matmul.dll")
# matrix(.C("matmul_matmul_R",
# heightA=as.integer(nrow(A)),
# widthA=as.integer(ncol(A)),
# widthB=as.integer(ncol(B)),
# A=as.double(c(A)),
# rstrideA=as.integer(ncol(A)),
# B=as.double(c(B)),
# rstrideB=as.integer(ncol(B)),
# C=as.double(rep(0, nrow(A)*ncol(B))),
# rstrideC=as.integer(ncol(B)))$C,
# nrow=nrow(A), ncol=ncol(B))
#} | /scratch/gouwar.j/cran-all/cranData/BBEST/R/fits.R |
guide <- function(){
cat("Hello! This function will guide you through Bayesian background estimation procedure in 7 simple steps. Be careful, it has limited functionality and can be unstable to missteps and misspells! In this guide some of the parameters are set to their recommended values. Therefore, if you want to adjust them, use corresponding functions 'manually'. If you do not understand the meaning of the requested parameters please refer to the reference manual. \n")
# STEP I
# 1. Read data
# 2. Plot data
act <- TRUE
while(act){
cat("=============\n")
cat("1. ")
dat <- step1()
step2(dat)
cat("\n Do you want to return and re-do this step? Please type 'yes', 'no' or 'exit' to leave the guide \n")
answ <- readline("")
if(answ=="exit"){
cat("saving results... done!\n")
return(dat)
}
if(answ=="no")
act <- FALSE
}
cat("\n")
# STEP II
# 3. trim data
# 4. plot data
act <- TRUE
while(act){
cat("=============\n")
cat("2. ")
dat2 <- step3(dat)
step2(dat2)
cat("\n Do you want to return and re-do this step? Please type 'yes', 'no' or 'exit' to leave the guide \n")
answ <- readline("")
if(answ=="exit"){
cat("saving results... done!\n")
return(dat)
}
if(answ=="no"){
act <- FALSE
dat <- dat2
}
}
cat("\n")
# STEP III
# 5. Coherent baseline
act <- TRUE
while(act){
cat("=============\n")
cat("3. ")
dat <- step5(dat)
cat("\n Do you want to return and re-do this step? Please type 'yes', 'no' or 'exit' to leave the guide \n")
answ <- readline("")
if(answ=="exit"){
cat("saving results... done!\n")
return(dat)
}
if(answ=="no")
act <- FALSE
}
cat("\n")
# STEP IV
# 6. Noise
# 7. Plot
act <- TRUE
while(act){
cat("=============\n")
cat("4. ")
dat <- step6(dat)
step7(dat)
cat("\n Do you want to return and re-do this step? Please type 'yes', 'no' or 'exit' to leave the guide \n")
answ <- readline("")
if(answ=="exit"){
cat("saving results... done!\n")
return(dat)
}
if(answ=="no")
act <- FALSE
}
cat("\n")
# STEP V
# 8. Lambda
act <- TRUE
while(act){
cat("=============\n")
cat("5. ")
dat <- step8(dat)
cat("\n Do you want to return and re-do this step? Please type 'yes', 'no' or 'exit' to leave the guide \n")
answ <- readline("")
if(answ=="exit"){
cat("saving results... done!\n")
return(dat)
}
if(answ=="no")
act <- FALSE
}
cat("\n")
# STEP VI
# 9. Gr
act <- TRUE
while(act){
cat("=============\n")
cat("6. ")
dat <- step9(dat)
cat("\n Do you want to return and re-do this step? Please type 'yes', 'no' or 'exit' to leave the guide \n")
answ <- readline("")
if(answ=="exit"){
cat("saving results... done!\n")
return(dat)
}
if(answ=="no")
act <- FALSE
}
cat("\n")
# STEP VII
# 10. fit params
act <- TRUE
while(act){
cat("=============\n")
cat("7. ")
ctrl <- step10()
cat("\n Do you want to return and re-do this step? Please type 'yes', 'no' or 'exit' to leave the guide \n")
answ <- readline("")
if(answ=="exit"){
cat("saving results... done!\n")
return(dat)
}
if(answ=="no")
act <- FALSE
}
cat("\n")
cat("That's it! Press enter to start fit. Be patient, it may take a while \n")
tmp <- scan()
fit.res <- do.fit(data=dat, bounds.lower=ctrl$bl, bounds.upper=ctrl$bu,
scale=ctrl$sc, knots.x=ctrl$kx, knots.n=ctrl$kn, p.bkg=.5,
stdev=TRUE, control=ctrl$ctrl, save.to=ctrl$st)
# Plot fit results
cat("\n Do you want to plot background estimation (to do this packages 'ggplot2' and 'gridExtra' have to be installed)? Please type 'yes' or 'no' \n")
answ <- readline("")
if(answ=="yes")
mPlot.results(fit.res)
# Calculate and plot GR
gr<-NA
cat("\n Do you want to calculate and plot corrected PDF (to do this package 'ggplot2' have to be installed)? Please type 'yes' or 'no' \n")
answ <- readline("")
if(answ=="yes"){
if(is.null(dat$Gr$rho.0)){
cat("Please provide the atomic number density: \n")
rho.0 <- readline("")
rho.0 <- as.numeric(unlist(strsplit(rho.0, ",")))[1]
}
else
rho.0 <- dat$Gr$rho.0
gr <- calc.Gr(fit.res, rho.0, r.max=10)
}
# Save results
cat("\n Do you want to save fit results to a text file? If yes, please type file name. If no, press enter \n")
answ <- readline("")
if(answ!="")
write.fit.results(fit.res, file = answ)
# Last note...
cat("Finishing the guide... \n If this guide was called as 'myVar <- guide()' then: \n")
cat("myVar$data contains experimental data, estimated noise, lambda and baseline. See set.data in reference manual for details \n")
cat("myVar$fit.res contains results of the fit. See do.fit \n")
cat("myVar$Gr contains the calculated corrected PDF. See calc.Gr \n")
if(answ!="")
cat("file ", answ, "contains fit results in a text format \n")
if(ctrl$st!="")
cat("file ", ctrl$st, "contains fit results in a R format \n")
return(list(fit.res=fit.res, data=dat, gr=gr))
}
#########################################################
#########################################################
step1 <- function(){
cat("First step is to read data. Do you want to read data from \n 1. text file \n or \n 2. .sqb-file, returned by PDFgetN? \n Type 1 or 2 and press Enter. \n")
step1 <- readline("")
step1 <- as.numeric(unlist(strsplit(step1, ",")))
if(step1==1){
cat("Provide file name (): \n")
step1.name <- readline("")
dat <- read.data(file=step1.name)
}
else if(step1==2){
cat("Provide file name: \n")
step1.name <- readline("")
dat <- read.sqb(file=step1.name)
}
else{
stop("Please type 1 or 2.")
}
return(dat)
}
#########################################################
step2 <- function(dat){
cat("Do you want to plot data? Please type 'yes' or 'no' (no quotes) \n")
step2 <- readline("")
if(step2=="yes"){
cat("Provide plotting region on this step. \n Enter x coordinate minimum \n")
step2.x1 <- readline("")
step2.x1 <- as.numeric(unlist(strsplit(step2.x1, ",")))
cat("Enter x coordinate maximum \n")
step2.x2 <- readline("")
step2.x2 <- as.numeric(unlist(strsplit(step2.x2, ",")))
cat("Enter y coordinate minimum \n")
step2.y1 <- readline("")
step2.y1 <- as.numeric(unlist(strsplit(step2.y1, ",")))
cat("Enter y coordinate maximum \n")
step2.y2 <- readline("")
step2.y2 <- as.numeric(unlist(strsplit(step2.y2, ",")))
plot(dat$x, dat$y, t="l", xlab="x", ylab="y",
xlim=c(step2.x1, step2.x2), ylim=c(step2.y1, step2.y2))
}
}
#########################################################
step3 <- function(dat){
cat("Do you want to truncate data (x-region)? Please type 'yes' or 'no'\n")
step3 <- readline("")
if(step3=="yes"){
cat("Enter new x minimum value \n")
step3.min <- readline("")
step3.min <- as.numeric(unlist(strsplit(step3.min, ",")))
cat("Enter new x maximum value \n")
step3.max <- readline("")
step3.max <- as.numeric(unlist(strsplit(step3.max, ",")))
dat <- trim.data(dat, x.min=step3.min, x.max=step3.max)
}
return(dat)
}
#########################################################
step5 <- function(dat){
cat("Do your data contain smooth baseline that shouldn't be subtracted (for example elastic coherent baseline in neutron scattering) and wasn't specified on step 1? Please type 'yes' or 'no'")
step5 <- readline("")
if(step5=="yes"){
cat("For neutron total scattering experiment we can calculate smooth baseline as a sum of elastic coherent scattering and Laue diffuse scattering. If you have other form of baseline please provide it on step 1 (include it in your text file as a third column with header 'SB') or specify text file that contains it. For neutron scattering experiment, do you \n 1. know APD values \n 2. want to fit ADP values \n 3. it isn't a neutron scattering experiment (baseline will be set to 0)? \n Please type 1, 2 or 3 \n")
step5.choice <- readline("")
step5.choice <- as.numeric(unlist(strsplit(step5.choice, ",")))
if(step5.choice==3){
if(any(is.na(dat$SB)))
dat$SB <- rep(0, length(dat$x))
}
if((step5.choice==1) || (step5.choice==2)){
cat("Type number of atoms of each type per unit cell divided by space and press double enter (e.g. for NaCl you should type '4 4') \n")
n.atoms <- scan()
cat("Type neutron scattering length for atoms of each type divided by space and press double enter (e.g. for NaCl you should type '3.63 9.58') \n")
scatter.length <- scan()
cat("Do you want to use single value for the ADP for all atom types? Please type 'yes' or 'no' \n")
step5.oneADP <- readline("")
if(step5.oneADP=="yes")
oneADP=TRUE
else
oneADP=FALSE
if(step5.choice==1){
cat("Please provide the ADP(s) and press double enter \n")
step5.ADP <- scan()
dat <- set.SB(dat, fit=FALSE, oneADP=oneADP, n.atoms=n.atoms,
scatter.length=scatter.length, ADP=step5.ADP)
}
if(step5.choice==2){
cat("Please provide the limits for the ADP fit (upper and lower bounds, divided by space) \n")
step5.ADP.lim <- scan()
dat <- set.SB(dat, fit=TRUE, oneADP=oneADP, n.atoms=n.atoms,
scatter.length=scatter.length, ADP.lim=step5.ADP.lim)
}
}
}
return(dat)
}
#########################################################
step6 <- function(dat){
cat("Although noise in diffraction experiments is per se Poisson, various corrections can destroy its structure. We suggest considering the experimental uncertainty as having Gaussian distribution with x-dependent amplitude. Splitting x-region into N segments and estimating Gaussian standard deviation over these segments allows us to approximate the true noise-distribution. The other way to approximate noise level is to consider it uniform. In that case the best approximation can be obtained on a signal-free region, i.e. on a region that contains only background. \n Please type integer number N if you want to divide x-range into N segments for independent noise-level estimation or type bounds for a signal-free region (two numbers divided by space), and press double enter \n")
step6 <- scan("")
cat("\n Thanks!\n")
cat("Please type estimated error variance or leave it empty: \n")
sigma2 <- scan("")
if(length(sigma2) == 0)
sigma2 <- c(0.1)
if(length(step6)==1)
dat <- set.sigma(dat, n.regions=step6, sigma2 = sigma2)
if(length(step6)==2)
dat <- set.sigma(dat, x.bkg.only=step6, sigma2 = sigma2[1])
return(dat)
}
#########################################################
step7 <- function(dat){
cat("\n Do you want to plot data +/- 2 sd for estimated noise level? Please type 'yes' or 'no'\n")
step7 <- readline("")
if(step7=="yes"){
cat("Provide plotting region on this step. \n Enter x coordinate minimum \n")
step7.x1 <- readline("")
step7.x1 <- as.numeric(unlist(strsplit(step7.x1, ",")))
cat("Enter x coordinate maximum \n")
step7.x2 <- readline("")
step7.x2 <- as.numeric(unlist(strsplit(step7.x2, ",")))
cat("Enter y coordinate minimum \n")
step7.y1 <- readline("")
step7.y1 <- as.numeric(unlist(strsplit(step7.y1, ",")))
cat("Enter y coordinate maximum \n")
step7.y2 <- readline("")
step7.y2 <- as.numeric(unlist(strsplit(step7.y2, ",")))
plot(dat$x, dat$y, t="l", xlab="x", ylab="y",
xlim=c(step7.x1, step7.x2), ylim=c(step7.y1, step7.y2))
lines(dat$x, dat$smoothed, col=2)
lines(dat$x, dat$smoothed + 2* dat$sigma, col=4)
lines(dat$x, dat$smoothed - 2* dat$sigma, col=4)
}
}
#########################################################
step8 <- function(dat){
cat("On this step we estimate the mean signal magnitude (lambda). lambda is calculated as a linear piecewise function which is equal to lambda_0 outside the [x.min, x.max] region. Inside this region lambda is approximated by a line connecting points (x_1;lambda_1) and (x_2;lambda_2). Estimate these parameters to obtain a line that connects centres of the two most distant peaks. High accuracy isn't required on this step. \n Type lambda_0, lambda_1, lambda_2, x_1, and x_2 divided by spaces and press double enter")
step8 <- scan("")
dat <- set.lambda(dat, lambda_0=step8[1], lambda_1=step8[2],
lambda_2=step8[3], x_1=step8[4], x_2=step8[5])
return(dat)
}
#########################################################
step9 <- function(dat){
cat("Do you want to include information on low-r G(r) behaviour in Bayesian model (see reference manual for details)? Please type 'yes' or 'no'\n")
step9 <- readline("")
if(step9=="yes"){
cat("Provide the atomic number density: \n")
step9.rho <- readline("")
step9.rho <- as.numeric(unlist(strsplit(step9.rho, ",")))
cat("Indicate bounds for a low-r peak-free region (normally 0..1-2 Angstrom). Type two numbers divided by space and press double enter: \n")
step9.r <- scan()
dat <- set.Gr(dat, r1=seq(step9.r[1], step9.r[2], 0.005), rho.0=step9.rho,
type1="gaussianNoise")
}
return(dat)
}
#########################################################
step10 <- function(){
cat("Now the data are ready. Lets specify fit parameters. \n")
cat("Enter bounds for background estimation (you want lower and upper bounds to be some smaller and bigger than real minimum and maximum background values, respectively). Put two numbers divided by space and press double enter \n")
step10.bd <- scan("")
cat("Enter bounds for normalization parameter. If you don't want normalization parameter to be fitted type '1 1' (no quotes) \n")
step10.sc <- scan("")
cat("Enter spline knot positions. Put more knots in the region where background demonstrates less smooth behaviour. To use N equidistant knots simply type integer number N \n")
step10.kn <- scan("")
if(length(step10.kn)==1){
knots.x <- NA
knots.n <- step10.kn
}
else{
knots.x <- step10.kn
knots.n <- length(knots.x)
}
cat("Enter the maximum iteration (population generation) allowed and number of population members (NP). For the most tasks it is best to set NP to be at least 10-15 times the length of the parameter vector, which includes spline knot positions, and, optionally, normalization and ADP parameters. Type two numbers divided by space and press double enter \n")
step10.pp <- scan("")
step10.cl <- set.control(NP=step10.pp[2], itermax=step10.pp[1], parallelType=1)
cat("Enter name of the file where the results will be saved. The usual extension is '.RData'. If you don't want to save results to file simply press enter \n")
step10.fi <- readline("")
  ctrl <- list(bl=step10.bd[1], bu=step10.bd[2], sc=step10.sc,
kx=knots.x, kn=knots.n, ctrl=step10.cl, st=step10.fi)
return(ctrl)
}
| /scratch/gouwar.j/cran-all/cranData/BBEST/R/guide.R |
#####################################################################################
#
#
# FUNCTIONS TO LOAD AND PREPARE DATA FOR THE FIT
# See reference manual for description
#
############################################
#
# GUI
#
#
runUI <- function()
shiny::runApp(system.file('gui', package='BBEST'), launch.browser=TRUE)
####################################
# FUNCTIONS FOR SEPARATE DATA BANKS
#
read.sqa <- function(file = stop("'file' must be specified")){
sqa <- scan(file=file, what="list", sep="\n")
N <- length(sqa)
i.start <- 0
nBanks <- 0
for(i in 1:N){
if(strsplit(sqa[i], split=" ")[[1]][1]=="#L"){
i.start[nBanks+1] <- i+1
nBanks <- nBanks + 1
}
}
ids <- 0
for(i in 1:nBanks)
ids[i] <- strsplit(sqa[i.start[i]-4], split = " ")[[1]][4]
dat <- read.table(file=file, header=FALSE, col.names=c("x", "y", "e1", "e2", "e3"))
res <- list()
bank <- 1
i.start <- 1
for(i in 2:length(dat$x)){
if(dat$x[i] < dat$x[i-1]){
res[[bank]] <- list()
class(res[[bank]]) <- "data"
res[[bank]]$x <- dat$x[i.start:(i-1)]
res[[bank]]$y <- dat$y[i.start:(i-1)]
res[[bank]]$SB <- rep(0, length(res[[bank]]$x))
res[[bank]]$id <- ids[bank]
bank <- bank + 1
i.start <- i
}
}
res[[bank]] <- list()
class(res[[bank]]) <- "data"
res[[bank]]$x <- dat$x[i.start:length(dat$x)]
res[[bank]]$y <- dat$y[i.start:length(dat$x)]
res[[bank]]$SB <- rep(0, length(res[[bank]]$x))
res[[bank]]$id <- ids[bank]
  for(i in 1:bank)
    res[[i]]$y[ which( is.na(res[[i]]$y) ) ] <- 0
return(res)
}
###
prepare.banks.data <- function(data, n.banks=4, lambda_1, lambda_2, lambda_0, x_1, x_2,
n.atoms, scatter.length, ADP, n.regions){
for(i in 1:n.banks){
cat("\n\n==================================\n\n")
cat("Preparing bank # ", i, "\n")
cat("\n")
data[[i]] <- set.sigma(data[[i]], n.regions=n.regions)
data[[i]] <- set.lambda(data[[i]], lambda=NA, lambda_1, lambda_2, lambda_0, x_1, x_2)
data[[i]] <- set.SB(data[[i]], SB=NA, n.atoms, scatter.length, ADP, fit=FALSE)
}
data
}
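# Usage sketch ("experiment.sqa" is a hypothetical file name; the numeric
# values are illustrative, with the NaCl-like scattering lengths quoted in
# guide()):
# banks <- read.sqa(file = "experiment.sqa")
# banks <- prepare.banks.data(banks, n.banks = length(banks),
#                             lambda_1 = 2, lambda_2 = 0.8, lambda_0 = 0.5,
#                             x_1 = 2, x_2 = 20, n.atoms = c(4, 4),
#                             scatter.length = c(3.63, 9.58), ADP = 0.015,
#                             n.regions = 10)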
###
write.fix <- function(fit.results, file = stop("'file' must be specified")){
N <- length(fit.results)
if(!is.null(fit.results$fit.details)){
fit.results <- list(fit.results)
N <- 1
}
options(warn=-1)
for(i in 1:N){
if(i==1) apnd <- FALSE else apnd <- TRUE
write(c(paste("#S ",i," Correction File for Bank ",fit.results[[i]]$fit.details$id,sep=""), "#L Q MULT ADD"), file=file, append=apnd)
res <- cbind(fit.results[[i]]$x, rep(1,length(fit.results[[i]]$x)), -fit.results[[i]]$curves$bkg)
write.table(res, file=file, append=TRUE, col.names=FALSE, row.names=FALSE, quote=FALSE, sep="\t")
}
options(warn=0)
}
####################################
# GENERAL PURPOSE FUNCTIONS
#
read.sqb <- function(file = stop("'file' must be specified")){
sqb <- scan(file=file, what="list", sep="\n")
N <- length(sqb)
for(i in 1:N){
if(strsplit(sqb[i], split=" ")[[1]][1]=="#L"){
i.start <- i+1
break
}
}
  # read the remaining numeric block directly via a text connection
  dat <- read.table(text = sqb[i.start:N])
dat[which(is.na(dat[,2])),2] <- 0
return(list(x=dat[,1], y=dat[,2], sigma=rep(0, length(dat[,1])), lambda=rep(0, length(dat[,1])), SB=rep(0, length(dat[,1])) ))
}
###
set.data <- function(x, y, sigma=NA, lambda=NA, SB=NA){
data <- list()
data$x <- x
data$y <- y
if(length(sigma)==1)
data$sigma <- rep(sigma, length(x))
else
data$sigma <- sigma
data$lambda <- lambda
data$SB <- SB
class(data) <- "data"
return(data)
}
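# Usage sketch (toy values): wrap raw vectors into a 'data' object
# x <- seq(0.02, 25, 0.02)
# dat <- set.data(x = x, y = rep(1, length(x)), sigma = 0.01)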
###
read.data <- function(file = stop("'file' must be specified"), ...){
data <- read.table(file=file, header=TRUE,...)
if(is.null(data$sigma))
data$sigma <- NA
if(is.null(data$lambda))
data$lambda <- NA
if(is.null(data$SB))
data$SB <- 0
if(is.null(data$x))
colnames(data)[1] <- "x"
if(is.null(data$y))
colnames(data)[2] <- "y"
data$y[which(is.na(data$y))] <- 0
class(data) <- "data"
return(data)
}
###
trim.data <- function(data, x.min, x.max){
ind.min <- which(abs(data$x-x.min)==min(abs(data$x-x.min)))
ind.max <- which(abs(data$x-x.max)==min(abs(data$x-x.max)))
cut <- ind.min:ind.max
dat <- list()
class(dat) <- "data"
dat$x <- data$x[cut]
dat$y <- data$y[cut]
if(!is.null(data$SB)) dat$SB <- data$SB[cut] else dat$SB <- rep(0, length(dat$x))
if(!is.null(data$sigma)) dat$sigma <- data$sigma[cut] else dat$sigma <- rep(NA, length(dat$x))
if(!is.null(data$lambda)) dat$lambda <- data$lambda[cut] else dat$lambda <- rep(NA, length(dat$x))
if(!is.null(data$smoothed)) dat$smoothed <- data$smoothed[cut] else dat$smoothed <- rep(NA, length(dat$x))
if(!is.null(data$id)) dat$id <- data$id
return(dat)
}
###
set.sigma <- function(data, sigma=NA, x.bkg.only=NA, n.regions=10, hmax=250, sigma2=c(0.1)){
y.smoothed <- NA
k <- hmax
if(length(sigma2) == 1) sigma2 <- rep(sigma2, n.regions)
if(is.na(sigma[1])){
if(is.na(x.bkg.only[1])){
if(length(k)==1)
k <- rep(k, n.regions)
n <- floor(length(data$x)/n.regions)
x.bkg.i <- 1:n
sigma <- 0
y.smoothed <- 0
for(i in 1:n.regions){
cat("\n step ", i, " of ", n.regions, "\n\n")
if(i==n.regions)
x.bkg.i <- x.bkg.i[1]:length(data$x)
y.sm <- aws::aws(data$y[x.bkg.i], hmax=k[i], sigma2 = sigma2[i])@theta
sig <- sqrt(mean((y.sm-data$y[x.bkg.i])^2))
sigma <- c(sigma, rep(sig, length(x.bkg.i)))
y.smoothed <- c(y.smoothed, y.sm)
x.bkg.i <- x.bkg.i + n
}
y.smoothed <- y.smoothed[-1]
sigma <- sigma[-1]
}
else{
x.min.i <- which(abs(data$x-x.bkg.only[1])==min(abs(data$x-x.bkg.only[1])))
x.max.i <- which(abs(data$x-x.bkg.only[2])==min(abs(data$x-x.bkg.only[2])))
x.bkg.i <- x.min.i:x.max.i
y.smoothed <- aws::aws(data$y[x.bkg.i], hmax=k, sigma2 = sigma2[1])@theta
sigma <- rep(sqrt(mean((y.smoothed-data$y[x.bkg.i])^2)), length(data$x))
y.smoothed <- c(data$y[1:(x.min.i-1)], y.smoothed, data$y[(x.max.i+1):length(data$x)])
}
}
else{
if(length(sigma)==1) sigma <- rep(sigma, length(data$x))
}
data$sigma <- sigma
data$smoothed <- y.smoothed
return(data)
}
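# Usage sketch: estimate the noise level either over 10 independent segments,
# or uniformly from a (hypothetical) signal-free region between x = 20 and 25:
# dat <- set.sigma(dat, n.regions = 10)
# dat <- set.sigma(dat, x.bkg.only = c(20, 25))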
###
set.lambda <- function(data, lambda=NA, lambda_1=NA, lambda_2=NA, lambda_0=NA, x_1=NA, x_2=NA){
if(is.na(lambda[1])){
if(is.na(x_1)) x_1 <- min(data$x)
if(is.na(x_2)) x_2 <- max(data$x)
lambda <- rep(lambda_0, length(data$x))
a0 <- (lambda_1 - lambda_2) / (x_1-x_2)
b0 <- (lambda_1*x_2 - lambda_2*x_1) / (x_2-x_1)
ind.max <- which(abs(data$x-x_2)==min(abs(data$x-x_2)))
ind.min <- which(abs(data$x-x_1)==min(abs(data$x-x_1)))
lambda[ind.min:ind.max] <- a0 * data$x[ind.min:ind.max] + b0
}
lambda[which(lambda<=0)] <- 1e-6
data$lambda <- lambda
return(data)
}
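# Usage sketch (illustrative values): lambda equals lambda_0 outside [x_1, x_2]
# and interpolates linearly from lambda_1 to lambda_2 inside:
# dat <- set.lambda(dat, lambda_0 = 0.5, lambda_1 = 2, lambda_2 = 0.8,
#                   x_1 = 2, x_2 = 20)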
###
set.SB <- function(data, SB=NA, n.atoms=NA, scatter.length=NA, ADP=NA, fit=FALSE, oneADP=TRUE, ADP.lim=c(0, 0.05)){
if(is.na(SB[1])){
    if(any(is.na(n.atoms)) || any(is.na(scatter.length)))
      stop("Please provide SB or parameters n.atoms and scatter.length\n")
    if(any(is.na(ADP)) && !fit)
      stop("Please provide ADP or set fit=TRUE\n")
if(fit==TRUE){
data$fitADP <- list(n.atoms=n.atoms, scatter.length=scatter.length, oneADP=oneADP, ADP.lim=ADP.lim)
SB <- rep(0, length(data$x))
}
else{
data$fitADP <- NULL
if(length(ADP)==1) ADP <- rep(ADP, length(n.atoms))
N_total <- sum(n.atoms)
f.av2 <- (sum(n.atoms*scatter.length)/N_total)^2
f2.av <- sum(n.atoms*scatter.length^2)/N_total
expADP <- 0
for(j in 1:length(data$x))
expADP[j] <- sum(n.atoms*scatter.length^2*exp(-ADP*data$x[j]^2)/N_total/f2.av)
L <- (f.av2-f2.av)/f.av2
SB <- 1-expADP*(1-L)
}
}
data$SB <- SB
return(data)
}
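# Usage sketch (NaCl-like values, cf. the example quoted in guide()):
# dat <- set.SB(dat, n.atoms = c(4, 4), scatter.length = c(3.63, 9.58),
#               ADP = 0.015, fit = FALSE)
# or let the fit determine a single ADP within given limits:
# dat <- set.SB(dat, n.atoms = c(4, 4), scatter.length = c(3.63, 9.58),
#               fit = TRUE, oneADP = TRUE, ADP.lim = c(0, 0.05))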
###
set.Gr <- function(data, r1=seq(0, 1, 0.005), r2=NA, rho.0,
type1="gaussianNoise", type2=NA, sigma.f=NA, l=NA){
K.DI <- list()
  KG.inv <- matrix.FT1 <- matrix.FT2 <- sigma.r <- bkg.r <- ff <- D <- NA
K.DI$inv <- K.DI$det <- NA
if(is.na(type1))
cat("No constraints on G(r) behaviour included. \n")
else if(type1=="gaussianNoise"){
# noise in r-space
matrix.FT1 <- sineFT.matrix(Q=data$x, r=r1)
delta <- c(diff(data$x)[1], diff(data$x))
sigma.r <- 0
cat("Calculating r-space noise... \n")
for(j in 1:length(r1)){
sigma.r[j] <- sum((2/pi*delta*data$x*sin(data$x*r1[j])*data$sigma)^2)
sigma.r[j] <- sqrt(sigma.r[j])
}
# avoid dividing by zero
if(sigma.r[1]==0)
sigma.r[1] <- sigma.r[2]
cat("Calculating FT of the experimental data... \n")
bkg.r <- sineFT(f.Q=data$y-1, Q=data$x, r=r1) + 4 * pi * rho.0 * r1
    # SB is excluded from the background estimation through the SB term;
    # note that data$y = S(Q) = F(Q) + 1
}
else if(type1=="correlatedNoise"){
matrix.FT1 <- sineFT.matrix(Q=data$x, r=r1)
cat("Calculating noise covariance matrix in r-space... \n")
KG <- noise.cov.matrix.r(r=r1, Q=data$x, sigma=data$sigma)
diag(KG) <- diag(KG) + abs(min(eigen(KG)$values)) * 1e4 # avoid singularity
KG.inv <- solve(KG)
cat("Calculating FT of the experimental data... \n")
bkg.r <- sineFT(f.Q=data$y-1, Q=data$x, r=r1) + 4 * pi * rho.0 * r1
}
else
stop("Wrong type of low-r Gr contribution to likelihood. Should be either 'gaussianNoise' or 'correlatedNoise'\n")
if(is.na(type2))
cat("No constraints on bkg(r) behaviour included. \n")
else if(type2=="gaussianProcess"){
matrix.FT2 <- sineFT.matrix(Q=data$x, r=r2)
K <- covMatrixSE(x=r2, sig=sigma.f, l=l)
ff <- K$factor
K <- K$cov
K.DI <- covMatrix.DI(K)
}
else if(type2=="secondDeriv"){
matrix.FT2 <- sineFT.matrix(Q=data$x, r=r2)
D <- DMatrix(knots.x=r2)$matrix
}
else
stop("Wrong type of low-r2 condition. Should be either 'gaussianProcess' or 'secondDeriv'\n")
cat("...done! \n")
data$Gr <- list(type1=type1, type2=type2, sigma.r=sigma.r, bkg.r=bkg.r,
matrix.FT1=matrix.FT1, matrix.FT2=matrix.FT2, KG.inv=KG.inv, D=D,
covMatrix=list(inv=K.DI$inv, det=K.DI$det, factor=ff), rho.0=rho.0, r1=r1, r2=r2)
return(data)
}
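# Usage sketch (assumes dat$sigma has been set via set.sigma; the number
# density rho.0 = 0.0848 is illustrative and material-specific):
# dat <- set.Gr(dat, r1 = seq(0, 1, 0.005), rho.0 = 0.0848,
#               type1 = "gaussianNoise")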
###
write.fit.results <- function(fit.results, file = stop("'file' must be specified")){
x <- fit.results$x
y <- fit.results$curves$y - fit.results$curves$bkg
SB <- fit.results$curves$SB
bkg <- fit.results$curves$bkg
if(length(fit.results$uncrt)>1)
stdev <- fit.results$uncrt$stdev
else
stdev <- rep(NA, length(x))
scale <- fit.results$scale
f <- fit.results$fit.details$scatter.length
N <- fit.results$fit.details$n.atoms
ADP <- fit.results$ADP
if(is.null(f)) f <- NA
if(is.null(N)) N <- NA
if(is.null(ADP)) ADP <- NA
m <- cbind(f, N, ADP)
exp.data <- (y-SB)/scale + bkg + SB
res <- cbind(x, y, stdev, SB, bkg, exp.data)
knots.x <- fit.results$knots$x
knots.y <- fit.results$knots$y
knots <- cbind(knots.x, knots.y)
knots <- format(knots,digits=6)
options(warn=-1)
write(c("# scale factor:", scale), file=file, append=FALSE)
cat("\n", file=file, append=TRUE)
write(c("# Atomic Displacement Parameters:"), file=file, append=TRUE)
write.table(m, file=file, append=TRUE, col.names=c("f","N","ADP"), row.names=FALSE, quote=FALSE)
cat("\n", file=file, append=TRUE)
write(c("# knots positions:"), file=file, append=TRUE)
write.table(knots, file=file, append=TRUE, col.names=TRUE, row.names=FALSE, quote=FALSE, sep="\t")
cat("\n", file=file, append=TRUE)
cat("############################################################## \n", file=file, append=TRUE)
cat("# fit results \n", file=file, append=TRUE)
cat("# columns: x; (scaled) corrected y; standard deviation in y due to noise and bkg uncertainty; coherent baseline; estimated background; raw data \n", file=file, append=TRUE)
write.table(res, file=file, append=TRUE, col.names=TRUE, row.names=FALSE, quote=FALSE, sep="\t")
options(warn=0)
}
sqa.split <- function(file = stop("'file' must be specified")){
sqa <- scan(file=file, what="list", sep="\n")
N <- length(sqa)
i.start <- 0
nBanks <- 0
for(i in 1:N){
if(strsplit(sqa[i], split=" ")[[1]][1]=="#L"){
i.start[nBanks+1] <- i+1
nBanks <- nBanks + 1
}
}
i.start[nBanks+1] <- length(sqa)+5
name <- 0
for(i in 1:nBanks){
name <- strsplit(file, '[.]')[[1]]
name <- paste(name[-length(name)], collapse = '.')
name <- paste(name, "_b", i, ".sqa", sep="")
writeLines(sqa[ (i.start[i]-4):(i.start[i+1]-5)], con = name, sep = "\n", useBytes = FALSE)
}
}
fix.merge <- function(outfile, infile1, infile2, ...){
files <- list(infile1, infile2, ...)
N <- length(files)
file_tmp <- scan(file=infile1, what="list", sep="\n")
name_str_arr <- strsplit(file_tmp[1], split = " ")[[1]]
name_str_arr[2] <- 1
file_tmp[1] <- paste(name_str_arr, collapse = ' ')
writeLines(file_tmp, con = outfile, sep = "\n", useBytes = FALSE)
for(i in 2:N){
file_tmp <- scan(file=files[[i]], what="list", sep="\n")
name_str_arr <- strsplit(file_tmp[1], split = " ")[[1]]
name_str_arr[2] <- i
file_tmp[1] <- paste(name_str_arr, collapse = ' ')
write(file_tmp, file = outfile, sep = "\n", append=TRUE)
}
}
| /scratch/gouwar.j/cran-all/cranData/BBEST/R/interface.R |
################################################################
# test.signal(x, lambda, sigma, x.delta, knots.n, peaks.widthRange, peaks.n)
#
# returns: Generates a random test function: a smooth spline background plus
#          random Gaussian peaks plus i.i.d. Gaussian noise.
#   list() with the following elements:
#     $x:      x-values (same as supplied)
#     $y:      total curve (bkg + peaks + noise)
#     $sigma:  noise level, replicated over x
#     $SB:     coherent baseline (zeros)
#     $lambda: mean signal magnitude, replicated over x
#     $knots:  spline knots (list with elements $x and $y)
#     $bkg:    background contribution
# arguments:
#   x:                numeric vector of x-values
#   lambda:           mean signal magnitude; peak amplitudes are drawn from an
#                     exponential distribution with mean lambda/2
#   sigma:            noise level
#   x.delta:          minimum spacing between consecutive spline knots
#   knots.n:          number of spline knots for background (fewer means smoother)
#   peaks.widthRange: range of peak widths
#   peaks.n:          number of peaks to add
test.signal <- function(x, lambda, sigma, x.delta, knots.n, peaks.widthRange, peaks.n) {
# 1. Generate peaks.
  # Draw amplitudes so that the mean peak height is lambda/2 (hence rate 2/lambda):
peaksAmplitude <- rexp(n=peaks.n, rate=(2 / lambda))
# random peaks width
peaksWidth <- runif(n=peaks.n, min=peaks.widthRange[1], max=peaks.widthRange[2])
  # keep peak centres at least two peak-widths away from the x-range edges
peaksCenter <- runif(n=peaks.n,
min=min(x) + (2 * peaksWidth),
max=max(x) - (2 * peaksWidth))
# Assume Gaussian peak shape
N <- length(x)
peaks <- 0
for(i in 1:N){
peaks[i] <- 0
for(j in 1:peaks.n)
peaks[i] <- peaks[i] + peaksAmplitude[j]*exp(-0.5 * ((x[i] - peaksCenter[j]) / peaksWidth[j]) ^ 2)
}
# 2. Add noise
noise <- rnorm(sd=sigma, n=N) # add Poisson noise?
# 3. Construct background
knots.x <- 0
for(i in 1:knots.n){
knots.x[i] <- runif(n=1, min=min(x), max=max(x)-(knots.n-1)*x.delta)
}
knots.x <- sort(knots.x)
knots.x <- knots.x + ((1:knots.n) - 1) * x.delta # this procedure was checked and works correctly
knots.y <- rnorm(n=knots.n, sd=0.3)
bkg <- get.bkg(x=x, knots.x=knots.x, knots.y=knots.y)
if(min(bkg) < 0){
knots.y <- knots.y + abs(min(bkg))
bkg <- bkg + abs(min(bkg))
}
# 4. put everything together
noisyPeaks <- peaks + noise
signal <- noisyPeaks + bkg
return (list(x=x, y=signal, sigma=rep(sigma, length(x)),
SB=rep(0, length(x)), lambda=rep(lambda, length(x)),
knots=list(x=knots.x, y=knots.y), bkg=bkg))
}
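# Usage sketch (illustrative parameters):
# x <- seq(0, 30, 0.01)
# sim <- test.signal(x, lambda = 2, sigma = 0.05, x.delta = 2, knots.n = 8,
#                    peaks.widthRange = c(0.05, 0.2), peaks.n = 20)
# plot(sim$x, sim$y, type = "l"); lines(sim$x, sim$bkg, col = 2)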
# Multiple plot function
#
# ggplot objects can be passed in ..., or to plotlist (as a list of ggplot objects)
# - cols: Number of columns in layout
# - layout: A matrix specifying the layout. If present, 'cols' is ignored.
#
# If the layout is something like matrix(c(1,2,3,3), nrow=2, byrow=TRUE),
# then plot 1 will go in the upper left, 2 will go in the upper right, and
# 3 will go all the way across the bottom.
#
# (Cookbook for R)
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
# require(grid)
# Make a list from the ... arguments and plotlist
plots <- c(list(...), plotlist)
numPlots = length(plots)
# If layout is NULL, then use 'cols' to determine layout
if (is.null(layout)) {
# Make the panel
# ncol: Number of columns of plots
# nrow: Number of rows needed, calculated from # of cols
layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
ncol = cols, nrow = ceiling(numPlots/cols))
}
if (numPlots==1) {
print(plots[[1]])
} else {
# Set up the page
grid.newpage()
pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
# Make each plot, in the correct location
for (i in 1:numPlots) {
# Get the i,j matrix positions of the regions that contain this subplot
matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
layout.pos.col = matchidx$col))
}
}
}
#########################################################
# mPlot.results(fit.results, label.x, label.y, xlim, ylim)
#
# returns: plots the background estimate and the corrected signal
# arguments
#  fit.results:  return value of do.fit
#  label.x,
#  label.y:      plot labels
#  xlim, ylim:   optional plot ranges
mPlot.results <- function(fit.results, label.x="x", label.y="y", xlim=NA, ylim=NA){
# require(ggplot2)
if(length(fit.results$uncrt) == 1 && is.na(fit.results$pars)){
dat <- list(x=fit.results$x, y=fit.results$curves$y, SB=fit.results$curves$SB,
lambda=fit.results$fit.details$lambda, sigma=fit.results$fit.details$sigma)
if(!is.null(fit.results$curves$corr))
dat$y <- dat$y + fit.results$curves$corr
fit.results$uncrt <- get.hess(dat, fit.results$knots$x, fit.results$knots$y, Gr=NA, r=seq(0, 2, 0.01), p.bkg=.5)
}
if(!is.na(fit.results$pars)){
fit.results$uncrt <- list()
fit.results$uncrt$stdev <- rep(0, length(fit.results$x))
}
if(any(is.na(ylim)))
ylim <- c(min(fit.results$curves$y), max(fit.results$curves$y))
if(any(is.na(xlim)))
xlim <- c(min(fit.results$x), max(fit.results$x))
if(any(is.na(fit.results$knots$x)) || any(is.na(fit.results$knots$y))){
fit.results$knots$x <- 0
fit.results$knots$y <- 0
}
#to avoid NOTEs in package check:
x=y=bkg=stdev=signal=variable=value=NULL
curves <- data.frame(x = fit.results$x, y = fit.results$curves$y-fit.results$curves$SB,
bkg = fit.results$curves$bkg)
melted.curves <- reshape2::melt(curves, id.vars="x")
ribbon <- data.frame(x = fit.results$x, bkg = fit.results$curves$bkg, stdev = fit.results$uncrt$stdev,
signal = fit.results$curves$y-fit.results$curves$bkg)
knots <- data.frame(x=fit.results$knots$x, y=fit.results$knots$y)
p1<- (h <- ggplot2::ggplot(melted.curves, aes(x=x, y=value))
+ geom_line(aes(colour=variable, group=variable))
+ scale_color_manual(name="Plot legend", values = c("black", "red", "brown"), labels = c("experimental data", "background guess (+/-2sd)", "uncertainty interval"))
+ geom_ribbon(data=ribbon, aes(ymin=bkg-2*stdev, ymax=bkg+2*stdev, y=bkg), fill = "brown", alpha=0.5)
+ geom_point(data=knots, aes(x=x, y=y), colour="red", size=2)
+ xlab(label.x)
+ ylab(label.y)
+ xlim(xlim)
+ ylim(ylim)
+ ggtitle("Background Estimation")
)
# print(p1)
curves2 <- data.frame(x = fit.results$x, signal = fit.results$curves$y-fit.results$curves$bkg, SB=fit.results$curves$SB)
melted.curves2 <- reshape2::melt(curves2, id.vars="x")
ylim[1] <- ylim[1] + min(fit.results$curves$SB-fit.results$curves$bkg)
ylim[2] <- ylim[2] + max(fit.results$curves$SB-fit.results$curves$bkg)
p2<- (h2 <- ggplot2::ggplot(melted.curves2, aes(x=x, y=value))
+ geom_line(aes(colour=variable, group=variable))
+ scale_color_manual(name="Plot legend", values = c("blue", "green", "gray20"), labels = c("corrected signal", "coherent baseline"))
+ geom_ribbon(data=ribbon, aes(ymin=signal-2*stdev, ymax=signal+2*stdev, y=signal), fill = "gray20", alpha=0.5)
+ xlab(label.x)
+ ylab(label.y)
+ xlim(xlim)
# + ylim(ylim)
+ ggtitle("Corrected Signal")
)
multiplot(p1, p2, cols=1)
}
#########################################################
# calc.Gr(fit.results, rho.0, plot, r.min, r.max, dr, Q.min, Q.max, nsd, gr.compare)
#
# returns: the PDF G(r). Also plots the PDF and the corresponding confidence interval
# arguments
#  fit.results:  return value of do.fit
#  rho.0:        atomic number density for the material
#  r.min,        plot G(r) from r.min to r.max
#  r.max:
#  Q.min,        cut S(Q) at the interval between Q.min
#  Q.max:        and Q.max
#  nsd:          number of standard deviations to plot the uncertainty
calc.Gr <- function(fit.results, rho.0, plot=TRUE, r.min=0, r.max=5, dr=0.01, Q.min=NA, Q.max=NA, nsd=2, gr.compare=NA){
r <- seq(r.min, r.max, dr)
SQ <- fit.results$curves$y-fit.results$curves$bkg
Q <- fit.results$x
if(is.na(Q.max)) Q.max <- max(Q)
if(is.na(Q.min)) Q.min <- Q.max*.95
ind.max <- which(abs(Q-Q.max)==min(abs(Q-Q.max)))
ind.min <- which(abs(Q-Q.min)==min(abs(Q-Q.min)))
cut <- which(abs(SQ[ind.min:ind.max]-1) ==min(abs(SQ[ind.min:ind.max]-1) ))[1] + ind.min - 1
dat <- list(x=Q[1:cut], y=fit.results$curves$y[1:cut], SB=fit.results$curves$SB[1:cut],
lambda=fit.results$fit.details$lambda[1:cut], sigma=fit.results$fit.details$sigma[1:cut])
if(!is.na(fit.results$fit.details$Gr[1])){
cat("Recalculating G(r) information... \n")
dat <- set.Gr(dat, r1=r, rho.0=rho.0, type1="gaussianNoise")
}
else
dat$Gr <- NA
cat("Calculating standard deviation... \n")
if(!is.null(fit.results$curves$corr))
dat$SB <- dat$SB - fit.results$curves$corr[1:cut]
if(is.na(fit.results$pars)){
knots.x <- fit.results$knots$x
knots.y <- fit.results$knots$y
stdev <- get.hess(dat, knots.x, knots.y, Gr=dat$Gr, r=r, p.bkg=.5)$stdev.r
}
else
stdev <- rep(0, length(r))
if(!is.null(fit.results$curves$corr))
dat$SB <- dat$SB + fit.results$curves$corr[1:cut]
cat("Calculating Pair Distribution Function... \n")
gr <- sineFT(f.Q=dat$y-fit.results$curves$bkg[1:cut]-1, Q=dat$x, r=r)
stdev <- stdev*nsd
if(plot==TRUE)
fplot.Gr(r=r, gr=gr, stdev=stdev, rho.0=rho.0, nsd=nsd, gr.compare=gr.compare)
return(list(r=r, gr=gr, stdev=stdev/nsd))
}
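# Usage sketch (assumes 'fit.res' is the return value of do.fit; rho.0 is
# material-specific, 0.0848 is illustrative):
# gr <- calc.Gr(fit.res, rho.0 = 0.0848, r.min = 0, r.max = 10, nsd = 2)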
fplot.Gr <- function(r, gr, stdev, rho.0, nsd=2, gr.compare=NA, xlim=NA, ylim=NA, title="corrected G(r)"){
# require(ggplot2)
cat("Plotting... \n")
if(any(is.na(ylim)))
ylim=c(min(gr)*1.1-abs(max(stdev)), max(gr)*1.1+abs(max(stdev)))
else{
ylim[1] <- ylim[1]*1.1 - abs(max(stdev))
ylim[2] <- ylim[2]*1.1 + abs(max(stdev))
}
if(any(is.na(xlim)))
xlim=c(min(r), max(r))
#to avoid NOTEs in package check:
x=y=variable=value=NULL
if(!is.na(gr.compare[1])){
curves <- data.frame(x = r, y = gr, gr.compare=gr.compare, l = -4*pi*rho.0*r)
vals = c("black", "red", "darkblue", "blue")
labs = c(title, "G(r) to compare", paste("uncertainty interval (+/-",nsd,"sd)", sep=""))
}
else{
curves <- data.frame(x = r, y = gr, l = -4*pi*rho.0*r)
vals = c("black", "darkblue", "blue")
labs = c(title, paste("uncertainty interval (+/-",nsd,"sd)", sep=""))
}
melted.curves <- reshape2::melt(curves, id.vars="x")
ribbon <- data.frame(x = r, y = gr, stdev = stdev)
options(warn=-1)
p1<-(h <- ggplot2::ggplot(melted.curves, aes(x=x, y=value))
+ geom_line(aes(colour=variable, group=variable))
+ geom_ribbon(data=ribbon, aes(ymin=y-stdev, ymax=y+stdev, y=y), fill = "blue", alpha=0.5)
+ scale_color_manual(name="Plot legend", values = vals, labels = labs)
+ xlab("r")
+ ylab("G(r)")
+ xlim(xlim)
+ ylim(ylim)
+ ggtitle(title)
)
print(p1)
options(warn=0)
}
#####################################
# DATA BANKS
###
mPlot.sqa <- function(data){
N <- length(data)
n.x <- n.y <- 1
if(N>=2) n.y <- 2
if(N>=3) n.x <- 2
par(mfrow=c(n.x, n.y), mar=c(2,4,2,1))
for(i in 1:N)
plot(data[[i]]$x, data[[i]]$y, t="l", ylab=paste("bank ", i, sep=" "))
par(mfrow=c(1,1), mar=c(5, 4, 4, 2) + 0.1)
}
###
mPlot.results.banks <- function(fit.results, label.x="x", label.y="y", xlim=NA, ylim=NA){
N <- length(fit.results)
n.x <- n.y <- 1
if(N>=2) n.y <- 2 # number of columns; maximum=2
if(N>=3) n.x <- ceiling(N/2) # number of rows
if(is.null(dim(xlim)) || is.null(dim(ylim)))
xlim <- ylim <- matrix(NA, nrow=N, ncol=2)
for(i in 1:N){
if(any(is.na(ylim[i,])))
ylim[i, ] <- c(min(fit.results[[i]]$curves$y-fit.results[[i]]$curves$SB),
max(fit.results[[i]]$curves$y-fit.results[[i]]$curves$SB))
}
for(i in 1:N){
if(any(is.na(xlim[i,])))
xlim[i, ] <- c(min(fit.results[[i]]$x), max(fit.results[[i]]$x))
}
for(i in 1:N){
fit.res <- fit.results[[i]]
if(any(is.na(fit.res$knots$x)) || any(is.na(fit.res$knots$y))){
fit.res$knots$x <- 0
fit.res$knots$y <- 0
}
if(length(fit.res$uncrt) == 1 && is.na(fit.res$pars) ){
dat <- list(x=fit.res$x, y=fit.res$curves$y, SB=fit.res$curves$SB,
lambda=fit.res$fit.details$lambda, sigma=fit.res$fit.details$sigma)
if(!is.null(fit.res$curves$corr))
dat$y <- dat$y + fit.res$curves$corr
fit.res$uncrt <- get.hess(dat, fit.res$knots$x, fit.res$knots$y, Gr=NA, r=seq(0, 2, 0.01), p.bkg=.5)
}
else{
fit.res$uncrt <- list()
fit.res$uncrt$stdev <- rep(0, length(fit.res$x))
}
#to avoid NOTEs in package check:
x=y=bkg=stdev=signal=variable=value=NULL
curves <- data.frame(x = fit.res$x, y = fit.res$curves$y-fit.res$curves$SB,
bkg = fit.res$curves$bkg)
melted.curves <- reshape2::melt(curves, id.vars="x")
ribbon <- data.frame(x = fit.res$x, bkg = fit.res$curves$bkg, stdev = fit.res$uncrt$stdev,
signal = fit.res$curves$y-fit.res$curves$bkg)
knots <- data.frame(x=fit.res$knots$x, y=fit.res$knots$y)
assign(paste("p", i, sep=""),
(h <- ggplot2::ggplot(melted.curves, aes(x=x, y=value))
+ geom_line(aes(colour=variable, group=variable))
+ geom_ribbon(data=ribbon, aes(ymin=bkg-2*stdev, ymax=bkg+2*stdev, y=bkg), fill = "brown", alpha=0.5)
+ scale_color_manual(name="Plot legend", values = c("black", "red", "brown"))
+ geom_point(data=knots, aes(x=x, y=y), colour="red", size=2)
+ xlab(label.x)
+ ylab(label.y)
+ xlim(xlim[i,])
+ ylim(ylim[i,])
+ theme(legend.position = "none")))
}
Layout <- grid.layout(nrow = n.x+1, ncol = n.y,
widths = unit(rep(2,length=n.y), rep("null", length=n.y)),
heights = unit(c(rep(1,length=n.x), 0.4), rep("null", length=(n.x+1))))
vplayout <- function(...) {
grid.newpage()
pushViewport(viewport(layout = Layout))
}
subplot <- function(x, y){
viewport(layout.pos.row = x, layout.pos.col = y)
}
vplayout()
for(i in 1:N){
pp <- get(paste("p", i, sep=""))
xx <- ceiling(i/n.y)
yy <- (i+1) %% 2 + 1
print(pp, vp = subplot(xx, yy))
}
empty.curves <- reshape2::melt(data.frame(x = c(0), y = c(0), bkg = c(0)) , id.vars="x")
options(warn=-1)
legend <- (h <- ggplot2::ggplot(empty.curves, aes(x=x, y=value))
+ geom_line(aes(colour=variable, group=variable))
+ scale_color_manual(name="", values = c("black", "red"), labels = c("experimental data", "background guess (+/-2sd)"))
+ theme(legend.position=c(.5, .5),
axis.text.x = element_blank(),
axis.text.y = element_blank(),
axis.ticks = element_blank(),
axis.title.x = element_blank(),
axis.title.y = element_blank(),
axis.ticks.margin = unit(c(0,0,0,0), "lines"),
panel.border = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
line = element_blank(),
legend.direction='horizontal',
legend.box='vertical'
)
)
print(legend, vp = subplot(n.x+1, 1:n.y))
options(warn=0)
}
| /scratch/gouwar.j/cran-all/cranData/BBEST/R/plot.R |
#############################################
# !!!
#
# Part of this code was taken from: https://github.com/rstudio/shiny-incubator
#
# !!!
#' Initialize progress
#'
#' Call this function in your \code{shinyUI} definition if you intend
#' to use progress in \code{server.R}.
#' @seealso \code{\link{withProgress}}, \code{\link{Progress}}
#' @export
progressInit <- function() {
addResourcePath('progress', system.file('progress',
package='BBEST'))
tagList(
singleton(
tags$head(
tags$script(src='progress/progress.js'),
tags$link(rel='stylesheet', type='text/css',
href='progress/progress.css')
)
)
)
}
#' Reporting progress (object-oriented API)
#'
#' Reports progress to the user during long-running operations.
#'
#' This package exposes two distinct programming APIs for working with
#' progress. \code{\link{withProgress}} and \code{\link{setProgress}}
#' together provide a simple function-based interface, while the
#' \code{Progress} reference class provides an object-oriented API.
#'
#' Instantiating a \code{Progress} object causes a progress panel to be
#' created, and it will be displayed the first time the \code{set}
#' method is called. Calling \code{close} will cause the progress panel
#' to be removed.
#'
#' \strong{Methods}
#' \describe{
#' \item{\code{initialize(session, min = 0, max = 1)}}{
#' Creates a new progress panel (but does not display it).
#' }
#' \item{\code{set(message = NULL, detail = NULL, value = NULL)}}{
#' Updates the progress panel. When called the first time, the
#' progress panel is displayed.
#' }
#' \item{\code{close()}}{
#' Removes the progress panel. Future calls to \code{set} and
#' \code{close} will be ignored.
#' }
#' }
#'
#' @param session The Shiny session object, as provided by
#' \code{shinyServer} to the server function.
#' @param min The value that represents the starting point of the
#' progress bar. Must be less than \code{max}.
#' @param max The value that represents the end of the progress bar.
#' Must be greater than \code{min}.
#' @param message A single-element character vector; the message to be
#' displayed to the user, or \code{NULL} to hide the current message
#' (if any).
#' @param detail A single-element character vector; the detail message
#' to be displayed to the user, or \code{NULL} to hide the current
#' detail message (if any). The detail message will be shown with a
#' de-emphasized appearance relative to \code{message}.
#' @param value Single-element numeric vector; the value at which to set
#' the progress bar, relative to \code{min} and \code{max}.
#' \code{NULL} hides the progress bar, if it is currently visible.
#'
#' @examples
#' \dontrun{
#' # server.R
#' shinyServer(function(input, output, session) {
#' output$plot <- renderPlot({
#' progress <- Progress$new(session, min=1, max=15)
#' on.exit(progress$close())
#'
#' progress$set(message = 'Calculation in progress',
#' detail = 'This may take a while...')
#'
#' for (i in 1:15) {
#' progress$set(value = i)
#' Sys.sleep(0.5)
#' }
#' plot(cars)
#' })
#' })
#' }
#' @seealso \code{\link{progressInit}}, \code{\link{withProgress}}
#' @rdname Progress
#' @export
Progress <- setRefClass(
'Progress',
fields = list(
.session = 'ANY',
.id = 'character',
.min = 'numeric',
.max = 'numeric',
.closed = 'logical'
),
methods = list(
initialize = function(session, min = 0, max = 1) {
.closed <<- FALSE
.session <<- session
.id <<- paste(as.character(as.raw(runif(8, min=0, max=255))), collapse='')
.min <<- min
.max <<- max
.session$sendCustomMessage('shiny-progress-open', list(id = .id))
},
set = function(message = NULL, detail = NULL, value = NULL) {
if (.closed) {
# TODO: Warn?
return()
}
data <- list(id = .id)
if (!missing(message))
data$message <- message
if (!missing(detail))
data$detail <- detail
if (!missing(value)) {
if (is.null(value) || is.na(value))
data$value <- NULL
else {
data$value <- min(1, max(0, (value - .min) / (.max - .min)))
}
}
.session$sendCustomMessage('shiny-progress-update', data)
},
close = function() {
if (.closed) {
# TODO: Warn?
return()
}
.session$sendCustomMessage('shiny-progress-close',
list(id = .id))
}
)
)
.currentProgress <- new.env()
#' Reporting progress (functional API)
#'
#' Reports progress to the user during long-running operations.
#'
#' This package exposes two distinct programming APIs for working with
#' progress. \code{withProgress} and \code{setProgress} together provide
#' a simple function-based interface, while the \code{\link{Progress}}
#' reference class provides an object-oriented API.
#'
#' Use \code{withProgress} to wrap the scope of your work; doing so will
#' cause a new progress panel to be created, and it will be displayed the
#' first time \code{setProgress} is called. When \code{withProgress} exits,
#' the corresponding progress panel will be removed.
#'
#' Generally, \code{withProgress}/\code{setProgress} should be
#' sufficient; the exception is if the work to be done is asynchronous
#' (this is not common) or otherwise cannot be encapsulated by a single
#' scope. In that case, you can use the \code{Progress} reference class.
#'
#' @param session The Shiny session object, as provided by
#' \code{shinyServer} to the server function.
#' @param expr The work to be done. This expression should contain calls
#' to \code{setProgress}.
#' @param min The value that represents the starting point of the
#' progress bar. Must be less than \code{max}.
#' @param max The value that represents the end of the progress bar.
#' Must be greater than \code{min}.
#' @param env The environment in which \code{expr} should be evaluated.
#' @param quoted Whether \code{expr} is a quoted expression (this is not
#' common).
#' @param message A single-element character vector; the message to be
#' displayed to the user, or \code{NULL} to hide the current message
#' (if any).
#' @param detail A single-element character vector; the detail message
#' to be displayed to the user, or \code{NULL} to hide the current
#' detail message (if any). The detail message will be shown with a
#' de-emphasized appearance relative to \code{message}.
#' @param value Single-element numeric vector; the value at which to set
#' the progress bar, relative to \code{min} and \code{max}.
#' \code{NULL} hides the progress bar, if it is currently visible.
#'
#' @examples
#' \dontrun{
#' # server.R
#' shinyServer(function(input, output, session) {
#' output$plot <- renderPlot({
#' withProgress(session, min=1, max=15, {
#' setProgress(message = 'Calculation in progress',
#' detail = 'This may take a while...')
#' for (i in 1:15) {
#' setProgress(value = i)
#' Sys.sleep(0.5)
#' }
#' })
#' plot(cars)
#' })
#' })
#' }
#' @seealso \code{\link{progressInit}}, \code{\link{Progress}}
#' @rdname withProgress
#' @export
withProgress <- function(session, expr, min = 0, max = 1,
env=parent.frame(), quoted=FALSE) {
func <- shiny::exprToFunction(expr, env, quoted)
p <- Progress$new(session, min = min, max = max)
.currentProgress$stack <- c(p, .currentProgress$stack)
on.exit({
.currentProgress$stack <- .currentProgress$stack[-1]
p$close()
})
return(func())
}
#' @rdname withProgress
#' @export
setProgress <- function(message = NULL, detail = NULL, value = NULL) {
if (is.null(.currentProgress$stack) || length(.currentProgress$stack) == 0) {
warning('setProgress was called outside of withProgress; ignoring')
return()
}
args <- list()
if (!missing(message))
args$message <- message
if (!missing(detail))
args$detail <- detail
if (!missing(value))
args$value <- value
do.call(.currentProgress$stack[[1]]$set, args)
invisible()
}
| /scratch/gouwar.j/cran-all/cranData/BBEST/R/progress.R |
##################################################################
#
#
# FUNCTIONS TO WORK WITH R-SPACE
#
# Authors: Charles R. Hogg III, Anton Gagin
#
#####################################################################################
# noise.cov.r(r1, r2, Q, sigma)
# Computes the covariance between two points in G(r) due to noise in S(Q)
# (the latter is assumed i.i.d. Gaussian).
#
# Args:
# r1: (numeric) One r-value to consider
# r2: (numeric) The other r-value to consider
# Q: (numeric vector) The Q-values where we have data.
# sigma: (numeric) The standard deviation of the noise in S.
#
# Returns:
# (numeric) The covariance between G(r1) and G(r2) due to noise in S(Q).
noise.cov.r <- function(r1, r2, Q, sigma) {
delta <- diff(Q)
delta <- c(delta[1], delta)
f.sum <- sum((2*sigma * Q * delta/pi ) ^ 2 * sin(Q * r1) * sin(Q * r2))
return (f.sum)
}
#####################################################################################
# noise.cov.vector.r(r1, r2, Q, sigma)
noise.cov.vector.r <- Vectorize(noise.cov.r, vectorize.args=c("r1", "r2"))
#####################################################################################
# noise.cov.matrix.r(r, Q, sigma)
#
# Computes the covariance matrix in G(r) due to noise in S(Q) (the latter is
# assumed i.i.d. Gaussian).
#
# Args:
# r: (numeric vector) The r-values where we evaluate this covariance.
# Q: (numeric vector) The Q-values where we have data.
# sigma: (numeric) The standard deviation of the noise in S.
#
# Returns:
# A (N x N) matrix M, where N is the length of r, such that M[i, j] gives
# the covariance between r[i] and r[j].
noise.cov.matrix.r <- function(r, Q, sigma) {
N <- length(r)
M <- matrix(nrow=N, noise.cov.vector.r(r1=rep(r, each=N), r2=rep(r, N), Q=Q, sigma=sigma))
return (M)
}
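# Usage sketch: propagate uniform S(Q) noise (sigma = 0.01, illustrative)
# into r-space and extract pointwise standard deviations of G(r):
# Q <- seq(0.02, 25, 0.02); r <- seq(0, 2, 0.05)
# K <- noise.cov.matrix.r(r = r, Q = Q, sigma = 0.01)
# sd.r <- sqrt(diag(K))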
sineFT.matrix <- function(Q, r) {
# Computes the matrix which converts a function at the given Q-points into
  # r-space using a Fourier sine transform (via Simpson's rule).
#
# Args:
# Q: (numeric vector) The Q-values where the function is evaluated.
# r: (numeric vector) The r-values where the function is evaluated.
#
# Returns:
# A (N.r x N.Q) numeric matrix which effects the Q-to-r sine Fourier
# transform.
dQ <- Dx(Q)
N.Q <- length(Q)
return(sapply(X=1:N.Q, Q=Q, dQ=dQ, r=r,
FUN=function(i, Q, dQ, r) {
                  # composite Simpson weights (4/3 for even i, 2/3 for odd
                  # interior i), matching the quadrature used in sineFT()
                  factor <- 2*((i+1)%%2+1)/3
if(i==1 || i==length(Q))
factor <- 1/3
2/pi * Q[i] * sin(Q[i]*r) * dQ[i]*factor
}))
}
sineFT <- function(f.Q, Q, r) {
  # Computes the sine Fourier transform
  #   G(r) = 2/pi * Integral{ Q*f(Q)*sin(Q*r) dQ }
  # at the given r-points, using composite Simpson quadrature on the Q-grid.
N.Q <- length(Q)
N.r <- length(r)
dQ <- Dx(Q)
f.r <- 0 * r
for (i in seq(2, N.Q-1, 2)) {
f.r <- f.r + 2/pi *Q[i]*sin(Q[i] * r) * f.Q[i] * dQ[i] * 4/3
}
for (i in seq(3, N.Q-1, 2)) {
f.r <- f.r + 2/pi *Q[i]*sin(Q[i] * r) * f.Q[i] * dQ[i] * 2/3
}
f.r <- f.r + 2/pi*(Q[1]*sin(Q[1]*r)*f.Q[1]*dQ[1] + Q[N.Q]*sin(Q[N.Q]*r)*f.Q[N.Q]*dQ[N.Q] )*1/3
return (f.r)
}
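# Usage sketch: transform F(Q) = S(Q) - 1 to G(r), as done in do.iter
# (assumes 'dat' holds the data and 'bkg' the estimated background):
# r <- seq(0, 10, 0.01)
# gr <- sineFT(f.Q = dat$y - bkg - 1, Q = dat$x, r = r)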
invert.order <- function(i) {
# Inverts the result of the 'order' function.
#
# Args:
# i: Numeric vector with a permutation of the integers from 1:length(i).
#
# Returns:
# Numeric vector r such that r[i] == 1:length(i).
r <- i + NA
for (j in 1:length(i)) {
r[i[j]] <- j
}
return (r)
}
Dx <- function(x) {
# Compute the width for each x (specifically, its Voronoi cell size) to aid
# in numerical integration.
#
# Args:
# x: A sorted numeric vector of x-values
n <- length(x)
i <- order(x)
x.sort <- x[i]
dx <- diff(c(x.sort[1], 0.5 * (x.sort[-1] + x.sort[-n]), x.sort[n]))
return (dx[invert.order(i)])
}
| /scratch/gouwar.j/cran-all/cranData/BBEST/R/r-space.R |
#####
#
# Check what is ready
whatIsSpecified <- function(data){
N <- length(data)
res <- list()
for(i in 1:N){
res[[i]] <- list()
res[[i]]$x <- TRUE
res[[i]]$y <- TRUE
res[[i]]$lambda <- TRUE
res[[i]]$sigma <- TRUE
res[[i]]$SB <- TRUE
res[[i]]$smoothed <- TRUE
if( is.null(data[[i]]$x) || any(is.na(data[[i]]$x)) || !any(data[[i]]$x!=0) )
res[[i]]$x <- FALSE
if( is.null(data[[i]]$y) || any(is.na(data[[i]]$y)) || !any(data[[i]]$y!=0) )
res[[i]]$y <- FALSE
if( is.null(data[[i]]$lambda) || any(is.na(data[[i]]$lambda)) || !any(data[[i]]$lambda!=0) )
res[[i]]$lambda <- FALSE
if( is.null(data[[i]]$sigma) || any(is.na(data[[i]]$sigma)) || !any(data[[i]]$sigma!=0) )
res[[i]]$sigma <- FALSE
if( is.null(data[[i]]$SB) || any(is.na(data[[i]]$SB)) || !any(data[[i]]$SB!=0) )
res[[i]]$SB <- FALSE
if( is.null(data[[i]]$smoothed) || any(is.na(data[[i]]$smoothed)) || !any(data[[i]]$smoothed!=0) )
res[[i]]$smoothed <- FALSE
}
return(res)
} | /scratch/gouwar.j/cran-all/cranData/BBEST/R/specified.R |
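# Usage sketch: check which fields are ready before fitting; expects a list
# of 'data' objects, one per bank:
# ok <- whatIsSpecified(list(dat))[[1]]
# if(!ok$sigma) dat <- set.sigma(dat, n.regions = 10)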
##################################################################
#
#
# FUNCTIONS TO CALCULATE UNCERTAINTY IN BACKGOUND ESTIMATION
#
#
##################################################################
# get.hess(data, knots.x, knots.y, Gr, r, p.bkg)
#
# computes the Hessian matrix of the target function and propagates it into
# uncertainty estimates in Q- and r-space; for routine use get.hess.numerically
# is recommended as the more stable (though slower) alternative
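#
# usage sketch (assumes 'dat' was prepared via set.sigma/set.lambda/set.SB and
# 'fit' is the return value of do.fit):
#   u <- get.hess(dat, fit$knots$x, fit$knots$y, Gr=NA, r=seq(0, 2, 0.01))
#   u$stdev then holds pointwise standard deviations in Q-space, u$stdev.r
#   the corresponding r-space values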
get.hess <- function(data, knots.x, knots.y, Gr=NA, r=seq(0, 2, 0.01), p.bkg=0.5){
x <- data$x
y <- data$y-data$SB
sigma <- data$sigma
lambda <- data$lambda
Phi <- basisMatrix(x=x, knots.x=knots.x)
bkg <- Phi %*% t(t(knots.y))
# 1. Prior
# cat("Calculating prior hessian... \n")
D <- DMatrix(knots.x=knots.x)$matrix
cDc <- as.vector(t(knots.y) %*% D %*% t(t(knots.y)))
E <- length(knots.y)
bkg.pp <- D %*% t(t(knots.y)) # second derivative of bkg
hess.prior <- E/2 * (2*(D / cDc) - (4 * bkg.pp %*% t(bkg.pp)) / (cDc ^ 2) )
# 2. Likelihood:
# cat("Calculating likelihood hessian...")
deviation <- y - bkg
deviation.norm <- deviation/sigma
grad.f <- as.vector(-deviation/sigma^2)
hess.f <- as.vector(-1/sigma^2)
funct.f <- (log(p.bkg) - 0.5 * log(2 * pi) - log(sigma) - 0.5 * deviation.norm ^ 2)
f <- list(funct=funct.f, grad=grad.f, hess=hess.f)
#######
rho <- sigma/lambda
z <- (y - bkg) / lambda
qq <- z / rho - rho
funct.h <- log(1 - p.bkg) - log(lambda) + pnorm(log.p=TRUE, q=qq) - z + 0.5 * (rho ^ 2)
gamma.q <- exp(-0.5 * qq ^ 2 - pnorm(q=qq, log.p=TRUE)) / (rho * sqrt(2 * pi))
grad.h <- as.vector(-(1 - gamma.q) / lambda)
hess.h <- as.vector(-gamma.q * (gamma.q + qq / rho) / (lambda ^ 2))
h <- list(funct=funct.h, grad=grad.h, hess=hess.h)
#######
f.fract <- as.vector(1 / (1 + exp(h$funct - f$funct)))
h.fract <- 1 - f.fract
grad.g <- hess.g <- NA
grad.g <- (f.fract * f$grad + (1 - f.fract) * h$grad)
f.contr <- f.fract*f$hess + f.fract*f$grad*f$grad
h.contr <- h.fract*h$hess + h.fract*h$grad*h$grad
hess.gg <- -(f.contr + h.contr - (grad.g)^2)
Phi.prime <- hess.gg*Phi
hess.g <- t(Phi.prime) %*% Phi
# 3. Gr=-4*Pi*rho*r restriction
hess.gr <- 0
if(!is.na(Gr[1])){
# cat("Calculating Gr hessian...")
if(is.na(Gr$type1))
hess.gr.r1 <- 0
else if(Gr$type1=="gaussianNoise")
hess.gr.r1 <- logLikelihoodGrGauss(y=data$y-data$SB, knots.y=knots.y, alpha=1, Phi=Phi, bkg.r=Gr$bkg.r,
sigma.r=Gr$sigma.r, matrix.FT=Gr$matrix.FT1, Hessian=TRUE)$hess
else if(Gr$type1=="correlatedNoise")
hess.gr.r1 <- logLikelihoodGrCorr(knots.y=knots.y, Phi=Phi, bkg.r=Gr$bkg.r,
KG.inv=Gr$KG.inv, matrix.FT=Gr$matrix.FT1, Hessian=TRUE)$hess
if(is.na(Gr$type2))
hess.gr.r2 <- 0
else if(Gr$type2=="secondDeriv")
hess.gr.r2 <- logPriorBkgRSmooth(bkg.r=Gr$matrix.FT2 %*% bkg, D=Gr$D, Hessian=TRUE, Phi=Phi,
matrix.FT=Gr$matrix.FT2, knots.y=knots.y)$hess
else if(Gr$type2=="gaussianProcess")
hess.gr.r2 <- logPriorBkgRGP(bkg.r=Gr$matrix.FT2 %*% bkg, covMatrix=Gr$covMatrix, Hessian=TRUE)$hess
hess.gr <- hess.gr.r1 + hess.gr.r2
# cat(" done!\n")
}
# 4. Summing up
hess <- hess.prior + hess.gr + hess.g
#hess <- hess.g
hess.inv <- solve(hess)
# 5. Converting Hessian into Q-space from a spline-space
Phi <- basisMatrix(x=data$x, knots.x=knots.x)
H <- Phi%*%hess.inv%*%t(Phi) # hessian in Q-space (inverted...)
# that is, covariance matrix
H <- H + diag((data$sigma)^2, length(data$x))
cov.diag <- diag(H)
cov.diag[which(cov.diag<0)]<-0
stdev <- sqrt(cov.diag)
# 6. Converting Hessian into r-space
MFT <- sineFT.matrix(Q=data$x, r=r)
cov.r <- MFT %*% H %*% t(MFT)
cov.diag.r <- diag(cov.r)
cov.diag.r[which(cov.diag.r<0)]<-0
stdev.r <- sqrt(cov.diag.r)
# return(list(stdev=stdev, stdev.r=stdev.r, hess=hess, cov.matrix=hess.inv, cov.matrix.r=cov.r, hess.gg=hess.gg, H=H))
return(list(stdev=stdev, stdev.r=stdev.r, hess=hess, cov.matrix=hess.inv, cov.matrix.r=cov.r, hess.gg=hess.gg))
}
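## Sketch: approximate 95% confidence bands from the output (illustrative;
## 'dat', 'knots.x', 'knots.y' and 'bkg' are assumed to come from an earlier
## fit and are not defined here):
## unc <- get.hess(data=dat, knots.x=knots.x, knots.y=knots.y)
## upper <- bkg + 2*unc$stdev
## lower <- bkg - 2*unc$stdev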
##################################################################
#get.hess.numerically(data, knots.x, knots.y, Gr, r, p.bkg, h1)
#
# computes a list with elements
# stdev: estimated standard deviations for a reconstructed signal.
# stdev.r: estimated standard deviations for a reconstructed signal in r-space.
# hess: Hessian matrix for a target function.
# cov.matrix: covariance matrix, i.e. the inverse of the Hessian.
# cov.matrix.r: covariance matrix in r-space.
get.hess.numerically <- function(data, knots.x, knots.y, Gr=NA, r=seq(0, 2, 0.01), p.bkg=0.5, h=1e-4){
n <- length(knots.y)
incr <- function(cc, h, i=0, j=0){
if(i!=0)
cc[i] <- cc[i]+h
if(j!=0)
cc[j] <- cc[j]+h
return(cc)
}
# 1. Computing Hessian matrix
hess <- matrix(0, nrow=n, ncol=n)
for(i in 1:n){
cat("knot.i = ", i, " of ", n,"\n")
for(j in 1:n){
a1 <- logPosterior(data=data, alpha=1, knots.x=knots.x, knots.y=incr(cc=knots.y,h=h,i=i,j=j), Gr=Gr, p.bkg=p.bkg)
a2 <- logPosterior(data=data, alpha=1, knots.x=knots.x, knots.y=incr(cc=knots.y,h=h,i=i, j=0), Gr=Gr, p.bkg=p.bkg)
a3 <- logPosterior(data=data, alpha=1, knots.x=knots.x, knots.y=incr(cc=knots.y,h=h,i=0, j=j), Gr=Gr, p.bkg=p.bkg)
a4 <- logPosterior(data=data, alpha=1, knots.x=knots.x, knots.y=knots.y, Gr=Gr, p.bkg=p.bkg)
hess[i,j] <- (a1-a2-a3+a4)/h^2
}
}
# 2. Computing inverse
# U <- regularized.cholesky(hess)
# hess.inv <- chol2inv(U)
hess.inv <- solve(hess)
# 3. Converting Hessian into Q-space from a spline-space
Phi <- basisMatrix(x=data$x, knots.x=knots.x)
H <- Phi%*%hess.inv%*%t(Phi) # hessian in Q-space (inverted...)
# that is, covariance matrix
H <- H + diag((data$sigma)^2, length(data$x))
cov.diag <- diag(H)
cov.diag[which(cov.diag<0)]<-0
stdev <- sqrt(cov.diag)
# 4. Converting Hessian into r-space
MFT <- sineFT.matrix(Q=data$x, r=r)
cov.r <- MFT %*% H %*% t(MFT)
cov.diag.r <- diag(cov.r)
cov.diag.r[which(cov.diag.r<0)]<-0
stdev.r <- sqrt(cov.diag.r)
return(list(stdev=stdev, stdev.r=stdev.r, hess=hess, cov.matrix=hess.inv, cov.matrix.r=cov.r))
}
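## The stencil above is the forward-difference approximation
##   d2 psi / (dc_i dc_j) ~ (psi(c+h*e_i+h*e_j) - psi(c+h*e_i)
##                           - psi(c+h*e_j) + psi(c)) / h^2
## Standalone sanity check on a simple quadratic (illustrative only):
## f <- function(cc) sum(cc^2)
## h <- 1e-4; c0 <- c(1, 2)
## (f(c0+c(h,h)) - f(c0+c(h,0)) - f(c0+c(0,h)) + f(c0))/h^2   # ~0, off-diagonal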
##############################################################################################
# grad.descent(data, knots.x, knots.y, Gr, p.bkg, N)
#
# Gradient-descent method to find a local minimum of psi(c)
grad.descent <- function(data, knots.x, knots.y, Gr=NA, p.bkg=0.5, eps=1e-3, N=10000){
x.p <- knots.y
x.pp <- x.p
  lambda <- c(abs(0.0001/get.deriv(data=data, knots.x=knots.x, knots.y=x.p, Gr=Gr, p.bkg=p.bkg)))
N <- round(N, digits=-2)
  if(N==0) N <- 100
  converged <- FALSE
cat("iterations will stop once convergence is reached \n")
cat("indicating % of itermax... \n")
for(j in 1:(N/100)){
cat("...",(j-1)/N*100*100, "% done \n")
for(i in 1:100){
grad.f <- as.vector(get.deriv(data=data, knots.x=knots.x, knots.y=x.p, Gr=Gr, p.bkg=p.bkg))
# grad.f <- as.vector(get.deriv.numerically(data=data, knots.x=knots.x, knots.y=x.p, Gr=Gr, p.bkg=p.bkg))
x.f <- x.p - lambda*grad.f
x.p <- x.f
}
    if(max(abs(x.pp-x.f))<eps){
      cat("\n convergence reached! \n")
      converged <- TRUE
      break
    }
    x.pp <- x.f
  }
  if(!converged){
    cat("convergence not reached! \n")
    return(knots.y)
  }
  else
    return(x.f)
}
#####################################################################################
# get.deriv.numerically(data, knots.x, knots.y, Gr, p.bkg, h)
#
# Numerically computes the derivative of psi(c) with respect to c.
# More stable than get.deriv
get.deriv.numerically <- function(data, knots.x, knots.y, Gr=NA, p.bkg=0.5, h=1e-6){
n <- length(knots.y)
incr <- function(cc, h, i=0){
cc[i] <- cc[i]+h
cc
}
der <- 0
for(i in 1:n){
a1 <- logPosterior(data=data, alpha=1, knots.x=knots.x, knots.y=incr(cc=knots.y,h=h,i=i), Gr=Gr, p.bkg=p.bkg)
a2 <- logPosterior(data=data, alpha=1, knots.x=knots.x, knots.y=knots.y, Gr=Gr, p.bkg=p.bkg)
der[i] <- (a1-a2)/h
}
return(der)
}
#####################################################################################
# get.deriv(data, knots.x, knots.y, Gr, p.bkg,)
#
# Analytically computes the derivative of psi(c) with respect to c.
# Less stable than get.deriv.numerically
get.deriv <- function(data, knots.x, knots.y, Gr=NA, p.bkg=0.5){
x <- data$x
y <- data$y-data$SB
sigma <- data$sigma
lambda <- data$lambda
Phi <- basisMatrix(x=x, knots.x=knots.x)
bkg <- Phi %*% t(t(knots.y))
# 1. Prior
# cat("Calculating prior hessian... \n")
D <- DMatrix(knots.x=knots.x)$matrix
cDc <- as.vector(t(knots.y) %*% D %*% t(t(knots.y)))
E <- length(knots.y)
# 2. Likelihood:
deviation <- y - bkg
norm.dev <- deviation/sigma
grad.f <- as.vector(deviation/sigma^2)*Phi
funct.f <- (log(p.bkg) - 0.5 * log(2 * pi) - log(sigma) - 0.5 * norm.dev ^ 2)
f <- list(funct=funct.f, grad=grad.f)
#######
rho <- sigma/lambda
z <- (y - bkg) / lambda
qq <- z / rho - rho
funct.h <- (log(1 - p.bkg) - log(lambda) + pnorm(log.p=TRUE, q=qq) - z + 0.5 * (rho ^ 2))
gamma.q <- exp(-0.5 * qq ^ 2 - pnorm(q=qq, log.p=TRUE)) / (rho * sqrt(2 * pi))
grad.h <- as.vector((1 - gamma.q) / lambda) * Phi
h <- list(funct=funct.h, grad=grad.h)
#######
f.fract <- as.vector(1 / (1 + exp(h$funct - f$funct)))
  grad.g <- f.fract * f$grad + (1 - f.fract) * h$grad
  # 4. Summing up
grad <- -colSums(grad.g) + (t(knots.y) %*% D) * E / cDc
########
if(!is.na(Gr[1])){
matrix.FT <- Gr$matrix.FT1
sigma.r <- Gr$sigma.r
Mprime <- matrix.FT%*%Phi / (sqrt(2)*sigma.r)
b.prime <- Gr$bkg.r / (sqrt(2)*sigma.r)
    grad.gr <- 2*t(Mprime) %*% Mprime %*% knots.y - 2*t(Mprime) %*% b.prime
grad <- as.vector(grad) + as.vector(grad.gr)
}
return(grad)
}
# author: Eric Cai
# http://www.r-bloggers.com/scripts-and-functions-using-r-to-implement-the-golden
# -section-search-method-for-numerical-optimization/
golden.search = function(data, lower.bound=-.01, upper.bound=.01, tolerance=1e-6, knots.x, knots.y, Gr, p.bkg, grad.f){
f <- function(lambda){
logPosterior(data=data, alpha=1, knots.x=knots.x, knots.y=(knots.y - lambda*grad.f), Gr=Gr, p.bkg=p.bkg)
}
golden.ratio = 2/(sqrt(5) + 1)
x1 = upper.bound - golden.ratio*(upper.bound - lower.bound)
x2 = lower.bound + golden.ratio*(upper.bound - lower.bound)
f1 = f(x1)
f2 = f(x2)
iteration = 0
while(abs(upper.bound - lower.bound) > tolerance){
iteration = iteration + 1
if (f2 > f1){
upper.bound = x2
x2 = x1
f2 = f1
x1 = upper.bound - golden.ratio*(upper.bound - lower.bound)
f1 = f(x1)
}
else{
lower.bound = x1
x1 = x2
f1 = f2
x2 = lower.bound + golden.ratio*(upper.bound - lower.bound)
f2 = f(x2)
}
}
estimated.minimizer = (lower.bound + upper.bound)/2
estimated.minimizer
}
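## Design note: the golden ratio keeps the bracket proportions constant, so
## after each shrink one of the two interior points (and its function value)
## is reused -- each iteration costs a single new posterior evaluation.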
#####################################################################################
# regularized.cholesky(Matr, eps.max, eps.min, numTries)
#
# Regularized Cholesky matrix decomposition
regularized.cholesky <- function(Matr, eps.max=1e-2, eps.min=1e-20, numTries=17) {
baseVal <- min(diag(Matr))
  U <- try(chol(Matr), silent=TRUE)
  epsilon <- eps.min
  I <- diag(nrow(Matr))
  while (inherits(U, "try-error") && epsilon <= eps.max) {
    U <- try(chol(Matr + baseVal * epsilon * I), silent=TRUE)
    epsilon <- epsilon * 10
  }
  if (inherits(U, "try-error"))
    stop("Cholesky decomposition failed even after regularization\n")
  return (U)
}
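## Illustrative use on a synthetic, numerically near-singular matrix:
## M <- matrix(c(1, 1, 1, 1 + 1e-14), 2, 2)
## U <- regularized.cholesky(M)
## max(abs(t(U) %*% U - M))   # small; M was perturbed only by the added jitter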
row.outer.product <- function(Phi) {
# Computes the (3d) array consisting of the outer product of each row of Phi
# with itself.
#
# Args:
# Phi: A (R x C) numeric matrix (usually a spline matrix, where the
# columns correspond to the knots, and the rows correspond to evaluation
# points).
#
# Returns:
# A 3-index numeric array, whose dimensions are c(C, C, R), such that the
# matrix at [*, *, i] is the outer product of Phi[i, ] with itself.
rows <- nrow(Phi)
cols <- ncol(Phi)
ppt <- array(apply(Phi, 1, function(x) x %*% t(x)), dim=c(cols, cols, rows))
return (ppt)
}
#########################################################
# covMatrixSE(x, sig, l, noiseFactor)
#
# returns: covariance matrix for a squared-exponential cov function
# arguments
# x: datapoints
# sig: vertical scale for variations
# l: horizontal scale for variations: correlation length
#          either a single value or a vector of length 'length(x)'
covMatrixSE <- function(x, sig=0.05, l=0.1){
N <- length(x)
covX <- matrix(nrow=N, ncol=N)
if(length(l)==1){
covX <- outer(X=x, Y=x, FUN=function(x, y) {
sig^2*exp( -0.5*((x - y) / l)^2 )
})
}
  else{ # TODO speed-up: compute l[i]^2 + l[j]^2 only once per pair
        # and exploit the symmetry of the matrix
for(i in 1:N)
for(j in 1:N)
covX[i,j] <- sig^2*sqrt( 2*l[i]*l[j]/(l[i]^2+l[j]^2) )*
exp( -(x[i]-x[j])^2/(l[i]^2+l[j]^2) )
}
# covX <- covX - sig^2 + diag(sig^2, N)
factor <- 1
while (det(covX)==0) {
covX <- covX*1e1
factor <- factor*1e1
}
list(cov=covX, factor=factor)
}
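## Sketch: draw one smooth random curve from this squared-exponential prior
## (illustrative; the grid, scales and jitter below are arbitrary choices):
## x <- seq(0, 1, by=0.01)
## K <- covMatrixSE(x, sig=0.1, l=0.2)$cov
## y <- t(chol(K + 1e-10*diag(length(x)))) %*% rnorm(length(x))
## plot(x, y, type="l")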
#########################################################
# covMatrix.DI(covMatrix)
#
# returns: determinant and inverse of the covariance matrix
# arguments
# covMatrix: covariance matrix
covMatrix.DI <- function(covMatrix){
U <- regularized.cholesky(covMatrix) # change to carefulChol
covMatrix.inv <- chol2inv(U)
covMatrix.det <- det(U)
list(inv=covMatrix.inv, det=covMatrix.det)
} | /scratch/gouwar.j/cran-all/cranData/BBEST/R/uncertainty.R |
##########################################################################################
# #
# SHINY SERVER #
# #
##########################################################################################
options(shiny.maxRequestSize=30*1024^2)
## MODULE STRUCTURE: (OLD...)
##
## LOAD DATA
## READ DATA [[depends on: vals$nB; changes: vals$dat]]
## SET ADDITIONAL PARAMETERS
## TRUNCATE DATA
## SET LAMBDA
## SET BASELINE
## RETURN BASELINE
## SET SIGMA
## RENDERING:
## TRUNCATE
## BKG BOUNDS
## SET R-SPACE LIKELIHOOD
## SET GR
## RENDERING:
## SORRY GR (FOR BANKS) [[depends on: vals$nB]]
## SET PARAMETERS FOR DIFEV AND DO FIT
## READ INPUTS AND DO FIT [[depends on: vals$dat, vals$nB, vals$datGr; changes: vals$fitRes]]
## FIT RESULTS
## DOWNLOAD
## DOWNLOAD RDATA BUTTON
## DOWNLOAD RDATA Handler
## DOWNLOAD TEXT BUTTON
## DOWNLOAD TEXT Handler
## DOWNLOAD FIX BUTTON
## DOWNLOAD FIX Handler
## DOWNLOAD GR BUTTON
## DOWNLOAD GR Handler
## CALCULATE
## INPUTS FOR GR
## HEADER
## MIN(R)
## MAX(R)
## DR
## CALC GR BUTTON
## CALCULATE GR HANDLER
## DO ITERATION
## HEADER
## EPS
## N.ITER
## DO BUTTON
## RENDER OUTPUT
## OUTPUT TABLE
## OUTPUT DATA PLOT
## DOWNLOAD DATA HANDLER
## SHOWS PROGRESS
## RENDER FIT RESULTS
## FIT RESULTS PLOT SQ
## FIT RESULTS PLOT GR
shinyServer(function(input, output, session) {
## initialization:
## dat = main data variable
## nB = number of banks
## Gr = calculated PDF
## datGr = data plus Gr likelihood info
## fitRes = results of the fit
## fitIter = results of the fit after iteration
## fitResFinal = fitIter, if exists; fitRes, if not.
## helps to leave fitRes untouched
vals <- shiny::reactiveValues(dat=list(list()), XInit=list(), nB=1,
Gr=list(), estGr=list(), datGr=list(list()),
fitRes=list(list()), fitResIter=list(list()),
fitResFinal=list(list()),
xlim=NA, ylim=NA, yRescale=c(0,1))
##########################################################################################
# #
# LOAD DATA #
# #
##########################################################################################
########################
## == READ DATA ==
shiny::observe({
inFile <- input$datafile
isolate({
if (is.null(inFile))
return(NULL)
## don't ask me why...
write(inFile$name, file="01x000.tmp")
ext <- scan(file="01x000.tmp", what="list", sep='\n') # get extension
ext <- tail(strsplit(inFile$name, '[.]')[[1]], 1)
file.remove("01x000.tmp")
vals$nB <- 1
if(ext=="sqa"){
vals$dat <- read.sqa(file=inFile$datapath)
vals$nB <- length(vals$dat)
}
else if(ext=="sqb" || ext=="sq"){
vals$dat[[1]] <- read.sqb(file=inFile$datapath)
}
else if(ext=="csv" || ext=="txt"){ # another don't ask me why...
dat.tmp <- read.csv(inFile$datapath, header=input$headerCB, sep=input$separatorRB)
vals$dat[[1]] <- dat.tmp
}
else if(ext=="RData"){
L <- sapply(inFile$datapath, function(x) mget(load(x)), simplify = FALSE)
L <- L[[1]]$fit.results
if(!is.null(L)){
N <- length(L)
vals$nB <- N
for(i in 1:N){
vals$fitRes[[i]] <- L[[i]]
dat <- list(x=L[[i]]$x, y=L[[i]]$curves$y, SB=L[[i]]$curves$SB,
sigma=L[[i]]$fit.details$sigma, lambda=L[[i]]$fit.details$lambda)
vals$dat[[i]] <- dat
}
if(N==1) vals$datGr[[1]] <- L[[1]]$fit.details$Gr
}
}
vals$xlim <- vals$ylim <- matrix(NA, nrow=vals$nB, ncol=2)
for(i in 1:vals$nB) vals$XInit[[i]] <- vals$dat[[i]]$x
vals$yRescale<- c(0,1)
})
})
##########################################################################################
# #
# SET ADDITIONAL PARAMETERS #
# #
##########################################################################################
###########################
## == TRUNCATE DATA ==
shiny::observe({
input$truncLimits
Sys.sleep(1)
isolate({
trunc <- input$truncLimits
if(is.null(trunc))
return(NULL)
if (trunc != ""){
tr <- as.numeric(unlist(strsplit(trunc, ",")))
if( (length(tr)==2) && !any(is.na(tr))){
if((tr[1] == min(vals$dat[[1]]$x)) && (tr[2] == max(vals$dat[[1]]$x)))
return(NULL)
inFile <- input$datafile
if (is.null(inFile))
return(NULL)
write(inFile$name, file="01x001.tmp")
ext <- scan(file="01x001.tmp", what="list", sep='\n') # get extension
ext <- tail(strsplit(inFile$name, '[.]')[[1]], 1)
file.remove("01x001.tmp")
vals$dat <- list(list())
vals$nB <- 1
if(ext=="sqa"){
vals$dat <- read.sqa(file=inFile$datapath)
}
else if(ext=="sqb" || ext=="sq"){
vals$dat[[1]] <- read.sqb(file=inFile$datapath)
}
else if(ext=="csv" || ext=="txt"){ # another don't ask me why...
dat.tmp <- read.csv(inFile$datapath, header=input$headerCB, sep=input$separatorRB)
vals$dat[[1]] <- dat.tmp
}
else if(ext=="RData"){
L <- sapply(inFile$datapath, function(x) mget(load(x)), simplify = FALSE)
L <- L[[1]]
L <- L$fit.results
if(!is.null(L)){
if(is.null(L$x) && !is.null(L[[1]]$x)){ #number of banks
N <- length(L)
vals$nB <- N
for(i in 1:N){
vals$fitRes[[i]] <- L[[i]]
dat <- list(x=L[[i]]$x, y=L[[i]]$curves$y, SB=L[[i]]$curves$SB,
sigma=L[[i]]$fit.details$sigma, lambda=L[[i]]$fit.details$lambda)
vals$dat[[i]] <- dat
}
}
else{ #single function
vals$fitRes[[1]] <- L
dat <- list(x=L$x, y=L$curves$y, SB=L$curves$SB, sigma=L$fit.details$sigma, lambda=L$fit.details$lambda)
datGr <- L$fit.details$Gr
vals$dat[[1]] <- dat
vals$datGr[[1]] <- datGr
vals$nB <- 1
}
}
}
vals$dat[[1]] <- trim.data(vals$dat[[1]], tr[1], tr[2])
lambda <- input$lambda
if (lambda != ""){
lam <- as.numeric(unlist(strsplit(lambda, ",")))
if(length(lam)==5)
vals$dat[[1]] <- set.lambda(vals$dat[[1]], lambda=NA, lambda_1=lam[2], lambda_2=lam[4],
lambda_0=lam[5], x_1=lam[1], x_2=lam[3])
}
if(input$setSB){
n.atoms <- as.numeric(unlist(strsplit(input$SBNAtoms, ",")))
f <- as.numeric(unlist(strsplit(input$SBScLen, ",")))
oneADP <- input$oneADP
if(!input$fitADP)
ADP <- as.numeric(unlist(strsplit(input$ADP, ",")))
else
ADP <- NA
if( (length(n.atoms)==length(f)) && ( (length(f)==length(ADP)) || ( (oneADP==TRUE) && (length(ADP)==1) ) || (input$fitADP==TRUE) ) && (length(f)>0) )
vals$dat[[1]] <- set.SB(vals$dat[[1]], SB=NA, n.atoms=n.atoms, scatter.length=f, ADP=ADP, fit=input$fitADP, oneADP=oneADP)
}
vals$dat[[1]]$sigma <- NULL
vals$xlim <- vals$ylim <- matrix(NA, nrow=vals$nB, ncol=2)
}
}
})
})
##########################
## == SET LAMBDA ==
shiny::observe({
input$lambda
isolate({ ## react on change
if(is.null(input$lambda))
return(NULL)
lambda <- input$lambda
if (lambda != ""){
lam <- as.numeric(unlist(strsplit(lambda, ",")))
if((length(lam)==5) && !any(is.na(lam))){
for(i in 1:vals$nB){
vals$dat[[i]] <- set.lambda(vals$dat[[i]], lambda=NA, lambda_1=lam[2], lambda_2=lam[4],
lambda_0=lam[5], x_1=lam[1], x_2=lam[3])
}
}
}
})
})
##########################
## == SET BASELINE ==
shiny::observe({
input$SBNAtoms
input$SBScLen
input$ADP
input$oneADP
input$fitADP
if(input$setSB){
isolate({
n.atoms <- as.numeric(unlist(strsplit(input$SBNAtoms, ",")))
f <- as.numeric(unlist(strsplit(input$SBScLen, ",")))
oneADP <- input$oneADP
if(!input$fitADP)
ADP <- as.numeric(unlist(strsplit(input$ADP, ",")))
else
ADP <- NA
if( (length(n.atoms)==length(f)) && # numbers of atoms and sc lengths are ready
( (length(f)==length(ADP)) || ((oneADP==TRUE) && (length(ADP)==1)) || # ADP factor(s) is(are) ready
(input$fitADP==TRUE) ) && # smth was indicated
(length(f)>0)
){
for(i in 1:vals$nB)
vals$dat[[i]] <- set.SB(vals$dat[[i]], SB=NA, n.atoms=n.atoms, scatter.length=f, ADP=ADP, fit=input$fitADP, oneADP=oneADP)
}
else{ # smth was indicated
for(i in 1:vals$nB)
vals$dat[[i]]$SB <- rep(0, length(vals$dat[[i]]$x))
}
})
}
})
############################
## == RETURN BASELINE ==
## restores baseline to the value specified in datafile if 'set/recalculate baseline' was cancelled
shiny::observe({
input$setSB
isolate({
if(!input$setSB){
inFile <- input$datafile
if (is.null(inFile))
return(NULL)
## don't ask me why...
write(inFile$name, file="01x002.tmp")
ext <- scan(file="01x002.tmp", what="list", sep='\n') # get extension
ext <- tail(strsplit(inFile$name, '[.]')[[1]], 1)
file.remove("01x002.tmp")
dat <- list(list())
if(ext=="sqa")
dat <- read.sqa(file=inFile$datapath)
else if(ext=="sqb" || ext=="sq" )
dat[[1]] <- read.sqb(file=inFile$datapath)
else{ # another don't ask me why...
dat.tmp <- read.csv(inFile$datapath, header=input$headerCB, sep=input$separatorRB)
dat[[1]] <- dat.tmp
}
wis <- whatIsSpecified(dat)
if(wis[[1]]$SB==TRUE){
tr <- as.numeric(unlist(strsplit(input$truncLimits, ",")))
if( !(length(tr)==2) || any(is.na(tr))){
tr <- 0
tr[1] = min(vals$dat[[1]]$x)
tr[2] = max(vals$dat[[1]]$x)
}
for(i in 1:vals$nB){
if(vals$nB==1) dat[[1]] <- trim.data(dat[[1]], tr[1], tr[2])
vals$dat[[i]]$SB <- dat[[i]]$SB
}
}
vals$xlim <- vals$ylim <- matrix(NA, nrow=vals$nB, ncol=2)
}
})
})
############################
## == SET SIGMA ==
shiny::observe({
input$calcSigmaButton
isolate({
sigPar <- as.numeric(unlist(strsplit(input$sigma, ",")))
k <- as.numeric(unlist(strsplit(input$sigmaTS, ",")))
progress <- shiny::Progress$new(session)
mess <- "Calculating, please wait..."
progress$set(message = mess, value = 0.1)
if( length(sigPar)==1 && !is.na(sigPar) && !any(is.na(k)) ){
for(i in 1:vals$nB){
vals$dat[[i]] <- set.sigma(vals$dat[[i]], n.regions=sigPar, sigma2 = k)
progress$set(message = mess, value = (i/vals$nB-0.01))
}
}
if( length(sigPar)==2 && !any(is.na(sigPar)) && !is.na(k) ){
for(i in 1:vals$nB){
vals$dat[[i]] <- set.sigma(vals$dat[[i]], x.bkg.only=sigPar, sigma2=k)
progress$set(message = mess, value = (i/vals$nB-0.01))
}
}
progress$set(message = 'Calculating, please wait...', value = 0.999)
progress$close()
})
})
############################
## == SET R-SIGMA AND PLOT G(R) ==
shiny::observe({
input$plotPrelimGr
isolate({
gridparam <- as.numeric(unlist(strsplit(input$rGrid, ",")))
progress <- shiny::Progress$new(session)
mess <- "Calculating, please wait... \n\n"
wis <- whatIsSpecified(vals$dat)
if(!is.null(input$bankNo))
bankNo <- as.numeric(input$bankNo)
else
bankNo <- 1
if(wis[[bankNo]]$sigma && (length(gridparam)==3)){
progress$set(message = mess, value = 0.1)
minR = gridparam[1]
maxR = gridparam[2]
dr = gridparam[3]
r <- seq(minR, maxR, dr)
sigma.r <- 0
delta <- c(diff(vals$dat[[bankNo]]$x)[1], diff(vals$dat[[bankNo]]$x))
cat("Calculating r-space noise... \n\n")
progress$set(message = mess, value = 0.25)
for(j in 1:length(r)){
sigma.r[j] <- sum((2/pi*delta*vals$dat[[bankNo]]$x*sin(vals$dat[[bankNo]]$x*r[j])*vals$dat[[bankNo]]$sigma)^2)
sigma.r[j] <- sqrt(sigma.r[j])
}
# avoid dividing by zero
if(sigma.r[1]==0)
sigma.r[1] <- sigma.r[2]
progress$set(message = mess, value = 0.75)
cat("Calculating FT of the experimental data... \n\n")
gr <- sineFT(f.Q=vals$dat[[bankNo]]$y-1, Q=vals$dat[[bankNo]]$x, r=r)
vals$estGr <- list(r=r, gr=gr, stdev=sigma.r)
progress$set(message = mess, value = 0.999)
}
else if(!wis[[bankNo]]$sigma && wis[[bankNo]]$x){
progress$set(message = "Estimate Q-space noise first!", value = 0.0)
Sys.sleep(2)
}
else if(length(gridparam)!=3 && wis[[bankNo]]$x){
progress$set(message = "Set r-space grid!", value = 0.0)
Sys.sleep(2)
}
progress$close()
})
})
###################################
## ##
## RENDERING OUTPUT ##
## ##
###################################
####################################
## OUTPUT TRUNCATE DATA
output$truncLimitsR <- shiny::renderUI({
if (vals$nB!=1)
return(helpText("not available for banks..."))
if (is.null(vals$dat[[1]]) || (length(vals$dat[[1]]$x)==0))
return(textInput("truncLimits", label = c("Type minimum x, maximum x"), value =""))
truncLim <- toString(c(min(vals$dat[[1]]$x), max(vals$dat[[1]]$x)))
textInput("truncLimits", label = c("Type minimum x, maximum x"), value = truncLim)
})
####################################
## OUTPUT SQA SPLIT DATA
output$sqaSplit <- shiny::renderUI({
if (vals$nB==1)
return(NULL)
downloadButton('downloadSqaSplit', 'Split by banks and download')
})
output$downloadSqaSplit <- shiny::downloadHandler(
filename = function() { paste('banks', '.zip', sep='') },
content = function(file) {
inFile <- input$datafile
sqa <- scan(file=inFile$datapath, what="list", sep="\n")
N <- length(sqa)
i.start <- 0
nBanks <- 0
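      ## Format assumption: each bank's numeric block is preceded by four
      ## header lines, the last of which starts with "#L" (the column labels);
      ## the loop below records where each block starts.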
for(i in 1:N){
if(strsplit(sqa[i], split=" ")[[1]][1]=="#L"){
i.start[nBanks+1] <- i+1
nBanks <- nBanks + 1
}
}
i.start[nBanks+1] <- length(sqa)+5
name <- 0
for(i in 1:nBanks){
        name[i] <- strsplit(inFile$name, '[.]')[[1]][1]
name[i] <- paste(name[i], "_b", i, ".sqa", sep="")
writeLines(sqa[ (i.start[i]-4):(i.start[i+1]-5)], con = name[i], sep = "\n", useBytes = FALSE)
}
zip(zipfile=file, files=name)
if(file.exists(paste0(file, ".zip"))) {file.rename(paste0(file, ".zip"), file)}
}
)
####################################
## OUTPUT BKG BOUNDS
output$bkgBoundsR <- shiny::renderUI({
if (is.null(vals$dat[[1]]) || (length(vals$dat[[1]]$y)==0))
return(textInput("bkgBounds", label = strong("Lower and upper bounds for background")))
bkgBndsArray <- matrix(0, nrow=vals$nB, ncol=2)
sbBndsArray <- matrix(0, nrow=vals$nB, ncol=2)
for(i in 1:vals$nB)
bkgBndsArray[i,] <- c(min(vals$dat[[i]]$y), max(vals$dat[[i]]$y))
bkgBnds <- c(min(bkgBndsArray[,1]), max(bkgBndsArray[,2]))
isSBAvail <- whatIsSpecified(vals$dat)[[1]]$SB
if(isSBAvail==TRUE){
for(i in 1:vals$nB)
sbBndsArray[i,] <- c(min(vals$dat[[i]]$SB), max(vals$dat[[i]]$SB))
}
sbBnds <- c(min(sbBndsArray[,1]), max(sbBndsArray[,2]))
bkgBnds[1] <- signif(bkgBnds[1] - sbBnds[2] - 0.2*abs(bkgBnds[1]) - 0.2*abs(sbBnds[2]), 3)
bkgBnds[2] <- signif(bkgBnds[2] - sbBnds[1] + 0.2*abs(bkgBnds[2]) + 0.2*abs(sbBnds[1]), 3)
return(textInput("bkgBounds", label = strong("Lower and upper bounds for background"), value = toString(bkgBnds)))
})
##########################################################################################
# #
# SET R-SPACE LIKELIHOOD #
# #
##########################################################################################
########################
## == SET Gr ==
shiny::observe({
input$setGrButton
isolate({
rmin <- input$rminInclGr
rmax <- input$rmaxInclGr
dr <- input$drInclGr
rho <- input$rhoInclGr
if(input$GrNoiseType=="gauss")
type="gaussianNoise"
if(input$GrNoiseType=="correlated")
type="correlatedNoise"
sigmaIsAvail <- whatIsSpecified(vals$dat)[[1]]$sigma
if(!is.na(rho) && !is.na(rmin) &&
!is.na(rmax) && !is.na(dr) && sigmaIsAvail){
progress <- shiny::Progress$new(session)
mess <- "Calculating, please wait..."
progress$set(message = mess, value = 0.1)
r1 <- seq(rmin, rmax, dr)
for(i in 1:vals$nB){
dat <- list(x=vals$dat[[i]]$x, y=vals$dat[[i]]$y, sigma=vals$dat[[i]]$sigma)
dat <- set.Gr(dat, r1=r1, rho.0=rho, type1=type)
progress$set(message = mess, value = i/vals$nB-0.01)
vals$datGr[[i]] <- dat$Gr
}
progress$set(message = mess, value = 0.999)
progress$close()
}
})
})
###################################
## ##
## RENDERING OUTPUT ##
## ##
###################################
####################################
## SORRY GR
output$GrNoteForBanks <- shiny::renderUI({
if(vals$nB > 1)
return(span(h4("We recommend not to use this option for individual data banks!"), style = "color:red"))
else
return(NULL)
})
##########################################################################################
# #
# SET PARAMETERS FOR DIFEV AND DO FIT #
# #
##########################################################################################
output$fitWithR <- shiny::renderUI({
if( (vals$nB>1))
return(
radioButtons('fitWith', strong('Fit background with'),
choices=c("spline functions"='fitWith.splines',
"analytical function"='fitWith.analyt'),
selected='fitWith.analyt')
)
else
return(
radioButtons('fitWith', strong('Fit background with'),
choices=c("spline functions"='fitWith.splines',
"analytical function"='fitWith.analyt'),
selected='fitWith.splines')
)
})
########################
## == Do Fit ==
shiny::observe({
if(input$doFit==0)
return(NULL)
isolate({ ## react on change
      is.x <- is.y <- is.SB <- is.sigma <- is.lambda <- TRUE
is.NP <- is.F <- is.CR <- is.itermax <- TRUE
is.bounds <- is.knots <- is.scale <- TRUE
wis <- whatIsSpecified(vals$dat)
for(i in 1:vals$nB){
if(!wis[[i]]$x)is.x <- FALSE
if(!wis[[i]]$y)is.y <- FALSE
if(!wis[[i]]$SB)is.SB <- FALSE
if(!wis[[i]]$sigma)is.sigma <- FALSE
if(!wis[[i]]$lambda)is.lambda <- FALSE
}
if( !( is.numeric(input$fitNP) && (input$fitNP>2) ) )
is.NP <- FALSE
if( !( is.numeric(input$fitItermax) && (input$fitItermax>2) ) )
is.itermax <- FALSE
if( !( is.numeric(input$fitCR) && (input$fitCR>0) && (input$fitCR<1) ) )
is.CR <- FALSE
if( !( is.numeric(input$fitF) && (input$fitF>0) && (input$fitF<2) ) )
is.F <- FALSE
if( !( !is.na(input$bkgBounds) && (length(as.numeric(unlist(strsplit(input$bkgBounds, ","))))==2) &&
!any(is.na(as.numeric(unlist(strsplit(input$bkgBounds, ","))))) ) )
is.bounds <- FALSE
if( !( !is.na(input$fitKnots) && !any(is.na(as.numeric(unlist(strsplit(input$fitKnots, ","))))) ) )
is.knots <- FALSE
if( !( !is.na(input$fitScale) && (length(as.numeric(unlist(strsplit(input$fitScale, ","))))==2) &&
!any(is.na(as.numeric(unlist(strsplit(input$fitScale, ","))))) ) )
is.scale <- FALSE
if(!is.x || !is.y || !is.sigma || !is.lambda ||
!is.NP || !is.itermax || !is.CR || !is.F ||
!is.bounds || !is.knots || !is.scale)
return(NULL)
if(!is.null(input$fitADP) && input$fitADP==TRUE){ #if we want to fit ADP
if(!((!is.null(vals$datGr[[1]])) && (length(vals$datGr[[1]])>1)))
return(NULL)
}
CR <- input$fitCR
F <- input$fitF
NP <- input$fitNP
itermax <- input$fitItermax
p.bkg <- input$pbkg
ctrl <- set.control(CR=CR, F=F, NP=NP, itermax=itermax, parallelType=1)
bounds <- as.numeric(unlist(strsplit(input$bkgBounds, ",")))
scale <- as.numeric(unlist(strsplit(input$fitScale, ",")))
knots <- as.numeric(unlist(strsplit(input$fitKnots, ",")))
if(is.null(knots) || any(is.na(knots)))
knots <- 20
knots.n <- knots.x <- NA
if(length(knots)==1)
knots.n <- knots
else
knots.x <- knots
progress <- shiny::Progress$new(session)
mess <- "Calculating, please wait. This may take a while..."
progress$set(message = mess, value = 0.1)
      for(i in 1:vals$nB){
        ## use the r-space likelihood info for the current bank, if it was set
        if(length(vals$datGr) >= i && length(vals$datGr[[i]]) > 1)
          Gr <- vals$datGr[[i]]
        else
          Gr <- NULL
        dat <- list(x=vals$dat[[i]]$x, y=vals$dat[[i]]$y, SB=vals$dat[[i]]$SB,
                    sigma=vals$dat[[i]]$sigma, lambda=vals$dat[[i]]$lambda,
                    Gr=Gr, fitADP=vals$dat[[i]]$fitADP, id=vals$dat[[i]]$id)
if(vals$nB>1)
progress$set(message = mess, value = (i/vals$nB-0.01))
else
progress$set(message = mess, value = 0.5)
analyt <- {input$fitWith=='fitWith.analyt'}
vals$fitRes[[i]] <- do.fit(dat, bounds.lower=bounds[1], bounds.upper=bounds[2],
scale=scale, knots.x=knots.x, knots.n=knots.n, analytical=analyt,
stdev=TRUE, control=ctrl, p.bkg=p.bkg, save.to="")
}
progress$set(message = 'Calculating, please wait...', value = 0.999)
vals$fitResFinal <- list(list())
cat("\n Done! \n")
progress$close()
})
})
##########################################################################################
# #
# FIT RESULTS #
# #
##########################################################################################
#################################
## ##
## DOWNLOAD ##
## ##
#################################
###############################
## DOWNLOAD RDATA BUTTON
output$downloadRDataR <- shiny::renderUI({
if( (length(vals$fitRes[[1]]) > 1))
return(downloadButton('downloadRData', 'Download fit results as .RData file'))
else
return(NULL)
})
####################################
## DOWNLOAD TO RDATA FILE!
output$downloadRData <- shiny::downloadHandler(filename = function() { paste('fit.results', '.RData', sep='') }, content = function(file) {
fit.results <- vals$fitResFinal
save(fit.results, file=file)
})
###############################
## DOWNLOAD TEXT BUTTON
output$downloadFitResAsTxtR <- shiny::renderUI({
if( (length(vals$fitRes[[1]]) > 1) && (vals$nB == 1) )
return(downloadButton('downloadFitResAsTxt', HTML('Download fit results as text file ')))
else
return(NULL)
})
####################################
## DOWNLOAD TO TEXT FILE!
output$downloadFitResAsTxt <- shiny::downloadHandler(filename = function() { paste('fit.results', '.txt', sep='') }, content = function(file) {
fit.res <- vals$fitResFinal[[1]]
write.fit.results(fit.res, file = "fit.tmp")
writeLines(readLines("fit.tmp"), file)
file.remove("fit.tmp")
})
###############################
## DOWNLOAD FIX BUTTON
output$downloadFixR <- shiny::renderUI({
if( (length(vals$fitRes[[1]]) > 1) && (vals$nB >= 1) )
return(downloadButton('downloadFix', HTML(paste("Download .fix file for", em("PDFgetN")))))
else
return(NULL)
})
####################################
## DOWNLOAD TO FIX FILE!
output$downloadFix <- shiny::downloadHandler(filename = function() { paste('corrections', '.fix', sep='') }, content = function(file) {
fit.res <- vals$fitRes
for(i in 1:vals$nB){
N <- length(fit.res[[i]]$x)
NInit <- length(vals$XInit[[i]])
if(N < NInit){
fit.res[[i]]$x <- vals$XInit[[i]]
fit.res[[i]]$curves$bkg <- c(fit.res[[i]]$curves$bkg, rep(0, NInit-N))
}
}
write.fix(fit.res, file = "fix.tmp")
writeLines(readLines("fix.tmp"), file)
file.remove("fix.tmp")
})
####################################
## APPEND FIX BUTTON
output$messageFixR <- shiny::renderUI({
if( (length(vals$fitRes[[1]]) > 1) && (vals$nB >= 1) )
return((h4("Append to existing .fix file")))
else
return(NULL)
})
output$selectFixR <- shiny::renderUI({
if( (length(vals$fitRes[[1]]) > 1) && (vals$nB >= 1) )
return( fileInput('fixfile', strong('Append to existing .fix file'), accept=c('.fix')) )
else
return(NULL)
})
output$appendFixR <- shiny::renderUI({
if( (length(vals$fitRes[[1]]) > 1) && (vals$nB >= 1) && !is.null(input$fixfile))
return(downloadButton('appendFix', HTML(paste("Download it here"))))
else
return(NULL)
})
####################################
## APPEND TO FIX FILE!
output$appendFix <- shiny::downloadHandler(filename = function() { paste('corrections', '.fix', sep='') }, content = function(file) {
inFile <- input$fixfile
fit.res <- vals$fitRes
for(i in 1:vals$nB){
N <- length(fit.res[[i]]$x)
NInit <- length(vals$XInit[[i]])
if(N < NInit){
fit.res[[i]]$x <- vals$XInit[[i]]
fit.res[[i]]$curves$bkg <- c(fit.res[[i]]$curves$bkg, rep(0, NInit-N))
}
}
writeLines(readLines(inFile$datapath), "01x001.tmp")
N <- length(fit.res)
if(!is.null(fit.res$fit.details)){
fit.res <- list(fit.res)
N <- 1
}
options(warn=-1)
for(i in 1:N){
write(c(paste("#S ",i," Correction File for Bank ",fit.res[[i]]$fit.details$id,sep=""), "#L Q MULT ADD"), file="01x001.tmp", append=TRUE)
res <- cbind(fit.res[[i]]$x, rep(1,length(fit.res[[i]]$x)), -fit.res[[i]]$curves$bkg)
write.table(res, file="01x001.tmp", append=TRUE, col.names=FALSE, row.names=FALSE, quote=FALSE, sep="\t")
}
options(warn=0)
writeLines(readLines("01x001.tmp"), file)
file.remove("01x001.tmp")
})
####################################
## DOWNLOAD GR AS TEXT!
output$downloadGrR <- shiny::renderUI({
if( (vals$nB>1) || (length(vals$fitRes[[1]]) == 0))
return(NULL)
else
return(downloadButton('downloadGr', 'Download G(r) as text file'))
})
output$downloadGr <- shiny::downloadHandler(filename = function() { paste('Gr', '.txt', sep='') }, content = function(file) {
gr <- vals$Gr
write.table(data.frame(gr), file, row.names=FALSE, quote=FALSE, sep="\t")
})
#################################
## ##
## CALCULATE ##
## ##
#################################
####################################
## ==INPUTS FOR GR==
output$outHeaderGr <- shiny::renderUI({
if( (vals$nB>1) || (length(vals$fitRes[[1]]) == 0))
return(NULL)
else
return(h4("Calculate and plot G(r)"))
})
###
output$rminCalcGrR <- shiny::renderUI({
if( (vals$nB>1) || (length(vals$fitRes[[1]]) == 0))
return(NULL)
else
return(shiny::numericInput("rminCalcGr", min=0, max=100, step=0.1,
label = strong("min(r)"),
value = 0))
})
output$rmaxCalcGrR <- shiny::renderUI({
if( (vals$nB>1) || (length(vals$fitRes[[1]]) == 0))
return(NULL)
else
return(shiny::numericInput("rmaxCalcGr", min=2, max=100, step=0.1,
label = strong("max(r)"),
value = 10))
})
output$drCalcGrR <- renderUI({
if( (vals$nB>1) || (length(vals$fitRes[[1]]) == 0))
return(NULL)
else
return(shiny::numericInput("drCalcGr", min=0.001, max=0.5, step=0.001,
label = div( span(strong("grid spacing")), span(strong(em("dr"))) ),
value = 0.01))
})
output$calcGrButtonR <- renderUI({
if( (vals$nB>1) || (length(vals$fitRes[[1]]) == 0))
return(NULL)
else
return(shiny::actionButton("calcGrButton", label = strong("Calculate")))
})
####################################
## ==CALCULATE GR==
shiny::observe({
if(is.null(input$calcGrButton))
return(NULL)
if(input$calcGrButton==0)
return(NULL)
isolate({
if( (vals$nB>1) || (length(vals$fitRes[[1]]) == 0))
return(NULL)
fit.res <- vals$fitResFinal[[1]]
progress <- Progress$new(session)
mess <- "Calculating G(r), please wait..."
progress$set(message = mess, value = 0.5)
if(is.numeric(input$rhoInclGr))
rho.0 <- input$rhoInclGr
else
rho.0 <- 0
if(is.numeric(input$rminCalcGr))
minR <- input$rminCalcGr
else
minR <- 0
if(is.numeric(input$rmaxCalcGr))
maxR <- input$rmaxCalcGr
else
maxR <- 10
if(is.numeric(input$drCalcGr))
dr <- input$drCalcGr
else
dr <- 0.01
vals$Gr <- calc.Gr(fit.results=fit.res, rho.0=rho.0, r.min=minR, r.max=maxR, dr=dr, plot=FALSE)
progress$set(message = "Done!", value = 0.999)
progress$close()
})
})
###############################
## ##
## = ITERATIONS = ##
## ##
###############################
####################################
## == INPUTS ==
output$iterHeader <- shiny::renderUI({
    if( (vals$nB>1) || (length(vals$fitRes[[1]]) == 0) ||
        length(vals$datGr[[1]])==0 || !all(is.na(vals$fitRes[[1]]$pars)))
return(NULL)
else
return(h4("Perform iterative Bayesian background estimation"))
})
output$iterTechniqueR <- shiny::renderUI({
    if( (vals$nB>1) || (length(vals$fitRes[[1]]) == 0) ||
        length(vals$datGr[[1]])==0 || !all(is.na(vals$fitRes[[1]]$pars)))
return(NULL)
else
return(radioButtons('iterTechnique', '',
choices=c("Local gradient descent algorithm"='local', "Global DifEv algorithm"='global'),
selected='global'
))
})
output$iterEpsR <- shiny::renderUI({
    if( (vals$nB>1) || (length(vals$fitRes[[1]]) == 0) ||
        length(vals$datGr[[1]])==0 || !all(is.na(vals$fitRes[[1]]$pars)) )
return(NULL)
else
return(numericInput("iterEps", label = strong("Convergence tolerance"),
min=0, max=0.1, step=1e-4, value = 1e-3))
})
output$iterNIterR <- shiny::renderUI({
    if( (vals$nB>1) || (length(vals$fitRes[[1]]) == 0) ||
        length(vals$datGr[[1]])==0 || !all(is.na(vals$fitRes[[1]]$pars)))
return(NULL)
else
return(numericInput("iterNIter", label = strong("The maximum iteration for a gradient descent method"),
min=0, max=1e6, step=1e5, value = 1e5))
})
output$doIterationR <- shiny::renderUI({
    if( (vals$nB>1) || (length(vals$fitRes[[1]]) == 0) ||
        length(vals$datGr[[1]])==0 || !all(is.na(vals$fitRes[[1]]$pars)) )
return(NULL)
else
return(shiny::actionButton("doIteration", label = strong("Try iteration")))
})
####################################
  ## == PERFORMING ITERATION ==
shiny::observe({
if(is.null(input$doIteration))
return(NULL)
if(input$doIteration==0)
return(NULL)
isolate({
if( (vals$nB>1) || (length(vals$fitRes[[1]]) == 0) || (length(vals$datGr[[1]])==0) )
return(NULL)
fit.res <- vals$fitRes[[1]]
progress <- Progress$new(session)
mess <- "Calculating, please wait..."
progress$set(message = mess, value = 0.3)
rho.0 <- fit.res$fit.details$Gr$rho.0
if(is.numeric(input$iterNIter))
n.iter <- input$iterNIter
else
n.iter <- 100000
if(is.numeric(input$iterEps))
eps <- input$iterEps
else
eps <- 1e-3
if(input$iterTechnique=="local")
local=TRUE
else
local=FALSE
fit.res <- do.iter(fit.results=fit.res, local=local, eps=eps, n.iter=n.iter, save.to="")
vals$fitResIter[[1]] <- fit.res
mess <- "Recalculating G(r)..."
progress$set(message = mess, value = 0.75)
# Recalculating G(r)...
if(is.numeric(input$rminCalcGr))
minR <- input$rminCalcGr
else
minR <- 0
if(is.numeric(input$rmaxCalcGr))
maxR <- input$rmaxCalcGr
else
maxR <- 10
if(is.numeric(input$drCalcGr))
dr <- input$drCalcGr
else
dr <- 0.01
vals$Gr <- calc.Gr(fit.results=fit.res, rho.0=rho.0, r.min=minR, r.max=maxR, dr=dr, plot=FALSE)
progress$set(message = mess, value = 0.99)
progress$close()
})
})
####
shiny::observe({
if(length(vals$fitResIter[[1]])==0)
vals$fitResFinal <- vals$fitRes
else
vals$fitResFinal <- vals$fitResIter
})
##########################################################################################
# #
# RENDERING OUTPUT #
# #
##########################################################################################
####################################
## == OUTPUT TABLE ==
output$datatable <- renderTable({
if (length(vals$dat[[1]])==0)
return(data.frame())
dat.table <- list()
for(i in 1:vals$nB){
dat.table[[i]] <- unclass(vals$dat[[i]])
dat.table[[i]]$fitADP <- dat.table[[i]]$Gr <- NULL
dat.table[[i]] <- data.frame(dat.table[[i]])
for(j in 1:length(colnames(dat.table[[i]]))){
colnames(dat.table[[i]])[j] <- if(vals$nB==1) paste(colnames(dat.table[[i]])[j],sep="") else paste(colnames(dat.table[[i]])[j],toString(i), sep="")
}
}
k <- 1
while(k < vals$nB){
k <- k + 1
dat.table[[1]] <- cbind(dat.table[[1]], dat.table[[k]])
}
return(dat.table[[1]])
})
####################################
## DOWNLOAD DATA
output$downloadData <- shiny::downloadHandler(filename = function() { paste('data', '.txt', sep='') }, content = function(file) {
# if(length(vals$dat[[1]])==0)
# return(NULL)
dat.table <- list()
for(i in 1:vals$nB){
dat.table[[i]] <- unclass(vals$dat[[i]])
dat.table[[i]]$fitADP <- dat.table[[i]]$Gr <- NULL
dat.table[[i]] <- data.frame(dat.table[[i]])
for(j in 1:length(colnames(dat.table[[i]]))){
colnames(dat.table[[i]])[j] <- if(vals$nB==1) paste(colnames(dat.table[[i]])[j],sep="") else paste(colnames(dat.table[[i]])[j],toString(i), sep="")
}
}
k <- 1
while(k < vals$nB){
k <- k + 1
dat.table[[1]] <- cbind(dat.table[[1]], dat.table[[k]])
}
write.table(dat.table[[1]], file, row.names=FALSE, quote=FALSE, sep="\t")
})
####################################
## == OUTPUT DATA PLOT ==
###############
# SELECT BANK
output$selectBank <- shiny::renderUI({
if (vals$nB==1)
return(NULL)
choices <- list()
for(i in 1:vals$nB){
name <- paste("Showing: Bank #", i)
id <- paste(i)
choices[[name]] <- id
}
return(
selectInput("bankNo", label = "",
choices = choices,
selected = "1",
width='160px')
)
})
###############
# PLOT FUNCTION
dataPlotFunc <- function(onHover=TRUE){
dat <- vals$dat
toPlot <- whatIsSpecified(dat)
N <- vals$nB
n.x <- n.y <- 1
if(N>=2) n.y <- 2
if(N>=3) n.x <- 2
par(mfrow=c(1, 1), mar=c(5,4,1,1))
# par(oma = c(2, 1, 1, 1))
if(!is.null(input$bankNo))
bankNo <- as.numeric(input$bankNo)
else
bankNo <- 1
if(N==1){
xlab=paste("x")
ylab=paste("y")
}
else{
xlab=paste("x", bankNo, sep="")
ylab=paste("y", bankNo, sep="")
}
xlim <- c(min(vals$dat[[bankNo]]$x), max(vals$dat[[bankNo]]$x))
ylim <- c(min(vals$dat[[bankNo]]$y), max(vals$dat[[bankNo]]$y))
if(!is.null(input$selectPlot) && input$selectPlot==paste("bank", bankNo, sep="")){
xlim <- input$plotLimX
ylim <- input$plotLimY
}
plot(x=dat[[bankNo]]$x, y=dat[[bankNo]]$y, t="l", xlab=xlab, ylab=ylab,
xlim=xlim, ylim=ylim, lwd=2)
par(xpd=TRUE)
if(onHover){
hover <- input$mainHover
if(!is.null(hover)){
abline(v=hover$x, lty=2)
abline(h=hover$y, lty=2)
legend(hover$x, hover$y, sprintf("x=%.4g y=%.4g", hover$x, hover$y), bty="n", pt.lwd=0, text.col=2, cex=0.7)
}
click <- input$mainClick
if(!is.null(click)){
input$mainClick
isolate({
abline(v=click$x, lty=2)
abline(h=click$y, lty=2)
legend(click$x, click$y, sprintf("x=%.4g y=%.4g", click$x, click$y), bty="n", pt.lwd=0, text.col=2, cex=0.7)
})
}
}
par(xpd=FALSE)
if(toPlot[[bankNo]]$SB) lines(dat[[bankNo]]$x, dat[[bankNo]]$SB, col=3, lwd=2)
if(toPlot[[bankNo]]$sigma){
if(toPlot[[bankNo]]$smoothed){
lines(dat[[bankNo]]$x, dat[[bankNo]]$smoothed, col="cyan", lwd=2)
lines(dat[[bankNo]]$x, dat[[bankNo]]$smoothed+2*dat[[bankNo]]$sigma, col=2)
lines(dat[[bankNo]]$x, dat[[bankNo]]$smoothed-2*dat[[bankNo]]$sigma, col=2)
}
else{
lines(dat[[bankNo]]$x, dat[[bankNo]]$y+2*dat[[bankNo]]$sigma, col=2)
lines(dat[[bankNo]]$x, dat[[bankNo]]$y-2*dat[[bankNo]]$sigma, col=2)
}
}
if(toPlot[[bankNo]]$lambda) lines(dat[[bankNo]]$x, dat[[bankNo]]$lambda, col=6, lwd=2)
}
###############
# PLOT RENDER
output$dataPlot <- renderPlot({
dat <- vals$dat
if (length(dat[[1]])==0)
return(NA)
toPlot <- whatIsSpecified(dat)
if (!toPlot[[1]]$x || !toPlot[[1]]$y)
return(NA)
dataPlotFunc()
})
legendPlotFunc <- function(){
par(mfrow=c(1,1), mar=c(1, 2, 2, 2) + 0.1)
par(fig = c(0, 1, 0, 1), oma = c(0, 0, 0, 0), mar = c(0, 0, 0, 0), new = TRUE)
# par(fig = c(0, 1, 0, 1), oma = c(3, 3, 3, 3), mar = c(0, 0, 0, 0), new = TRUE)
plot(0, 0, type = "n", bty = "n", xaxt = "n", yaxt = "n")
legend("bottom", c("data", "baseline", "lambda", "smoothed", "+/-2*stdev"), xpd = TRUE, horiz = TRUE,
inset = c(0,0), bty = "n", lty=1, col = c(1,3,6,"cyan",2), lwd=2, cex = 1)# par(xpd=FALSE)
}
output$legendPlot <- renderPlot({
if (length(vals$dat[[1]])==0)
return(NA)
legendPlotFunc()
})
###############
# DOWNLOAD BUTTON
output$downloadMainPlotR <- shiny::renderUI({
dat <- vals$dat
if (length(dat[[1]])==0)
return(NULL)
toPlot <- whatIsSpecified(dat)
if (!toPlot[[1]]$x || !toPlot[[1]]$y)
return(NULL)
return(downloadButton('downloadMainPlot', 'Download plot'))
})
####################################
  ## DOWNLOAD HANDLER
output$downloadMainPlot <- shiny::downloadHandler(
filename = function() { 'data.png' },
content = function(file) {
plotToPng <- function(){
dataPlotFunc(onHover=FALSE)
legendPlotFunc()
}
png(file, width=12, height=8, units="in", res=600, pointsize=12)
print(plotToPng())
dev.off()
}
)
###############
# DOWNLOAD BUTTON
output$downloadestGrPlotR <- shiny::renderUI({
dat <- vals$dat
if (length(dat[[1]])==0)
return(NULL)
if (length(vals$estGr)==0)
return(NULL)
toPlot <- whatIsSpecified(dat)
if (!toPlot[[1]]$x || !toPlot[[1]]$y)
return(NULL)
return(downloadButton('downloadestGrPlot', 'Download plot'))
})
####################################
  ## DOWNLOAD HANDLER
output$downloadestGrPlot <- shiny::downloadHandler(
filename = function() { 'estGr.png' },
content = function(file) {
PDF <- vals$estGr
stdev <- PDF$stdev*2
gr <- PDF$gr
r <- PDF$r
rho.0 <- 0
xlim <- ylim <- NA
if(!is.null(input$selectPlot) && input$selectPlot==paste("estgr")){
xlim <- input$plotLimX
ylim <- input$plotLimY
}
png(file, width=12, height=8, units="in", res=600, pointsize=12)
print(fplot.Gr(r=r, gr=gr, stdev=stdev, rho.0=rho.0, xlim=xlim, ylim=ylim, title="Estimated G(r)"))
dev.off()
}
)
###############
# DOWNLOAD BUTTON
output$downloadestGrDataR <- shiny::renderUI({
dat <- vals$dat
if (length(dat[[1]])==0)
return(NULL)
if (length(vals$estGr)==0)
return(NULL)
toPlot <- whatIsSpecified(dat)
if (!toPlot[[1]]$x || !toPlot[[1]]$y)
return(NULL)
return(downloadButton('downloadestGrData', 'Download G(r) as text file'))
})
####################################
  ## DOWNLOAD HANDLER
output$downloadestGrData <- shiny::downloadHandler(
filename = function() { 'estGr.txt' },
content = function(file) {
PDF <- vals$estGr
stdev <- PDF$stdev
gr <- PDF$gr
r <- PDF$r
write.table(data.frame(r, gr, stdev, gr-2*stdev, gr+2*stdev), file, row.names=FALSE, quote=FALSE, sep="\t")
}
)
#############################################
## SHOWS PROGRESS IN PARAMETER ESTIMATIONS
output$progress <- shiny::renderUI({
# if(length(vals$dat[[1]])==0)
# return(h3(" "))
turnGreen <- whatIsSpecified(vals$dat)
x.pr <- span(" x ", style = "color:#33CC00")
y.pr <- span("y ", style = "color:#33CC00")
SB.pr <- span("SB ", style = "color:#33CC00")
sigma.pr <- span(HTML("ε "), style = "color:#33CC00")
lambda.pr <- span(HTML("λ "), style = "color:#33CC00")
Gr.pr <- span("G(r) ", style = "color:#33CC00")
DifEv.pr <- span("DifEv ", style = "color:#33CC00")
# write(vals$ind, file="aaa.txt")
for(i in 1:vals$nB){
if(!turnGreen[[i]]$x) x.pr <- span(" x ", style = "color:red")
if(!turnGreen[[i]]$y) y.pr <- span("y ", style = "color:red")
if(!turnGreen[[i]]$SB) SB.pr <- span("SB ", style = "color:#B8B8B8")
if(!turnGreen[[i]]$sigma) sigma.pr <- span(HTML("ε "), style = "color:red")
if(!turnGreen[[i]]$lambda) lambda.pr <- span(HTML("λ "), style = "color:red")
# write(turnGreen[[i]], file="aa.txt")
}
if(!((!is.null(vals$datGr[[1]])) && (length(vals$datGr[[1]])>1)))
if(is.null(input$fitADP) || input$fitADP==FALSE)
Gr.pr <- span("G(r) ", style = "color:#B8B8B8 ")
else
Gr.pr <- span("G(r) ", style = "color:red")
DifEv <- TRUE
if( !( is.numeric(input$fitNP) && (input$fitNP>2) ) )
DifEv <- FALSE
if( !( is.numeric(input$fitItermax) && (input$fitItermax>2) ) )
DifEv <- FALSE
if( !( is.numeric(input$fitCR) && (input$fitCR>0) && (input$fitCR<1) ) )
DifEv <- FALSE
if( !( is.numeric(input$fitF) && (input$fitF>0) && (input$fitF<2) ) )
DifEv <- FALSE
if( is.null(input$bkgBounds) || is.na(input$bkgBounds) || (length(as.numeric(unlist(strsplit(input$bkgBounds, ","))))!=2) ||
any(is.na(as.numeric(unlist(strsplit(input$bkgBounds, ","))))) )
DifEv <- FALSE
if( is.na(input$fitKnots) || any(is.na(as.numeric(unlist(strsplit(input$fitKnots, ","))))) )
DifEv <- FALSE
if( !( !is.na(input$fitScale) && (length(as.numeric(unlist(strsplit(input$fitScale, ","))))==2) &&
!any(is.na(as.numeric(unlist(strsplit(input$fitScale, ","))))) ) )
DifEv <- FALSE
if(DifEv==FALSE) DifEv.pr <- span("DifEv ", style = "color:red")
## returns
h3(x.pr, y.pr, lambda.pr, SB.pr, sigma.pr, Gr.pr, DifEv.pr, align="left")
})
##################################################
## ##
## RENDER FIT RESULTS ##
## ##
##################################################
shiny::observe({
input$selectPlot
input$plotLimY
input$plotLimX
isolate({
      if(is.null(dim(vals$xlim)) || is.null(dim(vals$ylim)) ||
         any(dim(vals$xlim)!=c(vals$nB,2)) || any(dim(vals$ylim)!=c(vals$nB,2)))
        vals$xlim <- vals$ylim <- matrix(NA, nrow=vals$nB, ncol=2)
for(i in 1:vals$nB){
if(!is.null(input$selectPlot) && input$selectPlot==paste("fit", i, sep="")){
vals$xlim[i,] <- input$plotLimX
vals$ylim[i,] <- input$plotLimY
}
}
})
})
####################################
## == FIT RESULTS PLOT -- SQ ==
output$fitResPlot <- renderPlot({
if( (length(vals$fitRes[[1]]) > 1) ){
fit.res <- vals$fitRes
xlim <- vals$xlim
ylim <- vals$ylim
N <- vals$nB
if(N>1)
return(mPlot.results.banks(fit.res, xlim=xlim, ylim=ylim))
else
return(mPlot.results(fit.res[[1]], xlim=xlim, ylim=ylim))
}
else
return(NA)
})
#################
# DOWNLOAD BUTTON
output$downloadFitResPlotR <- shiny::renderUI({
if( (length(vals$fitRes[[1]]) > 1))
return(downloadButton('downloadFitResPlot', 'Download plot'))
else
return(NULL)
})
##################
  ## DOWNLOAD HANDLER
output$downloadFitResPlot <- shiny::downloadHandler(
filename = function() { 'fitPlot.png' },
content = function(file) {
xlim <- vals$xlim
ylim <- vals$ylim
png(file, width=12, height=8, units="in", res=600, pointsize=12)
print(if(vals$nB>1) {mPlot.results.banks(vals$fitRes, xlim=xlim, ylim=ylim)}
else {mPlot.results(vals$fitRes[[1]], xlim=xlim, ylim=ylim)} )
dev.off()
}
)
####################################
## == FIT RESULTS PLOT -- Gr ==
output$GrPlot <- renderPlot({
if( (length(vals$fitRes[[1]]) > 1) && (vals$nB==1) && (length(vals$Gr)!=0)){
PDF <- vals$Gr
stdev <- PDF$stdev*2
gr <- PDF$gr
r <- PDF$r
rho.0 <- 0
if(!is.null(input$rhoInclGr) && is.numeric(input$rhoInclGr))
rho.0 <- input$rhoInclGr
if(!is.null(vals$datGr[[1]]$rho.0))
rho.0 <- vals$datGr[[1]]$rho.0
xlim <- ylim <- NA
if(!is.null(input$selectPlot) && input$selectPlot==paste("gr")){
xlim <- input$plotLimX
ylim <- input$plotLimY
}
fplot.Gr(r=r, gr=gr, stdev=stdev, rho.0=rho.0, xlim=xlim, ylim=ylim)
}
else
return(NA)
})
####################################
## == FIT RESULTS PLOT -- Gr ==
output$prelimGrPlot <- renderPlot({
if(length(vals$estGr)!=0){
PDF <- vals$estGr
stdev <- PDF$stdev*2
gr <- PDF$gr
r <- PDF$r
rho.0 <- 0
if(!is.null(input$rhoInclGr) && is.numeric(input$rhoInclGr))
rho.0 <- input$rhoInclGr
if(!is.null(vals$datGr[[1]]$rho.0))
rho.0 <- vals$datGr[[1]]$rho.0
xlim <- ylim <- NA
if(!is.null(input$selectPlot) && input$selectPlot==paste("estgr")){
xlim <- input$plotLimX
ylim <- input$plotLimY
}
fplot.Gr(r=r, gr=gr, stdev=stdev, rho.0=rho.0, xlim=xlim, ylim=ylim, title="Estimated G(r)")
}
else
return(NA)
})
#################
# DOWNLOAD BUTTON
output$downloadGrPlotR <- shiny::renderUI({
if( (length(vals$fitRes[[1]]) > 1) && (vals$nB==1) && (length(vals$Gr)!=0))
return(downloadButton('downloadGrPlot', 'Download plot'))
else
return(NULL)
})
##################
  ## DOWNLOAD HANDLER
output$downloadGrPlot <- shiny::downloadHandler(
filename = function() { 'Gr.png' },
content = function(file) {
rho.0 <- if(!is.null(vals$datGr[[1]]$rho.0)) {vals$datGr[[1]]$rho.0} else {input$rhoInclGr}
xlim <- ylim <- NA
if(!is.null(input$selectPlot) && input$selectPlot==paste("gr")){
xlim <- input$plotLimX
ylim <- input$plotLimY
}
png(file, width=12, height=8, units="in", res=600, pointsize=12)
print(fplot.Gr(r=vals$Gr$r, gr=vals$Gr$gr, stdev=vals$Gr$stdev*2,
rho.0=rho.0, xlim=xlim, ylim=ylim))
dev.off()
}
)
# observe({
# # Initially will be empty
# if (is.null(input$mainClick)){
# return(NULL)
# }
# if (input$selectRegion==0){
# return(NULL)
# }
# isolate({
# vals$xlim[vals$selInd] <- input$mainClick$x
# vals$ylim[vals$selInd] <-input$mainClick$y
# if(vals$selInd==1)
# vals$selInd <- 2
# else
# vals$selInd <- 1
# })
# })
# observe({
# input$resetRegion
# if (input$resetRegion==0)
# return(NULL)
# isolate({
# if(!is.null(vals$dat[[1]]$x) && !is.null(vals$dat[[1]]$y)){
# x <- vals$dat[[1]]$x
# y <- vals$dat[[1]]$y
# vals$xlim <- c(min(x), max(x))
# vals$ylim <- c(min(y), max(y))
# }
# })
# })
# output$lims <- renderTable({
# if (length(vals$dat[[1]])==0)
# return(data.frame())
# dat.table <- matrix(c(vals$xlim[1], vals$xlim[2], vals$ylim[1], vals$ylim[2]), nrow=2, ncol=2, byrow=FALSE)
# dat.table <- data.frame(dat.table)
# return(dat.table)
# })
####################################
## == PLOT OPTIONS ==
output$selectPlotR <- shiny::renderUI({
if(length(vals$dat[[1]])==0)
return(NULL)
choices <- list()
# BANKS
if (vals$nB>1){
for(i in 1:vals$nB){
name <- paste("Data bank #", i)
id <- paste("bank", i, sep="")
choices[[name]] <- id
}
if(length(vals$fitRes[[1]]) > 1){
for(i in 1:vals$nB){
id <- paste("fit", i, sep="")
name <- paste("Background estimation for bank #", i)
choices[[name]] <- id
}
}
} # SINGLE DATASET
else{
choices <- list("Data plot"=paste("bank", 1, sep=""))
if(length(vals$fitRes[[1]]) > 1)
choices[["Background estimation"]] <- paste("fit", 1, sep="")
if(length(vals$Gr)!=0)
choices[["Corrected G(r)"]] = "gr"
if(length(vals$estGr)!=0)
choices[["Estimated G(r)"]] = "estgr"
}
return(
selectInput("selectPlot",
label = strong("Select plot to change"),
choices = choices,
width="100%")
)
})
output$youCanSeePlot <- shiny::renderUI({
if(length(vals$dat[[1]])==0 || is.null(input$selectPlot))
return(NULL)
selectPlot <- substr(input$selectPlot, 1, 3)
if(selectPlot=="ban" || selectPlot=="est")
s1 <- div(span("(you can find it on the"), span(em("'Data Plot'"), style = "color:#0000FF;"), span("inset)"))
else
s1 <- div(span("(you can find it on the"), span(em("'Fit Results Plot'"), style = "color:#0000FF;"), span("inset)"))
return(s1)
})
output$axisLimsTxt <- shiny::renderUI({
if(length(vals$dat[[1]])==0 || is.null(input$selectPlot))
return(NULL)
return(strong("Set axis limits"))
})
output$plotLimXR <- shiny::renderUI({
if(length(vals$dat[[1]])==0 || is.null(input$selectPlot))
return(NULL)
wis <- whatIsSpecified(vals$dat)
if (!wis[[1]]$x)
return(NULL)
ps <- input$selectPlot
if (vals$nB>1){
for(i in 1:vals$nB){
fitN <- paste("fit", i, sep="")
bankN <- paste("bank", i, sep="")
if(ps==fitN || ps==bankN){
minX <- min(vals$dat[[i]]$x)
maxX <- max(vals$dat[[i]]$x)
}
}
}
else{
if(ps=="gr"){
minX <- min(vals$Gr$r)
maxX <- max(vals$Gr$r)
}
else if(ps=="estgr"){
minX <- min(vals$estGr$r)
maxX <- max(vals$estGr$r)
}
else{
minX <- min(vals$dat[[1]]$x)
maxX <- max(vals$dat[[1]]$x)
}
}
dx=(maxX-minX)/1000
return(sliderInput("plotLimX", strong("x limits"),
min = (minX-0.1*abs(minX)), max = (maxX+0.1*abs(maxX)),
step=dx, value = c(minX, maxX))
)
})
output$plotLimYR <- shiny::renderUI({
if(length(vals$dat[[1]])==0 || is.null(input$selectPlot))
return(NULL)
wis <- whatIsSpecified(vals$dat)
if (!wis[[1]]$y)
return(NULL)
ps <- input$selectPlot
if (vals$nB>1){
for(i in 1:vals$nB){
fitN <- paste("fit", i, sep="")
bankN <- paste("bank", i, sep="")
if(ps==fitN || ps==bankN){
if(wis[[i]]$SB){
minY <- min(vals$dat[[i]]$y-vals$dat[[i]]$SB)
maxY <- max(vals$dat[[i]]$y-vals$dat[[i]]$SB)
}
else{
minY <- min(vals$dat[[i]]$y)
maxY <- max(vals$dat[[i]]$y)
}
}
}
}
else{
if(ps=="gr"){
minY <- min(vals$Gr$gr)
maxY <- max(vals$Gr$gr)
}
else if(ps=="estgr"){
minY <- min(vals$estGr$gr)
maxY <- max(vals$estGr$gr)
}
else{
if(wis[[1]]$SB){
minY <- min(vals$dat[[1]]$y-vals$dat[[1]]$SB)
maxY <- max(vals$dat[[1]]$y-vals$dat[[1]]$SB)
}
else{
minY <- min(vals$dat[[1]]$y)
maxY <- max(vals$dat[[1]]$y)
}
}
}
maxYnew <- vals$yRescale[2]*(maxY-minY) + minY
minYnew <- vals$yRescale[1]*(maxY-minY) + minY
maxY <- maxYnew
minY <- minYnew
dy=(maxY-minY)/2500
return(sliderInput("plotLimY", strong("y limits"),
min = minY-0.4*abs(minY), max = maxY+0.4*abs(maxY),
step=dy, value = c(minY, maxY))
)
})
shiny::observe({
if(input$rescaleY==0)
return(NULL)
isolate({ ## react on change
if(length(vals$dat[[1]])==0 || is.null(input$selectPlot))
return(NULL)
wis <- whatIsSpecified(vals$dat)
if (!wis[[1]]$y)
return(NULL)
ps <- input$selectPlot
if (vals$nB>1){
for(i in 1:vals$nB){
fitN <- paste("fit", i, sep="")
bankN <- paste("bank", i, sep="")
if(ps==fitN || ps==bankN){
if(wis[[i]]$SB){
minY <- min(vals$dat[[i]]$y-vals$dat[[i]]$SB)
maxY <- max(vals$dat[[i]]$y-vals$dat[[i]]$SB)
}
else{
minY <- min(vals$dat[[i]]$y)
maxY <- max(vals$dat[[i]]$y)
}
}
}
}
else{
if(ps=="gr"){
minY <- min(vals$Gr$gr)
maxY <- max(vals$Gr$gr)
}
else if(ps=="estgr"){
minY <- min(vals$estGr$gr)
maxY <- max(vals$estGr$gr)
}
else{
if(wis[[1]]$SB){
minY <- min(vals$dat[[1]]$y-vals$dat[[1]]$SB)
maxY <- max(vals$dat[[1]]$y-vals$dat[[1]]$SB)
}
else{
minY <- min(vals$dat[[1]]$y)
maxY <- max(vals$dat[[1]]$y)
}
}
}
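      # store the chosen slider limits as fractions of the current data range,
      # so plotLimYR can rebuild the slider with the same relative zoom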
ylim <- input$plotLimY
vals$yRescale <- (ylim - minY)/(maxY-minY)
})
})
shiny::observe({
if(input$resetY==0)
return(NULL)
isolate({
vals$yRescale <- c(0,1)
})
})
})
| /scratch/gouwar.j/cran-all/cranData/BBEST/inst/gui/server.R |
############################################################################
##
##
## TITLE PANEL
## SIDEBAR PANEL
## MAIN MENU
## LOCAL MENU
## LOAD DATA
## PREPARE DATA
## TRUNCATE DATA
## LAMBDA
## BASELINE
## NOISE
## G(r) INFORMATION
## DIFFERENTIAL EVOLUTION
## FIT RESULTS
## Plot Bounds
## MAIN PANEL
# CB Check Box
# RB Radio Button
# TI Text Input
#
# mainRB: RadioButton Main Menu
# data file: upload data
# headerCB: datafile with header
#
shinyUI(fluidPage(
############################################################################
## === TITLE PANEL ===
# titlePanel(title="Bayesian Background Estimation", windowTitle="BBEST"),
tags$head(
tags$title('BBEST'),
h2(span("B", style="letter-spacing: -0.3em; color:#0055FF"), span("ayesian ", style = "color:#0099FF;"),
span("B", style="letter-spacing: -0.3em; color:#0055FF"), span("ackground", style = "color:#0099FF;"),
span("E", style="letter-spacing: -0.3em; color:#0055FF"), span("S", style="letter-spacing: -0.3em; color:#0055FF"),
span("T", style="letter-spacing: -0.3em; color:#0055FF"), span("imation", style = "color:#0099FF;") )
),
############################################################################
######################################
## === SIDEBAR PANEL ===
sidebarLayout(
sidebarPanel(
######################################
## == MAIN MENU ==
tags$table(style="border: 3px solid #E8E8E8; border-style:solid; width: 100%;" ,
tags$tr(
tags$td(
h3("Main Menu"),
p(),
radioButtons('mainRB', '',
choices=c("Load data"='load',
"Set additional parameters"='prepare',
"Set real-space condition"='gr',
"Optimize background with DifEv"='difev',
"Fit results"='save',
"Plot options"='plot'),
selected='load'
)
)
)
),
######################################
## == LOCAL MENU ==
tags$hr(),
tags$table(style="border: 3px solid #E8E8E8; padding:15px;border-style:solid; width: 100%;" ,
tags$tr(
tags$td(
## = LOAD DATA =
conditionalPanel(
condition = "input.mainRB == 'load'",
h3("Load Data"),
p(),
fileInput('datafile', strong('Choose RData, CSV, text, .sqa or .sqb file'),
accept=c('text/csv', 'text/comma-separated-values,text/plain', '.csv', '.sqa', '.sqb', '.sq', '.RData')),
p(),
checkboxInput('headerCB', strong('Data include header'), TRUE),
p(),
radioButtons('separatorRB', strong('Separator'),
c(Comma=',',
Semicolon=';',
Tab='\t',
Space=' '),
selected='\t'
),
p(),
uiOutput("sqaSplit")
),
## = PREPARE DATA =
conditionalPanel(
condition = "input.mainRB == 'prepare'",
h3("Prepare data"),
p(),
## TRUNCATE DATA
strong("Truncate data"),
uiOutput("truncLimitsR"),
p(),
## LAMBDA
strong("Useful signal level"),
textInput("lambda", label = c("Type x_1, lambda_1, x_2, lambda_2, lambda_0"), value = ""),
p(),
## BASELINE
strong("Baseline"),
checkboxInput("setSB", "Set/Recalculate baseline"),
conditionalPanel(
condition = "input.setSB == true",
textInput("SBNAtoms", label = c("Type number of atoms of each type per unit cell"), value = ""),
textInput("SBScLen", label = c("Type neutron scattering lengths"), value = ""),
checkboxInput("oneADP", "use one ADP for all atoms"),
checkboxInput("fitADP", "fit ADP(s)"),
conditionalPanel(
condition = "input.fitADP == false",
textInput("ADP", label = c("Type ADP(s)"), value = "")
)
),
# p(),
tags$hr(),
## NOISE
strong("Noise level"),
textInput("sigma", label = c("Type number of regions or bounds for a signal-free region"), value = ""),
textInput("sigmaTS", label = c("Type estimated noise variance"), value = "0.1"),
p(style="margin:0; padding:0;"),
actionButton("calcSigmaButton", label = "Estimate noise"),
p(),
## P.BKG
strong("P(bkg)"),
numericInput("pbkg", min=-1, max=1, step=0.01,
label = "Type probability that data points contain no signal contribution (only background). Type '-1' to estimate P(bkg) iteratively",
value = 0.5),
p(),
## G(R) PLOT
strong("G(r)"),
checkboxInput("estGr", "Estimate PDF"),
conditionalPanel(
condition = "input.estGr == true",
textInput("rGrid", label = c("Type minimum r, maximum r, grid spacing dr"), value = ""),
p(style="margin:0; padding:0;"),
actionButton("plotPrelimGr", label = "Plot G(r)")
)
),
## = G(r) INFORMATION =
conditionalPanel(
condition = "input.mainRB == 'gr'",
p(),
uiOutput("GrNoteForBanks"),
h3("Low-r Correction"),
p(),
checkboxInput("inclGr",
strong(HTML(paste("Use low-", em("r"), " conditions in ", em("G(r) "),
"to construct the ", em("r"), "-space likelihood", sep = "")))),
tags$hr(),
conditionalPanel(
condition = "input.inclGr == true",
selectInput("GrNoiseType", label = strong("Condition type"),
choices = list("Gaussian noise" = "gauss",
"Correlated noise" = "correlated"),
selected = "gauss"),
numericInput("rhoInclGr", value=NA, min=0, max=1, step=0.0000001,
label = strong(HTML(paste("Number density of the material ρ", tags$sub(0), sep = "")))),
numericInput("rminInclGr", min=0, max=2.5, step=0.001,
label = strong("minimum r"),
value = 0),
numericInput("rmaxInclGr", min=0, max=2.5, step=0.001,
label = HTML(paste(strong("maximum r"), "(should be below the shortest possible interatomic distance)")),
value = 2),
numericInput("drInclGr", min=0.001, max=0.05, step=0.001,
label = div( span(strong("grid spacing")), span(strong(em("dr"))) ),
value = 0.005),
p(),
actionButton("setGrButton", label = strong("Submit")),
p(),
HTML(paste("(make sure", strong(HTML("ε")), "(noise level) has been estimated)"))
)
),
# span("B", style="letter-spacing: -0.3em;")
## = DIFFERENTIAL EVOLUTION =
conditionalPanel(
condition = "input.mainRB == 'difev'",
h3("DifEv Parameters"),
p(),
numericInput("fitNP", min=10, max=1000, step=10,
label = strong("Number of population members"),
value = 100),
p(),
numericInput("fitItermax", min=10, max=10000, step=10,
label = strong("Number of iterations"),
value = 500),
p(),
numericInput("fitCR",
label = strong("Crossover probability (CR)"),
min=0, max=1, step=0.01, value = .85),
p(),
numericInput("fitF",
label = strong("Differential weighting factor (F)"),
min=0, max=2, step=0.01, value = .7),
p(),
textInput("fitScale",
label = strong("Lower and upper bounds for scale factor fit"),
value = "1, 1"),
p(),
uiOutput("bkgBoundsR"),
p(),
uiOutput("fitWithR"),
p(),
conditionalPanel(
condition = "input.fitWith == 'fitWith.splines'",
textInput("fitKnots",
label = strong("Number of splines or spline knot positions"),
value = 20
)
),
p(),
actionButton("doFit", label = strong("Start fit"))
),
## = FIT RESULTS =
conditionalPanel(
condition = "input.mainRB == 'save'",
h3("Fit Results"),
p(),
uiOutput("downloadRDataR"),
p(),
uiOutput("downloadFitResAsTxtR"),
p(),
uiOutput("downloadFixR"),
p(),
br(),
# uiOutput("messageFixR"),
# p(),
uiOutput("selectFixR"),
uiOutput("appendFixR"),
p(),
br(),
uiOutput("outHeaderGr"),
p(),
uiOutput("rminCalcGrR"),
uiOutput("rmaxCalcGrR"),
uiOutput("drCalcGrR"),
uiOutput("calcGrButtonR"),
p(),
uiOutput("downloadGrR"),
p(),
br(),
uiOutput("iterHeader"),
p(),
uiOutput("iterTechniqueR"),
uiOutput("iterEpsR"),
uiOutput("iterNIterR"),
uiOutput("doIterationR")
)
)
)
),
## = Plot Options =
conditionalPanel(
condition = "input.mainRB == 'plot'",
tags$table(style="border: 3px solid #E8E8E8; padding:2px;border-style:solid; width:100%; height: 450px;" ,
tags$tr(valign="top",
tags$td(
p(),
uiOutput("selectPlotR"),
p(),
uiOutput("youCanSeePlot"),
p(),
p(),
uiOutput("axisLimsTxt"),
p(),
uiOutput("plotLimXR"),
p(),
uiOutput("plotLimYR"),
p(),
actionButton("rescaleY", label = strong("rescale Y slider")),
actionButton("resetY", label = strong("reset Y slider"))
)
)
)
) # end of the last conditional panel
), ## end of == sidebar panel ==
############################################################################
## === MAIN PANEL ===
mainPanel(
tabsetPanel(
tabPanel("Data Plot",
progressInit(),
tags$table(style="border-style:none; width:100%",
tags$tr(
tags$td(uiOutput("progress"), align="center", style="width:70%"),
tags$td(uiOutput("selectBank"), align="left", style="width:30%")
)
),
plotOutput("dataPlot", click="mainClick", hover="mainHover", width='750px'),
plotOutput("legendPlot", height="20px", width='750px'),
uiOutput('downloadMainPlotR'),
plotOutput("prelimGrPlot"),
uiOutput('downloadestGrPlotR'),
uiOutput('downloadestGrDataR')),
tabPanel("Data Table",
downloadButton('downloadData', 'Download'), tags$hr(),
tableOutput('datatable')),
tabPanel("Help", includeHTML("./help/help.html")),
tabPanel("Fit Results Plot",
plotOutput("fitResPlot"),
uiOutput('downloadFitResPlotR'), p(),
plotOutput("GrPlot"),
uiOutput('downloadGrPlotR'))
)
)
) ## end of === layout ===
)
)
| /scratch/gouwar.j/cran-all/cranData/BBEST/inst/gui/ui.R |
#' BBI : Benthic Biotic Indices calculation function
#'
#' @description The \code{BBI} package is meant to calculate Benthic Biotic Indices from
#' composition data, obtained either from morphotaxonomic inventories or from
#' sequencing data. It is based on reference ecological weights publicly available
#' for a set of commonly used marine biotic indices, such as AMBI (Borja et al., 2000)
#' and the NSI and ISI indices (Rygg 2013).
#'
#' @param data A data frame containing samples as columns and taxa as rows, with
#' species (or last taxonomic rank) in the first column \code{data}
#'
#' @return Function \code{BBI} returns a list containing:
#' \enumerate{
#'   \item \code{found} - the number of taxa that matched an entry in the
#'   database and the number that did not
#'   \item \code{queries} - the original and cleaned taxa names used as queries
#'   \item \code{BBI} - the BBI values per sample
#'   \item \code{BBIclass} - the discrete ecological status class per sample for each index
#'   \item \code{table} - the subset of composition data that contains only taxa with
#'   at least one match in one of the BBI
#'   \item \code{taxa} - the list of taxa that matched an entry and the corresponding OTU, if from NGS data.
#' }
#'
#' @examples \dontrun{BBI(my_table, log = FALSE)}
#' @author Tristan Cordier
#' @export
# sourcing the other helper functions
source("R/nEQR.R")
source("R/status.R")
# BBI main function
BBI <- function(data, log=FALSE) {
  # coerce to a data.frame in case it is not one already
  data <- as.data.frame(data)
  # Logging the search?
if(log) log_file <- file(paste("Log_BBI_", format(Sys.time(), "%Y_%a_%b_%dth-%H.%M.%S"), ".txt", sep=""),open="a")
## import the reference BI table !!!!
eco_index <- read.table(paste(system.file(package="BBI"), "/TABLE_REF.Rd", sep=""), header=TRUE, sep="\t", dec=",")
# fetch the taxa list from data
tax_n <- data[,1]
# ugly trick for indexing later
tax_n <- cbind(as.character(tax_n), as.character(tax_n))
# get the OTU id if sequencing data
otu_id <- rownames(data)
## initiate counting stuffs
cpt_found <- 0
cpt_not <- 0
found_index <- c()
  # need to convert each column into numeric (WEIRD behaviour : check http://stackoverflow.com/questions/3418128/how-to-convert-a-factor-to-an-integer-numeric-without-a-loss-of-information)
if (is.factor(data[,2]) == T) for (i in 2:dim(data)[2]) data[,i] <- as.numeric(levels(data[,i])[data[,i]])
# in case of NGS data, removing unassigned OTU to speed up the process
otu_list <- grep("OTU", tax_n[,1], ignore.case = F)
if (length(otu_list) > 0) tax_n <- tax_n[-otu_list,]
  # then isolate the composition data from taxa (dat <- composition data AND tax_n <- taxa list)
if (length(otu_list) > 0)
{
dat <- data[-otu_list,2:dim(data)[2]]
} else {
dat <- data[,2:dim(data)[2]]
}
# creating the list of taxa with reference ecological weights binded
out <- as.data.frame(array(NA, c(dim(tax_n)[1],7)))
dimnames(out)[[2]] <- c("query","AMBI", "ITI_GROUP", "ISI_value", "NSI", "NSI.group", "Bentix")
# storing the last rank of assignements (original query) and the processed one
queries <- as.data.frame(array(NA, c(dim(tax_n)[1],2)))
colnames(queries) <- c("original","cleaned")
# for each taxa
for (i in 1:dim(tax_n)[1])
{
# keep the last value in taxonomy assignment (note that here ';' is used as separator)
sp <- tail(unlist(strsplit(as.character(tax_n[i,2]), split=";", fixed=TRUE)),1)
# storing the original query
queries[i,"original"] <- sp
### if uncultured or unknown as last rank, get the one before
n=1
if (length(grep(";", as.character(tax_n[i,2]))) > 1 & length(grep("uncultu", sp, ignore.case = T)) > 0 | length(grep("unknown", sp, ignore.case = T)) > 0)
{
while (length(grep("uncultu", sp, ignore.case = T)) > 0 | length(grep("unknown", sp, ignore.case = T)) > 0)
{
n = n+1
sp <- tail(strsplit(as.character(tax_n[i,2]), ";")[[1]], n)[1]
}
}
# if we got a species or sp. as assignement, replace "+" by " " and other cleaning taxa name
sp <- gsub("+", " ", sp, fixed=TRUE)
sp <- gsub("_", " ", sp, fixed=TRUE)
sp <- gsub(" Cmplx.", "", sp, fixed=TRUE)
sp <- gsub(" environmental sample", "", sp, fixed=TRUE)
sp <- gsub(" indet.", "", sp, fixed=TRUE)
sp <- gsub(" indet", "", sp, fixed=TRUE)
## check if "sp" instead of "sp."
if (length(strsplit(sp, split=" sp", fixed=TRUE)[[1]]) == 1) sp <- strsplit(sp, split=" sp", fixed=TRUE)[[1]][1]
# check if something after sp.
if (length(strsplit(sp, split="sp.", fixed=TRUE)[[1]]) > 1)
{
sp <- strsplit(sp, split=" sp.", fixed=TRUE)[[1]][1]
} else if (length(strsplit(sp, split=" cf.", fixed=TRUE)[[1]]) > 1)
{
sp <- strsplit(sp, split=" cf.", fixed=TRUE)[[1]][1]
} else if (length(strsplit(sp, split="(", fixed=TRUE)[[1]]) > 1)
{
sp <- strsplit(sp, split=" (", fixed=TRUE)[[1]][1]
}
# storing the cleaned query
queries[i,"cleaned"] <- sp
# check if there a value in reference eco values
y <- grep(sp, eco_index[,"species"], ignore.case = TRUE)
message(paste("Processing : ", sp, " - ", length(y), " match in database so far", sep=""))
if(log) cat(paste("Processing : ", sp, " - ", length(y), " match in database so far", sep=""), file=log_file, fill=T, append=T)
# if the "exact" species is not matching and there a value for genus level
if (length(y) == 0)
{
sp <- strsplit(sp, split=" ", fixed=TRUE)[[1]][1]
message(paste(" No match, trying exact match of the genus", sp))
if(log) cat(paste(" No match, trying exact match of the genus", sp), file=log_file, fill=T, append=T)
#sp <- paste(sp, "sp.", sep=" ")
# and then exact match
y <- grep(paste("^", sp, " sp.", "$", sep=""), eco_index[,"species"], ignore.case = TRUE)
# if still not, there is no "genus sp." in the table, grab the species of the genus and median value
if (length(y) == 0)
{
message(paste(" No exact match, trying to match other species of the genus", sp))
if(log) cat(paste(" No exact match, trying to match other species of the genus", sp), file=log_file, fill=T, append=T)
y <- grep(paste(sp, " ",sep=""), eco_index[,"species"], ignore.case = F)
if (length(y) >= 1)
{
message(paste(" Found ", length(y), " match for ", sp, ". Taking the median values of ", length(y), " multiple values", sep = ""))
if(log) cat(paste(" Found ", length(y), " match for ", sp, ". Taking the median values of ", length(y), " multiple values", sep = ""), file=log_file, fill=T, append=T)
ambi <- eco_index[y,"AMBI"]
iti <- eco_index[y,"Iti_group"]
isi <- eco_index[y,"ISI.2012"]
nsi <- eco_index[y,"NSI.value"]
nsi_g <- eco_index[y,"NSI.group"]
ben <- eco_index[y,"bentix"]
out[i,"AMBI"] <- ceiling(median(as.numeric(as.vector(ambi)))) # round to the ceiling value if median is not integer
out[i,"ITI_GROUP"] <- median(as.numeric(as.vector(iti)))
out[i,"ISI_value"] <- median(as.numeric(as.vector(isi)))
out[i,"NSI"] <- median(as.numeric(as.vector(nsi)))
out[i,"NSI.group"] <- ceiling(median(as.vector(nsi_g)))
out[i,"Bentix"] <- ceiling(median(as.numeric(as.vector(ben))))
message(paste(" Done - ", sp, " AMBI value ", out[i,"AMBI"], sep=""))
if(log) cat(paste(" Done - ", sp, " AMBI value ", out[i,"AMBI"], sep=""), file=log_file, fill=T, append=T)
cpt_found <- cpt_found + 1
found_index <- c(found_index, 1)
} else {
message(" Not found.")
if(log) cat(" Not found.", file=log_file, fill=T, append=T)
cpt_not <- cpt_not + 1
found_index <- c(found_index, 0)
}
} else if (length(y) >= 1)
{
message(paste(" Found ", length(y), " match for ", sp, ". Taking the median values of ", length(y), " multiple values", sep = ""))
if(log) cat(paste(" Found ", length(y), " match for ", sp, ". Taking the median values of ", length(y), " multiple values", sep = ""), file=log_file, fill=T, append=T)
ambi <- eco_index[y,"AMBI"]
iti <- eco_index[y,"Iti_group"]
isi <- eco_index[y,"ISI.2012"]
nsi <- eco_index[y,"NSI.value"]
nsi_g <- eco_index[y,"NSI.group"]
ben <- eco_index[y,"bentix"]
out[i,"AMBI"] <- ceiling(median(as.numeric(as.vector(ambi)))) # round to the ceiling value if median is not integer
out[i,"ITI_GROUP"] <- median(as.numeric(as.vector(iti)))
out[i,"ISI_value"] <- median(as.numeric(as.vector(isi)))
out[i,"NSI"] <- median(as.numeric(as.vector(nsi)))
out[i,"NSI.group"] <- ceiling(median(as.vector(nsi_g)))
out[i,"Bentix"] <- ceiling(median(as.numeric(as.vector(ben))))
message(paste(" Done - ", sp, " AMBI value ", out[i,"AMBI"], sep=""))
if(log) cat(paste(" Done - ", sp, " AMBI value ", out[i,"AMBI"], sep=""), file=log_file, fill=T, append=T)
cpt_found <- cpt_found + 1
found_index <- c(found_index, 1)
}
}
# if there is at least a match
else if (length(y) > 0)
{
#message(sp)
tmp <- eco_index[y,]
if (length(y) > 1)
{
message(paste(" ", sp, " matched ", length(y), " entries in database. Trying exact match", sep=""))
if(log) cat(paste(" ", sp, " matched ", length(y), " entries in database. Trying exact match", sep=""), file=log_file, fill=T, append=T)
# if more than one match, try exact match
y <- grep(paste("^", sp, "$", sep=""), eco_index[,"species"], ignore.case = TRUE)
# if no perfect match, try adding a sp.
if (length(y) == 0)
{
sp <- paste(sp, "sp.", sep=" ")
message(paste(" No exact match. Trying exact match with ", sp, sep=""))
if(log) cat(paste(" No exact match. Trying exact match with ", sp, sep=""), file=log_file, fill=T, append=T)
y <- grep(paste("^", sp, "$", sep=""), eco_index[,"species"], ignore.case = TRUE)
}
message(paste(" Found ", length(y), " match for ", sp, sep=""))
if(log) cat(paste(" Found ", length(y), " match for ", sp, sep=""), file=log_file, fill=T, append=T)
if (length(y) == 1)
{
ambi <- eco_index[y,"AMBI"]
iti <- eco_index[y,"Iti_group"]
isi <- eco_index[y,"ISI.2012"]
nsi <- eco_index[y,"NSI.value"]
nsi_g <- eco_index[y,"NSI.group"]
ben <- eco_index[y,"bentix"]
out[i,"AMBI"] <- as.numeric(as.vector(ambi))
out[i,"ITI_GROUP"] <- as.numeric(as.vector(iti))
out[i,"ISI_value"] <- as.numeric(as.vector(isi))
out[i,"NSI"] <- as.numeric(as.vector(nsi))
out[i,"NSI.group"] <- as.vector(nsi_g)
out[i,"Bentix"] <- as.numeric(as.vector(ben))
message(paste(" Done - ", sp, " AMBI value ", eco_index[y,"AMBI"], sep=""))
if(log) cat(paste(" Done - ", sp, " AMBI value ", eco_index[y,"AMBI"], sep=""), file=log_file, fill=T, append=T)
cpt_found <- cpt_found + 1
found_index <- c(found_index, 1)
} else {
message(paste(" Taking median of the ", length(y), " match for ", sp, sep=""))
if(log) cat(paste(" Taking median of the ", length(y), " match for ", sp, sep=""), file=log_file, fill=T, append=T)
ambi <- eco_index[y,"AMBI"]
iti <- eco_index[y,"Iti_group"]
isi <- eco_index[y,"ISI.2012"]
nsi <- eco_index[y,"NSI.value"]
nsi_g <- eco_index[y,"NSI.group"]
ben <- eco_index[y,"bentix"]
out[i,"AMBI"] <- ceiling(median(as.numeric(as.vector(ambi)))) # ceiling value if not integer
out[i,"ITI_GROUP"] <- median(as.numeric(as.vector(iti)))
out[i,"ISI_value"] <- median(as.numeric(as.vector(isi)))
out[i,"NSI"] <- median(as.numeric(as.vector(nsi)))
out[i,"NSI.group"] <- ceiling(median(as.vector(nsi_g)))
out[i,"Bentix"] <- ceiling(median(as.numeric(as.vector(ben))))
message(paste(" Done - ", sp, " AMBI value ", out[i,"AMBI"], sep=""))
if(log) cat(paste(" Done - ", sp, " AMBI value ", out[i,"AMBI"], sep=""), file=log_file, fill=T, append=T)
cpt_found <- cpt_found + 1
found_index <- c(found_index, 1)
}
# if it is a genus only (deeper assignments might or not have a value)
if (length(unlist(strsplit(sp, split=" "))) == 1 & is.na(out[i,"AMBI"]) == T)
{
        message(paste("   ", sp, " is a genus query. Making the query : ", sp, " sp.", sep=""))
        if(log) cat(paste("   ", sp, " is a genus query. Making the query : ", sp, " sp.", sep=""), file=log_file, fill=T, append=T)
# make the query "query sp."
sp <- paste(sp,"sp.", sep=" ")
y <- grep(sp, eco_index[,"species"], fixed=TRUE)
# if there was not only species of the genus (no value for the genus itself)
if (length(y) > 0)
{
message(paste(" Found - ", length(y), " for ", sp, sep=""))
if(log) cat(paste(" Found - ", length(y), " for ", sp, sep=""), file=log_file, fill=T, append=T)
tmp <- eco_index[y,]
ambi <- eco_index[y,"AMBI"]
iti <- eco_index[y,"Iti_group"]
isi <- eco_index[y,"ISI.2012"]
nsi <- eco_index[y,"NSI.value"]
nsi_g <- eco_index[y,"NSI.group"]
ben <- eco_index[y,"bentix"]
out[i,"AMBI"] <- as.numeric(as.vector(ambi))
out[i,"ITI_GROUP"] <- as.numeric(as.vector(iti))
out[i,"ISI_value"] <- as.numeric(as.vector(isi))
out[i,"NSI"] <- as.numeric(as.vector(nsi))
out[i,"NSI.group"] <- as.vector(nsi_g)
out[i,"Bentix"] <- as.numeric(as.vector(ben))
message(paste(" Done - ", sp, sep=""))
if(log) cat(paste(" Done - ", sp, sep=""), file=log_file, fill=T, append=T)
cpt_found <- cpt_found + 1
found_index <- c(found_index, 1)
}
}
} else {
      message(paste("   Found ", length(y), " match for ", sp, sep=""))
      if(log) cat(paste("   Found ", length(y), " match for ", sp, sep=""), file=log_file, fill=T, append=T)
ambi <- eco_index[y,"AMBI"]
iti <- eco_index[y,"Iti_group"]
isi <- eco_index[y,"ISI.2012"]
nsi <- eco_index[y,"NSI.value"]
nsi_g <- eco_index[y,"NSI.group"]
ben <- eco_index[y,"bentix"]
out[i,"AMBI"] <- as.numeric(as.vector(ambi))
out[i,"ITI_GROUP"] <- as.numeric(as.vector(iti))
out[i,"ISI_value"] <- as.numeric(as.vector(isi))
out[i,"NSI"] <- as.numeric(as.vector(nsi))
out[i,"NSI.group"] <- as.vector(nsi_g)
out[i,"Bentix"] <- as.numeric(as.vector(ben))
message(paste(" Done - ", sp, " AMBI: ", eco_index[y,"AMBI"], sep=""))
if(log) cat(paste(" Done - ", sp, " AMBI: ", eco_index[y,"AMBI"], sep=""), file=log_file, fill=T, append=T)
cpt_found <- cpt_found + 1
found_index <- c(found_index, 1)
}
}
out[i,"query"] <- sp
}
# message the results of matching search
message(paste("==== Found match :", cpt_found, " Not found :", cpt_not, "===="))
if(log) cat(paste("==== Found match :", cpt_found, " Not found :", cpt_not, "===="), file=log_file, fill=T, append=T)
# bind all taxa, eco-weights, and composition data
output <- cbind(tax_n, out, dat)
  # ugly trick to deal with whether or not the data comes from sequencing
otu_id_list <- cbind(tax_n, otu_id)
## compute shannon (base 2) on taxa that got at least one value (not the best, but it is computed like that in Norway)
tmp_sha <- t(dat[,1:dim(dat)[2]])
tmp_sha[is.na(tmp_sha)] <- 0
output_shannon <- vegan::diversity(tmp_sha, index="shannon", base = 2)
# to make the rownames having unique names when same species is identified...
dimnames(output)[[1]] <- make.unique(tax_n[,1])
# to keep only the rownames (unique taxa), eco-weight, composition data
output <- output[,4:dim(output)[2]]
  # subset to keep only taxa with at least one match in one of the BI
output <- subset(output, found_index==1)
otu_id_list <- subset(otu_id_list, found_index==1)
############################################################
### now compute all indices.
############################################################
data_ <- output
indices <- as.data.frame(array(NA, c(7,dim(data_)[2]-6)))
dimnames(indices)[[1]] <- c("AMBI", "ISI", "NSI", "NQI1", "Shannon", "ITI", "Bentix")
dimnames(indices)[[2]] <- dimnames(data_)[[2]][7:dim(data_)[2]]
indices["Shannon",] <- output_shannon
# for each SAMPLE
for (i in dimnames(indices)[[2]])
{
# AMBI
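    # AMBI (Borja et al., 2000) weights the relative abundances of the ecological
    # groups: AMBI = (0*%GI + 1.5*%GII + 3*%GIII + 4.5*%GIV + 6*%GV) / 100,
    # so group I carries weight 0 and only groups II-V contribute below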
# keep only OTU with abundances
ambi <- subset(data_, data_[,i] !=0)
    ambi <- subset(ambi, is.na(ambi[,i]) == FALSE ) # same as the line above; the subset below keeps only species that have an index value
ambi <- subset(ambi, is.na(ambi$AMBI) == FALSE )
# normalize into %
ambi[,i] <- as.numeric(ambi[,i])*100/sum(as.numeric(ambi[,i]))
# creating the values and 0 by default
val2 <- val3 <- val4 <- val5 <- 0
gp2 <- gp3 <- gp4 <- gp5 <- 0
# for each OTU in the sample with abundances
for (j in dimnames(ambi)[[1]])
{
# if there is a value for AMBI
if (is.na(ambi[j,"AMBI"]) == FALSE)
{
        # for AMBI group 2 (group 1 has weight 0, so contributions start at group 2)
if (ambi[j,"AMBI"] == 2)
{
val2 <- 1.5 * ambi[j,i]
gp2 <- gp2 + val2
} else { val2 <- 0}
if (ambi[j,"AMBI"] == 3)
{
val3 <- 3 * ambi[j,i]
gp3 <- gp3 + val3
} else { val3 <- 0}
if (ambi[j,"AMBI"] == 4)
{
val4 <- 4.5 * ambi[j,i]
gp4 <- gp4 + val4
} else { val4 <- 0}
if (ambi[j,"AMBI"] == 5)
{
val5 <- 6 * ambi[j,i]
gp5 <- gp5 + val5
} else { val5 <- 0}
}
}
ambi_value <- (gp2 + gp3 + gp4 + gp5) /100
# NSI
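    # NSI (Rygg 2013) is the abundance-weighted mean of the per-taxon NSI values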
nsi <- subset(data_, data_[,i] !=0)
nsi <- subset(nsi, nsi$NSI > 0)
nsi <- sum(as.numeric(nsi[,i]) * nsi[,"NSI"])/sum(as.numeric(nsi[,i]))
# ISI 2012
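    # ISI (Rygg 2013) is the unweighted mean of the ISI values of the taxa present;
    # abundances only determine which taxa are included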
isi <- subset(data_, data_[,i] !=0)
isi <- subset(isi, isi$ISI_value > 0)
isi <- sum(isi[,"ISI_value"])/dim(isi)[1]
# ITI index
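    # ITI = 100 - (100/3) * (n2 + 2*n3 + 3*n4) / (n1 + n2 + n3 + n4), where nk is
    # the summed abundance of trophic group k (100 = pure group I, 0 = pure group IV)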
iti <- subset(data_, data_[,i] !=0)
iti <- subset(iti, iti$ITI_GROUP > 0)
n1 <- subset(iti, iti$ITI_GROUP ==1)
n2 <- subset(iti, iti$ITI_GROUP ==2)
n3 <- subset(iti, iti$ITI_GROUP ==3)
n4 <- subset(iti, iti$ITI_GROUP ==4)
iti <- 100-100/3*(sum(as.numeric(n2[,i])) + 2*sum(as.numeric(n3[,i])) + 3*sum(as.numeric(n4[,i])))/(sum(as.numeric(n1[,i])) +sum(as.numeric(n2[,i]))+sum(as.numeric(n3[,i]))+sum(as.numeric(n4[,i])))
## NQI1
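    # NQI1 combines AMBI with a diversity/abundance term:
    # NQI1 = 0.5*(1 - AMBI/7) + 0.5*((ln S / ln ln N) / 2.7) * N/(N + 5),
    # with S the number of taxa and N the total abundance in the sample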
ambi_raw <- subset(data_, data_[,i] !=0)
nqi1 <- 0.5*(1-ambi_value/7)+0.5*((log(dim(ambi_raw)[1])/log(log(sum(as.numeric(ambi_raw[,i])))))/2.7)*sum(as.numeric(ambi_raw[,i]))/(sum(as.numeric(ambi_raw[,i]))+5)
### BENTIX
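    # Bentix (Simboura & Zenetos, 2002) uses two weights:
    # Bentix = (6*%GI + 2*%(GII + GIII)) / 100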
# keep only OTU with abundances
bentix <- subset(data_, data_[,i] !=0)
    bentix <- subset(bentix, is.na(bentix[,i]) == FALSE ) # same as the line above; the subset below keeps only species that have an index value
bentix <- subset(bentix, is.na(bentix$Bentix) == FALSE )
# normalize into %
bentix[,i] <- as.numeric(bentix[,i])*100/sum(as.numeric(bentix[,i]))
# creating the values and 0 by default
val1 <- val2 <- val3 <- 0
gp1 <- gp2 <- gp3 <- 0
# for each OTU in the sample with abundances
for (j in dimnames(bentix)[[1]])
{
      # if there is a value for Bentix
      if (is.na(bentix[j,"Bentix"]) == FALSE)
      {
        if (bentix[j,"Bentix"] == 1)
        {
          val1 <- 6 * bentix[j,i]
          gp1 <- gp1 + val1
        } else { val1 <- 0}
        if (bentix[j,"Bentix"] == 2)
        {
          val2 <- 2 * bentix[j,i]
          gp2 <- gp2 + val2
        } else { val2 <- 0}
        if (bentix[j,"Bentix"] == 3)
        {
          val3 <- 2 * bentix[j,i]
          gp3 <- gp3 + val3
        } else { val3 <- 0}
      }
}
}
bentix_value <- (gp1 + gp2 + gp3) /100
# then paste values in the indices table for output
indices["AMBI", i] <- ambi_value
indices["NSI", i] <- nsi
indices["ISI", i] <- isi
indices["ITI", i] <- iti
indices["NQI1", i] <- nqi1
indices["Bentix", i] <- bentix_value
}
  ## now we can return the discrete assessment for each BBI
tmp <- t(indices)[,c("AMBI", "ISI", "NSI", "NQI1", "Shannon")]
# preparing the class array
ind_class <- tmp
for (i in 1:nrow(ind_class))
{
for (j in colnames(ind_class))
{
if (j == "AMBI") ind_class[i,j] <- e$status.ambi(tmp[i,j])
if (j == "ISI") ind_class[i,j] <- e$status.isi(tmp[i,j])
if (j == "NSI") ind_class[i,j] <- e$status.nsi(tmp[i,j])
if (j == "NQI1") ind_class[i,j] <- e$status.nqi1(tmp[i,j])
if (j == "Shannon") ind_class[i,j] <- e$status.shannon(tmp[i,j])
}
}
# preparing the output
output <- list("found" = c("Found match:", cpt_found, " Not found:", cpt_not),
"queries" = queries,
"BBI" = t(indices),
"BBIclass" = ind_class,
"table" = output,
"taxa" = otu_id_list)
return(output)
}
| /scratch/gouwar.j/cran-all/cranData/BBI/R/BBI.R |
#' metabarcoding data example
#'
#' An example of composition data, i.e. the so-called OTU table, obtained from eDNA metabarcoding
#'
#' @docType data
#'
#' @usage data(metab)
#'
#' @keywords datasets
#'
#' @examples
#' data(metab)
#' BI <- BBI(metab, log=T)
"metab"
| /scratch/gouwar.j/cran-all/cranData/BBI/R/metab.R |
#' morphological data example
#'
#' An example of composition data obtained from morphological inventories
#'
#' @docType data
#'
#' @usage data(morpho)
#'
#' @keywords datasets
#'
#' @examples
#' data(morpho)
#' BI <- BBI(morpho, log=T)
"morpho"
| /scratch/gouwar.j/cran-all/cranData/BBI/R/morpho.R |
#' nEQR function
#' @param data A data frame containing samples as rows and BBI values as columns.
#' @examples \dontrun{nEQR(my_BBI$BBI)}
#' @author Tristan Cordier
#' @export
#### nEQR calculation from BBI values
## the boundaries between classes are the normalized one from each of the indices
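## worked example: the AMBI good/moderate boundary of 3.3 first becomes an EQR of
## 3.3/6 = 0.55 (eqr.ambi below); the piecewise map in nEQR.ambi sends 0.55 to 0.4
## and, because low AMBI means good status, returns 1 - 0.4 = 0.6, which is exactly
## the good/moderate boundary on the common nEQR scale (see status.nEQR)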
## create a "e" environment for storing the functions
e <-new.env()
## just a normalizing function used by nEQR
e$linMap <- function(x, from, to) {
# Shifting the vector so that min(x) == 0
x <- x - min(x)
# Scaling to the range of [0, 1]
x <- x / max(x)
# Scaling to the needed amplitude
x <- x * (to - from)
# Shifting to the needed level
x + from
}
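## e.g. linMap(c(0, 3, 6), 0, 1) returns c(0, 0.5, 1)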
## EQR functions for each BBI
e$eqr.ambi <- function(z){
t <- e$linMap(c(0,z,6),0,1)
return(t[2:(length(t)-1)])
}
e$eqr.nsi <- function(z){
t <- e$linMap(c(0,z,31),0,1)
return(t[2:(length(t)-1)])
}
e$eqr.isi <- function(z){
t <- e$linMap(c(0,z,13),0,1)
return(t[2:(length(t)-1)])
}
e$eqr.nqi1 <- function(z){
t <- e$linMap(c(0,z,0.9),0,1)
return(t[2:(length(t)-1)])
}
e$eqr.shannon <- function(z){
t <- e$linMap(c(0,z,5.7),0,1)
return(t[2:(length(t)-1)])
}
## normalized EQR functions for each BBI
e$nEQR.ambi <- function(z){
out <- c(1:length(z))
for (i in 1:length(z))
{
if (z[i] < 0.2) out[i] <- (z[i]-0)*(0.2-0)/(0.2-0) + 0
else if (z[i] >= 0.2 && z[i] < 0.55) out[i] <- (z[i]-0.2)*(0.4-0.2)/(0.55-0.2)+0.2
else if (z[i] >= 0.55 && z[i] < 0.7166667) out[i] <- (z[i]-0.55)*(0.6-0.4)/(0.7166667-0.55)+0.4
else if (z[i] >= 0.7166667 && z[i] < 0.9166667) out[i] <- (z[i]-0.7166667)*(0.8-0.6)/(0.9166667-0.7166667)+0.6
else if (z[i] >= 0.9166667 && z[i] <= 1) out[i] <- (z[i]-0.9166667)*(1-0.8)/(1-0.9166667)+0.8
}
return(1-out)
}
e$nEQR.nsi <- function(z){
out <- c(1:length(z))
for (i in 1:length(z))
{
if (z[i] < 0.3225806) out[i] <- (z[i]-0)*(0.2-0)/(0.2-0) + 0
else if (z[i] >= 0.3225806 && z[i] < 0.4838710) out[i] <- (z[i]-0.3225806)*(0.4-0.2)/(0.4838710-0.3225806)+0.2
else if (z[i] >= 0.4838710 && z[i] < 0.6451613) out[i] <- (z[i]-0.4838710)*(0.6-0.4)/(0.6451613-0.4838710)+0.4
else if (z[i] >= 0.6451613 && z[i] < 0.8064516) out[i] <- (z[i]-0.6451613)*(0.8-0.6)/(0.8064516-0.6451613)+0.6
else if (z[i] >= 0.8064516 && z[i] <= 1) out[i] <- (z[i]-0.8064516)*(1-0.8)/(1-0.8064516)+0.8
}
return(out)
}
e$nEQR.nqi1 <- function(z){
out <- c(1:length(z))
for (i in 1:length(z))
{
if (z[i] < 0.3444444) out[i] <- (z[i]-0)*(0.2-0)/(0.2-0) + 0
else if (z[i] >= 0.3444444 && z[i] < 0.5444444) out[i] <- (z[i]-0.3444444)*(0.4-0.2)/(0.5444444-0.3444444)+0.2
else if (z[i] >= 0.5444444 && z[i] < 0.7) out[i] <- (z[i]-0.5444444)*(0.6-0.4)/(0.7-0.5444444)+0.4
else if (z[i] >= 0.7 && z[i] < 0.9111111) out[i] <- (z[i]-0.7)*(0.8-0.6)/(0.9111111-0.7)+0.6
else if (z[i] >= 0.9111111 && z[i] <= 1) out[i] <- (z[i]-0.9111111)*(1-0.8)/(1-0.9111111)+0.8
}
return(out)
}
e$nEQR.isi <- function(z){
out <- c(1:length(z))
for (i in 1:length(z))
{
if (z[i] < 0.3461538) out[i] <- (z[i]-0)*(0.2-0)/(0.2-0) + 0
else if (z[i] >= 0.3461538 && z[i] < 0.4692308) out[i] <- (z[i]-0.3461538)*(0.4-0.2)/(0.4692308-0.3461538)+0.2
else if (z[i] >= 0.4692308 && z[i] < 0.5769231) out[i] <- (z[i]-0.4692308)*(0.6-0.4)/(0.5769231-0.4692308)+0.4
else if (z[i] >= 0.5769231 && z[i] < 0.7384615) out[i] <- (z[i]-0.5769231)*(0.8-0.6)/(0.7384615-0.5769231)+0.6
else if (z[i] >= 0.7384615 && z[i] <= 1) out[i] <- (z[i]-0.7384615)*(1-0.8)/(1-0.7384615)+0.8
}
return(out)
}
e$nEQR.shannon <- function(z){
out <- c(1:length(z))
for (i in 1:length(z))
{
if (z[i] < 0.1578947) out[i] <- (z[i]-0)*(0.2-0)/(0.2-0) + 0
else if (z[i] >= 0.1578947 && z[i] < 0.3333333) out[i] <- (z[i]-0.1578947)*(0.4-0.2)/(0.3333333-0.1578947)+0.2
else if (z[i] >= 0.3333333 && z[i] < 0.5263158) out[i] <- (z[i]-0.3333333)*(0.6-0.4)/(0.5263158-0.3333333)+0.4
else if (z[i] >= 0.5263158 && z[i] < 0.8421053) out[i] <- (z[i]-0.5263158)*(0.8-0.6)/(0.8421053-0.5263158)+0.6
else if (z[i] >= 0.8421053 && z[i] <= 1) out[i] <- (z[i]-0.8421053)*(1-0.8)/(1-0.8421053)+0.8
}
return(out)
}
## and now the main function to compute nEQR across several BBI
nEQR <- function (data) {
  ## Bentix and ITI are not included in the nEQR assessment, so drop them if present
  message("Bentix and ITI are being removed (if any), because they are not included in the nEQR assessment regulations")
BI_val <- data[,!(colnames(data)) %in% c("Bentix", "ITI")]
## now compute nEQR
# preparing the output
out_n <- cbind(BI_val, EQR = rep(0, nrow(BI_val)))
for (j in colnames(BI_val))
{
if (j == "AMBI") out_n[,j] <- e$nEQR.ambi(e$eqr.ambi(BI_val[,j]))
if (j == "ISI") out_n[,j] <- e$nEQR.isi(e$eqr.isi(BI_val[,j]))
if (j == "NSI") out_n[,j] <- e$nEQR.nsi(e$eqr.nsi(BI_val[,j]))
if (j == "NQI1") out_n[,j] <- e$nEQR.nqi1(e$eqr.nqi1(BI_val[,j]))
if (j == "Shannon") out_n[,j] <- e$nEQR.shannon(e$eqr.shannon(BI_val[,j]))
}
# renaming the columns
colnames(out_n) <- paste0("n", colnames(out_n))
## then average over the row for nEQR
out_n[,"nEQR"] <- rowMeans(out_n[,!(colnames(out_n)) %in% c("nEQR")])
  ## now we can return the discrete assessment for nEQR
neqr_class <- out_n[,"nEQR"]
# getting the status
for (i in 1:length(neqr_class)) neqr_class[i] <- e$status.nEQR(out_n[i,"nEQR"])
## preparing the output
output <- list("nEQR" = out_n,
"nEQRclass" = cbind(nEQR = as.numeric(out_n[,"nEQR"]), nEQR_class = neqr_class))
return(output)
}
| /scratch/gouwar.j/cran-all/cranData/BBI/R/nEQR.R |
# functions for converting the continuous index values into discrete status assessments
e$status.ambi <- function(z){
if(is.na(z)) return("NA")
if (z < 1.2) return("very good")
else if (z >= 1.2 && z < 3.3) return("good")
else if (z >= 3.3 && z < 4.3) return("moderate")
else if (z >= 4.3 && z < 5.5) return("bad")
else if (z >= 5.5) return("very bad")
}
e$status.nsi <- function(z){
if(is.na(z)) return("NA")
if (z < 10) return("very bad")
else if (z >= 10 && z < 15) return("bad")
else if (z >= 15 && z < 20) return("moderate")
else if (z >= 20 && z < 25) return("good")
else if (z >= 25) return("very good")
}
e$status.nqi1 <- function(z){
if(is.na(z)) return("NA")
if (z < 0.31) return("very bad")
else if (z >= 0.31 && z < 0.49) return("bad")
else if (z >= 0.49 && z < 0.63) return("moderate")
else if (z >= 0.63 && z < 0.82) return("good")
else if (z >= 0.82) return("very good")
}
e$status.isi <- function(z){
if(is.na(z)) return("NA")
if (z < 4.5) return("very bad")
else if (z >= 4.5 && z < 6.1) return("bad")
else if (z >= 6.1 && z < 7.5) return("moderate")
else if (z >= 7.5 && z < 9.6) return("good")
else if (z >= 9.6) return("very good")
}
e$status.shannon <- function(z){
if(is.na(z)) return("NA")
if (z < 0.9) return("very bad")
else if (z >= 0.9 && z < 1.9) return("bad")
else if (z >= 1.9 && z < 3) return("moderate")
else if (z >= 3 && z < 4.8) return("good")
else if (z >= 4.8) return("very good")
}
e$status.nEQR <- function(z){
if(is.na(z)) return("NA")
if (z < 0.2) return("very bad")
else if (z >= 0.2 && z < 0.4) return("bad")
else if (z >= 0.4 && z < 0.6) return("moderate")
else if (z >= 0.6 && z < 0.8) return("good")
else if (z >= 0.8) return("very good")
}
| /scratch/gouwar.j/cran-all/cranData/BBI/R/status.R |
#' @export
#' @title BB-SSL
#' @name BB_SSL
#' @description This function runs BB-SSL, WBB with fixed prior weight, and WBB with random prior weight.
#' It solves the optimization by calling function SSLASSO_2, a variant of the function SSLASSO in CRAN package 'SSLASSO': in the version used,
#' we do NOT standardize the design matrix and allow inputting initial values of beta's.
#'
#' @usage BB_SSL(y, X, method = 3, lambda, NSample, a, b, maxiter=500, eps = 1e-3, burn.in = FALSE,
#' length.out = 50, discard = FALSE, alpha = 3, sigma = 1, initial.beta, penalty = c("adaptive","separable"),
#' theta=0.5)
#'
#' @param y A vector of continuous responses (n x 1).
#' @param X The design matrix (n x p), without an intercept.
#' @param method A number between c(1,2,3) to specify which method to run, method = 1 is fixed WBB, method = 2 is random WBB, method = 3 is BB-SSL.
#' @param lambda A two-dim vector = c(lambda0, lambda1).
#' @param NSample An integer which specifies the number of samples to be generated.
#' @param a,b Parameters of the prior.
#' @param maxiter An integer which specifies the maximum number of iterations for SSLASSO_2 (default maxiter = 500).
#' @param eps Convergence criterion when running SSLASSO_2: converged when difference in regression coefficients is less than eps (default eps = 0.001).
#' @param burn.in A boolean to specify whether to use annealing on a sequence of lambda0's (default burn.in = FALSE).
#' @param length.out An integer to specify the length of sequence of lambda0's used in annealing. This value is not used when burn.in = FALSE. Default is 50.
#' @param discard A boolean to specify whether to discard unconverged sample points.
#' @param alpha The parameter for generating weights in BB-SSL, which follows n x Dirichlet(alpha,...,alpha). Default is 3.
#' @param sigma Noise standard deviation.
#' @param initial.beta A vector of initial values of beta to use when solving SSLASSO_2 (p x 1).
#' @param penalty The penalty (prior) to be applied to the model. Either "separable" (with a fixed theta) or "adaptive" (with a random theta, where theta ~ B(a,p)). The default is "adaptive".
#' @param theta Prior mixing proportion. For "separable" penalty, this value is fixed. For "adaptive" penalty, this value is used as a starting value. Default is 0.5.
#'
#' @return A list of matrices, including matrix beta and matrix gamma (n x p).
#'
#' @author Lizhen Nie <[email protected]>, Veronika Rockova <[email protected]>
#' @references Nie, L., & Ročková, V. (2020). Bayesian Bootstrap Spike-and-Slab LASSO. arXiv:2011.14279.
#'
#' @examples
#' # -------------- Generate Data --------------
#' n = 100; p = 1000;
#' truth.beta = c(2, 3, -3, 4); # high-dimensional case
#' truth.sigma = 1
#' data = Generate_data(truth.beta, p, n, truth.sigma = 1, rho = 0.6,"block",100)
#' y = data$y; X = data$X; beta = data$beta
#'
#' # --------------- set parameters -----------------
#' lambda0 = 50; lambda1 = 0.05; lambda = c(lambda0, lambda1)
#' a = 1; b = p #beta prior for theta
#'
#' # Use SSLASSO_2 solution to get an initial value of beta's
#' result = SSLASSO_2(X, y, penalty = "adaptive", variance = "fixed", sigma = truth.sigma,
#' lambda1 = lambda1, lambda0 = seq(lambda1, lambda0, length.out = 50), a = a, b = b,
#' max.iter = 500, initial.beta = rep(0,p))
#'
#' #--------------- WBB with fixed prior --------------
#' fixed.WBB.result = BB_SSL(y, X, method = 1, lambda = c(lambda0, lambda1), NSample = 1000, a, b,
#' maxiter = 500, length.out = 50, burn.in = F, discard = T,initial.beta = result$beta[,50])
#'
#' #--------------- WBB with random prior--------------
#' random.WBB.result = BB_SSL(y, X, method = 2, lambda = c(lambda0, lambda1), NSample = 1000, a, b,
#' maxiter = 500, length.out = 50, burn.in = F, discard = T,initial.beta = result$beta[,50])
#'
#' #--------------- BB-SSL -------------
#' BB.SSL.result = BB_SSL(y, X, method = 3, lambda = c(lambda0, lambda1), NSample = 100, a, b,
#' maxiter = 500, length.out = 50, burn.in = F, discard = T, alpha=1,initial.beta = result$beta[,50])
#'
BB_SSL = function(y, X, method = 3, lambda, NSample, a, b, maxiter=500, eps = 1e-3, burn.in = FALSE, length.out = 50, discard = FALSE, alpha = 3,
sigma = 1, initial.beta, penalty = "adaptive", theta=0.5){
n = nrow(X)
p = ncol(X)
beta = matrix(NA, nrow = NSample, ncol = p)
gamma = matrix(0, nrow = NSample, ncol = p)
  lambda0 = lambda[1]; lambda1 = lambda[2]  # lambda = c(lambda0, lambda1), as documented
i = 1
while (i <= NSample){
start_time <- Sys.time()
# WBB with fixed prior weight
if (method==1){
mu = rep(0, p)
w = rexp(n)
}
# WBB with random prior weight
if (method==2){
wp = rexp(1)
mu = rep(0,p)
w = rexp(n)
w = w / wp
}
# BB-SSL
if (method==3){
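      # BB-SSL draws the prior centers mu from the Laplace spike (scale 1/lambda0
      # = 1/lambda[1]) and the observation weights w from n * Dirichlet(alpha, ..., alpha):
      # Gamma(alpha, 1) draws rescaled to sum to n are exactly such a vector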
mu = rmutil::rlaplace(p, m=0,s=1/lambda[1])
w = rgamma(n, shape = alpha, rate = 1)
adj = sum(w)
w = w * (n/adj)
}
sqrt.w = w^(1/2)
weighted.X = matrix(sqrt.w, nrow = n, ncol = p) * X # O(np)
weighted.y = sqrt.w * y
weighted.y = weighted.y - weighted.X %*% mu
if(burn.in == TRUE){
result = SSLASSO_2(weighted.X, weighted.y, penalty = penalty, variance = "fixed", sigma = sigma^2, theta = theta,
lambda1 = lambda[2], lambda0 = seq(lambda[2], lambda[1], length.out = length.out), a = a, b = b, max.iter = maxiter,
eps = eps, initial.beta = initial.beta)
#if doesn't converge, discard this result
if(result$iter[length.out]==maxiter & discard == TRUE){next}
}else{
result = SSLASSO_2(weighted.X, weighted.y, penalty = penalty, variance = "fixed", sigma = sigma^2, theta = theta,
lambda1 = lambda[2], lambda0 = lambda[1], a = a, b = b, max.iter = maxiter, eps = eps,
initial.beta = initial.beta)
if(result$iter==maxiter & discard == TRUE){next}
}
tmp = result$beta
mtmp = ncol(tmp)
beta[i,] = tmp[,mtmp]
beta[i,] = beta[i,] + mu
# threshold beta to get gamma
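    # with theta implicitly fixed at 1/2, tmp is the slab/spike density ratio
    # psi_1(beta)/psi_0(beta) = (lambda1/lambda0) * exp(-(lambda1 - lambda0)*|beta|),
    # so tmp/(tmp + 1) is the posterior inclusion probability and gamma = 1
    # whenever it exceeds 0.5 (overflow NAs below are counted as inclusions)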
tmp = lambda1/lambda0 * exp(-(lambda1-lambda0)*abs(beta[i,]))
gamma[i,] = (tmp / (tmp + 1) > 0.5)
index = is.na(gamma[i,])
gamma[i,index] = 1
end_time <- Sys.time()
svMisc::progress(i, NSample-1, progress.bar = TRUE)
if (i == NSample) cat("Done!\n")
#if (i %% 100 == 0){
# print(i); print(end_time - start_time)
#}
# print(i); print(end_time - start_time)
i = i + 1
}
return(list(beta = beta, gamma = gamma))
}
| /scratch/gouwar.j/cran-all/cranData/BBSSL/R/BB_SSL.R |