#' @export
parRF <- function(parVar = ".",
Kmax = NULL,
nmin = NULL,
ntree = 60,
mtry = NULL,
maxnodes = NULL
){
# return a function of response, training set fitted values, training set
# residual, training data and test data.
return(function(Rsp, predT, res,
Train.data, Validation.data){
# apply a built-in function to preselect partition covariates
# by distance correlation
preFun = dcPre()
if (is.null(nmin)){
nmin <- ceiling(sqrt(nrow(Validation.data)))
}
# assign default values
if (is.null(Kmax)){
Kmax <- floor(nrow(Validation.data)/nmin)
}
if (is.null(maxnodes)){
maxnodes <- if (!identical(parVar, ".")) {
min(nrow(Train.data), ceiling(5 * length(parVar)))
} else {
min(nrow(Train.data), ceiling(5 * (ncol(Train.data) - 1)))
}
}
# training covariates with the residuals (phat - y scale) attached
datRf <- Train.data[, -which(names(Train.data) == Rsp)]
# attach the residuals from the fitted test model
datRf$res <- res
# preselection result
preSelRes <- preFun(datRf = datRf, parVar = parVar)
parVarNew <- preSelRes$parVarNew
preSelected <- preSelRes$preSelected
if (is.null(mtry)){
mtry <- floor( length(parVarNew)/3)
}
# partition by random forest based on preselected variables
formula_rf <- stats::as.formula(paste("res", " ~ ", paste(parVarNew, collapse = " + "), sep = ""))
resRf <- randomForest::randomForest(formula_rf, data = datRf, ntree = ntree, maxnodes = maxnodes, mtry = mtry, importance = TRUE)
# obtain random forest residual prediction on the training set
trainsetPred <- stats::predict(resRf, newdata = Train.data)
# sometimes the prediction on the training set is constant (all one value),
# and grouping by the quantiles of the random forest fit is then problematic
# since there is only one unique value.
# If that happens, put all the observations into one group.
if (length( unique(round(trainsetPred, digits = 10)) ) == 1){
#the number of groups left
ngp <- 1
gup <- as.factor(rep(1, nrow(Validation.data)) )
}else{
# else, divide the groups by the quantiles.
# adjusted maximum number of groups, in case there are too few unique fitted values
Kmax_adj <- min(Kmax, length( unique(round(trainsetPred, digits = 10)) ) )
# from 1 to Kmax, calculate the chi-square value on the training set
chitrainVec <- numeric(Kmax_adj)
for (gt in c(1:Kmax_adj)){
# divide the fitted values on the training set by training set quantiles
gupt <- cut(round(trainsetPred, digits = 10) ,
breaks = stats::quantile(unique(round(trainsetPred, digits = 10)) , probs = seq(0, 1, 1/gt)), include.lowest = TRUE)
#########calculate the difference in each group on the training set
dift <- abs(stats::xtabs(predT - Train.data[,Rsp] ~ gupt))
# calculate the denominator in each group
dent <- stats::xtabs(predT * (1 - predT) ~ gupt)
#########calculate the test statistic
contrit <- (dift)^2/dent
chitrainVec[gt] <- sum(contrit)
# if one group has no observations, its contribution is NaN.
# If that happens, fill in the value from k - 1; the previous
# k will then always be selected with higher priority than this group.
if(is.nan(sum(contrit))){
chitrainVec[gt] <- chitrainVec[(gt-1)]
}
}
# Select the number of groups
chitrainVec2 <- chitrainVec[-1]
chitrainVec3 <- chitrainVec[-length(chitrainVec)]
chitrainVec4 <- chitrainVec2 - chitrainVec3
Ksel <- which.max(chitrainVec4) + 1
# obtain random forest prediction on the test set
testsetPred <- stats::predict(resRf, newdata = Validation.data)
# divide the prediction on the test set by training set quantiles
gup <- cut(testsetPred ,
breaks = c(Inf, -Inf, stats::quantile(unique(round(trainsetPred, digits = 10)) , probs = seq(0, 1, 1/Ksel)) ) , include.lowest = TRUE)
#drop the levels with 0 observations
gup <- droplevels(gup)
# if the number of observations in the smallest group is less than
# nmin, combine it with the second smallest group. Repeat this until
# all of the groups have size not less than nmin
freqTab <- table(gup)
while(min(freqTab) < nmin){
# combine the smallest level with the second smallest level
levels(gup)[which(levels(gup) == names(sort(freqTab))[1])] <- names(sort(freqTab))[2]
# update the frequency table
freqTab <- table(gup)
}
}
# store partition result
if (preSelected){
# store preselection importance
Pre_importance <- preSelRes$VI
# create a zero matrix
selImp <- matrix(0, nrow = nrow(Pre_importance), ncol = 2)
rownames(selImp) <- rownames(Pre_importance)
colnames(selImp) <- c("%IncMSE", "IncNodePurity")
# store the variable importance of selected variables
matchInd <- sapply(rownames(resRf$importance), function(x) which(rownames(Pre_importance) == x) )
selImp[matchInd, ] <- resRf$importance
parRes <- list(Var.imp = selImp,
preVar.imp = Pre_importance)
}else{
parRes <- list(Var.imp = resRf$importance)
}
return(list(gup = gup, parRes = parRes))
})
}
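# --- illustration (a minimal sketch; not run): using parRF() as the adaptive
# partition function in a BAGofT goodness-of-fit test. The BAGofT() call below
# assumes the package's main interface takes 'testModel', 'parFun' and 'data'
# arguments; the argument names and data are indicative only -- see ?BAGofT.
if (FALSE) {
set.seed(1)
n <- 200
dat <- data.frame(x1 = rnorm(n), x2 = rnorm(n))
dat$y <- rbinom(n, 1, plogis(dat$x1 + dat$x2))
res <- BAGofT(testModel = testGlmBi(formula = y ~ x1 + x2, link = "logit"),
parFun = parRF(ntree = 60),
data = dat)
}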
| /scratch/gouwar.j/cran-all/cranData/BAGofT/R/parRF.R |
#' @export
#######################################################
# 'testModel' function for binomial regression
#######################################################
testGlmBi <- function(formula, link){
testModel <- function(Train.data, Validation.data){
modT <- stats::glm(formula, family = stats::binomial(link = link), Train.data)
# prediction on the training set
predT <- stats::predict(modT, newdata = Train.data,
type = "response", se.fit = TRUE)$fit
# prediction on the test set
predE <- stats::predict(modT, newdata = Validation.data,
type = "response", se.fit = TRUE)$fit
# calculate the Pearson residual
res <- stats::resid(modT, type = "pearson")
# obtain the response name
Rsp <- as.character(formula)[2]
return(list(predT = predT, predE = predE, res = res, Rsp = Rsp))
}
return(testModel)
}
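# --- illustration (a minimal sketch; not run): a 'testModel' function
# returned by testGlmBi() can also be called directly on a train/validation
# split. The simulated data and column names here are purely hypothetical.
if (FALSE) {
set.seed(2)
n <- 200
dat <- data.frame(x1 = rnorm(n))
dat$y <- rbinom(n, 1, plogis(dat$x1))
tm <- testGlmBi(formula = y ~ x1, link = "logit")
fit <- tm(Train.data = dat[1:100, ], Validation.data = dat[101:200, ])
str(fit) # list with predT, predE, res and Rsp
}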
| /scratch/gouwar.j/cran-all/cranData/BAGofT/R/testGlmBi.R |
#' @export
#######################################################
# 'testModel' function for penalized logistic regression
#######################################################
testGlmnet <- function(formula, alpha = 1){
# return a function of train data and test data
testModel <- function(Train.data, Validation.data){
# obtain the response name
Rsp <- as.character(formula)[2]
# regressor data
XmatT <- stats::model.matrix(formula, Train.data)[,-1]
XmatE <- stats::model.matrix(formula, Validation.data)[,-1]
# fit lasso regression
lasso_cvlamT <- glmnet::cv.glmnet(XmatT, Train.data[, Rsp], family = "binomial", alpha = alpha)$lambda.min
lassoModT <- glmnet::glmnet(XmatT, Train.data[, Rsp], family = "binomial", alpha = alpha, lambda = lasso_cvlamT)
# predict on the test set
predE <- stats::predict(lassoModT, XmatE, type = "response")
# predict on the training set
predT <- stats::predict(lassoModT, XmatT, type = "response")
# calculate the Pearson residual
res <- (Train.data[, Rsp] - predT)/sqrt(predT * (1 - predT ))
return(list(predT = predT, predE = predE, res = res, Rsp = Rsp))
}
return(testModel)
}
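# --- illustration (a minimal sketch; not run): with alpha = 1 testGlmnet()
# fits a cross-validated lasso; alpha = 0 would give ridge. The simulated
# data below are hypothetical.
if (FALSE) {
set.seed(3)
n <- 200
dat <- data.frame(matrix(rnorm(n * 5), n, 5))
dat$y <- rbinom(n, 1, plogis(dat$X1 - dat$X2))
tm <- testGlmnet(formula = y ~ ., alpha = 1)
fit <- tm(Train.data = dat[1:100, ], Validation.data = dat[101:200, ])
head(fit$predE)
}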
| /scratch/gouwar.j/cran-all/cranData/BAGofT/R/testGlmnet.R |
#' @export
#######################################################
# 'testModel' function for random forest
#######################################################
testRF <- function(formula, ntree = 500, mtry = NULL, maxnodes = NULL){
# return a function of train data and test data
testModel <- function(Train.data, Validation.data){
if (is.null(mtry)){
# default mtry = (number of regressors)/3; the original code wrapped this in
# length(), which always yielded 1
mtry <- max(floor((dim(stats::model.matrix(formula, Train.data))[2] - 1)/3), 1)
}
# obtain the response name
Rsp <- as.character(formula)[2]
RspDat <- Train.data[,Rsp]
Train.data[,Rsp] <- as.factor(Train.data[,Rsp])
Validation.data[,Rsp] <- as.factor(Validation.data[,Rsp])
resRf <- randomForest::randomForest(formula, data = Train.data, ntree = ntree, maxnodes = maxnodes, mtry = mtry, importance = FALSE)
# obtain random forest prediction on the training set
predT <- stats::predict(resRf, newdata = Train.data, type = "prob")[,2]
# obtain random forest prediction on the test set
predE <- stats::predict(resRf, newdata = Validation.data, type = "prob")[,2]
# calculate the raw residual (the Pearson version is kept below for reference)
# res <- (RspDat - predT)/sqrt(predT * (1 - predT))
res <- (RspDat - predT)
return(list(predT = predT, predE = predE, res = res, Rsp = Rsp))
}
return(testModel)
}
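# --- illustration (a minimal sketch; not run): testRF() coerces the response
# to a factor and uses the second-class probabilities as predictions.
# The simulated data below are hypothetical.
if (FALSE) {
set.seed(4)
n <- 200
dat <- data.frame(x1 = rnorm(n), x2 = rnorm(n))
dat$y <- rbinom(n, 1, plogis(dat$x1))
tm <- testRF(formula = y ~ x1 + x2, ntree = 100)
fit <- tm(Train.data = dat[1:100, ], Validation.data = dat[101:200, ])
range(fit$predT)
}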
| /scratch/gouwar.j/cran-all/cranData/BAGofT/R/testRF.R |
#' @export
#######################################################
# 'testModel' function for XGBoost
#######################################################
testXGboost <- function(formula, params = list(), nrounds = 25){
# return a function of train data and test data
testModel <- function(Train.data, Validation.data){
# obtain the response name
Rsp <- as.character(formula)[2]
# regressor data
XmatT <- stats::model.matrix(formula, Train.data)[,-1]
XmatE <- stats::model.matrix(formula, Validation.data)[,-1]
# fit xgboost
xgModT <- xgboost::xgboost(data = XmatT, label = Train.data[, Rsp], params = params, nrounds = nrounds, objective = "binary:logistic", verbose = 0)
# predict on the test set
predE <- stats::predict(xgModT, XmatE)
# predict on the training set
predT <- stats::predict(xgModT, XmatT)
# calculate the Pearson residual
res <- (Train.data[, Rsp] - predT)/sqrt(predT * (1 - predT))
return(list(predT = predT, predE = predE, res = res, Rsp = Rsp))
}
return(testModel)
}
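# --- illustration (a minimal sketch; not run): testXGboost() expects a 0/1
# numeric response and passes the model-matrix regressors to xgboost() with
# the "binary:logistic" objective. The simulated data below are hypothetical.
if (FALSE) {
set.seed(5)
n <- 200
dat <- data.frame(x1 = rnorm(n), x2 = rnorm(n))
dat$y <- rbinom(n, 1, plogis(dat$x1 - dat$x2))
tm <- testXGboost(formula = y ~ x1 + x2, nrounds = 20)
fit <- tm(Train.data = dat[1:100, ], Validation.data = dat[101:200, ])
head(fit$res) # Pearson residuals on the training set
}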
| /scratch/gouwar.j/cran-all/cranData/BAGofT/R/testXGboost.R |
###########################################
################## BALLI ##################
###########################################
########## Made by Kyungtaek Park #########
######### Created on 26 Dec 2017 ##########
######## final edited on 24 Apr 2019 #######
###########################################
### Set Class ###
# Created by Kyungtaek Park on 26 Dec 2017
# Last modified 4 Apr 2019
#' Class TecVarList
#' Class \code{TecVarList} holds technical variance
#' @name TecVarList-class
#' @rdname TecVarList-class
#' @exportClass TecVarList
setClass("TecVarList", representation("list"))
#' Class Balli
#' Class \code{Balli} holds results from BALLI
#' @name Balli-class
#' @rdname Balli-class
#' @exportClass Balli
setClass("Balli", representation("list"))
#' Class LargeDataObject
#' Class \code{LargeDataObject} holds large data such as technical variance and results from BALLI fit
#' @name LargeDataObject-class
#' @rdname LargeDataObject-class
#' @exportClass LargeDataObject
setClass("LargeDataObject")
setIs("TecVarList","LargeDataObject")
setIs("Balli","LargeDataObject")
# Print and show method large data objects derived from code written in limma package
setMethod("show","LargeDataObject",
function(object)
{
cat("An object of class \"",class(object),"\"\n",sep="")
for (what in names(object)) {
x <- object[[what]]
cat("$",what,"\n",sep="")
printHead(x)
cat("\n")
}
for (what in setdiff(slotNames(object),".Data")) {
x <- slot(object,what)
if(length(x) > 0) {
cat("@",what,"\n",sep="")
printHead(x)
cat("\n")
}
}
})
#' Technical Variance Estimation
#' @description Estimate technical variance by using the voom-trend. The code is derived from the voom function in the limma package
#' @param counts a DGEList object
#' @param design design matrix with samples in row and coefficient(s) to be estimated in column
#' @param lib.size numeric vector containing total library sizes for each sample
#' @param span width of the lowess smoothing window as a proportion
#' @param ... other arguments are passed to lmFit.
#' @return an TecVarList object with the following components:
#' \item{targets}{matrix containing covariables, library sizes and normalization factors of each sample}
#' \item{design}{design matrix with samples in row and covariable(s) to be estimated in column}
#' \item{logcpm}{logcpm values of each gene and each sample}
#' \item{tecVar}{estimated technical variance of each gene and each sample}
#' @examples
#' expr <- data.frame(t(sapply(1:1000,function(x)rnbinom(20,mu=500,size=50))))
#' group <- c(rep("A",10),rep("B",10))
#' design <- model.matrix(~group, data = expr)
#' dge <- DGEList(counts=expr, group=group)
#' dge <- calcNormFactors(dge)
#' tecVarEstim(dge,design)
#' @export
# Derived from code written in limma package
# Created by Kyungtaek Park on 26 Dec 2017
# Last modified 4 Apr 2019
tecVarEstim <- function(counts,design=NULL,lib.size=NULL,span=0.5,...)
{
out <- list()
# Check counts
if(is(counts,"DGEList")) {
logcpm <- cpm(counts,log=T)
out$targets <- counts$samples
if(is.null(design) && diff(range(as.numeric(counts$samples$group)))>0) design <- model.matrix(~group,data=counts$samples)
if(is.null(lib.size)) lib.size <- with(counts$samples,lib.size*norm.factors)
counts <- counts$counts
} else {
stop("counts must be DEGList")
}
n <- nrow(counts)
if(n < 2L) stop("Need at least two genes to fit a mean-variance trend")
# Fit linear model to log2-counts-per-million
y <- t(log2(t(counts+0.5)/(lib.size+1)*1e6))
fit <- lmFit(y,design,...)
# Fit lowess trend to sqrt-standard-deviations by log-count-size
sx <- fit$Amean+mean(log2(lib.size+1))-log2(1e6)
sy <- sqrt(fit$sigma)
allzero <- rowSums(counts)==0
if(any(allzero)) {
sx <- sx[!allzero]
sy <- sy[!allzero]
}
l <- lowess(sx,sy,f=span)
f <- approxfun(l, rule=2)
# Find individual quarter-root fitted counts
if(fit$rank < ncol(design)) {
j <- fit$pivot[1:fit$rank]
fitted.values <- fit$coef[,j,drop=FALSE] %*% t(fit$design[,j,drop=FALSE])
} else {
fitted.values <- fit$coef %*% t(fit$design)
}
fitted.cpm <- 2^fitted.values
fitted.count <- 1e-6 * t(t(fitted.cpm)*(lib.size+1))
fitted.logcount <- log2(fitted.count)
# Apply trend to individual observations
ssquare <- f(fitted.logcount)^4
dim(ssquare) <- dim(fitted.logcount)
inverse.mu.hat <- 1/(2^fitted.logcount*(1+1/2*log(2)^2*ssquare))
colnames(inverse.mu.hat) <- colnames(y)
out$targets$lib.size <- lib.size
out$design <- design
out$logcpm <- logcpm
out$tecVar <- inverse.mu.hat
new("TecVarList",out)
}
#' balliFit
#' @description Estimates the likelihood and Bartlett correction factor of each gene using the BALLI algorithm
#' @param y_mat numeric vector containing log-cpm values of each gene and each sample
#' @param x_mat design matrix with samples in row and covariable(s) to be estimated in column
#' @param tecVar numeric vector containing estimated technical variance of a gene of each sample
#' @param intVar numeric vector designating interest variable(s) which is(are) column number(s) of x_mat
#' @param full logical value designating full model (TRUE) or reduced model (FALSE).
#' @param cfault initial value of index showing whether converged (0) or not (1).
#' @param miter maximum number of iterations allowed for convergence.
#' @param conv threshold for convergence
#' @return following components are estimated
#' \item{ll}{log-likelihoods}
#' \item{beta}{coefficients of interested variable(s)}
#' \item{alpha}{coefficients of nuisance variable(s)}
#' \item{BCF}{Bartlett's correction factor}
#' \item{cfault}{index whether converged or not}
#' @examples
#' expr <- data.frame(t(sapply(1:1000,function(x)rnbinom(20,mu=500,size=50))))
#' group <- c(rep("A",10),rep("B",10))
#' design <- model.matrix(~group, data = expr)
#' dge <- DGEList(counts=expr, group=group)
#' dge <- calcNormFactors(dge)
#' tV <- tecVarEstim(dge,design)
#' gtv <- tV$tecVar[1,]
#' gdat <- data.frame(logcpm=tV$logcpm[1,],design,tecVar=gtv)
#' gy <- matrix(unlist(gdat[,1]),ncol=1)
#' gx <- matrix(unlist(gdat[,2:(ncol(gdat)-1)]),ncol=ncol(gdat)-2)
#' balliFit(y_mat=gy,x_mat=gx,tecVar=gtv,intVar=2,full=TRUE,cfault=0,miter=200,conv=1e-6)
#' @export
# Created by Kyungtaek Park on 26 Dec 2017
# Last modified 4 Apr 2019
balliFit <- function(y_mat,x_mat,tecVar,intVar=2,full=T,cfault=0,miter=200,conv=1e-6) {
TT <- length(y_mat)
xp_tilda <- x_mat[,intVar,drop=F]
xnp_tilda <- x_mat[,-intVar,drop=F]
# Initial Value
sigma2_m1 <- var(y_mat)
varCov <- diag(c(tecVar)+c(sigma2_m1))
vi <- ginv(varCov)
Anpti <- solve(t(xnp_tilda) %*% vi %*% xnp_tilda)
xpp_tilda <- (diag(TT)-xnp_tilda %*% Anpti %*% t(xnp_tilda) %*% vi) %*% xp_tilda
Appti <- solve(t(xpp_tilda) %*% vi %*% xpp_tilda)
beta <- solve(t(x_mat) %*% vi %*% x_mat) %*% t(x_mat) %*% vi %*% y_mat
psi <- beta[intVar,]
xi <- Anpti %*% t(xnp_tilda) %*% vi %*% (y_mat - xpp_tilda %*% psi)
z <- y_mat - xpp_tilda %*% psi - xnp_tilda %*% xi
d1vs <- diag(TT)
d1vis <- -vi %*% d1vs %*% vi
d1xppts <- -xnp_tilda %*% Anpti %*% t(xnp_tilda) %*% d1vis %*% xpp_tilda
d1ls <- -1/2*sum(diag(vi %*% d1vs))-1/2*t(z) %*% d1vis %*% z + t(psi) %*% t(d1xppts) %*% vi %*% z
# Fisher Scoring Method
ii <- 1
diffs <- 1
while(diffs > conv) {
if(full) {
psi <- Appti %*% t(xpp_tilda) %*% vi %*% (y_mat - xnp_tilda %*% xi)
} else {
psi <- matrix(rep(0,length(intVar)),ncol=1)
}
xi <- Anpti %*% t(xnp_tilda) %*% vi %*% (y_mat - xpp_tilda %*% psi)
z <- y_mat - xpp_tilda %*% psi - xnp_tilda %*% xi
sigma2_i_mat <- - 1/2 * sum(diag(d1vis %*% d1vs)) + t(psi) %*% t(d1xppts) %*% vi %*% d1xppts %*% psi
sigma2_m2 <- sigma2_m1 + 1/sigma2_i_mat * d1ls
varCov <- diag(c(tecVar)+c(sigma2_m2))
vi <- ginv(varCov)
Anpti <- solve(t(xnp_tilda) %*% vi %*% xnp_tilda)
xpp_tilda <- (diag(TT)-xnp_tilda %*% Anpti %*% t(xnp_tilda) %*% vi) %*% xp_tilda
Appti <- solve(t(xpp_tilda) %*% vi %*% xpp_tilda)
d1vis <- -vi %*% d1vs %*% vi
d1xppts <- -xnp_tilda %*% Anpti %*% t(xnp_tilda) %*% d1vis %*% xpp_tilda
d1ls <- -1/2*sum(diag(vi %*% d1vs))-1/2*t(z) %*% d1vis %*% z + t(psi) %*% t(d1xppts) %*% vi %*% z
diffs <- abs(sigma2_m2-sigma2_m1)
if( ii == miter ) {
iii <- 1
diffs <- 1
sigma2_m2 <- sigma2_m1
while(diffs > conv) {
if(full) {
psi <- Appti %*% t(xpp_tilda) %*% vi %*% (y_mat - xnp_tilda %*% xi)
} else {
psi <- matrix(rep(0,length(intVar)),ncol=1)
}
xi <- Anpti %*% t(xnp_tilda) %*% vi %*% (y_mat - xpp_tilda %*% psi)
z <- y_mat - xpp_tilda %*% psi - xnp_tilda %*% xi
sigma.ll <- function(ss) {1/2*sum(log(tecVar+ss))+1/2*t(z) %*% solve(diag(c(tecVar+ss))) %*% z}
# Fisher scoring did not converge within miter steps: estimate the biological
# variance directly by minimizing the negative profile log-likelihood with
# Brent's method (optim, method = "Brent")
sigma2_m2 <- optim(sigma2_m1,sigma.ll,method="Brent",lower=0,upper=1e+10)$par
varCov <- diag(c(tecVar)+c(sigma2_m2))
vi <- ginv(varCov)
Anpti <- solve(t(xnp_tilda) %*% vi %*% xnp_tilda)
xpp_tilda <- (diag(TT)-xnp_tilda %*% Anpti %*% t(xnp_tilda) %*% vi) %*% xp_tilda
Appti <- solve(t(xpp_tilda) %*% vi %*% xpp_tilda)
d1vis <- -vi %*% d1vs %*% vi
d1xppts <- -xnp_tilda %*% Anpti %*% t(xnp_tilda) %*% d1vis %*% xpp_tilda
diffs <- abs(sigma2_m2-sigma2_m1)
if (sigma2_m2 < 1e-8) {
sigma2_m2 <- 0
break
}
if( iii == miter ) {
cfault <- 1
break
} else {
iii <- iii + 1
sigma2_m1 <- sigma2_m2
}
}
break
} else {
ii <- ii + 1
sigma2_m1 <- sigma2_m2
}
}
if(sigma2_m2 <= 0) {
sigma2_m2 <- 0
varCov <- diag(c(tecVar))
vi <- ginv(varCov)
Anpti <- solve(t(xnp_tilda) %*% vi %*% xnp_tilda)
xpp_tilda <- (diag(TT)-xnp_tilda %*% Anpti %*% t(xnp_tilda) %*% vi) %*% xp_tilda
Appti <- solve(t(xpp_tilda) %*% vi %*% xpp_tilda)
d1vs <- matrix(0,nrow=nrow(vi),ncol=ncol(vi))
d1vis <- d1vs
d1xppts <- -xnp_tilda %*% Anpti %*% t(xnp_tilda) %*% d1vis %*% xpp_tilda
psi <- beta[intVar,]
xi <- Anpti %*% t(xnp_tilda) %*% vi %*% (y_mat - xpp_tilda %*% psi)
if(full) {
psi <- Appti %*% t(xpp_tilda) %*% vi %*% (y_mat - xnp_tilda %*% xi)
} else {
psi <- matrix(rep(0,length(intVar)),ncol=1)
}
xi <- Anpti %*% t(xnp_tilda) %*% vi %*% (y_mat - xpp_tilda %*% psi)
z <- y_mat - xpp_tilda %*% psi - xnp_tilda %*% xi
}
ll <- -TT/2*log(2*pi) - 1/2*sum(log(diag(varCov)))-1/2*t(z) %*% vi %*% z
if(full) {
return(list(ll=ll,beta=psi,alpha=xi,cfault=cfault))
} else {
if(sigma2_m2 != 0) {
d2viss <- -2 * d1vis %*% d1vs %*% vi
D <- matrix(1/2*sum(diag(d1vis %*% d1vs)))
M <- matrix(sum(diag(Appti %*% (t(xpp_tilda) %*% d2viss %*% xpp_tilda + 2 * t(d1xppts) %*% d1vis %*% xpp_tilda))))
P <- matrix(sum(diag(t(xpp_tilda) %*% d1vis %*% xpp_tilda %*% Appti %*% t(xpp_tilda) %*% d1vis %*% xpp_tilda %*% Appti)))
tau <- sum(diag(Appti %*% t(xpp_tilda) %*% d1vis %*% xpp_tilda))
gamma <- 0
nu <- sum(diag(Anpti %*% t(xnp_tilda) %*% d1vis %*% xnp_tilda))
C <- sum(diag(solve(D) %*% (-1/2*M + 1/4*P - 1/2*(gamma+nu) %*% t(tau))))
} else {
C <- 0
}
return(list(ll=ll,BCF=C,cfault=cfault))
}
}
#' BALLI
#' @description DEG analysis using BALLI algorithm
#' @param object a TecVarList object
#' @param intV numeric vector designating interest variable(s) which is(are) column number(s) of design matrix
#' @param logcpm logcpm values for each gene and each sample
#' @param tecVar estimated technical variance values for each gene and each sample
#' @param design design matrix with samples in row and covariable(s) to be estimated in column
#' @param numCores number of cores to be used for multithreading. If NULL, a single core is used
#' @param threshold threshold for convergence
#' @param maxiter maximum number of iterations for the estimated biological variance to converge. If convergence is not reached, the biological variance is estimated by the Brent method
#' @return a Balli object including Result and topGenes list. The following components are given by Result (same order of genes as the input data) and topGenes (ordered by pBALLI in Result) :
#' \item{log2FC}{log2 fold changes of interest variable(s)}
#' \item{lLLI}{log-likelihoods estimated by LLI}
#' \item{lBALLI}{log-likelihoods estimated by BALLI}
#' \item{pLLI}{p-values estimated by LLI}
#' \item{pBALLI}{p-values estimated by BALLI}
#' \item{BCF}{Bartlett's correction factor}
#' @examples
#' expr <- data.frame(t(sapply(1:1000,function(x)rnbinom(20,mu=500,size=50))))
#' group <- c(rep("A",10),rep("B",10))
#' design <- model.matrix(~group, data = expr)
#' dge <- DGEList(counts=expr, group=group)
#' dge <- calcNormFactors(dge)
#' tV <- tecVarEstim(dge,design)
#' balli(tV,intV=2)
#' @export
# Created by Kyungtaek Park on 26 Dec 2017
# Last modified 4 Apr 2019
balli <- function(object,intV=2,logcpm=NULL,tecVar=NULL,design=NULL,numCores=NULL,threshold=1e-6,maxiter=200) {
out <- list()
if(is(object,"TecVarList")) {
# Check logcpm
if(is.null(logcpm)) logcpm <- object$logcpm
# Check technical variance
if(is.null(tecVar)) tecVar <- object$tecVar
# Check design
if(is.null(design)) design <- object$design
} else {
if (is.null(logcpm)) stop("logcpm must be designated or object must be TecVarList")
if (is.null(tecVar)) stop("tecVar must be designated or object must be TecVarList")
if (is.null(design)) stop("design must be designated or object must be TecVarList")
}
design <- as.matrix(design)
balliFitAllGenes <- function(gidx){
gtv <- tecVar[gidx,]
gdat <- data.frame(logcpm=logcpm[gidx,],design,tecVar=gtv)
gy <- matrix(unlist(gdat[,1]),ncol=1)
gx <- matrix(unlist(gdat[,2:(ncol(gdat)-1)]),ncol=ncol(gdat)-2)
LL_h1 <- balliFit(y_mat=gy,x_mat=gx,tecVar=gtv,intVar=intV,miter=maxiter,conv=threshold,full=T)
if (LL_h1$cfault == 1) {
# retry with a larger iteration limit (the original stop("check") left this
# retry unreachable)
message("maximum number of iterations reached; retrying with miter = 500")
LL_h1 <- balliFit(y_mat=gy,x_mat=gx,tecVar=gtv,intVar=intV,conv=threshold,full=T,miter=500)
}
LL_h0 <- balliFit(y_mat=gy,x_mat=gx,tecVar=gtv,intVar=intV,miter=maxiter,conv=threshold,full=F)
if (LL_h0$cfault == 1) {
message("maximum number of iterations reached; retrying with miter = 500")
LL_h0 <- balliFit(y_mat=gy,x_mat=gx,tecVar=gtv,intVar=intV,conv=threshold,full=F,miter=500)
}
LL_full <- LL_h1$ll
beta <- LL_h1$beta
LL_red <- LL_h0$ll
BCF <- LL_h0$BCF
LR <- -2*(LL_red-LL_full)
BCLR <- LR/(1+BCF/length(intV))
pLR <- pchisq(LR,df=length(intV),lower.tail=F)
pBCLR <- pchisq(BCLR,df=length(intV),lower.tail=F)
return(c(beta,LR,BCLR,pLR,pBCLR,BCF))
}
if(is.null(numCores)) {
res <- t(sapply(1:nrow(logcpm),balliFitAllGenes))
} else {
res <- do.call(rbind,mclapply(1:nrow(logcpm),balliFitAllGenes,mc.cores=numCores))
}
res <- as.data.frame(res)
rownames(res) <- rownames(logcpm)
colnames(res) <- c(paste0("log2FC_",colnames(design)[intV]),"lLLI","lBALLI","pLLI","pBALLI","BCF")
adjpLLI <- p.adjust(res$pLLI,method="fdr")
adjpBALLI <- p.adjust(res$pBALLI,method="fdr")
top_res <- data.frame(res[,c(paste0("log2FC_",colnames(design)[intV]),"pLLI","pBALLI")],adjpLLI=adjpLLI,adjpBALLI=adjpBALLI)
top_res <- top_res[order(top_res$pBALLI),]
out$Result <- res
out$topGenes <- top_res
new("Balli",out)
}
#'@importFrom edgeR DGEList calcNormFactors cpm
#'@importFrom limma lmFit printHead
#'@importFrom MASS ginv
#'@importFrom parallel mclapply
#'@importFrom stats approxfun lowess model.matrix optim p.adjust pchisq var
#'@importFrom methods is new slot slotNames
NULL
| /scratch/gouwar.j/cran-all/cranData/BALLI/R/BALLI.v0.2.R |
## ----load-packages, message=FALSE, warning=F-----------------------------
require(BALLI)
## ------------------------------------------------------------------------
GenerateData <- function(nRow) {
expr_mean <- runif(1,10,100)
expr_size <- runif(1,1,10)
expr <- rnbinom(20,mu=expr_mean,size=expr_size)
return(expr)
}
data <- data.frame(t(sapply(1:10000,GenerateData)))
colnames(data) <- c(paste0("A",1:10),paste0("B",1:10))
rownames(data) <- paste0("gene",1:10000)
head(data)
## ------------------------------------------------------------------------
Group <- c(rep("A",10),rep("B",10))
Group
## ------------------------------------------------------------------------
design <- model.matrix(~Group, data = data)
head(design)
## ------------------------------------------------------------------------
dge <- DGEList(counts=data, group=Group)
dge <- calcNormFactors(dge)
dge
## ------------------------------------------------------------------------
tV <- tecVarEstim(dge,design)
tV
## ------------------------------------------------------------------------
fit <- balli(tV,intV=2)
fit
| /scratch/gouwar.j/cran-all/cranData/BALLI/inst/doc/QuickStart.R |
---
title: "Quick start of BALLI package"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Quick start of BALLI package}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
# Quick Start
This is a quick start manual of **BALLI**.
```{r load-packages, message=FALSE, warning=F}
require(BALLI)
```
### 1. Load Count Data
```
data <- data.frame(read.table("counts.txt"))
```
or make example count data
```{r}
GenerateData <- function(nRow) {
expr_mean <- runif(1,10,100)
expr_size <- runif(1,1,10)
expr <- rnbinom(20,mu=expr_mean,size=expr_size)
return(expr)
}
data <- data.frame(t(sapply(1:10000,GenerateData)))
colnames(data) <- c(paste0("A",1:10),paste0("B",1:10))
rownames(data) <- paste0("gene",1:10000)
head(data)
```
### 2. Designate Group Information and Make Design Matrix
```{r}
Group <- c(rep("A",10),rep("B",10))
Group
```
```{r}
design <- model.matrix(~Group, data = data)
head(design)
```
### 3. Normalize Count Data
```{r}
dge <- DGEList(counts=data, group=Group)
dge <- calcNormFactors(dge)
dge
```
### 4. Estimate Technical Variance
```{r}
tV <- tecVarEstim(dge,design)
tV
```
### 5. Fit BALLI and See Top Significant Genes
```{r}
fit <- balli(tV,intV=2)
fit
```
| /scratch/gouwar.j/cran-all/cranData/BALLI/inst/doc/QuickStart.Rmd |
---
title: "Quick start of BALLI package"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Quick start of BALLI package}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
# Quick Start
This is a quick start manual of **BALLI**.
```{r load-packages, message=FALSE, warning=F}
require(BALLI)
```
### 1. Load Count Data
```
data <- data.frame(read.table("counts.txt"))
```
or make example count data
```{r}
GenerateData <- function(nRow) {
expr_mean <- runif(1,10,100)
expr_size <- runif(1,1,10)
expr <- rnbinom(20,mu=expr_mean,size=expr_size)
return(expr)
}
data <- data.frame(t(sapply(1:10000,GenerateData)))
colnames(data) <- c(paste0("A",1:10),paste0("B",1:10))
rownames(data) <- paste0("gene",1:10000)
head(data)
```
### 2. Designate Group Information and Make Design Matrix
```{r}
Group <- c(rep("A",10),rep("B",10))
Group
```
```{r}
design <- model.matrix(~Group, data = data)
head(design)
```
### 3. Normalize Count Data
```{r}
dge <- DGEList(counts=data, group=Group)
dge <- calcNormFactors(dge)
dge
```
### 4. Estimate Technical Variance
```{r}
tV <- tecVarEstim(dge,design)
tV
```
### 5. Fit BALLI and See Top Significant Genes
```{r}
fit <- balli(tV,intV=2)
fit
```
| /scratch/gouwar.j/cran-all/cranData/BALLI/vignettes/QuickStart.Rmd |
#' `BAMBI`: An R package for Bivariate Angular Mixture Models
#'
#' `BAMBI` is an R package that provides functions for fitting
#' (using Bayesian methods) and simulating mixtures of univariate
#' and bivariate angular distributions. Please see the reference for a
#' detailed description of the functionalities of `BAMBI`.
#'
#' @references
#'
#' Chakraborty, S., & Wong, S. W. (2021). BAMBI: An R package for
#' fitting bivariate angular mixture models. *Journal of Statistical Software*,
#' 99 (11), 1-69. \doi{10.18637/jss.v099.i11}
#'
#' @name BAMBI
#' @md
NULL
#' @useDynLib BAMBI, .registration = TRUE
#' @import stats
#' @importFrom stats4 mle
#' @import graphics
#' @importFrom grDevices colorRampPalette
#' @importFrom methods is
#' @importFrom utils tail txtProgressBar
#' @importFrom parallel detectCores
#' @importFrom Rcpp sourceCpp evalCpp
NULL
#' @export
print.angmcmc <- function(x, ...) {
# browser()
output <- paste("Dataset consists of", x$n.data, "observations.")
if(grepl("hmc", x$method)) {
output[2] <- paste(x$ncomp, "component", x$model, "mixture fitted via HMC for model parameters.",
ifelse(x$n.chains == 1, "Number of chain = ", "Number of chains = "),
paste0(x$n.chains, "."))
if(x$epsilon.random) {
output[3] <- paste("epsilon chosen randomly at each iteration with average epsilon =",
paste0(format(x$epsilon, scientific=TRUE, digits = 2), collapse = ", "))
} else {
output[3] <- paste("epsilon fixed at", x$epsilon )
}
if(x$L.random){
output[4] <- paste("L chosen randomly at each iteration with average L =",
paste(round(x$L, 2), collapse = ", "),
"across the ", x$n.chains, ifelse(x$n.chains == 1, "chain.", "chains."))
} else {
output[4] <- paste("L fixed at", paste(x$L, collapse = ", "),
"across the", x$n.chains, ifelse(x$n.chains == 1, "chain.", "chains."))
}
output[5] <- paste("acceptance rate for model parameters = ",
round(100*mean(x$accpt.modelpar), 2), "%.")
}
else if(grepl("rwmh", x$method)) {
output[2] <- paste(x$ncomp, "component", x$model, "mixture fitted via RWMH for model parameters.",
ifelse(x$n.chains == 1, "Number of chain = ", "Number of chains = "),
paste0(x$n.chains, "."))
output[3] <- paste("proposals are independent normal with variances",
paste(format(x$propscale.final, scientific=TRUE, digits = 2), sep = "", collapse = ", "),
"for", paste(x$par.name[-1], sep = "", collapse=", "))
output[4] <- paste("acceptance rate for model parameters = ",
round(100*mean(x$accpt.modelpar), 2), "%.")
}
output[5] <- paste("Number of iterations =", x$n.iter)
cat(output, sep = "\n")
}
.onUnload <- function (libpath) {
library.dynam.unload("BAMBI", libpath)
}
#' @export
print.stepfit <- function(x, ...)
{
if(x$check_min) {
output <- paste("Optimum component size =", x$ncomp.best)
output[2] <- paste("Extract the best fit using the function \'bestmodel()\'")
cat("\n")
cat(output, sep = "\n")
} else {
warning(paste(toupper(x$crit), "did not attain a first minimum. Probably more clusters are needed."))
cat("\n")
cat("Extract the best fit using the function \'bestmodel()\'")
}
}
#' Angular MCMC (\code{angmcmc}) Object
#' @description Checking for and creating an angmcmc object
#' @param object any R object
#' @param ... arguments required to make an angmcmc object. See details
#' @return logical. Is the input an angmcmc object?
#' @details
#' \code{angmcmc} objects are classed lists that are created when any of the five mixture model fitting
#' functions, viz., \code{fit_vmmix}, \code{fit_wnormmix}, \code{fit_vmsinmix}, \code{fit_vmcosmix} and
#' \code{fit_wnorm2mix} is used. An \code{angmcmc} object contains a number of elements, including the dataset, the
#' model being fitted on the dataset and dimension of the model (univariate or bivariate), the tuning parameters
#' used, MCMC samples for the mixture model parameters, the (hidden) component or cluster indicators for data
#' points in each iteration and the (iteration-wise) log likelihood and log posterior density values (both calculated
#' upto some normalizing constants). When printed, an angmcmc object returns a brief summary of the function
#' arguments used to produce the object and the average acceptance rate of the proposals (in HMC and RWMH) used
#' over iterations. An \code{angmcmc} object can be used as an argument for the diagnostic and post-processing
#' functions available in \code{BAMBI} for making further inferences.
#' @examples
#' # illustration only - more iterations needed for convergence
#' fit.vmsin.20 <- fit_vmsinmix(tim8, ncomp = 3, n.iter = 20,
#' n.chains = 1)
#' is.angmcmc(fit.vmsin.20)
#' @export
is.angmcmc <- function(object) {
inherits(object, "angmcmc")
}
#' @rdname is.angmcmc
#' @export
angmcmc <- function(...) {
ell <- list(...)
class(ell) <- "angmcmc"
ell
}
| /scratch/gouwar.j/cran-all/cranData/BAMBI/R/BAMBI-package.R |
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
rowVars <- function(mat_in) {
.Call(`_BAMBI_rowVars`, mat_in)
}
par_mat_permute <- function(par_mat, perm_lab) {
.Call(`_BAMBI_par_mat_permute`, par_mat, perm_lab)
}
cID <- function(probs, ncomp, Uv) {
.Call(`_BAMBI_cID`, probs, ncomp, Uv)
}
change_labs <- function(orig, rand_perm) {
.Call(`_BAMBI_change_labs`, orig, rand_perm)
}
calc_corr_tau_2 <- function(samp_mat) {
.Call(`_BAMBI_calc_corr_tau_2`, samp_mat)
}
calc_corr_tau_1 <- function(samp_mat) {
.Call(`_BAMBI_calc_corr_tau_1`, samp_mat)
}
calc_corr_fl <- function(samp_mat) {
.Call(`_BAMBI_calc_corr_fl`, samp_mat)
}
const_univm <- function(k) {
.Call(`_BAMBI_const_univm`, k)
}
log_const_univm_all <- function(par_mat) {
.Call(`_BAMBI_log_const_univm_all`, par_mat)
}
ldunivmnum <- function(x, par) {
.Call(`_BAMBI_ldunivmnum`, x, par)
}
dunivm_manyx_onepar <- function(x, k, mu) {
.Call(`_BAMBI_dunivm_manyx_onepar`, x, k, mu)
}
dunivm_manyx_manypar <- function(x, k, mu) {
.Call(`_BAMBI_dunivm_manyx_manypar`, x, k, mu)
}
dunivm_onex_manypar <- function(x, k, mu) {
.Call(`_BAMBI_dunivm_onex_manypar`, x, k, mu)
}
runivm_onepar <- function(n, k, mu) {
.Call(`_BAMBI_runivm_onepar`, n, k, mu)
}
runivm_manypar <- function(k, mu) {
.Call(`_BAMBI_runivm_manypar`, k, mu)
}
univmmix <- function(x, par, pi, log_c_von) {
.Call(`_BAMBI_univmmix`, x, par, pi, log_c_von)
}
univmmix_manyx <- function(x, par, pi, log_c) {
.Call(`_BAMBI_univmmix_manyx`, x, par, pi, log_c)
}
mem_p_univm <- function(data, par, pi, log_c_von) {
.Call(`_BAMBI_mem_p_univm`, data, par, pi, log_c_von)
}
llik_univm_one_comp <- function(data, par_vec, log_c) {
.Call(`_BAMBI_llik_univm_one_comp`, data, par_vec, log_c)
}
grad_llik_univm_C <- function(data, par) {
.Call(`_BAMBI_grad_llik_univm_C`, data, par)
}
llik_univm_contri_C <- function(data, par, pi, log_c) {
.Call(`_BAMBI_llik_univm_contri_C`, data, par, pi, log_c)
}
l_const_uniwnorm <- function(k) {
.Call(`_BAMBI_l_const_uniwnorm`, k)
}
const_uniwnorm <- function(k) {
.Call(`_BAMBI_const_uniwnorm`, k)
}
log_const_uniwnorm_all <- function(par_mat) {
.Call(`_BAMBI_log_const_uniwnorm_all`, par_mat)
}
lduniwnormnum <- function(x, par, omega_2pi_1d) {
.Call(`_BAMBI_lduniwnormnum`, x, par, omega_2pi_1d)
}
duniwnorm_manyx_onepar <- function(x, k, mu, omega_2pi_1d) {
.Call(`_BAMBI_duniwnorm_manyx_onepar`, x, k, mu, omega_2pi_1d)
}
duniwnorm_manyx_manypar <- function(x, k, mu, omega_2pi_1d) {
.Call(`_BAMBI_duniwnorm_manyx_manypar`, x, k, mu, omega_2pi_1d)
}
duniwnorm_onex_manypar <- function(x, k, mu, omega_2pi_1d) {
.Call(`_BAMBI_duniwnorm_onex_manypar`, x, k, mu, omega_2pi_1d)
}
mem_p_uniwnorm <- function(data, par, pi, log_c_von, omega_2pi_1d) {
.Call(`_BAMBI_mem_p_uniwnorm`, data, par, pi, log_c_von, omega_2pi_1d)
}
llik_uniwnorm_contri_C <- function(data, par, pi, log_c, omega_2pi_1d) {
.Call(`_BAMBI_llik_uniwnorm_contri_C`, data, par, pi, log_c, omega_2pi_1d)
}
llik_uniwnorm_one_comp <- function(data, par_vec, log_c, omega_2pi_1d) {
.Call(`_BAMBI_llik_uniwnorm_one_comp`, data, par_vec, log_c, omega_2pi_1d)
}
grad_llik_uniwnorm_C <- function(data, par, omega_2pi_1d) {
.Call(`_BAMBI_grad_llik_uniwnorm_C`, data, par, omega_2pi_1d)
}
BESSI0_C <- function(x) {
.Call(`_BAMBI_BESSI0_C`, x)
}
const_vmcos_anltc <- function(k1, k2, k3) {
.Call(`_BAMBI_const_vmcos_anltc`, k1, k2, k3)
}
const_vmcos_mc <- function(k1, k2, k3, uni_rand, return_log = FALSE) {
.Call(`_BAMBI_const_vmcos_mc`, k1, k2, k3, uni_rand, return_log)
}
const_vmcos <- function(k1, k2, k3, uni_rand, return_log = FALSE) {
.Call(`_BAMBI_const_vmcos`, k1, k2, k3, uni_rand, return_log)
}
d_const_vmcos_anltc <- function(k1, k2, k3) {
.Call(`_BAMBI_d_const_vmcos_anltc`, k1, k2, k3)
}
d_const_vmcos_mc <- function(k1, k2, k3, uni_rand, ncores = 1L) {
.Call(`_BAMBI_d_const_vmcos_mc`, k1, k2, k3, uni_rand, ncores)
}
d_const_vmcos <- function(par, uni_rand, ncores = 1L) {
.Call(`_BAMBI_d_const_vmcos`, par, uni_rand, ncores)
}
log_const_vmcos_all <- function(par_mat, uni_rand) {
.Call(`_BAMBI_log_const_vmcos_all`, par_mat, uni_rand)
}
ldcosnum <- function(x, y, par) {
.Call(`_BAMBI_ldcosnum`, x, y, par)
}
dcos_onex_manypar <- function(x, k1, k2, k3, mu1, mu2, l_const_all) {
.Call(`_BAMBI_dcos_onex_manypar`, x, k1, k2, k3, mu1, mu2, l_const_all)
}
dcos_manyx_onepar <- function(x, k1, k2, k3, mu1, mu2, l_const) {
.Call(`_BAMBI_dcos_manyx_onepar`, x, k1, k2, k3, mu1, mu2, l_const)
}
dcos_manyx_manypar <- function(x, k1, k2, k3, mu1, mu2, l_const_all) {
.Call(`_BAMBI_dcos_manyx_manypar`, x, k1, k2, k3, mu1, mu2, l_const_all)
}
rcos_unimodal <- function(n, k1, k2, k3, mu1, mu2, kappa_opt, log_I0_kappa_opt, logK, log_const_vmcos) {
.Call(`_BAMBI_rcos_unimodal`, n, k1, k2, k3, mu1, mu2, kappa_opt, log_I0_kappa_opt, logK, log_const_vmcos)
}
rcos_bimodal <- function(n, k1, k2, k3, mu1, mu2, kappa_opt, log_I0_kappa_opt, logK, log_const_vmcos, mode_1, mode_2, vmpropn, unifpropn) {
.Call(`_BAMBI_rcos_bimodal`, n, k1, k2, k3, mu1, mu2, kappa_opt, log_I0_kappa_opt, logK, log_const_vmcos, mode_1, mode_2, vmpropn, unifpropn)
}
rcos_onepar <- function(n, k1, k2, k3, mu1, mu2, I_upper_bd) {
.Call(`_BAMBI_rcos_onepar`, n, k1, k2, k3, mu1, mu2, I_upper_bd)
}
mem_p_cos <- function(data, par, pi, log_c_von) {
.Call(`_BAMBI_mem_p_cos`, data, par, pi, log_c_von)
}
llik_vmcos_contri_C <- function(data, par, pi, log_c) {
.Call(`_BAMBI_llik_vmcos_contri_C`, data, par, pi, log_c)
}
llik_vmcos_one_comp <- function(data, par_vec, log_c) {
.Call(`_BAMBI_llik_vmcos_one_comp`, data, par_vec, log_c)
}
grad_llik_vmcos_C <- function(data, par, uni_rand) {
.Call(`_BAMBI_grad_llik_vmcos_C`, data, par, uni_rand)
}
vmcos_var_corr_anltc <- function(k1, k2, k3) {
.Call(`_BAMBI_vmcos_var_corr_anltc`, k1, k2, k3)
}
vmcos_var_corr_mc <- function(k1, k2, k3, uni_rand, ncores = 1L) {
.Call(`_BAMBI_vmcos_var_corr_mc`, k1, k2, k3, uni_rand, ncores)
}
vmcos_var_cor_singlepar_cpp <- function(k1, k2, k3, uni_rand, ncores = 1L) {
.Call(`_BAMBI_vmcos_var_cor_singlepar_cpp`, k1, k2, k3, uni_rand, ncores)
}
ldcos_onex_manypar <- function(x, k1, k2, k3, mu1, mu2, l_const_all) {
.Call(`_BAMBI_ldcos_onex_manypar`, x, k1, k2, k3, mu1, mu2, l_const_all)
}
ldcos_manyx_onepar <- function(x, k1, k2, k3, mu1, mu2, l_const) {
.Call(`_BAMBI_ldcos_manyx_onepar`, x, k1, k2, k3, mu1, mu2, l_const)
}
ldcos_manyx_manypar <- function(x, k1, k2, k3, mu1, mu2, l_const_all) {
.Call(`_BAMBI_ldcos_manyx_manypar`, x, k1, k2, k3, mu1, mu2, l_const_all)
}
const_vmsin <- function(k1, k2, lambda) {
.Call(`_BAMBI_const_vmsin`, k1, k2, lambda)
}
d_const_vmsin <- function(par) {
.Call(`_BAMBI_d_const_vmsin`, par)
}
log_const_vmsin_all <- function(par_mat) {
.Call(`_BAMBI_log_const_vmsin_all`, par_mat)
}
ldsinnum <- function(x, y, par) {
.Call(`_BAMBI_ldsinnum`, x, y, par)
}
dsin_onex_manypar <- function(x, k1, k2, k3, mu1, mu2) {
.Call(`_BAMBI_dsin_onex_manypar`, x, k1, k2, k3, mu1, mu2)
}
dsin_manyx_onepar <- function(x, k1, k2, k3, mu1, mu2) {
.Call(`_BAMBI_dsin_manyx_onepar`, x, k1, k2, k3, mu1, mu2)
}
dsin_manyx_manypar <- function(x, k1, k2, k3, mu1, mu2) {
.Call(`_BAMBI_dsin_manyx_manypar`, x, k1, k2, k3, mu1, mu2)
}
rsin_unimodal <- function(n, k1, k2, k3, mu1, mu2, kappa_opt, log_I0_kappa_opt, logK, log_const_vmsin) {
.Call(`_BAMBI_rsin_unimodal`, n, k1, k2, k3, mu1, mu2, kappa_opt, log_I0_kappa_opt, logK, log_const_vmsin)
}
rsin_bimodal <- function(n, k1, k2, k3, mu1, mu2, kappa_opt, log_I0_kappa_opt, logK, log_const_vmsin, mode_1, mode_2, vmpropn, unifpropn) {
.Call(`_BAMBI_rsin_bimodal`, n, k1, k2, k3, mu1, mu2, kappa_opt, log_I0_kappa_opt, logK, log_const_vmsin, mode_1, mode_2, vmpropn, unifpropn)
}
rsin_onepar <- function(n, k1, k2, k3, mu1, mu2, I_upper_bd) {
.Call(`_BAMBI_rsin_onepar`, n, k1, k2, k3, mu1, mu2, I_upper_bd)
}
mem_p_sin <- function(data, par, pi, log_c_von, ncores = 1L) {
.Call(`_BAMBI_mem_p_sin`, data, par, pi, log_c_von, ncores)
}
llik_vmsin_contri_C <- function(data, par, pi, log_c) {
.Call(`_BAMBI_llik_vmsin_contri_C`, data, par, pi, log_c)
}
llik_vmsin_one_comp <- function(data, par_vec, log_c) {
.Call(`_BAMBI_llik_vmsin_one_comp`, data, par_vec, log_c)
}
grad_llik_vmsin_C <- function(data, par) {
.Call(`_BAMBI_grad_llik_vmsin_C`, data, par)
}
vmsin_var_corr_anltc <- function(k1, k2, lambda) {
.Call(`_BAMBI_vmsin_var_corr_anltc`, k1, k2, lambda)
}
vmsin_var_corr_mc <- function(k1, k2, k3, uni_rand, ncores = 1L) {
.Call(`_BAMBI_vmsin_var_corr_mc`, k1, k2, k3, uni_rand, ncores)
}
vmsin_var_cor_singlepar_cpp <- function(k1, k2, k3, uni_rand, ncores = 1L) {
.Call(`_BAMBI_vmsin_var_cor_singlepar_cpp`, k1, k2, k3, uni_rand, ncores)
}
ldsin_onex_manypar <- function(x, k1, k2, k3, mu1, mu2) {
.Call(`_BAMBI_ldsin_onex_manypar`, x, k1, k2, k3, mu1, mu2)
}
ldsin_manyx_onepar <- function(x, k1, k2, k3, mu1, mu2) {
.Call(`_BAMBI_ldsin_manyx_onepar`, x, k1, k2, k3, mu1, mu2)
}
ldsin_manyx_manypar <- function(x, k1, k2, k3, mu1, mu2) {
.Call(`_BAMBI_ldsin_manyx_manypar`, x, k1, k2, k3, mu1, mu2)
}
ldwnorm2_num <- function(x, par, omega_2pi) {
.Call(`_BAMBI_ldwnorm2_num`, x, par, omega_2pi)
}
l_const_wnorm2 <- function(par) {
.Call(`_BAMBI_l_const_wnorm2`, par)
}
const_wnorm2 <- function(par) {
.Call(`_BAMBI_const_wnorm2`, par)
}
log_const_wnorm2_all <- function(par_mat) {
.Call(`_BAMBI_log_const_wnorm2_all`, par_mat)
}
mem_p_wnorm2 <- function(data, par_mat, pi, log_c_wnorm, omega_2pi, ncores = 1L) {
.Call(`_BAMBI_mem_p_wnorm2`, data, par_mat, pi, log_c_wnorm, omega_2pi, ncores)
}
llik_wnorm2_contri_C <- function(data, par, pi, log_c, omega_2pi) {
.Call(`_BAMBI_llik_wnorm2_contri_C`, data, par, pi, log_c, omega_2pi)
}
llik_wnorm2_one_comp <- function(data, par_vec, log_c, omega_2pi) {
.Call(`_BAMBI_llik_wnorm2_one_comp`, data, par_vec, log_c, omega_2pi)
}
grad_llik_wnorm2_C <- function(data, par, omega_2pi) {
.Call(`_BAMBI_grad_llik_wnorm2_C`, data, par, omega_2pi)
}
grad_den_wnorm2_one_comp_i_unadj <- function(x, y, par, det_sig_inv, det_sig_inv_sqrt, omega_2pi) {
.Call(`_BAMBI_grad_den_wnorm2_one_comp_i_unadj`, x, y, par, det_sig_inv, det_sig_inv_sqrt, omega_2pi)
}
dwnorm2_onex_manypar <- function(x, k1, k2, k3, mu1, mu2, omega_2pi) {
.Call(`_BAMBI_dwnorm2_onex_manypar`, x, k1, k2, k3, mu1, mu2, omega_2pi)
}
dwnorm2_manyx_onepar <- function(x, k1, k2, k3, mu1, mu2, omega_2pi) {
.Call(`_BAMBI_dwnorm2_manyx_onepar`, x, k1, k2, k3, mu1, mu2, omega_2pi)
}
dwnorm2_manyx_manypar <- function(x, k1, k2, k3, mu1, mu2, omega_2pi) {
.Call(`_BAMBI_dwnorm2_manyx_manypar`, x, k1, k2, k3, mu1, mu2, omega_2pi)
}
| /scratch/gouwar.j/cran-all/cranData/BAMBI/R/RcppExports.R |
#' Contour plot for angmcmc objects with bivariate data
#'
#' @inheritParams pointest
#' @param type Passed to \link{d_fitted}. Possible choices are "point-est" and "post-pred".
#' @param x angular MCMC object (with bivariate data).
#' @param show.data logical. Should the data points be added to the contour plot? Ignored if \code{object} is NOT supplied.
#' @param cex,col,pch graphical parameters passed to \code{\link{points}} from graphics for plotting the data points.
#' Ignored if \code{show.data == FALSE}.
#' @param alpha color transparency for the data points, implemented via \code{\link[scales]{alpha}} from package \code{scales}.
#' Ignored if \code{show.data == FALSE}.
#' @inheritParams contour_model
#' @param ... additional arguments to be passed to the function \code{\link{contour}}.
#'
#' @details
#' \code{contour.angmcmc} is an S3 function for angmcmc objects that calls \code{\link{contour}} from graphics.
#'
#' To estimate the mixture density required to construct the contour plot, first the parameter vector \eqn{\eta} is estimated
#' by applying \code{fn} on the MCMC samples, yielding the (consistent) Bayes estimate \eqn{\hat{\eta}}. Then the mixture density
#' \eqn{f(x|\eta)} at any point \eqn{x} is (consistently) estimated by \eqn{f(x|\hat{\eta})}.
#'
#' @examples
#' # first fit a vmsin mixture model
#' # illustration only - more iterations needed for convergence
#' fit.vmsin.20 <- fit_vmsinmix(tim8, ncomp = 3, n.iter = 20,
#' n.chains = 1)
#' # now create a contour plot
#' contour(fit.vmsin.20)
#'
#' @export
contour.angmcmc <- function(x, fn = "MAP", type = "point-est", show.data = TRUE,
xpoints = seq(0, 2*pi, length.out = 100),
ypoints = seq(0, 2*pi, length.out = 100),
levels, nlevels = 20,
cex = 1, col = "red", alpha = 0.4,
pch = 19, ...)
{
object <- x
if (!is.angmcmc(object))
stop("\'x\' must be an angmcmc object")
if(x$type != "bi") stop("\"x\" is not a bivariate angmcmc object")
if(missing(levels)) {
levels <- exp(seq(-20,2, length.out = nlevels))
}
dots <- list(...)
colnames_data <- colnames(x$data)
if(is.null(colnames_data)) {
xlab <- ylab <- ""
} else {
xlab <- colnames_data[1]
ylab <- colnames_data[2]
}
if(x$ncomp > 1) {
main <- paste("Contour plot for fitted", x$ncomp, "component", x$model, "mixtures")
} else {
main <- paste("Contour plot for fitted (single component)", x$model)
}
coords <- as.matrix(expand.grid(xpoints, ypoints))
dens <- d_fitted(coords, x, fn = fn, type = type)
contour_in <- c(
list(
x = xpoints,
y = ypoints,
z = matrix(dens, nrow=length(xpoints)),
levels = levels
),
dots
)
if (is.null(dots$xlab)) contour_in$xlab <- xlab
if (is.null(dots$ylab)) contour_in$ylab <- ylab
if (is.null(dots$main)) contour_in$main <- main
do.call(contour, contour_in)
if(show.data) points(x$data, col = scales::alpha(col, alpha),
cex = cex, pch = pch)
}
#
# panel_wireframe_cloud <- function(x, y, z, x2, y2, z2,...) {
# panel.wireframe(x, y, z,...)
# panel.cloud(x2, y2, z2,...)
# }
#' Density plots for angmcmc objects
#' @inheritParams contour.angmcmc
#' @description Plot fitted angular mixture model density surfaces or curves.
#' @inheritParams pointest
#' @param x angmcmc object.
#' @param data unused. The parameter is already filled with results from fitted angular model. It is kept
#' to ensure compatibility with the lattice S3 generic \code{densityplot}.
#' @param plot logical. Should the density surface (if the fitted data is bivariate) or the density
#' curve (if univariate) be plotted?
#' @param log.density logical. Should log density be used for the plot?
#' @param ... additional arguments passed to \code{lattice::wireframe} if
#' fitted data is bivariate, or to \link{hist} (if (\code{show.hist == TRUE})), if the fitted data is univariate
#' @param show.hist logical. Should a histogram for the data
#' points be added to the plot, if the fitted data is univariate? Ignored if data is
#' bivariate.
#' @param xlab,ylab,zlab,main graphical parameters passed to \code{lattice::wireframe} (if
#' bivariate) or \link{plot} (if univariate). If the data is univariate, \code{zlab} and \code{ylab} can be
#' used interchangeably (both correspond to the density).
#' @param xpoints,ypoints Points on the x and y coordinates (if bivariate) or only x coordinate
#' (if univariate) where the density is to be evaluated. Each defaults to seq(0, 2*pi, length.out=100).
#'
#' @details
#' When \code{plot==TRUE}, \code{densityplot.angmcmc} calls \code{lattice::wireframe} or
#' \link{plot} from graphics to draw the surface or curve.
#'
#' To estimate the mixture density, first the parameter vector \eqn{\eta} is estimated
#' by applying \code{fn} on the MCMC samples, yielding the (consistent) Bayes estimate \eqn{\hat{\eta}}. Then the mixture density
#' \eqn{f(x|\eta)} at any point \eqn{x} is (consistently) estimated by \eqn{f(x|\hat{\eta})}.
#'
#'
#' Note that \code{densityplot.angmcmc} \strong{does not} plot the kernel density estimates
#' of the MCMC parameters. (These plots can be obtained by first converting an \code{angmcmc}
#' object to an \code{mcmc} object via \link{as.mcmc.list}, and then
#' by using \code{densplot} from package coda on the resulting \code{mcmc.list} object.) Instead,
#' \code{densityplot.angmcmc} returns the surface (if 2-D) or the curve (if 1-D)
#' of the fitted model density evaluated at the estimated parameter vector (obtained through \link{pointest}).
#'
#' @examples
#' # first fit a vmsin mixture model
#' # illustration only - more iterations needed for convergence
#' fit.vmsin.20 <- fit_vmsinmix(tim8, ncomp = 3, n.iter = 20,
#' n.chains = 1)
#' # now create density surface with the default first 1/3 as burn-in and thin = 1
#' library(lattice)
#' densityplot(fit.vmsin.20)
#' # the viewing angles can be changed through the argument 'screen'
#' # (passed to lattice::wireframe)
#' densityplot(fit.vmsin.20, screen = list(z=-30, x=-60))
#' densityplot(fit.vmsin.20, screen = list(z=30, x=-60))
#' # the colors can be changed through 'col.regions'
#' cols <- grDevices::colorRampPalette(c("blue", "green",
#' "yellow", "orange", "red"))(100)
#' densityplot(fit.vmsin.20, col.regions = cols)
#'
#' # Now fit a vm mixture model
#' # illustration only - more iterations needed for convergence
#' fit.vm.20 <- fit_vmmix(wind$angle, ncomp = 3, n.iter = 20,
#' n.chains = 1)
#' densityplot(fit.vm.20)
#'
#' @importFrom lattice densityplot wireframe
#'
#' @export
densityplot.angmcmc <- function(x,
data = NULL,
fn = mean, type = "point-est", log.density = FALSE,
xpoints=seq(0, 2*pi, length.out=35),
ypoints=seq(0, 2*pi, length.out=35),
plot=TRUE,
show.hist=ifelse(log.density, FALSE, TRUE),
xlab, ylab,
zlab = ifelse(log.density, "Log Density", "Density"),
main,
...)
{
if(!is.angmcmc(x))
stop("\"x\" must be an angmcmc object")
object <- x
if (object$type == "bi") {
colnames_data <- colnames(object$data)
if (any(missing(xlab), missing(ylab))) {
if (is.null(colnames_data)) {
xlab <- ylab <- ""
} else {
xlab <- colnames_data[1]
ylab <- colnames_data[2]
}
}
if (missing(main)) {
if(x$ncomp > 1) {
main <- paste("Density surface for fitted", x$ncomp, "component", x$model, "mixtures")
} else {
main <- paste("Density surface for fitted (single component)", x$model)
}
}
coords <- as.matrix(expand.grid(xpoints, ypoints))
den <- d_fitted(coords, object, fn = fn, type = type)
denmat <- matrix(den, nrow=length(xpoints))
if(log.density) {
denmat <- log(denmat)
}
out <- list(x=xpoints, y=ypoints, density=denmat)
if (plot) {
nrden <- nrow(denmat)
ncden <- ncol(denmat)
if(object$ncomp > 1) {
main <- paste("Density surface for fitted", object$ncomp, "component",
object$model, "mixtures")
} else {
main <- paste("Density surface for fitted (single component)", object$model)
}
# # Create a function interpolating colors in the range of specified colors
# jet.colors <- grDevices::colorRampPalette( c("blue", "green",
# "yellow", "orange", "red") )
# # Generate the desired number of colors from this palette
# nbcol <- 500
# color <- jet.colors(nbcol)
# denfacet <- denmat[-1, -1] + denmat[-1, -ncden] +
# denmat[-nrden, -1] + denmat[-nrden, -ncden]
# Recode facet z-values into color indices
# facetcol <- cut(denfacet, nbcol)
print(basic_surfaceplot(xpoints = xpoints, ypoints = ypoints,
denmat = denmat, xlab = xlab, ylab = ylab,
main = main, zlab = zlab, ...))
# persp(x=xpoints, y=ypoints, z=denmat, theta = theta, phi = phi, expand = expand, col = color[facetcol],
# ltheta = 120, shade = shade, ticktype = "detailed",
# xlab = xlab, ylab = ylab, zlab = zlab,
# main = main, ...) -> res
# inargs <- list(...)
# inargs$x <- denmat~x*y
# inargs$data <- data.frame(x = xpoints,
# y=rep(ypoints, each=length(xpoints)),
# denmat=denmat)
# inargs$outerbox <- FALSE
# inargs$par.settings <- list(axis.line = list(col = 'transparent'))
# if (is.null(inargs$xlab)) inargs$xlab <- xlab
# if (is.null(inargs$ylab)) inargs$ylab <- ylab
# if (is.null(inargs$colorkey)) inargs$colorkey <- FALSE
# if (is.null(inargs$main)) inargs$main <- main
# if (is.null(inargs$neval)) inargs$neval <- 100
# if (is.null(inargs$aspect)) inargs$aspect <- c(61/87, 0.4)
# if (is.null(inargs$zlab)) inargs$zlab <- list("Density", rot=90)
# if (is.null(inargs$screen)) inargs$screen <- list(z=45, x=-45)
# if (is.null(inargs$colorkey)) inargs$colorkey <- FALSE
# if (is.null(inargs$scales))
# inargs$scales <- list(arrows=FALSE, col=1)
# if (is.null(inargs$drape)) inargs$drape <- TRUE
# if (is.null(inargs$light.source))
# inargs$light.source <- c(10,0,10)
# if (is.null(inargs$col.regions))
# inargs$col.regions <- colorRampPalette(c("blue", "green",
# "yellow", "orange", "red"))(100)
# if (is.null(inargs$par.settings))
# inargs$par.settings <- list(top.padding = 0,
# bottom.padding = 0,
# left.padding=0,
# right.padding=0,
# axis.line=list(col = 'transparent'))
# do.call(wireframe, inargs)
}
invisible(out)
}
else {
den <- d_fitted(xpoints, object, fn = fn, type = type)
if (log.density) den <- log(den)
out <- list(x=xpoints, density=den)
if (plot) {
if(show.hist){
histplot <- hist(object$data, plot = FALSE, ...)
} else {
histplot <- NULL
}
if (missing(main)) {
if(object$ncomp > 1) {
main <- paste("Density plot for fitted", object$ncomp, "component", object$model, "mixtures")
} else {
main <- paste("Density plot for fitted (single component)", object$model)
}
}
if (missing(xlab))
xlab <- "Angles in radians"
if (missing(ylab)) {
ylab <- zlab
}
y_max <- 1.1* max(den, histplot$density)
plot(NULL, xlim=range(xpoints), ylim=c(0, y_max), xlab = xlab,
ylab=ylab, main=main)
points(xpoints, den, type = "l")
if(show.hist) plot(histplot, freq = FALSE, add = TRUE, ...)
title(main = main)
}
}
invisible(out)
}
extract_digits <- function(x) {
tmp <- regmatches(
x,
gregexpr("[[:digit:]]+", x)
)[[1]]
out <- if (length(tmp) > 0) {
strsplit(
tmp,
""
)[[1]]
} else ""
out
}
# get_unicode <- function(x) {
# subscript_char <- extract_digits(x)
# subscript_char_unicode <- if (subscript_char != "") {
# paste0(
# sapply(
# subscript_char,
# function(this_digit) {
# unicode_num <- 2080 + as.numeric(this_digit)
# unicode_num_char <- eval(parse(text = paste0("'\\u", unicode_num, "'")))
# }
# ),
# collapse = ""
# )
# } else ""
#
# parameter_greek <- if (grepl("mu", x)) {
# "\u03BC"
# } else if (grepl("kappa", x)) {
# "\u03BA"
# } else if (grepl("pmix", x)) {
# "p"
# }
#
# out <- paste0(parameter_greek, subscript_char_unicode)
#
# if (grepl("pmix", x)) {
# # append superscript 'mix'
# out <- paste0(out, "\u1D50", "\u2071", "\u02E3")
# }
#
# out
# }
get_bquote_expr <- function(x) {
subscript_char <- extract_digits(x)
param_char <- gsub(subscript_char, "", x)
out <- if (grepl("pmix", x)) {
"p['mix']"
} else {
paste0(param_char, "[", subscript_char, "]")
}
out
}
parse_text <- function(x) {
eval(parse(text = x))
}
parse_text_bquote <- function(x) {
tmp <- paste0("bquote(", x, ")")
parse_text(tmp)
}
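# --- illustration (a minimal sketch; not run): how the helpers above build
# plotmath labels for plot titles. get_bquote_expr() maps a parameter name
# such as "kappa1" to the plotmath string "kappa[1]" (and any "pmix*" name to
# "p['mix']"), and parse_text_bquote() turns such a string into a language
# object usable by title().
if (FALSE) {
get_bquote_expr("kappa1") # "kappa[1]"
get_bquote_expr("pmix2") # "p['mix']"
parse_text_bquote("kappa[1]") # bquote(kappa[1])
}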
#' Trace plot for parameters from an angmcmc object
#' @inheritParams pointest
#' @param object angular MCMC object.
#' @param par.name parameter for which trace plot is to be created.
# #' @param press.enter logical. Should the next plot in the series
# #' be shown after you press "Enter"? Ignored if only a single plot
# #' is to be created.
#' @param ... unused
#' @return
#' Returns a single plot if a single \code{par} and a single \code{comp.label} is supplied.
#' Otherwise, a series of plots is produced.
#'
#' @examples
#' # first fit a vmsin mixture model
#' # illustration only - more iterations needed for convergence
#' fit.vmsin.20 <- fit_vmsinmix(tim8, ncomp = 3, n.iter = 20,
#' n.chains = 1)
#' # trace plot for kappa1 in component 1
#' paramtrace(fit.vmsin.20, "kappa1", 1)
#' # for kappa1 in all components
#' paramtrace(fit.vmsin.20, "kappa1")
#' # for all parameters in component 1
#' paramtrace(fit.vmsin.20, comp.label = 1)
#'
#' @importFrom RColorBrewer brewer.pal
#'
#' @export
paramtrace <- function(object, par.name, comp.label, chain.no,
...)
{
if(!is.angmcmc(object)) stop("\'object\' must be an angmcmc object")
ell <- list(...)
if (!is.null(ell$press.enter))
warning("\'press.enter\' is deprecated. Instead use par(ask=TRUE) before calling paramtrace.")
if (!is.null(ell$burnin))
warning("Use of burnin is deprecated in postprocessing. Use \'burnin.prop\' during original MCMC run instead.")
if (!is.null(ell$thin))
warning("Use of thin is deprecated in postprocessing. Use \'thin\' during original MCMC run instead.")
if (missing(par.name)) {
par.name <- object$par.name
} else if (any(!par.name %in% object$par.name)) {
stop("invalid par.name")
}
if (missing(comp.label)) {
comp.label <- 1:object$ncomp
} else if (any(!comp.label %in% 1:object$ncomp)) {
stop("invalid component label")
}
if (missing(chain.no)) {
chain.no <- 1:object$n.chains
} else if (any(!chain.no %in% 1:object$n.chains)) {
stop("invalid chain number")
}
if(any(c(length(par.name), length(comp.label)) > 1)) {
singleplot <- FALSE
} else {
singleplot <- TRUE
}
n.chains <- length(chain.no)
col_brew <- brewer.pal(n = max(n.chains, 3), name = "Pastel2")
if (singleplot) {
val <- extractsamples(object, par.name, comp.label, chain.no, drop = FALSE)
plot(NULL, xlim = c(1, object$n.iter.final), ylim = range(val), ylab="", xlab = "Iteration")
for(ch in 1:n.chains)
points(val[, , , ch], type = "l", col = col_brew[ch])
legend("bottomright", legend = paste("Chain", chain.no), col = col_brew,
lty = 1)
par.name.1 <- sapply(par.name, get_bquote_expr)
if(object$ncomp > 1) {
ylab <- paste0(par.name.1, "~'for component ", comp.label, "'")
main <- paste0("'Traceplot for'~", ylab, "~'in ",
object$ncomp, "-component ", object$model, " mixtures'")
} else {
ylab <- par.name.1
main <- paste0("Traceplot for'~", ylab, "~'in (single component) ", object$model, "'")
}
title(main = parse_text_bquote(main), ylab = parse_text_bquote(ylab))
}
# not singleplot
else {
nplots <- length(par.name) * length(comp.label)
currplotno <- 1L
for(par.curr in par.name) {
for(comp.label.curr in comp.label) {
val <- extractsamples(object, par.curr, comp.label.curr, chain.no, drop = FALSE)
plot(NULL, xlim = c(1, object$n.iter.final), ylim = range(val), ylab="", xlab = "Iteration")
for(ch in 1:n.chains)
points(val[, , , ch], type = "l", col = col_brew[ch])
legend("bottomright", legend = paste("Chain", chain.no), col = col_brew,
lty = 1)
par.curr.1 <- sapply(par.curr, get_bquote_expr)
if(object$ncomp > 1) {
ylab <- paste(par.curr.1, "~' for component", comp.label.curr, "'")
main <- paste0("'Traceplot for'~", ylab, "*' in ", object$ncomp, "-component ", object$model, " mixtures'")
} else {
ylab <- par.curr.1
main <- paste0("'Traceplot for'~", ylab, "*' in (single component)", object$model, "'")
}
title(main = parse_text_bquote(main), ylab = parse_text_bquote(ylab))
currplotno <- currplotno + 1
}
}
}
}
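## Usage sketch: browse a multi-plot paramtrace series one plot at a time
## via par(ask = TRUE), which replaces the deprecated 'press.enter'
## (illustration only; more iterations needed for convergence):
if (FALSE) {
  fit <- fit_vmsinmix(tim8, ncomp = 3, n.iter = 20, n.chains = 1)
  op <- par(ask = TRUE)      # wait for user input between successive plots
  paramtrace(fit, "kappa1")  # one trace plot per component
  par(op)
}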
#' Trace and autocorrelation plots of log posterior density or log likelihood from an angmcmc object
#' @inheritParams paramtrace
#' @param object angular MCMC object.
#' @param use.llik logical. Should log likelihood be plotted instead of log posterior? Set
#' to \code{FALSE} by default.
#' @param plot.autocor logical. Should the autocorrelations be plotted as well?
#' @param lag.max maximum lag for autocorrelation. Passed to \link{acf}. Ignored if
#' \code{plot.autocor = FALSE}.
#' @examples
#' # first fit a vmsin mixture model
#' # illustration only - more iterations needed for convergence
#' fit.vmsin.20 <- fit_vmsinmix(tim8, ncomp = 3, n.iter = 20,
#' n.chains = 1)
#' # log posterior density trace
#' lpdtrace(fit.vmsin.20)
#' # log likelihood trace
#' lpdtrace(fit.vmsin.20, use.llik = TRUE)
#'
#' @export
lpdtrace <- function(object, chain.no, use.llik = FALSE,
plot.autocor = FALSE,
lag.max = NULL, ...)
{
if (!is.angmcmc(object)) stop("lpdtrace can only be used for \'angmcmc\' objects")
ell <- list(...)
if (!is.null(ell$press.enter))
warning("\'press.enter\' is deprecated. Instead use par(ask=TRUE) before calling lpdtrace.")
if (!is.null(ell$burnin))
warning("Use of burnin is deprecated in postprocessing. Use \'burnin.prop\' during original MCMC run instead.")
if (!is.null(ell$thin))
warning("Use of thin is deprecated in postprocessing. Use \'thin\' during original MCMC run instead.")
if (missing(chain.no)) {
chain.no <- 1:object$n.chains
} else if (any(!chain.no %in% 1:object$n.chains)) {
stop("invalid chain number")
}
n.chains <- length(chain.no)
final_iter_set <- object$final_iter
col_brew <- brewer.pal(n = max(n.chains, 3), name = "Paired")
if (use.llik) {
val <- object$llik[final_iter_set, chain.no, drop = FALSE]
plot(NULL, xlim = c(1, object$n.iter.final), ylim = range(val),
ylab="Log Likelihood", xlab = "Iteration")
for(ch in 1:n.chains)
points(val[, ch], type = "l", col = col_brew[ch])
legend("bottomright", legend = paste("Chain", chain.no), col = col_brew,
lty = 1)
if(object$ncomp > 1) {
main <- paste("Log likelihood traceplot for ", object$ncomp, "component", object$model, "mixtures")
} else {
main <- paste("Log likelihood traceplot for (single component)", object$model)
}
title(main = main)
if (plot.autocor) {
all_autocors <- lapply(1:n.chains,
function(ch) acf(val[, ch], lag.max = lag.max, plot = FALSE))
range_y <- range(unlist(lapply(all_autocors, function(j) j$acf)))
for(ch in 1:n.chains) {
acr <- all_autocors[[ch]]
if (ch == 1) {
plot(acr$acf, type = "b", col = col_brew[ch],
ylab="Log likelihood autocorrelation",
xlab = "Lag", pch = 16, ylim = range_y)
} else {
points(acr$acf, type = "b", col = col_brew[ch], pch = 16)
}
}
abline(h = 0, lty = 2)
legend("topright", legend = paste("Chain", chain.no), col = col_brew,
pch = 16, lty = 1)
if(object$ncomp > 1) {
main <- paste("Log likelihood autocorrelation plot for ", object$ncomp, "component", object$model, "mixtures")
} else {
main <- paste("Log likelihood autocorrelation plot for (single component)", object$model)
}
title(main = main)
}
} else {
val <- object$lpd[final_iter_set, chain.no, drop = FALSE]
plot(NULL, xlim = c(1, object$n.iter.final), ylim = range(val),
ylab="Log Posterior Density", xlab = "Iteration")
for(ch in 1:n.chains)
points(val[, ch], type = "l", col = col_brew[ch])
legend("bottomright", legend = paste("Chain", chain.no), col = col_brew,
lty = 1)
if(object$ncomp > 1) {
main <- paste("Log Posterior Density traceplot for", object$ncomp, "component", object$model, "mixtures")
} else {
main <- paste("Log Posterior Density traceplot fitted (single component)", object$model)
}
title(main = main)
if (plot.autocor) {
all_autocors <- lapply(1:n.chains,
function(ch) acf(val[, ch], lag.max = lag.max, plot = FALSE))
range_y <- range(unlist(lapply(all_autocors, function(j) j$acf)))
for(ch in 1:n.chains) {
acr <- all_autocors[[ch]]
if (ch == 1) {
plot(acr$acf, type = "b", col = col_brew[ch],
ylab="Log posterior autocorrelation",
xlab = "Lag", pch = 16, ylim = range_y)
} else {
points(acr$acf, type = "b", col = col_brew[ch], pch = 16)
}
}
abline(h = 0, lty = 2)
legend("topright", legend = paste("Chain", chain.no), col = col_brew,
pch = 16, lty = 1)
if(object$ncomp > 1) {
main <- paste("Log posterior autocorrelation plot for ", object$ncomp, "component", object$model, "mixtures")
} else {
main <- paste("Log posterior autocorrelation plot for (single component)", object$model)
}
title(main = main)
}
}
}
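## Sketch: log posterior trace together with per-chain autocorrelations
## up to lag 20 (illustration only; more iterations needed in practice):
if (FALSE) {
  fit <- fit_vmsinmix(tim8, ncomp = 3, n.iter = 20, n.chains = 1)
  lpdtrace(fit, plot.autocor = TRUE, lag.max = 20)
}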
#' Summary plots for angmcmc objects
#' @inheritParams paramtrace
#' @inheritParams lpdtrace
#' @param do.paramtrace logical. Should the trace(s) for the
#' parameter(s) be plotted?
#' @param do.lpdtrace logical. Should the log posterior trace
#' be plotted?
#' @param use.llik logical. Should the log likelihood be plotted
#' instead? Ignored if \code{do.lpdtrace == FALSE}.
#' @param x angmcmc object
#'
#' @examples
#' # first fit a vmsin mixture model
#' # illustration only - more iterations needed for convergence
#' fit.vmsin.20 <- fit_vmsinmix(tim8, ncomp = 3, n.iter = 20,
#' n.chains = 1)
#' plot(fit.vmsin.20)
#' @export
plot.angmcmc <- function(x, par.name, comp.label, chain.no,
do.paramtrace = TRUE,
do.lpdtrace = TRUE, use.llik = FALSE,
...)
{
if (!is.angmcmc(x))
stop("\'x\' must be an angmcmc object")
ell <- list(...)
if (!is.null(ell$press.enter))
warning("\'press.enter\' is deprecated. Instead use par(ask=TRUE) before calling plot.angmcmc.")
if (do.paramtrace)
paramtrace(x, par.name, comp.label, chain.no, ...)
if (do.lpdtrace)
lpdtrace(x, chain.no, use.llik)
}
## ---- end of file: BAMBI/R/all_diagnostics.R ----
#' Contourplot for bivariate angular mixture model densities
#' @param model bivariate angular model whose mixture is of interest. Must be one of
#' "vmsin", "vmcos" and "wnorm2".
#' @param kappa1,kappa2,kappa3,mu1,mu2,pmix model parameters and mixing
#' proportions. See the respective mixture model densities (\link{dvmsinmix}, \link{dvmcosmix},
#' \link{dwnorm2mix}) for more details.
#' @param levels numeric vector of levels at which to draw contour lines;
#' passed to the \link{contour} function in graphics.
#' @param nlevels number of contour levels desired \strong{if} levels is not supplied;
#' passed to the \link{contour} function in graphics.
#' @param xpoints Points on the first (x-) coordinate where the density is to be evaluated.
#' Defaults to seq(0, 2*pi, length.out=100).
#' @param ypoints Points on the second (y-) coordinate where the density is to be evaluated.
#' Defaults to seq(0, 2*pi, length.out=100).
#' @param ... additional model-specific arguments.
#' @param xlab,ylab,col,lty,main graphical parameters passed to \link{contour}.
#' @examples
#' contour_model('vmsin', 1, 1, 1.5, pi, pi)
#' contour_model('vmcos', 1, 1, 1.5, pi, pi)
#'
#'
#' @export
contour_model <- function(model = "vmsin", kappa1, kappa2, kappa3, mu1, mu2,
pmix = rep(1/length(kappa1), length(kappa1)),
xpoints = seq(0, 2*pi, length.out = 100),
ypoints = seq(0, 2*pi, length.out = 100),
levels, nlevels = 20,
xlab="x", ylab="y", col="black",
lty=1, main, ...)
{
if (!model %in% c("vmsin", "vmcos", "wnorm2"))
stop("model must be one of \"vmsin\", \"vmcos\" or \"wnorm2\"")
if (missing(levels)) {
levels <- exp(seq(-20,2, length.out = nlevels))
}
if (missing(main))
main <- paste("Contour plot for", length(kappa1), "component",
model, "mixture density")
coords <- as.matrix(expand.grid(xpoints, ypoints))
inargs <- list(x = coords, kappa1 = kappa1, kappa2 = kappa2, kappa3 = kappa3,
mu1 = mu1, mu2 = mu2, pmix = pmix, ...)
dens <- do.call(paste0("d", model, "mix"), inargs)
contour(xpoints, ypoints, matrix(dens, nrow=length(xpoints)), levels=levels,
xlab=xlab, ylab=ylab, col=col, lty=lty, main=main)
}
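## The contour is just a density evaluation over a rectangular grid; the
## following sketch reproduces that computation by hand for a two-component
## vmsin mixture (illustration only):
if (FALSE) {
  xp <- yp <- seq(0, 2 * pi, length.out = 50)
  grid <- as.matrix(expand.grid(xp, yp))
  dens <- dvmsinmix(grid, kappa1 = c(1, 2), kappa2 = c(1, 2),
                    kappa3 = c(0, 0.5), mu1 = c(1, 4), mu2 = c(1, 4),
                    pmix = c(0.5, 0.5))
  contour(xp, yp, matrix(dens, nrow = length(xp)))
}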
#' Surface for bivariate angular mixture model densities
#' @inheritParams contour_model
#' @param kappa1,kappa2,kappa3,mu1,mu2,pmix model parameters and mixing
#' proportions. See the respective mixture model densities (\link{dvmsinmix}, \link{dvmcosmix},
#' \link{dwnorm2mix}) for more details.
#' @param log.density logical. Should log density be used for the plot?
#' @param xlab,ylab,zlab,main graphical parameters passed to \code{lattice::wireframe}
#' @param ... additional arguments passed to \code{lattice::wireframe}
#' @examples
#' surface_model('vmsin', 1, 1, 1.5, pi, pi)
#' surface_model('vmcos', 1, 1, 1.5, pi, pi)
#'
#' @export
surface_model <- function(model = "vmsin", kappa1, kappa2, kappa3, mu1, mu2,
pmix = rep(1/length(kappa1), length(kappa1)),
xpoints = seq(0, 2*pi, length.out = 30),
ypoints = seq(0, 2*pi, length.out = 30),
log.density = FALSE, xlab="x", ylab="y",
zlab = ifelse(log.density, "Log Density", "Density"),
main, ...)
{
if (!model %in% c("vmsin", "vmcos", "wnorm2"))
stop("model must be one of \"vmsin\", \"vmcos\" or \"wnorm2\"")
if (missing(main))
main <- paste("Density surface for", length(kappa1), "component",
model, "mixture density")
coords <- as.matrix(expand.grid(xpoints, ypoints))
inargs <- list(x = coords, kappa1 = kappa1, kappa2 = kappa2,
kappa3 = kappa3,
mu1 = mu1, mu2 = mu2, pmix = pmix)
dens <- do.call(paste0("d", model, "mix"), inargs)
denmat <- matrix(dens, nrow = length(xpoints))
if(log.density) {
denmat <- log(denmat)
}
print(basic_surfaceplot(xpoints = xpoints, ypoints = ypoints,
denmat = denmat, xlab = xlab, ylab = ylab,
main = main, zlab = zlab, ...))
}
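## The same grid evaluation drives the surface plot; with log.density = TRUE
## the wireframe shows the log mixture density instead (illustration only):
if (FALSE) {
  surface_model("vmsin", kappa1 = 1, kappa2 = 1, kappa3 = 1.5,
                mu1 = pi, mu2 = pi, log.density = TRUE)
}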
## ---- end of file: BAMBI/R/all_model_plots.R ----
#' Stepwise fitting of angular mixture models with incremental component sizes and optimum model selection
#' @inheritParams pointest
#' @inheritParams fit_angmix
#' @param start_ncomp starting component size. A single component model is fitted if \code{start_ncomp} is equal to one.
#' @param max_ncomp maximum number of components allowed in the mixture model.
#' @param crit model selection criteria, one of \code{"LOOIC", "WAIC", "AIC", "BIC", "DIC"} or \code{"LOGML"}. Default is
#' \code{"LOOIC"}.
#' @param L HMC tuning parameter (trajectory length) passed to \link{fit_angmix}. Can be a numeric vector (or scalar), in which case
#' the same \code{L} is passed to all \link{fit_angmix} calls, or can be a list of length \code{max_ncomp-start_ncomp+1},
#' so that \code{L[[i]]} is passed as the argument \code{L} to the \link{fit_angmix} call with \code{ncomp = start_ncomp+i-1}. See
#' \link{fit_angmix} for more details on \code{L}, including its default values. Ignored if \code{method = "rwmh"}.
#' @param fn function to evaluate on MCMC samples to estimate parameters.
#' Defaults to \code{mean}, which computes the estimated posterior means. If \code{fn = "MODE"} (or \code{"MAP"}),
#' then the maximum a posteriori (MAP) estimate is calculated from the MCMC run. Used only if \code{crit = "DIC"}, and ignored otherwise.
#' @param prev_par logical. Should the MAP estimated parameters from the model with \code{ncomp = K} be used in the model
#' with \code{ncomp = K+1} as the starting parameters, with the component with largest mixing proportion appearing twice in the
#' bigger model?
#' @param form form of crit to be used. Available choices are 1 and 2. Used only if \code{crit} is \code{"DIC"} and ignored otherwise.
#' @param ... additional arguments passed to \link{fit_angmix}.
#' @param logml_maxiter maximum number of iterations (\code{maxiter}) passed to \link{bridge_sampler} for calculating
#' \code{LOGML}. Ignored if \code{crit} is not \code{LOGML}.
#' @param fix_label logical. Should the label switchings on the current fit (only the corresponding "best chain" if \code{use_best_chain = TRUE})
#' be fixed before computing parameter estimates and model selection criterion? Defaults to \code{TRUE} if \code{perm_sampling} is true in
#' the \link{fit_angmix} call, or if \code{crit = "DIC"} and \code{form = 1}.
#' @param silent logical. Should the current status (such as the current component size, which job is being done, etc.)
#' be printed? Defaults to \code{TRUE}.
#' @param return_all logical. Should all angmcmc objects obtained during step-wise run be returned? *Warning*: depending on the
#' sizes of \code{n.iter}, \code{start_ncomp}, \code{max_ncomp} and \code{n.chains}, this can be very memory intensive. In such
#' cases, it is recommended that \code{return_all} be set to \code{FALSE}, and, if required, the intermediate fitted objects be
#' saved to file by setting \code{save_fits = TRUE}.
#' @param save_fits logical. Should the intermediate angmcmc objects obtained during step-wise run be saved
#' to file using \link{save}? Defaults to \code{FALSE}. See \code{save_file} and \code{save_dir}.
#' @param save_file,save_dir \code{save_file} is a list of size \code{max_ncomp-start_ncomp+1},
#' with k-th entry providing the \code{file}
#' argument used to \link{save} the intermediate angmcmc object with \code{ncomp = k} (titled \code{"fit_angmcmc"}).
#' If not provided, then \code{save_file[[k]]} is taken to be
#' \code{\link{paste}(save_dir, "comp_k", sep="/")}. Both are ignored if
#' \code{save_fits = FALSE}.
#' @param use_best_chain logical. Should only the "best" chain obtained during each intermediate fit be used during
#' computation of model selection criterion? Here "best" means the chain
#' with largest (mean over iterations) log-posterior density. This can be helpful if one of the chains gets stuck at local optima. Defaults to TRUE.
#' @param return_llik_contri passed to \link{fit_angmix}. By default, set to \code{TRUE} if \code{crit} is either \code{"LOOIC"}
#' or \code{"WAIC"}, and to \code{FALSE} otherwise.
#' @param alpha significance level used in the test H_{0K}: expected log predictive density (elpd) for the fitted model with K components >= elpd for the fitted model
#' with K + 1 components if \code{crit} is \code{"LOOIC"} or \code{"WAIC"}.
#' Must be a scalar between 0 and 1. Defaults to 0.05. See Details. Ignored for any other \code{crit}.
#' @param bonferroni_alpha logical. Should a Bonferroni correction be made on the test size \code{alpha} to adjust for
#' multiplicity due to (\code{max_ncomp} - \code{start_ncomp}) possible hypothesis tests? Defaults to TRUE.
#' Relevant only if \code{crit} is in \code{c("LOOIC", "WAIC")}, and ignored otherwise. See Details.
#' @param bonferroni_adj_type character string. Denoting type of Bonferroni adjustment to make.
#' Possible choices are \code{"decreasing"} (default) and \code{"equal"}. Ignored if either \code{bonferroni_alpha}
#' is FALSE, or \code{crit} is outside \code{c("LOOIC", "WAIC")}. See Details.
#'
#' @details
#' The goal is to fit an angular mixture model with an optimally chosen component size K.
#' To obtain an optimum K, mixture models with incremental component sizes
#' between \code{start_ncomp} and \code{max_ncomp} are fitted incrementally using \link{fit_angmix},
#' starting from K = \code{start_ncomp}.
#' If the model selection criterion \code{crit} is \code{"LOOIC"} or \code{"WAIC"}, then a test of hypothesis
#' H_{0K}: expected log predictive density (elpd) for the fitted model with K components >= elpd for the fitted model
#' with K + 1 components, is performed at every K >= 1. The test-statistic used for the test is an approximate z-score
#' based on the normalized estimated elpd difference between the two models obtained from \link[loo]{loo_compare}, which provides
#' estimated elpd difference along with its standard error estimate. Because the computed standard error of elpd difference
#' can be overly optimistic when the elpd difference is small (in particular < 4),
#' a conservative worst-case estimate (equal to twice of the computed standard error)
#' is used in such cases. To account for multiplicity among the M =
#' (\code{max_ncomp} - \code{start_ncomp}) possible sequential tests performed,
#' by default a Bonferroni adjustment to the test level \code{alpha} is made.
#' Set \code{bonferroni_alpha = FALSE} to remove the adjustment. To encourage
#' parsimony in the final model, by default (\code{bonferroni_adj_type = "decreasing"})
#' a decreasing sequence of adjusted alphas of the form \code{alpha * (0.5)^(1:M) / sum((0.5)^(1:M))}
#' is used. Set \code{bonferroni_adj_type = "equal"}
#' to use equal sequence of adjusted alphas (i.e., \code{alpha/M}) instead.
#'
#' The incremental fitting stops if H_{0K} cannot be rejected
#' (at level \code{alpha}) for some K >= 1; this K is then regarded as the optimum number of components.
#' If \code{crit} is not \code{"LOOIC"} or \code{"WAIC"} then mixture model with the first minimum value of the model selection criterion \code{crit}
#' is taken as the best model.
#'
#' Note that in each intermediate fitted model, the total number of components (instead of the number of
#' "non-empty components") in the model is used to estimate of the true component
#' size, and then the fitted model is penalized for model complexity (via the model selection criterion used).
#' This approach of selecting an optimal K follows the perspective "let two component specific parameters
#' be identical" for overfitting mixtures, and as such the Dirichlet prior hyper-parameters \code{pmix.alpha}
#' (passed to \link{fit_angmix}) should be large. See Fruhwirth-Schnatter (2011) for more details.
#'
#' Note that the stability of \link{bridge_sampler} used in marginal likelihood estimation heavily depends on stationarity of the
#' chains. As such, while using this criterion, we recommend running the chains long enough, and setting \code{fix_label = TRUE}
#' for optimal performance.
#'
#' @references
#' Fruhwirth-Schnatter, S.: Label switching under model uncertainty. In: Mengerson, K., Robert, C., Titterington, D. (eds.) Mixtures:
#' Estimation and Application, pp. 213-239. Wiley, New York (2011).
#'
#'
#'
#'
#' @return Returns a named list (with class = \code{stepfit}) with the following nine elements:
#'
#' \code{fit.all} (if \code{return_all = TRUE}) - a list of all angmcmc objects created at each component size;
#'
#' \code{fit.best} - angmcmc object corresponding to the optimum component size;
#'
#' \code{ncomp.best} - optimum component size (integer);
#'
#' \code{crit} - the model comparison criterion used (one of \code{"LOOIC", "WAIC", "AIC", "BIC", "DIC"} or \code{"LOGML"});
#'
#' \code{crit.all} - all \code{crit} values calculated (for all component sizes);
#'
#' \code{crit.best} - \code{crit} value for the optimum component size;
#'
#' \code{maxllik.all} - maximum (obtained from MCMC iterations) log likelihood for all fitted models;
#'
#' \code{maxllik.best} - maximum log likelihood for the optimal model; and
#'
#' \code{check_min} - logical; is the optimum component size less than \code{max_ncomp}?
#'
#' @examples
#' # illustration only - more iterations needed for convergence
#' set.seed(1)
#' fit.vmsin.step.15 <- fit_incremental_angmix("vmsin", tim8, "BIC", start_ncomp = 1,
#' max_ncomp = 3, n.iter = 15,
#' n.chains = 1, save_fits=FALSE)
#' (fit.vmsin.best.15 <- bestmodel(fit.vmsin.step.15))
#' lattice::densityplot(fit.vmsin.best.15)
#'
#' @export
fit_incremental_angmix <- function(model, data,
crit = "LOOIC",
start_ncomp=1, max_ncomp=10,
L = NULL,
fn = mean,
fix_label = NULL,
form = 2,
start_par = NULL,
prev_par = TRUE,
logml_maxiter = 1e4,
return_all = FALSE,
save_fits = FALSE,
save_file = NULL,
save_dir = "",
silent = FALSE,
return_llik_contri = (crit %in% c("LOOIC", "WAIC")),
use_best_chain = TRUE,
alpha = 0.05,
bonferroni_alpha = TRUE,
bonferroni_adj_type = "decreasing",
...)
{
  if (missing(model))
    stop("argument \"model\" is missing, with no default")
  if (length(model) > 1)
    stop("\'model\' must be a scalar")
if(!crit %in% c("AIC", "BIC", "DIC", "WAIC", "LOOIC", "LOGML"))
stop("non-compatible criterion")
if (model %in% c("vmsin", "vmcos", "wnorm2")) {
type <- "bi"
} else if (model %in% c("vm", "wnorm")) {
type <- "uni"
} else {
stop("non-compatible model")
}
if (!missing(start_par)) {
if (min(listLen(start_par)) != max(listLen(start_par)))
stop("Lengths of elements in start_par differ")
else if((listLen(start_par))[1] != start_ncomp)
stop("Number of components in start_par must be equal to \'start_ncomp\'")
}
if(start_ncomp > max_ncomp)
stop("\'start_ncomp\' cannot be smaller than \'max_ncomp\'")
all_ncomp <- start_ncomp:max_ncomp
n_ncomp <- length(all_ncomp)
if (is.null(save_file)) {
save_file <- lapply(all_ncomp, function(j) paste0(save_dir, "/comp_", j, ".Rdata"))
}
  else if (!is.list(save_file) || length(save_file) != n_ncomp)
stop("\'save_file\' must be a list of length max_ncomp-start_ncomp+1")
crit_print <- crit
if (crit == "LOGML")
crit_print <- "(negative) LOGML"
if (type == "bi") {
if (!(is.matrix(data) | is.data.frame(data)))
stop("\'data\' must be a two column matrix for model = \'vmsin\', \'vmcos\' and \'wnorm2\'")
if (ncol(data) != 2)
stop("\'data\' must be a two column matrix for model = \'vmsin\', \'vmcos\' and \'wnorm2\'")
data.rad <- rm_NA_rad(data)
n.data <- nrow(data.rad)
}
else {
if (!is.numeric(data))
stop("\'data\' must be a vector for \'model\' = \'vm\' and \'wnorm\'")
}
fit_all <- NULL
if (return_all)
fit_all <- vector("list", n_ncomp)
ell <- list(...)
if (is.null(ell$perm_sampling))
ell$perm_sampling <- formals(fit_angmix)$perm_sampling
if (is.null(fix_label)) {
if(any(form == 1 & crit == "DIC", ell$perm_sampling & prev_par,
ell$perm_sampling & crit == "LOGML")) {
fix_label <- TRUE
} else {
fix_label <- FALSE
}
}
ntests_max <- n_ncomp - 1
if (crit %in% c("LOOIC", "WAIC")) {
if (any(length(alpha) != 1,
!is.numeric(alpha),
alpha < 0,
alpha > 1)) {
stop("\'alpha\' must be a scalar between 0 and 1")
}
stopifnot(
is.logical(bonferroni_alpha),
length(bonferroni_alpha) == 1,
! is.na(bonferroni_alpha),
is.character(bonferroni_adj_type),
length(bonferroni_adj_type) == 1,
bonferroni_adj_type %in% c("equal", "decreasing"),
! is.na(bonferroni_adj_type)
)
alpha_vec <- rep(NA, ntests_max)
if (!bonferroni_alpha) {
alpha_vec <- rep(alpha, ntests_max)
} else if (bonferroni_adj_type == "decreasing") {
alpha_vec <- alpha * (0.5)^(1:ntests_max) / sum((0.5)^(1:ntests_max))
} else if (bonferroni_adj_type == "equal") {
      alpha_vec <- rep(alpha / ntests_max, ntests_max)
}
}
all_input <- list("data" = data, "model" = model,
return_llik_contri = return_llik_contri,
...)
formal_fit_angmix <- formals(fit_angmix)
if (is.null(all_input$n.chains))
all_input$n.chains <- formal_fit_angmix$n.chains
n.chains <- all_input$n.chains
# when missing L, get default value of L and make L_list
if (is.null(L)) {
L_list <- lapply(1:(max_ncomp-start_ncomp+1),
function(j) {
                       ncomp <- start_ncomp + j - 1  # ncomp for the j-th fit
eval(formal_fit_angmix$L)
})
}
# when L is not null, check if it's in correct format first
  # error unless L is either a numeric vector or a list of the right length
  else if ((!is.list(L) && !is.numeric(L)) ||
           (is.list(L) && length(L) != (max_ncomp - start_ncomp + 1)))
    stop("L must either be a list of length max_ncomp-start_ncomp+1 or a numeric vector")
else if (!is.list(L) & is.numeric(L)) {
L_list <- lapply(1:(max_ncomp - start_ncomp + 1),
function(j) L)
# all elements of L_list are the same
}
else if (is.list(L)) {
L_list <- L
# L_list is just L, when given properly
}
all_par_est <- vector("list", length = max_ncomp-start_ncomp+1)
# all_crit <- rep(0, n_ncomp)
all_crit <- vector("list", length = n_ncomp)
all_maxllik <- rep(0, n_ncomp)
if(!form %in% 1:2) form <- 1
check_min <- FALSE
curr_seed <- NULL
if (exists(".Random.seed", .GlobalEnv)) {
curr_seed <- .GlobalEnv$.Random.seed
}
for(j in seq_len(length(all_ncomp))) {
all_input$ncomp <- all_ncomp[j]
all_input$L <- L_list[[j]]
# copy the previous fit as fit_prev if j > 1
if (j > 1) {
fit_prev <- fit_angmcmc
rm(fit_angmcmc)
gc()
}
# starting parameters for j > 1, ncomp >= 3
if (j > 1 & prev_par & all_ncomp[j] > 2) {
all_par <- all_par_est[[j-1]]
# find the component with largest mix_prop
copy_comp_id <- which.max(all_par[1, ])[1]
new_comp <- all_par[, copy_comp_id]
new_comp_id <- all_input$ncomp
all_par <- cbind(all_par, new_comp)
# distribute the weights between the "new" and "old" components
all_par[1, c(copy_comp_id, new_comp_id)] <-
all_par[1, copy_comp_id]/2
colnames(all_par) <- 1:new_comp_id
start_par <- list_by_row(all_par)
all_input$start_par <- start_par
}
else if (j == 1 & !missing(start_par)) {
all_input$start_par <- start_par
}
else {
all_input$start_par <- NULL
}
if (!silent) {
cat("**************\n")
cat(paste("Fitting", model,
"mixture model with ncomp = ",
all_ncomp[j], "...\n") )
}
    if (!is.null(curr_seed)) {
      # restore the RNG state so that each incremental fit starts
      # from the same user-specified seed
      .GlobalEnv$.Random.seed <- curr_seed
    }
fit_angmcmc <- do.call(fit_angmix, all_input)
all_maxllik[j] <- maxllik_curr <- max(fit_angmcmc$llik[fit_angmcmc$final_iter, ])
if (!silent)
cat(paste("\nMaximum log likelihood (from MCMC iterations) =",
round(maxllik_curr, 3), "\n"))
if (use_best_chain) {
best.chain.id <- which.max(
vapply(1:fit_angmcmc$n.chains,
function (j) mean(fit_angmcmc$lpd[fit_angmcmc$final_iter, j]),
0))
fit_angmcmc_adj <- select_chains(fit_angmcmc, best.chain.id)
} else {
fit_angmcmc_adj <- fit_angmcmc
}
if (fix_label & all_ncomp[j] > 1) {
if(!silent)
cat("\nCalling fix_label with default settings ...\n")
fit_angmcmc_adj <- fix_label(fit_angmcmc_adj)
# replace fit_angmcmc by fit_angmcmc_adj if fix_label and !use_best_chain
if (!use_best_chain) {
fit_angmcmc <- fit_angmcmc_adj
}
}
else if (!silent) {
cat("\nSkipping fix_label call ...\n")
}
all_par_est[[j]] <- pointest(fit_angmcmc_adj, fn = "MODE")
if(save_fits) {
if (!silent)
cat(paste0("\nSaving the output (titled \'fit_angmcmc\') with filename = \'",
save_file[[j]], "\' ...\n"))
save(fit_angmcmc, file=save_file[[j]])
}
if (return_all)
fit_all[[j]] <- fit_angmcmc
if(!silent)
cat("\nComputing model selection criterion ...\n")
if (crit == "WAIC") {
curr_crit <- suppressWarnings(loo::waic(fit_angmcmc_adj))
all_crit[[j]] <- curr_crit
}
else if (crit == "LOOIC") {
curr_crit <- suppressWarnings(loo::loo(fit_angmcmc_adj))
all_crit[[j]] <- curr_crit
}
else if (crit == "LOGML") {
curr_crit <- tryCatch(bridgesampling::bridge_sampler(fit_angmcmc_adj, silent = TRUE, maxiter = logml_maxiter),
error = function(e) "error")
if (unlist(curr_crit)[1] == "error")
stop(paste0("log posterior too unstable with ncomp = ",
all_ncomp[j], " to calculate log ML. Try a different criterion."))
all_crit[[j]] <- -curr_crit$logml
}
else if (crit == "DIC") {
curr_crit <- DIC(fit_angmcmc_adj, form=form)
all_crit[[j]] <- curr_crit["DIC"]
}
else if (crit == "AIC") {
all_crit[[j]] <- AIC(fit_angmcmc_adj)
}
else {
all_crit[[j]] <- BIC(fit_angmcmc_adj)
}
if(!silent) {
crit_val_print <- ""
if (!crit %in% c("LOOIC", "WAIC"))
crit_val_print <- round(all_crit[[j]], 3)
cat(paste("\t", "ncomp = ", all_ncomp[j], ",\t",
crit_print, ":", crit_val_print))
if (crit %in% c("LOOIC", "WAIC")) {
cat("\n")
cat(suppressWarnings(utils::capture.output(all_crit[[j]])), sep = "\n")
}
cat("\n")
cat("**************\n\n")
}
if (j > 1 ) {
if (crit %in% c("LOOIC", "WAIC")) {
crit_list <- list(
comp_j_minus_1 = all_crit[[j-1]],
comp_j = all_crit[[j]]
)
compare_crit_obj <- loo::loo_compare(
x = crit_list
)
E_diff <- (
compare_crit_obj["comp_j", "elpd_diff"]
- compare_crit_obj["comp_j_minus_1", "elpd_diff"]
)
E_diff_se <- sum(compare_crit_obj[, "se_diff"])
if (abs(E_diff) < 4) {
E_diff_se <- 2 * E_diff_se
}
# test for signif improvement in elpd
# H0: curr elpd - prev elpd <= 0 vs Ha: >
zscore <- E_diff/E_diff_se
pval_curr <- pnorm(zscore, lower.tail = FALSE)
# zscore <- compare_crit[1]/compare_crit[2]
if (pval_curr >= alpha_vec[j-1]) {
# fail to reject null at alpha --
# so no signific improvement in curr elpd compared to prev
check_min <- TRUE
j.best <- j-1
pval_txt <- format(pval_curr, digits = 3, scientific = TRUE)
alpha_txt <- format(alpha_vec[j-1], digits = 3, scientific = TRUE)
msg <- paste0(
"\nImprovement in predicitive accuracy not significant",
" (p=", pval_txt,
">=level=", alpha_txt,
"). Stopping...\n\n"
)
cat(msg)
fit_best <- fit_prev #previous fit is best
break
}
} else if (all_crit[[j]] > all_crit[[j-1]]) {
check_min <- TRUE
j.best <- j-1
cat("\nFirst minimum attained. Stopping...\n")
fit_best <- fit_prev #previous fit is best
break
}
}
if (all_ncomp[j] == max_ncomp) {
cat("\n\'max_ncomp\' reached. Stopping...\n")
j.best <- j
fit_best <- fit_angmcmc
}
}
result <- list("fit.all" = fit_all[1:j], "fit.best" = fit_best,
"ncomp.best" = all_ncomp[j.best], "crit" = crit,
"crit.all" = all_crit[1:j],
"crit.best" = all_crit[[j.best]],
"maxllik.all" = all_maxllik[1:j],
"maxllik.best" = all_maxllik[j.best],
"check_min" = check_min)
class(result) <- "stepfit"
result
}
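## Sketch of the Bonferroni-adjusted test levels described in Details, for
## M = max_ncomp - start_ncomp possible sequential tests (illustration only):
if (FALSE) {
  alpha <- 0.05
  M <- 5
  # "decreasing" (default): geometrically decaying levels summing to alpha,
  # encouraging parsimony in the final model
  alpha * (0.5)^(1:M) / sum((0.5)^(1:M))
  # "equal": the usual alpha/M correction
  rep(alpha / M, M)
}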
#' Convenience function for extracting angmcmc object, and the value of the model
#' selection criterion corresponding to the best fitted model in stepwise fits
#'
#' @param step_object stepwise fitted object obtained from \link{fit_incremental_angmix}.
#'
#' @return \code{bestmodel} returns an \code{angmcmc} object, and
#' \code{bestcriterion} returns the corresponding value of model selection criterion for the best fitted model in \code{step_object}.
#'
#' @details
#' These are convenience functions; the best fitted model and the corresponding value of model selection criterion
#' can also be directly obtained by
#' extracting the elements \code{"fit.best"} and \code{"crit.best"} from \code{step_object} respectively.
#' Note that \code{bestcriterion} returns:
#' (a) a scalar number (class = \code{numeric}) if \code{crit}
#' used in original \code{fit_incremental_angmix} call is \code{'AIC'}, \code{'BIC'} or \code{'DIC'},
#' (b) an element of class \code{bridge} from package \code{bridgesampling} if \code{crit} is
#' \code{LOGML}, (c) an element of class \code{c("waic", "loo")} if \code{crit = 'WAIC'}, and (d) an element of
#' class \code{c("psis_loo", "loo")} if \code{crit = "LOOIC"}. See documentations of these model
#' selection criteria for more details.
#'
#' @examples
#' # illustration only - more iterations needed for convergence
#' set.seed(1)
#' fit.vmsin.step.15 <- fit_incremental_angmix("vmsin", tim8, start_ncomp = 1,
#' max_ncomp = 3, n.iter = 15,
#' n.chains = 1,
#' crit = "WAIC")
#' fit.vmsin.best.15 <- bestmodel(fit.vmsin.step.15)
#' fit.vmsin.best.15
#'
#' crit.best <- bestcriterion(fit.vmsin.step.15)
#' crit.best
#' @export
bestmodel <- function(step_object) {
if (!is(step_object, "stepfit")) stop("\'step_object\' is not a stepwise fitted object")
step_object$fit.best
}
#' @rdname bestmodel
#' @export
bestcriterion <- function(step_object) {
if (!is(step_object, "stepfit")) stop("\'step_object\' is not a stepwise fitted object")
step_object$crit.best
}
## ---- end of file: BAMBI/R/all_model_select.R ----
#' Add (extra) burnin and thin to angmcmc object after original run
#' @param object angmcmc object
#' @inheritParams fit_angmix
#' @examples
#' # first fit a vmsin mixture model
#' # illustration only - more iterations needed for convergence
#' fit.vmsin.20 <- fit_vmsinmix(tim8, ncomp = 3, n.iter = 20,
#' n.chains = 1)
#' lpdtrace(fit.vmsin.20)
#' # Now add extra burn-in
#' fit.vmsin.20.burn <- add_burnin_thin(fit.vmsin.20, 0.3)
#' lpdtrace(fit.vmsin.20.burn)
#' @export
add_burnin_thin <- function(object, burnin.prop=0, thin=1)
{
if (!is.angmcmc(object))
stop("object must be an \'angmcmc\' object")
if(burnin.prop < 0 || burnin.prop >= 1)
stop("\"burnin.prop\" must be in [0, 1)")
if(thin < 1)
stop("\"thin\" must be a positive integer")
final_iter.orig <- object$final_iter
final_iter.burn <- setdiff(final_iter.orig,
final_iter.orig[seq_len(ceiling(length(final_iter.orig)*burnin.prop))])
thin <- round(thin)
thin.final <- object$thin*thin
  # entries of final_iter.burn are already object$thin apart, so keeping
  # every thin-th entry yields a combined thinning of thin.final
  final_iter.thin <- final_iter.burn[c(TRUE, rep(FALSE, thin - 1))]
object$n.burnin <- final_iter.thin[1] - 1
object$thin <- thin.final
object$n.iter.final <- length(final_iter.thin)
object$final_iter <- final_iter.thin
object
}
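## Sketch of the index arithmetic used above: from 100 retained draws, drop
## the first 30% as extra burn-in, then keep every 2nd draw (illustration only):
if (FALSE) {
  it <- 1:100
  it_burn <- setdiff(it, it[seq_len(ceiling(length(it) * 0.3))])
  thin <- 2
  it_thin <- it_burn[c(TRUE, rep(FALSE, thin - 1))]
  head(it_thin)  # 31 33 35 37 39 41
}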
#' Select chains from angmcmc objects
#' @inheritParams pointest
#' @param chain.no labels of chains to be retained in the final sample. If missing,
#' all chains are used.
#' @param ... unused
#'
#' @examples
#' # illustration only - more iterations needed for convergence
#' fit.vmsin.20 <- fit_vmsinmix(tim8, ncomp = 3, n.iter = 20,
#' L = c(10, 12), chains_parallel = FALSE,
#' n.chains = 2)
#' fit.vmsin.20
#' fit.vmsin.20.1 <- select_chains(fit.vmsin.20, 1)
#' fit.vmsin.20.1
#'
#' @return Returns another angmcmc object containing only the chains selected via \code{chain.no}
#' @export
select_chains <- function(object, chain.no, ...)
{
if (!is.angmcmc(object))
stop("\'object\' must be an angmcmc object")
if (missing(chain.no)) {
chain.no <- 1:object$n.chains
} else if (any(!chain.no %in% 1:object$n.chains)) {
stop("invalid chain number")
}
object$par.value <- object$par.value[, , , chain.no, drop=FALSE]
object$clus.ind <- object$clus.ind[, , chain.no, drop=FALSE]
object$llik.contri <- object$llik.contri[, , chain.no, drop=FALSE]
object$mem.prob <- object$mem.prob[, , , chain.no, drop=FALSE]
object$llik <- object$llik[, chain.no, drop=FALSE]
object$lpd <- object$lpd[, chain.no, drop=FALSE]
object$lprior <- object$lprior[, chain.no, drop=FALSE]
object$accpt.modelpar <- object$accpt.modelpar[, , chain.no, drop=FALSE]
if (object$return_tune_param)
object$tune_param <- object$tune_param[, , chain.no, drop=FALSE]
object$epsilon <- object$epsilon[, chain.no, drop=FALSE]
object$L <- object$L[chain.no]
object$propscale <- object$propscale[, chain.no, drop=FALSE]
object$n.chains <- length(chain.no)
object
}
#' Create an mcmc.list object from an angmcmc object
#' @param x angmcmc object
#' @param ... unused
#'
#' @examples
#' # first fit a vmsin mixture model
#' # illustration only - more iterations needed for convergence
#' fit.vmsin.20 <- fit_vmsinmix(tim8, ncomp = 3, n.iter = 20,
#' n.chains = 1)
#'
#' # now convert it to mcmc.list
#' library(coda)
#' fit.vmsin.20.mcmc <- as.mcmc.list(fit.vmsin.20)
#' @importFrom coda as.mcmc.list
#' @method as.mcmc.list angmcmc
#'
#' @export
as.mcmc.list.angmcmc <- function(x, ...)
{
if (!is.angmcmc(x))
stop("\'x\' must be an angmcmc object")
object <- x
npars <- length(object$par.name)
ncomp <- object$ncomp
n.iter <- object$n.iter
n.data <- object$n.data
n.chains <- object$n.chains
if (object$ncomp == 1) {
par_names_long <- object$par.name
} else {
par_names_long <- apply(as.matrix(expand.grid(object$par.name, 1:object$ncomp)),
1, function(x) paste0(x[1], "[", x[2], "]"))
}
start <- object$final_iter[1]
end <- object$n.iter
thin <- object$thin
out <- vector("list", n.chains)
for(ii in 1:n.chains) {
par.vals <- object$par.value[, , , ii, drop=FALSE]
x <- t(matrix(c(par.vals), ncomp*npars, n.iter))
colnames(x) <- par_names_long
if (object$ncomp == 1) x <- x[, -1, drop = FALSE]
out[[ii]] <- coda::mcmc(x, start = start, end = end, thin = thin)
}
coda::as.mcmc.list(out)
}
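## Once converted, standard coda diagnostics apply directly (illustration
## only; more iterations and chains needed in practice):
if (FALSE) {
  fit <- fit_vmsinmix(tim8, ncomp = 3, n.iter = 20, n.chains = 1)
  fit_mcmc <- coda::as.mcmc.list(fit)
  coda::effectiveSize(fit_mcmc)  # effective sample size per parameter
  summary(fit_mcmc)
}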
#' Fix label switching in angmcmc objects
#' @inheritParams pointest
#'
#' @param ... arguments other than \code{z, K, complete, mcmc, p}
#' and \code{data} passed to \link{label.switching}. See details.
#'
#' @details \code{fix_label} is a wrapper for \link{label.switching} from
#' package \code{label.switching} for \code{angmcmc} objects. The arguments
#' \code{z, K, complete, mcmc, p} and \code{data} are appropriately filled in
#' from \code{object}. The \code{label.switching} argument \code{method} can
#' be a scalar or vector; for this wrapper it defaults to \code{"STEPHENS"} if the \code{angmcmc} was
#' created with permutation sampling (by setting perm_sampling = TRUE in
#' \link{fit_angmix}), and to \code{"DATA-BASED"} otherwise.
#'
#' @return Returns a single \code{angmcmc} object or a list of \code{angmcmc} objects (according to whether
#' the argument \code{method} is a scalar or vector) with label switchings corrected (after burn-in and thin)
#' according to the resulting permutation from \link{label.switching}.
#'
#' @examples
#' # first fit a vmsin mixture model
#' # illustration only - more iterations needed for convergence
#' fit.vmsin.20 <- fit_vmsinmix(tim8, ncomp = 3, n.iter = 20,
#' n.chains = 1)
#' # now apply fix_label
#' fit.vmsin.20.fix <- fix_label(fit.vmsin.20)
#'
#' @export
fix_label <- function(object, ...) {
if (!is.angmcmc(object))
stop("\'object\' must be an angmcmc object")
if (object$ncomp == 1)
stop("Fitted model has only one component")
# first collect all necessary arguments from all chains,
# and collapse into a wide matrix matrix
K <- ncomp <- object$ncomp
n.data <- object$n.data
n.iter.final <- object$n.iter.final
n.chains <- object$n.chains
final_iter <- object$final_iter
npar <- length(object$par.name)
ell <- list(...)
if (is.null(ell$method)) {
if (object$perm_sampling)
ell$method <- "STEPHENS"
else
ell$method <- "DATA-BASED"
}
method <- ell$method
iter_no <- matrix(1:(n.chains*n.iter.final), ncol = n.chains)
z <- matrix(1, n.chains*n.iter.final, n.data)
p <- array(1, c(n.chains*n.iter.final, n.data, ncomp))
mcmc_par <- array(1, c(n.chains*n.iter.final, ncomp, npar))
for(ii in 1:n.chains) {
z[iter_no[, ii], ] <- t(object$clus.ind[, final_iter, ii])
p[iter_no[, ii], , ] <- aperm(object$mem.prob[, , final_iter, ii], c(3, 1, 2))
mcmc_par[iter_no[, ii], , ] <- aperm(object$par.value[, , final_iter, ii], c(3, 2, 1))
}
par_long <- aperm(mcmc_par, c(3, 2, 1))
mem_prob_long <- aperm(p, c(2, 3, 1))
clus_ind_long <- t(z)
complete <- function(x, z, pars) {
# x: data (size = n)
# z: allocation vector (size = n)
# pars: K x npars matrix of normal mixture parameters:
# pars[k, 1] = k-th mixing proportion
# pars[k, -1] = k-th component parameters
pars.t <- t(pars)
rownames(pars.t) <- object$par.name
K <- ncol(pars.t)
# pars.t[1, ] : mixing proportions
# pars.t[-1, ] : model pars
if (object$type == "bi")
data_groups <- lapply(1:K, function(j) x[which(z == j), ])
else
data_groups <- lapply(1:K, function(j) x[which(z == j)])
n.groups <- listLen(data_groups)
res <- 0
for(j in 1:K) {
inargs <- list_by_row(pars.t[-1, j, drop = FALSE])
inargs$x <- data_groups[[j]]
if (object$model %in% c("wnorm", "wnorm2"))
inargs$int.displ <- object$int.displ
else if (object$model == "vmcos")
inargs$qrnd_grid <- object$qrnd_grid
res <- res + sum(
signif(
log(
do.call(paste0("d", object$model), inargs)
),
8
)
)
}
res + sum(n.groups * log(pars.t[1, ]))
}
labswitch_in <- addtolist(ell,
z = z, K = ncomp,
complete = complete,
mcmc = mcmc_par,
p = p, data = object$data)
lab_switch <-
do.call(label.switching::label.switching, labswitch_in)
  # now rearrange the component labels according to the permutations
  # obtained in lab_switch, and revert back to the original
  # (one-higher-dimensional array) form of the outputs
res <- vector("list", length(method))
for(j in 1:length(method)) {
tmp <- object
method_curr <- method[j]
perm_mat <- lab_switch[["permutations"]][[j]]
par.value.mod <- tmp$par.value[, , final_iter, , drop=FALSE]
mem.prob.mod <- tmp$mem.prob[, , final_iter, , drop=FALSE]
clus.ind.mod <- tmp$clus.ind[, final_iter, , drop=FALSE]
if (object$return_tune_param)
tune_param_mod <- tmp$tune_param[, final_iter, , drop=FALSE]
# relabel on these subarrays
for(ii in 1:n.chains) {
for(iter in 1:n.iter.final) {
par.value.mod[ , , iter, ii] <- par.value.mod[, perm_mat[iter_no[iter, ii], ], iter, ii]
mem.prob.mod[, , iter, ii] <- mem.prob.mod[, perm_mat[iter_no[iter, ii], ], iter, ii]
clus.ind.mod[, iter, ii] <- perm_mat[iter_no[iter, ii], clus.ind.mod[, iter, ii]]
if (object$return_tune_param) {
          tune_param_temp_mat <- matrix(tune_param_mod[, iter, ii], ncol=ncomp)
          tune_param_mod[, iter, ii] <- c(tune_param_temp_mat[, perm_mat[iter_no[iter, ii], ]])
}
}
}
tmp$par.value[, , final_iter, ] <- par.value.mod
tmp$mem.prob[, , final_iter, ] <- mem.prob.mod
tmp$clus.ind[, final_iter, ] <- clus.ind.mod
if (object$return_tune_param)
tmp$tune_param[, final_iter, ] <- tune_param_mod
tmp$fixed.label <- TRUE
res[[j]] <- tmp
}
if (length(method) == 1)
res <- res[[1]]
res
}
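## fix_label forwards 'method' to label.switching::label.switching; a vector
## of methods returns one relabelled fit per method (illustration only;
## method names are those accepted by label.switching):
if (FALSE) {
  fit <- fit_vmsinmix(tim8, ncomp = 3, n.iter = 20, n.chains = 1)
  fit_fix <- fix_label(fit)  # single method: returns one angmcmc object
  fit_fix_list <- fix_label(fit, method = c("STEPHENS", "DATA-BASED"))
  length(fit_fix_list)  # 2: a list of angmcmc objects
}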
#' Point estimates for parameters from an angmcmc object
#' @param object angular MCMC object.
#' @param fn function, or a single character string specifying its name, to evaluate on MCMC samples to estimate
#' parameters. Defaults to \code{mean}, which computes the estimated posterior mean.
#' Note that if \code{fn = "MODE"} (warning: not \code{"mode"}) or \code{fn = "MAP"}, then the maximum aposteriori estimate (MAP) is
#' calculated.
#' @param par.name vector of names of parameters for which point estimates are to be computed. If \code{NULL}, results for all parameters are provided.
#' @param comp.label vector of component labels (positive integers, e.g., \code{1, 2, ...}) for which point estimates are to be computed.
#' If \code{NULL}, results for all components are provided.
#' @param chain.no vector of chain numbers whose samples are to be be used.
#' in the estimation. By default all chains are used.
#' @param ... additional arguments to be passed to the function.
#'
#' @return Returns a matrix of point estimates, or vector of point estimates if \code{length(par.name)==1} or \code{length(comp.label)==1}.
#'
#' @examples
#' # first fit a vmsin mixture model
#' # illustration only - more iterations needed for convergence
#' fit.vmsin.20 <- fit_vmsinmix(tim8, ncomp = 3, n.iter = 20,
#' n.chains = 1)
#' # estimate parameters by sample mean
#' (est_mean <- pointest(fit.vmsin.20))
#' # estimate parameters by sample median
#' (est_median <- pointest(fit.vmsin.20, fn = median))
#' # estimate parameters by MAP
#' (est_median <- pointest(fit.vmsin.20, fn = "MODE"))
#' @export
pointest <- function(object, fn = mean, par.name,
comp.label, chain.no, ...)
{
if (!is.angmcmc(object))
stop("\'object\' must be an angmcmc object")
if (is.character(fn)) {
if (fn == "MODE" | fn == "MAP") {
do_MAP <- TRUE
}
else {
do_MAP <- FALSE
fn <- match.fun(fn)
}
} else {
do_MAP <- FALSE
fn <- match.fun(fn)
}
ell <- list(...)
if (any(!is.null(ell$burnin), !is.null(ell$thin)))
warning("Use of burnin and thin are obsolete. Specify \'burnin.prop\' and \'thin\' during original MCMC run, or use \'add_burnin_thin\'.")
if (missing(par.name)) {
par.name <- object$par.name
} else if (any(!par.name %in% object$par.name)) {
stop("invalid par.name")
}
if (missing(comp.label)) {
comp.label <- 1:object$ncomp
} else if (any(!comp.label %in% 1:object$ncomp)) {
stop("invalid component label")
}
if (missing(chain.no)) {
chain.no <- 1:object$n.chains
} else if (any(!chain.no %in% 1:object$n.chains)) {
stop("invalid chain number")
}
par.posit <- which(object$par.name %in% par.name)
final_iter <- object$final_iter
if (!do_MAP) {
res <- apply(object$par.value[par.posit, comp.label, final_iter, chain.no, drop = FALSE],
c(1, 2), fn, drop = FALSE)
}
else {
    # locate the (iteration, chain) pair attaining the maximum log posterior
    # density among the retained iterations and selected chains
    lpd_sub <- object$lpd[final_iter, chain.no, drop = FALSE]
    map_indic <- which(lpd_sub == max(lpd_sub), arr.ind = TRUE)[1, ]  # first entry of max
    res_mat <- as.matrix(object$par.value[, , final_iter[map_indic[1]],
                                          chain.no[map_indic[2]]])
res <- res_mat[par.posit, comp.label, drop=FALSE]
}
rownames(res) <- par.name
colnames(res) <- comp.label
res
}
#' Quantile estimates for parameters from an angmcmc object
#' @inheritParams pointest
#' @param x angmcmc object
#' @inheritParams stats::quantile
#' @return Returns a named list of length \code{length(probs)}, one element per requested
#' probability; each element is a matrix of quantiles with rows indexed by \code{par.name}
#' and columns by \code{comp.label}.
#' @param ... further arguments to pass to \code{quantile}. By default
#' \code{probs = seq(0, 1, 0.25)} is used for each parameter.
#'
#' @examples
#' # first fit a vmsin mixture model
#' # illustration only - more iterations needed for convergence
#' fit.vmsin.20 <- fit_vmsinmix(tim8, ncomp = 3, n.iter = 20,
#' n.chains = 1)
#' # 0.025th quantiles
#' (quant_025 <- quantile(fit.vmsin.20, prob = 0.025))
#' # 0.975th quantiles
#' (quant_975 <- quantile(fit.vmsin.20, prob = 0.975))
#' # default quantiles
#' (quant_def <- quantile(fit.vmsin.20))
#'
#' @export
quantile.angmcmc <- function(x, par.name, comp.label, chain.no,
probs = seq(0, 1, 0.25), ...)
{
object <- x
ell <- list(...)
if (!is.angmcmc(object))
stop("\'x\' must be an angmcmc object")
if (any(!is.null(ell$burnin), !is.null(ell$thin)))
warning("Use of burnin and thin are obsolete. Specify \'burnin.prop\' and \'thin\' during original MCMC run, or use \'add_burnin_thin\'.")
if (missing(par.name)) {
par.name <- object$par.name
} else if (any(!par.name %in% object$par.name)) {
stop("invalid par.name")
}
if (missing(comp.label)) {
comp.label <- 1:object$ncomp
} else if (any(!comp.label %in% 1:object$ncomp)) {
stop("invalid component label")
}
if (missing(chain.no)) {
chain.no <- 1:object$n.chains
} else if (any(!chain.no %in% 1:object$n.chains)) {
stop("invalid chain number")
}
par.posit <- which(object$par.name %in% par.name)
final_iter <- object$final_iter
n_probs <- length(probs)
res <- list()
for(jj in 1:n_probs) {
res[[jj]] <- apply(object$par.value[par.posit, comp.label, final_iter, chain.no, drop = FALSE],
c(1, 2),
function(y) quantile(y, probs = probs[jj]))
rownames(res[[jj]]) <- par.name
colnames(res[[jj]]) <- comp.label
}
names(res) <- paste0(round(probs*100), "%")
res
}
#' Extract MCMC samples for parameters from an angmcmc object
#' @inheritParams pointest
#' @param object angular MCMC object
#' @param drop logical. Should the dimension of the output be dropped, if \code{par.name},
#' \code{comp.label} or \code{chain.no} has a single level?
#' @details The defaults for both \code{par.name} and \code{comp.label} are all possible choices
#' available in \code{object}.
#' @return
#' Returns a four dimensional array with
#'
#' dimension 1 - model parameters and mixing proportions
#' dimension 2 - components
#' dimension 3 - MCMC iterations
#' dimension 4 - chain number
#'
#' @examples
#' # first fit a vmsin mixture model
#' # illustration only - more iterations needed for convergence
#' fit.vmsin.20 <- fit_vmsinmix(tim8, ncomp = 3, n.iter = 20,
#' n.chains = 1)
#' # extract Markov chain realizations for kappa1 from component 1
#' extr_kappa1_1 <- extractsamples(fit.vmsin.20, "kappa1", 1)
#' # for kappa1 from component from all components
#' extr_kappa1 <- extractsamples(fit.vmsin.20, "kappa1")
#' # for all parameters in component 1
#' extr_1 <- extractsamples(fit.vmsin.20, comp.label = 1)
#'
#' @export
extractsamples <- function(object, par.name, comp.label,
chain.no, drop = TRUE, ...)
{
if (!is.angmcmc(object))
stop("\'object\' must be an angmcmc object")
ell <- list(...)
if (any(!is.null(ell$burnin), !is.null(ell$thin)))
warning("Use of burnin and thin are obsolete. Specify \'burnin.prop\' and \'thin\' during original MCMC run, or use \'add_burnin_thin\'.")
if (missing(par.name)) {
par.name <- object$par.name
} else if (any(!par.name %in% object$par.name)) {
stop("invalid par.name")
}
if (missing(comp.label)) {
comp.label <- 1:object$ncomp
} else if (any(!comp.label %in% 1:object$ncomp)) {
stop("invalid component label")
}
if (missing(chain.no)) {
chain.no <- 1:object$n.chains
} else if (any(!chain.no %in% 1:object$n.chains)) {
stop("invalid chain number")
}
par.posit <- which(object$par.name %in% par.name)
final_iter <- object$final_iter
out <- object$par.value[par.posit, comp.label, final_iter, chain.no, drop = drop]
if (!drop)
dimnames(out) <- list(par.name, comp.label, NULL, chain.no)
out
}
#' Summary statistics for parameters from an angmcmc object
#' @inheritParams base::summary
#' @inheritParams pointest
#' @param object angular MCMC object.
#' @details Computes (after thinning and discarding burn-in) point estimates with 95\% posterior credible sets for all components and all parameters,
#' together with the sample averages of log likelihood and log posterior density.
#' @return Returns a list with elements \code{estimate, lower, upper, llik} and \code{lpd}. \code{estimate}
#' is itself a list with three elements: \code{mean}, \code{median} and \code{mode} providing the
#' sample mean, sample median and (sample) MAP estimates.
#'
#' Note that \code{summary.angmcmc} has its own print method, providing a table of the estimated means and 95\% credible intervals for each parameter.
#'
#' @examples
#' # illustration only - more iterations needed for convergence
#' fit.vmsin.20 <- fit_vmsinmix(tim8, ncomp = 3, n.iter = 20,
#' n.chains = 1)
#' summary(fit.vmsin.20)
#'
#' @export
summary.angmcmc <- function(object, par.name, comp.label,
chain.no, ...)
{
if (!is.angmcmc(object))
stop("\'object\' must be an angmcmc object")
ell <- list(...)
if (any(!is.null(ell$burnin), !is.null(ell$thin)))
warning("Use of burnin and thin are obsolete. Specify \'burnin.prop\' and \'thin\' during original MCMC run, or use \'add_burnin_thin\'.")
if (missing(par.name)) {
par.name <- object$par.name
} else if (any(!par.name %in% object$par.name)) {
stop("invalid par.name")
}
if (missing(comp.label)) {
comp.label <- 1:object$ncomp
} else if (any(!comp.label %in% 1:object$ncomp)) {
stop("invalid component label")
}
if (missing(chain.no)) {
chain.no <- 1:object$n.chains
} else if (any(!chain.no %in% 1:object$n.chains)) {
stop("invalid chain number")
}
est <- pointest(object, mean, par.name, comp.label, chain.no)
map <- pointest(object, "MODE", par.name, comp.label, chain.no)
quants <- quantile(object, par.name, comp.label, chain.no,
probs = c(0.5, 0.025, 0.975))
med <- quants[[1]]
low <- quants[[2]]
up <- quants[[3]]
llik <- as.numeric(logLik(object))
lpd <- mean(object$lprior) + llik
res <- list("estimate " = list(mean = est, median = med, mode = map), "upper" = up, "lower" = low,
"llik" = llik, "lpd" = lpd)
class(res) <- "summary_angmcmc"
res
}
#' @export
print.summary_angmcmc <- function(x, ...)
{
res <- x
print(est_ci(res$estimate$mean, res$lower, res$upper, digits=2))
}
#' Deviance Information Criterion (DIC) for angmcmc objects
#' @inheritParams pointest
#' @param ... additional model specific arguments to be passed to \code{DIC}. For example, \code{int.displ}
#' specifies integer displacement in wnorm and wnorm2 models. See \link{fit_wnormmix} and
#' \link{fit_wnorm2mix} for more details.
#' @param form form of DIC to use. Available choices are 1 and 2 (default). See details.
#' @return Computes the DIC for a given angmcmc object
#' @details Given a deviance function \eqn{D(\theta) = -2 \log(p(y|\theta))}, and an estimate
#' \eqn{\theta* = (\sum \theta_s) / N} of the posterior mean
#' \eqn{E(\theta|y)}, where \eqn{y} denotes the data, \eqn{\theta} is the vector of unknown
#' parameters of the model, \eqn{\theta_1, ..., \theta_N} are MCMC samples from the posterior
#' distribution of \eqn{\theta} given \eqn{y}, and \eqn{p(y|\theta)} is the likelihood function,
#' the Deviance Information Criterion (DIC) is defined as
#' \deqn{DIC = \bar{D} + p_D}
#' where \eqn{\bar{D} = N^{-1} \sum_{s=1}^N D(\theta_s)} denotes the posterior mean deviance.
#' In form 1, the effective number of parameters is \eqn{p_D = \bar{D} - D(\theta*)}, whereas
#' in form 2 it is \eqn{p_D = \hat{var}(D(\theta_s))/2 = 2 \hat{var}(\log p(y|\theta_s))},
#' with \eqn{\hat{var}} denoting the sample variance computed over the realizations
#' \eqn{\theta_1, ..., \theta_N}.
#'
#' Like AIC and BIC, DIC is an asymptotic approximation for large samples, and
#' is only valid when the posterior distribution is approximately normal.
#'
#' @examples
#' # illustration only - more iterations needed for convergence
#' fit.vmsin.20 <- fit_vmsinmix(tim8, ncomp = 3, n.iter = 20,
#' n.chains = 1)
#' DIC(fit.vmsin.20)
#'
#' @export
DIC <- function(object, form = 2, ...)
{
if (!is.angmcmc(object))
stop("\'object\' must be an angmcmc object")
ell <- list(...)
if (any(!is.null(ell$burnin), !is.null(ell$thin)))
warning("Use of burnin and thin are obsolete. Specify \'burnin.prop\' and \'thin\' during original MCMC run, or use \'add_burnin_thin\'.")
if(!form %in% 1:2)
stop("form must be either 1 or 2")
final_iter <- object$final_iter
all_llik <- object$llik[final_iter, ]
  # D(theta*) requires the log likelihood evaluated at the parameter
  # estimate (method = 2); with method = 1 and fn = mean this would just
  # reproduce Dbar, making the form 1 pD identically zero
  llik_estpar <-
    as.numeric(logLik.angmcmc(object, fn = mean, method = 2))
D_all <- -2*all_llik
Dbar <- mean(D_all)
Dhat <- -2*llik_estpar
if (form == 1) {
pD <- Dbar - Dhat
} else {
    pD <- 0.5*var(c(D_all)) # flatten over chains before taking the variance
}
c("pD" = pD, "DIC" = pD + Dbar)
}
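# A minimal sketch (not run) of how the two forms of pD used in DIC() above
# relate, using a toy vector of iteration-wise log-likelihood values;
# 'llik_draws' and 'llik_at_est' are hypothetical stand-ins for
# object$llik[final_iter, ] and the plug-in log-likelihood from logLik.angmcmc.
if (FALSE) {
  set.seed(1)
  llik_draws <- rnorm(1000, mean = -500, sd = 2) # toy MCMC log-likelihoods
  llik_at_est <- -498                            # toy plug-in log-likelihood
  D_all <- -2 * llik_draws
  Dbar <- mean(D_all)        # posterior mean deviance
  Dhat <- -2 * llik_at_est   # deviance at the parameter estimate
  pD1 <- Dbar - Dhat         # form 1
  pD2 <- 0.5 * var(D_all)    # form 2: var(D)/2 = 2 * var(log-likelihood)
  c(DIC1 = pD1 + Dbar, DIC2 = pD2 + Dbar)
}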
#' Watanabe-Akaike Information Criterion (WAIC) for angmcmc objects
#'
#' @param x angmcmc object.
#' @param ... additional model specific arguments to be passed to \link{waic} from loo. For example, \code{int.displ}
#' specifies integer displacement in wnorm and wnorm2 models. See \link{fit_wnormmix} and
#' \link{fit_wnorm2mix} for more details.
#' @return Computes the WAIC for a given angmcmc object.
#' @details
#' Given a deviance function \eqn{D(\eta) = -2 \log(p(y|\eta))}, and an estimate
#' \eqn{\eta* = (\sum \eta_s) / N} of the posterior mean
#' \eqn{E(\eta|y)}, where \eqn{y = (y_1, ..., y_n)} denotes the data, \eqn{\eta} is the unknown
#' parameter vector of the model, \eqn{\eta_1, ..., \eta_N} are MCMC samples from the posterior
#' distribution of \eqn{\eta} given \eqn{y} and \eqn{p(y|\eta)} is the likelihood function,
#' the Watanabe-Akaike Information Criterion (WAIC) is defined as
#' \deqn{WAIC = LPPD - p_W}
#' where
#' \deqn{LPPD = \sum_{i=1}^n \log (N^{-1} \sum_{s=1}^N p(y_i|\eta_s) )}
#' and (form 1 of)
#' \deqn{p_W = 2 \sum_{i=1}^n [ \log (N^{-1} \sum_{s=1}^N p(y_i|\eta_s) ) - N^{-1} \sum_{s=1}^N \log \:p(y_i|\eta_s) ].}
#' An alternative form (form 2) for \eqn{p_W} is given by
#' \deqn{p_W = \sum_{i=1}^n \hat{var} \log p(y_i|\eta)}
#' where for \eqn{i = 1, ..., n}, \eqn{\hat{var} \log p(y_i|\eta)} denotes the estimated variance
#' of \eqn{\log p(y_i|\eta)} based on the realizations \eqn{\eta_1, ..., \eta_N}.
#'
#' Note that \code{waic.angmcmc} calls \link{waic} for computation. If the likelihood contribution of each data
#' point for each MCMC iteration is available in \code{object} (which can be returned by setting
#' \code{return_llik_contri = TRUE} during the \link{fit_angmix} call), \code{waic.array} is used; otherwise
#' \code{waic.function} is called. Computation is much faster if the likelihood contributions are available -
#' however, they are very memory intensive, and by default not returned in \link{fit_angmix}.
#'
#' @examples
#' # illustration only - more iterations needed for convergence
#' fit.vmsin.20 <- fit_vmsinmix(tim8, ncomp = 3, n.iter = 20,
#' n.chains = 1, return_llik_contri = TRUE)
#' library(loo)
#' waic(fit.vmsin.20)
#'
#' @importFrom loo waic loo
#'
#' @export
waic.angmcmc <- function(x, ...)
{
object <- x
if (!is.angmcmc(object))
stop("\'x\' must be an angmcmc object")
ell <- list(...)
if (any(!is.null(ell$burnin), !is.null(ell$thin)))
warning("Use of burnin and thin are obsolete. Specify \'burnin.prop\' and \'thin\' during original MCMC run, or use \'add_burnin_thin\'.")
final_iter <- object$final_iter
if (object$return_llik_contri) {
all_llik_contri <- object$llik.contri
llik_for_loo <- aperm(all_llik_contri[, final_iter, , drop = FALSE], c(2, 3, 1))
waic <- loo::waic.array(llik_for_loo, ...)
}
else {
samples <- object$par.value[, , final_iter, , drop=FALSE]
chain_id <- unlist(lapply(1:object$n.chains, function(j) rep(j, length(final_iter))))
dim_samples <- dim(samples)
samples_long <- array(c(samples),
dim = c(dim_samples[1:2], dim_samples[3]*dim_samples[4]))
dimnames(samples_long)[[1]] <- object$par.name
calc_llik_contri <- function(data_i, draws) {
N <- dim(draws)[3]
out <- rep(NA, N)
for (j in 1:N) {
input_pars <- list_by_row(draws[, , j])
input_pars$log <- TRUE
input_pars$x <- c(data_i)
out[j] <- signif(
do.call(paste0("d", object$model, "mix"), input_pars),
8
)
}
out
}
data <- as.matrix(object$data)
waic <- loo::waic.function(calc_llik_contri,
data = data, draws = samples_long,
...)
}
waic
}
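# A minimal sketch (not run) of the log-likelihood array layout that
# loo::waic() expects when the likelihood contributions are available:
# iterations x chains x observations. The toy dimensions and values below
# are assumptions for illustration only.
if (FALSE) {
  n_iter <- 100; n_chains <- 2; n_obs <- 50
  llik_arr <- array(rnorm(n_iter * n_chains * n_obs, mean = -1, sd = 0.1),
                    dim = c(n_iter, n_chains, n_obs))
  loo::waic(llik_arr) # dispatches to loo::waic.array
}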
#' Leave-one-out cross-validation (LOO) for angmcmc objects
#' @inheritParams waic.angmcmc
#'
#' @examples
#' \donttest{
#' # illustration only - more iterations needed for convergence
#' fit.vmsin.20 <- fit_vmsinmix(tim8, ncomp = 3, n.iter = 20,
#' n.chains = 1, return_llik_contri = TRUE)
#' library(loo)
#' loo(fit.vmsin.20)
#' }
#' @export
#'
#' @details
#' Note that \code{loo.angmcmc} calls \link{loo} for computation. If the likelihood contribution of each data
#' point for each MCMC iteration is available in \code{object} (which can be returned by setting
#' \code{return_llik_contri = TRUE} during the \link{fit_angmix} call), \code{loo.array} is used; otherwise
#' \code{loo.function} is called. Computation is much faster if the likelihood contributions are available -
#' however, they are very memory intensive, and by default not returned in \link{fit_angmix}.
#'
#'
loo.angmcmc <- function(x, ...)
{
object <- x
if (!is.angmcmc(object))
stop("\'x\' must be an angmcmc object")
ell <- list(...)
if (any(!is.null(ell$burnin), !is.null(ell$thin)))
warning("Use of burnin and thin are obsolete. Specify \'burnin.prop\' and \'thin\' during original MCMC run, or use \'add_burnin_thin\'.")
final_iter <- object$final_iter
if (object$return_llik_contri) {
all_llik_contri <- object$llik.contri
llik_for_loo <- aperm(all_llik_contri[, final_iter, , drop = FALSE],
c(2, 3, 1))
r_eff <- loo::relative_eff(exp(llik_for_loo), ...)
looic <- loo::loo.array(llik_for_loo, r_eff = r_eff,
...)
}
else {
samples <- object$par.value[, , final_iter, , drop=FALSE]
chain_id <- unlist(lapply(1:object$n.chains, function(j) rep(j, length(final_iter))))
dim_samples <- dim(samples)
samples_long <- array(c(samples),
dim = c(dim_samples[1:2], dim_samples[3]*dim_samples[4]))
dimnames(samples_long)[[1]] <- object$par.name
calc_llik_contri <- function(data_i, draws) {
N <- dim(draws)[3]
out <- rep(NA, N)
for (j in 1:N) {
input_pars <- list_by_row(draws[, , j])
input_pars$log <- TRUE
input_pars$x <- c(data_i)
out[j] <- signif(
do.call(paste0("d", object$model, "mix"),
input_pars),
8
)
}
out
}
data <- as.matrix(object$data)
r_eff <- loo::relative_eff(calc_llik_contri,
chain_id = chain_id,
data=data,
draws = samples_long)
looic <- loo::loo.function(calc_llik_contri, r_eff = r_eff,
data = data, draws = samples_long,
...)
}
looic
}
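# A minimal sketch (not run) of the r_eff computation used above: the
# relative effective sample sizes are computed on the likelihood scale,
# hence the exp() applied to the log-likelihood array. The toy dimensions
# are assumptions for illustration only.
if (FALSE) {
  n_iter <- 100; n_chains <- 2; n_obs <- 50
  llik_arr <- array(rnorm(n_iter * n_chains * n_obs, mean = -1, sd = 0.1),
                    dim = c(n_iter, n_chains, n_obs))
  r_eff <- loo::relative_eff(exp(llik_arr)) # one value per observation
  loo::loo(llik_arr, r_eff = r_eff)
}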
#' Density and random deviates from an angmcmc object
#' @inheritParams pointest
#' @inheritParams dvmsin
#' @param type Method of estimating density/generating random deviates. Possible choices are
#' \code{"post-pred"} and \code{"point-est"}. See details. Defaults to \code{"point-est"}.
#' @param object angular MCMC object. The dimension of the model must match with \code{x}.
#' @param x vector (if univariate) or a two-column matrix (if bivariate) with each row a 2-D vector
#' (can also be a data frame of similar dimensions) of points where the
#' densities are to be computed.
#' @param n number of observations to be generated.
#' @return \code{d_fitted} gives a vector of the densities computed at the given points and \code{r_fitted}
#' creates a vector (if univariate) or a matrix (if bivariate, with each row being a 2-D point) of random deviates.
#'
#' @details
#'
#' If \code{type = 'point-est'}, density is evaluated/random samples are generated at a point estimate of
#' the parameter values. To estimate the mixture density, first the parameter vector \eqn{\eta} is estimated
#' by applying \code{fn} on the MCMC samples (using the function \link{pointest}), yielding the (consistent) Bayes estimate \eqn{\hat{\eta}}.
#' Then the mixture density \eqn{f(x|\eta)} at any point \eqn{x} is (consistently) estimated by
#' \eqn{f(x|\hat{\eta})}. The random deviates are generated from the estimated mixture density \eqn{f(x|\hat{\eta})}.
#'
#' If \code{type == 'post-pred'}, posterior predictive samples and densities are returned. That
#' is, the average density \eqn{S^{-1} \sum_{s = 1}^S f(x | \eta_s)} is returned in \code{d_fitted},
#' where \eqn{\eta_1, \dots, \eta_S} is the set of posterior MCMC samples obtained from \code{object}. In
#' \code{r_fitted}, first a random sub-sample \eqn{\eta_{(1)}, \dots, \eta_{(n)}} of size \code{n} from the
#' set of posterior samples \eqn{\eta_1, \dots, \eta_S} is drawn (with replacement if \code{n} > S). Then
#' the i-th posterior predictive data point is generated from the mixture density
#' \eqn{f(x|\eta_{(i)})} for i = 1,..., n.
#'
#' @examples
#' set.seed(1)
#' # illustration only - more iterations needed for convergence
#' fit.vmsin.20 <- fit_vmsinmix(tim8, ncomp = 3, n.iter = 20,
#' n.chains = 1)
#' d_fitted(c(0,0), fit.vmsin.20, type = "post-pred")
#' d_fitted(c(0,0), fit.vmsin.20, type = "point-est")
#'
#' r_fitted(10, fit.vmsin.20, type = "post-pred")
#' r_fitted(10, fit.vmsin.20, type = "point-est")
#' @export
d_fitted <- function(x, object, type = "point-est", fn = mean, log=FALSE,
chain.no, ...)
{
if (!is.angmcmc(object))
stop("\'object\' must be an angmcmc object")
ell <- list(...)
if (any(!is.null(ell$burnin), !is.null(ell$thin)))
warning("Use of burnin and thin are obsolete. Specify \'burnin.prop\' and \'thin\' during original MCMC run, or use \'add_burnin_thin\'.")
if (is.data.frame(x))
x <- as.matrix(x)
if(object$type == "bi") {
if((length(dim(x)) < 2 && length(x) != 2) || (length(dim(x)) == 2 && tail(dim(x), 1) != 2)
|| (length(dim(x)) > 2)) stop("x must either be a bivariate vector or a two-column matrix")
}
if (!type %in% c("post-pred", "point-est")) {
stop ("\'type\' must either be \'post-pred\' or \'point-est\'")
}
inargs <- list(x = x)
if (object$model %in% c("wnorm", "wnorm2"))
inargs$int.displ <- object$int.displ
else if (object$model == "vmcos")
inargs$qrnd_grid <- object$qrnd_grid
if (type == "point-est") {
est <- pointest(object, fn = fn)
inargs <- c(list_by_row(est), inargs)
inargs$log <- log
out <- signif(suppressWarnings(do.call(paste0("d", object$model, "mix"), inargs)), 8)
} else {
samp <- extractsamples(object, chain.no = chain.no, drop = FALSE)
dim_samp <- dim(samp)
nsamp <- dim_samp[3]*dim_samp[4]
samp_coll <- samp
dim(samp_coll) <- c(dim_samp[1:2], nsamp)
dimnames(samp_coll)[1:2] <- dimnames(samp)[1:2]
inargs$log <- FALSE
out_list <- lapply(seq_len(nsamp),
function(j) {
inargs1 <- c(list_by_row(as.matrix(samp_coll[, , j])), inargs)
signif(
c(suppressWarnings(do.call(paste0("d",
object$model,
"mix"), inargs1))),
8
)
})
out <- Reduce('+', out_list)/nsamp
if (log) out <- log(out)
}
out
}
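# A minimal sketch (not run) contrasting the two 'type' choices in d_fitted,
# using the univariate von Mises density dvm() from this package and a toy
# set of posterior draws of kappa (an assumption for illustration):
# 'point-est' plugs a parameter estimate into the density, whereas
# 'post-pred' averages the density over the posterior draws.
if (FALSE) {
  set.seed(1)
  kappa_draws <- rgamma(500, shape = 4, rate = 2) # toy posterior draws
  x0 <- 1
  den_point_est <- dvm(x0, kappa = mean(kappa_draws), mu = 0)
  den_post_pred <- mean(dvm(x0, kappa = kappa_draws, mu = 0))
  c(den_point_est, den_post_pred) # differ in general (Jensen's inequality)
}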
#' @rdname d_fitted
#' @export
r_fitted <- function(n=1, object, type = "point-est", fn = mean,
chain.no, ...)
{
if (!is.angmcmc(object))
stop("\'object\' must be an angmcmc object")
ell <- list(...)
if (any(!is.null(ell$burnin), !is.null(ell$thin)))
warning("Use of burnin and thin are obsolete. Specify \'burnin.prop\' and \'thin\' during original MCMC run, or use \'add_burnin_thin\'.")
if (!type %in% c("post-pred", "point-est")) {
stop ("\'type\' must either be \'post-pred\' or \'point-est\'")
}
inargs <- list()
if (object$model == "vmcos") {
inargs$qrnd_grid <- object$qrnd_grid
}
if (object$model %in% c("vmcos", "vmsin")) {
inargs$method <- "naive"
}
if (type == "point-est") {
est <- pointest(object, fn = fn, chain.no = chain.no)
inargs <- c(list_by_row(as.matrix(est)), inargs)
inargs$n <- n
out <- suppressWarnings(do.call(paste0("r", object$model, "mix"), inargs))
} else {
samp <- extractsamples(object, chain.no = chain.no, drop = FALSE)
dim_samp <- dim(samp)
nsamp <- dim_samp[3]*dim_samp[4]
samp_coll <- samp
dim(samp_coll) <- c(dim_samp[1:2], nsamp)
dimnames(samp_coll)[1:2] <- dimnames(samp)[1:2]
ids <- sample(seq_len(dim_samp[3]*dim_samp[4]), size = n,
replace = n > nsamp)
if (object$type == "bi") {
out_row <- numeric(2)
} else {
out_row <- numeric(1)
}
out <- vapply(ids,
function(j) {
inargs1 <- c(list_by_row(as.matrix(samp_coll[, , j])), inargs)
inargs1$n <- 1
c(suppressWarnings(do.call(paste0("r",
object$model,
"mix"), inargs1)))
},
out_row)
if (object$type == "bi") out <- t(out)
}
out
}
#' Extract Log-Likelihood from angmcmc objects
#' @inheritParams pointest
#' @inheritParams stats::logLik
#' @param method integer specifying the method of estimating the log likelihood. Must be 1 or 2. Defaults to 1. See details.
#' @param fn function to evaluate on the iteration-wise log-likelihood values obtained during MCMC run if \code{method = 1}; or,
#' if \code{method = 2}, function to evaluate on the MCMC samples for parameter estimation (passed to \link{pointest}).
#' Defaults to \code{max} if \code{method = 1} and \code{"MODE"} if \code{method = 2}.
#'
#' @details
#'
#' There are two ways to estimate the log likelihood from the model. If \code{method = 1},
#' then the log likelihood is estimated by applying \code{fn} (defaults to \code{max} if \code{method = 1})
#' directly on the log likelihood values observed during the MCMC run.
#' On the other hand, if \code{method = 2}, then parameter estimates
#' are first computed using \code{pointest} with \code{fn}
#' (defaults to \code{"MODE"} if \code{method = 2}) applied on the MCMC samples,
#' and then the log likelihood is evaluated at the parameter estimates.
#'
#'
#' The degrees of freedom of the likelihood function equals the total number of free parameters estimated in the mixture model,
#' which is equal to \eqn{6K - 1} for bivariate models (vmsin, vmcos and wnorm2), or \eqn{3K - 1} for univariate
#' models (vm and wnorm), where \eqn{K} denotes the number of components in the mixture model.
#'
#' @return Returns an object of class \link{logLik}. This is a number (the estimated log likelihood) with attributes "df"
#' (degrees of freedom) and "nobs" (number of observations).
#'
#' @examples
#' # illustration only - more iterations needed for convergence
#' fit.vmsin.20 <- fit_vmsinmix(tim8, ncomp = 3, n.iter = 20,
#' n.chains = 1)
#' logLik(fit.vmsin.20)
#' @export
logLik.angmcmc <- function(object, method = 1, fn, ...)
{
if (!is.angmcmc(object))
stop("\'object\' must be an angmcmc object")
if (!method %in% c(1, 2))
stop("method must be either 1 or 2")
if (missing(fn)) {
if (method == 1) fn <- "max"
else fn <- "MODE"
}
do_MAP <- FALSE
if (is.character(fn)) {
if (fn %in% c("MODE", "MAP")) {
if (method == 1)
stop("fn can be \'MODE\' or \'MAP\' only when method = 2")
else do_MAP <- TRUE
}
}
if (!do_MAP) fn <- match.fun(fn)
ell <- list(...)
if (any(!is.null(ell$burnin), !is.null(ell$thin)))
warning("Use of burnin and thin are obsolete. Specify \'burnin.prop\' and \'thin\' during original MCMC run, or use \'add_burnin_thin\'.")
final_iter <- object$final_iter
if (method == 2) {
all_den <- d_fitted(object$data, object, fn = fn, log=TRUE)
llik <- sum(all_den)
} else {
llik <- fn(object$llik[final_iter, ])
}
if(object$type == "uni") {
df <- 3*object$ncomp - 1
} else if (object$cov.restrict == "ZERO") {
df <- 5*object$ncomp - 1 #the cov terms aren't present
} else {
df <- 6*object$ncomp - 1
}
n_data <- object$n.data
result <- llik
attributes(result) <- list("df" = df, "nobs" = n_data)
class(result) <- "logLik"
result
}
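# A minimal sketch (not run) showing how the "df" and "nobs" attributes of
# the returned logLik object can be used to compute AIC and BIC by hand;
# 'fit' is a hypothetical angmcmc object.
if (FALSE) {
  ll <- logLik(fit)
  df <- attr(ll, "df")     # 3K - 1 (univariate) or 6K - 1 (bivariate)
  nobs <- attr(ll, "nobs")
  c(AIC = 2 * df - 2 * as.numeric(ll),
    BIC = log(nobs) * df - 2 * as.numeric(ll))
}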
#' Log Marginal Likelihood via Bridge Sampling for angmcmc objects
#' @param samples angmcmc object
#' @param ... additional argument passed to \link{bridge_sampler}. Note that default for
#' the argument \code{method} is \code{"warp3"}, (instead of \code{"normal"} as used in \code{bridgesampling} package)
#' to account for multi-modality of the posterior density.
#' @param ave_over_chains logical. Separately call \link{bridge_sampler} on
#' each chain in the angmcmc object and then take the average? Defaults to \code{TRUE}.
#' See details.
#'
#' @details
#' Marginal likelihood is calculated by first converting the \code{angmcmc} object \code{samples} to an
#' \code{mcmc.list} object, and then by passing the resulting \code{mcmc.list} object to \link{bridge_sampler}.
#' If the variability across multiple chains (if any) is very different,
#' then calling \link{bridge_sampler} separately for each chain
#' usually provides more stable results; the final log ML is computed by averaging over
#' the chain-specific MLs.
#'
#' @importFrom bridgesampling bridge_sampler
#'
#' @examples
#' \donttest{
#' library(future)
#' library(parallel)
#' # plan(multisession, gc = TRUE) # parallelize chains
#'
#' set.seed(100)
#' MC.fit <- fit_angmix("vmsin", tim8, ncomp=3, n.iter=5000,
#' n.chains = 3)
#'
#'
#' library(bridgesampling)
#' bridge_sampler(MC.fit)
#' }
#'
#' @export
bridge_sampler.angmcmc <- function(samples, ..., ave_over_chains = TRUE)
{
object <- samples
if (!is.angmcmc(object))
stop("\'samples\' must be an angmcmc object")
ell <- list(...)
if (is.null(ell$method)) {
ell$method <- "warp3"
}
n.chains <- object$n.chains
if (object$model == "wnorm2") {
kappa_tmp <- object$par.value
sig_tmp <-
apply(kappa_tmp, 2:4,
function(x) c(x[1],
unname(kappas2sigmas_wnorm2(x[2], x[3], x[4])),
x[5:6]))
names(sig_tmp) <- names(kappa_tmp)
object$par.value <- sig_tmp
}
object_mcmc <- coda::as.mcmc.list(object)
object_mcmc_matrix <- as.matrix(object_mcmc)
gam.loc <- object$gam.loc
gam.rate <- 1/object$gam.scale
norm.var <- object$norm.var
if (object$type == "bi" & length(norm.var) == 1) norm.var <- rep(norm.var, 3)
if (object$ncomp == 1) {
calc_lpd <- function(par_vec, data) {
# reparametrize if wnorm2
if (object$model == "wnorm2") {
par_vec <- c(sigmas2kappas_wnorm2(par_vec[1], par_vec[2], par_vec[3]),
par_vec[4:5])
}
allpar_mat <- as.matrix(par_vec)
rownames(allpar_mat) <- object$par.name[-1]
inargs <- list_by_row(allpar_mat)
inargs$x <- data
if (grepl("wnorm", object$model))
inargs$int.displ <- object$int.displ
if (object$model == "vmcos")
inargs$qrnd_grid <- object$qrnd_grid
inargs$log <- TRUE
llik <- sum(signif(do.call(paste0("d", object$model), inargs), 8))
if (object$type == "bi") {
lprior <- sum(-0.5*c((log(allpar_mat[1, ]))^2/norm.var[1],
(log(allpar_mat[2, ]))^2/norm.var[2],
((allpar_mat[3, ]))^2/norm.var[3]))
} else {
lprior <- sum(-0.5*log(allpar_mat[1, ])^2/norm.var)
}
unname(llik+lprior)
}
# lower and upper limits for reparameterized wnorm2
if (object$model == "wnorm2") {
lower <- object$modelpar.lower
upper <- object$modelpar.upper
upper[1:2, ] <- 1e5
lower[3, ] <- -(1-1e-7)
upper[3, ] <- (1-1e-7)
lower <- c(lower)
upper <- c(upper)
}
else {
lower <- c(object$modelpar.lower)
upper <- c(object$modelpar.upper)
}
names(lower) <- names(upper) <- colnames(object_mcmc_matrix)
if(n.chains == 1 || !ave_over_chains) {
tmp_mat <- do.call(rbind, lapply(1:n.chains,
function(j)
as.matrix(object_mcmc[[j]])))
do.call(bridgesampling::bridge_sampler,
c(list(samples=tmp_mat,
log_posterior=calc_lpd,
data=object$data,
lb=lower, ub=upper),
ell))
}
else {
all_bridge_samp <- vector("list", n.chains)
for (j in 1:n.chains) {
# tmp <- coda::as.mcmc.list(select_chains(object, j))
tmp_mat <- as.matrix(object_mcmc[[j]])
if (!any(ell$silent)) {
cat(paste0("\napplying \'bridge_sampler\' on chain ", j, "...\n\n"))
}
all_bridge_samp[[j]] <-
do.call(bridgesampling::bridge_sampler,
c(list(samples=tmp_mat,
log_posterior=calc_lpd,
data=object$data,
lb=lower, ub=upper),
ell))
}
final_res <- all_bridge_samp[[1]]
final_res$logml <- mean(vapply(all_bridge_samp,
function(x) x$logml, 0))
final_res$niter <- max(vapply(all_bridge_samp,
function(x) x$niter, 0))
final_res$method <- all_bridge_samp[[1]]$method
final_res$q11 <- unlist(lapply(all_bridge_samp,
function(x) x$q11))
final_res$q12 <- unlist(lapply(all_bridge_samp,
function(x) x$q12))
final_res$q21 <- unlist(lapply(all_bridge_samp,
function(x) x$q21))
final_res$q22 <- unlist(lapply(all_bridge_samp,
function(x) x$q22))
final_res$all <- all_bridge_samp
final_res
}
}
else {
calc_lpd <- function(par_vec, data) {
allpar_mat <- matrix(par_vec,
nrow = length(object$par.name))
# reparametrize if wnorm2
if (object$model == "wnorm2") {
allpar_mat <- apply(allpar_mat, 2,
function(x) c(x[1],
unname(sigmas2kappas_wnorm2(x[2], x[3], x[4])),
x[5:6]))
}
allpar_mat[1, ] <- allpar_mat[1, ]/sum(allpar_mat[1, ])
rownames(allpar_mat) <- object$par.name
inargs <- list_by_row(allpar_mat)
inargs$x <- data
if (grepl("wnorm", object$model))
inargs$int.displ <- object$int.displ
if (object$model == "vmcos")
inargs$qrnd_grid <- object$qrnd_grid
inargs$log <- TRUE
    llik <- suppressWarnings(
      sum(
        signif(do.call(paste0("d", object$model, "mix"), inargs), 8)
      )
    )
par_mat <- allpar_mat[-1, ]
pmix_vec <- allpar_mat[1, ]
if (object$type == "bi") {
lprior <- sum(-0.5*c((log(allpar_mat[1, ]))^2/norm.var[1],
(log(allpar_mat[2, ]))^2/norm.var[2],
((allpar_mat[3, ]))^2/norm.var[3])) +
sum(object$pmix.alpha*log(pmix_vec))
} else {
lprior <- sum(-0.5*log(allpar_mat[1, ])^2/norm.var) +
sum(object$pmix.alpha*log(pmix_vec))
}
# if (object$type == "bi") {
# lprior <- sum((gam.loc - 1)*log(par_mat[1:2, ]) -
# gam.rate*par_mat[1:2, ]) -
# 0.5*sum(par_mat[3, ]^2)/object$norm.var +
# sum(object$pmix.alpha*log(pmix_vec))
# } else {
# lprior <- sum((gam.loc - 1)*log(allpar_mat[1, ]) -
# gam.rate*allpar_mat[1, ]) +
# sum(object$pmix.alpha*log(pmix_vec))
# }
unname(llik+lprior)
}
# lower and upper limits for reparameterized wnorm2
if (object$model == "wnorm2") {
lower <- rbind(0, object$modelpar.lower)
upper <- rbind(Inf, object$modelpar.upper)
upper[2:3, ] <- 1e5
lower[4, ] <- -(1-1e-7)
upper[4, ] <- (1-1e-7)
lower <- c(lower)
upper <- c(upper)
}
else {
lower <- c(rbind(0, object$modelpar.lower))
upper <- c(rbind(Inf, object$modelpar.upper))
}
names(lower) <- names(upper) <-
colnames(object_mcmc_matrix)
if (n.chains == 1 || !ave_over_chains) {
tmp_mat <- do.call(rbind, lapply(1:n.chains,
function(j)
as.matrix(object_mcmc[[j]])))
do.call(bridgesampling::bridge_sampler,
c(list(samples=tmp_mat,
log_posterior=calc_lpd,
data=object$data,
lb=lower, ub=upper),
ell))
}
else {
all_bridge_samp <- vector("list", n.chains)
for (j in 1:n.chains) {
# tmp <- coda::as.mcmc.list(select_chains(object, j))
tmp_mat <- as.matrix(object_mcmc[[j]])
if (!any(ell$silent)) {
cat(paste0("\napplying \'bridge_sampler\' on chain ", j, "...\n\n"))
}
all_bridge_samp[[j]] <-
do.call(bridgesampling::bridge_sampler,
c(list(samples=tmp_mat,
log_posterior=calc_lpd,
data=object$data,
lb=lower, ub=upper),
ell))
}
final_res <- all_bridge_samp[[1]]
final_res$logml <- mean(vapply(all_bridge_samp,
function(x) x$logml, 0))
final_res$niter <- max(vapply(all_bridge_samp,
function(x) x$niter, 0))
final_res$method <- all_bridge_samp[[1]]$method
final_res$q11 <- unlist(lapply(all_bridge_samp,
function(x) x$q11))
final_res$q12 <- unlist(lapply(all_bridge_samp,
function(x) x$q12))
final_res$q21 <- unlist(lapply(all_bridge_samp,
function(x) x$q21))
final_res$q22 <- unlist(lapply(all_bridge_samp,
function(x) x$q22))
final_res$all <- all_bridge_samp
final_res
}
}
}
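# A minimal sketch (not run) of the chain-averaging step above: with
# ave_over_chains = TRUE, bridge_sampler is run once per chain and the
# reported log ML is the mean of the chain-specific estimates. The list
# below is a toy stand-in for 'all_bridge_samp'.
if (FALSE) {
  all_bridge_samp <- list(list(logml = -1012.3),
                          list(logml = -1011.8),
                          list(logml = -1012.6)) # toy chain-specific fits
  mean(vapply(all_bridge_samp, function(x) x$logml, 0)) # final logml
}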
#' Finding latent allocation (component indicators) from an angmcmc object
#' @inheritParams pointest
#' @param ... passed to \link{pointest} to estimate parameters.
#'
#' @details
#' In order to find the latent component indicators, estimates
#' of mixing proportions and model parameters are first computed via
#' pointest. Then, a data point is assigned label j, if the j-th
#' component gives highest density for that point.
#'
#' @return
#' Returns a vector of length n, where n is the length (if univariate) or
#' number of rows (if bivariate) of the data used in original fit.
#' i-th entry of the output vector provides component label for the i-th data point.
#'
#' @examples
#' # first fit a vmsin mixture model
#' # illustration only - more iterations needed for convergence
#' fit.vmsin.20 <- fit_vmsinmix(tim8, ncomp = 3, n.iter = 20,
#' n.chains = 1)
#' # now find latent allocation
#' latent_allocation(fit.vmsin.20)
#' @export
latent_allocation <- function(object, ...)
{
if (!is.angmcmc(object))
stop("object must be an \'angmcmc\' object")
ell <- list(...)
if (any(!is.null(c(ell$comp.label, ell$par.name)))) {
warning("\'par.name\' and \'comp.label\' are not used")
ell$comp.label <- NULL
ell$par.name <- NULL
}
if (object$ncomp > 1) {
parest <- pointest(object, ...)
pi.mix <- parest[1, ]
par.mat <- parest[-1, ]
if (object$model == "vmsin") {
l_c_vmsin <- log_const_vmsin_all(par.mat)
mem_p_model <- mem_p_sin(object$data, par.mat, pi.mix, l_c_vmsin)
}
else if (object$model == "vmcos") {
l_c_vmcos <- log_const_vmcos_all(par.mat, object$qrnd_grid)
mem_p_model <- mem_p_cos(object$data, par.mat, pi.mix, l_c_vmcos)
}
else if (object$model == "wnorm2") {
l_c_wnorm2 <- log_const_wnorm2_all(par.mat)
mem_p_model <- mem_p_wnorm2(object$data, par.mat, pi.mix, l_c_wnorm2, object$omega.2pi)
}
else if (object$model == "vm") {
l_c_vm <- log_const_univm_all(par.mat)
mem_p_model <- mem_p_univm(object$data, par.mat, pi.mix, l_c_vm)
}
else {
l_c_wnorm <- log_const_uniwnorm_all(par.mat)
mem_p_model <- mem_p_uniwnorm(object$data, par.mat, pi.mix, l_c_wnorm, object$omega.2pi)
}
labs <- apply(mem_p_model, 1, which.max_entry1)
}
else {
warning("\'object\' has only one component. A vector of all 1\'s will is returned")
labs <- rep(1, object$n.data)
}
labs
}
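# A minimal sketch (not run) of the final assignment step above: given an
# n x K matrix of component membership probabilities, each observation gets
# the label of the column with the highest probability. 'mem_p' is a toy
# stand-in for the model-specific mem_p_* output.
if (FALSE) {
  mem_p <- matrix(c(0.7, 0.2, 0.1,
                    0.1, 0.3, 0.6),
                  nrow = 2, byrow = TRUE)
  apply(mem_p, 1, which.max) # c(1, 3)
}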
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/BAMBI/R/all_postprodn_fns.R ----
# list of functions
#' The univariate von Mises distribution
#' @inheritParams rvmsin
#' @param x vector of angles (in radians) where the densities are to be evaluated.
#' @param mu vector of means.
#' @param kappa vector of concentration (inverse-variance) parameters; \code{kappa} > 0.
#' @details If \code{mu} and \code{kappa} are not specified they assume the default values of \code{0} and \code{1} respectively.
#' @details The univariate von Mises distribution has density
#' \deqn{f(x) = 1/(2\pi I_0 (\kappa)) \exp(\kappa \cos(x - \mu))}
#' where \eqn{I_0 (\kappa)} denotes the modified Bessel function of the first kind with order 0 evaluated at the point \eqn{\kappa}.
#' @return \code{dvm} gives the density and \code{rvm} generates random deviates.
#'
#' @examples
#'
#' kappa <- 1:3
#' mu <- 0:2
#' x <- 1:10
#' n <- 10
#'
#'
#' # when x and both parameters are scalars, dvm returns a single density
#' dvm(x[1], kappa[1], mu[1])
#'
#' # when x is a vector but both the parameters are scalars, dvm returns a vector of
#' # densities calculated at each entry of x with the same parameters
#' dvm(x, kappa[1], mu[1])
#'
#' # if x is scalar and at least one of the two parameters is a vector, both parameters are
#' # recycled to the same length, and dvm returns a vector with ith element being the
#' # density evaluated at x with parameter values kappa[i] and mu[i]
#' dvm(x[1], kappa, mu)
#'
#' # if x and at least one of the two parameters is a vector, x and the two parameters are
#' # recycled to the same length, and dvm returns a vector with ith element being the
#' # density at ith element of the (recycled) x with parameter values kappa[i] and mu[i]
#' dvm(x, kappa, mu)
#'
#' # when parameters are all scalars, number of observations generated by rvm is n
#' rvm(n, kappa[1], mu[1])
#'
#' # when at least one of the two parameters is a vector, both are recycled to the same length,
#' # n is ignored, and the number of observations generated by rvm is the same as the length of
#' # the recycled vectors
#' rvm(n, kappa, mu)
#'
#' @export
rvm <- function(n, kappa = 1, mu = 0)
{
if(any(kappa < 0)) stop("kappa must be non_negative")
if(any(mu < 0 | mu >= 2*pi)) mu <- prncp_reg(mu)
if(max(length(kappa), length(mu)) > 1) {
expanded <- expand_args(kappa, mu)
kappa <- expanded[[1]]; mu <- expanded[[2]]
m <- length(kappa)
out <- rep(0, m)
for(j in 1:m) {
if (kappa[j] > 1e-7) {
out[j] <- c(runivm_onepar(1, kappa[j], mu[j]))
} else {
out[j] <- runif(1, 0, 2*pi)
}
}
} else {
    if (kappa > 1e-10) {
out <- c(runivm_onepar(n, kappa, mu))
} else {
out <- runif(n, 0, 2*pi)
}
}
out
}
#' @rdname rvm
#' @export
dvm <- function(x, kappa = 1, mu = 0, log = FALSE)
{
if(any(kappa < 0)) stop("kappa must be non-negative")
if(any(mu < 0 | mu >= 2*pi)) mu <- prncp_reg(mu)
# if(max(length(kappa), length(mu)) > 1) {
# expanded <- expand_args(kappa, mu)
# kappa <- expanded[[1]]; mu <- expanded[[2]]
#
# if(length(x) > 1) {
# x_set <- 1:length(x)
# par_set <- 1:length(kappa)
# expndn_set <- expand_args(x_set, par_set)
# x_set <- expndn_set[[1]]
# par_set <- expndn_set[[2]]
# } else{
# den <- as.vector(dunivm_onex_manypar(x, kappa, mu))
# }
#
# } else {
# if(length(x) > 1){
# den <- as.vector(dunivm_manyx_onepar(as.vector(x), kappa, mu))
# } else{
# den <- exp(ldunivmnum(as.vector(x), c(kappa, mu))) / const_univm(kappa)
# }
# }
if(max(length(kappa), length(mu)) > 1) {
expanded <- expand_args(kappa, mu)
kappa <- expanded[[1]]; mu <- expanded[[2]]
}
par.mat <- rbind(kappa, mu)
n_par <- ncol(par.mat)
n_x <- length(x)
if (n_par == 1) {
den <- c(dunivm_manyx_onepar(as.vector(x), kappa, mu))
} else if (n_x == 1) {
den <- c(dunivm_onex_manypar(x, kappa, mu))
} else {
x_set <- 1:n_x
par_set <- 1:n_par
expndn_set <- expand_args(x_set, par_set)
x_set <- expndn_set[[1]]
par_set <- expndn_set[[2]]
den <- c(dunivm_manyx_manypar(x[x_set], kappa[par_set], mu[par_set]))
}
if (log) den <- log(den)
den
}
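# A minimal sketch (not run) checking dvm() numerically: the density should
# integrate to 1 over [0, 2*pi) and match the closed form
# exp(kappa * cos(x - mu)) / (2 * pi * I_0(kappa)).
if (FALSE) {
  integrate(function(x) dvm(x, kappa = 2, mu = 1), 0, 2 * pi)$value # ~ 1
  all.equal(dvm(0.5, kappa = 2, mu = 1),
            exp(2 * cos(0.5 - 1)) / (2 * pi * besselI(2, 0)))
}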
#' The univariate von Mises mixtures
#' @inheritParams rvm
#' @param mu vector of component means.
#' @param kappa vector of component concentration (inverse-variance) parameters, \code{kappa > 0}.
#' @param pmix vector of mixing proportions.
#' @details \code{pmix}, \code{mu} and \code{kappa} must be of the same length, with \eqn{j}-th element corresponding to the \eqn{j}-th component of the mixture distribution.
#' @details The univariate von Mises mixture distribution with component size \code{K = \link{length}(pmix)} has density
#' \deqn{g(x) = p[1] * f(x; \kappa[1], \mu[1]) + ... + p[K] * f(x; \kappa[K], \mu[K])}
#' where \eqn{p[j], \kappa[j], \mu[j]} respectively denote the mixing proportion, concentration parameter and the mean parameter for the \eqn{j}-th component
#' and \eqn{f(. ; \kappa, \mu)} denotes the density function of the (univariate) von Mises distribution with mean parameter \eqn{\mu} and concentration parameter \eqn{\kappa}.
#' @return \code{dvmmix} computes the density and \code{rvmmix} generates random deviates from the mixture density.
#'
#' @examples
#' kappa <- 1:3
#' mu <- 0:2
#' pmix <- c(0.3, 0.3, 0.4)
#' x <- 1:10
#' n <- 10
#'
#' # mixture densities calculated at each point in x
#' dvmmix(x, kappa, mu, pmix)
#'
#' # number of observations generated from the mixture distribution is n
#' rvmmix(n, kappa, mu, pmix)
#'
#' @export
rvmmix <- function(n, kappa, mu, pmix)
{
allpar <- list(kappa=kappa, mu=mu, pmix=pmix)
allpar_len <- listLen(allpar)
if(min(allpar_len) != max(allpar_len))
stop("component size mismatch: number of components of the input parameter vectors differ")
if(any(allpar$pmix < 0)) stop("\'pmix\' must be non-negative")
sum_pmix <- sum(allpar$pmix)
if(signif(sum_pmix, 4) != 1) {
if(sum_pmix <= 0) stop("\'pmix\' must have at least one positive element")
allpar$pmix <- allpar$pmix/sum_pmix
warning("\'pmix\' is rescaled to add up to 1")
}
if(any(allpar$kappa < 0)) stop("kappa must be non-negative")
if(any(allpar$mu < 0 | allpar$mu >= 2*pi)) allpar$mu <- prncp_reg(allpar$mu)
out <- rep(0, n)
ncomp <- allpar_len[1] # number of components
comp_ind <- cID(tcrossprod(rep(1, n), allpar$pmix), ncomp, runif(n))
# n samples from multinom(ncomp, pmix)
for(j in seq_len(ncomp)) {
obs_ind_j <- which(comp_ind == j)
n_j <- length(obs_ind_j)
if(n_j > 0) {
out[obs_ind_j] <- rvm(n_j, kappa[j], mu[j])
}
}
out
}
#' @rdname rvmmix
#' @export
dvmmix <- function(x, kappa, mu, pmix, log=FALSE)
{
allpar <- list(kappa=kappa, mu=mu, pmix=pmix)
allpar_len <- listLen(allpar)
if(min(allpar_len) != max(allpar_len))
stop("component size mismatch: number of components of the input parameter vectors differ")
if(any(allpar$pmix < 0)) stop("\'pmix\' must be non-negative")
sum_pmix <- sum(allpar$pmix)
if(signif(sum_pmix, 4) != 1) {
if(sum_pmix <= 0) stop("\'pmix\' must have at least one positive element")
allpar$pmix <- allpar$pmix/sum_pmix
warning("\'pmix\' is rescaled to add up to 1")
}
if(any(allpar$pmix < 0)) stop("pmix must be non-negative")
if(sum(allpar$pmix) != 1) {
allpar$pmix <- allpar$pmix/sum(allpar$pmix)
warning("\'pmix\' is rescaled to add up to 1")
}
if(any(allpar$kappa < 0)) stop("kappa must be non-negative")
if(any(allpar$mu < 0 | allpar$mu >= 2*pi)) allpar$mu <- prncp_reg(allpar$mu)
ncomp <- length(kappa)
allcompden <- vapply(1:ncomp,
function(j) dvm(x, kappa[j], mu[j], FALSE),
rep(0, length(x)))
  mixden <- c(allcompden %*% allpar$pmix) # use the (possibly rescaled) mixing proportions
if (log) {
log(mixden)
} else {
mixden
}
}
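# A minimal sketch (not run) verifying that the mixture density equals the
# pmix-weighted sum of the component von Mises densities.
if (FALSE) {
  kappa <- 1:3; mu <- 0:2; pmix <- c(0.3, 0.3, 0.4)
  x0 <- 1.5
  all.equal(dvmmix(x0, kappa, mu, pmix),
            sum(pmix * dvm(x0, kappa, mu)))
}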
#' Fitting univariate von Mises mixtures using MCMC
#' @inheritParams fit_vmsinmix
#'
#' @details
#' Wrapper for \link{fit_angmix} with \code{model = "vm"}.
#'
#'
#'
#' @examples
#' # illustration only - more iterations needed for convergence
#' fit.vm.20 <- fit_vmmix(wind$angle, ncomp = 3, n.iter = 20,
#' n.chains = 1)
#' fit.vm.20
#' @export
fit_vmmix <- function(...)
{
fit_angmix(model="vm", ...)
}
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/BAMBI/R/all_vm_fns.R ----
# list of functions
#' The bivariate von Mises cosine model
#' @inheritParams rvmsin
#' @param mu1,mu2 vectors of mean parameters.
#' @param kappa1,kappa2,kappa3 vectors of concentration parameters; \code{kappa1, kappa2 > 0}.
#' @param ... additional arguments to be passed to dvmcos. See details.
#'
#' @details
#' The bivariate von Mises cosine model density at the point \eqn{x = (x_1, x_2)} is given by
#' \deqn{f(x) = C_c (\kappa_1, \kappa_2, \kappa_3) \exp(\kappa_1 \cos(T_1) + \kappa_2 \cos(T_2) + \kappa_3 \cos(T_1 - T_2))}
#' where
#' \deqn{T_1 = x_1 - \mu_1; T_2 = x_2 - \mu_2}
#' and \eqn{C_c (\kappa_1, \kappa_2, \kappa_3)} denotes the normalizing constant for the cosine model.
#'
#' Because \eqn{C_c} involves an infinite alternating series with products of Bessel functions,
#' if \code{kappa3 < -5} or \code{max(kappa1, kappa2, abs(kappa3)) > 50}, \eqn{C_c} is evaluated
#' numerically via a (quasi) Monte Carlo method for
#' numerical stability. The (quasi) random numbers used in the approximation can be provided through the
#' argument \code{qrnd}, which must be a two column matrix, with each element being
#' a (quasi) random number between 0 and 1. Alternatively, if \code{n_qrnd} is
#' provided (and \code{qrnd} is missing), a two dimensional sobol sequence of size \code{n_qrnd} is
#' generated via the function \link{sobol} from the R package \code{qrng}. If none of \code{qrnd}
#' or \code{n_qrnd} is available, a two dimensional sobol sequence of size 1e4 is used. By default Monte
#' Carlo approximation is used only if \code{kappa3 < -5} or \code{max(kappa1, kappa2, abs(kappa3)) > 50}.
#' However, a forced Monte Carlo approximation can be made (irrespective of the choice of \code{kappa1, kappa2} and
#' \code{kappa3}) by setting \code{force_approx_const = TRUE}. See examples.
#'
#' @return \code{dvmcos} gives the density and \code{rvmcos} generates random deviates.
#'
#' @examples
#' kappa1 <- c(1, 2, 3)
#' kappa2 <- c(1, 6, 5)
#' kappa3 <- c(0, 1, 2)
#' mu1 <- c(1, 2, 5)
#' mu2 <- c(0, 1, 3)
#' x <- diag(2, 2)
#' n <- 10
#'
#' # when x is a bivariate vector and parameters are all scalars,
#' # dvmcos returns single density
#' dvmcos(x[1, ], kappa1[1], kappa2[1], kappa3[1], mu1[1], mu2[1])
#'
#' # when x is a two column matrix and parameters are all scalars,
#' # dvmcos returns a vector of densities calculated at the rows of
#' # x with the same parameters
#' dvmcos(x, kappa1[1], kappa2[1], kappa3[1], mu1[1], mu2[1])
#'
#' # if x is a bivariate vector and at least one of the parameters is
#' # a vector, all parameters are recycled to the same length, and
#' # dvmcos returns a vector with ith element being the density
#' # evaluated at x with parameter values kappa1[i], kappa2[i],
#' # kappa3[i], mu1[i] and mu2[i]
#' dvmcos(x[1, ], kappa1, kappa2, kappa3, mu1, mu2)
#'
#' # if x is a two column matrix and at least one of the parameters is
#' # a vector, rows of x and the parameters are recycled to the same
#' # length, and dvmcos returns a vector with ith element being the
#' # density evaluated at ith row of x with parameter values kappa1[i],
#' # kappa2[i], # kappa3[i], mu1[i] and mu2[i]
#' dvmcos(x, kappa1, kappa2, kappa3, mu1, mu2)
#'
#' # when parameters are all scalars, number of observations generated
#' # by rvmcos is n
#' rvmcos(n, kappa1[1], kappa2[1], kappa3[1], mu1[1], mu2[1])
#'
#' # when at least one of the parameters is a vector, all parameters are
#' # recycled to the same length, n is ignored, and the number of
#' # observations generated by rvmcos is the same as the length of the
#' # recycled vectors
#' rvmcos(n, kappa1, kappa2, kappa3, mu1, mu2)
#'
#'
#' \donttest{
#' ## Visualizing (quasi) Monte Carlo based approximations of
#' ## the normalizing constant through density evaluations.
#'
#' # "good" setup, where the analytic formula for C_c can be
#' # calculated without numerical issues
#' # kappa1 = 1, kappa2 = 1, kappa3 = -2, mu1 = pi, mu2 = pi
#'
#' n_qrnd <- (1:500)*20
#' # analytic
#' good.a <- dvmcos(c(3,3), 1, 1, -2, pi, pi, log=TRUE)
#' # using quasi Monte Carlo
#' good.q <- sapply(n_qrnd,
#' function(j)
#' dvmcos(c(3,3), 1, 1, -2, pi, pi,
#' log=TRUE, n_qrnd = j,
#' force_approx_const = TRUE))
#' # using ordinary Monte Carlo
#' set.seed(1)
#' good.r <- sapply(n_qrnd,
#' function(j)
#' dvmcos(c(3,3), 1, 1, -2, pi, pi,
#' log=TRUE,
#' qrnd = matrix(runif(2*j), ncol = 2),
#' force_approx_const = TRUE))
#'
#'
#' plot(n_qrnd, good.q, ylim = range(good.a, good.q, good.r),
#' col = "orange", type = "l",
#' ylab = "",
#' main = "dvmcos(c(3,3), 1, 1, -2, pi, pi, log = TRUE)")
#' points(n_qrnd, good.r, col = "skyblue", type = "l")
#' abline(h = good.a, lty = 2, col = "grey")
#' legend("topright",
#' legend = c("Sobol", "Random", "Analytic"),
#' col = c("orange", "skyblue", "grey"),
#' lty = c(1, 1, 2))
#'
#'
#' # "bad" setup, where the calculating C_c
#' # numerically using the analytic formula is problematic
#' # kappa1 = 100, kappa2 = 100, kappa3 = -200, mu1 = pi, mu2 = pi
#'
#' n_qrnd <- (1:500)*20
#'
#' # using quasi Monte Carlo
#' bad.q <- sapply(n_qrnd,
#' function(j)
#' dvmcos(c(3,3), 100, 100, -200, pi, pi,
#' log=TRUE, n_qrnd = j,
#' force_approx_const = TRUE))
#' # using ordinary Monte Carlo
#' set.seed(1)
#' bad.r <- sapply(n_qrnd,
#' function(j)
#' dvmcos(c(3,3), 100, 100, -200, pi, pi,
#' log=TRUE,
#' qrnd = matrix(runif(2*j), ncol = 2),
#' force_approx_const = TRUE))
#'
#'
#' plot(n_qrnd, bad.q, ylim = range(bad.q, bad.r),
#' col = "orange", type = "l",
#' ylab = "",
#' main = "dvmcos(c(3,3), 100, 100, -200, pi, pi, log = TRUE)")
#' points(n_qrnd, bad.r, col = "skyblue", type = "l")
#' legend("topright",
#' legend = c("Sobol", "Random"),
#' col = c("orange", "skyblue"), lty = 1)
#'}
#'
#' @export
rvmcos <- function(n, kappa1=1, kappa2=1,
kappa3=0, mu1=0, mu2=0,
method="naive")
{
if(any(c(kappa1, kappa2) < 0)) stop("kappa1 and kappa2 must be nonnegative")
if(any(mu1 < 0 | mu1 >= 2*pi)) mu1 <- prncp_reg(mu1)
if(any(mu2 < 0 | mu2 >= 2*pi)) mu2 <- prncp_reg(mu2)
if(n < 0) stop("invalid n")
# if (is.null(method)) {
# if (n > 1e5) method <- "vmprop"
# else method <- "naive"
# }
if (!method %in% c("naive", "vmprop"))
stop("method must be either \'naive\' or \'vmprop\'")
if(max(length(kappa1), length(kappa2), length(kappa3),
length(mu1), length(mu2)) > 1) {
expanded <- expand_args(kappa1, kappa2, kappa3, mu1, mu2)
k1 <- expanded[[1]]; k2 <- expanded[[2]]; k3 <- expanded[[3]]
mu1 <- expanded[[4]]; mu2 <- expanded[[5]]
t(vapply(1:length(k1),
function(j) rvmcos_1par(1, k1[j], k2[j], k3[j],
mu1[j], mu2[j], "naive"), c(0, 0)))
} else {
# if (is.null(method) & max(kappa1, kappa2, abs(kappa3)) < 0.1)
# method <- "vmprop"
rvmcos_1par(n, kappa1, kappa2, kappa3, mu1, mu2, method)
}
}
# rvmcos <- function(n, kappa1=1, kappa2=1, kappa3=0,
# mu1=0, mu2=0) {
# if(any(c(kappa1, kappa2) < 0)) stop("kappa1 and kappa2 must be nonnegative")
# if(any(mu1 < 0 | mu1 >= 2*pi)) mu1 <- prncp_reg(mu1)
# if(any(mu2 < 0 | mu2 >= 2*pi)) mu2 <- prncp_reg(mu2)
#
# opt_obj <- function(k1=1, k2=1, k3=0, mu1=0, mu2=0) {
# # for numerical stability, if k3 < 0, fabs(k1+k3) < 1e-5 or fabs(k2+k3) < 1e-5
# # make k3 = k3 * (1+1e-5)
# if (k3 < 0) {
# while (abs(k1 + k3) < 1e-5 || abs(k2 + k3) < 1e-5) {
# k3 = k3*(1+1e-5)
# }
# }
# obj <- optim(c(0,0), fn = function(x) -(k1*cos(x[1]-mu1)+k2*cos(x[2]-mu2)+k3*cos(x[1]-x[2]-mu1+mu2)),
# gr = function(x) -c(-k1*sin(x[1]-mu1)-k3*sin(x[1]-x[2]-mu1+mu2),
# -k2*sin(x[2]-mu2)+k3*sin(x[1]-x[2]-mu1+mu2)))
# -obj$value
#
# }
#
# if(max(length(kappa1), length(kappa2), length(kappa3), length(mu1), length(mu2)) > 1) {
# expanded <- expand_args(kappa1, kappa2, kappa3, mu1, mu2)
# k1 <- expanded[[1]]; k2 <- expanded[[2]]; k3 <- expanded[[3]]
# mu1 <- expanded[[4]]; mu2 <- expanded[[5]]
# upper_bd_all <- vapply(1:length(k1),
# function(h) opt_obj(k1[h], k2[h], k3[h], mu1[h], mu2[h]),
# 0)
# rcos_manypar(k1, k2, k3, mu1, mu2, upper_bd_all)
# } else {
# upper_bd <- opt_obj(kappa1, kappa2, kappa3, mu1, mu2)
# qrnd_grid <- sobol(1e4, 2, FALSE)
# cat(exp(log(const_vmcos(kappa1, kappa2, kappa3,
# qrnd_grid))
# - upper_bd)/(4*pi^2))
# rcos_onepar(n, kappa1, kappa2, kappa3, mu1, mu2, upper_bd)
# }
# }
#' @rdname rvmcos
#' @importFrom qrng sobol
#' @export
dvmcos <- function(x, kappa1=1, kappa2=1, kappa3=0, mu1=0,
mu2=0, log=FALSE, ...) {
if (any(c(kappa1, kappa2) < 0)) stop("kappa1 and kappa2 must be nonnegative")
if (any(mu1 < 0 | mu1 >= 2*pi)) mu1 <- prncp_reg(mu1)
if (any(mu2 < 0 | mu2 >= 2*pi)) mu2 <- prncp_reg(mu2)
if ((length(dim(x)) < 2 && length(x) != 2) || (length(dim(x)) == 2 && tail(dim(x), 1) != 2)
|| (length(dim(x)) > 2)) stop("x must either be a bivariate vector or a two-column matrix")
ell <- list(...)
if (!is.null(ell$qrnd)) {
qrnd_grid <- ell$qrnd
dim_qrnd <- dim(qrnd_grid)
if (!is.matrix(qrnd_grid) | is.null(dim_qrnd) |
dim_qrnd[2] != 2)
stop("\'qrnd\' must be a two column matrix")
n_qrnd <- dim_qrnd[1]
} else if (!is.null(ell$n_qrnd)){
n_qrnd <- round(ell$n_qrnd)
if (n_qrnd < 1)
stop("n_qrnd must be a positive integer")
qrnd_grid <- sobol(n_qrnd, 2, FALSE)
} else {
n_qrnd <- 1e4
qrnd_grid <- sobol(n_qrnd, 2, FALSE)
}
if(max(length(kappa1), length(kappa2), length(kappa3), length(mu1), length(mu2)) > 1) {
expanded <- expand_args(kappa1, kappa2, kappa3, mu1, mu2)
kappa1 <- expanded[[1]]
kappa2 <- expanded[[2]]
kappa3 <- expanded[[3]]
mu1 <- expanded[[4]]
mu2 <- expanded[[5]]
}
par.mat <- rbind(kappa1, kappa2, kappa3, mu1, mu2)
n_par <- ncol(par.mat)
if (length(x) == 2) x <- matrix(x, nrow = 1)
n_x <- nrow(x)
if (is.null(ell$force_approx_const)) {
force_approx_const <- FALSE
} else if (!is.logical(ell$force_approx_const)) {
stop("\'force_approx_const\' must be logical.")
} else {
force_approx_const <- ell$force_approx_const
}
l_const_all <- rep(0, n_par)
for(j in 1:n_par) {
if (all(!force_approx_const,
((kappa3[j] >= -5 & max(kappa1[j], kappa2[j],
abs(kappa3[j])) <= 50) |
abs(kappa3[j]) < 1e-4))) {
l_const_all[j] <- log(const_vmcos_anltc(kappa1[j], kappa2[j],
kappa3[j]))
} else {
l_const_all[j] <- log(const_vmcos_mc(kappa1[j], kappa2[j],
kappa3[j], qrnd_grid));
}
}
if (n_par == 1) {
log_den <- c(ldcos_manyx_onepar(x, kappa1, kappa2, kappa3,
mu1, mu2, l_const_all))
} else if (n_x == 1) {
log_den <- c(ldcos_onex_manypar(c(x), kappa1, kappa2, kappa3,
mu1, mu2, l_const_all))
} else {
x_set <- 1:nrow(x)
par_set <- 1:n_par
expndn_set <- expand_args(x_set, par_set)
x_set <- expndn_set[[1]]
par_set <- expndn_set[[2]]
log_den <- c(ldcos_manyx_manypar(x[x_set, ], kappa1[par_set],
kappa2[par_set], kappa3[par_set],
mu1[par_set], mu2[par_set],
l_const_all[par_set]))
}
if (log) log_den
else exp(log_den)
}
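# A minimal sketch (not run) checking dvmcos() numerically: for a moderate
# parameter setting (where the analytic normalizing constant is used), the
# density should integrate to 1 over [0, 2*pi) x [0, 2*pi).
if (FALSE) {
  f_marg <- function(x1) {
    sapply(x1, function(u)
      integrate(function(x2) dvmcos(cbind(u, x2), 1, 1, 0.5, pi, pi),
                0, 2 * pi)$value)
  }
  integrate(f_marg, 0, 2 * pi)$value # ~ 1
}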
# dvmcos <- function(x, kappa1=1, kappa2=1, kappa3=0, mu1=0, mu2=0) {
#
# if(any(c(kappa1, kappa2) < 0)) stop("kappa1 and kappa2 must be nonnegative")
# if(any(mu1 < 0 | mu1 >= 2*pi)) mu1 <- prncp_reg(mu1)
# if(any(mu2 < 0 | mu2 >= 2*pi)) mu2 <- prncp_reg(mu2)
# if((length(dim(x)) < 2 && length(x) != 2) || (length(dim(x)) == 2 && tail(dim(x), 1) != 2)
# || (length(dim(x)) > 2)) stop("x must either be a bivariate vector or a two-column matrix")
#
#
# if(max(length(kappa1), length(kappa2), length(kappa3), length(mu1), length(mu2)) > 1) {
# expanded <- expand_args(kappa1, kappa2, kappa3, mu1, mu2)
# k1 <- expanded[[1]]; k2 <- expanded[[2]]; k3 <- expanded[[3]]
# mu1 <- expanded[[4]]; mu2 <- expanded[[5]]
# sobol_grid <- sobol_2d_1e4_from_seed_1
# par.mat <- rbind(k1,k2,k3,mu1,mu2)
# l_const_all <- log_const_vmcos_all(par.mat, sobol_grid, ncores = 1)
# if(length(x) != 2) {
# x_set <- 1:nrow(x)
# par_set <- 1:length(kappa1)
# expndn_set <- expand_args(x_set, par_set)
# x_set <- expndn_set[[1]]
# par_set <- expndn_set[[2]]
# as.vector(dcos_manyx_manypar(x[x_set, ], k1[par_set], k2[par_set], k3[par_set], mu1[par_set], mu2[par_set], l_const_all[par_set]))
# } else{
# as.vector(dcos_onex_manypar(x, k1, k2, k3, mu1, mu2, l_const_all))
# }
#
# } else {
# sobol_grid <- sobol_2d_1e4_from_seed_1
# const.vmcos <- const_vmcos(kappa1, kappa2, kappa3, sobol_grid, ncores = 1)
# if(length(x) != 2){
# as.vector(dcos_manyx_onepar(x, kappa1, kappa2, kappa3, mu1, mu2, log(const.vmcos)))
# } else{
# exp(ldcosnum(x[1], x[2], c(kappa1, kappa2, kappa3, mu1, mu2)))/const.vmcos
# }
# }
#
#
#
# }
#' The bivariate von Mises cosine model mixtures
#' @inheritParams rvmsinmix
#' @inheritParams rvmcos
#' @param mu1,mu2 vectors of mean parameters.
#' @param kappa1,kappa2,kappa3 vectors of concentration parameters; \code{kappa1, kappa2 > 0} for each component.
#'
#' @details All the argument vectors \code{pmix, kappa1, kappa2, kappa3, mu1} and \code{mu2} must be of
#' the same length ( = component size of the mixture model), with \eqn{j}-th element corresponding to the
#' \eqn{j}-th component of the mixture distribution.
#' @details The bivariate von Mises cosine model mixture distribution with component size \code{K = \link{length}(pmix)} has density
#' \deqn{g(x) = \sum p[j] * f(x; \kappa_1[j], \kappa_2[j], \kappa_3[j], \mu_1[j], \mu_2[j])}
#' where the sum extends over \eqn{j}; \eqn{p[j]; \kappa_1[j], \kappa_2[j], \kappa_3[j]}; and \eqn{\mu_1[j], \mu_2[j]} respectively denote the mixing proportion,
#' the three concentration parameters and the two mean parameter for the \eqn{j}-th cluster, \eqn{j = 1, ..., K},
#' and \eqn{f(. ; \kappa_1, \kappa_2, \kappa_3, \mu_1, \mu_2)} denotes the density function of the von Mises cosine model
#' with concentration parameters \eqn{\kappa_1, \kappa_2, \kappa_3} and mean parameters \eqn{\mu_1, \mu_2}.
#'
#' @return \code{dvmcosmix} computes the density and \code{rvmcosmix} generates random deviates from the mixture density.
#'
#' @examples
#' kappa1 <- c(1, 2, 3)
#' kappa2 <- c(1, 6, 5)
#' kappa3 <- c(0, 1, 2)
#' mu1 <- c(1, 2, 5)
#' mu2 <- c(0, 1, 3)
#' pmix <- c(0.3, 0.4, 0.3)
#' x <- diag(2, 2)
#' n <- 10
#'
#' # mixture densities calculated at the rows of x
#' dvmcosmix(x, kappa1, kappa2, kappa3, mu1, mu2, pmix)
#'
#' # number of observations generated from the mixture distribution is n
#' rvmcosmix(n, kappa1, kappa2, kappa3, mu1, mu2, pmix)
#'
#' @export
rvmcosmix <- function(n, kappa1, kappa2, kappa3,
mu1, mu2, pmix, method = "naive", ...)
{
allpar <- list(kappa1=kappa1, kappa2=kappa2, kappa3=kappa3,
mu1=mu1, mu2=mu2, pmix=pmix)
allpar_len <- listLen(allpar)
if(min(allpar_len) != max(allpar_len))
stop("component size mismatch: number of components of the input parameter vectors differ")
if(any(allpar$pmix < 0)) stop("\'pmix\' must be non-negative")
sum_pmix <- sum(allpar$pmix)
if(signif(sum_pmix, 5) != 1) {
if(sum_pmix <= 0) stop("\'pmix\' must have at least one positive element")
allpar$pmix <- allpar$pmix/sum_pmix
warning("\'pmix\' is rescaled to add up to 1")
}
if(any(c(allpar$kappa1, allpar$kappa2) < 0)) stop("kappa1 and kappa2 must be non-negative")
if(any(allpar$mu1 < 0 | allpar$mu1 >= 2*pi)) allpar$mu1 <- prncp_reg(allpar$mu1)
if(any(allpar$mu2 < 0 | allpar$mu2 >= 2*pi)) allpar$mu2 <- prncp_reg(allpar$mu2)
# opt_I_k13 <- function(k1, k2, k3, mu1, mu2) {
# I_k13 <- function(y) BESSI0_C(sqrt(k1 * k1 + k3 * k3 + 2 * k1 * k3 * cos(y - mu2)))
# optimize(I_k13, c(0, 2*pi), maximum = TRUE)$maximum
# }
#
# upper_bd_all <- vapply(1:length(kappa1),
# function(h) opt_I_k13(kappa1[h], kappa2[h], kappa3[h],
# mu1[h], mu2[h]),
# 0)
#
# clus_label <- cID(t(replicate(allpar$pmix, n = n)), length(allpar$pmix), runif(n))
# rcos_manypar(allpar$kappa1[clus_label], allpar$kappa2[clus_label], allpar$kappa3[clus_label],
# allpar$mu1[clus_label], allpar$mu2[clus_label], upper_bd_all[clus_label])
out <- matrix(0, n, 2)
ncomp <- allpar_len[1] # number of components
comp_ind <- cID(tcrossprod(rep(1, n), allpar$pmix), ncomp, runif(n))
# n samples from multinom(ncomp, pmix)
for(j in seq_len(ncomp)) {
obs_ind_j <- which(comp_ind == j)
n_j <- length(obs_ind_j)
if(n_j > 0) {
out[obs_ind_j, ] <- rvmcos(n_j, kappa1[j], kappa2[j],
kappa3[j], mu1[j], mu2[j], method)
}
}
out
}
#' @rdname rvmcosmix
#' @export
dvmcosmix <- function(x, kappa1, kappa2, kappa3,
mu1, mu2, pmix, log=FALSE, ...)
{
allpar <- list("kappa1"=kappa1, "kappa2"=kappa2, "kappa3"=kappa3,
"mu1"=mu1, "mu2"=mu2, "pmix"=pmix)
allpar_len <- listLen(allpar)
if(min(allpar_len) != max(allpar_len)) stop("component size mismatch: number of components of the input parameter vectors differ")
if(any(allpar$pmix < 0)) stop("\'pmix\' must be non-negative")
sum_pmix <- sum(allpar$pmix)
if(signif(sum_pmix, 5) != 1) {
if(sum_pmix <= 0) stop("\'pmix\' must have at least one positive element")
allpar$pmix <- allpar$pmix/sum_pmix
warning("\'pmix\' is rescaled to add up to 1")
}
if(any(c(allpar$kappa1, allpar$kappa2) <= 0)) stop("kappa1 and kappa2 must be positive")
if(any(allpar$mu1 < 0 | allpar$mu1 >= 2*pi)) allpar$mu1 <- prncp_reg(allpar$mu1)
if(any(allpar$mu2 < 0 | allpar$mu2 >= 2*pi)) allpar$mu2 <- prncp_reg(allpar$mu2)
if((length(dim(x)) < 2 && length(x) != 2) || (length(dim(x)) == 2 && tail(dim(x), 1) != 2)
|| (length(dim(x)) > 2)) stop("x must either be a bivariate vector or a two-column matrix")
ncomp <- length(kappa1)
if (length(x) == 2) x <- matrix(x, nrow=1)
allcompden <- vapply(1:ncomp,
function(j) dvmcos(x, kappa1[j], kappa2[j],
kappa3[j], mu1[j], mu2[j], FALSE, ...),
rep(0, nrow(x)))
  mixden <- c(allcompden %*% allpar$pmix) # use the (possibly rescaled) mixing proportions
if (log) {
log(mixden)
} else {
mixden
}
}
#' Fitting bivariate von Mises cosine model mixtures using MCMC
#' @inheritParams fit_vmsinmix
#'
#' @details
#' Wrapper for \link{fit_angmix} with \code{model = "vmcos"}.
#'
#'
#' @examples
#' # illustration only - more iterations needed for convergence
#' fit.vmcos.10 <- fit_vmcosmix(tim8, ncomp = 3, n.iter = 10,
#' n.chains = 1)
#' fit.vmcos.10
#'
#' @export
fit_vmcosmix <- function(...)
{
fit_angmix(model="vmcos", ...)
}
vmcos_var_cor_singlepar_numeric <- function(kappa1, kappa2, kappa3, qrnd_grid) {
# N <- 1e4
# browser()
fn_log_vmcos_const <- function(pars) {
const_vmcos(pars[1], pars[2], pars[3], uni_rand = qrnd_grid, return_log = TRUE)
}
# const <- fn_log_vmcos_const(c(kappa1, kappa2, kappa3))
grad_over_const <- numDeriv::grad(fn_log_vmcos_const, c(kappa1, kappa2, kappa3))
names(grad_over_const) <- c("k1", "k2", "k3")
hess_over_const <- numDeriv::hessian(fn_log_vmcos_const, c(kappa1, kappa2, kappa3)) +
tcrossprod(grad_over_const)
dimnames(hess_over_const) <- list(c("k1", "k2", "k3"), c("k1", "k2", "k3"))
rho_fl <- unname(
((grad_over_const["k3"] - hess_over_const["k1", "k2"]) *
hess_over_const["k1", "k2"]) /
sqrt(
hess_over_const["k1", "k1"] * (1 - hess_over_const["k1", "k1"])
* hess_over_const["k2", "k2"] * (1 - hess_over_const["k2", "k2"])
)
)
rho_js <- unname(
(grad_over_const["k3"] - hess_over_const["k1", "k2"]) /
sqrt((1 - hess_over_const["k1", "k1"]) *
(1 - hess_over_const["k2", "k2"]))
)
var1 <- unname(1 - grad_over_const["k1"])
var2 <- unname(1 - grad_over_const["k2"])
# dat <- rvmcos(N, kappa1, kappa2, kappa3, 0, 0)
#
# ave_sin1sin2 <- sum(sin(dat[, 1]) * sin(dat[, 2]))/N
# ave_cos1cos2 <- sum(cos(dat[, 1]) * cos(dat[, 2]))/N
#
# ave_sin1sq <- sum(sin(dat[, 1])^2)/N
# ave_cos1sq <- 1-ave_sin1sq
# ave_cos1 <- sum(cos(dat[, 1]))/N
#
# ave_sin2sq <- sum(sin(dat[, 2])^2)/N
# ave_cos2sq <- 1-ave_sin2sq
# ave_cos2 <- sum(cos(dat[, 2]))/N
#
# rho_js <- ave_sin1sin2/sqrt(ave_sin1sq * ave_sin2sq)
# # ifelse(ave_sin1sin2 >= 0, 1, -1) *
# # min(abs(ave_sin1sin2)/sqrt(ave_sin1sq * ave_sin2sq), 1)
#
# rho_fl <- rho_js *
# ave_cos1cos2/sqrt(ave_cos1sq * ave_cos2sq)
# # ifelse(ave_cos1cos2 >= 0, 1, -1) *
# # min(abs(ave_cos1cos2)/sqrt(ave_cos1sq * ave_cos2sq), 1)
#
# var1 <- min(1 - ave_cos1, 1)
# var2 <- min(1 - ave_cos2, 1)
list(var1 = var1, var2 = var2, rho_fl = rho_fl, rho_js = rho_js)
}
vmcos_var_cor_singlepar <- function(kappa1, kappa2, kappa3,
qrnd_grid) {
if (max(kappa1, kappa2, abs(kappa3)) > 50 |
kappa3 < 0) {
out <- vmcos_var_cor_singlepar_numeric(kappa1, kappa2,
kappa3, qrnd_grid)
# } else if(kappa3 < -1 | max(kappa1, kappa2, abs(kappa3)) > 50) {
# vmcos_var_corr_mc(kappa1, kappa2, kappa3, qrnd_grid)
} else {
out <- vmcos_var_corr_anltc(kappa1, kappa2, kappa3)
}
for (rho in c("rho_js", "rho_fl")) {
if (out[[rho]] <= -1) {
out[[rho]] <- -1
} else if (out[[rho]] >= 1) {
out[[rho]] <- 1
}
}
for (var in c("var1", "var2")) {
if (out[[var]] <= 0) {
out[[var]] <- 0
} else if (out[[var]] >= 1) {
out[[var]] <- 1
}
}
out
}
rvmcos_1par <- function(n=1, kappa1, kappa2, kappa3, mu1, mu2, method)
{
if (abs(abs(kappa3) - kappa1) < 1e-8 |
abs(abs(kappa3) - kappa2) < 1e-8)
kappa3 <- kappa3*(1+1e-6)
if (method == "vmprop") {
qrnd_grid <- sobol(1e4, 2, FALSE)
log_const_vmcos <- log(const_vmcos(kappa1, kappa2, kappa3, qrnd_grid))
log_2pi <- log(2*pi)
unimodal_y <- TRUE
phistar <- NULL
method <- "vmprop"
# first check if the joint density is bimodal
if (kappa3 < - kappa1*kappa2/(kappa1+kappa2+1e-16)) {
# now check if the y marginal is bimodal
if (A_bessel(abs(kappa1+kappa3)) >
-abs(kappa1+kappa3)*kappa2/(kappa1*kappa3 + 1e-16)) {
unimodal_y <- FALSE
phistar_eqn <- function(phi) {
kappa13 <- sqrt(kappa1^2 + kappa3^2 + 2*kappa1*kappa3*cos(phi))
-kappa1*kappa3*A_bessel(kappa13)/kappa13 - kappa2
}
find_root <- uniroot.all(phistar_eqn, c(0, 2*pi))
# if no root found, use the naive two dimensional rejection sampler
        if (length(find_root) == 0) { # uniroot.all returns numeric(0) when no root is found
method <- "naive"
} else {
phistar <- find_root[1]
}
}
}
# browser()
if (method == "vmprop" & unimodal_y) {
grid_0_2pi <- seq(0, 2*pi, length.out = 100)
# do minimax to find optimum kappa and K
# don't worry about mu1 mu2 while optimizing!
# abs difference between the target log marginal den
# and log proposal den
obj_kappa_abs <- function(kappa) {
obj_kappa_phi <- function(phi) {
cos_phi <- cos(phi)
kappa13 <- sqrt(kappa1^2 + kappa3^2 + 2*kappa1*kappa3*cos_phi)
log_I_k_13 <- log(besselI(kappa13, 0))
log_target_marginal <- -log_const_vmcos + log_2pi + log_I_k_13 + kappa2*cos_phi
log_proposal <- -log_2pi - log(besselI(kappa, 0)) + kappa*cos_phi
abs(log_target_marginal - log_proposal)
}
optimize(obj_kappa_phi, c(0, 2*pi), maximum=TRUE)$objective
}
# minimize obj_kappa_abs wrt kappa
kappa_opt <- optimize(obj_kappa_abs, c(0, max(50, kappa2*2)))$minimum
log_prop_den <- function(kappa, phi) {
cos_phi <- cos(phi)
-log_2pi - log(besselI(kappa, 0)) +
kappa*cos_phi
}
# find max of log_prop_den
max_log_prop_den <- max(log_prop_den(kappa_opt, grid_0_2pi))
      # any point with density < exp(max_log_prop_den)*exp(-10)
      # is unlikely to appear, so use exp(max_log_prop_den)*exp(-10)
      # as the lower bound for the proposal density to avoid instability
# browser()
# now maximize the log ratio of the two densities
# w.r.t phi, given kappa = kappa_opt
obj_kappa_phi <- function(kappa, phi) {
cos_phi <- cos(phi)
kappa13 <- sqrt(kappa1^2 + kappa3^2 + 2*kappa1*kappa3*cos_phi)
log_I_k_13 <- log(besselI(kappa13, 0))
log_target_marginal <- -log_const_vmcos + log_2pi + log_I_k_13 + kappa2*cos_phi
log_proposal <- pmax(-log_2pi - log(besselI(kappa, 0)) +
kappa*cos_phi, max_log_prop_den-10)
# log_proposals are >= max_log_prop_den -10 to avoid instability
# also, proposal deviates with log density less than this bound are unlikely
log_target_marginal - log_proposal
}
# maximize the log-ratio over a grid
(logK <- max(obj_kappa_phi(kappa_opt, seq(0, 2*pi, length.out = 200))))
# browser()
# cat(exp(-logK))
# rcos_unimodal(1, kappa1, kappa2, kappa3, mu1, mu2,
# kappa_opt, log(besselI(kappa_opt, 0, TRUE)) + kappa_opt,
# logK, log_const_vmcos)
rcos_unimodal(n, kappa1, kappa2, kappa3, mu1, mu2,
kappa_opt, log(BESSI0_C(kappa_opt)), logK,
log_const_vmcos)
}
else if (method == "vmprop" & !unimodal_y) {
# change the modes into [0, 2*pi]
mode_1 <- prncp_reg(prncp_reg.minuspi.pi(mu2)+phistar)
mode_2 <- prncp_reg(prncp_reg.minuspi.pi(mu2)-phistar)
sin_phistar <- sin(phistar)
unifpropn <- 1e-10
vmpropn <- (1-1e-10)/2
grid_0_2pi <- seq(0, 2*pi, length.out = 100)
      # proposal for y marginal = vmpropn-vmpropn-unifpropn mixture
      # of vm(kappa, mu2 + phistar), vm(kappa, mu2 - phistar)
      # and unif(0, 2*pi) (the uniform component avoids overflow
      # of the ratio where the densities are flat)
# do minimax to find optimum kappa and K
# abs difference between the target log marginal den
# and log proposal den
obj_kappa_abs <- function(kappa) {
obj_kappa_phi <- function(phi) {
cos_phi <- cos(phi-mu2)
kappa13 <- sqrt(kappa1^2 + kappa3^2 + 2*kappa1*kappa3*cos_phi)
log_I_k_13 <- log(besselI(kappa13, 0))
log_target_marginal <- -log_const_vmcos + log_2pi + log_I_k_13 + kappa2*cos_phi
log_proposal <-
-log_2pi + log(exp(log(vmpropn) - log(besselI(kappa, 0)) +
kappa*cos(phi-mode_1) +
log(1+exp(kappa*(cos(phi-mode_2)
- cos(phi-mode_1))))
) + unifpropn) # simplified mixture density
abs(log_target_marginal - log_proposal)
}
max(obj_kappa_phi(grid_0_2pi))
}
# minimize obj_kappa_abs wrt kappa
(kappa_opt <- optimize(obj_kappa_abs,
c(0, max(kappa1, kappa2,
abs(kappa3))))$minimum)
log_prop_den <- function(kappa, phi) {
-log_2pi + log(exp(log(vmpropn) - log(besselI(kappa, 0)) +
kappa*cos(phi-mode_1) +
log(1+exp(kappa*(cos(phi-mode_2) - cos(phi-mode_1)))))
+ unifpropn)
}
# find max of log_prop_den
max_log_prop_den <- max(log_prop_den(kappa_opt, grid_0_2pi))
      # points with density < exp(max_log_prop_den)*exp(-10)
      # are unlikely to appear, so use exp(max_log_prop_den)*exp(-10)
      # as the lower bound for the proposal density to avoid instability
# now maximize the log ratio of the two densities
# w.r.t phi, given kappa = kappa_opt
obj_kappa_phi <- function(kappa, phi) {
cos_phi <- cos(phi-mu2)
kappa13 <- sqrt(kappa1^2 + kappa3^2 + 2*kappa1*kappa3*cos_phi)
log_I_k_13 <- log(besselI(kappa13, 0))
log_target_marginal <- -log_const_vmcos + log_2pi + log_I_k_13 + kappa2*cos_phi
log_proposal <-
pmax(-log_2pi + log(exp(log(vmpropn) - log(besselI(kappa, 0)) +
kappa*cos(phi-mode_1) +
log(1+exp(kappa*(cos(phi-mode_2) - cos(phi-mode_1)))))
+ unifpropn), max_log_prop_den-10)
# log_proposals are >= max_log_prop_den -10 to avoid instability
# also, proposal deviates with log density less than this bound are unlikely
log_target_marginal - log_proposal
}
# browser()
#
# target_den <- function(phi) {
# cos_phi <- cos(phi-mu2)
# kappa13 <- sqrt(kappa1^2 + kappa3^2 + 2*kappa1*kappa3*cos_phi)
# log_I_k_13 <- log(besselI(kappa13, 0))
# log_target_marginal <- -log_const_vmcos + log_2pi + log_I_k_13 + kappa2*cos_phi
# exp(log_target_marginal)
# }
#
# prop_den <- function(kappa, phi) {
# cos_phi <- cos(phi)
# log_proposal <-
# pmax(-log_2pi + log(exp(log(vmpropn) - log(besselI(kappa, 0)) +
# kappa*cos(phi-mode_1) +
# log(1+exp(kappa*(cos(phi-mode_2) - cos(phi-mode_1)))))
# + unifpropn), max_log_prop_den-30)
# exp(log_proposal)
# }
# #
# # integrate(gg, 0, 2*pi)
# # integrate(function(x) pp(kappa_opt, x), 0, 2*pi)
# maximize the log-ratio over a grid
(logK <- max(obj_kappa_phi(kappa_opt, seq(0, 2*pi, length.out = 200))))
exp(logK)
# poin <- seq(0, 2*pi, length.out = 100)
# lggp <- log(target_den(poin))
# lppp <- log(prop_den(kappa_opt, poin))
# # difp <- obj_kappa_phi(kappa_opt, poin)
# #
# plot(poin, lggp, type="l", ylim=range(lggp, lppp, lggp-lppp))
# points(poin, lppp, type="l", col = "blue")
# # points(poin, lggp-lppp, type="l")
# # points(poin, difp, type="l", col = "red")
# abline(v=c(prncp_reg(mu2+phistar), prncp_reg(mu2-phistar)))
rcos_bimodal(n, kappa1, kappa2, kappa3, mu1, mu2,
kappa_opt, log(BESSI0_C(kappa_opt)), logK,
log_const_vmcos, mode_1, mode_2,
vmpropn, unifpropn)
# rcos_unimodal(n, kappa1, kappa2, kappa3, mu1, mu2,
# kappa_opt, log(BESSI0_C(kappa_opt)), logK,
# log_const_vmcos)
# rcos_unimodal_R(kappa1, kappa2, kappa3, mu1, mu2,
# kappa_opt, log(BESSI0_C(kappa_opt)), logK,
# log_const_vmcos)
}
}
else if (method == "naive") {
opt_obj <- function(k1=1, k2=1, k3=0, mu1=0, mu2=0) {
obj <- optim(c(0,0), fn = function(x) -(k1*cos(x[1]-mu1)+k2*cos(x[2]-mu2)+k3*cos(x[1]-x[2]-mu1+mu2)),
gr = function(x) -c(-k1*sin(x[1]-mu1)-k3*sin(x[1]-x[2]-mu1+mu2),
-k2*sin(x[2]-mu2)+k3*sin(x[1]-x[2]-mu1+mu2)))
-obj$value
}
upper_bd <- opt_obj(kappa1, kappa2, kappa3, mu1, mu2)
# qrnd_grid <- sobol(1e4, 2, FALSE)
# cat(exp(log(const_vmcos(kappa1, kappa2, kappa3,
# qrnd_grid))
# - upper_bd)/(4*pi^2))
rcos_onepar(n, kappa1, kappa2, kappa3, mu1, mu2, upper_bd)
}
}
| /scratch/gouwar.j/cran-all/cranData/BAMBI/R/all_vmcos_fns.R |
#' The bivariate von Mises sine model
#'
#' @param n number of observations. Ignored if at least one of the other parameters has length k > 1, in which
#' case, all the parameters are recycled to length k to produce k random variates.
#' @param x bivariate vector or a two-column matrix with each row being a bivariate vector of angles
#' (in radians) where the densities are to be evaluated.
#' @param mu1,mu2 vectors of mean parameters.
#' @param kappa1,kappa2,kappa3 vectors of concentration parameters; \code{kappa1, kappa2 > 0}.
#' @param log logical. Should the log density be returned instead?
#' @param method Rejection sampling method to be used. Available choices are \code{"naive"} (default) or \code{"vmprop"}. See details.
#' @details
#' The bivariate von Mises sine model density at the point \eqn{x = (x_1, x_2)} is given by
#' \deqn{f(x) = C_s (\kappa_1, \kappa_2, \kappa_3) \exp(\kappa_1 \cos(T_1) + \kappa_2 \cos(T_2) + \kappa_3 \sin(T_1) \sin(T_2))}
#' where
#' \deqn{T_1 = x_1 - \mu_1; T_2 = x_2 - \mu_2}
#' and \eqn{C_s (\kappa_1, \kappa_2, \kappa_3)} denotes the normalizing constant for the sine model.
#'
#' Two different rejection sampling methods are implemented for random generation. If \code{method = "vmprop"}, the y-marginal
#' is drawn first from the associated marginal density, and then x is generated from the conditional distribution of x given y. The marginal generation of
#' y uses a rejection sampling scheme whose proposal is either a von Mises density (if the target marginal density is unimodal), or a mixture of
#' von Mises densities (if bimodal), with optimally chosen concentration. This is the method suggested in Mardia et al. (2007). On the other hand, when
#' \code{method = "naive"} (default) a (naive) bivariate rejection sampling scheme with a (bivariate) uniform proposal is used.
#'
#' Note that although \code{method = "vmprop"} may provide better efficiency when the density is highly concentrated, it does have
#' an (often substantial) overhead due to the optimization step required to find a reasonable proposal concentration parameter.
#' This overhead can offset the efficiency gains of the method, especially when \code{n} is not large.
#'
#'
#'
#'
#' @return \code{dvmsin} gives the density and \code{rvmsin} generates random deviates.
#'
#' @examples
#' kappa1 <- c(1, 2, 3)
#' kappa2 <- c(1, 6, 5)
#' kappa3 <- c(0, 1, 2)
#' mu1 <- c(1, 2, 5)
#' mu2 <- c(0, 1, 3)
#' x <- diag(2, 2)
#' n <- 10
#'
#' # when x is a bivariate vector and parameters are all scalars,
#' # dvmsin returns single density
#' dvmsin(x[1, ], kappa1[1], kappa2[1], kappa3[1], mu1[1], mu2[1])
#'
#' # when x is a two column matrix and parameters are all scalars,
#' # dvmsin returns a vector of densities calculated at the rows of
#' # x with the same parameters
#' dvmsin(x, kappa1[1], kappa2[1], kappa3[1], mu1[1], mu2[1])
#'
#' # if x is a bivariate vector and at least one of the parameters is
#' # a vector, all parameters are recycled to the same length, and
#' # dvmsin returns a vector with ith element being the density
#' # evaluated at x with parameter values kappa1[i], kappa2[i],
#' # kappa3[i], mu1[i] and mu2[i]
#' dvmsin(x[1, ], kappa1, kappa2, kappa3, mu1, mu2)
#'
#' # if x is a two column matrix and at least one of the parameters is
#' # a vector, rows of x and the parameters are recycled to the same
#' # length, and dvmsin returns a vector with ith element being the
#' # density evaluated at ith row of x with parameter values kappa1[i],
#' # kappa2[i], kappa3[i], mu1[i] and mu2[i]
#' dvmsin(x, kappa1, kappa2, kappa3, mu1, mu2)
#'
#' # when parameters are all scalars, number of observations generated
#' # by rvmsin is n
#' rvmsin(n, kappa1[1], kappa2[1], kappa3[1], mu1[1], mu2[1])
#'
#' # when at least one of the parameters is a vector, all parameters are
#' # recycled to the same length, n is ignored, and the number of
#' # observations generated by rvmsin is the same as the length of the
#' # recycled vectors
#' rvmsin(n, kappa1, kappa2, kappa3, mu1, mu2)
#'
#' @export
rvmsin <- function(n, kappa1=1, kappa2=1,
kappa3=0, mu1=0, mu2=0, method="naive")
{
if(any(c(kappa1, kappa2) < 0)) stop("kappa1 and kappa2 must be nonnegative")
if(any(mu1 < 0 | mu1 >= 2*pi)) mu1 <- prncp_reg(mu1)
if(any(mu2 < 0 | mu2 >= 2*pi)) mu2 <- prncp_reg(mu2)
if(n < 0) stop("invalid n")
# if (is.null(method)) {
# if (n > 1e5) method <- "vmprop"
# else method <- "naive"
# }
if (!method %in% c("naive", "vmprop"))
stop("method must be either \'naive\' or \'vmprop\'")
if(max(length(kappa1), length(kappa2), length(kappa3),
length(mu1), length(mu2)) > 1) {
expanded <- expand_args(kappa1, kappa2, kappa3, mu1, mu2)
k1 <- expanded[[1]]; k2 <- expanded[[2]]; k3 <- expanded[[3]]
mu1 <- expanded[[4]]; mu2 <- expanded[[5]]
    # note: with vector-valued parameters each variate is generated
    # separately, and the naive rejection sampler is always used
    t(vapply(1:length(k1),
             function(j) rvmsin_1par(1, k1[j], k2[j], k3[j],
                                     mu1[j], mu2[j], "naive"), c(0, 0)))
} else {
# if (is.null(method) & max(kappa1, kappa2, abs(kappa3)) < 0.1)
# method <- "vmprop"
rvmsin_1par(n, kappa1, kappa2, kappa3, mu1, mu2, method)
}
}
# rvmsin_naive <- function(n, kappa1=1, kappa2=1,
# kappa3=0, mu1=0, mu2=0) {
#
# opt_obj <- function(k1=1, k2=1, k3=0, mu1=0, mu2=0) {
# # for numerical stability, if k3 < 0, fabs(k1+k3) < 1e-5 or fabs(k2+k3) < 1e-5
# # make k3 = k3 * (1+1e-5)
# if (k3 < 0) {
# while (abs(k1 + k3) < 1e-5 || abs(k2 + k3) < 1e-5) {
# k3 = k3*(1+1e-5)
# }
# }
# obj <- optim(c(0,0), fn = function(x) -(k1*cos(x[1]-mu1)+k2*cos(x[2]-mu2)+k3*sin(x[1]-mu1)*sin(x[2]-mu2)),
# gr = function(x) -c(-k1*sin(x[1]-mu1)+k3*cos(x[1]-mu1)*sin(x[2]-mu2),
# -k2*sin(x[2]-mu2)+k3*sin(x[1]-mu1)*cos(x[2]-mu2)))
# -obj$value
#
# }
#
# if(max(length(kappa1), length(kappa2), length(kappa3), length(mu1), length(mu2)) > 1) {
# expanded <- expand_args(kappa1, kappa2, kappa3, mu1, mu2)
# k1 <- expanded[[1]]; k2 <- expanded[[2]]; k3 <- expanded[[3]]
# mu1 <- expanded[[4]]; mu2 <- expanded[[5]]
# upper_bd_all <- vapply(1:length(k1),
# function(h) opt_obj(k1[h], k2[h], k3[h], mu1[h], mu2[h]),
# 0)
# rsin_manypar(k1, k2, k3, mu1, mu2, upper_bd_all)
# } else {
# upper_bd <- opt_obj(kappa1, kappa2, kappa3, mu1, mu2)
# rsin_onepar(n, kappa1, kappa2, kappa3, mu1, mu2, upper_bd)
# }
# }
#' @rdname rvmsin
#' @export
dvmsin <- function(x, kappa1=1, kappa2=1, kappa3=0, mu1=0, mu2=0, log = FALSE)
{
if(any(c(kappa1, kappa2) < 0)) stop("kappa1 and kappa2 must be non-negative")
if(any(mu1 < 0 | mu1 >= 2*pi)) mu1 <- prncp_reg(mu1)
if(any(mu2 < 0 | mu2 >= 2*pi)) mu2 <- prncp_reg(mu2)
if((length(dim(x)) < 2 && length(x) != 2) || (length(dim(x)) == 2 && tail(dim(x), 1) != 2)
|| (length(dim(x)) > 2)) stop("x must either be a bivariate vector or a two-column matrix")
if(max(length(kappa1), length(kappa2), length(kappa3), length(mu1), length(mu2)) > 1) {
expanded <- expand_args(kappa1, kappa2, kappa3, mu1, mu2)
kappa1 <- expanded[[1]]
kappa2 <- expanded[[2]]
kappa3 <- expanded[[3]]
mu1 <- expanded[[4]]
mu2 <- expanded[[5]]
}
par.mat <- rbind(kappa1, kappa2, kappa3, mu1, mu2)
n_par <- ncol(par.mat)
if (length(x) == 2) x <- matrix(x, nrow = 1)
n_x <- nrow(x)
if (n_par == 1) {
log_den <- c(ldsin_manyx_onepar(x, kappa1, kappa2, kappa3,
mu1, mu2))
} else if (n_x == 1) {
log_den <- c(ldsin_onex_manypar(c(x), kappa1, kappa2, kappa3,
mu1, mu2))
} else {
x_set <- 1:nrow(x)
par_set <- 1:n_par
expndn_set <- expand_args(x_set, par_set)
x_set <- expndn_set[[1]]
par_set <- expndn_set[[2]]
log_den <- c(ldsin_manyx_manypar(x[x_set, ], kappa1[par_set],
kappa2[par_set], kappa3[par_set],
mu1[par_set], mu2[par_set]))
}
if (log) log_den
else exp(log_den)
}
#'
# dvmsin <- function(x, kappa1=1, kappa2=1, kappa3=0, mu1=0, mu2=0)
# {
# if(any(c(kappa1, kappa2) < 0)) stop("kappa1 and kappa2 must be non-negative")
# if(any(mu1 < 0 | mu1 >= 2*pi)) mu1 <- prncp_reg(mu1)
# if(any(mu2 < 0 | mu2 >= 2*pi)) mu2 <- prncp_reg(mu2)
# if((length(dim(x)) < 2 && length(x) != 2) || (length(dim(x)) == 2 && tail(dim(x), 1) != 2)
# || (length(dim(x)) > 2)) stop("x must either be a bivariate vector or a two-column matrix")
#
# if(max(length(kappa1), length(kappa2), length(kappa3), length(mu1), length(mu2)) > 1) {
# expanded <- expand_args(kappa1, kappa2, kappa3, mu1, mu2)
# kappa1 <- expanded[[1]]
# kappa2 <- expanded[[2]]
# kappa3 <- expanded[[3]]
# mu1 <- expanded[[4]]
# mu2 <- expanded[[5]]
# }
#
# par.mat <- rbind(kappa1, kappa2, kappa3, mu1, mu2)
# n_par <- ncol(par.mat)
# if (length(x) == 2) x <- matrix(x, nrow = 1)
# n_x <- nrow(x)
#
#
# if (n_par == 1) {
# log_den <- c(ldsin_manyx_onepar(x, kappa1, kappa2, kappa3,
# mu1, mu2))
#
# } else if (n_x == 1) {
# log_den <- c(ldsin_onex_manypar(c(x), kappa1, kappa2, kappa3,
# mu1, mu2))
# } else {
# x_set <- 1:nrow(x)
# par_set <- 1:n_par
# expndn_set <- expand_args(x_set, par_set)
# x_set <- expndn_set[[1]]
# par_set <- expndn_set[[2]]
# log_den <- c(ldsin_manyx_manypar(x[x_set, ], kappa1[par_set],
# kappa2[par_set], kappa3[par_set],
# mu1[par_set], mu2[par_set]))
# }
#
# if (log) log_den
# else exp(log_den)
# }
#' The bivariate von Mises sine model mixtures
#' @inheritParams rvmsin
#' @param n number of observations.
#' @param x matrix of angles (in radians) where the density is to be evaluated, with each row being a
#' single bivariate vector of angles.
#'
#' @param pmix vector of mixture proportions.
#' @param mu1,mu2 vectors of mean parameters.
#' @param kappa1,kappa2,kappa3 vectors of concentration parameters; \code{kappa1, kappa2 > 0} for each component.
#'
#' @details All the argument vectors \code{pmix, kappa1, kappa2, kappa3, mu1} and \code{mu2} must be of
#' the same length ( = component size of the mixture model), with \eqn{j}-th element corresponding to the
#' \eqn{j}-th component of the mixture distribution.
#' @details The bivariate von Mises sine model mixture distribution with component size \code{K = \link{length}(pmix)} has density
#' \deqn{g(x) = \sum p[j] * f(x; \kappa_1[j], \kappa_2[j], \kappa_3[j], \mu_1[j], \mu_2[j])}
#' where the sum extends over \eqn{j}; \eqn{p[j]; \kappa_1[j], \kappa_2[j], \kappa_3[j]}; and \eqn{\mu_1[j], \mu_2[j]} respectively denote the mixing proportion,
#' the three concentration parameters and the two mean parameter for the \eqn{j}-th component, \eqn{j = 1, ..., K},
#' and \eqn{f(. ; \kappa_1, \kappa_2, \kappa_3, \mu_1, \mu_2)} denotes the density function of the von Mises sine model
#' with concentration parameters \eqn{\kappa_1, \kappa_2, \kappa_3} and mean parameters \eqn{\mu_1, \mu_2}.
#'
#' @return \code{dvmsinmix} computes the density (vector if x is a two column matrix with more than one row)
#' and \code{rvmsinmix} generates random deviates from the mixture density.
#'
#' @examples
#' kappa1 <- c(1, 2, 3)
#' kappa2 <- c(1, 6, 5)
#' kappa3 <- c(0, 1, 2)
#' mu1 <- c(1, 2, 5)
#' mu2 <- c(0, 1, 3)
#' pmix <- c(0.3, 0.4, 0.3)
#' x <- diag(2, 2)
#' n <- 10
#'
#' # mixture densities calculated at the rows of x
#' dvmsinmix(x, kappa1, kappa2, kappa3, mu1, mu2, pmix)
#'
#' # number of observations generated from the mixture distribution is n
#' rvmsinmix(n, kappa1, kappa2, kappa3, mu1, mu2, pmix)
#'
#' @export
rvmsinmix <- function(n, kappa1, kappa2, kappa3, mu1, mu2, pmix, method="naive")
{
allpar <- list(kappa1=kappa1, kappa2=kappa2, kappa3=kappa3,
mu1=mu1, mu2=mu2, pmix=pmix)
allpar_len <- listLen(allpar)
if(min(allpar_len) != max(allpar_len))
stop("component size mismatch: number of components of the input parameter vectors differ")
if(any(allpar$pmix < 0)) stop("\'pmix\' must be non-negative")
sum_pmix <- sum(allpar$pmix)
if(signif(sum_pmix, 5) != 1) {
if(sum_pmix <= 0) stop("\'pmix\' must have at least one positive element")
allpar$pmix <- allpar$pmix/sum_pmix
warning("\'pmix\' is rescaled to add up to 1")
}
if(any(c(allpar$kappa1, allpar$kappa2) < 0)) stop("kappa1 and kappa2 must be non-negative")
if(any(allpar$mu1 < 0 | allpar$mu1 >= 2*pi)) allpar$mu1 <- prncp_reg(allpar$mu1)
if(any(allpar$mu2 < 0 | allpar$mu2 >= 2*pi)) allpar$mu2 <- prncp_reg(allpar$mu2)
out <- matrix(0, n, 2)
ncomp <- allpar_len[1] # number of components
comp_ind <- cID(tcrossprod(rep(1, n), allpar$pmix), ncomp, runif(n))
# n samples from multinom(ncomp, pmix)
for(j in seq_len(ncomp)) {
obs_ind_j <- which(comp_ind == j)
n_j <- length(obs_ind_j)
if(n_j > 0) {
out[obs_ind_j, ] <- rvmsin(n_j, kappa1[j], kappa2[j],
kappa3[j], mu1[j], mu2[j], method)
}
}
out
}
#' @rdname rvmsinmix
#' @export
dvmsinmix <- function(x, kappa1, kappa2, kappa3,
mu1, mu2, pmix, log=FALSE)
{
allpar <- list("kappa1"=kappa1, "kappa2"=kappa2, "kappa3"=kappa3,
"mu1"=mu1, "mu2"=mu2, "pmix"=pmix)
allpar_len <- listLen(allpar)
if(min(allpar_len) != max(allpar_len)) stop("component size mismatch: number of components of the input parameter vectors differ")
if(any(allpar$pmix < 0)) stop("\'pmix\' must be non-negative")
sum_pmix <- sum(allpar$pmix)
if(signif(sum_pmix, 5) != 1) {
if(sum_pmix <= 0) stop("\'pmix\' must have at least one positive element")
allpar$pmix <- allpar$pmix/sum_pmix
warning("\'pmix\' is rescaled to add up to 1")
}
if(any(c(allpar$kappa1, allpar$kappa2) < 0)) stop("kappa1 and kappa2 must be nonnegative")
if(any(allpar$mu1 < 0 | allpar$mu1 >= 2*pi)) allpar$mu1 <- prncp_reg(allpar$mu1)
if(any(allpar$mu2 < 0 | allpar$mu2 >= 2*pi)) allpar$mu2 <- prncp_reg(allpar$mu2)
if((length(dim(x)) < 2 && length(x) != 2) || (length(dim(x)) == 2 && tail(dim(x), 1) != 2)
|| (length(dim(x)) > 2)) stop("x must either be a bivariate vector or a two-column matrix")
ncomp <- length(kappa1)
if (length(x) == 2) x <- matrix(x, nrow=1)
allcompden <- vapply(1:ncomp,
function(j) dvmsin(x, kappa1[j], kappa2[j],
kappa3[j], mu1[j], mu2[j], FALSE),
rep(0, nrow(x)))
mixden <- c(allcompden %*% pmix)
if (log) {
log(mixden)
} else {
mixden
}
}
#' Fitting bivariate von Mises sine model mixtures using MCMC
#' @param ... arguments (other than \code{model}) passed to \link{fit_angmix}
#'
#' @details Wrapper for \link{fit_angmix} with \code{model = "vmsin"}
#'
#' @examples
#' # illustration only - more iterations needed for convergence
#' fit.vmsin.20 <- fit_vmsinmix(tim8, ncomp = 3, n.iter = 20,
#' n.chains = 1)
#' fit.vmsin.20
#'
#' @export
fit_vmsinmix <- function(...)
{
fit_angmix(model = "vmsin", ...)
}
vmsin_var_cor_singlepar_numeric <- function(kappa1, kappa2, kappa3) {
fn_vmsin_const <- function(pars) {
const_vmsin(pars[1], pars[2], pars[3])
}
const <- fn_vmsin_const(c(kappa1, kappa2, kappa3))
grad <- numDeriv::grad(fn_vmsin_const, c(kappa1, kappa2, kappa3))
names(grad) <- c("k1", "k2", "k3")
hess <- numDeriv::hessian(fn_vmsin_const, c(kappa1, kappa2, kappa3))
dimnames(hess) <- list(c("k1", "k2", "k3"), c("k1", "k2", "k3"))
rho_fl <- unname(
(grad["k3"] * hess["k1", "k2"]) /
sqrt(
hess["k1", "k1"] * (const - hess["k1", "k1"])
* hess["k2", "k2"] * (const - hess["k2", "k2"])
)
)
rho_js <- unname(
grad["k3"] /
sqrt((const - hess["k1", "k1"]) * (const - hess["k2", "k2"]))
)
var1 <- unname(1 - grad["k1"]/const)
var2 <- unname(1 - grad["k2"]/const)
# if (rho_js <= -1) {
# rho_js <- -1
# } else if (rho_js >= 1) {
# rho_js <- 1
# }
#
# dat <- rvmsin(N, kappa1, kappa2, kappa3, 0, 0)
#
# ave_sin1sin2 <- sum(sin(dat[, 1]) * sin(dat[, 2]))/N
# ave_cos1cos2 <- sum(cos(dat[, 1]) * cos(dat[, 2]))/N
#
# ave_sin1sq <- sum(sin(dat[, 1])^2)/N
# ave_cos1sq <- 1-ave_sin1sq
# ave_cos1 <- sum(cos(dat[, 1]))/N
#
# ave_sin2sq <- sum(sin(dat[, 2])^2)/N
# ave_cos2sq <- 1-ave_sin2sq
# ave_cos2 <- sum(cos(dat[, 2]))/N
#
# rho_js <- ave_sin1sin2/sqrt(ave_sin1sq * ave_sin2sq)
# # ifelse(ave_sin1sin2 >= 0, 1, -1) *
# # min(abs(ave_sin1sin2)/sqrt(ave_sin1sq * ave_sin2sq), 1)
#
# rho_fl <- rho_js *
# ave_cos1cos2/sqrt(ave_cos1sq * ave_cos2sq)
# # ifelse(ave_cos1cos2 >= 0, 1, -1) *
# # min(abs(ave_cos1cos2)/sqrt(ave_cos1sq * ave_cos2sq), 1)
#
# var1 <- min(1 - ave_cos1, 1)
# var2 <- min(1 - ave_cos2, 1)
list(var1 = var1, var2 = var2, rho_fl = rho_fl, rho_js = rho_js)
}
vmsin_var_cor_singlepar <- function(kappa1, kappa2, kappa3, ...) {
  if (max(kappa1, kappa2, abs(kappa3)) > 150) {
out <- vmsin_var_cor_singlepar_numeric(kappa1, kappa2, kappa3)
} else {
out <- vmsin_var_corr_anltc(kappa1, kappa2, kappa3)
}
for (rho in c("rho_js", "rho_fl")) {
if (out[[rho]] <= -1) {
out[[rho]] <- -1
} else if (out[[rho]] >= 1) {
out[[rho]] <- 1
}
}
for (var in c("var1", "var2")) {
if (out[[var]] <= 0) {
out[[var]] <- 0
} else if (out[[var]] >= 1) {
out[[var]] <- 1
}
}
out
}
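# Example (illustration only): vmsin_var_cor_singlepar(2, 3, 1) uses the
# analytic formulas; very large concentrations (greater than 150) fall back
# on the numerical-derivative version defined above.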
rvmsin_1par <- function(n=1, kappa1=1, kappa2=1, kappa3=1,
mu1=0, mu2=0, method = "vmprop")
{
if (abs(abs(kappa3) - kappa1) < 1e-8 |
abs(abs(kappa3) - kappa2) < 1e-8)
kappa3 <- kappa3*(1+1e-6)
if (method == "vmprop") {
log_const_vmsin <- log(const_vmsin(kappa1, kappa2, kappa3))
log_2pi <- log(2*pi)
unimodal_x <- TRUE
phistar <- NULL
# first check if the joint density is bimodal
if (kappa3^2 >= kappa1*kappa2) {
# now check if the x marginal is bimodal
if (A_bessel(kappa2) > kappa1*kappa2/kappa3^2) {
unimodal_x <- FALSE
phistar_eqn <- function(phi) {
a_phi <- sqrt(kappa2^2 + kappa3^2*sin(phi)^2)
cos(phi)*A_bessel(a_phi)/a_phi - kappa1/kappa3^2
}
# find the modes in (-pi, pi)
find_root <- uniroot.all(phistar_eqn, c(-pi, pi))
# if no root found, use the naive two dimensional rejection sampler
      if (length(find_root) == 0) {  # uniroot.all returns numeric(0), not NULL, when no root is found
method <- "naive"
} else {
phistar <- find_root[1]
}
}
}
if (method=="vmprop" & unimodal_x) {
grid_0_2pi <- seq(0, 2*pi, length.out = 100)
# do minimax to find optimum kappa and K
# abs difference between the target log marginal den
# and log proposal den
obj_kappa_abs <- function(kappa) {
obj_kappa_phi <- function(phi) {
cos_phi_mu1 <- cos(phi-mu1)
a_phi <- sqrt(kappa2^2 + kappa3^2*sin(phi-mu1)^2)
log_I_a_phi <- log(besselI(a_phi, 0))
log_target_marginal <- -log_const_vmsin + log_2pi + log_I_a_phi + kappa2*cos_phi_mu1
log_proposal <- -log_2pi - log(besselI(kappa, 0)) + kappa*cos_phi_mu1
abs(log_target_marginal - log_proposal)
}
optimize(obj_kappa_phi, c(0, 2*pi), maximum=TRUE)$objective
}
# minimize obj_kappa_abs wrt kappa
kappa_opt <- optimize(obj_kappa_abs, c(0, max(50, kappa2*2)))$minimum
log_prop_den <- function(kappa, phi) {
cos_phi <- cos(phi)
-log_2pi - log(besselI(kappa, 0)) +
kappa*cos_phi
}
# find max of log_prop_den
max_log_prop_den <- max(log_prop_den(kappa_opt, grid_0_2pi))
      # points with density < exp(max_log_prop_den)*exp(-10)
      # are unlikely to appear, so use exp(max_log_prop_den)*exp(-10)
      # as the lower bound for the proposal density to avoid instability
# browser()
# now maximize the log ratio of the two densities
# w.r.t phi, given kappa = kappa_opt
obj_kappa_phi <- function(kappa, phi) {
cos_phi_mu1 <- cos(phi-mu1)
a_phi <- sqrt(kappa2^2 + kappa3^2*sin(phi-mu1)^2)
log_I_a_phi <- log(besselI(a_phi, 0))
log_target_marginal <- -log_const_vmsin + log_2pi + log_I_a_phi + kappa2*cos_phi_mu1
log_proposal <- pmax(-log_2pi - log(besselI(kappa, 0)) +
kappa*cos_phi_mu1, max_log_prop_den-10)
# log_proposals are >= max_log_prop_den -10 to avoid instability
# also, proposal deviates with log density less than this bound are unlikely
log_target_marginal - log_proposal
}
# maximize the log-ratio over a grid
(logK <- max(obj_kappa_phi(kappa_opt, seq(0, 2*pi, length.out = 200))))
# cat(exp(-logK))
# rcos_unimodal(1, kappa1, kappa2, kappa3, mu1, mu2,
# kappa_opt, log(besselI(kappa_opt, 0, TRUE)) + kappa_opt,
# logK, log_const_vmsin)
rsin_unimodal(n, kappa1, kappa2, kappa3, mu1, mu2,
kappa_opt, log(BESSI0_C(kappa_opt)), logK,
log_const_vmsin)
}
else if (method=="vmprop" & !unimodal_x) {
# browser()
      # do all optimizations in (-pi, pi), then map back to [0, 2*pi]
# change the modes into [0, 2*pi]
mode_1 <- prncp_reg(mu1 + phistar)
mode_2 <- prncp_reg(mu1 - phistar)
# sin_phistar <- sin(phistar)
unifpropn <- 1e-10
vmpropn <- (1-1e-10)/2
grid_0_2pi <- seq(0, 2*pi, length.out = 100)
      # proposal for x marginal = vmpropn-vmpropn-unifpropn mixture
      # of vm(kappa, mu1 + phistar), vm(kappa, mu1 - phistar)
      # and unif(0, 2*pi) (the uniform component avoids overflow
      # of the ratio where the densities are flat)
# do minimax to find optimum kappa and K
# don't worry about mu1 mu2 while optimizing!
# abs difference between the target log marginal den
# and log proposal den
obj_kappa_abs <- function(kappa) {
obj_kappa_phi <- function(phi) {
cos_phi_mu1 <- cos(phi-mu1)
a_phi <- sqrt(kappa2^2 + kappa3^2*sin(phi-mu1)^2)
log_I_a_phi <- log(besselI(a_phi, 0))
log_target_marginal <- -log_const_vmsin + log_2pi + log_I_a_phi + kappa2*cos_phi_mu1
log_proposal <-
-log_2pi + log(exp(log(vmpropn) - log(besselI(kappa, 0)) +
kappa*cos(phi-mode_1) +
log(1+exp(kappa*(cos(phi-mode_2)
- cos(phi-mode_1))))
) + unifpropn)
# simplified mixture density
abs(log_target_marginal - log_proposal)
}
# optimize(obj_kappa_phi, c(0, 2*pi), maximum=TRUE)$objective
max(obj_kappa_phi(grid_0_2pi))
}
# minimize obj_kappa_abs wrt kappa
(kappa_opt <- optimize(obj_kappa_abs,
c(0, max(kappa1, kappa2,
abs(kappa3))))$minimum)
# browser()
#
# target_den <- function(phi) {
# cos_phi_mu1 <- cos(phi-mu1)
# a_phi <- sqrt(kappa2^2 + kappa3^2*sin(phi-mu1)^2)
# log_I_a_phi <- log(besselI(a_phi, 0))
# log_target_marginal <- -log_const_vmsin + log_2pi + log_I_a_phi + kappa2*cos_phi_mu1
# exp(log_target_marginal)
# }
log_prop_den <- function(kappa, phi) {
cos_phi <- cos(phi)
-log_2pi + log(exp(log(vmpropn) - log(besselI(kappa, 0)) +
kappa*cos(phi-mode_1) +
log(1+exp(kappa*(cos(phi-mode_2)
- cos(phi-mode_1))))
) + unifpropn)
}
# find max of log_prop_den
max_log_prop_den <- max(log_prop_den(kappa_opt, grid_0_2pi))
      # points with density < exp(max_log_prop_den)*exp(-10)
      # are unlikely to appear, so use exp(max_log_prop_den)*exp(-10)
      # as the lower bound for the proposal density to avoid instability
# now maximize the log ratio of the two densities
# w.r.t phi, given kappa = kappa_opt
obj_kappa_phi <- function(kappa, phi) {
cos_phi_mu1 <- cos(phi-mu1)
a_phi <- sqrt(kappa2^2 + kappa3^2*sin(phi-mu1)^2)
log_I_a_phi <- log(besselI(a_phi, 0))
log_target_marginal <- -log_const_vmsin + log_2pi + log_I_a_phi + kappa2*cos_phi_mu1
log_proposal <-
pmax(
-log_2pi + log(exp(log(vmpropn) - log(besselI(kappa, 0)) +
kappa*cos(phi-mode_1) +
log(1+exp(kappa*(cos(phi-mode_2)
- cos(phi-mode_1))))
) + unifpropn),
max_log_prop_den-10)
# log_proposals are >= max_log_prop_den -10 to avoid instability
# also, proposal deviates with log density less than this bound are unlikely
log_target_marginal - log_proposal
}
#
# prop_den <- function(kappa, phi) {
# exp(log_prop_den(kappa, phi))
# }
#
# pp <- function(kappa, phi) {
# cos_phi <- cos(phi)
# log_proposal <-
# pmax(-log_2pi + log(exp(log(vmpropn) - log(besselI(kappa, 0)) +
# kappa*cos(phi-mode_1) +
# log(1+exp(kappa*(cos(phi-mode_2) - cos(phi-mode_1)))))
# + unifpropn), max_log_prop_den-10)
# exp(log_proposal)
# }
# #
# integrate(target_den, 0, 2*pi)
# integrate(function(x) prop_den(kappa_opt, x), 0, 2*pi)
# maximize the log-ratio over a grid
(logK <- max(obj_kappa_phi(kappa_opt, seq(0, 2*pi, length.out = 200))))
exp(logK)
# poin <- seq(0, 2*pi, length.out = 100)
# # ggp <- target_den(poin)
# # ppp <- prop_den(kappa_opt, poin)
# # #
# # plot(poin, ggp, type="l", ylim=range(ggp, ppp, ggp/ppp))
# # points(poin, ppp, type="l", col = "blue")
# # points(poin, ggp/ppp, type="l")
#
#
# lggp <- log(target_den(poin))
# lppp <- log(prop_den(kappa_opt, poin))
# difp <- obj_kappa_phi(kappa_opt, poin)
# #
# plot(poin, lggp, type="l", ylim=range(lggp, lppp, lggp-lppp))
# points(poin, lppp, type="l", col = "blue")
# points(poin, lggp-lppp, type="l")
# points(poin, difp, type="l", col = "red")
# abline(v=c(mode_1, mode_2))
# #
# #
# #
# # browser()
# cat(exp(-logK))
rsin_bimodal(n, kappa1, kappa2, kappa3, mu1, mu2,
kappa_opt, log(BESSI0_C(kappa_opt)), logK,
log_const_vmsin, mode_1, mode_2,
vmpropn, unifpropn)
}
}
else if (method == "naive") {
opt_obj <- function(k1=1, k2=1, k3=0, mu1=0, mu2=0) {
# for numerical stability, if k3 < 0, fabs(k1+k3) < 1e-5 or fabs(k2+k3) < 1e-5
# make k3 = k3 * (1+1e-5)
if (k3 < 0) {
while (abs(k1 + k3) < 1e-5 || abs(k2 + k3) < 1e-5) {
k3 = k3*(1+1e-5)
}
}
obj <- optim(c(0,0), fn = function(x) -(k1*cos(x[1]-mu1)+k2*cos(x[2]-mu2)+k3*sin(x[1]-mu1)*sin(x[2]-mu2)),
gr = function(x) -c(-k1*sin(x[1]-mu1)+k3*cos(x[1]-mu1)*sin(x[2]-mu2),
-k2*sin(x[2]-mu2)+k3*sin(x[1]-mu1)*cos(x[2]-mu2)))
-obj$value
}
upper_bd <- opt_obj(kappa1, kappa2, kappa3, mu1, mu2)
rsin_onepar(n, kappa1, kappa2, kappa3, mu1, mu2, upper_bd)
}
}
| /scratch/gouwar.j/cran-all/cranData/BAMBI/R/all_vmsin_fns.R |
#' The bivariate Wrapped Normal distribution
#' @inheritParams rvmsin
#' @inheritParams rwnorm
#' @param int.displ integer displacement. If \code{int.displ =} M, then each infinite sum in the
#' density is approximated by a finite sum over 2*M + 1 elements. (See Details.) The allowed values are 1, 2, 3, 4 and 5. Default is 3.
#' @param mu1,mu2 vectors of mean parameters.
#' @param kappa1,kappa2,kappa3 vectors of concentration parameters; \code{kappa1, kappa2 > 0},
#' and \code{kappa3^2 < kappa1*kappa2}.
#' @param ... additional arguments passed to \link{rmvnorm} from package \code{mvtnorm}
#' @importFrom mvtnorm rmvnorm
#'
#' @details
#' The bivariate wrapped normal density at the point \eqn{x = (x_1, x_2)} is given by,
#' \deqn{f(x) = \sqrt((\kappa_1 \kappa_2 - (\kappa_3)^2)) / (2\pi) \sum \exp(-1/2 * (\kappa_1 (T_1)^2 + \kappa_2 (T_2)^2 + 2 \kappa_3 (T_1) (T_2)) )}
#' where
#' \deqn{T_1 = T_1(x, \mu, \omega) = (x_1 - \mu_1 - 2\pi\omega_1)}
#' \deqn{T_2 = T_2(x, \mu, \omega) = (x_2 - \mu_2 - 2\pi\omega_2)}
#' the sum extends over all pairs of integers \eqn{\omega = (\omega_1, \omega_2)},
#' and is approximated by a sum over \eqn{(\omega_1, \omega_2)} in \eqn{\{-M, -M+1, ..., M-1, M \}^2} if \code{int.displ = } \eqn{M}.
#'
#' Note that the above density is essentially the "wrapped" version of a bivariate normal density with mean
#' \deqn{\mu = (\mu_1, \mu_2)}
#' and dispersion matrix \eqn{\Sigma = \Delta^{-1}}, where
#'
#' \tabular{lrrr}{
#' \tab \eqn{\kappa_1} \tab \eqn{ } \tab \eqn{\kappa_3} \cr
#' \eqn{\Delta =} \tab \eqn{ } \tab \eqn{ } \tab \eqn{ } \cr
#' \tab \eqn{\kappa_3} \tab \eqn{ } \tab \eqn{\kappa_2}.
#' }
#'
#'
#' @return \code{dwnorm2} gives the density and \code{rwnorm2} generates random deviates.
#'
#' @examples
#' kappa1 <- c(1, 2, 3)
#' kappa2 <- c(1, 6, 5)
#' kappa3 <- c(0, 1, 2)
#' mu1 <- c(1, 2, 5)
#' mu2 <- c(0, 1, 3)
#' x <- diag(2, 2)
#' n <- 10
#'
#' # when x is a bivariate vector and parameters are all scalars,
#' # dwnorm2 returns single density
#' dwnorm2(x[1, ], kappa1[1], kappa2[1], kappa3[1], mu1[1], mu2[1])
#'
#' # when x is a two column matrix and parameters are all scalars,
#' # dwnorm2 returns a vector of densities calculated at the rows of
#' # x with the same parameters
#' dwnorm2(x, kappa1[1], kappa2[1], kappa3[1], mu1[1], mu2[1])
#'
#' # if x is a bivariate vector and at least one of the parameters is
#' # a vector, all parameters are recycled to the same length, and
#' # dwnorm2 returns a vector with ith element being the density
#' # evaluated at x with parameter values kappa1[i], kappa2[i],
#' # kappa3[i], mu1[i] and mu2[i]
#' dwnorm2(x[1, ], kappa1, kappa2, kappa3, mu1, mu2)
#'
#' # if x is a two column matrix and at least one of the parameters is
#' # a vector, rows of x and the parameters are recycled to the same
#' # length, and dwnorm2 returns a vector with ith element being the
#' # density evaluated at ith row of x with parameter values kappa1[i],
#' # kappa2[i], kappa3[i], mu1[i] and mu2[i]
#' dwnorm2(x, kappa1, kappa2, kappa3, mu1, mu2)
#'
#' # when parameters are all scalars, number of observations generated
#' # by rwnorm2 is n
#' rwnorm2(n, kappa1[1], kappa2[1], kappa3[1], mu1[1], mu2[1])
#'
#' # when at least one of the parameters is a vector, all parameters are
#' # recycled to the same length, n is ignored, and the number of
#' # observations generated by rwnorm2 is the same as the length of the
#' # recycled vectors
#' rwnorm2(n, kappa1, kappa2, kappa3, mu1, mu2)
#'
#' @export
rwnorm2 <- function(n, kappa1=1, kappa2=1,
kappa3=0, mu1=0, mu2=0, ...)
{
if(any(c(kappa1, kappa2) < 0))
stop("kappa1 and kappa2 must be non-negative")
if(any(mu1 < 0 | mu1 >= 2*pi)) mu1 <- prncp_reg(mu1)
if(any(mu2 < 0 | mu2 >= 2*pi)) mu2 <- prncp_reg(mu2)
if(max(length(kappa1), length(kappa2), length(kappa3),
length(mu1), length(mu2)) > 1) {
expanded <- expand_args(kappa1, kappa2, kappa3, mu1, mu2)
kappa1 <- expanded[[1]]; kappa2 <- expanded[[2]]; kappa3 <- expanded[[3]]
mu1 <- expanded[[4]]; mu2 <- expanded[[5]]
if(any(kappa1*kappa2 - kappa3*kappa3 < 0))
stop("abs(kappa3) must be less than or equal to sqrt(kappa1*kappa2) in wnorm2")
m <- length(kappa1)
samp <- matrix(0, m, 2)
for (j in 1:m) {
if (kappa1[j] < 1e-10 & kappa2[j] < 1e-10) {
samp[j, ] <- runif(2, 0, 2*pi)
}
else {
mu_curr <- c(mu1[j], mu2[j])
sigma_curr <- matrix(c(kappa2[j], -kappa3[j], -kappa3[j],
kappa1[j])/(kappa1[j]*kappa2[j] - kappa3[j]^2), 2)
# samp <- t(sapply(1:m, function(j) rnorm2(1, mu_list[[j]], sigma_list[[j]])))
samp[j, ] <-
rmvnorm(1, mean=mu_curr, sigma=sigma_curr, ...)
}
}
}
else {
if(kappa1*kappa2 - kappa3*kappa3 < 0)
stop("abs(kappa3) must be less than or equal to sqrt(kappa1*kappa2) in wnorm2")
if (kappa1*kappa2 < 1e-10) {
samp <- matrix(runif(2*n, 0, 2*pi), ncol=2)
}
else {
mu <- c(mu1, mu2)
sigma <- matrix(c(kappa2, -kappa3, -kappa3, kappa1)/(kappa1*kappa2 - kappa3*kappa3), 2)
samp <- rmvnorm(n, mean=mu, sigma=sigma, ...)
}
}
prncp_reg(samp)
}
#' @rdname rwnorm2
#' @export
dwnorm2 <- function(x, kappa1=1, kappa2=1, kappa3=0, mu1=0,
mu2=0, int.displ, log=FALSE)
{
if(missing(int.displ)) int.displ <- 3
else if(int.displ >= 5) int.displ <- 5
else if(int.displ <= 1) int.displ <- 1
displ <- floor(int.displ)
omega.2pi.all <- expand.grid(-displ:displ,-displ:displ) * (2*pi) # 2pi * integer displacements
omega.2pi <- as.matrix(omega.2pi.all)
if(any(c(kappa1, kappa2) < 0))
stop("kappa1 and kappa2 must be non-negative")
if(any(mu1 < 0 | mu1 >= 2*pi)) mu1 <- prncp_reg(mu1)
if(any(mu2 < 0 | mu2 >= 2*pi)) mu2 <- prncp_reg(mu2)
if((length(dim(x)) < 2 && length(x) != 2) || (length(dim(x)) == 2 && tail(dim(x), 1) != 2)
|| (length(dim(x)) > 2)) stop("x must either be a bivariate vector or a two-column matrix")
if(max(length(kappa1), length(kappa2), length(kappa3), length(mu1), length(mu2)) > 1) {
expanded <- expand_args(kappa1, kappa2, kappa3, mu1, mu2)
kappa1 <- expanded[[1]]
kappa2 <- expanded[[2]]
kappa3 <- expanded[[3]]
mu1 <- expanded[[4]]
mu2 <- expanded[[5]]
}
par.mat <- rbind(kappa1, kappa2, kappa3, mu1, mu2)
n_par <- ncol(par.mat)
if (length(x) == 2) x <- matrix(x, nrow = 1)
n_x <- nrow(x)
if (all (kappa1 > 1e-10 | kappa2 > 1e-10)) {
# regular wnorm2 density
if (n_par == 1) {
den <- c(dwnorm2_manyx_onepar(x, kappa1, kappa2, kappa3,
mu1, mu2, omega.2pi))
} else if (n_x == 1) {
den <- c(dwnorm2_onex_manypar(c(x), kappa1, kappa2, kappa3,
mu1, mu2, omega.2pi))
} else {
x_set <- 1:n_x
par_set <- 1:n_par
expndn_set <- expand_args(x_set, par_set)
x_set <- expndn_set[[1]]
par_set <- expndn_set[[2]]
den <- c(dwnorm2_manyx_manypar(x[x_set, ], kappa1[par_set],
kappa2[par_set], kappa3[par_set],
mu1[par_set], mu2[par_set], omega.2pi))
}
}
else {
# some can be uniform
x_set <- 1:n_x
par_set <- 1:n_par
expndn_set <- expand_args(x_set, par_set)
x_set <- expndn_set[[1]]
par_set <- expndn_set[[2]]
x_long <- x[x_set, , drop=FALSE]
kappa1_long <- kappa1[par_set]
kappa2_long <- kappa2[par_set]
kappa3_long <- kappa3[par_set]
mu1_long <- mu1[par_set]
mu2_long <- mu2[par_set]
n_x_final <- nrow(x_long)
den <- rep(0, n_x_final)
which_unif <- which(kappa1_long < 1e-10 & kappa2_long < 1e-10)
n_unif <- length(which_unif)
# browser()
den[which_unif] <- 1/(4*pi^2)
if (n_unif < n_x_final)
den[-which_unif] <- c(dwnorm2_manyx_manypar(x_long[-which_unif, , drop=FALSE], kappa1_long[-which_unif],
kappa2_long[-which_unif], kappa3_long[-which_unif],
mu1_long[-which_unif], mu2_long[-which_unif],
omega.2pi))
}
if (log)
den <- log(den)
den
}
#' The bivariate Wrapped Normal mixtures
#' @inheritParams rvmsinmix
#' @inheritParams rwnorm2
#' @param mu1,mu2 vectors of mean parameters.
#' @param kappa1,kappa2,kappa3 vectors of concentration parameters; \code{kappa1, kappa2 > 0, kappa3^2 < kappa1*kappa2} for each component.
#'
#' @details All the argument vectors \code{pmix, kappa1, kappa2, kappa3, mu1} and \code{mu2} must be of the same length,
#' with \eqn{j}-th element corresponding to the \eqn{j}-th component of the mixture distribution.
#' @details The bivariate wrapped normal mixture distribution with component size \code{K = \link{length}(pmix)} has density
#' \deqn{g(x) = \sum p[j] * f(x; \kappa_1[j], \kappa_2[j], \kappa_3[j], \mu_1[j], \mu_2[j])}
#' where the sum extends over \eqn{j}; \eqn{p[j]; \kappa_1[j], \kappa_2[j], \kappa_3[j]}; and \eqn{\mu_1[j], \mu_2[j]} respectively denote the mixing proportion,
#' the three concentration parameters and the two mean parameter for the \eqn{j}-th component, \eqn{j = 1, ..., K},
#' and \eqn{f(. ; \kappa_1, \kappa_2, \kappa_3, \mu_1, \mu_2)} denotes the density function of the wrapped normal distribution
#' with concentration parameters \eqn{\kappa_1, \kappa_2, \kappa_3} and mean parameters \eqn{\mu_1, \mu_2}.
#' @return \code{dwnorm2mix} computes the density and \code{rwnorm2mix} generates random deviates from the mixture density.
#'
#' @examples
#' kappa1 <- c(1, 2, 3)
#' kappa2 <- c(1, 6, 5)
#' kappa3 <- c(0, 1, 2)
#' mu1 <- c(1, 2, 5)
#' mu2 <- c(0, 1, 3)
#' pmix <- c(0.3, 0.4, 0.3)
#' x <- diag(2, 2)
#' n <- 10
#'
#' # mixture densities calculated at the rows of x
#' dwnorm2mix(x, kappa1, kappa2, kappa3, mu1, mu2, pmix)
#'
#' # number of observations generated from the mixture distribution is n
#' rwnorm2mix(n, kappa1, kappa2, kappa3, mu1, mu2, pmix)
#'
#' @export
rwnorm2mix <- function(n, kappa1, kappa2, kappa3,
mu1, mu2, pmix, ...)
{
allpar <- list(kappa1=kappa1, kappa2=kappa2, kappa3=kappa3,
mu1=mu1, mu2=mu2, pmix=pmix)
allpar_len <- listLen(allpar)
if(min(allpar_len) != max(allpar_len))
stop("component size mismatch: number of components of the input parameter vectors differ")
if(any(allpar$pmix < 0)) stop("\'pmix\' must be non-negative")
sum_pmix <- sum(allpar$pmix)
if(signif(sum_pmix, 5) != 1) {
if(sum_pmix <= 0) stop("\'pmix\' must have at least one positive element")
allpar$pmix <- allpar$pmix/sum_pmix
warning("\'pmix\' is rescaled to add up to 1")
}
if(any(c(allpar$kappa1, allpar$kappa2) <= 0))
stop("kappa1 and kappa2 must be positive in wnorm2")
if(any(allpar$kappa1*allpar$kappa2 - allpar$kappa3^2 <= 1e-10))
stop("abs(kappa3) must be less than sqrt(kappa1*kappa2) in wnorm2")
if(any(allpar$mu1 < 0 | allpar$mu1 >= 2*pi)) allpar$mu1 <- prncp_reg(allpar$mu1)
if(any(allpar$mu2 < 0 | allpar$mu2 >= 2*pi)) allpar$mu2 <- prncp_reg(allpar$mu2)
out <- matrix(0, n, 2)
ncomp <- allpar_len[1] # number of components
comp_ind <- cID(tcrossprod(rep(1, n), allpar$pmix), ncomp, runif(n))
# n samples from multinom(ncomp, pmix)
for(j in seq_len(ncomp)) {
obs_ind_j <- which(comp_ind == j)
n_j <- length(obs_ind_j)
if(n_j > 0) {
out[obs_ind_j, ] <- rwnorm2(n_j, kappa1[j], kappa2[j],
kappa3[j], mu1[j], mu2[j], ...)
}
}
out
}
#' @rdname rwnorm2mix
#' @export
dwnorm2mix <- function(x, kappa1, kappa2, kappa3,
mu1, mu2, pmix, int.displ, log=FALSE)
{
if(missing(int.displ)) int.displ <- 3
else if(int.displ >= 5) int.displ <- 5
else if(int.displ <= 1) int.displ <- 1
displ <- floor(int.displ)
omega.2pi.all <- expand.grid(-displ:displ,-displ:displ) * (2*pi) # 2pi * integer displacements
omega.2pi <- as.matrix(omega.2pi.all)
allpar <- list("kappa1"=kappa1, "kappa2"=kappa2, "kappa3"=kappa3,
"mu1"=mu1, "mu2"=mu2, "pmix"=pmix)
allpar_len <- listLen(allpar)
if(min(allpar_len) != max(allpar_len)) stop("component size mismatch: number of components of the input parameter vectors differ")
if(any(allpar$pmix < 0)) stop("\'pmix\' must be non-negative")
sum_pmix <- sum(allpar$pmix)
if(signif(sum_pmix, 5) != 1) {
if(sum_pmix <= 0) stop("\'pmix\' must have at least one positive element")
allpar$pmix <- allpar$pmix/sum_pmix
warning("\'pmix\' is rescaled to add up to 1")
}
if(any(allpar$kappa1*allpar$kappa2 - allpar$kappa3*allpar$kappa3 <= 1e-10))
stop("abs(kappa3) must be less than sqrt(kappa1*kappa2) in wnorm2")
if(any(c(allpar$kappa1, allpar$kappa2) <= 0))
stop("kappa1 and kappa2 must be positive in wnorm2")
if(any(allpar$mu1 < 0 | allpar$mu1 >= 2*pi)) allpar$mu1 <- prncp_reg(allpar$mu1)
if(any(allpar$mu2 < 0 | allpar$mu2 >= 2*pi)) allpar$mu2 <- prncp_reg(allpar$mu2)
if((length(dim(x)) < 2 && length(x) != 2) || (length(dim(x)) == 2 && tail(dim(x), 1) != 2)
|| (length(dim(x)) > 2)) stop("x must either be a bivariate vector or a two-column matrix")
ncomp <- length(kappa1)
if (length(x) == 2) x <- matrix(x, nrow=1)
allcompden <- vapply(1:ncomp,
function(j) dwnorm2(x, kappa1[j], kappa2[j],
kappa3[j], mu1[j], mu2[j],
int.displ, FALSE),
rep(0, nrow(x)))
mixden <- c(allcompden %*% pmix)
if (log) {
log(mixden)
} else {
mixden
}
}
#' Fitting bivariate wrapped normal model mixtures using MCMC
#' @inheritParams fit_vmsinmix
#'
#' @details
#' Wrapper for \link{fit_angmix} with \code{model = "wnorm2"}.
#'
#'
#' @examples
#' # illustration only - more iterations needed for convergence
#' fit.wnorm2.10 <- fit_wnorm2mix(tim8, ncomp = 3, n.iter = 10,
#' n.chains = 1)
#' fit.wnorm2.10
#'
#' @export
fit_wnorm2mix <- function(...)
{
fit_angmix(model="wnorm2", ...)
}
wnorm2_var_cor_singlepar <- function(kappa1, kappa2, kappa3) {
den <- kappa1*kappa2 - kappa3^2
sig1_sq <- kappa2/den
sig2_sq <- kappa1/den
sig12 <- -kappa3/den
rho_fl <- sinh(2*sig12) /
sqrt(sinh(2*sig1_sq)* sinh(2*sig2_sq))
rho_js <- sinh(sig12) /
sqrt(sinh(sig1_sq)* sinh(sig2_sq))
list(var1 = 1-exp(-0.5*sig1_sq),
var2 = 1-exp(-0.5*sig2_sq),
rho_fl = rho_fl,
rho_js = rho_js
)
}
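# Example (illustration only): for kappa1 = kappa2 = 2, kappa3 = 0, the
# implied normal variances are both 1/2, so
# wnorm2_var_cor_singlepar(2, 2, 0)
# gives var1 = var2 = 1 - exp(-1/4) and rho_fl = rho_js = 0.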
| /scratch/gouwar.j/cran-all/cranData/BAMBI/R/all_wnorm2_fns.R |
# list of functions
#' The univariate Wrapped Normal distribution
#' @inheritParams rvm
#' @param int.displ integer displacement. If \code{int.displ =} M, then the infinite sum in the
#' density is approximated by a sum over 2*M + 1 elements. (See Details.) The allowed values are 1, 2, 3, 4 and 5. Default is 3.
#' @details If \code{mu} and \code{kappa} are not specified they assume the default values of \code{0} and \code{1} respectively.
#' @details The univariate wrapped normal distribution has density
#' \deqn{f(x) = \sqrt(\kappa/(2\pi)) \sum \exp(- (\kappa/2) (x - \mu - 2\pi\omega)^2)}
#' where the sum extends over all integers \eqn{\omega},
#' and is approximated by a sum over \eqn{\omega} in \eqn{\{-M, -M+1, ..., M-1, M \}} if \code{int.displ = } \eqn{M}.
#' @return \code{dwnorm} gives the density and \code{rwnorm} generates random deviates.
#'
#' @examples
#'
#' kappa <- 1:3
#' mu <- 0:2
#' x <- 1:10
#' n <- 10
#'
#'
#' # when x and both parameters are scalars, dwnorm returns a single density
#' dwnorm(x[1], kappa[1], mu[1])
#'
#' # when x is a vector but both the parameters are scalars, dwnorm returns a vector of
#' # densities calculated at each entry of x with the same parameters
#' dwnorm(x, kappa[1], mu[1])
#'
#' # if x is scalar and at least one of the two parameters is a vector, both parameters are
#' # recycled to the same length, and dwnorm returns a vector with ith element being the
#' # density evaluated at x with parameter values kappa[i] and mu[i]
#' dwnorm(x[1], kappa, mu)
#'
#' # if x and at least one of the two parameters is a vector, x and the two parameters are
#' # recycled to the same length, and dwnorm returns a vector with ith element being the
#' # density at ith element of the (recycled) x with parameter values kappa[i] and mu[i]
#' dwnorm(x, kappa, mu)
#'
#' # when parameters are all scalars, number of observations generated by rwnorm is n
#' rwnorm(n, kappa[1], mu[1])
#'
#' # when at least one of the two parameters is a vector, both are recycled to the same length,
#' # n is ignored, and the number of observations generated by rwnorm is the same as the length
#' # of the recycled vectors
#' rwnorm(n, kappa, mu)
#'
#' @export
rwnorm <- function(n=1, kappa = 1, mu = 0)
{
if(any(kappa < 0)) stop("kappa must be non-negative")
if(any(mu < 0 | mu >= 2*pi)) mu <- prncp_reg(mu)
if (all(kappa > 1e-10)) {
samp <- rnorm(n, mean = mu, sd = sqrt(1/kappa))
prncp_reg(samp)
} else {
expndn_set <- expand_args(kappa, mu, 1:n)
    kappa <- expndn_set[[1]]
    mu <- expndn_set[[2]]
# den <- rep(0, n_x)
n_final <- length(kappa)
which_unif <- which(kappa < 1e-10)
n_unif <- length(which_unif)
samp <- rep(0, n_final)
samp[which_unif] <- runif(n_unif, 0, 2*pi)
if (n_unif < n_final)
samp[-which_unif] <- prncp_reg(rnorm(n_final - n_unif, mu[-which_unif],
sqrt(1/kappa[-which_unif])))
# for (i in 1:n_x) {
# if (kappa1[par_set[i]] < 1e-10 | kappa2[par_set[i]] < 1e-10) {
# den[i] <- c(dwnorm2_onex_manypar(x[x_set[i], ], kappa[par_set[i]],
# kappa2[par_set[i]], kappa3[par_set[i]],
# mu1[par_set[i]], mu2[par_set[i]], omega.2pi))
# } else {
# den[i] <- 1/(4*pi^2)
# }
# }
samp
}
}
#' @rdname rwnorm
#' @export
dwnorm <- function(x, kappa = 1, mu = 0, int.displ,
log=FALSE)
{
if(any(kappa < 0)) stop("kappa must be non-negative")
if(any(mu < 0 | mu >= 2*pi)) mu <- prncp_reg(mu)
if(missing(int.displ)) int.displ <- 3
else if(int.displ >= 5) int.displ <- 5
else if(int.displ <= 1) int.displ <- 1
displ <- floor(int.displ)
omega.2pi.1d <- (-displ):displ * (2*pi) # 2pi * 1d integer displacements
if(max(length(kappa), length(mu)) > 1) {
expanded <- expand_args(kappa, mu)
kappa <- expanded[[1]]; mu <- expanded[[2]]
}
par.mat <- rbind(kappa, mu)
n_par <- ncol(par.mat)
n_x <- length(x)
# browser()
if (all(kappa > 1e-10)) {
if (n_par == 1) {
den <- c(duniwnorm_manyx_onepar(as.vector(x), kappa, mu, omega.2pi.1d))
} else if (n_x == 1) {
den <- c(duniwnorm_onex_manypar(x, kappa, mu, omega.2pi.1d))
} else {
x_set <- 1:n_x
par_set <- 1:n_par
expndn_set <- expand_args(x_set, par_set)
x_set <- expndn_set[[1]]
par_set <- expndn_set[[2]]
den <- c(duniwnorm_manyx_manypar(x[x_set], kappa[par_set],
mu[par_set], omega.2pi.1d))
}
} else {
x_set <- 1:n_x
par_set <- 1:n_par
expndn_set <- expand_args(x_set, par_set)
x_set <- expndn_set[[1]]
par_set <- expndn_set[[2]]
x_long <- x[x_set]
kappa_long <- kappa[par_set]
mu_long <- mu[par_set]
which_unif <- which(kappa_long < 1e-10)
n_x_long <- length(x_long)
den <- rep(0, n_x_long)
den[which_unif] <- 1/(2*pi)
if (length(which_unif) < n_x_long)
den[-which_unif] <- c(duniwnorm_manyx_manypar(x_long[-which_unif],
kappa_long[-which_unif],
mu_long[-which_unif],
omega.2pi.1d))
}
if (log) {
den <- log(den)
}
den
}
#' The univariate Wrapped Normal mixtures
#' @inheritParams rvmmix
#' @inheritParams rwnorm
#' @details \code{pmix}, \code{mu} and \code{kappa} must be of the same length, with \eqn{j}-th element corresponding to the \eqn{j}-th component of the mixture distribution.
#' @details The univariate wrapped normal mixture distribution with component size \code{K = \link{length}(pmix)} has density
#' \deqn{g(x) = p[1] * f(x; \kappa[1], \mu[1]) + ... + p[K] * f(x; \kappa[K], \mu[K])}
#' where \eqn{p[j], \kappa[j], \mu[j]} respectively denote the mixing proportion, concentration parameter and the mean parameter for the \eqn{j}-th component
#' and \eqn{f(. ; \kappa, \mu)} denotes the density function of the (univariate) wrapped normal distribution with mean parameter \eqn{\mu} and concentration parameter \eqn{\kappa}.
#' @return \code{dwnormmix} computes the density and \code{rwnormmix} generates random deviates from the mixture density.
#'
#' @examples
#' kappa <- 1:3
#' mu <- 0:2
#' pmix <- c(0.3, 0.3, 0.4)
#' x <- 1:10
#' n <- 10
#'
#' # mixture densities calculated at each point in x
#' dwnormmix(x, kappa, mu, pmix)
#'
#' # number of observations generated from the mixture distribution is n
#' rwnormmix(n, kappa, mu, pmix)
#'
#' @export
rwnormmix <- function(n=1, kappa, mu, pmix)
{
allpar <- list(kappa=kappa, mu=mu, pmix=pmix)
allpar_len <- listLen(allpar)
if(min(allpar_len) != max(allpar_len))
stop("component size mismatch: number of components of the input parameter vectors differ")
if(any(allpar$pmix < 0)) stop("\'pmix\' must be non-negative")
sum_pmix <- sum(allpar$pmix)
if(signif(sum_pmix, 5) != 1) {
if(sum_pmix <= 0) stop("\'pmix\' must have at least one positive element")
allpar$pmix <- allpar$pmix/sum_pmix
warning("\'pmix\' is rescaled to add up to 1")
}
if(any(allpar$kappa <= 0)) stop("kappa must be positive in wnorm")
if(any(allpar$mu < 0 | allpar$mu >= 2*pi)) allpar$mu <- prncp_reg(allpar$mu)
clus_label <- cID(tcrossprod(rep(1, n), allpar$pmix), length(allpar$pmix), runif(n))
samp <- rnorm(length(clus_label), mean = allpar$mu[clus_label], sd = 1/sqrt(allpar$kappa[clus_label]))
prncp_reg(samp)
}
#' @rdname rwnormmix
#' @export
dwnormmix <- function(x, kappa, mu, pmix, int.displ=3, log=FALSE)
{
allpar <- list(kappa=kappa, mu=mu, pmix=pmix)
allpar_len <- listLen(allpar)
if(min(allpar_len) != max(allpar_len))
stop("component size mismatch: number of components of the input parameter vectors differ")
if(any(allpar$pmix < 0)) stop("\'pmix\' must be non-negative")
sum_pmix <- sum(allpar$pmix)
if(signif(sum_pmix, 5) != 1) {
if(sum_pmix <= 0) stop("\'pmix\' must have at least one positive element")
allpar$pmix <- allpar$pmix/sum_pmix
warning("\'pmix\' is rescaled to add up to 1")
}
if(any(allpar$kappa <= 0)) stop("kappa must be positive")
if(any(allpar$mu < 0 | allpar$mu >= 2*pi)) allpar$mu <- prncp_reg(allpar$mu)
ncomp <- length(kappa)
allcompden <- vapply(1:ncomp,
function(j) dwnorm(x, kappa[j], mu[j],
int.displ, FALSE),
rep(0, length(x)))
mixden <- c(allcompden %*% pmix)
if (log) {
log(mixden)
} else {
mixden
}
}
#' Fitting univariate wrapped normal mixtures using MCMC
#' @inheritParams fit_vmsinmix
#'
#' @details
#' Wrapper for \link{fit_angmix} with \code{model = "wnorm"}.
#'
#'
#' @examples
#' # illustration only - more iterations needed for convergence
#' fit.wnorm.20 <- fit_wnormmix(wind$angle, ncomp = 3, n.iter = 20,
#' n.chains = 1)
#' fit.wnorm.20
#'
#' @export
fit_wnormmix <- function(...)
{
fit_angmix(model="wnorm", ...)
}
| /scratch/gouwar.j/cran-all/cranData/BAMBI/R/all_wnorm_fns.R |
expand_args <- function(...){
ell <- list(...)
max_length <- max(vapply(ell, length, 0))
lapply(ell, rep, length.out = max_length)
}
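# Example: expand_args(1:2, 5, c(10, 20, 30)) recycles each argument to the
# longest length, giving list(c(1, 2, 1), c(5, 5, 5), c(10, 20, 30))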
ldgamanum <- function(x, loc, scale) {
  # unnormalized log-density of Gamma(shape = loc, scale = scale)
  (loc-1)*log(x) - x/scale
}
# mean <- function(x) {
# sum(x)/length(x)
# }
har_mean <- function(x) {
if(sum(x == 0) > 0) stop("zero value in harmonic mean")
1/mean(1/x)
}
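# Example: har_mean(c(1, 2, 4)) = 3/(1 + 1/2 + 1/4) = 12/7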
sum_sq <- function(x) sum(x^2)
prncp_reg <- function(x) x %% (2*pi)
# maps an angle into [0, 2*pi)
prncp_reg.minuspi.pi <- function(x) {
y <- (x + pi) %% (2*pi)
y_neg <- which(y < 0)
y[y_neg] <- 2*pi + y[y_neg]
y - pi
} # maps a single angle into [-pi, pi)
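# Examples: prncp_reg(7) gives 7 - 2*pi (~ 0.7168);
# prncp_reg.minuspi.pi(5) gives 5 - 2*pi (~ -1.2832)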
atan3 <- function(x) prncp_reg(atan(x[2]/x[1]))
sph2cart <- function(x)
c(cos(x[1])*sin(x[2]), sin(x[1])*sin(x[2]), cos(x[2]))
#calculates unit vectors from pair of angles
listLen <- function(l)
vapply(1:length(l), function(i) length(l[[i]]), 0)
rm_NA_rad <- function(data, rad = TRUE) {
if(length(dim(data)) == 2) {
phi.no.na <- data[,1]
psi.no.na <- data[,2]
na.phi.id <- NULL
na.psi.id <- NULL
is.na.phi <- is.na(data[,1])
is.na.psi <- is.na(data[,2])
if(sum(is.na.phi) > 0)
na.phi.id <- which(is.na.phi)
if(sum(is.na.psi) > 0)
na.psi.id <- which(is.na.psi)
na.id <- union(na.phi.id, na.psi.id)
if(length(na.id) > 0){
phi.no.na <- data[,1][-na.id]
psi.no.na <- data[,2][-na.id]
}
if(rad) res <- prncp_reg(cbind(phi.no.na, psi.no.na))
else res <- prncp_reg(cbind(phi.no.na, psi.no.na) * pi / 180)
colnames(res) <- colnames(data)
} else {
data.no.na <- data
na.id <- NULL
is.na.data <- is.na(data)
if(sum(is.na.data) > 0){
na.id <- which(is.na.data)
data.no.na <- data[-na.id]
}
if(rad) res <- prncp_reg(data.no.na)
else res <- prncp_reg(data.no.na * pi / 180)
}
res
} #removes NA and converts into radians
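# Example: rm_NA_rad(c(10, NA, 370), rad = FALSE) drops the NA and converts
# the remaining angles from degrees to radians; both 10 and 370 degrees map
# to ~ 0.1745 radians.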
# rdirichlet <- function (n, alpha) # random generation from dirichlet
# {
# len <- length(alpha)
# x <- matrix(rgamma(len * n, alpha), ncol = len, byrow = TRUE)
# tot <- x %*% rep(1, len)
# x/as.vector(tot)
# }
# rnorm2 <- function (n = 1, mu, Sigma) # random generation from biv normal
# {
# p <- 2L
# eS <- eigen(Sigma, symmetric = TRUE)
# ev <- eS$values
# X <- matrix(rnorm(p * n), n)
# X <- drop(mu) + eS$vectors %*% diag(sqrt(pmax(ev, 0)), p) %*% t(X)
# if (n == 1) drop(X)
# else t(X)
# }
list_by_row <- function(mat, row_index) # create a list with elements being rows of a matrix (row_index is currently unused)
{
mat.list <- lapply(1:nrow(mat), function(j) mat[j, ])
names(mat.list) <- rownames(mat)
mat.list
}
addtolist <- function(list_in, ...) # add element to a list
{
ell <- list(...)
c(list_in, ell)
}
press_enter <- function() # waits for the user to press [enter]
{
cat("Press [enter] to continue")
line <- readline()
}
kappas2sigmas_wnorm2 <- function(kappa1, kappa2, kappa3) {
den <- kappa1*kappa2 - kappa3^2
sigma1 <- kappa2/den
sigma2 <- kappa1/den
rho <- -kappa3/sqrt(kappa1*kappa2)
c(sigma11 = sigma1, sigma22 = sigma2, rho = rho)
}
sigmas2kappas_wnorm2 <- function(sigma11, sigma22, rho) {
den <- sigma11*sigma22*(1-rho^2)
kappa1 <- sigma22/den
kappa2 <- sigma11/den
kappa3 <- -rho/((1-rho^2)*sqrt(sigma11*sigma22))
c(kappa1 = kappa1, kappa2 = kappa2, kappa3 = kappa3)
}
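# Round-trip sanity check (illustration only):
# s <- kappas2sigmas_wnorm2(2, 3, 0.5)
# sigmas2kappas_wnorm2(s[["sigma11"]], s[["sigma22"]], s[["rho"]])
# recovers c(kappa1 = 2, kappa2 = 3, kappa3 = 0.5)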
which.max_entry1 <- function(x) {
which.max(x)[1]
}
signif_or_round <- function(x, ...) {
  # use round() for values exceeding 1 in absolute value, signif() otherwise
  ifelse(abs(x) > 1, round(x, ...), signif(x, ...))
}
# print est (ci_lower, ci_upper) for each element
est_ci <- function(est, lower, upper, digits = 2)
{
out_mat <- est
for(j in 1:length(est)) {
out_mat[j] <- paste0(format(signif_or_round(est[j], digits), nsmall = digits),
" (",
format(signif_or_round(lower[j], digits), nsmall = digits),
", ",
format(signif_or_round(upper[j], digits), nsmall = digits),
")")
}
as.data.frame(out_mat)
}
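# Example (illustration only):
# est_ci(c(1.2345, 0.0456), c(1.1, 0.03), c(1.4, 0.06), digits = 2)
# formats each estimate as "est (lower, upper)", e.g. "1.23 (1.10, 1.40)"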
| /scratch/gouwar.j/cran-all/cranData/BAMBI/R/basic_fns.R |
basic_surfaceplot <- function(xpoints, ypoints, denmat,
                              xlab, ylab, zlab,
                              main = NULL, ...) {
inargs <- list(...)
inargs$x <- denmat~x*y
inargs$data <- data.frame(x = xpoints,
y=rep(ypoints, each=length(xpoints)),
denmat=denmat)
if (is.null(inargs$outerbox)) inargs$outerbox <- FALSE
if (is.null(inargs$xlab)) inargs$xlab <- xlab
if (is.null(inargs$ylab)) inargs$ylab <- ylab
if (is.null(inargs$colorkey)) inargs$colorkey <- FALSE
if (is.null(inargs$main)) inargs$main <- main
if (is.null(inargs$neval)) inargs$neval <- 100
if (is.null(inargs$aspect)) inargs$aspect <- c(61/87, 0.4)
if (is.null(inargs$zlab)) inargs$zlab <- list("Density", rot=90)
if (is.null(inargs$screen)) inargs$screen <- list(z=-30, x=-60)
if (is.null(inargs$scales))
inargs$scales <- list(arrows=FALSE, col=1)
if (is.null(inargs$drape)) inargs$drape <- TRUE
if (is.null(inargs$light.source))
inargs$light.source <- c(10,0,10)
if (is.null(inargs$col.regions))
inargs$col.regions <- grDevices::colorRampPalette(c("steelblue", "green",
"yellow", "orange", "red"))(60)
if (is.null(inargs$par.settings))
inargs$par.settings <-
list(axis.line = list(col = 'transparent'
),
layout.heights = list(
top.padding = 0,
main.key.padding = 0,
key.axis.padding = 0,
axis.xlab.padding = 0,
xlab.key.padding = 0,
key.sub.padding = 0,
bottom.padding = 0
),
layout.widths = list(
left.padding = 0,
key.ylab.padding = 0,
ylab.axis.padding = 0,
axis.key.padding = 0,
right.padding = 0
))
if (is.null(inargs$zoom))
inargs$zoom <- 0.85
do.call(lattice::wireframe, inargs)
}
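# Example (illustration only; parameter values are arbitrary):
# xp <- yp <- seq(0, 2*pi, length.out = 50)
# den <- outer(xp, yp, function(x, y) dvmsin(cbind(x, y), 2, 2, 1, pi, pi))
# basic_surfaceplot(xp, yp, c(den), xlab = "phi", ylab = "psi",
#                   main = "vmsin density surface")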
| /scratch/gouwar.j/cran-all/cranData/BAMBI/R/basic_surfaceplot.R |
bounded_hmc_biv <- function(lpr_grad,
init,
lower,
upper,
dep_cov = FALSE,
dep_cov_type = NULL,
zero_cov = FALSE,
nsteps = 1,
step)
{
  broken <- FALSE  # set to TRUE if the trajectory breaks down
  apr <- NULL      # replaced if the trajectory doesn't break
  delta <- NULL    # replaced if the trajectory doesn't break
lower1 <- lower
upper1 <- upper
ncomp <- 1
npar <- 5
kappa3_index <- 3
# positions of kappa3 in vec(par_mat)
lpr_grad.init <- lpr_grad(init)
lpr.init <- lpr_grad.init$lpr
gr <- lpr_grad.init$grad
  init.p <- matrix(rnorm(npar), npar, ncomp)
# Compute the kinetic energy at the start of the trajectory.
kinetic.init <- sum(init.p^2) / 2
# Compute the trajectory by the leapfrog method.
q <- init
p <- init.p
if (zero_cov) {
p[kappa3_index] <- 0
}
reflections <- 0
# Make a half step for momentum at the beginning.
p <- p + (step/2) * gr
if (zero_cov) {
p[kappa3_index] <- 0
}
for (i in 1:nsteps)
{
# Make a full step for the position.
q <- q + step * p
# check if broken
if (any(is.nan(c(p, q)))) {
broken <- TRUE
#stop("Algorithm breaks. Try a smaller epsilon.")
break
}
# Check for bound violations, and adjust position and momentum
for(k in 1:npar) {
# adjust the bound for the cov
# param if dependent (for wnorm2)
if(k == 3) {
if (zero_cov) {
next
}
else if (dep_cov) {
# if (any(exp(q[1:2]) <= 0)) {
# broken <- TRUE
# break
# }
if (dep_cov_type %in% c("wnorm2_bound", "vmsin_unimodal")) {
bd_k1k2 <- sqrt(exp(q[1])*exp(q[2]))
lower[k] <- max(lower1[k], -bd_k1k2)
upper[k] <- min(upper1[k], bd_k1k2)
}
else {
# dep_cov_type == "vmcos_unimodal"
lower[k] <- max(lower1[k], -exp(q[1])*exp(q[2])/(exp(q[1])+exp(q[2])))
}
}
# if (any(is.nan(c(upper[k], lower[k])))) {
# broken <- TRUE
# break
# }
}
reflections_curr <- 0L
      while ((q[k] < lower[k] || q[k] > upper[k]) &&
             reflections_curr <= 100) {
if (q[k]<lower[k]) {
q[k] <- lower[k] + (lower[k] - q[k])
p[k] <- -p[k]
reflections_curr <- reflections_curr + 1L
}
else if (q[k]>upper[k]) {
q[k] <- upper[k] - (q[k] - upper[k])
p[k] <- -p[k]
reflections_curr <- reflections_curr + 1L
}
}
reflections <- reflections + reflections_curr
if (reflections_curr >= 100) {
broken <- TRUE
break
}
}
# check for broken, bound violation, or nan
# if so then break from i loop
if (any(is.nan(c(p, q)),
broken)) {
broken <- TRUE
#stop("Algorithm breaks. Try a smaller epsilon.")
break
}
# browser()
# Evaluate the gradient at the new position, provided not broken
lpr_grad_current <- lpr_grad(q)
lr <- lpr_grad_current$lpr
gr <- lpr_grad_current$grad
# Make a full step for the momentum, except when we're coming to the end of
# the trajectory.
if (i != nsteps) {
p <- p + step * gr
if (zero_cov) {
p[kappa3_index] <- 0
}
}
}
# Make a half step for momentum at the end.
p <- p + (step/2) * gr
if (zero_cov) {
p[kappa3_index] <- 0
}
# Negate momentum at end of trajectory to make the proposal symmetric.
p <- -p
# Look at log probability and kinetic energy at the end of the trajectory.
lpr.prop <- lr
kinetic.prop <- sum(p^2) / 2
# Accept or reject the state at the end of the trajectory.
H.init <- -lpr.init + kinetic.init
H.prop <- -lpr.prop + kinetic.prop
delta <- H.prop - H.init
apr <- min(1,exp(-delta))
if (any(is.nan(c(p, q, apr)))) {
broken <- TRUE
apr <- NULL
delta <- NULL
#stop("Algorithm breaks. Try a smaller epsilon.")
}
if(broken) # reject
{
final.q <- init
final.p <- init.p
lpr.final <- lpr.init
acc <- 0
}
else if (runif(1) > apr) # reject
{
final.q <- init
final.p <- init.p
lpr.final <- lpr.init
acc <- 0
}
else # accept
{
final.q <- q
final.p <- p
lpr.final <- lr
acc <- 1
}
# Return new state, its log probability and gradient, plus additional
# information, including the trajectory, if requested.
out <- list (final=final.q, final.p=final.p, lpr=lpr.final, step=step,
apr=apr, accpt=acc, delta=delta, reflections=reflections, broken = broken)
out
}
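# The bound handling above reflects the trajectory off the constraint walls.
# The core move, isolated on one scalar coordinate (a sketch; lower bound 0):
#   q <- -0.3; p <- -1.2
#   if (q < 0) { q <- -q; p <- -p } # position mirrored back inside, momentum reversed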
bounded_hmc_uni <- function(lpr_grad,
init,
lower,
upper,
nsteps = 1,
step)
{
broken <- FALSE # will be true if breaks
apr <- NULL #will be replaced if doesn't break
delta <- NULL #will be replaced if doesn't break
ncomp <- ncol(init)
npar <- 2*ncomp
lpr_grad.init <- lpr_grad(init)
lpr.init <- lpr_grad.init$lpr
  gr <- lpr_grad.init$grad
  lr <- lpr.init # fallback, in case the leapfrog breaks before the first gradient evaluation
init.p <- matrix(rnorm(npar), 2, ncomp)
# Compute the kinetic energy at the start of the trajectory.
kinetic.init <- sum(init.p^2) / 2
# Compute the trajectory by the leapfrog method.
q <- init
p <- init.p
reflections <- 0
# Make a half step for momentum at the beginning.
p <- p + (step/2) * gr
# browser()
# Alternate full steps for position and momentum.
for (i in 1:nsteps)
{
# Make a full step for the position.
q <- q + step * p
# check if broken
if (any(is.nan(c(p, q)))) {
broken <- TRUE
#stop("Algorithm breaks. Try a smaller epsilon.")
break
}
# Check for bound violations, and adjust position and momentum
for(k in 1:npar) {
reflections_curr <- 0L
      while ((q[k] < lower[k] || q[k] > upper[k]) &&
             reflections_curr <= 100) {
if (q[k]<lower[k]) {
q[k] <- lower[k] + (lower[k] - q[k])
p[k] <- -p[k]
reflections_curr <- reflections_curr + 1L
}
else if (q[k]>upper[k]) {
q[k] <- upper[k] - (q[k] - upper[k])
p[k] <- -p[k]
reflections_curr <- reflections_curr + 1L
}
}
reflections <- reflections + reflections_curr
if (reflections_curr >= 100) {
broken <- TRUE
break
}
}
# check if broken
if (any(is.nan(c(p, q)), broken)) {
broken <- TRUE
#stop("Algorithm breaks. Try a smaller epsilon.")
break
}
# Evaluate the gradient at the new position.
lpr_grad_current <- lpr_grad(q)
lr <- lpr_grad_current$lpr
gr <- lpr_grad_current$grad
# Make a full step for the momentum, except when we're coming to the end of
# the trajectory.
if (i != nsteps) {
p <- p + step * gr
}
}
# Make a half step for momentum at the end.
p <- p + (step/2) * gr
# Negate momentum at end of trajectory to make the proposal symmetric.
p <- -p
# Look at log probability and kinetic energy at the end of the trajectory.
lpr.prop <- lr
kinetic.prop <- sum(p^2) / 2
# Accept or reject the state at the end of the trajectory.
H.init <- -lpr.init + kinetic.init
H.prop <- -lpr.prop + kinetic.prop
delta <- H.prop - H.init
apr <- min(1,exp(-delta))
if (any(is.nan(c(p, q, apr)))) {
broken <- TRUE
apr <- NULL
delta <- NULL
#stop("Algorithm breaks. Try a smaller epsilon.")
}
if(broken) # reject
{
final.q <- init
final.p <- init.p
lpr.final <- lpr.init
acc <- 0
}
else if (runif(1) > apr) # reject
{
final.q <- init
final.p <- init.p
lpr.final <- lpr.init
acc <- 0
}
else # accept
{
final.q <- q
final.p <- p
lpr.final <- lr
acc <- 1
}
# Return new state, its log probability and gradient, plus additional
# information, including the trajectory, if requested.
out <- list (final=final.q, final.p=final.p, lpr=lpr.final, step=step,
apr=apr, accpt=acc, delta=delta, reflections=reflections, broken = broken)
out
}
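# A minimal call sketch for bounded_hmc_uni with a hypothetical
# Gaussian-like log-target (not one of the package's angular models):
#   lpr_grad <- function(q) list(lpr = -sum(q^2) / 2, grad = -q)
#   out <- bounded_hmc_uni(lpr_grad, init = matrix(c(0.5, 1), 2, 1),
#                          lower = c(1e-4, 0), upper = c(150, 2*pi),
#                          nsteps = 5, step = 0.05)
#   out$accpt # 1 if the proposal was accepted, 0 otherwise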
| /scratch/gouwar.j/cran-all/cranData/BAMBI/R/bounded_hmc_biv.R |
bounded_rwmh_biv <- function(lpr,
init,
lower,
upper,
dep_cov = FALSE,
dep_cov_type = NULL,
zero_cov = FALSE,
step)
{
lpr.init <- lpr(init)
ncomp <- ncol(init)
prop <- init
if (zero_cov) prop[3, ] <- 0
prop[1:2, ] <- init[1:2, ] + rnorm(2*ncomp, 0, step[1:2])
prop[4:5, ] <- prncp_reg(init[4:5, ] + rnorm(2*ncomp, 0, step[4:5]))
if (!zero_cov) {
prop[3, ] <- init[3, ] + rnorm(ncomp, 0, step[3])
if (dep_cov) {
if (dep_cov_type %in% c("wnorm2_bound", "vmsin_unimodal")) {
bd_k1k2 <- sqrt(exp(prop[1, ])*exp(prop[2, ]))
lower[3, ] <- pmax(-bd_k1k2, lower[3, ])
upper[3, ] <- pmin(bd_k1k2, upper[3, ])
} else {
# dep_cov_type == "vmcos_unimodal"
lower[3, ] <- pmax(-exp(prop[1, ])*exp(prop[2, ])/
(exp(prop[1, ])+exp(prop[2, ])),
lower[3, ])
}
}
}
bd_err <- any(c(prop-lower, upper-prop) < 0)
if (bd_err) {
lpr.prop <- -Inf
} else {
lpr.prop <- lpr(prop)
}
aprob <- min(1, exp(lpr.prop-lpr.init))
accpt <- (runif(1) < aprob)
# if (is.na(propcheck)) browser()
if (accpt) {
final <- prop
lpr.final <- lpr.prop
} else {
final <- init
lpr.final <- lpr.init
}
out <- list (final=final, lpr=lpr.final, step=step,
aprob=aprob, accpt=accpt*1)
out
}
bounded_rwmh_uni <- function(lpr,
init,
lower,
upper,
step)
{
lpr.init <- lpr(init)
ncomp <- ncol(init)
prop <- init
prop[1, ] <- pmax(init[1, ] + rnorm(ncomp, 0, step[1]), 1e-6)
prop[2, ] <- prncp_reg(init[2, ] + rnorm(ncomp, 0, step[2]))
bd_err <- any(c(prop-lower, upper-prop) < 0)
if (bd_err) {
lpr.prop <- -Inf
} else {
lpr.prop <- lpr(prop)
}
aprob <- min(1, exp(lpr.prop-lpr.init))
accpt <- (runif(1) < aprob)
# if (is.na(propcheck)) browser()
if (accpt) {
final <- prop
lpr.final <- lpr.prop
} else {
final <- init
lpr.final <- lpr.init
}
out <- list (final=final, lpr=lpr.final, step=step,
aprob=aprob, accpt=accpt*1)
out
}
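# A minimal call sketch for bounded_rwmh_uni with a hypothetical log-target
# on (kappa, mu); note that mu proposals are wrapped to [0, 2*pi) via prncp_reg:
#   lpr <- function(par) -0.5 * sum((par - c(1, pi))^2)
#   out <- bounded_rwmh_uni(lpr, init = matrix(c(1, pi), 2, 1),
#                           lower = c(1e-4, 0), upper = c(150, 2*pi),
#                           step = c(0.1, 0.1))
#   out$aprob # Metropolis acceptance probability of the proposal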
| /scratch/gouwar.j/cran-all/cranData/BAMBI/R/bounded_rmwh_biv.R |
#' Sample circular correlation coefficients
#' @param x two column matrix. NA values are not allowed.
#' @param type type of the circular correlation.
#' Must be one of "fl", "js", "tau1" and "tau2". See details.
#' @param alternative one of \code{"two.sided"}, \code{"less"} or \code{"greater"}
#' (defaults to \code{"two.sided"}).
#' Hypothesis test is performed only when \code{type} is either \code{"fl"} or \code{"js"},
#' in which case asymptotic standard error of the estimator is used to construct the test
#' statistic.
#' @param jackknife logical. Compute jackknifed estimate and standard error? Defaults to FALSE.
#' @param bootse logical. Compute bootstrap standard error? Defaults to FALSE.
#' @param n.boot number of bootstrapped samples to compute bootstrap standard error. Defaults to
#' 100. Ignored if \code{bootse} is FALSE.
#'
#' @details
#' \code{circ_cor} calculates the (sample) circular correlation between the columns of x.
#' Two parametric (the Jammalamadaka-Sarma (1988, equation 2.6) form \code{"js"}, and
#' the Fisher-Lee (1983, Section 3) form \code{"fl"})
#' and two non-parametric (two versions of Kendall's tau) correlation coefficients are considered.
#' The first version of Kendall's tau (\code{"tau1"}) is based on equation 2.1 in Fisher and Lee (1982),
#' whereas the second version (\code{"tau2"}) is computed using equations 6.7-6.8 in Zhan et al (2017).
#'
#' The computational costs for \code{"js"}, \code{"fl"}, \code{"tau2"} and \code{"tau1"} are \eqn{O(n)}, \eqn{O(n^2)}, \eqn{O(n^2)} and \eqn{O(n^3)}
#' respectively, where \eqn{n} denotes the number of rows in \code{x}. As such, for large \eqn{n} evaluation of
#' \code{"tau1"} will be slow.
#'
#'
#' @references
#'
#' Fisher, N. I. and Lee, A. J. (1982). Nonparametric measures of angular-angular association. Biometrika, 69(2), 315-321.
#'
#' Fisher, N. I. and Lee, A. J. (1983). A correlation coefficient for circular data. Biometrika, 70(2):327-332.
#'
#' Jammalamadaka, S. R. and Sarma, Y. (1988). A correlation coefficient for
#' angular variables. Statistical theory and data analysis II, pages 349-364.
#'
#' Zhan, X., Ma, T., Liu, S., & Shimizu, K. (2017). On circular correlation for data on the torus. Statistical Papers, 1-21.
#'
#'
#' @examples
#' # generate data from vmsin model
#' set.seed(1)
#' dat <- rvmsin(100, 2,3,-0.8,0,0)
#'
#' # now calculate circular correlation(s) between the 2 columns of dat
#' circ_cor(dat, type="js")
#' circ_cor(dat, type="fl")
#' circ_cor(dat, type="tau1")
#' circ_cor(dat, type="tau2")
#'
#'
#' @export
circ_cor <- function(x, type="js", alternative = "two.sided",
jackknife = FALSE, bootse = FALSE, n.boot = 100) {
if (any(is.na(x)))
stop("NA values in \'x\'")
if (!is.matrix(x)) {
stop("\'x\' must be a two-column matrix")
}
if (ncol(x)!=2) {
stop("\'x\' must be a two-column matrix")
}
if (!type %in% c("fl", "js", "tau1", "tau2"))
stop("\'type\' must be one of \'js\', \'fl\', \'tau1\' or \'tau2\'")
x <- prncp_reg(x)
n <- nrow(x)
if (type == "fl") {
calc_rho <- function(x) {
rho <- calc_corr_fl(x)
A_over_mu2 <- function(margin) {
alpha <- sapply(1:2, function(p) sum(cos(p*margin))/n)
beta <- sapply(1:2, function(p) sum(sin(p*margin))/n)
A <- alpha[1]^2 + beta[1]^2 + alpha[2]*beta[1]^2 -
alpha[1]^2*alpha[2] - 2*alpha[1]*beta[1]*beta[2]
mu2 <- 0.5 * (1 - alpha[2]^2 - beta[2]^2)
A/mu2
}
avar <- prod(apply(x, 2, A_over_mu2))
se <- sqrt(avar)/sqrt(n)
attr(rho, "se") <- se
rho
}
# attr(rho_fl, "se") <- se
# attr(rho_fl, "p.value") <- pval
# rho
} else if (type == "js") {
calc_rho <- function(x) {
sin_x_1_cent <- sin(x[, 1] - atan2(sum(sin(x[, 1])), sum(cos(x[, 1]))))
sin_x_2_cent <- sin(x[, 2] - atan2(sum(sin(x[, 2])), sum(cos(x[, 2]))))
num <- sum(sin_x_1_cent*sin_x_2_cent)
den <- sqrt(sum(sin_x_1_cent^2)*sum(sin_x_2_cent^2))
rho <- num/den
# asymptotic variance
# idx <- data.matrix(expand.grid(0:4, 0:4))
idx <- rbind(
c(2, 2),
c(2, 0), c(0, 2),
c(1, 3), c(3, 1),
c(4, 0), c(0, 4)
)
rownames(idx) <- paste0(idx[, 1], idx[, 2])
lambda <- apply(
idx,
1,
function(ii) {
sum(sin_x_1_cent^(ii[1]) * sin_x_2_cent^(ii[2]))/n
}
)
avar <- unname(
max(
lambda["22"]/lambda["20"]*lambda["02"] -
rho * (
lambda["13"]/(lambda["20"]*sqrt(lambda["20"]*lambda["02"])) +
lambda["31"]/(lambda["02"]*sqrt(lambda["20"]*lambda["02"]))
) +
rho^2 / 4 * (
1 +
lambda["40"]/lambda["20"]^2 +
lambda["04"]/lambda["02"]^2 +
lambda["22"]/(lambda["20"]*lambda["02"])
),
1e-10
)
)
# browser()
se <- sqrt(avar)/sqrt(n)
# z <- rho_js/se
attr(rho, "se") <- se
# if (alternative == "two.sided") {
# pval <- 2 * pnorm(abs(z), lower.tail = FALSE)
# } else if (alternative == "less") {
# pval <- pnorm(z, lower.tail = TRUE)
# } else if (alternative == "greater") {
# pval <- pnorm(z, lower.tail = FALSE)
# }
#
# attr(rho_js, "se") <- se
# attr(rho_js, "p.value") <- pval
# rho_js
rho
}
} else if (type == "tau1") {
calc_rho <- calc_corr_tau_1
} else {
calc_rho <- calc_corr_tau_2
}
# browser()
rho <- calc_rho(x)
rho_attr <- attributes(rho)
if (jackknife) {
vals <- vapply(
1:n,
function(ii){
c(calc_rho(x[-ii, , drop = FALSE]))
},
0
)
vals_adj <- n*rho - (n-1)*vals
rho_attr$jackknife.est <- sum(vals_adj)/n
rho_attr$jackknife.se <- sqrt(var(vals_adj)/n)
}
if (bootse) {
boot_vals <- vapply(
1:n.boot,
function(ii) {
idx <- sample(1:n, replace = TRUE)
c(calc_rho(x[idx, , drop = FALSE]))
},
0
)
rho_attr$bootse <- sd(boot_vals)
}
if (type %in% c("js", "fl")) {
z <- c(rho)/rho_attr$se
if (alternative == "two.sided") {
rho_attr$pval <- 2 * pnorm(abs(z), lower.tail = FALSE)
} else if (alternative == "less") {
rho_attr$pval <- pnorm(z, lower.tail = TRUE)
} else if (alternative == "greater") {
rho_attr$pval <- pnorm(z, lower.tail = FALSE)
}
}
attributes(rho) <- rho_attr
rho
}
#' Analytic circular variances and correlations for bivariate angular models
#' @param model bivariate angular model. Must be one of \code{"vmsin"},
#' \code{"vmcos"}, or \code{"wnorm2"}.
#' @param kappa1,kappa2,kappa3 concentration and covariance parameters.
#' Recycled to the same size. kappa3^2 must be < kappa1*kappa2 in the wnorm2 model
#' (see \link{rwnorm2} for a detailed parameterization of \code{wnorm2}).
#' @param mu1,mu2 mean parameters. Ignored as they do not play any role in
#' the analytical formulas.
#' @param nsim Monte Carlo sample size. Ignored if all of \code{kappa1}, \code{kappa2}
#' and \code{abs(kappa3)} are < 150 or if model = \code{"wnorm2"}.
#' @inheritParams contour_model
#'
#' @return
#' Returns a list with elements \code{var1}, \code{var2} (circular variances for the
#' first and second coordinates), \code{rho_fl} and \code{rho_js} (circular correlations).
#' See details.
#'
#' @details
#' The function computes the analytic circular variances and correlations
#' (both Jammalamadaka-Sarma (JS) and Fisher-Lee (FL) forms) for von Mises sine,
#' von Mises cosine and bivariate wrapped normal distributions.
#'
#' For \code{wnorm2}, expressions for the circular variances,
#' JS and FL correlation coefficients can be found in Mardia and Jupp (2009),
#' Jammalamadaka and Sarma (1988) and Fisher and Lee (1983) respectively.
#' For \code{vmsin} and \code{vmcos} these expressions are provided in Chakraborty and Wong (2018).
#'
#' Because the analytic expressions in \code{vmsin} and \code{vmcos} models involve infinite sums
#' of product of Bessel functions,
#' if any of \code{kappa1}, \code{kappa2} and \code{abs(kappa3)} is larger
#' than or equal to 150, IID Monte Carlo with sample size \code{nsim} is used
#' to approximate \code{rho_js} for numerical stability. From \code{rho_js},
#' \code{rho_fl} is computed using Corollary 2.2 in
#' Chakraborty and Wong (2018), which makes cost-complexity for
#' the \code{rho_fl} evaluation to be of order O(\code{nsim}) for \code{vmsin}
#' and \code{vmcos} models. (In general, \code{rho_fl} evaluation
#' is of order O(\code{nsim}^2)).
#'
#' In addition, for the \code{vmcos} model, when \code{-150 < kappa3 < -1}
#' or \code{50 < max(kappa1, kappa2, abs(kappa3)) <= 150}, the analytic formulas
#' in Chakraborty and Wong (2018) are used; however, the reciprocal of the normalizing
#' constant and its partial derivatives are all calculated numerically via a (quasi) Monte Carlo method for
#' numerical stability. These (quasi) random numbers can be provided through the
#' argument \code{qrnd}, which must be a two column matrix, with each element being
#' a (quasi) random number between 0 and 1. Alternatively, if \code{n_qrnd} is
#' provided (and \code{qrnd} is missing), a two dimensional sobol sequence of size \code{n_qrnd} is
#' generated via the function \link{sobol} from the R package \code{qrng}. If none of \code{qrnd}
#' or \code{n_qrnd} is available, a two dimensional sobol sequence of size 1e4 is used.
#'
#'
#' @examples
#' circ_varcor_model("vmsin", kappa1= 1, kappa2 = 2, kappa3 = 3)
#'
#' # Monte Carlo approximation
#' set.seed(1)
#' dat <- rvmsin(1000, 1, 2, 3)
#' # sample circular variance
#' circ_var <- function(x)
#' 1 - mean(cos(x - atan2(mean(sin(x)), mean(cos(x))) ))
#' circ_var(dat[, 1])
#' circ_var(dat[, 2])
#' circ_cor(dat, "fl")
#' circ_cor(dat, "js")
#'
#' @references
#' Fisher, N. I. and Lee, A. J. (1983). A correlation coefficient for circular data. Biometrika, 70(2):327-332.
#'
#' Jammalamadaka, S. R. and Sarma, Y. (1988). A correlation coefficient for
#' angular variables. Statistical theory and data analysis II, pages 349-364.
#'
#' Mardia, K. and Jupp, P. (2009). Directional Statistics. Wiley Series in Probability and Statistics. Wiley.
#'
#' Chakraborty, S. and Wong, S, W.K. (2018). On the circular correlation coefficients
#' for bivariate von Mises distributions on a torus. arXiv e-print.
#'
#' @export
circ_varcor_model <- function(model = "vmsin", kappa1 = 1, kappa2 = 1, kappa3 = 0,
mu1 = 0, mu2 = 0, nsim = 1e4, ...)
{
if(any(c(kappa1, kappa2) < 0))
stop("kappa1 and kappa2 must be non-negative")
  if (length(model) != 1 || !model %in% c("vmsin", "vmcos", "wnorm2"))
    stop("\'model\' must be one of \"vmsin\", \"vmcos\" or \"wnorm2\"")
if (nsim <= 0)
stop("\'nsim\' must be a positive integer")
ell <- list(...)
nsim <- round(nsim)
if (model == "vmcos") {
if (!is.null(ell$qrnd)) {
qrnd_grid <- ell$qrnd
dim_qrnd <- dim(qrnd_grid)
      if (!is.matrix(qrnd_grid) || is.null(dim_qrnd) ||
          dim_qrnd[2] != 2)
stop("\'qrnd\' must be a two column matrix")
n_qrnd <- dim_qrnd[1]
} else if (!is.null(ell$n_qrnd)){
n_qrnd <- round(ell$n_qrnd)
if (n_qrnd < 1)
stop("n_qrnd must be a positive integer")
qrnd_grid <- sobol(n_qrnd, 2, FALSE)
} else {
n_qrnd <- 1e4
qrnd_grid <- sobol(n_qrnd, 2, FALSE)
}
}
if(max(length(kappa1), length(kappa2), length(kappa3), length(mu1), length(mu2)) > 1) {
expanded <- expand_args(kappa1, kappa2, kappa3, mu1, mu2)
kappa1 <- expanded[[1]]
kappa2 <- expanded[[2]]
kappa3 <- expanded[[3]]
if (model == "wnorm2" &
any (kappa1*kappa2 - kappa3*kappa3 <= 1e-10))
stop("abs(kappa3) must be less than sqrt(kappa1*kappa2) in wnorm2")
lapply(1:length(kappa1),
function(j) {
inargs <- list(kappa1 = kappa1[j], kappa2 = kappa2[j],
kappa3 = kappa3[j])
if (model == "vmcos") {
inargs$qrnd_grid <- qrnd_grid
# inargs$force_approx_const <- ell$force_approx_const
}
do.call(paste0(model, "_var_cor_singlepar"),
inargs)
}
)
} else {
if (model == "wnorm2" &
(kappa1*kappa2 - kappa3*kappa3 <= 1e-10))
stop("abs(kappa3) must be less than sqrt(kappa1*kappa2) in wnorm2")
inargs <- list(kappa1 = kappa1, kappa2 = kappa2,
kappa3 = kappa3)
if (model == "vmcos") inargs$qrnd_grid <- qrnd_grid
do.call(paste0(model, "_var_cor_singlepar"),
inargs)
}
}
| /scratch/gouwar.j/cran-all/cranData/BAMBI/R/circ_cor.R |
#' Backbone Dihedral Angles of Triose Phosphate Isomerase (8TIM)
#'
#' A dataset consisting of 490 pairs of backbone dihedral angles (in radian scale \eqn{[0, 2\pi)} )
#' \eqn{(\phi, \psi)} for the protein Triose Phosphate Isomerase (8TIM). The angles were obtained first by using
#' the DSSP software on the PDB file for 8TIM to get the backbone angles (in degrees),
#' and then by converting all angles into radians. Due to the presence of different secondary structures
#' (helices, sheets and loops) in the protein, the angular data show considerable variability and are multimodal
#' with noticeably distinct clusters.
#'
#'
#' @format A data frame with 490 rows and 2 variables (backbone dihedral angles) phi and psi.
#' @source 8TIM PDB file: \url{http://www.rcsb.org/pdb/explore.do?structureId=8tim}.
#' @source DSSP software: \url{https://swift.cmbi.umcn.nl/gv/dssp/}.
#'
#' @usage
#' data(tim8)
"tim8"
#' Saturna Island wind directions
#'
#' @description
#' A dataset consisting of 239 observations on wind direction in radians (original measurements were
#' in 10s of degrees), measured at Saturna Island, British Columbia,
#' Canada during October 1-10, 2016 (obtained from Environment Canada website). There was a severe storm
#' during October 4-7, which caused significant fluctuations among the wind directions. As a result, the
#' angular data show clear multimodality.
#'
#' @format A data frame with 239 rows and 2 columns; the column "angle" provides the angular direction (in radian)
#' and the column day provides the days on which the data points were collected (ranges between 1-10, corresponding to
#' October 1-10, 2016).
#' @source Environment Canada: \url{https://climate.weather.gc.ca/climate_data/data_quality_e.html}.
#' @source CBC news on the storm: \url{https://www.cbc.ca/news/canada/british-columbia/storm-bc-1.3795204}.
#'
#' @usage
#' data(wind)
"wind"
| /scratch/gouwar.j/cran-all/cranData/BAMBI/R/data.R |
# find_lscale <- function(x) {
# y <- x
# y[1:2] <- log(x[1:2])
# y
# }
find_expscale <- function(x) {
y <- x
y[1:2] <- exp(x[1:2])
y
}
find_expscale_uni <- function(x) {
y <- x
y[1] <- exp(x[1])
y
}
find_lscale_mat <- function(x) {
y <- x
y[1:2, ] <- log(x[1:2, ])
y
}
find_lscale_mat_uni <- function(x) {
y <- x
y[1, ] <- log(x[1, ])
y
}
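# Round-trip sketch for the scale helpers above (only the concentration
# entries move between log and natural scale):
#   par_vec <- c(2, 3, -0.5, 0, pi)
#   all.equal(find_expscale(find_lscale_mat(cbind(par_vec))[, 1]), par_vec)
#   # TRUE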
#' Fitting Bivariate and univariate angular mixture models
#'
#' @importFrom gtools rdirichlet
#' @importFrom future.apply future_lapply
#'
#' @param model angular model whose mixtures are to be fitted. Available choices are \code{"vmsin", "vmcos"} and \code{"wnorm2"} for
#' bivariate data, and \code{"vm"} and \code{"wnorm"} for univariate data.
#' @param data data matrix (if bivariate, in which case it must have two columns) or vector. Values outside
#' \eqn{[0, 2\pi)} are transformed into that scale. *Note:* BAMBI cannot handle missing data. Missing values must
#' either be removed or properly imputed.
#' @param ncomp number of components in the mixture model. Must be a positive integer. Vector values are not allowed.
#' If \code{ncomp == 1}, a single component model is fitted.
#' @param start_par list with elements \code{pmix} (ignored if \code{comp == 1}), together with \code{kappa1, kappa2, mu1} and \code{mu2},
#' for bivariate models, and \code{kappa} and \code{mu} for univariate models,
#' all being vectors of length same as \code{ncomp}.
#' These provides the starting values for the Markov chain; with \eqn{j}-th component of each vector corresponding to the \eqn{j}-th
#' component of the mixture distribution. If missing, the data is first clustered into \code{ncomp} groups either via k-means (after
#' projecting onto a unit sphere), or randomly, depending on \code{rand_start}, and then moment estimators for components are used as
#' the starting points. Note that a very wrong starting point can potentially lead the chain to get stuck at a wrong solution for thousands
#' of iterations. As such, we recommend using the default option, which is k-means followed by moment estimation.
#' @param cov.restrict Should there be any restriction on the covariance parameter for a bivariate model. Available choices are
#' \code{"POSITIVE", "NEGATIVE", "ZERO"} and "NONE". Note that \code{"ZERO"} fits a mixture with product components. Defaults to
#' \code{"NONE"}.
#' @param unimodal.component logical. Should each component in the mixture model be unimodal? Only used if \code{model} is either \code{"vmsin"}
#' or \code{"vmcos"}. Defaults to FALSE.
#' @param n.chains number of chains to run. Must be a positive integer.
#' @param chains_parallel logical. Should the chains be run in parallel? Defaults to TRUE, and ignored if \code{n.chains} = 1.
#' Note that parallelization is implemented via \link{future_lapply} from package \code{future.apply} which
#' uses futures for this purpose, and thus provides a convenient way of parallelization across various OSs and computing environments.
#' However, a proper \link[future]{plan} must be set for the parallelization before running the chain; otherwise the chains will run sequentially.
#' @param method MCMC strategy to be used for the model parameters: \code{"hmc"} or \code{"rwmh"}.
#' @param perm_sampling logical. Should the permutation sampling algorithm of Fruhwirth-Schnatter (2001) be used?
#' If TRUE, at every iteration after burnin, once model parameters and mixing proportions are sampled,
#' a random permutation of 1, ..., ncomp is considered, and components are relabelled according
#' to this random permutation. These forced random label switchings may improve the mixing rate of the chain. However, (automated) tuning
#' is very difficult with such a scheme, as there is no simple way of keeping track of the "original" component labels. This creates problems
#' with computing standard deviations of the generated model parameters, thus making the
#' scaling step used in tuning for \code{epsilon} or \code{propscale} problematic as well. As such, \code{perm_sampling} is always turned
#' off during burn-in (even if \code{autotune = FALSE}), and turned on thereafter, if \code{TRUE}.
#' Defaults to \code{FALSE}.
#' @param int.displ absolute integer displacement for each coordinate for \code{wnorm} and \code{wnorm2} models (ignored otherwise). Default is 3.
#' Allowed minimum and maximum are 1 and 5 respectively.
#' @param epsilon,L tuning parameters for HMC; ignored if \code{method = "rwmh"}. \code{epsilon} (step-size) is a single number,
#' or a vector of size \code{2*ncomp} for univariate models and \code{5*ncomp} for bivariate models. Note that the "mass matrix"
#' in HMC is assumed to be identity. As such, \code{epsilon}'s corresponding to different model parameters need to be in proper scale for
#' optimal acceptance rate. Can be autotuned during burnin. See \code{autotune}.
#' \code{L} (leapfrog steps) is a positive integer or a vector of positive integers of length \code{n.chains}.
#' If multiple chains are used, we suggest the same \code{L} values across different chains to make the chains as homogeneous as possible.
#'
#' @param epsilon.random logical. Should \code{epsilon*delta}, where \code{delta} is a random
#' number between \code{(1-epsilon.incr, 1+epsilon.incr)} be used instead of \code{epsilon} at each iteration?
#' Ignored if \code{method = "rwmh"}.
#' @param L.random logical. Should a random integer between \code{L.orig/exp(L.incr)} and \code{L.orig*exp(L.incr)} be used instead as \code{L}
#' at each iteration? Ignored if \code{method = "rwmh"}. Defaults to \code{TRUE}.
#' @param L.incr amount of randomness incorporated in L if \code{L.random = TRUE}.
#' @param epsilon.incr amount of randomness incorporated in \code{epsilon} if \code{epsilon.random = TRUE}.
#' @param propscale tuning parameters for RWMH; a vector of size 5 (for bivariate models) or 2 (for univariate models) representing
#' the variances for the proposal normal densities
#' for the model parameters. Ignored if \code{method = "hmc"}. Can be autotuned during burnin. See \code{autotune}.
#' @param n.iter number of iterations for the Markov Chain.
## @param gam.loc,gam.scale location and scale (hyper-) parameters for the gamma prior for \code{kappa1} and \code{kappa2}. See
## \link{dgamma}. Defaults are \code{gam.loc = 0, gam.scale = 1000} that makes the prior non-informative.
#' @param pmix.alpha concentration parameter(s) for the Dirichlet prior for \code{pmix}. Must either be a positive real number, or a vector
#' with positive entries and of length \code{ncomp}. The default is \eqn{(r+r(r+1)/2)/2+3}, where \eqn{r} is 1 or 2 according to whether
#' the model is univariate or bivariate. Note that it is recommended to use larger \code{alpha} values to ensure good posterior behavior,
#' especially when \link{fit_incremental_angmix} is used for model selection, which handles overfitting by letting two component-specific parameters be
#' identical, uses the total number of components in the fitted model as the estimator for the true component
#' size, and then penalizes for model complexity. See Fruhwirth-Schnatter (2011) for more details on this.
#' @param norm.var variance (hyper-) parameters in the normal prior for \code{log(kappa), log(kappa1), log(kappa2)} and \code{kappa3}. (Prior mean is zero).
#' Can be a vector. Default is 1000, which makes the prior non-informative.
#' @param burnin.prop proportion of iterations to be used for burn-in. Must be a number in [0, 1).
#' Default is 0.5.
#' @param thin thinning size to be used. Must be a positive integer. If \code{thin} = n, then every nth iteration is retained
#' in the final MCMC sample.
#' @param autotune logical. Should the Markov chain auto-tune the parameter \code{epsilon} (in HMC) or
#' \code{propscale} (in RWMH) during burn-in? Set to \code{TRUE} by default. An adaptive tuning strategy is implemented.
#' Here, at every 10th iteration during burn-in, the acceptance ratio in the last \code{tune_ave_size}
#' iterations is calculated. Then the tuning parameter is decreased (increased) by a factor of
#' \code{1-tune.incr} (\code{1+tune.incr}) if the calculated acceptance rate
#' falls below (above) \code{accpt.prob.lower} (\code{accpt.prob.upper}). In addition, when \code{iter} is a multiple of
#' \code{tune_ave_size}, \code{epsilon} for each model parameter is rescaled via the standard deviation of
#' the corresponding parameter over the past \code{tune_ave_size} iterations.
#' @param tune.prop proportion of *\code{burnin}* used to tune the parameters (\code{epsilon} in HMC and
#' \code{propscale} in RWMH). Must be a number between 0 and 1; defaults to 1. Ignored if \code{autotune == FALSE}.
#' @param show.progress logical. Should a progress bar be displayed?
#' @param accpt.prob.lower,accpt.prob.upper lower and upper limits of acceptance ratio to be maintained while tuning
#' during burn-in. Must be numbers between 0 and 1, with \code{accpt.prob.lower < accpt.prob.upper}. See \code{autotune}. Defaults to (0.6, 0.9) for HMC and (0.3, 0.5) for RWMH.
#' Ignored if \code{autotune = FALSE}.
#' @param tune.incr how much should the tuning parameter be increased or decreased at each step while tuning during burn-in?
#' Must be a number between 0 and 1. See \code{autotune}. Defaults to 0.05. Ignored if \code{autotune = FALSE}.
#' @param rand_start logical. Should a random starting clustering be used? Must be either a scalar, or a vector of length \code{n.chains},
#' one for each chain. Ignored if \code{start_par} is supplied. See \code{start_par} for more details. Defaults to \code{FALSE}.
#' @param tune_ave_size number of previous iterations used to compute the acceptance rate while tuning in burn-in. Must be a positive
#' integer. Defaults to 100.
#' @param qrnd,n_qrnd Used only if \code{model = "vmcos"}. See \link{dvmcos} for details.
#' @param kappa_upper,kappa_lower upper and lower bounds for the concentration and (absolute) association parameters. Must be positive numbers. Defaults to 150 and 1e-4;
#' parameters with values above or below these limits rarely make sense in practice.
#' Warning: values much larger or smaller than the default are not recommended as they can cause numerical instability.
#' @param return_llik_contri logical. Should the log likelihood contribution of each data point for each MCMC iteration in each chain be returned? This makes
#' computation of \link{waic.angmcmc} and \link{loo.angmcmc} much faster. *Warning*: Depending on the length of data and \code{n.iter}, this can be
#' very memory intensive. We suggest setting \code{return_llik_contri = TRUE} only if \link{waic.angmcmc} and \link{loo.angmcmc} are aimed for. Defaults to
#' \code{FALSE}.
#' @param return_tune_param logical. Should the values of the tuning parameters used at each iteration in each chain be returned? Defaults to \code{FALSE}.
#' @param ... Unused.
#'
#' @note
#' Sampling is done in log scale for the concentration parameters (kappa, kappa1 and kappa2).
#'
#'
#' Parallelization is done by default when more than one chain is used,
#' but the chains can be run sequentially as well by setting
#' \code{chains_parallel = FALSE}. To retain reproducibility while running
#' multiple chains in parallel, the same RNG state is passed at the
#' beginning of each chain. This is done by specifying \code{future.seed = TRUE}
#' in \code{future.apply::future_lapply} call. Then at the beginning of the i-th
#' chain, before drawing any parameters, i-many Uniform(0, 1) random numbers are
#' generated using \code{runif(i)} (and then thrown away). This ensures that the
#' RNG states across chains prior to random generation of the parameters are
#' different, and hence, no two chains can become identical, even if they have
#' the same starting and tuning parameters. This, however, creates a difference
#' between a \code{fit_angmix} call with multiple chains which is run sequentially
#' by setting \code{chains_parallel = FALSE}, and another which is run sequentially
#' because of a sequential \code{plan()} (or no \code{plan()}), with
#' \code{chains_parallel = TRUE}. In the former, different RNG states are passed at
#' the initiation of each chain.
#'
#'
#' @examples
#' # illustration only - more iterations needed for convergence
#' fit.vmsin.20 <- fit_angmix("vmsin", tim8,
#' ncomp = 3, n.iter = 20,
#' n.chains = 1
#' )
#' fit.vmsin.20
#'
#'
#' # Parallelization is implemented via future_lapply from the
#' # package future.apply. To parallelize, first provide a parallel
#' # plan(); otherwise the chains will run sequentially.
#' # Note that not all plan() might work on every OS, as they execute
#' # functions defined internally in fit_mixmodel. We suggest
#' # plan(multisession) which works on every OS.
#' \donttest{
#' library(future)
#' library(parallel)
#' # plan(multisession, gc = TRUE) # parallelize chains
#'
#' set.seed(1)
#' MC.fit <- fit_angmix("vmsin", tim8,
#' ncomp = 3, n.iter = 5000,
#' n.chains = 3
#' )
#'
#'
#' pointest(MC.fit)
#'
#' MC.fix <- fix_label(MC.fit)
#'
#' contour(MC.fit)
#' contour(MC.fix)
#' lpdtrace(MC.fit)
#' }
#'
#' @references
#' Fruhwirth-Schnatter, S. (2011). Label switching under model uncertainty. Mixtures: Estimation and Application, 213-239.
#'
#' Fruhwirth-Schnatter, S. (2001). Markov chain Monte Carlo estimation of classical and dynamic switching and mixture models. Journal of the American Statistical Association, 96(453), 194-209.
#'
#' @export
fit_angmix <- function(model = "vmsin",
data,
ncomp,
cov.restrict = "NONE",
unimodal.component = FALSE,
start_par = NULL,
rand_start = rep(FALSE, n.chains),
method = "hmc",
perm_sampling = FALSE,
n.chains = 3,
chains_parallel = TRUE,
return_llik_contri = FALSE,
int.displ = 3,
epsilon = 0.1,
L = 10,
epsilon.random = TRUE,
L.random = FALSE,
burnin.prop = 0.5,
tune.prop = 1,
thin = 1,
propscale = 0.05,
n.iter = 500,
# gam.loc = 0.001,
# gam.scale = 1000,
pmix.alpha = NULL,
norm.var = 1000,
autotune = TRUE,
show.progress = TRUE,
accpt.prob.upper,
accpt.prob.lower,
epsilon.incr = 0.05,
L.incr = 0.075,
tune.incr = 0.05,
tune_ave_size = 100,
kappa_upper = 150,
kappa_lower = 1e-4,
return_tune_param = FALSE,
qrnd = NULL,
n_qrnd = NULL, ...) {
# if(is.null(dim(data)) | !(mode(data) %in% c("list", "numeric", "data.frame") && ncol(data) == 2)) stop("non-compatible data")
dots <- list(...)
  backward_compatible <- dots$backward_compatible
  if (is.null(backward_compatible)) {
    backward_compatible <- FALSE
  }
stopifnot(
is.logical(backward_compatible)
)
# progress.backend <- "tcltk"
# progressor <- function(...) NULL
progress.backend <- "tcltk"
signif_ <- if (backward_compatible) function(x, ...) x else function(x, ...) signif(x, 8)
if (length(model) > 1) stop("\'model\' must be a scalar")
if (model %in% c("vmsin", "vmcos", "wnorm2")) {
type <- "bi"
} else if (model %in% c("vm", "wnorm")) {
type <- "uni"
} else {
stop("non-compatible model")
}
if (missing(ncomp)) {
stop("\'ncomp\' is missing, with no default.")
}
if (length(ncomp) > 1) {
stop("\'ncomp\' must be a scalar")
}
if (any(length(n.chains) > 1, length(n.chains) < 1, n.chains == 0)) {
stop("Invalid n.chains")
}
zero_cov <- FALSE
if (type == "bi") {
if (!(is.matrix(data) | is.data.frame(data))) {
stop("\'data\' must be a two column matrix for model = \'vmsin\', \'vmcos\' and \'wnorm2\'")
}
if (ncol(data) != 2) {
stop("\'data\' must be a two column matrix for model = \'vmsin\', \'vmcos\' and \'wnorm2\'")
}
    if (length(cov.restrict) > 1 ||
      !cov.restrict %in% c("NONE", "POSITIVE", "NEGATIVE", "ZERO")) {
stop("cov.restrict must be one of \"NONE\", \"POSITIVE\", \"NEGATIVE\" and \"ZERO\"")
}
if (any(is.na(data))) {
stop(paste(
"\'data\' contains missing value(s). BAMBI cannot handle missing data.\n",
"Either remove them or properly impute them."
))
}
data.rad <- rm_NA_rad(data)
n.data <- nrow(data.rad)
npar_1_comp <- 5
par.names <- c("kappa1", "kappa2", "kappa3", "mu1", "mu2")
par_lower <- replicate(ncomp, c(kappa_lower, kappa_lower, -kappa_upper, 0, 0))
par_upper <- replicate(ncomp, c(
kappa_upper, kappa_upper,
kappa_upper, 2 * pi, 2 * pi
))
if (cov.restrict == "POSITIVE") {
par_lower[3, ] <- 0
} else if (cov.restrict == "NEGATIVE") {
par_upper[3, ] <- 0
} else if (cov.restrict == "ZERO") {
par_lower[3, ] <- par_upper[3, ] <- 0
zero_cov <- TRUE
}
if (missing(pmix.alpha)) {
pmix.alpha <- 5.5
}
} else {
if ((is.matrix(data) | is.data.frame(data))) {
data <- as.vector(as.matrix(data))
}
if (!is.numeric(data)) {
stop("\'data\' must be a vector for \'model\' = \'vm\' and \'wnorm\'")
}
data <- as.numeric(data)
data.rad <- rm_NA_rad(data)
n.data <- length(data.rad)
npar_1_comp <- 2
par.names <- c("kappa", "mu")
par_lower <- replicate(ncomp, c(kappa_lower, 0))
par_upper <- replicate(ncomp, c(kappa_upper, 2 * pi))
if (missing(pmix.alpha)) {
pmix.alpha <- 4
}
}
if (any(
length(perm_sampling) > 1, length(perm_sampling) < 1,
!is.logical(perm_sampling)
)) {
stop("Invalid perm_sampling")
}
if (any(
length(chains_parallel) > 1, length(chains_parallel) < 1,
!is.logical(chains_parallel)
)) {
stop("Invalid chains_parallel")
}
if (any(
length(autotune) > 1, length(autotune) < 1,
!is.logical(autotune)
)) {
stop("Invalid autotune")
}
if (any(
length(unimodal.component) > 1, length(unimodal.component) < 1,
!is.logical(unimodal.component)
)) {
stop("Invalid unimodal.component")
}
if (any(
length(return_llik_contri) > 1, length(return_llik_contri) < 1,
!is.logical(return_llik_contri)
)) {
stop("Invalid return_llik_contri")
}
if (any(
length(return_tune_param) > 1, length(return_tune_param) < 1,
!is.logical(return_tune_param)
)) {
stop("Invalid return_tune_param")
}
if (length(pmix.alpha) == 1) {
pmix.alpha <- rep(pmix.alpha, ncomp)
} else if (length(pmix.alpha) != ncomp) {
stop("length(pmix.alpha) and ncomp differ")
}
if (n.chains < 1) {
stop("\'n.chains\' must be a positive integer")
}
if (n.chains == 1) {
chains_parallel <- FALSE
}
if (length(burnin.prop) != 1 || burnin.prop < 0 || burnin.prop >= 1) {
stop("\"burnin.prop\" must be a number in [0, 1)")
}
if (thin < 1) {
stop("\"thin\" must be a positive integer")
}
if (tune.incr <= 0 | tune.incr >= 1) {
stop("\'tune.incr\' must be between 0 and 1")
}
n.burnin <- ceiling(burnin.prop * n.iter)
if (length(tune.prop) != 1 || tune.prop < 0 || tune.prop > 1) {
stop("\"tune.prop\" must be in [0, 1]")
}
iter.tune <- ceiling(burnin.prop * tune.prop * n.iter)
if (iter.tune > n.burnin) {
iter.tune <- n.burnin
}
n.iter.final <- n.iter - n.burnin
burnin_iter <- seq_len(n.burnin)
thin <- round(thin)
thin_filter <- c(TRUE, rep(FALSE, thin - 1))
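  # e.g. n.iter = 10, burnin.prop = 0.5, thin = 2 retains iterations 6, 8 and 10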
final_iter_set <- (seq_len(n.iter))[-burnin_iter][thin_filter]
# gam.rate <- 1/gam.scale
curr.model <- model
if (length(rand_start) == 1) {
rand_start <- rep(rand_start, n.chains)
}
if (method == "hmc") {
if (any(L < 1)) {
stop("\'L\' must be a positive integer")
}
L <- ceiling(L)
    if (length(L) == 1) {
      L <- rep(L, n.chains)
    } else if (length(L) != n.chains) {
      stop("\'L\' must be a vector of length \'n.chains\'")
    }
if (missing(accpt.prob.upper)) {
accpt.prob.upper <- 0.9
}
if (missing(accpt.prob.lower)) {
accpt.prob.lower <- if (model %in% c("wnorm", "wnorm2")) 0.58 else 0.6
}
if (length(epsilon) == ncomp * npar_1_comp) {
tune_param <- matrix(c(epsilon), npar_1_comp, ncomp)
} else if (length(epsilon) == 1) {
tune_param <- matrix(epsilon, npar_1_comp, ncomp)
} else {
stop("epsilon must either be a scalar or a vector of length 2*ncomp (univariate) or 5*ncomp (bivariate)")
}
}
#
# if (method == "hmc" & sum(rand_start) == 0 & n.chains > 1 & chains_parallel) {
# if (length(L) == 1 | max(L) == min(L)) {
# warning(paste("same L accross multiple chains while running them in",
# "parllel with rand_start = FALSE will just make",
# "identical copies of the same chain. L is changed"))
# L <- seq(ceiling(L/2), ceiling(2*L), length.out = n.chains)
# }
# }
if (grepl(method, "rwmh")) # using rwmh
{
if (missing(accpt.prob.upper)) {
accpt.prob.upper <- 0.5
}
if (missing(accpt.prob.lower)) {
accpt.prob.lower <- 0.3
}
if (length(propscale) == ncomp * npar_1_comp) {
tune_param <- matrix(c(propscale), npar_1_comp, ncomp)
} else if (length(propscale) == 1) {
tune_param <- matrix(propscale, npar_1_comp, ncomp)
} else {
stop("propscale must either be a scalar or a vector of length 2*ncomp (univariate) or 5*ncomp (bivariate)")
}
}
nterms <- tune_ave_size
if (!model %in% c("wnorm", "wnorm2")) {
int.displ <- omega.2pi <- NULL
}
if (model != "vmcos") {
qrnd_grid <- NULL
n_qrnd <- NULL
}
# Now rename the model specific compiled llik and grad functions
if (model == "vmsin") {
if (unimodal.component) {
dep_cov <- TRUE
dep_cov_type <- "vmsin_unimodal"
} else {
dep_cov <- FALSE
dep_cov_type <- NULL
}
# # in log scale
# lpd_grad_model_indep_1comp <- function(data, par_vec_lscale,
# obs_group, n.clus) {
# par_vec <- c(exp(par_vec_lscale[1:2]), par_vec_lscale[3:5])
# lpd_grad <- matrix(NA, 6, 1)
# if (n.clus > 0) {
# lpd_grad <- (grad_llik_vmsin_C(data[obs_group, , drop=FALSE],
# par_vec) +
# c( # grad for lprior
# (gam.loc - 1)/par_vec[1:2] - gam.rate, -par_vec[3]/norm.var, 0, 0,
# # lprior
# sum((gam.loc - 1)*par_vec_lscale[1:2]-
# gam.rate*par_vec[1:2]) - 0.5*par_vec[3]^2/norm.var
# )) * c(par_vec[1:2], rep(1, 4))
# } else {
# lpd_grad <-
# c( # grad for lprior
# (gam.loc - 1)/par_vec[1:2] - gam.rate, -par_vec[3]/norm.var, 0, 0,
# # lprior
# sum((gam.loc - 1)*par_vec_lscale[1:2]-
# gam.rate*par_vec[1:2]) - 0.5*par_vec[3]^2/norm.var
# ) *
# c(par_vec[1:2], rep(1, 4))
# }
#
# list(lpr = (lpd_grad[6]), grad = lpd_grad[1:5])
# }
#
#
# lpd_model_indep_1comp <- function(data, par_vec_lscale, obs_group,
# n.clus) {
# par_vec <- c(exp(par_vec_lscale[1:2]), par_vec_lscale[3:5])
# if (n.clus > 0) {
# # llik + prior
# res <-
# llik_vmsin_one_comp(data[obs_group, , drop=FALSE], par_vec,
# log(const_vmsin(par_vec[1],
# par_vec[2], par_vec[3]))) +
# sum((gam.loc - 1)*par_vec_lscale[1:2]-
# gam.rate*par_vec[1:2]) - 0.5*par_vec[3]^2/norm.var
# } else{
# # only prior
# res <-
# sum((gam.loc - 1)*par_vec_lscale[1:2]-
# gam.rate*par_vec[1:2]) - 0.5*par_vec[3]^2/norm.var
# }
# res
# }
# in log scale
lpd_grad_model_indep_1comp <- function(data, par_vec_lscale,
obs_group, n.clus) {
par_vec <- c(exp(par_vec_lscale[1:2]), par_vec_lscale[3:5])
lpd_grad <- matrix(NA, 6, 1)
if (n.clus > 0) {
lpd_grad <- signif_(suppressWarnings(
grad_llik_vmsin_C(
data[obs_group, , drop = FALSE],
par_vec
)
) *
c(par_vec[1:2], rep(1, 4)) +
c( # grad for lprior
-par_vec_lscale[1:3] / norm.var, 0, 0,
# lprior
-0.5 * sum(par_vec_lscale[1:3]^2 / norm.var)
))
} else {
lpd_grad[] <-
signif_(c( # grad for lprior
-par_vec_lscale[1:3] / norm.var, 0, 0,
# lprior
-0.5 * sum(par_vec_lscale[1:3]^2 / norm.var)
))
}
list(lpr = (lpd_grad[6]), grad = lpd_grad[1:5])
}
lpd_model_indep_1comp <- function(data, par_vec_lscale, obs_group, n.clus) {
par_vec <- c(exp(par_vec_lscale[1:2]), par_vec_lscale[3:5])
if (n.clus > 0) {
# llik + prior
res <-
signif_(llik_vmsin_one_comp(
data[obs_group, , drop = FALSE], par_vec,
log(const_vmsin(
par_vec[1],
par_vec[2], par_vec[3]
))
) +
0.5 * sum(-par_vec_lscale[1:3]^2 / norm.var))
} else {
# only prior
res <-
signif_(0.5 * sum(-par_vec_lscale[1:3]^2 / norm.var))
}
res
}
llik_model_contri <- function(data, par_mat, pi_mix) {
signif_(
llik_vmsin_contri_C(
data, par_mat, pi_mix,
signif_(log_const_vmsin_all(par_mat))
)
)
}
mem_p_model <- function(data, par_mat, pi_mix) {
signif_(
mem_p_sin(
data, par_mat, pi_mix,
signif_(log_const_vmsin_all(par_mat)), 1
)
)
}
} else if (model == "vmcos") {
    ell <- list(qrnd = qrnd, n_qrnd = n_qrnd)
if (!is.null(ell$qrnd)) {
qrnd_grid <- ell$qrnd
dim_qrnd <- dim(qrnd_grid)
      if (!is.matrix(qrnd_grid) || is.null(dim_qrnd) ||
        dim_qrnd[2] != 2) {
        stop("\'qrnd\' must be a two column matrix")
}
n_qrnd <- dim_qrnd[1]
} else if (!is.null(ell$n_qrnd)) {
n_qrnd <- round(ell$n_qrnd)
if (n_qrnd < 1) {
stop("n_qrnd must be a positive integer")
}
qrnd_grid <- sobol(n_qrnd, 2, FALSE)
} else {
n_qrnd <- 1e4
qrnd_grid <- sobol(n_qrnd, 2, FALSE)
}
if (unimodal.component) {
dep_cov <- TRUE
dep_cov_type <- "vmcos_unimodal"
} else {
dep_cov <- FALSE
dep_cov_type <- NULL
}
# # in log scale
# lpd_grad_model_indep_1comp <- function(data, par_vec_lscale, obs_group, n.clus) {
# par_vec <- c(exp(par_vec_lscale[1:2]), par_vec_lscale[3:5])
# lpd_grad <- matrix(NA, 6, 1)
# if (n.clus > 0) {
# lpd_grad[] <- (grad_llik_vmcos_C(data[obs_group, , drop=FALSE],
# par_vec[], qrnd_grid) +
# c( # grad for lprior
# (gam.loc - 1)/par_vec[1:2] - gam.rate, -par_vec[3]/norm.var, 0, 0,
# # lprior
# sum((gam.loc - 1)*par_vec_lscale[1:2]-
# gam.rate*par_vec[1:2]) - 0.5*par_vec[3]^2/norm.var)
# ) *
# c(par_vec[1:2], rep(1, 4))
# } else {
# lpd_grad[] <-
# c( # grad for lprior
# (gam.loc - 1)/par_vec[1:2] - gam.rate, -par_vec[3]/norm.var, 0, 0,
# # lprior
# sum((gam.loc - 1)*par_vec_lscale[1:2]-
# gam.rate*par_vec[1:2]) - 0.5*par_vec[3]^2/norm.var
# ) *
# c(par_vec[1:2], rep(1, 4))
# }
# list(lpr = (lpd_grad[6]), grad = lpd_grad[1:5])
# }
#
# lpd_model_indep_1comp <- function(data, par_vec_lscale, obs_group, n.clus) {
#
# par_vec <- c(exp(par_vec_lscale[1:2]), par_vec_lscale[3:5])
# if (n.clus > 0) {
# # llik + prior
# res <-
# llik_vmcos_one_comp(data[obs_group, , drop=FALSE], par_vec[],
# log(const_vmcos(par_vec[1],
# par_vec[2],
# par_vec[3],
# qrnd_grid))) +
# sum((gam.loc - 1)*log(par_vec[1:2])-
# gam.rate*par_vec[1:2]) - 0.5*par_vec[3]^2/norm.var
# } else{
# # only prior
# res <-
# sum((gam.loc - 1)*log(par_vec[1:2])-
# gam.rate*par_vec[1:2]) - 0.5*par_vec[3]^2/norm.var
# }
# res
# }
# in log scale
lpd_grad_model_indep_1comp <- function(data, par_vec_lscale, obs_group, n.clus) {
par_vec <- c(exp(par_vec_lscale[1:2]), par_vec_lscale[3:5])
lpd_grad <- matrix(NA, 6, 1)
if (n.clus > 0) {
lpd_grad[] <- signif_(suppressWarnings(
grad_llik_vmcos_C(
data[obs_group, , drop = FALSE],
par_vec[], qrnd_grid
)
) *
c(par_vec[1:2], rep(1, 4)) +
c( # grad for lprior
-par_vec_lscale[1:3] / norm.var, 0, 0,
# lprior
-0.5 * sum(par_vec_lscale[1:3]^2 / norm.var)
))
} else {
lpd_grad[] <-
signif_(c( # grad for lprior
-par_vec_lscale[1:3] / norm.var, 0, 0,
# lprior
-0.5 * sum(par_vec_lscale[1:3]^2 / norm.var)
))
}
list(lpr = (lpd_grad[6]), grad = lpd_grad[1:5])
}
lpd_model_indep_1comp <- function(data, par_vec_lscale, obs_group, n.clus) {
par_vec <- c(exp(par_vec_lscale[1:2]), par_vec_lscale[3:5])
if (n.clus > 0) {
# llik + prior
res <-
signif_(llik_vmcos_one_comp(
data[obs_group, , drop = FALSE], par_vec[],
log(const_vmcos(
par_vec[1],
par_vec[2],
par_vec[3],
qrnd_grid
))
) -
0.5 * sum(par_vec_lscale[1:3]^2 / norm.var))
} else {
# only prior
res <-
signif_(-0.5 * sum(par_vec_lscale[1:3]^2 / norm.var))
}
res
}
llik_model_contri <- function(data, par_mat, pi_mix) {
signif_(
llik_vmcos_contri_C(
data, par_mat, pi_mix,
signif_(log_const_vmcos_all(par_mat, qrnd_grid))
)
)
}
mem_p_model <- function(data, par_mat, pi_mix) {
signif_(
mem_p_cos(
data, par_mat, pi_mix,
signif_(log_const_vmcos_all(par_mat, qrnd_grid))
)
)
}
} else if (model == "wnorm2") {
dep_cov <- TRUE
dep_cov_type <- "wnorm2_bound"
if (int.displ >= 5) {
int.displ <- 5
} else if (int.displ <= 1) int.displ <- 1
int.displ <- floor(int.displ)
omega.2pi.all <- expand.grid(-int.displ:int.displ, -int.displ:int.displ) * (2 * pi) # 2pi * integer displacements
omega.2pi <- as.matrix(omega.2pi.all)
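    # e.g. int.displ = 1 gives the 9 displacement pairs 2*pi*(-1:1) x 2*pi*(-1:1)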
# lpd_grad_model_indep <- function(data, par_mat, obs_group, n.clus) {
# lpd_grad <- matrix(NA, 6, ncomp)
# for(j in 1:ncomp) {
# if (n.clus[j] > 0) {
# lpd_grad[, j] <- grad_llik_wnorm2_C(data[obs_group[[j]], , drop=FALSE],
# par_mat[, j], omega.2pi) +
# c( # grad for lprior
# (gam.loc - 1)/par_mat[1:2, j] - gam.rate, -par_mat[3, j]/norm.var, 0, 0,
# # lprior
# sum((gam.loc - 1)*log(par_mat[1:2, j])-
# gam.rate*par_mat[1:2, j]) - 0.5*par_mat[3, j]^2/norm.var
# )
# } else {
# lpd_grad[, j] <-
# c( # grad for lprior
# (gam.loc - 1)/par_mat[1:2, j] - gam.rate, -par_mat[3, j]/norm.var, 0, 0,
# # lprior
# sum((gam.loc - 1)*log(par_mat[1:2, j])-
# gam.rate*par_mat[1:2, j]) - 0.5*par_mat[3, j]^2/norm.var
# )
# }
# }
# list(lpr = sum(lpd_grad[6, ]), grad = lpd_grad[1:5, ])
# }
# lpd_model_indep <- function(data, par_mat, obs_group, n.clus) {
# res <- 0
# for(j in 1:ncomp) {
# if (n.clus[j] > 0) {
# # llik + prior
# res <- res +
# llik_wnorm2_one_comp(data[obs_group[[j]], , drop=FALSE], par_mat[, j],
# l_const_wnorm2(par_mat[, j]),
# omega.2pi) +
# sum((gam.loc - 1)*log(par_mat[1:2, j])-
# gam.rate*par_mat[1:2, j]) - 0.5*par_mat[3, j]^2/norm.var
# } else{
# # only prior
# res <- res +
# sum((gam.loc - 1)*log(par_mat[1:2, j])-
# gam.rate*par_mat[1:2, j]) - 0.5*par_mat[3, j]^2/norm.var
# }
# }
# unname(res)
# }
# # in log scale
# lpd_grad_model_indep_1comp <- function(data, par_vec_lscale, obs_group, n.clus) {
# par_vec <- c(exp(par_vec_lscale[1:2]), par_vec_lscale[3:5])
# lpd_grad <- matrix(NA, 6, 1)
# if (n.clus > 0) {
# lpd_grad[] <- (grad_llik_wnorm2_C(data[obs_group, , drop=FALSE],
# par_vec[], omega.2pi) +
# c( # grad for lprior
# (gam.loc - 1)/par_vec[1:2] - gam.rate, -par_vec[3]/norm.var, 0, 0,
# # lprior
# sum((gam.loc - 1)*par_vec_lscale[1:2]-
# gam.rate*par_vec[1:2]) - 0.5*par_vec[3]^2/norm.var
# )) *
# c(par_vec[1:2], rep(1, 4))
# } else {
# lpd_grad[] <-
# c( # grad for lprior
# (gam.loc - 1)/par_vec[1:2] - gam.rate, -par_vec[3]/norm.var, 0, 0,
# # lprior
# sum((gam.loc - 1)*par_vec_lscale[1:2]-
# gam.rate*par_vec[1:2]) - 0.5*par_vec[3]^2/norm.var
# ) *
# c(par_vec[1:2], rep(1, 4))
# }
# list(lpr = sum(lpd_grad[6]), grad = lpd_grad[1:5])
# }
#
# lpd_model_indep_1comp <- function(data, par_vec_lscale, obs_group, n.clus) {
# par_vec <- c(exp(par_vec_lscale[1:2]), par_vec_lscale[3:5])
# if (n.clus > 0) {
# # llik + prior
# res <-
# llik_wnorm2_one_comp(data[obs_group, , drop=FALSE], par_vec[],
# l_const_wnorm2(par_vec[]),
# omega.2pi) +
# sum((gam.loc - 1)*par_vec_lscale[1:2]-
# gam.rate*par_vec[1:2]) - 0.5*par_vec[3]^2/norm.var
# } else{
# # only prior
# res <-
# sum((gam.loc - 1)*par_vec_lscale[1:2]-
# gam.rate*par_vec[1:2]) - 0.5*par_vec[3]^2/norm.var
# }
# unname(res)
# }
# in log scale
lpd_grad_model_indep_1comp <- function(data, par_vec_lscale, obs_group, n.clus) {
par_vec <- signif_(c(exp(par_vec_lscale[1:2]), par_vec_lscale[3:5]))
lpd_grad <- matrix(NA, 6, 1)
if (n.clus > 0) {
lpd_grad[] <- signif_(
grad_llik_wnorm2_C(
data[obs_group, , drop = FALSE],
par_vec[], omega.2pi
) *
c(par_vec[1:2], rep(1, 4)) +
c( # grad for lprior
-par_vec_lscale[1:3] / norm.var, 0, 0,
# lprior
-0.5 * sum(par_vec_lscale[1:3]^2 / norm.var)
)
)
} else {
lpd_grad[] <-
signif_(c( # grad for lprior
-par_vec_lscale[1:3] / norm.var, 0, 0,
# lprior
-0.5 * sum(par_vec_lscale[1:3]^2 / norm.var)
))
}
list(lpr = (lpd_grad[6]), grad = lpd_grad[1:5])
}
lpd_model_indep_1comp <- function(data, par_vec_lscale, obs_group, n.clus) {
par_vec <- signif_(c(exp(par_vec_lscale[1:2]), par_vec_lscale[3:5]))
if (n.clus > 0) {
# llik + prior
res <-
signif_(llik_wnorm2_one_comp(
data[obs_group, , drop = FALSE], par_vec[],
l_const_wnorm2(par_vec[]),
omega.2pi
) -
0.5 * sum(par_vec_lscale[1:3]^2 / norm.var))
} else {
# only prior
res <-
signif_(-0.5 * sum(par_vec_lscale[1:3]^2 / norm.var))
}
res
}
llik_model_contri <- function(data, par_mat, pi_mix) {
signif_(
llik_wnorm2_contri_C(
data, par_mat, pi_mix,
signif_(log_const_wnorm2_all(par_mat)),
omega.2pi
)
)
}
mem_p_model <- function(data, par_mat, pi_mix) {
signif_(
mem_p_wnorm2(
data, par_mat, pi_mix,
signif_(log_const_wnorm2_all(par_mat)),
omega.2pi
)
)
}
} else if (model == "vm") {
dep_cov <- FALSE
# lpd_grad_model_indep <- function(data, par_mat, obs_group, n.clus) {
# lpd_grad <- matrix(NA, 3, ncomp)
# for(j in 1:ncomp) {
# if (n.clus[j] > 0) {
# lpd_grad[, j] <- grad_llik_univm_C(data[obs_group[[j]]],
# par_mat[, j]) +
# c( # grad for lprior
# (gam.loc - 1)/par_mat[1, j] - gam.rate, 0,
# # lprior
# (gam.loc - 1)*log(par_mat[1, j])-
# gam.rate*par_mat[1, j]
# )
#
# } else {
# lpd_grad[, j] <-
# c( # grad for lprior
# (gam.loc - 1)/par_mat[1, j] - gam.rate, 0,
# # lprior
# (gam.loc - 1)*log(par_mat[1, j])-
# gam.rate*par_mat[1, j]
# )
# }
# }
# list(lpr = sum(lpd_grad[3, ]), grad = lpd_grad[1:2, ])
# }
#
# lpd_model_indep <- function(data, par_mat, obs_group, n.clus) {
# res <- 0
# for(j in 1:ncomp) {
# if (n.clus[j] > 0) {
# # llik + prior
# res <- res +
# llik_univm_one_comp(data[obs_group[[j]]], par_mat[, j],
# log(const_univm(par_mat[1, j]))) +
# (gam.loc - 1)*log(par_mat[1, j]) - gam.rate*par_mat[1, j]
#
# } else{
# # only prior
# res <- res +
# (gam.loc - 1)*log(par_mat[1, j]) - gam.rate*par_mat[1, j]
# }
# }
# unname(res)
# }
# lpd_grad_model_indep_1comp <- function(data, par_vec, obs_group, n.clus) {
# lpd_grad <- matrix(NA, 3, 1)
# if (n.clus > 0) {
# lpd_grad[] <- grad_llik_univm_C(data[obs_group],
# par_vec[]) +
# c( # grad for lprior
# (gam.loc - 1)/par_vec[1] - gam.rate, 0,
# # lprior
# (gam.loc - 1)*log(par_vec[1])-
# gam.rate*par_vec[1]
# )
#
# } else {
# lpd_grad[] <-
# c( # grad for lprior
# (gam.loc - 1)/par_vec[1] - gam.rate, 0,
# # lprior
# (gam.loc - 1)*log(par_vec[1])-
# gam.rate*par_vec[1]
# )
# }
# list(lpr = (lpd_grad[3 ]), grad = lpd_grad[1:2 ])
# }
#
# in log scale
lpd_grad_model_indep_1comp <- function(data, par_vec_lscale, obs_group, n.clus) {
lpd_grad <- matrix(NA, 3, 1)
par_vec <- c(exp(par_vec_lscale[1]), par_vec_lscale[2])
if (n.clus > 0) {
lpd_grad[] <- signif_(suppressWarnings(
grad_llik_univm_C(
data[obs_group],
par_vec[]
)
) * c(par_vec[1], 1, 1) +
c( # grad for lprior
-par_vec_lscale[1] / norm.var, 0,
# lprior
-0.5 * sum(par_vec_lscale[1]^2 / norm.var)
))
} else {
lpd_grad[] <-
signif_(c( # grad for lprior
-par_vec_lscale[1] / norm.var, 0,
# lprior
-0.5 * sum(par_vec_lscale[1]^2 / norm.var)
))
}
list(lpr = (lpd_grad[3]), grad = lpd_grad[1:2])
}
lpd_model_indep_1comp <- function(data, par_vec_lscale, obs_group, n.clus) {
par_vec <- c(exp(par_vec_lscale[1]), par_vec_lscale[2])
if (n.clus > 0) {
# llik + prior
res <-
signif_(
llik_univm_one_comp(
data[obs_group], par_vec[],
log(const_univm(par_vec[1]))
) -
0.5 * sum(par_vec_lscale[1]^2 / norm.var)
)
} else {
# only prior
res <-
signif_(-0.5 * sum(par_vec_lscale[1]^2 / norm.var))
}
unname(res)
}
llik_model_contri <- function(data, par_mat, pi_mix) {
signif_(
llik_univm_contri_C(
data, par_mat, pi_mix,
signif_(log_const_univm_all(par_mat))
)
)
}
mem_p_model <- function(data, par_mat, pi_mix) {
signif_(
mem_p_univm(
data, par_mat, pi_mix,
signif_(log_const_univm_all(par_mat))
)
)
}
}
# else if (model == "wnorm")
else {
dep_cov <- FALSE
if (int.displ >= 5) {
int.displ <- 5
} else if (int.displ <= 1) int.displ <- 1
int.displ <- floor(int.displ)
omega.2pi <- (-int.displ):int.displ * (2 * pi) # 2pi * 1d integer displacements
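    # e.g. int.displ = 2 gives the 5 displacements 2*pi*(-2:2)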
#
# lpd_grad_model_indep <- function(data, par_mat, obs_group, n.clus) {
# lpd_grad <- matrix(NA, 3, ncomp)
# for(j in 1:ncomp) {
# if (n.clus[j] > 0) {
# lpd_grad[, j] <- grad_llik_uniwnorm_C(data[obs_group[[j]]],
# par_mat[, j], omega.2pi) +
# c( # grad for lprior
# (gam.loc - 1)/par_mat[1, j] - gam.rate, 0,
# # lprior
# (gam.loc - 1)*log(par_mat[1, j])-
# gam.rate*par_mat[1, j]
# )
#
# } else {
# lpd_grad[, j] <-
# c( # grad for lprior
# (gam.loc - 1)/par_mat[1, j] - gam.rate, 0,
# # lprior
# (gam.loc - 1)*log(par_mat[1, j])-
# gam.rate*par_mat[1, j]
# )
# }
# }
# list(lpr = sum(lpd_grad[3, ]), grad = lpd_grad[1:2, ])
# }
#
# lpd_model_indep <- function(data, par_mat, obs_group, n.clus) {
# res <- 0
# for(j in 1:ncomp) {
# if (n.clus[j] > 0) {
# # llik + prior
# res <- res +
# llik_uniwnorm_one_comp(data[obs_group[[j]]], par_mat[, j],
# l_const_uniwnorm(par_mat[1, j]),
# omega.2pi) +
# (gam.loc - 1)*log(par_mat[1, j]) - gam.rate*par_mat[1, j]
#
# } else{
# # only prior
# res <- res +
# (gam.loc - 1)*log(par_mat[1, j]) - gam.rate*par_mat[1, j]
# }
# }
# unname(res)
# }
lpd_grad_model_indep_1comp <- function(data, par_vec_lscale, obs_group, n.clus) {
lpd_grad <- matrix(NA, 3, 1)
par_vec <- c(exp(par_vec_lscale[1]), par_vec_lscale[2])
if (n.clus > 0) {
lpd_grad[] <- signif_(grad_llik_uniwnorm_C(
data[obs_group],
par_vec[], omega.2pi
) * c(par_vec[1], 1, 1) +
c( # grad for lprior
-par_vec_lscale[1] / norm.var, 0,
# lprior
-0.5 * sum(par_vec_lscale[1]^2 / norm.var)
))
} else {
lpd_grad[] <-
signif_(c( # grad for lprior
-par_vec_lscale[1] / norm.var, 0,
# lprior
-0.5 * sum(par_vec_lscale[1]^2 / norm.var)
))
}
list(lpr = (lpd_grad[3]), grad = lpd_grad[1:2])
}
lpd_model_indep_1comp <- function(data, par_vec_lscale, obs_group, n.clus) {
par_vec <- c(exp(par_vec_lscale[1]), par_vec_lscale[2])
if (n.clus > 0) {
# llik + prior
res <-
signif_(llik_uniwnorm_one_comp(
data[obs_group], par_vec[],
l_const_uniwnorm(par_vec[1]),
omega.2pi
) -
0.5 * sum(par_vec_lscale[1]^2 / norm.var))
} else {
# only prior
res <-
signif_(-0.5 * sum(par_vec_lscale[1]^2 / norm.var))
}
unname(res)
}
llik_model_contri <- function(data, par_mat, pi_mix) {
signif_(
llik_uniwnorm_contri_C(
data, par_mat, pi_mix,
signif_(log_const_uniwnorm_all(par_mat)),
omega.2pi
)
)
}
mem_p_model <- function(data, par_mat, pi_mix) {
signif_(
mem_p_uniwnorm(
data, par_mat, pi_mix,
signif_(log_const_uniwnorm_all(par_mat)),
omega.2pi
)
)
}
# grad_llik_uniwnorm_R <- function(data, par) {
# gr <- numDeriv::grad(function(par) llik_uniwnorm_one_comp(data, par,
# l_const_uniwnorm(par[1]),
# omega.2pi),
# par)
# gr[3] <- llik_uniwnorm_one_comp(data, par,
# l_const_uniwnorm(par[1]), omega.2pi)
# gr
# }
# llik_uniwnorm_full(data.rad, par.mat, pi.mix, log_c = log_const_uniwnorm_all(par.mat), omega.2pi)
#
#
# grad_llik_uniwnorm_C(data.rad, par.mat[, 1], omega.2pi)
# grad_llik_uniwnorm_R(data.rad, par.mat[, 1])
}
if (!is.null(start_par)) {
if (!is.list(start_par)) {
stop("start_par must be a list")
}
if (!is.list(start_par[[1]])) {
if (length(setdiff(c(par.names, "pmix"), names(start_par))) > 0) {
stop("start_par does not have all parameters")
}
start_par <- lapply(1:n.chains, function(ii) start_par)
} else {
if (length(start_par) != n.chains) {
stop("length(start_par) must be either 1 or equal to n.chains")
}
for (jj in 1:n.chains) {
if (length(setdiff(c(par.names, "pmix"), names(start_par[[jj]]))) > 0) {
stop(paste0("start_par[[", jj, "]] does not have all parameters"))
}
}
}
}
if (any(is.null(start_par), !is.list(start_par[[1]])) &
all(!rand_start)) {
starting_1 <- process_startpar(
start_par,
data.rad,
ncomp,
model,
FALSE
)
starting <- lapply(1:n.chains, function(j) starting_1)
} else {
starting <- lapply(
1:n.chains,
function(j) {
process_startpar(
start_par[[j]],
data.rad,
ncomp,
model,
rand_start[j]
)
}
)
}
run_MC <- function(starting, L, chain_no) {
# just to change the RNG state across chains, so that
    # no two chains turn out to be identical
change_rng_state <- runif(chain_no)
if (method == "hmc") # using hmc
{
L_vec <- rep(L, n.iter)
if (L.random) {
L.orig <- L
}
}
starting$par.mat[abs(starting$par.mat) >= kappa_upper / 2] <- kappa_upper / 2
starting$par.mat[abs(starting$par.mat) <= 2 * kappa_lower] <- 2 * kappa_lower
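    # (starting values are pulled strictly inside the allowed range --
    # presumably to keep the chain from starting on or near the
    # kappa_lower / kappa_upper boundaries)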
par.mat.all <- array(0, dim = c(npar_1_comp, ncomp, n.iter))
par.mat_lscale.all <- par.mat.all
pi.mix.all <- matrix(1, nrow = ncomp, ncol = n.iter)
llik.all <- lprior.all <- lpd.all <- rep(-Inf, (n.iter))
accpt.par.mat.all <- matrix(NA, ncomp, n.iter)
modelpar.names <- par.names
clus_ind.all <- matrix(1, nrow = n.data, ncol = n.iter)
if (return_llik_contri) {
llik_contri_all <- matrix(1, nrow = n.data, ncol = n.iter)
} else {
llik_contri_all <- NULL
}
mem_prob_all <- array(1, dim = c(n.data, ncomp, n.iter))
epsilon_ave <- NULL
L_ave <- NULL
propscale_final <- NULL
pi.mix <- starting$pi.mix
par.mat <- as.matrix(starting$par.mat)
if (zero_cov) {
par.mat[3, ] <- 0
} else if (cov.restrict == "POSITIVE") {
par.mat[3, ] <- pmax(par.mat[3, ], 0)
} else if (cov.restrict == "NEGATIVE") {
par.mat[3, ] <- pmin(par.mat[3, ], 0)
}
if (type == "bi") {
par.mat_lscale <- find_lscale_mat(par.mat)
par_lower_lscale <- find_lscale_mat(par_lower)
par_upper_lscale <- find_lscale_mat(par_upper)
} else {
par.mat_lscale <- find_lscale_mat_uni(par.mat)
par_lower_lscale <- find_lscale_mat_uni(par_lower)
par_upper_lscale <- find_lscale_mat_uni(par_upper)
}
# browser()
if (ncomp == 1) {
perm_sampling <- FALSE
clus.ind <- clus.ind_curr <- rep(1, n.data)
obs_group <- list(1:n.data)
n.clus <- n.data
post.wt <- matrix(1, n.data, 1)
pi.mix <- 1
}
tcltk_fail <- FALSE
if (show.progress & !exists("pb") & progress.backend == "tcltk") {
pb <- tryCatch(tcltk::tkProgressBar(
title = paste("Chain", chain_no),
label = "Initializing...",
min = 1, max = n.iter
),
error = function(e) e
)
if (is(pb, "error") | is(pb, "warning")) {
show.progress <- FALSE
tcltk_fail <- TRUE
}
} else if (show.progress & !exists("pb") & progress.backend == "txt") {
pb <- utils::txtProgressBar(title = paste("Chain", chain_no),
label = "Initializing...",
min = 1, max = n.iter, style = 3)
tcltk_fail <- FALSE
}
if (tcltk_fail) {
msg <- paste(
"{tcltk} could not be loaded; 'show.progress' was set to FALSE."#,
# "Consider setting progress.backend = \"txt\" for a text-based",
# "progress bar. See ?fit_angmix for more details."
)
message(msg)
show.progress <- FALSE
}
par.mat.all.order <- par.mat.all
ntunes_up <- ntunes_down <- rep(0, ncomp)
# tune_iter_no <- c()
# ave_accpt_all <- c()
tune_param_all <- matrix(NA, length(tune_param), n.iter)
tune_status <- matrix(0, ncomp, n.iter)
# Run the Markov chain
for (iter in seq_len(n.iter)) {
# browser()
if (method == "hmc") {
if (epsilon.random) {
tune_param_final <- tune_param * runif(1, 1 - epsilon.incr, 1 + epsilon.incr)
} else {
tune_param_final <- tune_param
}
if (L.random) {
L_vec[iter] <- L <- sample(ceiling(L.orig / exp(L.incr)):ceiling(L.orig * exp(L.incr)), 1)
}
}
#----------------------------------------------------------------------------------
# generating mixture proportions if ncomp > 1
#----------------------------------------------------------------------------------
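      # (Gibbs step: each observation is assigned to a mixture component by
      # sampling from its posterior membership probabilities, and pi.mix is
      # then drawn from its conjugate Dirichlet(pmix.alpha + n.clus) full
      # conditional.)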
if (ncomp > 1) {
post.wt <- mem_p_model(data.rad, par.mat, pi.mix)
clus.ind <- cID(post.wt, ncomp, runif(n.data))
# clus.ind <- apply(post.wt, 1, function(x) which.max(rmultinom(n = 1, size = 1, prob = x)))
# n.clus <- tabulate(clus.ind_curr, nbins = ncomp)
obs_group <- lapply(1:ncomp, function(j) which(clus.ind == j))
n.clus <- listLen(obs_group)
pi.mix <-
as.numeric(rdirichlet(1, (pmix.alpha + n.clus))) # new mixture proportions
}
#----------------------------------------------------------------------------------
# generating par.mat
#----------------------------------------------------------------------------------
if (type == "bi") {
if (method == "hmc") {
# hmc_curr <-
# bounded_hmc_biv(lpr_grad =
# function(par_mat)
# lpd_grad_model_indep(data.rad, par_mat,
# obs_group, n.clus),
# init = par.mat,
# lower = par_lower,
# upper = par_upper,
# dep_cov = dep_cov,
# dep_cov_type = dep_cov_type,
# zero_cov = zero_cov,
# nsteps = L,
# step = tune_param_final)
#
# par.mat <- hmc_curr$final
# accpt.par.mat.all[iter] <- hmc_curr$acc
# par_mat = par.mat
# lpd_grad_model_indep(data.rad, par_mat,
# obs_group, n.clus)
#
#
# lpd_grad_model_indep_1comp(data.rad, par_mat[, 1],
# obs_group[[1]], n.clus[1])
# lpd_grad_model_indep_1comp(data.rad, par_mat[, 2],
# obs_group[[2]], n.clus[2])
#
#
# lpd_model_indep(data.rad, par_mat,
# obs_group, n.clus)
#
# lpd_model_indep_1comp(data.rad, par_mat[, 1],
# obs_group[[1]], n.clus[1])
#
# browser()
for (j in 1:ncomp) {
# browser()
hmc_curr <-
bounded_hmc_biv(
lpr_grad =
function(par_vec_lscale) {
lpd_grad_model_indep_1comp(
data.rad, par_vec_lscale,
obs_group[[j]], n.clus[j]
)
},
init = par.mat_lscale[, j, drop = FALSE],
lower = par_lower_lscale[, j, drop = FALSE],
upper = par_upper_lscale[, j, drop = FALSE],
dep_cov = dep_cov,
dep_cov_type = dep_cov_type,
zero_cov = zero_cov,
nsteps = L,
step = tune_param_final[, j, drop = FALSE]
)
par.mat[, j] <- signif_(find_expscale(hmc_curr$final))
par.mat_lscale[, j] <- signif_(hmc_curr$final)
accpt.par.mat.all[j, iter] <- hmc_curr$acc
}
} else {
# rwmh_curr <- bounded_rwmh_biv(lpr =
# function(par_mat)
# lpd_model_indep(data.rad, par_mat,
# obs_group, n.clus),
# init = par.mat,
# lower = par_lower,
# upper = par_upper,
# dep_cov = dep_cov,
# dep_cov_type = dep_cov_type,
# zero_cov = zero_cov,
# step = tune_param)
#
# par.mat <- rwmh_curr$final
# accpt.par.mat.all[iter] <- rwmh_curr$accpt
for (j in 1:ncomp) {
rwmh_curr <- bounded_rwmh_biv(
lpr =
function(par_vec_lscale) {
lpd_model_indep_1comp(
data.rad, par_vec_lscale,
obs_group[[j]], n.clus[j]
)
},
init = par.mat_lscale[, j, drop = FALSE],
lower = par_lower_lscale[, j, drop = FALSE],
upper = par_upper_lscale[, j, drop = FALSE],
dep_cov = dep_cov,
dep_cov_type = dep_cov_type,
zero_cov = zero_cov,
step = tune_param[, j]
)
par.mat[, j] <- signif_(find_expscale(rwmh_curr$final))
par.mat_lscale[, j] <- signif_(rwmh_curr$final)
accpt.par.mat.all[j, iter] <- rwmh_curr$accpt
}
}
lprior.all[iter] <-
-0.5 * sum(par.mat_lscale[1:3, ]^2 / norm.var) +
sum(pmix.alpha * log(pi.mix))
}
# if type == "uni"
else {
if (method == "hmc") {
# hmc_curr <-
# bounded_hmc_uni(lpr_grad =
# function(par_mat)
# lpd_grad_model_indep(data.rad, par_mat,
# obs_group, n.clus),
# init = par.mat,
# lower = par_lower,
# upper = par_upper,
# nsteps = L,
# step = tune_param_final)
#
# par.mat <- hmc_curr$final
# accpt.par.mat.all[iter] <- hmc_curr$acc
for (j in 1:ncomp) {
hmc_curr <-
bounded_hmc_uni(
lpr_grad =
function(par_vec_lscale) {
lpd_grad_model_indep_1comp(
data.rad, par_vec_lscale,
obs_group[[j]], n.clus[j]
)
},
init = par.mat_lscale[, j, drop = FALSE],
lower = par_lower_lscale[, j, drop = FALSE],
upper = par_upper_lscale[, j, drop = FALSE],
nsteps = L,
step = tune_param_final[, j, drop = FALSE]
)
par.mat[, j] <- signif_(find_expscale_uni(hmc_curr$final))
par.mat_lscale[, j] <- signif_(hmc_curr$final)
accpt.par.mat.all[j, iter] <- hmc_curr$acc
}
} else {
# rwmh_curr <- bounded_rwmh_uni(lpr =
# function(par_mat)
# lpd_model_indep(data.rad, par_mat,
# obs_group, n.clus),
# init = par.mat,
# lower = par_lower,
# upper = par_upper,
# step = tune_param)
#
# par.mat <- rwmh_curr$final
# accpt.par.mat.all[iter] <- rwmh_curr$accpt
for (j in 1:ncomp) {
rwmh_curr <- bounded_rwmh_uni(
lpr =
function(par_vec_lscale) {
lpd_model_indep_1comp(
data.rad, par_vec_lscale,
obs_group[[j]], n.clus[j]
)
},
init = par.mat_lscale[, j, drop = FALSE],
lower = par_lower_lscale[, j, drop = FALSE],
upper = par_upper_lscale[, j, drop = FALSE],
step = tune_param[, j]
)
par.mat[, j] <- signif_(find_expscale_uni(rwmh_curr$final))
par.mat_lscale[, j] <- signif_(rwmh_curr$final)
accpt.par.mat.all[j, iter] <- rwmh_curr$accpt
}
}
lprior.all[iter] <-
-0.5 * sum(par.mat_lscale[1, ]^2 / norm.var) +
sum(pmix.alpha * log(pi.mix))
}
# do permutation sampling only after tuning
if (perm_sampling & iter > n.burnin) {
rand_perm <- sample(1:ncomp)
clus.ind <- rand_perm[clus.ind] # random label switch
post.wt <- post.wt[, rand_perm, drop = FALSE]
par.mat <- par.mat[, rand_perm, drop = FALSE]
par.mat_lscale <- par.mat_lscale[, rand_perm, drop = FALSE]
pi.mix <- pi.mix[rand_perm, drop = FALSE]
par.mat.all.order[, , iter] <- par.mat[, order(rand_perm), drop = FALSE]
# needed for tuning
# if (method == "hmc")
tune_param <- tune_param[, rand_perm, drop = FALSE]
}
# browser()
llik_contri <- llik_model_contri(data.rad, par.mat, pi.mix)
if (return_llik_contri) {
llik_contri_all[, iter] <- llik_contri
}
llik.all[iter] <- sum(llik_contri)
lpd.all[iter] <- llik.all[iter] + lprior.all[iter]
par.mat.all[, , iter] <- par.mat
par.mat_lscale.all[, , iter] <- par.mat_lscale
pi.mix.all[, iter] <- pi.mix
mem_prob_all[, , iter] <- post.wt
clus_ind.all[, iter] <- clus.ind
tune_param_all[, iter] <- c(tune_param)
# tuning tune_param during burnin
if (autotune & iter >= nterms & iter %% 10 == 0 &
iter <= iter.tune) {
ave_accpt_all <- rep(0, ncomp)
for (j in 1:ncomp) {
ave_accpt_all[j] <-
ave_accpt <- sum(accpt.par.mat.all[j, (iter - nterms + 1):iter]) / nterms
if (ave_accpt > accpt.prob.upper) {
tune_param[, j] <- tune_param[, j] * (1 + tune.incr)
ntunes_up[j] <- ntunes_up[j] + 1
tune_status[j, iter] <- 1
} else if (ave_accpt < accpt.prob.lower) {
tune_param[, j] <- tune_param[, j] * (1 - tune.incr)
ntunes_down[j] <- ntunes_down[j] + 1
tune_status[j, iter] <- -1
}
if (iter %% nterms == 0) {
par.sd <- apply(par.mat_lscale.all[, j, (iter - nterms + 1):iter], 1, sd)
mean_par.sd <- sum(par.sd) / (npar_1_comp)
mean_tune_param_j <- sum(tune_param[, j]) / (npar_1_comp)
if (mean_par.sd > 0) {
tune_param[, j] <- 0.5 * par.sd / mean_par.sd * mean_tune_param_j + 0.5 * tune_param[, j]
}
# else {
# par.sd <- apply(par.mat.all.order[, , (iter-nterms+1):iter], c(1:2), sd)
# par.sd <- par.sd[, rand_perm]
# mean_par.sd <- sum(par.sd)/(npar_1_comp*ncomp)
# mean_tune_param <- sum(tune_param)/(npar_1_comp*ncomp)
# if(mean_par.sd > 0)
# tune_param <- 0.5*par.sd/mean_par.sd*mean_tune_param + 0.5*tune_param
#
# cat(ave_accpt, tune_param[1,],lpd.all[iter], "\n")
# }
}
}
# if(iter %% nterms == 0)
# cat(paste("(", paste0(ave_accpt_all, collapse = ","), ")"),
# tune_param[1,], lpd.all[iter], "\n")
}
# if (method == "hmc" & autotune & iter >= nterms
# & iter %% 5 == 0 & iter <= n.burnin) {
# acr <- cor(lpd.all[(iter-nterms+2):iter],
# lpd.all[(iter-nterms+1):(iter-1)])
# if (acr < acr.lower) {
# L <- ceiling(L*(1 - tune.incr))
# } else if (acr > acr.upper) {
# L <- ceiling(L*(1 + tune.incr))
# }
# }
if (show.progress) {
message <- paste0(
"Progress: ", round(iter / n.iter * 100), "% ",
ifelse(iter <= n.burnin,
"(Burn-in)",
"(Sampling)"
)
)
if (progress.backend == "tcltk") {
tcltk::setTkProgressBar(pb, iter, label = message)
} else if (progress.backend == "txt") {
utils::setTxtProgressBar(pb, value = iter, label = message)
}
}
}
  if (method == "hmc") {
epsilon_ave <- epsilon <- mean(tune_param_all)
if (L.random) {
L_ave <- sum(L_vec) / n.iter
} else {
L_ave <- L
}
}
  if (method == "rwmh") {
propscale_final <- propscale <- rowSums(tune_param_all) / n.iter
}
allpar_val <- array(1, dim = c(npar_1_comp + 1, ncomp, n.iter))
allpar_val[1, , ] <- pi.mix.all
allpar_val[-1, , ] <- par.mat.all
rm(pi.mix.all, par.mat.all)
allpar_name <- c("pmix", modelpar.names)
dimnames(allpar_val)[[1]] <- c("pmix", modelpar.names)
if (!return_tune_param) {
rm(tune_param_all)
tune_param_all <- NULL
}
result <- list(
"par.value" = allpar_val, # [, , final_iter_set],
"par.name" = allpar_name,
"llik.contri" = llik_contri_all, # [, final_iter_set],
"llik" = llik.all, # [final_iter_set],
"lpd" = lpd.all, # [final_iter_set],
"lprior" = lprior.all, # [final_iter_set],
"accpt.modelpar" = accpt.par.mat.all, # [final_iter_set],
"clus.ind" = clus_ind.all, # [, final_iter_set],
"mem.prob" = mem_prob_all, # [, , final_iter_set],
"epsilon" = epsilon_ave,
"L" = L_ave,
"propscale" = propscale_final,
"tune_param" = tune_param_all,
"par.upper" = par_upper,
"par.lower" = par_lower,
"tcltk_fail" = tcltk_fail
)
# if (show.progress & progress.backend == "tcltk")
if (show.progress) close(pb)
result
}
# seed <- floor(runif(1, 1, 100))
# browser()
# generate three chains in parallel, if possible
if (chains_parallel) {
res_list <- future_lapply(1:n.chains,
function(ii) {
run_MC(
starting[[ii]],
L[ii], ii
)
},
future.seed = TRUE
)
} else {
res_list <- lapply(
1:n.chains,
function(ii) run_MC(starting[[ii]], L[ii], ii)
)
}
# if (res_list[[1]]$tcltk_fail) {
# msg <- paste(
# "tcltk could not be loaded; 'show.progress' was set to FALSE.",
# "Consider setting progress.backend = \"txt\" for a text-based",
# "progress bar. See ?fit_angmix for more details."
# )
# warning()
# }
# combine the results from the lists
allpar_val <- array(0, dim = c(npar_1_comp + 1, ncomp, n.iter, n.chains))
llik_all <- lprior_all <- lpd_all <- matrix(0, n.iter, n.chains)
accpt.modelpar_all <- array(0, dim = c(ncomp, n.iter, n.chains))
clus.ind_all <- array(0, dim = c(n.data, n.iter, n.chains))
if (return_llik_contri) {
llik_contri_all <- array(0, dim = c(n.data, n.iter, n.chains))
} else {
llik_contri_all <- NULL
}
mem_prob_all <- array(1, dim = c(n.data, ncomp, n.iter, n.chains))
if (return_tune_param) {
tune_param_all <- array(NA, dim = c(length(tune_param), n.iter, n.chains))
} else {
tune_param_all <- NULL
}
for (j in 1:n.chains) {
allpar_val[, , , j] <- res_list[[j]]$par.value
llik_all[, j] <- res_list[[j]]$llik
lprior_all[, j] <- res_list[[j]]$lprior
lpd_all[, j] <- res_list[[j]]$lpd
accpt.modelpar_all[, , j] <- res_list[[j]]$accpt.modelpar
clus.ind_all[, , j] <- res_list[[j]]$clus.ind
if (return_llik_contri) {
llik_contri_all[, , j] <- res_list[[j]]$llik.contri
}
mem_prob_all[, , , j] <- res_list[[j]]$mem.prob
if (return_tune_param) {
tune_param_all[, , j] <- res_list[[j]]$tune_param
}
}
if (method == "hmc") {
epsilon_final <- do.call(cbind, lapply(res_list, function(x) x$epsilon))
L_final <- unlist(lapply(res_list, function(x) x$L))
propscale_final <- NULL
} else {
propscale_final <- do.call(cbind, lapply(res_list, function(x) x$propscale))
epsilon_final <- NULL
L_final <- NULL
}
out <- list(
"par.value" = allpar_val,
"clus.ind" = clus.ind_all,
"par.name" = res_list[[1]]$par.name,
"modelpar.lower" = res_list[[1]]$par.lower,
"modelpar.upper" = res_list[[1]]$par.upper,
"return_llik_contri" = return_llik_contri,
"llik.contri" = llik_contri_all,
"mem.prob" = mem_prob_all,
"llik" = llik_all,
"lpd" = lpd_all,
"lprior" = lprior_all,
"accpt.modelpar" = accpt.modelpar_all,
"model" = curr.model,
"method" = method,
"perm_sampling" = perm_sampling,
"epsilon.random" = epsilon.random,
"epsilon" = epsilon_final,
"L.random" = L.random,
"L" = L_final,
"iter.tune" = iter.tune,
"propscale" = propscale_final,
"tune_param" = tune_param_all,
"return_tune_param" = return_tune_param,
"type" = type,
"data" = data.rad,
"cov.restrict" = cov.restrict,
"pmix.alpha" = pmix.alpha,
"norm.var" = norm.var,
"n.data" = n.data,
"ncomp" = ncomp,
"n.chains" = n.chains,
"n.iter" = n.iter,
"n.burnin" = n.burnin,
"thin" = thin,
"n.iter.final" = n.iter.final,
"final_iter" = final_iter_set,
"int.displ" = int.displ,
"qrnd_grid" = qrnd_grid,
"omega.2pi" = omega.2pi
)
class(out) <- "angmcmc"
out
}
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/BAMMtools/R/fit_mixmodel.R ----
process_startpar <- function(start_par, data,
ncomp, model,
rand_start) {
if (is.null(start_par) & !rand_start) {
starting <-
do.call(paste0("start_clus_kmeans_", model),
list(data.full=data, comp=ncomp))
}
else if (is.null(start_par) & rand_start) {
starting <-
do.call(paste0("start_clus_rand_", model),
list(data.full=data, comp=ncomp,nstart=5))
}
else {
if (model %in% c("vmsin", "vmcos", "wnorm2")) {
allpar <- start_par
if (any(is.null(allpar$kappa1), is.null(allpar$kappa2), is.null(allpar$kappa3),
is.null(allpar$mu1), is.null(allpar$mu2)) ) {
stop("too few elements in start_par, with no default")
}
allpar1 <- list(allpar$kappa1, allpar$kappa2, allpar$kappa3, allpar$mu1, allpar$mu2)
allpar_len <- listLen(allpar1)
if (min(allpar_len) != max(allpar_len)){
stop("component size mismatch: number of components of in the starting parameter vectors differ")
}
starting <- list(par.mat = rbind(start_par$kappa1, start_par$kappa2,
start_par$kappa3,
start_par$mu1,
start_par$mu2))
}
else if (model %in% c("vm", "wnorm")) {
allpar <- start_par
if (any(is.null(allpar$kappa), is.null(allpar$mu)) ) {
stop("too few elements in start_par, with no default")
}
allpar1 <- list(allpar$kappa, allpar$mu)
allpar_len <- listLen(allpar1)
if (min(allpar_len) != max(allpar_len)){
stop("component size mismatch: number of components of in the starting parameter vectors differ")
}
starting <- list(par.mat = rbind(start_par$kappa,
start_par$mu))
}
if (ncomp == 1) {
starting$pi.mix <- 1
}
else {
starting$pi.mix <- start_par$pmix
}
}
starting
}
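# Illustrative usage sketch (not run). For a two-component vmsin mixture,
# start_par is expected to have an entry for every model parameter plus pmix:
# start_par <- list(kappa1 = c(1, 1), kappa2 = c(1, 1), kappa3 = c(0, 0),
#                   mu1 = c(0, pi), mu2 = c(0, pi), pmix = c(0.5, 0.5))
# process_startpar(start_par, data, ncomp = 2, model = "vmsin",
#                  rand_start = FALSE)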
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/BAMBI/R/process_startpar.R ----
# based on uniroot.all function from package rootSolve
# the package is now archived
uniroot.all <- function(
f,
interval,
lower = min(interval),
upper = max(interval),
tol = .Machine$double.eps^0.2,
maxiter = 1000,
trace = 0,
n = 100,
...
) {
xseq <- seq(lower, upper, len = n + 1)
f_xseq <- f(xseq, ...)
out <- xseq[which(f_xseq == 0)]
f_sign <- f_xseq[1:n] * f_xseq[2:(n + 1)]
i_range <- which(f_sign < 0)
for (i in i_range) {
out <- c(
out,
uniroot(f, lower = xseq[i],
upper = xseq[i + 1],
maxiter = maxiter,
tol = tol,
trace = trace,
...)$root
)
}
out
}
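# Illustrative example (not run): uniroot.all(sin, c(0.5, 9.5)) should return
# values close to pi, 2*pi and 3*pi. Note that the grid of n + 1 points above
# only detects sign changes, so roots of even multiplicity lying between grid
# points can be missed.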
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/BAMBI/R/uniroot_all.R ----
#' Maximum likelihood estimation of bivariate von Mises parameters
#' @inheritParams fit_angmix
#' @param model Bivariate von Mises model. One of "vmsin", "vmcos" or "indep".
#' @param ... Additional arguments. See details.
#' @details The parameters \code{kappa1} and \code{kappa2} are optimized
##' on the log scale. The method of optimization used (passed to \link{optim})
#' can be specified through \code{method} in \code{...}
#' (defaults to \code{"L-BFGS-B"}). Note, however, that
#' lower (0) and upper (2*pi) bounds for \code{mu1} and \code{mu2}
##' are specified, so not all methods implemented in \link{optim} will work.
#' @return An object of class \link{mle-class}.
#' @examples
#' pars <- list(kappa1 = 3, kappa2 = 2, kappa3 = 1.5, mu1 = 0.5, mu2 = 1.5)
#' nsamp <- 2000
#' model <- "vmsin"
#' set.seed(100)
#' dat_gen <- do.call(paste0("r", model), c(list(n = nsamp), pars))
#'
#' est <- vm2_mle(dat_gen, model = model)
#' library(stats4)
#' coef(est)
#' vcov(est)
#' @export
vm2_mle <- function(data, model = c("vmsin", "vmcos", "indep"), ...) {
model <- model[1]
dots <- list(...)
data <- data.matrix(data)
call <- match.call()
if (is.null(dots$method)) {
dots$method <- "L-BFGS-B"
}
method <- dots$method
if (model == "vmsin") {
# in log scale
lpd_grad_model_indep_1comp <- function(par_vec_lscale) {
par_vec <- c(exp(par_vec_lscale[1:2]), par_vec_lscale[3:5])
lpd_grad <- matrix(NA, 6, 1)
lpd_grad <- signif(
suppressWarnings(grad_llik_vmsin_C(data, par_vec))*
c(par_vec[1:2], rep(1, 4)),
8
)
list(lpr = (lpd_grad[6]), grad = lpd_grad[1:5])
}
start_par_gen <- start_par_vmsin
hessian_fn <- function(par_vec) {
numDeriv::hessian(
func = function(par_vec) {
-grad_llik_vmsin_C(data, par_vec)[6]
},
x = par_vec
)
}
}
else if (model == "vmcos") {
    ell <- dots[c("qrnd_grid", "n_qrnd")]
    if (!is.null(ell$qrnd_grid)) {
      qrnd_grid <- ell$qrnd_grid
dim_qrnd <- dim(qrnd_grid)
if (!is.matrix(qrnd_grid) | is.null(dim_qrnd) |
dim_qrnd[2] != 2)
stop("qrnd_grid must be a two column matrix")
n_qrnd <- dim_qrnd[1]
} else if (!is.null(ell$n_qrnd)){
n_qrnd <- round(ell$n_qrnd)
if (n_qrnd < 1)
stop("n_qrnd must be a positive integer")
qrnd_grid <- sobol(n_qrnd, 2, FALSE)
} else {
n_qrnd <- 1e4
qrnd_grid <- sobol(n_qrnd, 2, FALSE)
}
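    # The vmcos quadrature grid can thus be supplied directly via 'qrnd_grid'
    # or by size via 'n_qrnd' in '...'; an illustrative call (not run), with
    # dat a two-column matrix of angles:
    # vm2_mle(dat, model = "vmcos", n_qrnd = 1e4)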
# in log scale
lpd_grad_model_indep_1comp <- function(par_vec_lscale) {
par_vec <- c(exp(par_vec_lscale[1:2]), par_vec_lscale[3:5])
lpd_grad <- matrix(NA, 6, 1)
lpd_grad[] <- signif(
suppressWarnings(grad_llik_vmcos_C(data, par_vec, qrnd_grid)) *
c(par_vec[1:2], rep(1, 4)),
8
)
list(lpr = (lpd_grad[6]), grad = lpd_grad[1:5])
}
start_par_gen <- start_par_vmcos
hessian_fn <- function(par_vec) {
numDeriv::hessian(
func = function(par_vec) {
-grad_llik_vmcos_C(data, par_vec, qrnd_grid)[6]
},
x = par_vec
)
}
}
else if (model == "indep") {
# in log scale
lpd_grad_model_indep_1comp <- function(par_vec_lscale) {
par_vec <- c(exp(par_vec_lscale[1:2]), 0, par_vec_lscale[4:5])
lpd_grad_parts <- lapply(
1:2,
function(j) {
signif(
suppressWarnings(grad_llik_univm_C(data[, j], par_vec[c(j, 3+j)]))*
c(par_vec[j], 1, 1),
8
)
}
)
lpr <- sum(sapply(lpd_grad_parts, "[", 3))
grad <- rep(0, 5)
for (j in 1:2) {
grad[c(j, 3+j)] <- lpd_grad_parts[[j]][1:2]
}
list(lpr = lpr, grad = grad)
}
start_par_gen <- function(dat) {
pars_by_dim <- lapply(1:2, function(j) start_par_vm(dat[, j]))
pars <- numeric(5)
for (j in 1:2) {
pars[c(j, 3+j)] <- pars_by_dim[[j]][1:2]
}
pars
}
hessian_fn <- function(par_vec) {
numDeriv::hessian(
func = function(par_vec) {
-sum(
sapply(
1:2,
function(j) grad_llik_univm_C(data[, j], par_vec[c(j, 3+j)])[3]
)
)
},
x = par_vec
)
}
}
start <- start_par_gen(data)
names(start) <- c("log_kappa1", "log_kappa2", "kappa3", "mu1", "mu2")
start_lscale <- start
start_lscale[c("log_kappa1", "log_kappa2")] <-
log(start[c("log_kappa1", "log_kappa2")])
opt <- optim(
par = start_lscale,
fn = function(par_lscale) {
-lpd_grad_model_indep_1comp(par_lscale)$lpr
},
gr = function(par_lscale) {
-lpd_grad_model_indep_1comp(par_lscale)$grad
},
lower = c(rep(-Inf, 3), 0, 0),
upper = c(rep(Inf, 3), 2*pi, 2*pi),
method = method
# hessian = TRUE
)
est_par <- opt$par
names(est_par)[1:2] <- c("kappa1", "kappa2")
est_par[c("kappa1", "kappa2")] <- exp(est_par[c("kappa1", "kappa2")])
hess <- hessian_fn(par_vec = est_par)
dimnames(hess) <- list(names(est_par), names(est_par))
if (model == "indep") {
vcov <- matrix(0, 5, 5)
dimnames(vcov) <- dimnames(hess)
vcov[-3, -3] <- solve(hess[-3, -3])
} else {
vcov <- solve(hess)
}
res <- methods::new(
"mle",
call = call,
coef = est_par,
fullcoef = unlist(est_par),
vcov = vcov,
min = opt$value,
details = opt,
minuslogl = function(kappa1, kappa2, kappa3, mu1, mu2) {
par_lscale <- c(log(kappa1), log(kappa2), kappa3, mu1, mu2)
-lpd_grad_model_indep_1comp(par_lscale)$lpr
},
nobs = nrow(data),
method = method
)
res
}
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/BAMBI/R/vm2_mle.R ----
#' @keywords internal
Ainv <- function(x) {
if(x < 0.53) (2*x + x^3 + 5/6*x^5)
else if(x >= 0.86 && x < 0.95) ((9 - 8*x + 3*x^2) / (8 * (1 - x)))
else ((1.28 - 0.53*x^2) * tan(x*pi/2))
}
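# Ainv approximates the inverse of A(kappa) = I_1(kappa)/I_0(kappa) (defined
# as A_bessel below), mapping a mean resultant length to a von Mises
# concentration. Quick sanity check (not run):
# A_bessel(Ainv(0.7))  # should be close to 0.7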
#' @keywords internal
A_bessel <- function(x){
besselI(x, 1, TRUE)/besselI(x, 0, TRUE)
}
#' @keywords internal
start_par_vm <- function(data.sub) {
x1 <- data.sub
Sbar <- mean(sin(x1))
Cbar <- mean(cos(x1))
muhat <- atan(Sbar/Cbar) + pi * (Cbar < 0)
Rbar <- sqrt(Sbar^2 + Cbar^2)
k <- Ainv(Rbar)
c(k, prncp_reg(muhat))
} #starting parameters from a dataset
#' @keywords internal
start_clus_kmeans_vm <- function(data.full, comp = 2, nstart = 10){
data.full.cart <- t(sapply(data.full, function(x) c(cos(x), sin(x))))
data.kmean <- kmeans(data.full.cart, centers = comp, nstart = nstart)
ids <- data.kmean$cluster
clust.ind <- lapply(1:comp, function(i) which(ids == i))
par <- sapply(1:length(clust.ind), function(m) start_par_vm(data.full[clust.ind[[m]]]))
pi.mix <- listLen(clust.ind)/length(unlist(clust.ind))
  order.conc <- order(order(apply(par, 2, function(x) sum_sq(x[1]))))
list("par.mat" = par[,order.conc], "pi.mix" = pi.mix[order.conc], "clust.ind" = clust.ind[order.conc], "id" = ids)
} #kmeans, then start_par for each cluster
#' @keywords internal
start_clus_rand_vm <- function(data.full, comp = 2, nstart = 10) {
rand.pi <- runif(comp, 1/(2*comp), 2/comp)
rand.pi <- rand.pi/sum(rand.pi)
rand.multinom <- t(rmultinom(length(data.full), 1, rand.pi))
ids <- apply(rand.multinom, 1, which.max)
clust.ind <- lapply(1:comp, function(i) which(ids == i))
par <- sapply(1:length(clust.ind), function(m) start_par_vm(data.full[clust.ind[[m]]]))
pi.mix <- listLen(clust.ind)/length(unlist(clust.ind))
order.conc <- order(order(apply(par, 2, function(x) sum_sq(x[1:3]))))
list("par.mat" = par[,order.conc], "pi.mix" = pi.mix[order.conc], "clust.ind" = clust.ind[order.conc], "id" = ids)
} #random groups, then start_par for each cluster
#' @keywords internal
start_par_vmsin <- function(data.sub) {
x1 <- data.sub[,1]; y1 <- data.sub[,2]
Sbarphi <- mean(sin(x1))
Cbarphi <- mean(cos(x1))
phibar <- atan(Sbarphi/Cbarphi) + pi * (Cbarphi < 0)
Rbarphi <- sqrt(Sbarphi^2 + Cbarphi^2)
k1 <- Ainv(Rbarphi)
Sbarpsi <- mean(sin(y1))
Cbarpsi <- mean(cos(y1))
psibar <- atan(Sbarpsi/Cbarpsi) + pi * (Cbarpsi < 0)
Rbarpsi <- sqrt(Sbarpsi^2 + Cbarpsi^2)
k2 <- Ainv(Rbarpsi)
sindiffphi <- sin(outer(x1, x1, "-"))
sindiffpsi <- sin(outer(y1, y1, "-"))
rho <- sum(sindiffphi*sindiffpsi)/sum(sindiffphi^2)/sum(sindiffpsi^2)
c(k1, k2, rho*sqrt(k1*k2), prncp_reg(phibar), prncp_reg(psibar))
} #starting parameters from a dataset
#' @keywords internal
start_clus_kmeans_vmsin <- function(data.full, comp = 2, nstart = 10){
data.full.cart <- t(apply(data.full, 1, sph2cart))
data.kmean <- kmeans(data.full.cart, centers = comp, nstart = nstart)
ids <- data.kmean$cluster
clust.ind <- lapply(1:comp, function(i) which(ids == i))
par <- sapply(1:length(clust.ind), function(m) start_par_vmsin(data.full[clust.ind[[m]],]))
pi.mix <- listLen(clust.ind)/length(unlist(clust.ind))
order.conc <- order(order(apply(par, 2, function(x) sum_sq(x[1:3]))))
list("par.mat" = par[,order.conc], "pi.mix" = pi.mix[order.conc], "clust.ind" = clust.ind[order.conc], "id" = ids)
} #kmeans, then start_par for each cluster
#' @keywords internal
start_clus_rand_vmsin <- function(data.full, comp = 2, nstart = 10) {
rand.pi <- runif(comp, 1/(2*comp), 2/comp)
rand.pi <- rand.pi/sum(rand.pi)
rand.multinom <- t(rmultinom(nrow(data.full), 1, rand.pi))
ids <- apply(rand.multinom, 1, which.max)
clust.ind <- lapply(1:comp, function(i) which(ids == i))
par <- sapply(1:length(clust.ind), function(m) start_par_vmsin(data.full[clust.ind[[m]],]))
pi.mix <- listLen(clust.ind)/length(unlist(clust.ind))
order.conc <- order(order(apply(par, 2, function(x) sum_sq(x[1:3]))))
list("par.mat" = par[,order.conc], "pi.mix" = pi.mix[order.conc], "clust.ind" = clust.ind[order.conc], "id" = ids)
} #random groups, then start_par for each cluster
#' @keywords internal
start_par_vmcos <- function(data.sub) {
x1 <- data.sub[,1];
y1 <- data.sub[,2]
Sbarphi <- mean(sin(x1))
Cbarphi <- mean(cos(x1))
phibar <- atan(Sbarphi/Cbarphi) + pi * (Cbarphi < 0)
Rbarphi <- sqrt(Sbarphi^2 + Cbarphi^2)
k1 <- Ainv(Rbarphi)
#k1 <- min(15, Ainv(Rbarphi))
Sbarpsi <- mean(sin(y1))
Cbarpsi <- mean(cos(y1))
psibar <- atan(Sbarpsi/Cbarpsi) + pi * (Cbarpsi < 0)
Rbarpsi <- sqrt(Sbarpsi^2 + Cbarpsi^2)
k2 <- Ainv(Rbarpsi)
#k2 <- min(15, Ainv(Rbarpsi))
mu <- prncp_reg(phibar)
nu <- prncp_reg(psibar)
Sbarphi_psi <- mean(sin(x1-y1))
Cbarphi_psi <- mean(cos(x1-y1))
phi_psibar <- atan(Sbarphi_psi/Cbarphi_psi) + pi * (Cbarphi_psi < 0)
Rbarphi_psi <- sqrt(Sbarphi_psi^2 + Cbarphi_psi^2)
k3.unsgn <- Ainv(Rbarphi_psi)
sindiffphi <- sin(outer(x1, x1, "-"))
sindiffpsi <- sin(outer(y1, y1, "-"))
  rho <- sum(sindiffphi*sindiffpsi)/sqrt(sum(sindiffphi^2)*sum(sindiffpsi^2))
# Sbarphi_psi <- mean(sin(x1-y1+mu-nu))
# Cbarphi_psi <- mean(cos(x1-y1+mu-nu))
# Rbarphi_psi <- sqrt(Sbarphi_psi^2 + Cbarphi_psi^2)
k3 <- sign(rho)*k3.unsgn
c(k1, k2, k3, mu, nu)
} #starting parameters from a dataset
#' @keywords internal
start_clus_kmeans_vmcos <- function(data.full, comp = 2, nstart = 10){
data.full.cart <- t(apply(data.full, 1, sph2cart))
data.kmean <- kmeans(data.full.cart, centers = comp, nstart = nstart)
ids <- data.kmean$cluster
clust.ind <- lapply(1:comp, function(i) which(ids == i))
par <- sapply(1:length(clust.ind), function(m) start_par_vmcos(data.full[clust.ind[[m]],]))
pi.mix <- listLen(clust.ind)/length(unlist(clust.ind))
order.conc <- order(order(apply(par, 2, function(x) sum_sq(x[1:3]))))
list("par.mat" = par[,order.conc], "pi.mix" = pi.mix[order.conc], "clust.ind" = clust.ind[order.conc], "id" = ids)
} #kmeans, then start_par for each cluster
#' @keywords internal
start_clus_rand_vmcos <- function(data.full, comp = 2, nstart = 10){
rand.pi <- runif(comp, 1/(2*comp), 2/comp)
rand.pi <- rand.pi/sum(rand.pi)
rand.multinom <- t(rmultinom(nrow(data.full), 1, rand.pi))
ids <- apply(rand.multinom, 1, which.max)
clust.ind <- lapply(1:comp, function(i) which(ids == i))
par <- sapply(1:length(clust.ind), function(m) start_par_vmcos(data.full[clust.ind[[m]],]))
pi.mix <- listLen(clust.ind)/length(unlist(clust.ind))
  order.conc <- order(order(apply(par, 2, function(x) sum_sq(x[1:3]))))
list("par.mat" = par[,order.conc], "pi.mix" = pi.mix[order.conc], "clust.ind" = clust.ind[order.conc], "id" = ids)
} #random groups, then start_par for each cluster
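# Illustrative example (not run), using BAMBI's own sampler:
# set.seed(1)
# dat <- rvmsin(100, kappa1 = 2, kappa2 = 2, kappa3 = 1, mu1 = pi, mu2 = pi)
# start_par_vmsin(dat)  # c(kappa1, kappa2, kappa3, mu1, mu2) starting values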
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/BAMBI/R/vmstart.R ----
start_par_wnorm <- function(data.sub) {
x1 <- prncp_reg(data.sub)
Sbar <- mean(sin(x1))
Cbar <- mean(cos(x1))
  muhat <- prncp_reg(atan(Sbar/Cbar) + pi * (Cbar < 0))
Rbar <- sqrt(Sbar^2 + Cbar^2)
c(1/(1-Rbar), muhat)
} #starting parameters from a dataset
start_clus_kmeans_wnorm <- function(data.full, comp = 2, nstart = 5){
data.full.cart <- t(sapply(data.full, function(x) c(cos(x), sin(x))))
data.kmean <- kmeans(data.full.cart, centers = comp, nstart = nstart)
ids <- data.kmean$cluster
clust.ind <- lapply(1:comp, function(i) which(ids == i))
par <- sapply(1:length(clust.ind), function(m) start_par_wnorm(data.full[clust.ind[[m]]]))
pi.mix <- listLen(clust.ind)/length(unlist(clust.ind))
order.conc <- order(colSums(par^2))
list("par.mat" = par[,order.conc], "pi.mix" = pi.mix[order.conc], "clust.ind" = clust.ind[order.conc], "id" = ids)
} #kmeans, then start_par for each cluster
start_clus_rand_wnorm <- function(data.full, comp = 2){
rand.pi <- runif(comp, 1/(2*comp), 2/comp)
rand.pi <- rand.pi/sum(rand.pi)
rand.multinom <- t(rmultinom(length(data.full), 1, rand.pi))
ids <- apply(rand.multinom, 1, which.max)
clust.ind <- lapply(1:comp, function(i) which(ids == i))
par <- sapply(1:length(clust.ind), function(m) start_par_wnorm(data.full[clust.ind[[m]]]))
pi.mix <- listLen(clust.ind)/length(unlist(clust.ind))
order.conc <- order(colSums(par^2))
list("par.mat" = par[,order.conc], "pi.mix" = pi.mix[order.conc], "clust.ind" = clust.ind[order.conc], "id" = ids)
} #random groups, then start_par for each cluster
start_par_wnorm2 <- function(data.sub) {
data.sub.0.2pi <- prncp_reg(data.sub)
mu <- prncp_reg(colMeans(data.sub.0.2pi))
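  # the next line takes entries c(1, 4, 2) of the 2x2 inverse covariance
  # matrix (column-major), i.e. the (1,1) and (2,2) precisions followed by
  # the (2,1) cross term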
sigma.inv.vec <- solve(cov(data.sub.0.2pi))[c(1,4,2)]
c(sigma.inv.vec, mu)
} #starting parameters from a dataset
start_clus_kmeans_wnorm2 <- function(data.full, comp = 2, nstart = 5){
data.full.cart <- t(apply(data.full, 1, sph2cart))
data.kmean <- kmeans(data.full.cart, centers = comp, nstart = nstart)
ids <- data.kmean$cluster
clust.ind <- lapply(1:comp, function(i) which(ids == i))
par <- sapply(1:length(clust.ind), function(m) start_par_wnorm2(data.full[clust.ind[[m]],]))
pi.mix <- listLen(clust.ind)/length(unlist(clust.ind))
order.conc <- order(colSums(par^2))
list("par.mat" = par[,order.conc], "pi.mix" = pi.mix[order.conc], "clust.ind" = clust.ind[order.conc], "id" = ids)
} #kmeans, then start_par for each cluster
start_clus_rand_wnorm2 <- function(data.full, comp = 2, nstart = 5){
rand.pi <- runif(comp, 1/(2*comp), 2/comp)
rand.pi <- rand.pi/sum(rand.pi)
rand.multinom <- t(rmultinom(nrow(data.full), 1, rand.pi))
ids <- apply(rand.multinom, 1, which.max)
clust.ind <- lapply(1:comp, function(i) which(ids == i))
par <- sapply(1:length(clust.ind), function(m) start_par_wnorm2(data.full[clust.ind[[m]],]))
pi.mix <- listLen(clust.ind)/length(unlist(clust.ind))
order.conc <- order(colSums(par^2))
list("par.mat" = par[,order.conc], "pi.mix" = pi.mix[order.conc], "clust.ind" = clust.ind[order.conc], "id" = ids)
} #random groups, then start_par for each cluster
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/BAMBI/R/wnormstart.R ----
#' Wrap angles into \code{[-pi, pi]} or \code{[0, 2*pi]}
#' @param x numeric vector or matrix or data.frame.
#' @details
#' \code{minuspi_to_pi} wraps \code{x} into \code{[-pi, pi]},
##' while \code{zero_to_2pi} wraps \code{x} into \code{[0, 2*pi]}.
#'
#' @examples
#' dat <- matrix(runif(100, -pi, pi), ncol=2)
#' dat1 <- zero_to_2pi(dat)
#' dat2 <- minuspi_to_pi(dat1)
#' all.equal(dat, dat2)
#' @export
zero_to_2pi <- function(x)
{
x %% (2*pi)
}
#' @rdname zero_to_2pi
#' @export
minuspi_to_pi <- function(x)
{
prncp_reg.minuspi.pi(x)
}
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/BAMBI/R/wrap_angles.R ----
##' @title BAMMtools datasets
##'
##' @description Example datasets and sample \code{BAMM} output for the
##' package \code{BAMMtools}.
##'
##' @name BAMMtools-data
##' @aliases whales primates fishes mcmc.whales mcmc.primates events.whales
##' events.primates events.fishes mass.primates traits.fishes
##'
##' @docType data
##'
##' @details This includes both the raw data and the \code{BAMM} output for
##' three example analyses. The first is an analysis of speciation and
##' extinction rates during the radiation of modern whales, using a
##' time-calibrated tree from Steeman et al. (2009). The second is a
##' \code{BAMM} analysis of phenotypic evolutionary rates (body mass)
##' during the radiation of extant primates, taken from Vos and Mooers
##' (2006) and Redding et al. (2010).The third is a \code{BAMM} analysis
##' of speciation and extinction rates for a 300-species subset of
##' ray-finned fishes, along with body size data for these species from
##' Rabosky et al. (2013).
##'
##' Dataset \code{whales} is the raw time-calibrated tree that was
##' analyzed with \code{BAMM}, \code{primates} is the corresponding
##' time-calibrated phylogeny of 233 primate species, and \code{fishes}
##' is the time-calibrated phylogeny of 300 fish species. Log-transformed
##' body masses for primates are in dataset \code{mass.primates}, and fish
##' body sizes are in dataset \code{traits.fishes}.
##'
##' The MCMC output files (\code{mcmc.whales} and \code{mcmc.primates})
##' are dataframes containing the raw MCMC output as generated by
##' \code{BAMM}. Column headers in the dataframes includes the sampling
##' generation, the current number of shifts in the simulation
##' (\code{N_shifts}), the log-prior density of the parameters
##' (\code{logPrior}), the log-likelihood of the data (\code{logLik}), the
##' current parameter of the Poisson process governing the number of
##' regime shifts (\code{eventRate}), and the MCMC acceptance rate
##' (\code{acceptRate}). This is the file that would typically be analyzed
##' as a first step towards assessing MCMC convergence (e.g., analyzing
##' effective sample sizes of \code{logLik} and \code{N_shifts}).
##'
##' The "core" \code{BAMM} output is included in the \emph{event data}
##' files (\code{events.whales}, \code{events.primates} and
##' \code{events.fishes}). These are all the parameters sampled with MCMC
##' that are relevant to reconstructing the nature and location of
##' evolutionary rate dynamics across a phylogeny. Please refer to
##' \code{BAMM} documentation for a detailed overview of this output, but
##' a brief description is as follows:
##'
##' \code{generation}: The index value of the state in the MCMC simulation
##' (the "generation").
##'
##' \code{leftchild, rightchild}: This defines a unique topological
##' location where a rate shift was sampled. Specifically, for given
##' right-left pair, the shift is sampled on the branch leading to the
##' node from which \code{rightchild} and \code{leftchild} are descended
##' (these two taxa are part of the spanning set of taxa for the node). If
##' \code{leftchild} is "NA", this simply means that the shift was sampled
##' on a terminal branch.
##'
##' \code{abstime}: The absolute occurrence time of the shift, assuming
##' that the time of the root node is 0.0.
##'
##' \code{lambdainit, lambdashift}: For the speciation-extinction model, the
##' initial speciation rate and rate change parameter for the process.
##'
##' \code{muinit}: For the speciation-extinction model, the extinction rate
##' (time-invariant).
##'
##' \code{betainit, betashift}: For the phenotypic evolution model, the
##' initial (\code{betainit}) rate of phenotypic evolution and the rate
##' change parameter (\code{betashift}).
##'
##' @source
##' Vos R.A., A.O. Mooers. 2006. A new dated supertree of the Primates.
##' Chapter 5. In Inferring large phylogenies: the big tree problem (R
##' Vos, Ph.D. thesis) Simon Fraser University.
##'
##' Redding D.W., C. DeWolff, A.O. Mooers. 2010. Evolutionary
##' distinctiveness, threat status and ecological oddity in primates.
##' Conservation Biology 24: 1052-1058. DOI:
##' 10.1111/j.1523-1739.2010.01532.x
##'
##' Steeman, M.E., M.B. Hebsgaard, R.E. Fordyce, S.W.Y. Ho, D.L. Rabosky,
##' R. Nielsen, C. Rahbek, H. Glenner, M.V. Sorensen, E. Willerslev. 2009.
##' Radiation of Extant Cetaceans Driven by Restructuring of the Oceans.
##' Systematic Biology. 58: 573-585. DOI: 10.1093/sysbio/syp060
##'
##' Rabosky, D. L., F. Santini, J.T. Eastman, S.A. Smith, B.L. Sidlauskas,
##' J. Chang, and M.E. Alfaro. 2013. Rates of speciation and morphological
##' evolution are correlated across the largest vertebrate radiation.
##' Nature Communications DOI: 10.1038/ncomms2958.
##' @keywords datasets
##'
NULL
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/BAMMtools/R/BAMMtools-data.R ----
##' @title BAMMtools
##' @description An R package for the analysis and visualization of complex
##' macroevolutionary dynamics. Functions in \code{BAMMtools} are oriented
##' entirely around analysis of results obtained using the \code{BAMM}
##' software (\url{http://bamm-project.org/}).
##'
##' @author Dan Rabosky, Mike Grundler, Pascal Title, Jonathan Mitchell,
##' Carlos Anderson, Jeff Shi, Joseph Brown, Huateng Huang
##'
##' @references \url{http://bamm-project.org/}
##'
##' Rabosky, D., M. Grundler, C. Anderson, P. Title, J. Shi, J. Brown,
##' H. Huang and J. Larson. 2014. BAMMtools: an R package for the
##' analysis of evolutionary dynamics on phylogenetic trees. Methods in
##' Ecology and Evolution 5: 701-707.
##'
##' Rabosky, D. L. 2014. Automatic detection of key innovations, rate
##' shifts, and diversity-dependence on phylogenetic trees. PLoS ONE 9:
##' e89543.
##'
##' Shi, J. J., and D. L. Rabosky. 2015. Speciation dynamics during the
##' global radiation of extant bats. Evolution 69: 1528-1545.
##'
##' Rabosky, D. L., F. Santini, J. T. Eastman, S. A. Smith, B. L.
##' Sidlauskas, J. Chang, and M. E. Alfaro. 2013. Rates of speciation
##' and morphological evolution are correlated across the largest
##' vertebrate radiation. Nature Communications DOI: 10.1038/ncomms2958.
##'
##' @name BAMMtools
##' @docType package
##' @keywords package
##' @useDynLib BAMMtools
##' @importFrom Rcpp evalCpp
##' @importFrom gplots rich.colors
##' @importFrom methods hasArg
##' @importFrom utils lsf.str read.csv read.table tail write.csv write.table
##' @importFrom stats cor.test dbinom density dgeom kruskal.test loess median
##' optim quantile reorder runif sd setNames wilcox.test
##' @importFrom grDevices col2rgb colorRampPalette dev.off gray pdf rgb
##' terrain.colors
##' @importFrom graphics abline axTicks axis barplot box grconvertX grconvertY
##' image layout legend lines locator mtext par plot plot.new plot.window
##' points polygon rect segments text
##' @importFrom stats qchisq
##' @import ape
NULL
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/BAMMtools/R/BAMMtools-package.R ----
# Recursively compute branching times for phylogenetic tree.
# Allows for non-ultrametric (fossil) trees.
# Two return types:
# return.type == 'bt'
# returns traditional ape-like branching times (branching.times)
# return.type == 'begin.end'
# returns phylogeny with begin and end vectors
# exactly like getStartStopTimes
#
# This is slower than ape::branching.times, on account of
# the recursion. Could recode in C for speed.
#
NU.branching.times <- function(phy, return.type = 'bt'){
if (!is.binary.phylo(phy)){
stop("error. Need fully bifurcating (resolved) tree\n")
}
phy$begin <- rep(0, nrow(phy$edge))
phy$end <- rep(0, nrow(phy$edge))
# Do it recursively
fx <- function(phy, node){
cur.time <- 0
root <- length(phy$tip.label) + 1
if (node > root){
cur.time <- phy$end[which(phy$edge[,2] == node)]
}
dset <- phy$edge[,2][phy$edge[,1] == node]
i1 <- which(phy$edge[,2] == dset[1])
i2 <- which(phy$edge[,2] == dset[2])
phy$end[i1] <- cur.time + phy$edge.length[i1]
phy$end[i2] <- cur.time + phy$edge.length[i2]
if (dset[1] > length(phy$tip.label)){
phy$begin[phy$edge[,1] == dset[1]] <- phy$end[i1]
phy <- fx(phy, node = dset[1])
}
if (dset[2] > length(phy$tip.label)){
phy$begin[phy$edge[,1] == dset[2]] <- phy$end[i2]
phy <- fx(phy, node = dset[2])
}
return(phy)
}
phy <- fx(phy, node = length(phy$tip.label) + 1)
if (return.type == 'bt'){
maxbt <- max(phy$end)
nodes <- (length(phy$tip.label) + 1):(2*length(phy$tip.label) - 1)
bt <- numeric(length(nodes))
names(bt) <- nodes
for (i in 1:length(bt)){
tt <- phy$begin[phy$edge[,1] == nodes[i]][1]
bt[i] <- maxbt - tt
}
return(bt)
}else if (return.type == 'begin.end'){
return(phy)
}
}
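# Illustrative check (not run): for an ultrametric tree the 'bt' return type
# should agree with ape (up to numerical tolerance and name formatting):
# phy <- ape::rcoal(10)
# all.equal(NU.branching.times(phy), ape::branching.times(phy))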
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/BAMMtools/R/NU.branching.times.R ----
#############################################################
#
# addBAMMlegend(x, location = 'topleft', side = 'auto', nTicks = 2, direction = 'auto', shortFrac = 0.02, longFrac = 0.3, axisOffset = 0.002, cex.axis = 0.8, labelDist = 0.7, ...)
#
# x = saved plot.bammdata object
# location = 'topleft', 'topright', 'bottomleft','bottomright','top','bottom','left','right' OR coordinates for legend c(xmin, xmax, ymin, ymax)
# side = side for tick marks, see axis() documentation, if NULL, automatically inferred
# nTicks = number of ticks, outside of min and max
# direction = 'auto' or vertical or horizontal
# shortFrac = percent of axis that is short dimension of legend
# longFrac = percent of axis that is long dimension of legend
# axisOffset = distance from color bar for labels as a percent of total
# cex.axis = size of axis labels
# labelDist = distance from axis to labels, passed to mgp
# ... additional parameters to be passed to axis()
#
##' @title Add a color legend to a phylo-rate plot
##'
##' @description Add a legend to a phylorate plot, with greater manual
##' control.
##'
##' @param x A \code{plot.bammdata} object.
##' @param direction Direction of color ramp. If omitted, then direction is
##' automatically inferred, otherwise can be specified as horizontal or
##' vertical.
##' @param side Side for tick marks, see \code{\link{axis}} documentation.
##' Automatically inferred if omitted.
##' @param location Either a location name (see Details), or coordinates for
##' the corners of the bar legend c(xmin, xmax, ymin, ymax).
##' @param nTicks Number of tick marks, besides min and max.
##' @param stretchInterval If color.interval was defined, should the legend be
##' stretched to the color.interval, or should the full range of rates be
##' presented.
##' @param shortFrac Fraction of the plot width range that will be used as the
##' 	short dimension of the legend. Only applies to preset location
##' 	options.
##' @param longFrac Fraction of the plot width range that will be used as the
##' 	long dimension of the legend. Only applies to preset location options.
##' @param axisOffset Distance from the color bar to the labels, as a fraction
##' 	of the plot range.
##' @param cex.axis Size of axis labels.
##' @param labelDist Distance from axis to axis labels (passed to mgp).
##' @param \dots Additional parameters to be passed to axis.
##'
##' @details A number of predefined locations exist in this function to make
##' it easy to add a legend to a phylorate plot. Preset \code{locations}
##' are: \code{topleft}, \code{topright}, \code{bottomleft},
##' \code{bottomright}, \code{left}, \code{right}, \code{top} and
##' \code{bottom}. If more fine-tuned control is desired, then a numeric
##' vector of length 4 can be supplied to \code{location}, specifying the
##' min x, max x, min y and max y values for the legend. See
##' \code{Examples}.
##'
##' @return Invisibly returns a list with the following components:
##' \itemize{
##' \item coords: A 2-column matrix of xy coordinates for each color
##' bin in the legend.
##' \item width: Coordinates for the short dimension of the legend.
##' \item pal: The color ramp.
##' \item tickLocs: The tick mark locations in plotting units.
##' \item labels: The rate values associated with those tick
##' locations.
##' }
##'
##' @author Pascal Title
##'
##' @seealso Requires an object created with \code{\link{plot.bammdata}}.
##'
##' @examples
##' data(whales, events.whales)
##' ephy <- getEventData(whales, events.whales, burnin = 0.25, nsamples = 300)
##'
##' # plot phylorate with extra margin space
##' x <- plot(ephy, lwd = 2, mar = c(5,4,4,4))
##' # presets
##' addBAMMlegend(x, location = 'topleft')
##' addBAMMlegend(x, location = 'bottom')
##' addBAMMlegend(x, location = 'right')
##'
##' # fine-tune placement
##' x <- plot(ephy, lwd = 2, mar = c(5,4,4,4))
##' axis(1); axis(2)
##' addBAMMlegend(x, location = c(-1, -0.5, 40, 80), nTicks = 4)
##' addBAMMlegend(x, location = c(5, 20, 60, 61), nTicks = 4, side = 3,
##' cex.axis = 0.7)
##'
##' # addBAMMlegend also automatically detects the use of color.interval
##' data(primates, events.primates)
##' ephy <- getEventData(primates, events.primates, burnin=0.25,
##' nsamples = 300, type = 'trait')
##'
##' x <- plot(ephy, breaksmethod = 'linear',
##' color.interval = c(NA, 0.12), lwd = 2)
##' addBAMMlegend(x, location = c(0, 30, 200, 205), nTicks = 1, side = 3)
##' @export
addBAMMlegend <- function(x, direction, side, location = 'topleft', nTicks = 2, stretchInterval = FALSE, shortFrac = 0.02, longFrac = 0.3, axisOffset = 0.002, cex.axis = 0.8, labelDist = 0.7, ...) {
#location xmin,xmax,ymin,ymax
if (hasArg('corners')) {
stop('Error: some options have been deprecated. Please consult the documentation.')
}
if(!hasArg('direction')) {
direction <- 'auto'
}
if (!identical(names(x), c('coords', 'colorbreaks', 'palette', 'colordens'))) {
stop("x must be a saved plot.bammdata object.");
}
if (!direction %in% c('auto', 'vertical', 'horizontal')) {
stop("direction must be auto, vertical or horizontal.");
}
if (is.character(location)) {
if (!location %in% c('bottomleft','bottomright','topleft','topright','bottom','top','left','right')) {
stop('location is not recognized.');
}
}
colorbreaks <- x$colorbreaks;
pal <- x$palette;
# If there are duplicate colors, then this color ramp is the result of
# a specified color.interval. If stretchInterval is TRUE, then rather
# than include many duplicate colors, we will only include the color.interval
# range.
# intervalSide = top means that the top range of the color palette has
# duplicate colors
intervalSide <- NULL
if (length(unique(pal)) != length(pal) & stretchInterval) {
uniquePal <- which(!duplicated(pal))
if (uniquePal[2] != (uniquePal[1] + 1)) {
uniquePal[1] <- uniquePal[2] - 1
}
colorbreaks <- colorbreaks[c(uniquePal, uniquePal[length(uniquePal)] + 1)]
pal <- pal[uniquePal]
if (identical(x$palette[1], x$palette[2]) & identical(tail(x$palette, 1), tail(x$palette, 2)[1])) {
intervalSide <- 'both'
} else if (identical(x$palette[1], x$palette[2]) & !identical(tail(x$palette, 1), tail(x$palette, 2)[1])) {
intervalSide <- 'bottom'
} else if (!identical(x$palette[1], x$palette[2]) & identical(tail(x$palette, 1), tail(x$palette, 2)[1])) {
intervalSide <- 'top'
}
}
n <- length(colorbreaks);
#return plot region extremes and define outer coordinates
minX <- grconvertX(par('fig')[1], from = 'ndc', to = 'user')
maxX <- grconvertX(par('fig')[2], from = 'ndc', to = 'user')
minY <- grconvertY(par('fig')[3], from = 'ndc', to = 'user')
maxY <- grconvertY(par('fig')[4], from = 'ndc', to = 'user')
xrange <- maxX - minX
yrange <- maxY - minY
minX <- minX + xrange * 0.05
maxX <- maxX - xrange * 0.05
minY <- minY + yrange * 0.05
maxY <- maxY - yrange * 0.05
if (is.character(location)) {
if (location == 'topleft' & direction %in% c('auto', 'vertical')) {
location <- vector('numeric', length = 4);
location[1] <- minX
location[2] <- minX + (maxX - minX) * shortFrac
location[3] <- maxY - (maxY - minY) * longFrac
location[4] <- maxY
} else
if (location == 'topleft' & direction == 'horizontal') {
location <- vector('numeric', length = 4);
location[1] <- minX
location[2] <- minX + (maxX - minX) * longFrac
location[3] <- maxY - (maxY - minY) * shortFrac
location[4] <- maxY
} else
if (location == 'topright' & direction %in% c('auto', 'vertical')) {
location <- vector('numeric', length = 4);
location[1] <- maxX - (maxX - minX) * shortFrac
location[2] <- maxX
location[3] <- maxY - (maxY - minY) * longFrac
location[4] <- maxY
} else
if (location == 'topright' & direction == 'horizontal') {
location <- vector('numeric', length = 4);
location[1] <- maxX - (maxX - minX) * longFrac
location[2] <- maxX
location[3] <- maxY - (maxY - minY) * shortFrac
location[4] <- maxY
} else
if (location == 'bottomleft' & direction %in% c('auto', 'vertical')) {
location <- vector('numeric', length = 4);
location[1] <- minX
location[2] <- minX + (maxX - minX) * shortFrac
location[3] <- minY
location[4] <- minY + (maxY - minY) * longFrac
} else
if (location == 'bottomleft' & direction == 'horizontal') {
location <- vector('numeric', length = 4);
location[1] <- minX
location[2] <- minX + (maxX - minX) * longFrac
location[3] <- minY
location[4] <- minY + (maxY - minY) * shortFrac
} else
if (location == 'bottomright' & direction %in% c('auto', 'vertical')) {
location <- vector('numeric', length = 4);
location[1] <- maxX - (maxX - minX) * shortFrac
location[2] <- maxX
location[3] <- minY
location[4] <- minY + (maxY - minY) * longFrac
} else
if (location == 'bottomright' & direction == 'horizontal') {
location <- vector('numeric', length = 4);
location[1] <- maxX - (maxX - minX) * longFrac
location[2] <- maxX
location[3] <- minY
location[4] <- minY + (maxY - minY) * shortFrac
} else
if (location == 'left') {
location <- vector('numeric', length = 4);
location[1] <- minX
location[2] <- minX + (maxX - minX) * shortFrac
location[3] <- mean(par('usr')[3:4]) - ((maxY - minY) * longFrac)/2
location[4] <- mean(par('usr')[3:4]) + ((maxY - minY) * longFrac)/2
direction <- 'vertical'
} else
if (location == 'right') {
location <- vector('numeric', length = 4);
location[1] <- maxX - (maxX - minX) * shortFrac
location[2] <- maxX
location[3] <- mean(par('usr')[3:4]) - ((maxY - minY) * longFrac)/2
location[4] <- mean(par('usr')[3:4]) + ((maxY - minY) * longFrac)/2
direction <- 'vertical'
} else
if (location == 'top') {
location <- vector('numeric', length = 4);
location[1] <- mean(par('usr')[1:2]) - ((maxX - minX) * longFrac)/2
location[2] <- mean(par('usr')[1:2]) + ((maxX - minX) * longFrac)/2
location[3] <- maxY - (maxY - minY) * shortFrac
location[4] <- maxY
direction <- 'horizontal'
} else
if (location == 'bottom') {
location <- vector('numeric', length = 4);
location[1] <- mean(par('usr')[1:2]) - ((maxX - minX) * longFrac)/2
location[2] <- mean(par('usr')[1:2]) + ((maxX - minX) * longFrac)/2
location[3] <- minY
location[4] <- minY + (maxY - minY) * shortFrac
direction <- 'horizontal'
}
}
# infer direction based on dimensions of legend box
if (direction == 'auto') {
if (((location[2] - location[1]) / (par('usr')[2] - par('usr')[1])) >= ((location[4] - location[3]) / (par('usr')[4] - par('usr')[3]))) {
direction <- 'horizontal';
} else {
direction <- 'vertical';
}
}
if (direction == 'horizontal') {
axisOffset <- axisOffset * (par('usr')[4] - par('usr')[3]);
} else if (direction == 'vertical') {
axisOffset <- axisOffset * (par('usr')[2] - par('usr')[1]);
}
#determine side for labels based on location in plot and direction
if (!hasArg('side')) {
		if (direction == 'vertical') { #side = 2 or 4
if (mean(location[1:2]) <= mean(par('usr')[1:2])) {
side <- 4;
} else {
side <- 2;
}
}
		if (direction == 'horizontal') { #side = 1 or 3
if (mean(location[3:4]) > mean(par('usr')[3:4])) {
side <- 1;
} else {
side <- 3;
}
}
}
if (direction == 'horizontal') {
x <- seq(from = location[1], to = location[2], length.out = n);
width <- location[3:4];
} else {
x <- seq(from = location[3], to = location[4], length.out = n);
width <- location[1:2];
}
#get bin coordinates
x <- rep(x,each = 2);
x <- x[-c(1,length(x))];
x <- matrix(x, ncol = 2, byrow = TRUE);
#find tick locations
#get equivalent rate bins
z <- rep(colorbreaks,each = 2);
z <- z[-c(1,length(z))];
z <- matrix(z, ncol = 2, byrow = TRUE);
tx <- trunc(seq(from = 1, to = nrow(x), length.out = nTicks + 2));
tickLocs <- x[tx,1]
tx <- z[tx,1]
tickLocs[length(tickLocs)] <- max(x[,2])
tx[length(tx)] <- max(z[,2])
#plot bar
if (direction == 'horizontal') {
rect(xleft = x[,1], ybottom = width[1], xright = x[,2], ytop = width[2], border = pal, col = pal, xpd = NA);
} else {
rect(xleft = width[1], ybottom = x[,1], xright = width[2], ytop = x[,2], border = pal, col = pal, xpd = NA);
}
#add tickmarks
tickLabels <- as.character(signif(tx, 2));
if (!is.null(intervalSide)) {
if (intervalSide == 'top' | intervalSide == 'both') {
tickLabels[length(tickLabels)] <- paste('\u2265', tickLabels[length(tickLabels)])
}
if (intervalSide == 'bottom' | intervalSide == 'both') {
tickLabels[1] <- paste('\u2264', tickLabels[1])
}
}
if (side == 1) { #bottom
axis(side, at = tickLocs, pos = location[3] - axisOffset, labels = tickLabels, xpd = NA, las = 1, cex.axis = cex.axis, mgp = c(3, labelDist, 0), ...);
}
if (side == 3) { #top
axis(side, at = tickLocs, pos = location[4] + axisOffset, labels = tickLabels, xpd = NA, las = 1, cex.axis = cex.axis, mgp = c(3, labelDist, 0), ...);
}
if (side == 2) { #left
axis(side, at = tickLocs, pos = location[1] - axisOffset, labels = tickLabels, xpd = NA, las = 1, cex.axis = cex.axis, mgp = c(3, labelDist, 0), ...);
}
if (side == 4) { #right
axis(side, at = tickLocs, pos = location[2] + axisOffset, labels = tickLabels, xpd = NA, las = 1, cex.axis = cex.axis, mgp = c(3, labelDist, 0), ...);
}
invisible(list(coords = x, width = width, pal = pal, tickLocs = tickLocs, labels = tx))
}
# --- end of file: BAMMtools/R/addBAMMlegend.R ---
##' @title Add \code{BAMM}-inferred rate shifts to a phylogeny plot
##'
##' @description Adds symbols to a plotted tree to mark the location(s) where
##' there is a shift in the macroevolutionary dynamics of diversification
##' or trait evolution.
##'
##' @param ephy An object of class \code{bammdata}.
##' @param method A character string indicating the method used in plotting.
##' Must be "polar" or "phylogram".
##' @param index An integer indicating which posterior sample to use for
##' adding shifts to the plotted tree.
##' @param cex A numeric indicating the character expansion ("size") of the
##' plotted points.
##' @param pch An integer indicating the choice of plotting symbol.
##' @param col An integer or character string indicating the border color of
##' the plotting symbol.
##' @param bg An integer or character string indicating the background color
##' of the plotting symbol.
##' @param msp If not \code{NULL}, an object of class \code{phylo} where each
##' branch length is equal to the marginal probability of a shift
##' occurring on that branch. Plotted points corresponding to shifts will
##' be sized by these probabilities.
##' @param shiftnodes An optional vector of node numbers indicating the
##' locations of shifts to plot.
##' @param par.reset A logical indicating whether to reset the graphical
##' parameters before exiting.
##' @param \dots additional arguments to be passed to \code{\link{points}}.
##'
##' @details Any given sample from the posterior distribution sampled using
##' \code{BAMM} contains a potentially unique configuration of rate shifts
##' and associated parameters. There is no single "best" rate shift, but
##' rather a set of shift configurations (and associated parameters) -
##' along with their relative probabilities - sampled with MCMC. This
##' function enables the user to plot the locations of shifts sampled with
##' \code{BAMM} for a given sample from the posterior.
##'
##' If the \code{bammdata} object contains just a single sample, these
##' shifts will be plotted regardless of the value of \code{index}.
##'
##' @note If a \code{shiftnodes} argument is passed, care should be taken to
##' ensure that the nodes are in the same order as in the event data for
##' the sample index.
##'
##' @author Mike Grundler
##'
##' @seealso \code{\link{getShiftNodesFromIndex}}, \code{\link{plot.bammdata}}
##'
##' @examples
##' data(whales, events.whales)
##' ed <- getEventData(whales, events.whales, burnin=0.25, nsamples = 500)
##'
##' # adding shifts to tree for specific posterior samples
##' plot(ed, method="polar")
##' addBAMMshifts(ed, index=5, "polar")
##'
##' # multi-panel plotting and adding shifts
##' par(mfrow=c(2,3),mar=c(5,1,1,1))
##' samples = sample(1:length(ed$eventData), 6)
##' for (i in 1:6) {
##' sed <- subsetEventData(ed, samples[i])
##' plot(sed, par.reset=FALSE)
##' addBAMMshifts(sed,index=1,method="phylogram",par.reset=FALSE)
##' }
##' @keywords graphics
##' @export
addBAMMshifts = function(ephy, index = 1, method = 'phylogram', cex=1, pch=21, col=1, bg=2, msp = NULL, shiftnodes = NULL, par.reset=TRUE, ...) {
if (!inherits(ephy, 'bammdata')) stop("Object ephy must be of class bammdata");
lastPP <- get("last_plot.phylo", envir = .PlotPhyloEnv);
if (par.reset){
op <- par(no.readonly = TRUE);
par(lastPP$pp);
}
if (length(ephy$eventData) == 1){
index <- 1;
}
if (is.null(shiftnodes))
shiftnodes <- getShiftNodesFromIndex(ephy, index)
isShift <- ephy$eventData[[index]]$node %in% shiftnodes;
times <- ephy$eventData[[index]]$time[isShift];
if (!is.null(msp)) {
cex <- 0.75 + 5 * msp$edge.length[msp$edge[,2] %in% shiftnodes];
}
if (method == 'phylogram') {
### obsolete b/c plot.bammdata no longer scales each axis to a max of 1. now behaves like plot.phylo
# if (max(lastPP$xx) <= 1) {
# XX <- times/max(branching.times(as.phylo.bammdata(ephy)));
# } else {
# XX <- times;
# }
XX <- times;
YY <- lastPP$yy[shiftnodes];
} else if (method == 'polar') {
rb <- lastPP$rb;
XX <- (rb+times/max(branching.times(as.phylo.bammdata(ephy)))) * cos(lastPP$theta[shiftnodes]);
YY <- (rb+times/max(branching.times(as.phylo.bammdata(ephy)))) * sin(lastPP$theta[shiftnodes]);
}
points(XX,YY,pch=pch,cex=cex,col=col,bg=bg, ...);
if (par.reset) {
par(op);
}
}
# --- end of file: BAMMtools/R/addBAMMshifts.R ---
##################################
# Internal function called by plot.dtrates(...)
# Arguments:
# x,y = coordinates of center of curvature of arc, e.g. (0,0)
# theta1 = initial theta of arc (radians)
# theta2 = ending theta of arc (radians)
# rad = radius of arc
arc <- function(x,y,theta1,theta2,rad,border,...)
{
noTips <- which((theta2 - theta1) != 0);
if ((length(theta1)+1)/2 > 1000) {
steps <- (theta2-theta1)/30;
steps <- steps[noTips];
theta1 <- theta1[noTips];
theta2 <- theta2[noTips];
rad <- rad[noTips];
border <- border[noTips];
for (i in 1:length(steps))
{
xv <- x+rad[i]*cos(seq(theta1[i],theta2[i],steps[i]));
yv <- y+rad[i]*sin(seq(theta1[i],theta2[i],steps[i]));
lines(xv,yv,lend=2,col=border[i],...);
}
}
else {
#storing all the coords up front for fast arc plotting, so can be memory intensive.
#tested on tree with 6670 tips with no problem, but for now only use
#for trees under 1000 tips
m <- matrix(NA, nrow=4, ncol=length(noTips));
m[1,] <- theta2[noTips];
m[2,] <- theta1[noTips];
m[3,] <- rad[noTips];
m[4,] <- border[noTips];
arcsegs <- apply(m, 2, function(z) {
zz <- as.numeric(z[1:3]);
inc <- (zz[2] - zz[1])/30
xv <- zz[3]*cos(seq(zz[1],zz[2],inc));
yv <- zz[3]*sin(seq(zz[1],zz[2],inc));
xv <- rep(xv, each=2);
xv <- xv[-c(1,length(xv))];
xv <- matrix(xv, ncol=2, byrow=TRUE);
yv <- rep(yv, each=2);
yv <- yv[-c(1,length(yv))];
yv <- matrix(yv, ncol=2, byrow=TRUE);
data.frame(xv,yv,rep(z[4],nrow(xv)),stringsAsFactors=FALSE);
});
arcsegs <- do.call(rbind, arcsegs);
segments(x+arcsegs[,1], y+arcsegs[,3], x+arcsegs[,2], y+arcsegs[,4], col=arcsegs[,5], lend=2, ...);
}
}
# --- end of file: BAMMtools/R/arc.R ---
# Tests whether two posterior samples (indexed by index1 and index2) in a
# bammdata object have identical rate shift configurations, i.e. whether
# they share exactly the same set of shift nodes.
areEventConfigurationsIdentical <- function(ephy, index1, index2){
	nodeset <- c(ephy$eventData[[index1]]$node, ephy$eventData[[index2]]$node);
	# a node counted only once occurs in one configuration but not the other
	diffs <- sum(table(nodeset) == 1);
	return(diffs == 0);
}
# --- end of file: BAMMtools/R/areEventConfigurationsIdentical.R ---
# Tests whether two sets of shifts are exactly identical
# a = vector of shift nodes
# b = vector of shift nodes
areShiftSetsEqual <- function(a, b){
if (length(a) != length(b)){
return(FALSE);
}else if (length(a) == 0 & length(b) == 0){
return(TRUE);
}else{
if (length(intersect(a,b)) != length(a)){
return(FALSE);
}else{
return(TRUE);
}
}
}
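# Usage sketch (hypothetical node numbers; the guard keeps this from running
# when the package is sourced). Order is ignored and two empty shift sets are
# considered equal.
if (FALSE) {
	areShiftSetsEqual(c(12, 40), c(40, 12))      # TRUE: order is ignored
	areShiftSetsEqual(c(12, 40), 12)             # FALSE: different sizes
	areShiftSetsEqual(integer(0), integer(0))    # TRUE: two empty sets match
}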
# --- end of file: BAMMtools/R/areShiftSetsEqual.R ---
as.bammdata <- function(x, ...) {
if (length(class(x)) == 1 && inherits(x, "bammdata")) {
return(x);
}
UseMethod("as.bammdata");
}
##' @export
as.bammdata.credibleshiftset <- function(x, ...) {
obj <- x;
class(obj) <- "bammdata";
return(obj);
}
# --- end of file: BAMMtools/R/as.bammdata.R ---
##' @export
as.phylo.bammdata <- function(x, ...) {
if (!inherits(x, 'bammdata')) {
stop("Object ephy must be of class bammdata\n");
}
newphylo <- list();
newphylo$edge <- x$edge;
newphylo$Nnode <- x$Nnode;
newphylo$tip.label <- x$tip.label;
newphylo$edge.length <- x$edge.length;
class(newphylo) <- 'phylo';
attributes(newphylo)$order = attributes(x)$order;
if (attributes(newphylo)$order != "cladewise") {
newphylo <- reorder(newphylo);
}
return(newphylo);
}
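# Usage sketch (not run at source time): strip the BAMM annotations from a
# bammdata object to recover a plain ape phylo, e.g. for branching.times().
# Uses the whales example data shipped with BAMMtools.
if (FALSE) {
	data(whales, events.whales)
	ed <- getEventData(whales, events.whales, burnin = 0.1, nsamples = 100)
	phy <- as.phylo.bammdata(ed)
	max(branching.times(phy))   # depth of the whales tree
}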
# --- end of file: BAMMtools/R/as.phylo.bammdata.R ---
##' @title Map macroevolutionary rates to colors
##'
##' @description Maps macroevolutionary rates to a set of \code{NCOLORS}.
##'
##' @param rates A numeric vector of phenotypic rates or a list of numeric
##' vectors of speciation and extinction rates.
##' @param NCOLORS An integer number of colors to use for the mapping. Larger
##' numbers do not necessarily result in smoother looking color ramps. The
##' default is 64 and is probably sufficient for most purposes.
##' @param spex A character string. "s" means that speciation rates are used
##' to make the map, "e" means that extinction rates are used. "netdiv"
##' means that diversification rates are used. Ignored for \code{BAMM}
##' trait data.
##' @param logcolor Logical. Should the natural logarithm of rates be used for
##' the color map.
##' @param method Determines how the color breaks are created. See Details.
##' @param JenksSubset Number of regularly spaced samples to subset from
##' \code{rates}. Only relevant when \code{method = "jenks"}. See Details.
##'
##' @details If \code{method = "quantile"} macroevolutionary rates are binned
##' into \code{NCOLORS+1} percentiles and rates in each bin are mapped to
##' a color determined by the \code{pal} argument in \code{plot.bammdata}.
##' Alternatively, if \code{method = "linear"} macroevolutionary rates are
##' binned into \code{NCOLORS+1} equal length intervals between the
##' minimum and maximum.
##'
##' If \code{method = "jenks"}, macroevolutionary rates are binned into
##' \code{NCOLORS+1} categories, according to the Jenks natural breaks
##' classification method. This method is borrowed from the field of
##' cartography, and seeks to minimize the variance within categories,
##' while maximizing the variance between categories.
##'
##' The Jenks natural breaks method was ported to C from code found in the classInt R package.
##'
##' @return A numeric vector of rate percentiles/intervals.
##'
##' @author Mike Grundler, Pascal Title
##'
##' @seealso \code{\link{plot.bammdata}}
##'
##' @examples
##' data(whales, events.whales)
##' ed <- getEventData(whales, events.whales, burnin = 0.2, nsamples = 500)
##'
##' ed <- dtRates(ed, 0.01)
##' colors <- assignColorBreaks(ed$dtrates$rates, spex="s") #speciation rates
##' #colors <- assignColorBreaks(ed$dtrates$rates[[1]])
##' #this also works for speciation rates
##'
##' plot(ed, colorbreaks = colors, spex="s")
##' colors <- assignColorBreaks(ed$dtrates$rates, spex="netdiv")
##' #diversification rates
##'
##' #colors <- assignColorBreaks(ed$dtrates$rates[[1]] - ed$dtrates$rates[[2]])
##' #this also works for diversification rates
##'
##' plot(ed, colorbreaks = colors, spex="netdiv")
##' @keywords graphics
##' @export
assignColorBreaks <- function(rates, NCOLORS = 64, spex = "s", logcolor = FALSE, method = c("linear","quantile","jenks"), JenksSubset = NULL) {
method = match.arg(method, c("linear", "quantile", "jenks"));
if (mode(rates) == "numeric") {
if (logcolor == FALSE) {
if (method == "quantile") {
bks <- quantile(rates, seq(0,1, length.out=(NCOLORS+1)));
}
if (method == 'jenks') {
bks <- getJenksBreaks(rates, k=(NCOLORS + 1), subset = JenksSubset);
}
if (method == 'linear') {
bks <- seq(min(rates), max(rates), length.out = (NCOLORS+1));
}
}
else {
if (method == "quantile") {
bks <- quantile(log(rates), seq(0,1, length.out=(NCOLORS+1)));
}
if (method == 'jenks') {
bks <- getJenksBreaks(log(rates), k=(NCOLORS + 1), subset = JenksSubset);
}
if (method == 'linear') {
bks <- seq(min(log(rates)), max(log(rates)), length.out = (NCOLORS+1));
}
}
}
else if (mode(rates) == "list") {
if (tolower(spex) == "s") {
if (logcolor == FALSE) {
if (method == "quantile") {
bks <- quantile(rates[[1]], seq(0,1, length.out=(NCOLORS+1)));
}
if (method == 'jenks') {
bks <- getJenksBreaks(rates[[1]], k=(NCOLORS + 1), subset = JenksSubset);
}
if (method == 'linear') {
bks <- seq(min(rates[[1]]), max(rates[[1]]), length.out = (NCOLORS+1));
}
}
else {
if (method == "quantile") {
bks <- quantile(log(rates[[1]]), seq(0,1, length.out=(NCOLORS+1)));
}
if (method == 'jenks') {
bks <- getJenksBreaks(log(rates[[1]]), k=(NCOLORS + 1), subset = JenksSubset);
}
if (method == 'linear') {
bks <- seq(min(log(rates[[1]])), max(log(rates[[1]])), length.out = (NCOLORS+1));
}
}
}
else if (tolower(spex) == "e") {
if (logcolor == FALSE) {
if (method == "quantile") {
bks <- quantile(rates[[2]], seq(0,1, length.out=(NCOLORS+1)));
}
if (method == 'jenks') {
bks <- getJenksBreaks(rates[[2]], k=(NCOLORS + 1), subset = JenksSubset);
}
if (method == 'linear') {
bks <- seq(min(rates[[2]]), max(rates[[2]]), length.out = (NCOLORS+1));
}
}
else {
if (method == "quantile") {
bks <- quantile(log(rates[[2]]), seq(0,1, length.out=(NCOLORS+1)));
}
if (method == 'jenks') {
bks <- getJenksBreaks(log(rates[[2]]), k=(NCOLORS + 1), subset = JenksSubset);
}
if (method == 'linear') {
					bks <- seq(min(log(rates[[2]])), max(log(rates[[2]])), length.out = (NCOLORS+1));
}
}
}
else if (tolower(spex) == "netdiv") {
if (logcolor == FALSE) {
if (method == "quantile") {
bks <- quantile(rates[[1]] - rates[[2]], seq(0,1, length.out=(NCOLORS+1)));
}
if (method == 'jenks') {
bks <- getJenksBreaks(rates[[1]] - rates[[2]], k=(NCOLORS + 1), subset = JenksSubset);
}
if (method == 'linear') {
bks <- seq(min(rates[[1]] - rates[[2]]), max(rates[[1]] - rates[[2]]), length.out = (NCOLORS+1));
}
}
else {
z <- safeLog(rates[[1]] - rates[[2]]);
if (method == "quantile") {
bks <- quantile(z, seq(0,1, length.out=(NCOLORS+1)));
#bks <- quantile(log(rates[[1]] - rates[[2]]), seq(0,1, length.out=(NCOLORS+1)))
}
if (method == 'jenks') {
bks <- getJenksBreaks(z, k=(NCOLORS + 1), subset = JenksSubset);
}
if (method == 'linear') {
bks <- seq(min(z), max(z), length.out = (NCOLORS+1));
#bks <- seq(min(log(rates[[1]] - rates[[2]])), max(min(log(rates[[1]] - rates[[2]]))), length.out=(NCOLORS+1) );
}
attr(bks, "increment") <- attr(z, "increment");
return (safeLog(bks, inverse = TRUE));
}
}
}
if (logcolor)
return (exp(bks));
return (bks)
}
safeLog <- function(x, inverse = FALSE) {
if (inverse)
y <- exp(x) - attr(x, "increment")
else {
y <- log(x + abs(min(x)) + 0.0001);
attr(y, "increment") <- abs(min(x)) + 0.0001;
}
return (y);
}
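# Round-trip sketch (not run at source time). safeLog shifts values upward so
# the log is defined even for non-positive net diversification rates, records
# the shift as an attribute, and safeLog(..., inverse = TRUE) undoes it.
if (FALSE) {
	x <- c(-0.2, 0, 0.3, 1.1)
	y <- safeLog(x)
	safeLog(y, inverse = TRUE)   # recovers x up to floating point error
}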
# --- end of file: BAMMtools/R/assignColorBreaks.R ---
# BAMMtools: all functions in this file except BAMMlikelihood are undocumented internal functions.
# These functions compute the likelihood exactly as in BAMM:
# break the tree into segments, map events onto it, and generate a complete matrix for the recursive calculations.
# event:
# index, node, event, timestart, timeend, E0, Et, Dt
# create list of time vectors, of length 1: max number of nodes.
# event_times[[k]] would give the vector of absolute times of events that happened on branch leading to node k
# Event_ID[[k]] would give the index value of the event that happened.
# Generate augmented phylogenetic tree structure with the following components:
# event_times: a list of length (number of nodes), where event_times[[k]] is
# the vector of absolute times, in order, of events that happened
# on a focal branch. If no event, it is NULL
# event_id : a list of length equal to number of nodes, as event_times, but
# holding the corresponding event id
# events : a dataframe giving parameters and associated nodes (and unique index values)
# of the event data.
# node_event : The event governing the process realized at the node. This will be the first
# event encountered as one moves rootwards towards the tips from the focal node
eventMatrix <- function(x, phy) {
if (!inherits(x, 'data.frame')) {
x <- read.csv(x, header = FALSE, stringsAsFactors = FALSE)
}
colnames(x) <- c("generation", "leftchild", "rightchild", "abstime", "lambdainit", "lambdashift", "muinit", "mushift")
#generation,leftchild,rightchild,abstime,lambdainit,lambdashift,muinit,mushift
x$index <- 1:nrow(x)
x$node <- numeric(nrow(x))
for (i in 1:nrow(x)) {
if (is.na(x$rightchild[i])) {
x$node[i] <- which(phy$tip.label == x$leftchild[i])
} else {
x$node[i] <- getMRCA(phy, as.character(x[i,2:3]))
}
}
return(x)
}
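# Usage sketch (not run at source time): build the internal event matrix for
# the last posterior sample of the whales example data shipped with BAMMtools.
if (FALSE) {
	data(whales, events.whales)
	lastgen <- events.whales[events.whales$generation == max(events.whales$generation), ]
	em <- eventMatrix(lastgen, whales)
	head(em)   # one row per event, with index and node assignment appended
}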
# A modified version of function eventMatrix(...)
# but creates a constant-rate birth-death matrix
# so you can compare likelihoods to diversitree etc.
makeBDeventMatrix <- function(phy, lambda, mu) {
xx <- data.frame(generation=1, leftchild=phy$tip.label[1], rightchild = phy$tip.label[length(phy$tip.label)], abstime = 0, lambdainit = lambda, lambdashift = 0, muinit = mu, mushift = 0, stringsAsFactors = FALSE)
colnames(xx) <- c("generation", "leftchild", "rightchild", "abstime", "lambdainit", "lambdashift", "muinit", "mushift")
#generation,leftchild,rightchild,abstime,lambdainit,lambdashift,muinit,mushift
xx$index <- 1:nrow(xx)
xx$node <- numeric(nrow(xx))
for (i in 1:nrow(xx)){
if (is.na(xx$rightchild[i])){
xx$node[i] <- which(phy$tip.label == xx$leftchild[i])
}else{
xx$node[i] <- getMRCA(phy, as.character(xx[i,2:3]))
}
}
return(xx)
}
buildTreeWithEventList <- function(phy, events) {
nodeset <- 1:max(phy$edge)
phy$event_times <- vector("list", length = length(nodeset))
phy$event_id <- vector("list", length = length(nodeset))
phy$node_event <- numeric(length(nodeset))
bt <- branching.times(phy)
phy$events <- events
rootnode <- length(phy$tip.label) + 1
phy <- recursiveAddEventsToTree(phy, rootnode)
}
recursiveAddEventsToTree <- function(phy, node) {
rootnode <- length(phy$tip.label) + 1
if (node == rootnode) {
phy$event_times[[node]] <- 0
phy$event_id[[node]] <- 1
phy$node_event[node] <- 1
dset <- phy$edge[,2][phy$edge[,1] == node]
phy <- recursiveAddEventsToTree(phy, dset[1])
phy <- recursiveAddEventsToTree(phy, dset[2])
} else {
parent <- phy$edge[,1][phy$edge[,2] == node]
events_on_branch <- sum(phy$events$node == node)
if (events_on_branch == 0) {
phy$node_event[node] <- phy$node_event[parent]
} else {
tmp <- phy$events[phy$events$node == node, ]
tmp <- tmp[order(tmp$abstime), ]
phy$event_times[[node]] <- tmp$abstime
phy$event_id[[node]] <- tmp$index
phy$node_event[node] <- phy$event_id[[node]][events_on_branch]
}
if (node > length(phy$tip.label)) {
dset <- phy$edge[,2][phy$edge[,1] == node]
phy <- recursiveAddEventsToTree(phy, dset[1])
phy <- recursiveAddEventsToTree(phy, dset[2])
}
}
return(phy)
}
# make a list of matrices for each node
# also a vector of E0 and D0 for calculations
# recursively assemble the segment matrix.
# need seglength value.
# rel_seg is the relative segment length, exactly as specified in BAMM
buildSegmentMatrix <- function(phy, rel_seg){
nodeset <- 1:max(phy$edge)
phy$D_0 <- rep(1, length(nodeset))
phy$E_0 <- numeric(length(nodeset))
phy$branchsegs <- vector("list", length(nodeset))
bt <- branching.times(phy)
phy$maxtime <- max(bt)
phy$seg <- rel_seg * phy$maxtime
bt <- max(bt) - bt
phy$bt <- rep(phy$maxtime, length(nodeset))
names(phy$bt) <- as.character(nodeset)
# now includes extant nodes; also speciation times, not branching times
phy$bt[names(bt)] <- bt
phy$postorder <- NULL
	phy <- buildSegmentMatricesRecursive(phy, (length(phy$tip.label) + 1))
	return(phy)
}
buildSegmentMatricesRecursive <- function(phy, node) {
seg <- phy$seg
# these branching times are actually speciation times:
# start at zero, and tip nodes have value equal to max age of tree
bt <- phy$bt
if (node > length(phy$tip.label)) {
dset <- phy$edge[,2][phy$edge[,1] == node]
phy <- buildSegmentMatricesRecursive(phy, dset[1])
phy <- buildSegmentMatricesRecursive(phy, dset[2])
}
if (node != (length(phy$tip.label) + 1)) {
#cat("here")
parent <- phy$edge[,1][phy$edge[,2] == node]
ev <- phy$event_id[[node]]
et <- phy$event_times[[node]]
branch_end <- bt[as.character(node)]
branch_start <- bt[as.character(parent)]
start_time <- branch_end
end_time <- branch_end
if (is.null(ev)) {
# If there are no events on branch
# do not worry about them
# but still do piecewise calculations.
# since ev is null, branch is governed by process of parent node:
ev <- phy$node_event[parent]
while (start_time > branch_start) {
start_time <- start_time - seg
if (start_time < branch_start) {
start_time <- branch_start
}
tmp <- matrix(c(ev, start_time, end_time, NA, NA, NA, NA, NA), nrow = 1)
if (is.null(phy$branchsegs[[node]])){
phy$branchsegs[[node]] <- tmp
} else {
phy$branchsegs[[node]] <- rbind(phy$branchsegs[[node]], tmp)
}
end_time <- start_time
}
} else {
while (length(et) > 0) {
start_time <- start_time - seg
ll <- length(et)
if (start_time <= et[ll]){
start_time <- et[ll]
et <- et[-ll]
}
tmp <- matrix(c(ev[ll], start_time, end_time, NA, NA, NA, NA, NA), nrow=1)
if (is.null(phy$branchsegs[[node]])) {
phy$branchsegs[[node]] <- tmp
} else {
phy$branchsegs[[node]] <- rbind(phy$branchsegs[[node]], tmp)
}
end_time <- start_time
}
# now, should be done with all events. Now go through
# to beginning of branch
ev <- phy$node_event[parent]
while (start_time > branch_start) {
start_time <- start_time - seg
if (start_time < branch_start) {
start_time <- branch_start
}
tmp <- matrix(c(ev, start_time, end_time, NA, NA, NA, NA, NA), nrow = 1)
phy$branchsegs[[node]] <- rbind(phy$branchsegs[[node]], tmp)
end_time <- start_time
}
}
}
if (is.null(phy$postorder)) {
phy$postorder <- node
} else {
phy$postorder <- c(phy$postorder, node)
}
return(phy)
}
# likelihood functions for the constant-rate birth-death process
# used to compute speciation and extinction probabilities
# on individual branches.
E_func <- function(lam, mu, E0, dt) {
num <- (1 - E0) * (lam - mu);
denom <- (1 - E0) * lam - (mu - lam * E0) * exp(-(lam - mu) * dt);
return( 1 - num / denom)
}
D_func <- function(lam, mu, E0, D0, dt) {
r <- lam - mu
num <- (D0 * r^2) * exp((-r) * dt);
denom <- ( (lam - lam * E0 + exp((-r) * dt) * (lam * E0 - mu) ) ) ^ 2;
return(num / denom)
}
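# Sanity-check sketch (not run at source time; checkConstantRateE is
# illustrative only, not part of the BAMMtools API). With E0 = 0, E_func
# should reduce to the classic constant-rate birth-death extinction
# probability (Kendall 1948; Nee et al. 1994):
#	E(t) = mu * (1 - exp(-r*t)) / (lam - mu * exp(-r*t)), with r = lam - mu.
if (FALSE) {
	checkConstantRateE <- function(lam = 1.0, mu = 0.5, t = 2.0) {
		r <- lam - mu
		analytic <- mu * (1 - exp(-r * t)) / (lam - mu * exp(-r * t))
		c(E_func = E_func(lam, mu, 0, t), analytic = analytic)
	}
	checkConstantRateE()   # both entries should be equal
}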
############ Likelihood calculation
# Let E_0 and D_0 be initial extinction and data probabilities
# Let E_t and D_t be probabilities computed after some time t, given
# initial values.
# Do not track D0 values; always reset D0 to 1 at the start of each segment.
# Initialize E0 at tip nodes.
# Loop over postorder sequence
# for each segment
# Initialize E_0
# case (i): if first (tipwards) segment, take E_0 from node. These
# will already have been computed or set.
# case (ii): otherwise, take previous branch E_t as E_0
# case (iii): if finish branch and if identical in state to other branch
# set E0 for parent node equal to this value
# case (iv): if finish branch and not identical in state to other branch
# multiply, thus conditioning the extinction probability on the occurrence
# of two lineages at this time
#
# Compute D_t given this E_0 and add log to likelihood.
# Compute E_t for segment
# At end of segment matrix, E_t for the parent node is set to E_t from
# the last segment calculation
#
#
#
# computeBAMMlikelihood
# sf: sampling fraction
# phy: phylogenetic tree with all components from buildSegmentMatrixEtc
#
# Should do this 2 ways for the constant-rate process:
#	1. always recompute E0 for each segment
#	2. do it using segments (the segment advantage is key for the time-varying process)
computeBAMMlikelihood <- function(phy, sf = 1, alwaysRecomputeE0 = FALSE, e_prob_condition = "if_different", TOL = 0.001) {
# initial calculation: lets us start with D_0 = 1 at all nodes.
logLik <- length(phy$tip.label) * log(sf)
	phy$E_0[1:length(phy$E_0)] <- -1 # set all values initially to < 0
phy$E_0[1:length(phy$tip.label)] <- 1 - sf # set tip E0
events <- phy$events
for (i in phy$postorder[1:(length(phy$postorder) - 1)]) {
em <- phy$branchsegs[[i]]
for (k in 1:nrow(em)) {
# elements of branchsegs matrix, in order:
# event index, start time , end time,
# E_init for segment, E_final for seg, D_final for seg
curr_event <- em[k,1]
index <- which(phy$events$index == curr_event)
# time for start and stop of interval
# expressed in units of time since start of current process:
event_t_start <- em[k,2] - events$abstime[index]
event_t_end <- em[k,3] - events$abstime[index]
lambdainit <- events[index, "lambdainit"]
lambdashift <- events[index, "lambdashift"]
muinit <- events[index, "muinit"]
mushift <- events[index, "mushift"]
curr_lam <- meanExponentialRate(lambdainit, lambdashift, event_t_start, event_t_end)
curr_mu <- meanExponentialRate(muinit, mushift, event_t_start, event_t_end)
tt <- em[k,3] - em[k,2]
em[k,7] <- curr_lam
em[k,8] <- curr_mu
if (k == 1 & (!alwaysRecomputeE0)) {
# if first segment, set to parent node E_init
em[k,4] <- phy$E_0[i]
} else {
em[k,4] <- em[k-1,5]
# at this point, all E_0 values should be set.
}
# compute extinction prob on segment:
em[k,5] <- E_func(curr_lam, curr_mu, em[k,4], tt)
# compute speciation prob on segment
em[k,6] <- D_func(curr_lam, curr_mu, em[k,4], 1.0, tt)
logLik <- logLik + as.numeric( log(em[k,6]) )
} # for k loop
if (k == nrow(em)) {
# at end of branch segments. Ef for last calculation
# becomes E_0 for the parent node IF identical in state
# else multiply, thus conditioning on a speciation event.
parent <- phy$edge[,1][phy$edge[,2] == i]
e0a <- phy$E_0[parent]
		# value for the other descendant branch:
		# if not equal to -1, it will already have been set by the other branch
if (e0a < 0){
# value has not been set
phy$E_0[parent] <- em[nrow(em), 5]
}else{
# here is value for other lineage
e0b <- em[nrow(em), 5]
if (e_prob_condition == "arbitrary"){
# just making this explicit, taking the "a" branch:
#cat("arbitrary\n")
phy$E_0[parent] <- e0a
}else if (e_prob_condition == "all_nodes"){
phy$E_0[parent] <- e0a * e0b
#cat("all nodes\n")
}else if (e_prob_condition == "if_different"){
#cat("if_diff\n")
delta <- abs(e0a - e0b)
if (delta > TOL){
phy$E_0[parent] <- e0a * e0b
}else{
phy$E_0[parent] <- e0a
}
}else if (e_prob_condition == "random"){
if (runif(1) < 0.5){
phy$E_0[parent] <- e0a
}else{
phy$E_0[parent] <- e0b
}
}else{
stop("Invalid options for e_prob_condition")
}
			# case i: take value arbitrarily
# case ii. condition all nodes
# case iii. Condition only if extinction probs are different
# case iv. take value at random
# e.g., assume 1 lineage is a true parent process
}
#parent <- phy$edge[,1][phy$edge[,2] == i]
#phy$E_0[parent] <- em[nrow(em), 5]
}
phy$branchsegs[[i]] <- em
} # for i loop
# Calculations on nodes:
# This explicitly conditions on occurrence of a root node
# because log(lambda) at the root is not added
nodeset <- phy$postorder[phy$postorder > (length(phy$tip.label) + 1)]
for (i in nodeset) {
# this block is valid for time-varying rates
# as exponential speciation rates are computed for each node
curr_lam <- 0
curr <- phy$node_event[i]
time_from_event <- phy$bt[as.character(i)] - phy$events$abstime[ phy$events$index == curr ]
lam0 <- events[events$index == phy$node_event[i], "lambdainit" ]
mu <- events[events$index == phy$node_event[i], "muinit" ]
lshift <- events[events$index == phy$node_event[i], "lambdashift"]
if (lshift <= 0){
curr_lam <- as.numeric(lam0 * exp(time_from_event * lshift))
}else{
curr_lam <- as.numeric(lam0 * (2 - exp(-lshift * time_from_event)))
}
logLik <- logLik + log(curr_lam)
}
## To condition on survival:
# Need the extinction probs of process at 2 basal branches:
dset <- phy$edge[phy$edge[,1] == (length(phy$tip.label) + 1), 2]
e1 <- phy$branchsegs[[dset[1]]]
e2 <- phy$branchsegs[[dset[2]]]
E_prob_1 <- e1[nrow(e1), 5]
E_prob_2 <- e2[nrow(e2), 5]
# These are the probability that a single lineage
# does not go extinct.
# So probability that both basal branches persist
# is (1 - p(extinct))^2
# This is exactly isometric with the diversitree likelihood
#
# But treat R and L branches separately as may have different values
logLik <- logLik - log(1 - E_prob_1) - log(1 - E_prob_2)
phy$logLik <- logLik
return(phy)
}
meanExponentialRate <- function(rate_init, rate_shift, t_start, t_end) {
delta_T = t_end - t_start;
integrated = 0.0;
if (rate_shift < 0) {
integrated = (rate_init / rate_shift) * (exp(rate_shift * t_end) - exp(rate_shift * t_start));
} else if (rate_shift > 0) {
integrated = rate_init * (2 * delta_T + (1.0 / rate_shift) *
(exp(-rate_shift * t_end) - exp(-rate_shift * t_start)));
} else {
integrated = rate_init * delta_T;
}
return(integrated / delta_T)
}
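# Numeric cross-check sketch (not run at source time; checkMeanRate is
# illustrative only, not part of the BAMMtools API). BAMM's time-varying rate
# is rate_init * exp(rate_shift * t) for rate_shift <= 0 and
# rate_init * (2 - exp(-rate_shift * t)) for rate_shift > 0, so the closed-form
# mean above should match direct numerical integration of those curves.
if (FALSE) {
	checkMeanRate <- function(rate_init, rate_shift, t_start, t_end) {
		f <- if (rate_shift > 0) {
			function(t) rate_init * (2 - exp(-rate_shift * t))
		} else {
			function(t) rate_init * exp(rate_shift * t)
		}
		numeric_mean <- integrate(f, t_start, t_end)$value / (t_end - t_start)
		c(closed_form = meanExponentialRate(rate_init, rate_shift, t_start, t_end),
		  numeric = numeric_mean)
	}
	checkMeanRate(0.5, -0.1, 1, 3)   # decaying rate
	checkMeanRate(0.5, 0.1, 1, 3)    # increasing (bounded) rate
}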
##' @title Calculate \code{BAMM} likelihood
##'
##' @description Calculates the likelihood of a phylogeny exactly as is done
##' by \code{BAMM}, given a set of events.
##'
##' @param phy Either an object of class \code{phylo} or the path to a tree
##' file in newick format.
##' @param eventdata A table of event data, as returned by \code{BAMM}, either
##' as an object of class \code{dataframe} or as the path to an event_data
##' file. Alternatively, a named numeric vector of length two holding
##' speciation ("lambda") and extinction ("mu") rates for the
##' constant-rate birth-death process.
##' @param gen The \code{BAMM} generation for which the likelihood should be
##' calculated. Can be an integer specifying a specific generation, or
##' \code{last}, specifying the last generation, or \code{all}, in which
##' case the likelihood will be calculated for all generations.
##' @param segLength The relative segment length, exactly as defined for
##' \code{BAMM}.
##' @param sf The sampling fraction.
##' @param return.intermediates Debugging option, returns augmented
##' \code{phylo} objects for each generation, see Details.
##' @param e_prob_condition Approach for how extinction probabilities are
##' handled at nodes.
##' @param \dots Additional arguments that will be passed to an internal
##' function \code{computeBAMMlikelihood}.
##'
##' @details This function allows the user to check the likelihoods computed
##' by \code{BAMM} using an independent R-based implementation. This is
##' designed to provide a check on potential software bugs that might be
##' introduced during future \code{BAMM} development and which might
##' compromise the likelihood calculation. If you observe measurable
##' discrepancies between the likelihood computed by this function and the
##' corresponding likelihood returned by \code{BAMM}, please inform the
##' \code{BAMM} development team.
##'
##' @return If \code{return.intermediates == TRUE}, then \code{phylo} objects
##' are returned with the following components:
##'
##' \item{event_times}{A list of length (number of nodes), where
##' event_times[[k]] is the vector of absolute times, in order, of
##' events that happened on a focal branch. If no event, it is
##' \code{NULL}.}
##' \item{event_id}{A list of length equal to number of nodes, as
##' event_times, but holding the corresponding event id.}
##' \item{events}{A dataframe giving parameters and associated nodes (and
##' unique index values) of the event data.}
##' \item{node_event}{The event governing the process realized at the
##' node. This will be the first event encountered as one moves
##' rootwards towards the tips from the focal node.}
##'
##' @author Dan Rabosky, Pascal Title
##'
##' @examples
##' # a global sampling fraction of 0.98 was used in generating the whales
##' # dataset.
##' data(whales, events.whales, mcmc.whales)
##'
##' x <- BAMMlikelihood(whales, events.whales, gen = 'last', sf = 0.98)
##'
##' # Does the likelihood generated by BAMM match the R implementation?
##' identical(round(x, 3), mcmc.whales[nrow(mcmc.whales), 'logLik'])
##'
##' # an example with a constant-rate birth-death process:
##' pars <- c(0.5, 0.45)
##' names(pars) <- c("lambda", "mu")
##' BAMMlikelihood(whales, pars, sf = 0.98)
##' @export
BAMMlikelihood <- function(phy, eventdata, gen = 'last', segLength = 0.02, sf = 1, return.intermediates = FALSE, e_prob_condition = "if_different", ...) {
#gen can be a number 1 -> numberOfGenerations, or 'last', or 'all'
#segLength is segLength value used in BAMM
#sf is sampling fraction
	if (sum(names(eventdata) %in% c('lambda', 'mu')) == 2 & length(eventdata) == 2){
eventdata <- makeBDeventMatrix(phy, eventdata['lambda'], eventdata['mu'])
}
if (inherits(phy, 'character')) {
phy <- read.tree(phy)
}
if (!inherits(eventdata, 'data.frame')) {
eventdatafile <- eventdata
eventdata <- read.csv(eventdata, header = FALSE, stringsAsFactors = FALSE)
}
colnames(eventdata) <- c("generation", "leftchild", "rightchild", "abstime", "lambdainit", "lambdashift", "muinit", "mushift")
#if eventdata already had header, fix
if (eventdata[1,1] == 'generation') {
eventdata <- read.csv(eventdatafile, header = TRUE, stringsAsFactors = FALSE)
}
#check that supplied generation number is valid
if (is.numeric(gen)) {
if (any(!gen %in% eventdata$generation)) {
stop('Supplied generation number is not valid.')
}
}
	if (identical(gen, 'last')) {
		gen <- tail(eventdata$generation, 1)
	}
	if (identical(gen, 'all')) {
		gen <- unique(eventdata$generation)
	}
#extract requested generation
if (length(gen) != length(unique(eventdata$generation))) {
eventdata <- eventdata[sort(unlist(sapply(gen, function(x) which(eventdata$generation == x)))),]
}
eventList <- split(eventdata, eventdata$generation)
if (return.intermediates){
res <- vector(length=length(eventList), mode = "list")
for (i in 1:length(eventList)) {
eMat <- eventMatrix(eventList[[i]], phy)
phy2 <- buildTreeWithEventList(phy, eMat)
phy2 <- buildSegmentMatrix(phy2, segLength)
res[[i]] <- computeBAMMlikelihood(phy2, sf, e_prob_condition = e_prob_condition, ...)
}
return(res)
}else{
res <- vector(length = length(eventList))
for (i in 1:length(eventList)) {
eMat <- eventMatrix(eventList[[i]], phy)
phy2 <- buildTreeWithEventList(phy, eMat)
phy2 <- buildSegmentMatrix(phy2, segLength)
res[i] <- computeBAMMlikelihood(phy2, sf, e_prob_condition = e_prob_condition, ...)$logLik
}
return(res)
}
}
# x is event data frame with locations
# parameters (lambda, mu etc) are not used.
optimize1shiftBAMM <- function( phy, x , rel_seg = 1, e_prob_condition = "if_different"){
emat <- eventMatrix(x, phy)
phy2 <- buildTreeWithEventList(phy, emat)
phy2 <- buildSegmentMatrix(phy2, rel_seg)
lfx <- function(x){
phy2$events$lambdainit[1:2] <- exp(x[1:2])
phy2$events$muinit[1:2] <- exp(x[3:4])
phy2$events$lambdashift[1:2] <- c(0,0)
phy2$events$mushift[1:2] <- c(0,0)
tmp <- computeBAMMlikelihood(phy2, sf=1, e_prob_condition = e_prob_condition)
return(tmp$logLik)
}
# usually right here I would sample random parameters:
# like runif(2, 0, 0.5) for lambda
# then eps <- runif(2, 0, 1) for relative extinction
# then back-compute the extinction rates by multiplying eps * lambda
# the danger is that you need a try statement to catch
# if the function cannot be evaluated at the initial parameters
#pars_init <- c(0.1, 0.1, 0.01, 0.01)
lam_init <- runif(2, 0, 0.5)
mu_init <- runif(2, 0, 1) * lam_init
pars_init <- c(lam_init, mu_init)
res <- optim(log(pars_init), fn=lfx, method = "Nelder", control=list(fnscale = -1))
rr <- list(logLik = res$value, lambda = exp(res$par[1:2]), mu = exp(res$par[3:4]), conv=res$convergence, e_prob_condition = e_prob_condition)
return(rr)
}
# --- end of file: BAMMtools/R/bammLikelihood.R ---
palettes <- list(
BrBG = rev(c("#543005","#8c510a","#bf812d","#dfc27d","#f6e8c3",
"#f5f5f5","#c7eae5","#80cdc1","#35978f","#01665e",
"#003c30")),
PiYG = rev(c("#8e0152","#c51b7d","#de77ae","#f1b6da","#fde0ef",
"#f7f7f7","#e6f5d0","#b8e186","#7fbc41","#4d9221",
"#276419")),
PRGn = rev(c("#40004b","#762a83","#9970ab","#c2a5cf","#e7d4e8",
"#f7f7f7","#d9f0d3","#a6dba0","#5aae61","#1b7837",
"#00441b")),
PuOr = rev(c("#7f3b08","#b35806","#e08214","#fdb863","#fee0b6",
"#f7f7f7","#d8daeb","#b2abd2","#8073ac","#542788",
"#2d004b")),
RdBu = rev(c("#67001f","#b2182b","#d6604d","#f4a582","#fddbc7",
"#f7f7f7","#d1e5f0","#92c5de","#4393c3","#2166ac",
"#053061")),
RdYlBu = rev(c("#a50026","#d73027","#f46d43","#fdae61","#fee090",
"#ffffbf","#e0f3f8","#abd9e9","#74add1","#4575b4",
"#313695")),
BuOr = c("#002bff","#1a66ff","#3399ff","#66CCff","#99eeff",
"#ccffff","#ffffcc","#ffee99","#ffee66","#ff9933",
"#ff661a","#ff2b00"),
BuOrRd = c("#085aff","#3377ff","#5991ff","#8cb2ff","#bfd4FF",
"#e6eeff","#f7faff","#ffffcc","#ffff99","#ffff00",
"#ffcc00","#ff9900","#ff6600","#ff0000"),
DkRdBu = c("#2a0bd9","#264eff","#40a1ff","#73daff","#abf8ff",
"#e0ffff","#ffffbf","#ffe099","#ffad73","#f76e5e",
"#d92632","#a60021"),
BuDkOr = c("#1f8f99","#52c4cc","#99faff","#b2fcff","#ccfeff",
"#e6ffff","#ffe6cc","#ffca99","#ffad66","#ff8f33",
"#cc5800","#994000"),
GnPu = c("#005100","#008600","#00bc00","#00f100","#51ff51",
"#86ff86","#bcffbc","#ffffff","#fff1ff","#ffbcff",
"#ff86ff","#ff51ff","#f100f1","#bc00bc","#860086",
"#510051"),
RdYlGn = rev(c("#a50026","#d73027","#f46d43","#fdae61","#fee08b",
"#ffffbf","#d9ef8b","#a6d96a","#66bd63","#1a9850",
"#006837")),
Spectral = rev(c("#9e0142","#d53e4f","#f46d43","#fdae61","#fee08b",
"#ffffbf","#e6f598","#abdda4","#66c2a5","#3288bd",
"#5e4fa2")),
grayscale = c("#ffffff","#f0f0f0","#d9d9d9","#bdbdbd","#969696","#737373","#525252",
"#252525",
"#000000"),
revgray = rev(c("#ffffff","#f0f0f0","#d9d9d9","#bdbdbd","#969696","#737373","#525252",
"#252525",
"#000000")),
greyscale = c("#ffffff","#f0f0f0","#d9d9d9","#bdbdbd","#969696","#737373","#525252",
"#252525",
"#000000"),
revgrey = rev(c("#ffffff","#f0f0f0","#d9d9d9","#bdbdbd","#969696","#737373","#525252",
"#252525",
"#000000"))
);
.colorEnv <- new.env();
assign("palettes", palettes, env = .colorEnv);
# for backwards compatibility
##' @title Rich color palette
##'
##' @description Deprecated function. Please use
##' \code{\link[gplots]{rich.colors}} instead.
##'
##' @param n The number of desired colors.
##'
##' @seealso \code{\link[gplots]{rich.colors}}
##' @export
richColors <- function (n) {
cat("NOTE: function `richColors` is deprecated. Please use `rich.colors` instead.\n")
return(gplots::rich.colors(n))
}
# --- end of file: BAMMtools/R/bammcolors.R ---
barLegend <- function(pal, colorbreaks, fig, side, mar = rep(0,4), colpalette = NULL, ...) {
#if (length(pal) == 1)
# pal <- colorRampPalette(get("palettes",envir=.colorEnv)[[pal]])(length(colorbreaks)-1);
dpal <- get("palettes", envir = .colorEnv);
NCOLORS <- length(colorbreaks) - 1;
if (length(pal) >= 3) {
pal <- colorRampPalette(pal, space = 'Lab')(NCOLORS);
}
else if (pal %in% names(dpal)) {
pal <- colorRampPalette(dpal[[pal]], space = 'Lab')(NCOLORS);
}
else if (tolower(pal) == "temperature") {
pal <- gplots::rich.colors(NCOLORS);
}
else if (tolower(pal) == "terrain") {
pal <- terrain.colors(NCOLORS);
}
else {
stop("Unrecognized color palette specification");
}
if (!is.null(colpalette)) {
pal <- colpalette;
}
n <- length(pal);
x <- seq(0, n, 1) / n;
x <- rep(x, each = 2);
x <- x[-c(1, length(x))];
x <- matrix(x, ncol = 2, byrow = TRUE);
par(fig = fig, mar = mar, new = TRUE);
plot.new();
if (side == 2 || side == 4) {
xlim <- c(-0.1, 0.1);
ylim <- c(0, 1);
plot.window(xlim, ylim);
segments(x0 = 0, y0 = x[,1], x1 = 0, y1 = x[,2], col = pal, lwd = 8, lend = 2);
}
else {
xlim <- c(0,1);
ylim <- c(-0.1, 0.1);
plot.window(xlim, ylim);
segments(x0 = x[,1], y0 = 0, x1 = x[,2], y1 = 0, col = pal, lwd = 8, lend = 2);
}
tx <- numeric(3);
tx[1] <- min(colorbreaks, na.rm = TRUE);
tx[2] <- colorbreaks[median(1:length(colorbreaks))];
tx[3] <- max(colorbreaks, na.rm = TRUE);
axis(side, at = c(0, 0.5, 1), labels = signif(tx, 2), las=1, ...);
}
# --- end of file: BAMMtools/R/barLegend.R ---
#############################################################
#
#	branchMeanRateExponential(....)
#
#	Computes the time-averaged rate of an exponential-change rate process
#	over each interval [t1, t2]; vectorized analogue of the per-segment
#	integrals in meanExponentialRate() (bammLikelihood.R).
branchMeanRateExponential <- function(t1, t2, p1, p2){
tol <- 0.00001;
res <- vector(mode = 'numeric', length = length(t1));
res[which(abs(p2) < tol)] <- p1[which(abs(p2) < tol)];
nonzero <- which(p2 < -tol);
p1s <- p1[nonzero];
p2s <- p2[nonzero];
t1s <- t1[nonzero];
t2s <- t2[nonzero];
res[nonzero] <- (p1s/p2s)*(exp(p2s*t2s) - exp(p2s*t1s)) / (t2s - t1s);
nonzero <- which(p2 > tol);
p1s <- p1[nonzero];
p2s <- p2[nonzero];
t1s <- t1[nonzero];
t2s <- t2[nonzero];
res[nonzero] <- (p1s/p2s)*(2*p2s*(t2s-t1s) + exp(-p2s*t2s) - exp(-p2s*t1s)) / (t2s - t1s);
return(res);
}
# --- end of file: BAMMtools/R/branchMeanRateExponential.R ---
##' @title Visualize macroevolutionary cohorts
##'
##' @description Plots the matrix of pairwise correlations in rate regimes
##' between all tips in a phylogeny.
##'
##' @param x A matrix of pairwise correlations generated by
##' \code{getCohortMatrix}.
##' @param ephy An object of class \code{bammdata}.
##' @param col A vector of colors passed to the function \code{image}. These
##' will be used to color the values in \code{x}. See documentation for
##' \code{image}. If \code{col = 'temperature'}, the color palette from
##' \code{\link{rich.colors}} from the gplots package will be used.
##' @param pal The palette to use if \code{use.plot.bammdata=TRUE}. See
##' options documented in the help file for \code{\link{plot.bammdata}}.
##' @param lwd A numeric indicating the width of branches in the phylogeny.
##' @param ofs A numeric controlling the offset of the phylogeny from the
##' matrix plot. Appropriate values will probably be in the interval
##' [0,0.1].
##' @param use.plot.bammdata Logical. Should a phylorate plot be generated?
##' @param useraster A logical indicating whether the function \code{image}
##' should plot the matrix as a raster.
##' @param LARGE An integer. If trees have more tips than \code{LARGE},
##' \code{useraster} will be coerced to \code{TRUE}.
##' @param \dots Further arguments passed to \code{plot.bammdata} if
##' \code{use.plot.bammdata=TRUE} or \code{plot.phylo} if
##' \code{use.plot.bammdata=FALSE}.
##'
##' @details The plotting function creates an image of the \code{BAMM}
##' correlation matrix between tip lineages of the phylogeny. Each
##' correlation is the posterior frequency with which a pair of lineages
##' occurs in the same macroevolutionary rate regime. Correlations are
##' mapped to a set of colors, with warmer colors corresponding to higher
##' correlations than cooler colors. The set of colors is specified by the
##' \code{col} argument and a legend is plotted to guide interpretation of
##' the color-correlation map. Trees are plotted on the margins of the
##' matrix image. The correlation between any two tips can be inferred by
##' finding their intersection within the matrix image.
##'
##' \strong{IMPORTANT}: the legend DOES NOT apply to the phylorate plots
##' shown in the margin if \code{use.plot.bammdata=TRUE}.
##'
##' @author Mike Grundler
##'
##' @seealso \code{\link{plot.bammdata}}, \code{\link{getCohortMatrix}},
##' \code{\link{image}}
##'
##' @examples
##' data(whales, events.whales)
##' ed <- getEventData(whales, events.whales, burnin=0.1, nsamples=500)
##' x <- getCohortMatrix(ed)
##' cohorts(x, ed)
##' cohorts(x, ed, col='temperature')
##' cohorts(x, ed, ofs=0.05, col='temperature')
##' cohorts(x, ed, pal="temperature", col='temperature', use.plot.bammdata=TRUE)
##' # gray scale
##' cohorts(x, ed, col=gray(seq(0.2,0.9,length.out=128)),
##' use.plot.bammdata=FALSE)
##' @export
cohorts <- function(x, ephy, col, pal, lwd = 1, ofs = 0, use.plot.bammdata = FALSE, useraster = FALSE, LARGE = 500,...) {
if (is.null(dimnames(x) ))
stop("x must have row and column names");
op <- par(no.readonly = TRUE);
figs <- matrix(c(0,0.2,0.8,1,
0.2,0.95,0.8+ofs,1,
0,0.2-ofs,0,0.8,
0.2,0.95,0,0.8,
0.95,1,0.25,0.75
), byrow=TRUE,
nrow=5, ncol=4);
if (dim(x)[1] > LARGE)
useraster <- TRUE;
if (missing(pal))
pal <- "RdYlBu";
if (missing(col))
col <- colorRampPalette(get("palettes",.colorEnv)[["RdYlBu"]])(64);
if (all(col == 'temperature'))
col <- gplots::rich.colors(64);
ncolors <- length(col);
breaks <- quantile(seq(0,1.01,length.out=100),probs=seq(0,1,length.out=ncolors+1));
index <- match(ephy$tip.label, rownames(x));
x <- x[index, index];
if (use.plot.bammdata) {
par(fig = figs[2,], new=FALSE, mar = c(0,0,1,4));
plot(ephy, pal=pal,lwd=lwd,direction="downwards",...);
par(fig = figs[3,], new=TRUE, mar = c(5,1,0,0));
plot(ephy,pal=pal,lwd=lwd,direction="rightwards",...)
par(fig = figs[4,], new=TRUE, mar = c(5,0,0,4));
plot(0,0,type="n",axes=FALSE,ann=FALSE,xlim=c(0,1),ylim=c(0,1))
image(x,axes=FALSE,xlab="",ylab="",col=col,xlim=c(0,1),ylim=c(0,1),breaks=breaks,add=TRUE,useRaster=useraster);
}
else {
phy <- as.phylo.bammdata(ephy);
bt <- max(ephy$end)
par(fig = figs[2,], new=FALSE, mar = c(0,0,1,4));
plot.phylo(phy,edge.width=lwd,direction="downwards",show.tip.label=FALSE,x.lim=c(1,length(phy$tip.label)),y.lim=c(0,bt), ...);
par(fig = figs[3,], new=TRUE, mar = c(5,1,0,0));
plot.phylo(phy,edge.width=lwd,direction="rightwards",show.tip.label=FALSE,y.lim=c(1,length(phy$tip.label)),x.lim=c(0,bt), ...);
par(fig = figs[4,], new=TRUE, mar = c(5,0,0,4));
gl <- 1:(length(ephy$tip.label)+1);
plot(0,0,type="n",axes=FALSE,ann=FALSE,xlim=c(1,length(gl)-1),ylim=c(1,length(gl)-1))
image(gl,gl,x,axes=FALSE,xlab="",ylab="",col=col,xlim=c(1,length(gl)-1),ylim=c(1,length(gl)-1),breaks=breaks,add=TRUE,useRaster=useraster);
}
#barLegend(col, quantile(seq(min(x),max(x),length.out=ncolors+1),probs=seq(min(x),max(x),length.out=ncolors+1)),fig=figs[5,],side=2);
barLegend(col,breaks,fig=figs[5,],side=2);
par(op);
}
# --- end of file: BAMMtools/R/cohorts.R ---
############################################
# Internal function called by plot.bammdata(...)
#
#
colorMap <- function(x, pal, breaks, logcolor = FALSE, color.interval = NULL) {
dpal <- get("palettes", envir = .colorEnv);
NCOLORS <- length(breaks) - 1;
if (length(pal) >= 3) {
colpalette <- colorRampPalette(pal,space='Lab')(NCOLORS);
} else if (pal %in% names(dpal)) {
colpalette <- colorRampPalette(dpal[[pal]],space='Lab')(NCOLORS);
} else if (tolower(pal) == "temperature") {
colpalette <- gplots::rich.colors(NCOLORS);
} else if (tolower(pal) == "terrain") {
colpalette <- terrain.colors(NCOLORS);
} else {
stop("Unrecognized color palette specification");
}
if (!is.null(color.interval)) {
if (length(color.interval) != 2) {
stop("Color interval must have 2 values.");
}
if (is.na(color.interval[1])) {
color.interval[1] <- min(breaks);
}
if (is.na(color.interval[2])) {
color.interval[2] <- max(breaks);
}
#if color interval is contained within the range of supplied rates
if (color.interval[1] >= min(breaks) & color.interval[2] <= max(breaks)) {
goodbreaks <- intersect(which(breaks > color.interval[1]), which(breaks < color.interval[2]));
topcolor <- colpalette[length(colpalette)];
bottomcolor <- colpalette[1];
NCOLORS <- length(goodbreaks) - 1;
if (length(pal) >= 3) {
colpalette2 <- colorRampPalette(pal,space='Lab')(NCOLORS);
} else if (pal %in% names(dpal)) {
colpalette2 <- colorRampPalette(dpal[[pal]],space='Lab')(NCOLORS);
} else if (tolower(pal) == "temperature") {
colpalette2 <- gplots::rich.colors(NCOLORS);
} else if (tolower(pal) == "terrain") {
colpalette2 <- terrain.colors(NCOLORS);
}
#replace colors in original color ramp
colpalette[goodbreaks[1:(length(goodbreaks) - 1)]] <- colpalette2;
colpalette[1:(goodbreaks[1] - 1)] <- bottomcolor;
colpalette[goodbreaks[length(goodbreaks)]:length(colpalette)] <- topcolor;
}
#if color interval exceeds the range of color breaks
if (color.interval[1] < min(breaks) | color.interval[2] > max(breaks)) {
#generate new set of breaks for full range of rate values
newbreaks <- seq(from = min(color.interval[1], min(breaks)), to = max(color.interval[2], max(breaks)), by = (breaks[2] - breaks[1]));
newbreaks <- seq(from = min(color.interval[1], min(breaks)), to = max(color.interval[2], max(breaks)), length.out = length(newbreaks));
# which breaks fall within the defined color.interval
goodbreaks <- intersect(which(newbreaks >= color.interval[1]), which(newbreaks <= color.interval[2]));
#generate colors for new breaks
NCOLORS <- length(goodbreaks) - 1;
if (length(pal) >= 3) {
colpalette2 <- colorRampPalette(pal,space='Lab')(NCOLORS);
} else if (pal %in% names(dpal)) {
colpalette2 <- colorRampPalette(dpal[[pal]],space='Lab')(NCOLORS);
} else if (tolower(pal) == "temperature") {
colpalette2 <- gplots::rich.colors(NCOLORS);
} else if (tolower(pal) == "terrain") {
colpalette2 <- terrain.colors(NCOLORS);
}
# create new color palette that contains the color ramp
# within the color.interval
# Fill in other slots with repeats of min or max color
colpalette <- character(length(newbreaks) - 1);
			colpalette[goodbreaks[1:(length(goodbreaks) - 1)]] <- colpalette2;
breaks <- newbreaks;
if (any(colpalette == '')) {
NAcol <- which(colpalette == '');
nonNAcol <- which(colpalette != '');
colFill <- sapply(NAcol, function(y) which.min(abs(y - nonNAcol)));
colpalette[NAcol] <- colpalette[nonNAcol[colFill]];
}
}
}
kde <- density(x, from=min(x), to=max(x));
colset <- numeric(length(x));
coldens <- numeric(length(kde$x));
for (i in 2:length(breaks)) {
if (i == 2) {
colset[x < breaks[2]] <- colpalette[1];
coldens[kde$x < breaks[2]] <- colpalette[1];
}
else if (i == length(breaks)) {
colset[x >= breaks[length(breaks)-1]] <- colpalette[length(breaks)-1];
coldens[kde$x >= breaks[length(breaks)-1]] <- colpalette[length(breaks)-1];
}
else {
colset[x >= breaks[i-1] & x < breaks[i]] <- colpalette[i-1];
coldens[kde$x >= breaks[i-1] & kde$x < breaks[i]] <- colpalette[i-1];
}
}
coldens <- data.frame(kde$x,kde$y,coldens,stringsAsFactors=FALSE);
return(list(cols = colset, colsdensity = coldens, breaks = breaks, colpalette = colpalette));
}
# colorMap = function(x, pal, NCOLORS)
# {
# dpal = c('BrBG','PiYG','PuOr','RdBu','RdGy','RdYlBu','RdYlGn','Spectral');
# colset = numeric(length(x));
# if(length(pal) == 3)
# {
# colpalette = colorRampPalette(pal,space='Lab')(NCOLORS);
# }
# else if(pal %in% dpal)
# {
# colpalette = colorRampPalette(rev(brewer.pal(3,pal)),space='Lab')(NCOLORS);
# }
# else if(pal == 'temperature')
# {
# colpalette = gplots::rich.colors(NCOLORS);
# }
# bks = quantile(x, seq(0,1,length.out=(NCOLORS+1)));
# for(i in 2:length(bks))
# {
# if(i == 2)
# {
# colset[x < bks[2]] = colpalette[1];
# }
# else if(i == length(bks))
# {
# colset[x >= bks[length(bks)-1]] = colpalette[length(bks)-1];
# }
# else
# {
# colset[x >= bks[i-1] & x < bks[i]] = colpalette[i-1];
# }
# }
# return(colset);
# }
# --- end of file: BAMMtools/R/colorMap.R ---
##' @title Compute Bayes Factors
##'
##' @description Computes pairwise Bayes factors for a set of
##' macroevolutionary models sampled using \code{BAMM}, using MCMC
##' simulation output.
##'
##' @param postdata Filename for the MCMC output file from a \code{BAMM} run.
##' Alternatively, a dataframe containing this information.
##' @param expectedNumberOfShifts Expected number of shifts under the prior.
##' @param burnin What fraction of samples to discard from postdata as burnin?
##' @param \dots Additional arguments to computeBayesFactors.
##'
##' @details This function returns a matrix of pairwise Bayes factors, where
##' the Bayes factor is the ratio of marginal likelihoods between two
##' models \ifelse{html}{\out{M<sub>i</sub>}}{\eqn{M_i}} and \ifelse{html}{\out{M<sub>j</sub>}}{\eqn{M_j}}. Numerator models are given as rows, and
##' denominator models as columns. Row names and column names give the
##' number of shifts in the corresponding model. Suppose you have an
##' output matrix with row and column names 0:3 (0, 1, 2, 3). Model 0 is a
##' model with just a single process (starting at the root), and no
##' among-lineage rate heterogeneity.
##'
##' If \code{computeBayesFactors} gives a matrix \code{mm}, and
##' \code{mm[2,1]} is 10.0, this implies Bayes factor evidence of 10 in
##' favor of the 2nd row model (a model with 1 process; e.g.,
##' \code{rownames(mm)[2]}) over the first column model (a model with a
##' single process).
##'
##' This function will only compute Bayes factors between models which
##' were actually sampled during simulation of the posterior. Hence, if
##' a model has such low probability that it is never visited by
##' \code{BAMM} during the simulation of the posterior, it will be
##' impossible to estimate its posterior probability (and thus, you will
##' get no Bayes factors involving this particular model). This is likely
##' to change in the future with more robust methods for estimating
##' posterior probabilities in the tails of the distribution.
##'
##' @return A matrix of pairwise Bayes factors between models.
##'
##' @author Dan Rabosky
##'
##' @examples
##' data(mcmc.whales)
##' computeBayesFactors(mcmc.whales, expectedNumberOfShifts = 1, burnin = 0.1)
##' @keywords models
##' @export
computeBayesFactors <- function(postdata, expectedNumberOfShifts, burnin = 0.1, ...){
if (hasArg("strict") | hasArg("threshpost") | hasArg("threshprior") | hasArg("nbprior") | hasArg("priordata") | hasArg("modelset")){
cat("Error - you have specified some argument names that have been deprecated\n");
cat("in this version of BAMMtools. Check the help file on this function\n");
cat("to see what has changed\n\n");
stop();
}
if (inherits(postdata, 'character')){
dpost <- read.csv(postdata, header=T);
} else if (inherits(postdata, 'data.frame')){
dpost <- postdata;
} else{
stop("invalid postdata argument (wrong class) in computeBayesFactors\n");
}
dpost <- dpost[floor(burnin*nrow(dpost)):nrow(dpost), ];
tx <- table(dpost$N_shifts) / nrow(dpost);
post <- data.frame(N_shifts=as.numeric(names(tx)), prob=as.numeric(tx));
ux <- as.numeric(names(tx))
if (length(ux) <= 1){
cat("Not enough models sampled in simulation of posterior\n")
cat("You must have valid posterior probabilities for at least 2 models\n")
cat("to use this function'\n")
}
pp <- (1 / (1 + expectedNumberOfShifts))
prior <- dgeom(ux, prob = pp)
names(prior) <- ux
mm <- matrix(NA, nrow=length(prior), ncol=length(prior));
rownames(mm) <- names(prior);
colnames(mm) <- names(prior);
for (i in 1:length(prior)){
mi <- ux[i];
for (j in 1:length(prior)){
mj <- ux[j];
prior_odds <- prior[i] / prior[j]
post_odds <- post$prob[post$N_shifts == mi] / post$prob[post$N_shifts == mj];
mm[i,j] <- post_odds * (1 / prior_odds);
}
}
return(mm);
}
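# Reading the output (sketch, extending the example in the documentation;
# not run at source time). Numerator models are rows and denominator models
# are columns, so the first column gives Bayes factors of each sampled model
# against the simplest sampled model.
if (FALSE) {
	data(mcmc.whales)
	mm <- computeBayesFactors(mcmc.whales, expectedNumberOfShifts = 1, burnin = 0.1)
	mm[, 1]   # values > 1 favor the row model over the simplest sampled model
}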
# --- end of file: BAMMtools/R/computeBayesFactors.R ---
# Feb 28 2014
##' @title Credible set of macroevolutionary rate shift configurations from
##' \code{BAMM} results
##'
##' @description Computes the 95\% (or any other \%) credible set of
##' macroevolutionary rate shift configurations from a \code{bammdata}
##' object. These results can be analyzed further and/or plotted.
##'
##' @param ephy An object of class \code{bammdata}.
##' @param expectedNumberOfShifts The expected number of rate shifts under the
##' prior.
##' @param threshold The marginal posterior-to-prior odds ratio for a rate
##' shift on a specific branch, used to distinguish core and non-core
##' shifts.
##' @param set.limit The desired limit to the credible set. A value of 0.95
##' will return the 95\% credible set of shift configurations.
##' @param \dots Other arguments to \code{credibleShiftSet}.
##'
##' @details Computes the 95\% credible set (or XX\% credible set, depending
##' on \code{set.limit}) of diversification shift configurations sampled
##' using \code{BAMM}. This is analogous to a credible set of phylogenetic
##' tree topologies from a Bayesian phylogenetic analysis.
##'
##' To understand how this calculation is performed, one must first
##' distinguish between "core" and "non-core" rate shifts. A "core shift"
##' is a rate shift with a marginal probability that is substantially
##' elevated above the probability expected on the basis of the prior
##' alone. With \code{BAMM}, every branch in a phylogenetic tree is
##' associated with some non-zero prior probability of a rate shift.
##' Typically this is a very low per-branch shift probability (this prior
##' is determined by the value of the "poissonRatePrior" parameter in a
##' \code{BAMM} analysis).
##'
##' If we compute distinct shift configurations with every sampled shift
##' (including those shifts with very low marginal probabilities), the
##' number of distinct shift configurations will be overwhelmingly high.
##' However, most of these configurations include shifts with marginal
##' probabilities that are expected even under the prior alone. Hence,
##' using these shifts to identify distinct shift configurations simply
##' generates noise and isn't particularly useful.
##'
##' The solution adopted in \code{BAMMtools} is, for each branch in the
##' phylogeny, to compute both the posterior and prior probabilities of a
##' rate shift occurring. The ratio of these probabilities is a
##' branch-specific marginal odds ratio: it is the marginal posterior
##' frequency of one or more rate shifts normalized by the corresponding
##' prior probability. Hence, any branch with a marginal odds ratio of
##' 1.0 is one where the observed (posterior) odds of a rate shift are no
##' different from the prior odds. A value of 10 implies that the
##' posterior probability is 10 times the prior probability.
##'
##' The user of \code{credibleShiftSet} must specify a \code{threshold}
##' argument. This is simply a cutoff value for identifying "important"
##' shifts for the purposes of identifying distinct shift configurations.
##' This does not imply that it is identifying "significant" shifts. See
##' the online documentation on this topic available at
##' \url{http://bamm-project.org} for more information. If you specify
##' \code{threshold = 5} as an argument to \code{credibleShiftSet}, the
##' function will ignore all branches with marginal odds ratios less than
##' 5 during the enumeration of topologically distinct shift
##' configurations. Only shifts with marginal odds ratios greater than or
##' equal to \code{threshold} will be treated as core shifts for the
##' purposes of identifying distinct shift configurations.
##'
##' For each shift configuration in the credible set, this function will
##' compute the average diversification parameters. For example, the most
##' frequent shift configuration (the maximum a posteriori shift
##' configuration) might have 3 shifts, and 150 samples from your
##' posterior (within the \code{bammdata} object) might show this shift
##' configuration. However, the parameters associated with each of these
##' shift configurations (the actual evolutionary rate parameters) might
##' be different for every sample. This function returns the mean set of
##' rate parameters for each shift configuration, averaging over all
##' samples from the posterior that can be assigned to a particular shift
##' configuration.
##'
##' @return A class \code{credibleshiftset} object with many components. Most
##' components are an ordered list of length L, where L is the number of
##' distinct shift configurations in the credible set. The first list
##' element in each case corresponds to the shift configuration with the
##' maximum a posteriori probability.
##'
##' \item{frequency}{A vector of frequencies of shift configurations,
##' including those that account for \code{set.limit} (typically, 0.95
##' or 0.99) of the probability of the data. The index of the i'th
##' element of this vector is the i'th most probable shift
##' configuration (excepting ties).}
##' \item{shiftnodes}{A list of the "core" rate shifts (marginal
##' probability > threshold) that occurred in each distinct shift
##' configuration in the credible set. The i'th vector from this list
##' gives the core shift nodes for the i'th shift configuration. They
##' are sorted by frequency, so \code{x$shiftnodes[[1]]} gives the
##' shift nodes that occurred together in the shift configuration with
##' the highest posterior probability.}
##' \item{indices}{A list of vectors containing the indices of samples in
##' the \code{bammdata} object that are assigned to a given shift
##' configuration. All are sorted by frequency.}
##' \item{cumulative}{Like \code{frequency}, but contains the cumulative
##' frequencies.}
##'     \item{threshold}{The marginal posterior-to-prior odds ratio for rate
##'         shifts on branches used during enumeration of distinct shift
##'         configurations.}
##' \item{number.distinct}{Number of distinct shift configurations in the
##' credible set.}
##'     \item{set.limit}{The credible set limit used (e.g., 0.9 or 0.95).}
##'     \item{coreshifts}{A vector of node numbers corresponding to the core
##'         shifts. All of these nodes have a marginal posterior-to-prior
##'         odds ratio of at least \code{threshold} supporting a rate shift.}
##'
##' In addition, a number of components that are defined similarly in
##' class \code{phylo} or class \code{bammdata} objects:
##'
##' \item{edge}{See documentation for class \code{phylo} in package ape.}
##' \item{Nnode}{See documentation for class \code{phylo} in package ape.}
##' \item{tip.label}{See documentation for class \code{phylo} in package
##' ape.}
##' \item{edge.length}{See documentation for class \code{phylo} in package
##' ape.}
##' \item{begin}{The beginning time of each branch in absolute time (the
##' root is set to time zero)}
##' \item{end}{The ending time of each branch in absolute time.}
##' \item{numberEvents}{An integer vector with the number of core events
##' contained in the \code{bammdata} object for each shift
##' configuration in the credible set. The length of this vector is
##' equal to the number of distinct shift configurations in the
##' credible set.}
##' \item{eventData}{A list of dataframes. Each element holds the average
##' rate and location parameters for all samples from the posterior
##' that were assigned to a particular distinct shift configuration.
##' Each row in a dataframe holds the data for a single event. Data
##' associated with an event are: \code{node} - a node number. This
##' identifies the branch where the event originates. \code{time} -
##' this is the absolute time on that branch where the event
##' originates (with the root at time 0). \code{lam1} - an initial
##' rate of speciation or trait evolution.\code{lam2} - a decay/growth
##' parameter. \code{mu1} - an initial rate of extinction. \code{mu2}
##' - a decay/growth parameter. \code{index} - a unique integer
##' associated with the event. See 'Details' in the documentation for
##' \code{\link{getEventData}} for more information.}
##' \item{eventVectors}{A list of integer vectors. Each element is for a
##' single shift configuration in the posterior. For each branch in
##' the \code{bammdata} object, gives the index of the event governing
##' the (tipwards) end of the branch. Branches are ordered increasing
##' here and elsewhere.}
##' \item{eventBranchSegs}{A list of matrices. Each element of the list is
##' a single distinct shift configuration. Each matrix has four
##' columns: \code{Column 1} identifies a node in \code{phy}.
##' \code{Column 2} identifies the beginning time of the branch or
##' segment of the branch that subtends the node in \code{Column 1}.
##' \code{Column 3} identifies the ending time of the branch or
##' segment of the branch that subtends the node in \code{Column 1}.
##' \code{Column 4} identifies the index of the event that occurs
##' along the branch or segment of the branch that subtends the node
##' in \code{Column 1}.}
##' \item{tipStates}{A list of integer vectors. Each element is a single
##' distinct shift configuration. For each tip the index of the event
##' that occurs along the branch subtending the tip. Tips are ordered
##' increasing here and elsewhere.}
##' \item{tipLambda}{A list of numeric vectors. Each element is a single
##' distinct shift configuration. For each tip is the average rate of
##' speciation or trait evolution at the end of the terminal branch
##' subtending that tip (averaged over all samples that are assignable
##' to this particular distinct shift configuration).}
##' \item{tipMu}{A list of numeric vectors. Each element is a single
##' distinct shift configuration. For each tip the rate of extinction
##' at the end of the terminal branch subtending that tip. Meaningless
##' if working with \code{BAMM} trait results.}
##' \item{type}{A character string. Either "diversification" or "trait"
##' depending on your \code{BAMM} analysis.}
##' \item{downseq}{An integer vector holding the nodes of \code{phy}. The
##' order corresponds to the order in which nodes are visited by a
##' pre-order tree traversal.}
##' \item{lastvisit}{An integer vector giving the index of the last node
##' visited by the node in the corresponding position in
##' \code{downseq}. \code{downseq} and \code{lastvisit} can be used to
##' quickly retrieve the descendants of any node. e.g. the descendants
##' of node 89 can be found by
##'         \code{downseq[which(downseq==89):which(downseq==lastvisit[89])]}.}
##'
##' @author Dan Rabosky
##'
##' @seealso \code{\link{distinctShiftConfigurations}},
##' \code{\link{plot.bammshifts}}, \code{\link{summary.credibleshiftset}},
##' \code{\link{plot.credibleshiftset}},
##' \code{\link{getBranchShiftPriors}}
##'
##' @references \url{http://bamm-project.org/}
##'
##' @examples
##' data(events.whales, whales)
##' ed <- getEventData(whales, events.whales, burnin=0.1, nsamples=500)
##'
##' cset <- credibleShiftSet(ed, expectedNumberOfShifts = 1, threshold = 5)
##'
##' # Here is the total number of samples in the posterior:
##' length(ed$eventData)
##'
##' # And here is the number of distinct shift configurations:
##' cset$number.distinct
##'
##' # here are the summary statistics:
##' summary(cset)
##'
##' # Accessing the raw frequency vector for the credible set:
##' cset$frequency
##'
##' #The cumulative frequencies:
##' cset$cumulative
##'
##' # The first element is the shift configuration with the maximum
##' # a posteriori probability. We can identify all the samples from
##' # posterior that show this shift configuration:
##'
##' cset$indices[[1]]
##'
##' # Now we can plot the credible set:
##' plot(cset, plotmax=4)
##' @keywords models
##' @export
credibleShiftSet <- function(ephy, expectedNumberOfShifts, threshold = 5, set.limit = 0.95, ...){
dsc <- distinctShiftConfigurations(ephy, expectedNumberOfShifts, threshold);
cfreq <- cumsum(dsc$frequency);
cut <- min(which(cfreq >= set.limit));
shiftnodes <- dsc$shifts[1:cut];
indices <- dsc$samplesets[1:cut];
frequency <- dsc$frequency[1:cut];
cumulative <- cumsum(dsc$frequency)[1:cut];
ephy$marg.probs <- dsc$marg.probs;
ephy$shiftnodes <- shiftnodes;
ephy$indices <- indices;
ephy$frequency <- frequency;
ephy$cumulative <- cumulative;
ephy$coreshifts <- dsc$coreshifts;
ephy$threshold <- threshold;
ephy$set.limit <- set.limit;
ephy$number.distinct <- length(indices);
class(ephy) <- 'credibleshiftset';
return(ephy);
}
| /scratch/gouwar.j/cran-all/cranData/BAMMtools/R/credibleShiftSet.R |
#############################################################
#
# cumulativeShiftProbsTree(....)
#
# Args: ephy = object of class 'bammdata'
#
# Returns: a phylogenetic tree, but where each
# branch length (edge length) is equal to the
# cumulative probability of a shift somewhere
# between the focal branch and the root of the
#			tree. The branch lengths themselves do not tell
#			you where the shifts occur, but they do tell
#			you which clades/lineages have diversification
#			dynamics that are decoupled from the root of the tree.
#
##' @export
##' @rdname ShiftProbsTree
cumulativeShiftProbsTree <- function(ephy) {
if (!inherits(ephy, 'bammdata')) {
stop("Object ephy must be of class bammdata\n");
}
shiftvec <- numeric(length(ephy$edge.length));
rootnode <- length(ephy$tip.label) + 1;
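	# Any branch whose governing event is not the root process (event
	# index 1) has, by construction, at least one shift between it and
	# the root. Accumulate, per branch, the fraction of posterior
	# samples for which this holds.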
for (i in 1:length(ephy$eventData)) {
snodes <- unique(ephy$eventBranchSegs[[i]][,1][ephy$eventBranchSegs[[i]][,4] != 1]);
hasShift <- ephy$edge[,2] %in% snodes;
shiftvec[hasShift] <- shiftvec[hasShift] + rep(1, sum(hasShift));
}
shiftvec <- shiftvec / length(ephy$eventData);
newphy <- as.phylo.bammdata(ephy);
newphy$edge.length <- shiftvec;
return(newphy);
}
| /scratch/gouwar.j/cran-all/cranData/BAMMtools/R/cumulativeShiftProbsTree.R |
# Drop all shift nodes whose branch-specific marginal odds ratios fall
# below the threshold; test whether each resulting shift set is unique
# and, if so, add it to the list of distinct configurations.
# Returns:
# $marg.probs = marginal probs for nodes
# $marginal_odds_ratio = branch-specific (marginal) posterior:prior odds ratios associated with 1 or more shifts
# $shifts = unique shift sets
# $samplesets = list of sample indices that reduce to each of the unique shift sets
# $frequency = vector of frequencies of each shift configuration
# $threshold = (marginal) posterior:prior odds ratio threshold for shifts
#
# Results are sorted by frequency.
# $frequency[1] gives the most common shift config sampled
# $shifts[[1]] gives the corresponding node indices for that configuration
# $samplesets[[1]] gives the indices of samples with this configuration
##' @title Identify distinct rate shift configurations
##'
##' @description Identify topologically distinct rate shift configurations
##' that were sampled with \code{BAMM}, and assign each sample in the
##' posterior to one of the distinct shift configurations.
##'
##' @param ephy An object of class \code{bammdata}.
##' @param expectedNumberOfShifts The expected number of rate shifts under the
##' prior.
##' @param threshold Threshold value for marginal posterior-to-prior odds
##' ratios, used to identify branches with elevated shift probabilities
##' relative to prior (core vs non-core shifts).
##' @param \dots Other arguments to distinctShiftConfigurations (possibly
##' deprecated args).
##'
##' @details See Rabosky et al (2014) and the \code{BAMM} project website for
##' details on the nature of distinct shift configurations and especially
##' the distinction between "core" and "non-core" rate shifts. Note that
##' branches with elevated marginal posterior probabilities relative to
##' the prior (marginal odds ratios) cannot be claimed to have
##' "significant" evidence for a rate shift on the basis of this evidence
##' alone.
##'
##' @return An object of class \code{bammshifts}. This is a list with the
##' following components:
##' \itemize{
##' \item marg.probs: A list of the marginal probability of a shift
##' occurring at each node of the phylogeny for each distinct rate
##' shift configuration.
##'         \item marginal_odds_ratio: Marginal posterior-to-prior odds ratios
##'             for one or more rate shifts on a given branch.
##' \item shifts: A list of the set of shift nodes for each distinct
##' rate configuration.
##' \item samplesets: A list of sample indices that reduce to each of
##' the unique shift sets.
##' \item frequency: A vector of frequencies of each distinct shift
##' configuration.
##' \item coreshifts: A vector of node numbers corresponding to the
##' core shifts. All of these nodes have a marginal odds ratio of
##' at least \code{threshold} supporting a rate shift.
##' \item threshold: A single numeric value giving the marginal
##' posterior:prior odds ratio threshold used during enumeration
##' of distinct shift configurations.
##' }
##' Results are sorted by frequency:
##'
##' $frequency[1] gives the most common shift configuration sampled.
##'
##' $shifts[[1]] gives the corresponding node indices for that
##' configuration.
##'
##' $samplesets[[1]] gives the indices of samples with this configuration.
##'
##' @author Dan Rabosky
##'
##' @seealso \code{\link{plot.bammshifts}}, \code{\link{credibleShiftSet}}
##'
##' @examples
##' data(whales, events.whales)
##' ed <- getEventData(whales, events.whales, burnin=0.25, nsamples=500)
##'
##' sc <- distinctShiftConfigurations(ed, expectedNumberOfShifts = 1,
##' threshold = 5)
##'
##' plot(sc, ed, rank=1)
##' @export
distinctShiftConfigurations <- function(ephy, expectedNumberOfShifts, threshold, ... ) {
or <- marginalOddsRatioBranches(ephy, expectedNumberOfShifts)
mm <- marginalShiftProbsTree(ephy);
goodnodes <- or$edge[,2][or$edge.length >= threshold];
xlist <- list();
for (i in 1:length(ephy$eventData)) {
xlist[[i]] <- intersect(goodnodes, ephy$eventData[[i]]$node);
}
ulist <- list();
treesets <- list();
ulist[[1]] <- xlist[[1]];
treesets[[1]] <- 1;
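	# Single pass over the remaining samples: compare each sample's set
	# of core shift nodes against the running list of unique sets
	# (ulist); on a match, record the sample index in the corresponding
	# element of treesets, otherwise append a new unique set.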
for (i in 2:length(xlist)) {
lx <- length(ulist);
for (k in 1:lx) {
if (areShiftSetsEqual(ulist[[k]], xlist[[i]])){
treesets[[k]] <- c(treesets[[k]], i);
break;
} else {
if (k == length(ulist)){
xlen <- length(ulist);
ulist[[xlen + 1]] <- xlist[[i]];
treesets[[xlen + 1]] <- i;
}
}
}
}
freqs <- unlist(lapply(treesets, length));
freqs <- freqs / sum(freqs);
ord <- order(freqs, decreasing=TRUE);
obj <- list();
obj$marg.probs <- mm$edge.length;
names(obj$marg.probs) <- mm$edge[,2];
obj$marginal_odds_ratio <- or$edge.length;
names(obj$marginal_odds_ratio) <- or$edge[,2];
obj$shifts <- ulist[ord];
obj$samplesets <- treesets[ord];
obj$frequency <- freqs[ord];
obj$coreshifts <- goodnodes;
obj$threshold <- threshold;
class(obj) <- 'bammshifts';
return(obj);
}
| /scratch/gouwar.j/cran-all/cranData/BAMMtools/R/distinctShiftConfigurations.R |
############################################
# dtRates(ephy,tau)
#
# A function to calculate approximations of
# mean instantaneous speciation/extinction rates or
# phenotypic rates along each branch.
#
# Arguments: ephy = a bammdata object
# tau = fraction of tree height for approximation (e.g. 0.01).
# This is the step size over which rates are calculated along
# a branch, 0.01 corresponds to a step size of 1% of tree height.
# ism = index of posterior sample(s). Currently may be NULL or
#			   a vector of integer values. If NULL, the function will use all
#			   posterior samples; otherwise it will use only
#			   the samples corresponding to the indices in ism,
#			   e.g. 50 or 50:100.
#
# Returns: an ephy object with a list appended containing a vector of branch
# rates and the step size used for calculation.
##' @title Calculate macroevolutionary rate changes on a phylogeny from
##' \code{BAMM} output
##'
##' @description \code{dtRates} calculates the mean of the marginal posterior
##' density of the rate of speciation/extinction or trait evolution for
##' small segments along each branch in a phylogeny.
##'
##' @param ephy An object of class \code{bammdata}.
##' @param tau A numeric that specifies the size (as a fraction of tree
##' height) of the segments that each branch will be discretized into.
##' @param ism An integer vector indexing which posterior samples to include
##' in the calculation.
##' @param tmat A logical. If \code{TRUE} the matrix of branch segments is
##' returned.
##'
##' @details \code{dtRates} bins the phylogeny into windows of time and
##' calculates average rates of speciation/extinction or phenotypic
##' evolution along each segment of a branch within a window. The width of
##' each window is determined by \code{tau}. \code{tau} is a fraction of
##' the root to tip distance so a value of \code{tau = 0.01} bins the
##' phylogeny into 100 time windows of equal width.
##'
##' @return A \code{bammdata} object with a new component named "dtrates", which
##' is a list with two or three components:
##' \itemize{
##' \item tau: The parameter value of \code{tau} used in the
##' calculation.
##' \item rates: If \code{ephy$type = "trait"}: a numeric vector with
##' the phenotypic rates of each segment on each branch. If
##' \code{ephy$type = "diversification"}: a list with two
##' components. The first component is a numeric vector of
##' speciation rates. The second component is a numeric vector of
##' extinction rates.
##' \item tmat: A matrix of the starting and ending times of the
##' segments on each branch. Only if \code{tmat = TRUE}.
##' }
##'
##' @note If there are zero length branches in the input tree \code{NA}s will
##' result.
##'
##' @author Mike Grundler
##'
##' @seealso \code{\link{plot.bammdata}}
##'
##' @references \url{http://bamm-project.org/}
##'
##' @examples
##' data(whales, events.whales)
##' ed <- getEventData(whales, events.whales, burnin=0.25, nsamples=500)
##'
##' # use all posterior samples
##' ed <- dtRates(ed, tau=0.01)
##'
##' # use specified range of posterior samples
##' ed <- dtRates(ed, tau=0.01, ism=50:150)
##' @keywords graphics
##' @export
dtRates <- function (ephy, tau, ism = NULL, tmat = FALSE) {
if (!inherits(ephy, "bammdata")) {
stop("Object ephy must be of class bammdata");
}
if (attributes(ephy)$order != "cladewise") {
stop("Function requires tree in 'cladewise' order");
}
ephy$eventBranchSegs <- lapply(ephy$eventBranchSegs, function(x) x[order(x[,1]), ]);
# phy <- as.phylo.bammdata(ephy);
# phy <- getStartStopTimes(phy);
# if (is.ultrametric(phy))
# tH <- max(branching.times(phy))
# else
# tH <- max(NU.branching.times(phy));
tH <- max(ephy$end);
segmat <- segMap(ephy, tau);
#tol = max(1 * 10^-decimals(ephy$eventBranchSegs[[1]][1, 2]),1 * 10^-decimals(ephy$eventBranchSegs[[1]][1, 3]));
tol <- 0.00001;
if (storage.mode(segmat) != "double") stop("Error: wrong storage mode in foreign function call");
if (storage.mode(tol) != "double") stop("Error: wrong storage mode in foreign function call");
if (storage.mode(ephy) != "list") stop("Error: wrong storage mode in foreign function call");
if (is.null(ism)) {
ism <- as.integer(1:length(ephy$eventBranchSegs));
}
else ism <- as.integer(ism);
if (ism[length(ism)] > length(ephy$eventBranchSegs)) {
warning("Sample index out of range");
ism <- as.integer(1:length(ephy$eventBranchSegs));
}
index <- 1:nrow(segmat);
rownames(segmat) <- index;
segmat <- segmat[order(segmat[, 1]), ];
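	# segmat is sorted by node for the C-level calculation below; the
	# original row order is preserved in the rownames so that the
	# computed rates can be mapped back to plotting order afterwards.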
if (ephy$type == "diversification") {
dtrates <- .Call("dtrates", ephy, segmat, tol, ism, 0L, PACKAGE = "BAMMtools");
for (i in 1:2) {
names(dtrates[[i]]) <- rownames(segmat);
dtrates[[i]] <- dtrates[[i]][as.character(index)];
names(dtrates[[i]]) <- NULL;
}
if (sum(is.na(dtrates[[1]]))) {
warning(sprintf("Found %d NA speciation rates. Coercing to zero.", sum(is.na(dtrates[[1]]))));
dtrates[[1]][is.na(dtrates[[1]])] <- 0;
}
if (sum(is.na(dtrates[[2]]))) {
warning(sprintf("Found %d NA extinction rates. Coercing to zero.", sum(is.na(dtrates[[2]]))));
dtrates[[2]][is.na(dtrates[[2]])] <- 0;
}
}
else if (ephy$type == "trait") {
dtrates <- .Call("dtrates", ephy, segmat, tol, ism, 1L, PACKAGE = "BAMMtools");
names(dtrates) <- rownames(segmat);
dtrates <- dtrates[as.character(index)];
names(dtrates) <- NULL;
if (sum(is.na(dtrates))) {
warning(sprintf("Found %d NA phenotypic rates. Coercing to zero.", sum(is.na(dtrates))));
dtrates[is.na(dtrates)] <- 0;
}
}
else {
stop("Unrecognized model type");
}
if (tmat) {
segmat <- segmat[as.character(index),];
ephy$dtrates <- list(tau = tau, rates = dtrates, tmat = segmat);
return(ephy);
}
ephy$dtrates <- list(tau = tau, rates = dtrates);
return(ephy);
}
| /scratch/gouwar.j/cran-all/cranData/BAMMtools/R/dtRates.R |
#############################################################
#
# exponentialRate(....)
#
# Gets a point estimate of the evolutionary rate at time(s) t1 for a
# process with initial rate p1 and exponential shift parameter p2:
# rate = p1 * exp(p2 * t1) when p2 < 0, and
# rate = p1 * (2 - exp(-p2 * t1)) when p2 > 0.
# Not vectorized with Vectorize(): that turns out to be much slower.
exponentialRate <- function(t1, p1, p2) {
tol <- 0.00001;
zero <- which(abs(p2) < tol);
ret <- numeric(length(t1));
ret[zero] <- p1[zero];
nonzero <- which(p2 < -tol | p2 > tol);
p1 <- p1[nonzero];
p2 <- p2[nonzero];
t1 <- t1[nonzero];
ret[nonzero] <- ( p1 * ((p2/abs(p2)) * (1 - exp(-abs(p2)*t1)) + 1) );
return(ret);
}
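
# A minimal sanity check of the two branches of the formula (illustrative
# only; exponentialRate is an internal helper, so these calls assume
# access to the unexported function, e.g. via BAMMtools:::exponentialRate):
#
#   t1 <- c(0, 1, 2)
#   exponentialRate(t1, p1 = rep(0.1, 3), p2 = rep(-0.5, 3)) # 0.1 * exp(-0.5 * t1)
#   exponentialRate(t1, p1 = rep(0.1, 3), p2 = rep(0.5, 3))  # 0.1 * (2 - exp(-0.5 * t1))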
#exponentialRateV <- Vectorize(exponentialRate);
| /scratch/gouwar.j/cran-all/cranData/BAMMtools/R/exponentialRate.R |
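# .fxnDeps: internal developer utility (not exported). Reports which
# functions in a package call `func` and which functions `func` calls,
# and, if thorough = TRUE, runs the examples of each caller and callee
# as a quick smoke test.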
.fxnDeps <- function(func, package = "BAMMtools", verbose=TRUE, thorough=TRUE) {
pstr <- paste("package:",package,sep="");
fl <- lsf.str(pstr);
if (!func%in%fl)
stop(sprintf("Function %s is not in package %s",func,package));
calledby <- sapply(fl, function(x) grep(func, body(x)));
calls <- sapply(fl, grep, body(func));
calledby <- names(calledby[sapply(calledby, length) > 0]);
calls <- names(calls[sapply(calls, length) > 0]);
if (verbose) {
x <- sprintf("%s calls %s", func, calls);
y <- sprintf("%s is called by %s", func, calledby);
cat(y,sep="\n");
cat("----\n");
cat(x,sep="\n");
}
if (thorough) {
cat("----\n");
cat("running examples\n");
e <- .Options[["warn"]];
options(warn = 3);
for (i in calls) {
cat(i,"callee\n",sep=": ");
x <- try(eval(call("example",topic=i,package=package,echo=FALSE,verbose=FALSE,ask=FALSE)),silent=TRUE);
if (inherits(x,"try-error")) {
cat(sprintf("!***! callee %s has no example !***!",i),"\n");
}
}
for (i in calledby) {
cat(i,"caller\n",sep = ": ");
x <- try(eval(call("example",topic=i,package=package,echo=FALSE,verbose=FALSE,ask=FALSE)),silent=TRUE);
if (inherits(x,"try-error")) {
cat(sprintf("!***! caller %s has no example !***!",i),"\n");
}
}
		options(warn = e);
	}
invisible(list(calledby = calledby, calls = calls));
}
| /scratch/gouwar.j/cran-all/cranData/BAMMtools/R/fxnDeps.R |
##' @title Generate control file for \code{BAMM}
##'
##' @description Generates a template diversification or trait control file
##' for \code{BAMM}, while allowing the user to specify parameter values.
##'
##' @param file Destination file name with or without path.
##' @param type Character, either \dQuote{\code{diversification}} or
##' \dQuote{\code{trait}}, depending on the desired \code{BAMM} analysis.
##' @param params List of parameters, see \code{Details}.
##'
##' @details The user can supply parameters as a list, where the name of the
##' list item is the name of the parameter as it appears in the control
##' file, and the value of the list item is what will be placed in the
##' 	control file.
##'
##' If a parameter is specified by the user, it will automatically be
##' uncommented if it was commented in the template.
##'
##' @author Pascal Title
##'
##' @references \url{http://bamm-project.org/}
##'
##' @examples
##' \dontrun{
##' #Produce a blank template control file
##' generateControlFile(file = 'traitcontrol.txt', type='trait')
##'
##' #Produce a customized control file
##' data(whales)
##'
##' #get bamm priors to supply to control file
##' priors <- setBAMMpriors(whales, outfile = NULL)
##'
##' generateControlFile(file = 'divcontrol.txt', params = list(
##' treefile = 'whales.tre',
##' globalSamplingFraction = '1',
##' numberOfGenerations = '100000',
##' overwrite = '1',
##' lambdaInitPrior = as.numeric(priors['lambdaInitPrior']),
##' lambdaShiftPrior = as.numeric(priors['lambdaShiftPrior']),
##' muInitPrior = as.numeric(priors['muInitPrior']),
##' expectedNumberOfShifts = '1'))
##' }
##' @export
generateControlFile <- function(file = "controlfile.txt", type = "diversification", params = NULL) {
templates <- list(diversification =
'# BAMM configuration file for speciation/extinction analysis
# ==========================================================
#
# Format
# ------
#
# - Each option is specified as: option_name = option_value
# - Comments start with # and go to the end of the line
# - True is specified with "1" and False with "0"
################################################################################
# GENERAL SETUP AND DATA INPUT
################################################################################
modeltype = speciationextinction
# Specify "speciationextinction" or "trait" analysis
treefile = %%%%
# File name of the phylogenetic tree to be analyzed
runInfoFilename = run_info.txt
# File name to output general information about this run
sampleFromPriorOnly = 0
# Whether to perform analysis sampling from prior only (no likelihoods computed)
runMCMC = 1
# Whether to perform the MCMC simulation. If runMCMC = 0, the program will only
# check whether the data file can be read and the initial likelihood computed
loadEventData = 0
# Whether to load a previous event data file
eventDataInfile = event_data_in.txt
# File name of the event data file to load, used only if loadEventData = 1
initializeModel = 1
# Whether to initialize (but not run) the MCMC. If initializeModel = 0, the
# program will only ensure that the data files (e.g., treefile) can be read
useGlobalSamplingProbability = 1
# Whether to use a "global" sampling probability. If False (0), expects a file
# name for species-specific sampling probabilities (see sampleProbsFilename)
globalSamplingFraction = 1.0
# The sampling probability. If useGlobalSamplingProbability = 0, this is ignored
# and BAMM looks for a file name with species-specific sampling fractions
sampleProbsFilename = sample_probs.txt
# File name containing species-specific sampling fractions
# seed = 12345
# Seed for the random number generator.
# If not specified (or is -1), a seed is obtained from the system clock
overwrite = 0
# If True (1), the program will overwrite any output files in the current
# directory (if present)
################################################################################
# PRIORS
################################################################################
expectedNumberOfShifts = 1.0
# prior on the number of shifts in diversification
# Suggested values:
# expectedNumberOfShifts = 1.0 for small trees (< 500 tips)
# expectedNumberOfShifts = 10 or even 50 for large trees (> 5000 tips)
lambdaInitPrior = 1.0
# Prior (rate parameter of exponential) on the initial lambda value for rate
# regimes
lambdaShiftPrior = 0.05
# Prior (std dev of normal) on lambda shift parameter for rate regimes
# You cannot adjust the mean of this distribution (fixed at zero, which is
# equal to a constant rate diversification process)
muInitPrior = 1.0
# Prior (rate parameter of exponential) on extinction rates
lambdaIsTimeVariablePrior = 1
# Prior (probability) of the time mode being time-variable (vs. time-constant)
################################################################################
# MCMC SIMULATION SETTINGS & OUTPUT OPTIONS
################################################################################
numberOfGenerations = %%%%
# Number of generations to perform MCMC simulation
mcmcOutfile = mcmc_out.txt
# File name for the MCMC output, which only includes summary information about
# MCMC simulation (e.g., log-likelihoods, log-prior, number of processes)
mcmcWriteFreq = 1000
# Frequency in which to write the MCMC output to a file
eventDataOutfile = event_data.txt
# The raw event data (these are the main results). ALL of the results are
# contained in this file, and all branch-specific speciation rates, shift
# positions, marginal distributions etc can be reconstructed from this output.
# See R package BAMMtools for working with this output
eventDataWriteFreq = 1000
# Frequency in which to write the event data to a file
printFreq = 1000
# Frequency in which to print MCMC status to the screen
acceptanceResetFreq = 1000
# Frequency in which to reset the acceptance rate calculation
# The acceptance rate is output to both the MCMC data file and the screen
# outName = BAMM
# Optional name that will be prefixed on all output files (separated with "_")
# If commented out, no prefix will be used
################################################################################
# OPERATORS: MCMC SCALING OPERATORS
################################################################################
updateLambdaInitScale = 2.0
# Scale parameter for updating the initial speciation rate for each process
updateLambdaShiftScale = 0.1
# Scale parameter for the exponential change parameter for speciation
updateMuInitScale = 2.0
# Scale parameter for updating initial extinction rate for each process
updateEventLocationScale = 0.05
# Scale parameter for updating LOCAL moves of events on the tree
# This defines the width of the sliding window proposal
updateEventRateScale = 4.0
# Scale parameter (proportional shrinking/expanding) for updating
# the rate parameter of the Poisson process
################################################################################
# OPERATORS: MCMC MOVE FREQUENCIES
################################################################################
updateRateEventNumber = 0.1
# Relative frequency of MCMC moves that change the number of events
updateRateEventPosition = 1
# Relative frequency of MCMC moves that change the location of an event on the
# tree
updateRateEventRate = 1
# Relative frequency of MCMC moves that change the rate at which events occur
updateRateLambda0 = 1
# Relative frequency of MCMC moves that change the initial speciation rate
# associated with an event
updateRateLambdaShift = 1
# Relative frequency of MCMC moves that change the exponential shift parameter
# of the speciation rate associated with an event
updateRateMu0 = 1
# Relative frequency of MCMC moves that change the extinction rate for a given
# event
updateRateLambdaTimeMode = 0
# Relative frequency of MCMC moves that flip the time mode
# (time-constant <=> time-variable)
localGlobalMoveRatio = 10.0
# Ratio of local to global moves of events
################################################################################
# INITIAL PARAMETER VALUES
################################################################################
lambdaInit0 = 0.032
# Initial speciation rate (at the root of the tree)
lambdaShift0 = 0
# Initial shift parameter for the root process
muInit0 = 0.005
# Initial value of extinction (at the root)
initialNumberEvents = 0
# Initial number of non-root processes
################################################################################
# METROPOLIS COUPLED MCMC
################################################################################
numberOfChains = 4
# Number of Markov chains to run
deltaT = 0.1
# Temperature increment parameter. This value should be > 0
# The temperature for the i-th chain is computed as 1 / [1 + deltaT * (i - 1)]
swapPeriod = 1000
# Number of generations in which to propose a chain swap
chainSwapFileName = chain_swap.txt
# File name in which to output data about each chain swap proposal.
# The format of each line is [generation],[rank_1],[rank_2],[swap_accepted]
# where [generation] is the generation in which the swap proposal was made,
# [rank_1] and [rank_2] are the chains that were chosen, and [swap_accepted] is
# whether the swap was made. The cold chain has a rank of 1.
################################################################################
# NUMERICAL AND OTHER PARAMETERS
################################################################################
minCladeSizeForShift = 1
# Allows you to constrain location of possible rate-change events to occur
# only on branches with at least this many descendant tips. A value of 1
# allows shifts to occur on all branches.
segLength = 0.02
# Controls the "grain" of the likelihood calculations. Approximates the
# continuous-time change in diversification rates by breaking each branch into
# a constant-rate diversification segments, with each segment given a length
# determined by segLength. segLength is in units of the root-to-tip distance of
# the tree. So, if the segLength parameter is 0.01, and the crown age of your
# tree is 50, the "step size" of the constant rate approximation will be 0.5.
# If the value is greater than the branch length (e.g., you have a branch of
# length < 0.5 in the preceding example) BAMM will not break the branch into
# segments but use the mean rate across the entire branch.',
trait = '
# BAMM configuration file for phenotypic analysis
# ===============================================
#
# Format
# ------
#
# - Each option is specified as: option_name = option_value
# - Comments start with # and go to the end of the line
# - True is specified with "1" and False with "0"
################################################################################
# GENERAL SETUP AND DATA INPUT
################################################################################
modeltype = trait
# Specify "speciationextinction" or "trait" analysis
treefile = %%%%
# File name of the phylogenetic tree to be analyzed
traitfile = %%%%
# File name of the phenotypic traits file
runInfoFilename = run_info.txt
# File name to output general information about this run
sampleFromPriorOnly = 0
# Whether to perform analysis sampling from prior only (no likelihoods computed)
runMCMC = 1
# Whether to perform the MCMC simulation. If runMCMC = 0, the program will only
# check whether the data file can be read and the initial likelihood computed
loadEventData = 0
# Whether to load a previous event data file
eventDataInfile = event_data_in.txt
# File name of the event data file to load, used only if loadEventData = 1
initializeModel = 1
# Whether to initialize (but not run) the MCMC. If initializeModel = 0, the
# program will only ensure that the data files (e.g., treefile) can be read
# seed = 12345
# Seed for the random number generator.
# If not specified (or is -1), a seed is obtained from the system clock
overwrite = 0
# If True (1), the program will overwrite any output files in the current
# directory (if present)
################################################################################
# PRIORS
################################################################################
expectedNumberOfShifts = 1.0
# prior on the number of shifts in diversification
# Suggested values:
# expectedNumberOfShifts = 1.0 for small trees (< 500 tips)
# expectedNumberOfShifts = 10 or even 50 for large trees (> 5000 tips)
betaInitPrior = 1.0
# Prior (rate parameter of exponential) on the initial
# phenotypic evolutionary rate associated with regimes
betaShiftPrior = 0.05
# Prior (std dev of normal) on the rate-change parameter
# You cannot adjust the mean of this distribution (fixed at zero, which is
# equal to a constant rate diversification process)
useObservedMinMaxAsTraitPriors = 1
# If True (1), will put a uniform prior density on the distribution
# of ancestral character states, with upper and lower bounds determined
# by the min and max of the observed data
traitPriorMin = 0
# User-defined minimum value for the uniform density on the distribution of
# ancestral character states. Only used if useObservedMinMaxAsTraitPriors = 0.
traitPriorMax = 0
# User-defined maximum value for the uniform density on the distribution of
# ancestral character states. Only used if useObservedMinMaxAsTraitPriors = 0.
betaIsTimeVariablePrior = 1
# Prior (probability) of the time mode being time-variable (vs. time-constant)
################################################################################
# MCMC SIMULATION SETTINGS & OUTPUT OPTIONS
################################################################################
numberOfGenerations = %%%%
# Number of generations to perform MCMC simulation
mcmcOutfile = mcmc_out.txt
# File name for the MCMC output, which only includes summary information about
# MCMC simulation (e.g., log-likelihoods, log-prior, number of processes)
mcmcWriteFreq = 1000
# Frequency in which to write the MCMC output to a file
eventDataOutfile = event_data.txt
# The raw event data (these are the main results). ALL of the results are
# contained in this file, and all branch-specific speciation rates, shift
# positions, marginal distributions etc can be reconstructed from this output.
# See R package BAMMtools for working with this output
eventDataWriteFreq = 1000
# Frequency in which to write the event data to a file
printFreq = 1000
# Frequency in which to print MCMC status to the screen
acceptanceResetFreq = 1000
# Frequency in which to reset the acceptance rate calculation
# The acceptance rate is output to both the MCMC data file and the screen
# outName = BAMM
# Optional name that will be prefixed on all output files (separated with "_")
# If commented out, no prefix will be used
################################################################################
# OPERATORS: MCMC SCALING OPERATORS
################################################################################
updateBetaInitScale = 1
# Scale operator for proportional shrinking-expanding move to update
# initial phenotypic rate for rate regimes
updateBetaShiftScale = 1
# Scale operator for sliding window move to update initial phenotypic rate
updateNodeStateScale = 1
# Scale operator for sliding window move to update ancestral states
# at internal nodes
updateEventLocationScale = 0.05
# Scale parameter for updating LOCAL moves of events on the tree
# This defines the width of the sliding window proposal
updateEventRateScale = 4.0
# Scale parameter (proportional shrinking/expanding) for updating
# the rate parameter of the Poisson process
################################################################################
# OPERATORS: MCMC MOVE FREQUENCIES
################################################################################
updateRateEventNumber = 1
# Relative frequency of MCMC moves that change the number of events
updateRateEventPosition = 1
# Relative frequency of MCMC moves that change the location of an event
# on the tree
updateRateEventRate = 1
# Relative frequency of MCMC moves that change the rate at which events occur
updateRateBeta0 = 1
# Relative frequency of MCMC moves that change the initial phenotypic rate
# associated with an event
updateRateBetaShift = 1
# Relative frequency of MCMC moves that change the exponential shift parameter
# of the phenotypic rate associated with an event
updateRateNodeState = 25
# Relative frequency of MCMC moves that update the value of ancestral
# character states. You have as many ancestral states as you have
# internal nodes in your tree, so there are a lot of parameters:
# you should update this much more often than you update the event-associated
# parameters.
updateRateBetaTimeMode = 0
# Relative frequency of MCMC moves that flip the time mode
# (time-constant <=> time-variable)
localGlobalMoveRatio = 10.0
# Ratio of local to global moves of events
################################################################################
# INITIAL PARAMETER VALUES
################################################################################
betaInit = 0.5
# Initial value of the phenotypic evolutionary process at the root of the tree
betaShiftInit = 0
# Initial value of the exponential change parameter for the phenotypic
# evolutionary process at the root of the tree. A value of zero implies
# time-constant rates
initialNumberEvents = 0
# Initial number of non-root processes
################################################################################
# METROPOLIS COUPLED MCMC
################################################################################
numberOfChains = 4
# Number of Markov chains to run
deltaT = 0.1
# Temperature increment parameter. This value should be > 0
# The temperature for the i-th chain is calculated as 1 / [1 + deltaT * (i - 1)]
swapPeriod = 1000
# Number of generations in which to propose a chain swap
chainSwapFileName = chain_swap.txt
# File name in which to output data about each chain swap proposal.
# The format of each line is [generation],[rank_1],[rank_2],[swap_accepted]
# where [generation] is the generation in which the swap proposal was made,
# [rank_1] and [rank_2] are the chains that were chosen, and [swap_accepted] is
# whether the swap was made. The cold chain has a rank of 1.')
templates <- lapply(templates, function(x) strsplit(x, "\n")[[1]]);
templates <- lapply(templates, function(x) gsub("\t", "", x));
#identify appropriate template
if (type == "diversification") {
template <- templates$diversification;
} else if (type == "trait") {
template <- templates$trait;
} else {
stop("type must be either diversification or trait.");
}
#replace defaults with user-specified parameter values
if (!is.null(params)) {
params <- lapply(params, as.character);
for (i in 1:length(params)) {
paramName <- names(params)[i];
paramName <- paste(paramName, " = ", sep='');
if (!any(grepl(paramName, template))) {
stop(paste(names(params)[i], " parameter not found in template.", sep = ""));
} else {
ind <- which(sapply(template, function(x) grepl(paramName, x)) == TRUE);
				#if multiple lines contain the parameter name, one is likely correct and the others are in comments
if (length(ind) > 1) {
isComment <- sapply(ind, function(x) grepl("#", template[x]), USE.NAMES = FALSE);
ind <- setdiff(ind, ind[which(isComment == TRUE)]);
}
				#if zero or multiple matches remain, something is wrong, so report
				if (length(ind) > 1 | length(ind) == 0) {
					stop(paste(names(params)[i], " matched zero or multiple lines in the template.", sep=""));
}
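				# "=.+$" replaces everything from the equals sign to the end
				# of the line with the user-supplied value.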
template[ind] <- gsub("=.+$", paste("= ", params[i], sep=""), template[ind]);
#if parameter is commented out, uncomment it
template[ind] <- gsub("# ", "", template[ind]);
}
}
}
#write file to disk
write(template, file = file);
}
| /scratch/gouwar.j/cran-all/cranData/BAMMtools/R/generateControlFile.R |
##' @title Get the best (sampled) rate shift configuration from a \code{BAMM}
##' analysis
##'
##' @description Get the rate shift configuration with the maximum a
##' posteriori probability, e.g., the shift configuration that was sampled
##' most frequently with \code{BAMM}.
##'
##' @param x Either a \code{bammdata} object or a \code{credibleshiftset}
##' object.
##' @param expectedNumberOfShifts The expected number of shifts under the
##' prior.
##' @param threshold The marginal posterior-to-prior odds ratio used as a
##' cutoff for distinguishing between "core" and "non-core" rate shifts.
##'
##' @details This function estimates the rate shift configuration with the
##' highest maximum a posteriori (MAP) probability. It returns a
##' \code{bammdata} object with a single sample. This can be plotted with
##' \code{\link{plot.bammdata}}, and individual rate shifts can then
##' be added with \code{\link{addBAMMshifts}}.
##'
##' The parameters of this object are averaged over all samples in the
##' posterior that were assignable to the MAP shift configuration. All
##' non-core shifts have been excluded, such that the only shift
##' information contained in the object is from the "significant" rate
##' shifts, as determined by the relevant marginal posterior-to-prior odds
##' ratio \code{threshold}.
##'
##' You can extract the same information from the credible set of shift
##' configurations. See \code{\link{credibleShiftSet}} for more
##' information.
##'
##' @return A class \code{bammdata} object with a single sample, corresponding
##' to the diversification rate shift configuration with the maximum a
##' posteriori probability. See \code{\link{getEventData}} for details.
##'
##' @author Dan Rabosky
##'
##' @seealso \link{getEventData}, \link{credibleShiftSet},
##' \link{plot.credibleshiftset}, \link{plot.bammdata}
##'
##' @examples
##' data(whales, events.whales)
##' ed <- getEventData(whales, events.whales, burnin=0.1, nsamples=500)
##'
##' # Get prior distribution on shifts-per-branch:
##' bp <- getBranchShiftPriors(whales, expectedNumberOfShifts = 1)
##'
##' # Pass the event data object in to the function:
##' best <- getBestShiftConfiguration(ed, expectedNumberOfShifts = 1,
##' threshold = 5)
##' plot(best, lwd=2)
##' addBAMMshifts(best, cex=2)
##'
##' # Now we can also work with the credible shift set:
##' css <- credibleShiftSet(ed, expectedNumberOfShifts = 1, threshold = 5)
##'
##' summary(css)
##'
##' # examine model-averaged shifts from MAP configuration-
##' # This gives us parameters, times, and associated nodes
##' # of each evolutionary rate regime (note that one of
##' # them corresponds to the root)
##' css$eventData[[1]];
##'
##' # Get bammdata representation of MAP configuration:
##' best <- getBestShiftConfiguration(css, expectedNumberOfShifts = 1,
##' threshold = 5)
##'
##' plot(best)
##' addBAMMshifts(best)
##' @keywords models
##' @export
getBestShiftConfiguration <- function(x, expectedNumberOfShifts , threshold = 5){
if (inherits(x, 'bammdata')) {
x <- credibleShiftSet(x, expectedNumberOfShifts, threshold, set.limit = 0.95);
} else if (inherits(x, 'credibleshiftset')) {
} else {
stop("Argument x must be of class bammdata or credibleshiftset\n");
}
class(x) <- 'bammdata';
subb <- subsetEventData(x, index = x$indices[[1]]);
# Drop all non-core shifts after adding root:
coreshifts <- c((length(x$tip.label) + 1), x$coreshifts);
coreshifts <- intersect(subb$eventData[[1]]$node, coreshifts);
for (i in 1:length(subb$eventData)) {
#subb$eventData[[i]] <- subb$eventData[[i]][subb$eventData[[i]]$node %in% coreshifts,];
		if (i == 1) {
			ff <- subb$eventData[[i]];
		} else {
			ff <- rbind(ff, subb$eventData[[i]]);
		}
}
xn <- numeric(length(coreshifts));
xc <- character(length(coreshifts));
if (x$type == 'diversification') {
dff <- data.frame(generation = xn, leftchild=xc, rightchild=xc, abstime=xn, lambdainit=xn, lambdashift=xn, muinit = xn, mushift = xn, stringsAsFactors=F);
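		# dff mirrors the column layout of a BAMM event_data file so that
		# the averaged shift configuration can be re-read with
		# getEventData() below.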
for (i in 1:length(coreshifts)) {
if (coreshifts[i] <= length(x$tip.label)) {
# Node is terminal:
dset <- c(x$tip.label[coreshifts[i]], NA)
}else{
# node is internal.
tmp <- extract.clade(as.phylo(x), node= coreshifts[i]);
dset <- tmp$tip.label[c(1, length(tmp$tip.label))];
}
tmp2 <- ff[ff$node == coreshifts[i], ];
dff$leftchild[i] <- dset[1];
dff$rightchild[i] <- dset[2];
dff$abstime[i] <- mean(tmp2$time);
dff$lambdainit[i] <- mean(tmp2$lam1);
dff$lambdashift[i] <- mean(tmp2$lam2);
dff$muinit[i] <- mean(tmp2$mu1);
dff$mushift[i] <- mean(tmp2$mu2);
}
best_ed <- getEventData(as.phylo(x), eventdata=dff);
} else if (x$type == 'trait') {
dff <- data.frame(generation = xn, leftchild=xc, rightchild=xc, abstime=xn, betainit=xn, betashift=xn, stringsAsFactors=F);
for (i in 1:length(coreshifts)) {
if (coreshifts[i] <= length(x$tip.label)) {
# Node is terminal:
dset <- c(x$tip.label[coreshifts[i]], NA)
} else {
# node is internal.
tmp <- extract.clade(as.phylo(x), node= coreshifts[i]);
dset <- tmp$tip.label[c(1, length(tmp$tip.label))];
}
tmp2 <- ff[ff$node == coreshifts[i], ];
dff$leftchild[i] <- dset[1];
dff$rightchild[i] <- dset[2];
dff$abstime[i] <- mean(tmp2$time);
dff$betainit[i] <- mean(tmp2$lam1);
dff$betashift[i] <- mean(tmp2$lam2);
}
best_ed <- getEventData(as.phylo(x), eventdata=dff, type = 'trait');
} else {
stop("error in getBestShiftConfiguration; invalid type");
}
return(best_ed);
}
| /scratch/gouwar.j/cran-all/cranData/BAMMtools/R/getBestShiftConfiguration.R |
##' @title Compute prior odds of a rate shift on each branch of a phylogeny
##' from BAMM output
##'
##' @description Computes the prior probability of a rate shift event for each
##' branch. These results are important for identifying topological rate
##' shift locations on phylogenies with marginal probabilities that exceed
##' those predicted under the prior alone.
##'
##' @param phy An object of class \code{phylo}.
##' @param expectedNumberOfShifts The expected number of shifts under the
##' prior.
##'
##' @details This function computes, for each branch, the prior probability
##'     of at least one rate shift, integrating over the prior distribution
##'     on the number of rate shift events. It returns a copy of the
##'     original phylogenetic tree in which each branch length has been
##'     replaced by the prior probability of a rate shift on that branch.
##'
##' The significance of this function is that it lets us explicitly
##' determine which branches have shift probabilities that are elevated
##' relative to the prior expectation.
##'
##' @return A class \code{phylo} with all the components of the original class
##' \code{phylo} object, with the following changes:
##'
##' \item{edge.length}{Branch lengths now represent the prior probability
##' of a rate shift on each branch.}
##'
##' @author Dan Rabosky
##'
##' @seealso \code{\link{distinctShiftConfigurations}},
##' \code{\link{plot.bammshifts}}, \code{\link{summary.credibleshiftset}},
##' \code{\link{plot.credibleshiftset}}, \code{\link{credibleShiftSet}}
##'
##' @references \url{http://bamm-project.org/}
##'
##' @examples
##' data(whales)
##' prior_tree1 <- getBranchShiftPriors(whales, expectedNumberOfShifts = 1)
##' prior_tree10 <- getBranchShiftPriors(whales, expectedNumberOfShifts = 10)
##' # plot prior expectations for branches based on these two counts:
##' plot(prior_tree1$edge.length ~ prior_tree10$edge.length, xlim=c(0,0.05),
##' ylim=c(0,0.05), asp=1)
##' lines(x=c(0,1), y=c(0,1))
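##'
##' # A worked sketch of the underlying calculation for two branches with
##' # relative lengths 0.75 and 0.25 (illustrative numbers only): the prior
##' # probability of one or more shifts on a branch is one minus the
##' # probability that all shifts miss it, averaged over the geometric
##' # prior on the total number of shifts.
##' p <- c(0.75, 0.25)
##' geom_p <- 1 / (1 + 1) # expectedNumberOfShifts = 1
##' sapply(p, function(pb) sum((1 - dbinom(0, 1:1000, pb)) * dgeom(1:1000, geom_p)))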
##' @keywords models
##' @export
getBranchShiftPriors <- function(phy, expectedNumberOfShifts) {
Nmax <- 1000;
geom_p <- 1 / (expectedNumberOfShifts + 1);
prior <- dgeom(1:Nmax, geom_p);
pvec <- phy$edge.length / sum(phy$edge.length);
pp <- numeric(length(phy$edge.length));
for (i in 1:length(prior)){
# probability of getting 0 shifts on branch given ns total
# given the branch lengths etc
# weighted by the probability of that shift category
pp <- pp + (1 - dbinom(0, i, prob=pvec)) * prior[i];
}
obj <- phy;
obj$edge.length <- pp;
return(obj);
}
| /scratch/gouwar.j/cran-all/cranData/BAMMtools/R/getBranchShiftPriors.R |
#############################################################
#
# getCladeRates(....)
#
# mean clade-specific rates
# average of all branch-rates, but weighted by branch length
# node.type: will compute rates only for clade descended from specified node with 'include'
# will compute for all branches excluding a given clade, nodetype = 'exclude'
#
##' @title Compute clade-specific mean rates
##'
##' @description Computes marginal clade-specific rates of speciation,
##' extinction, or (if relevant) trait evolution from \code{BAMM} output.
##'
##' @param ephy An object of class \code{bammdata}.
##' @param node If computing rates for a specific portion of tree, the node
##' subtending the relevant subtree. If multiple nodes are supplied, then
##' the equivalent number of \code{nodetype} must be supplied.
##' @param nodetype Either "include" or "exclude". If \code{nodetype =
##' "include"}, the rates returned by the function will be for the subtree
##' defined by \code{node}. If \code{nodetype = "exclude"}, will compute
##' mean rates for the tree after excluding the subtree defined by
##' \code{node}. If multiple nodes are specified, there must be a
##' \code{nodetype} for each node.
##' @param verbose Logical. If \code{TRUE}, will print the sample index as
##' mean rates are computed for each sample from posterior. Potentially
##' useful for extremely large trees.
##'
##' @details Computes the time-weighted mean evolutionary rate for a given
##' clade. Conversely, one can compute the rate for a given phylogeny
##' while excluding a clade; this operation will give the "background"
##' rate. It is important to understand several aspects of these mean
##' rates. First, rates in the \code{BAMM} framework are not constant
##' through time. Hence, the function computes the mean time-integrated
##' rates across the subtree. Operationally, this is done by integrating
##' the speciation rate with respect to time along each branch in the
##' subtree. These time-integrated rates are then summed, and the sum
##' is divided by the total sum of branch lengths for the subtree.
##'
##' The function computes a rate for each sample in the posterior, and
##' returns a list of rate vectors. Each rate in the corresponding vector
##' is a mean rate for a particular sample from the posterior. Hence, you
##' can think of the return value for this function as an estimate of the
##' marginal distribution of rates for the focal clade. You can compute
##' means, medians, quantiles, etc from these vectors.
##'
##' @return A list with the following components:
##' \itemize{
##' \item lambda: A vector of speciation rates (if applicable), with
##' the i'th rate corresponding to the mean rate from the i'th
##' sample in the posterior.
##' \item mu: A vector of extinction rates (if applicable), with the
##' i'th rate corresponding to the mean rate from the i'th sample
##' in the posterior.
##' \item beta: A vector of phenotypic rates (if applicable), with
##' the i'th rate corresponding to the mean rate from the i'th
##' sample in the posterior.
##' }
##'
##' @author Dan Rabosky
##'
##' @references \url{http://bamm-project.org/}
##'
##' @examples
##' data(events.whales, whales)
##' ed <- getEventData(whales, events.whales, nsamples=500)
##' all_rates <- getCladeRates(ed)
##'
##' mean(all_rates$lambda)
##' mean(all_rates$mu)
##' # joint density of mean speciation and extinction rates:
##' plot(all_rates$mu ~ all_rates$lambda)
##'
##' # clade specific rates: here for Dolphin subtree:
##' dol_rates <- getCladeRates(ed, node=140)
##' mean(dol_rates$lambda)
##' mean(dol_rates$mu)
##'
##' # defining multiple nodes
##' mean(getCladeRates(ed, node=c(132, 140),
##' nodetype=c('include','exclude'))$lambda)
##' @keywords models
##' @export
getCladeRates <- function(ephy, node = NULL, nodetype='include', verbose=FALSE) {
if (!inherits(ephy, 'bammdata')) {
stop("Object ephy must be of class bammdata\n");
}
if (is.null(node)) {
nodeset <- ephy$edge[,2];
} else if (!is.null(node[1]) & nodetype[1] == 'include' & length(node) == 1) {
nodeset <- getDesc(ephy, node)$desc_set;
} else if (!is.null(node[1]) & nodetype[1] == 'exclude' & length(node) == 1) {
nodeset <- setdiff(ephy$edge[,2], getDesc(ephy, node)$desc_set);
} else if (!is.null(node[1]) & length(nodetype) == length(node) & length(node) > 1) {
nodesets <- lapply(node, function(x) getDesc(ephy, x)$desc_set);
Drop <- which(nodetype == 'exclude');
		nodeset_toRemove <- unique(unlist(nodesets[Drop]));
Keep <- which(nodetype == 'include');
nodeset_toKeep <- unique(unlist(nodesets[Keep]));
nodeset <- setdiff(nodeset_toKeep, nodeset_toRemove);
if (length(nodeset) == 0) {
stop('Error: the combination of nodes and nodetypes has resulted in no remaining nodes!')
}
} else {
stop('Error: Please make sure you have specified a nodetype for every node');
}
lambda_vector <- numeric(length(ephy$eventBranchSegs));
mu_vector <- numeric(length(ephy$eventBranchSegs));
weights <- 'branchlengths'
for (i in 1:length(ephy$eventBranchSegs)) {
if (verbose) {
cat('Processing sample', i, '\n');
}
esegs <- ephy$eventBranchSegs[[i]];
esegs <- esegs[esegs[,1] %in% nodeset, ];
if (is.null(nrow(esegs))){
esegs <- t(as.matrix(esegs))
}
events <- ephy$eventData[[i]];
events <- events[order(events$index), ];
# relative start time of each seg, to event:
relsegmentstart <- esegs[,2] - ephy$eventData[[i]]$time[esegs[,4]];
relsegmentend <- esegs[,3] - ephy$eventData[[i]]$time[esegs[,4]];
lam1 <- ephy$eventData[[i]]$lam1[esegs[,4]];
lam2 <- ephy$eventData[[i]]$lam2[esegs[,4]];
mu1 <- ephy$eventData[[i]]$mu1[esegs[,4]];
mu2 <- ephy$eventData[[i]]$mu2[esegs[,4]];
seglengths <- esegs[,3] - esegs[,2];
wts <- seglengths / sum(seglengths);
lamseg <- timeIntegratedBranchRate(relsegmentstart, relsegmentend, lam1, lam2) / seglengths;
museg <- timeIntegratedBranchRate(relsegmentstart, relsegmentend, mu1, mu2) / seglengths;
lambda_vector[i] <- sum(lamseg * wts);
mu_vector[i] <- sum(museg * wts);
}
if (ephy$type == 'diversification') {
return(list(lambda = lambda_vector, mu = mu_vector));
}
if (ephy$type == 'trait') {
return(list(beta = lambda_vector));
}
}
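# A minimal sketch, not part of the BAMMtools API: the rate vectors returned
# by getCladeRates can be collapsed into a point estimate plus a credible
# interval, as described in the documentation above. The helper name
# 'summarizeCladeRates' is hypothetical.
summarizeCladeRates <- function(ephy, node = NULL, nodetype = 'include', probs = c(0.05, 0.95)) {
	rates <- getCladeRates(ephy, node = node, nodetype = nodetype);
	# each element (lambda, mu, or beta) holds one mean rate per posterior sample
	lapply(rates, function(x) c(mean = mean(x), quantile(x, probs = probs)));
}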
# --- end of R/getCladeRates.R ---
#############################################################
#
# getCohortMatrix(....)
#
#	Each entry [i, j] of this matrix represents the expected
#	probability that a pair of tips i and j will have the same
#	rate parameters under the BAMM model.
#
# Should modify this to allow exponential, spherical,
# and other possible correlation structures.
# Need to make a corStruct class that works with this
# for GLS analyses
# getCohortMatrix <- function(ephy) {
# if (!'bammdata' %in% class(ephy)) {
# stop("Object ephy must be of class bammdata\n");
# }
# TOL <- 0.0001;
# corMat <- matrix(0, nrow=length(ephy$tip.label), ncol=length(ephy$tip.label));
# n <- length(ephy$numberEvents);
# for (i in 1:length(ephy$tipStates)) {
# dd <- dist(ephy$tipStates[[i]]);
# cmat <- as.matrix(dd);
# corMat <- corMat + (cmat < TOL)/n;
# }
# #rownames(corMat) <- ephy$tip.label;
# #colnames(corMat) <- ephy$tip.label;
# dimnames(corMat)[1:2] <- list(ephy$tip.label);
# #return(corMat/length(ephy$numberEvents));
# return(corMat);
# }
##' @title Compute the pairwise correlation in rate regimes between all tips
##' in a \code{bammdata} object
##'
##' @description Takes a \code{bammdata} object and computes the pairwise
##' correlation in evolutionary rate regimes between all tips in the
##' phylogeny. This can be used to identify cohorts of taxa that share
##' common macroevolutionary rate parameters. It can also be used to
##' construct a correlation matrix for GLS analyses using
##' \code{BAMM}-estimated tip rates of speciation, extinction, or
##' phenotypic evolution.
##'
##' @param ephy An object of class \code{bammdata}.
##'
##' @details The cohort matrix is important for interpreting and visualizing
##' macroevolutionary dynamics. Each entry [i, j] of the cohort matrix is
##' the probability that taxon i and taxon j share a common
##' macroevolutionary rate regime. To compute this, we simply tabulate the
##' percentage of samples from the posterior where taxon i and taxon j
##' were placed in the same rate regime. If there is no rate heterogeneity
##' in the dataset (e.g., the data are best explained by a single rate
##' regime), then all species will tend to share the same rate regime and
##' all values of the cohort matrix will approach 1.
##'
##' A value of 0 between any two taxa means that at least one rate shift
##' occurred on the nodal path connecting them in 100\% of samples from
##' the posterior. A value of 0.50 would imply that 50\% of samples from
##' the posterior included a rate shift on the path connecting taxa i and
##' j. See below (Examples) for an illustration of this.
##'
##' @return A numeric matrix of dimension k x k, where k is the number of
##' species in the phylogeny included in the \code{bammdata} object.
##' Species names are included as row names and column names. The matrix
##' is symmetric, such that the values for entry [i , j] will mirror those
##' for [j , i].
##'
##' @author Dan Rabosky
##'
##' @references \url{http://bamm-project.org/}
##'
##' @examples
##' data(whales, events.whales)
##' ed <- getEventData(whales, events.whales, nsamples=500)
##'
##' cormat <- getCohortMatrix(ed)
##'
##' dim(cormat)
##' hist(cormat, breaks=50)
##' @keywords models
##' @export
getCohortMatrix <- function(ephy) {
if (!inherits(ephy, 'bammdata')) {
stop("Object ephy must be of class bammdata\n");
}
tipStates <- unlist(ephy$tipStates);
Ntips <- length(ephy$tip.label);
Nsamples <- length(ephy$tipStates);
mat <- .C("cohort_matrix",as.integer(tipStates), as.integer(Nsamples), as.integer(Ntips), double(Ntips*Ntips), PACKAGE = "BAMMtools")[[4]];
dim(mat) <- c(Ntips, Ntips);
dimnames(mat) <- list(ephy$tip.label, ephy$tip.label);
diag(mat) <- rep(1.0,Ntips);
return(mat);
}
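# A minimal sketch (hypothetical helper, not part of the package API) using
# the cohort matrix described above: list the taxa estimated to share a rate
# regime with a focal tip at or above a posterior probability cutoff.
getCohortForTip <- function(ephy, tip, cutoff = 0.9) {
	cmat <- getCohortMatrix(ephy);
	# rows and columns are named with tip labels, so index the focal row by name
	sort(names(which(cmat[tip, ] >= cutoff)));
}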
# --- end of R/getCohortMatrix.R ---
#############################################################
#
# getDesc(....)
#
#	returns a copy of the tree with a component 'desc_set', which
#	is a vector of all descendant nodes (including the focal node)
#	in downpass sequence order
getDesc <- function(ephy, node)
{
ephy$desc_set <- ephy$downseq[which(ephy$downseq == node):which(ephy$downseq == ephy$lastvisit[node])]
# if (is.null(phy$desc_set)) {
# phy$desc_set <- node;
# }
#
# if (node > length(phy$tip.label)){
# dset <- phy$edge[,2][phy$edge[,1] == node];
# phy$desc_set <- c(phy$desc_set, dset[1]);
# phy <- getDesc(phy, dset[1]);
# phy$desc_set <- c(phy$desc_set, dset[2]);
# phy <- getDesc(phy, dset[2]);
#
# }
#
return(ephy);
}
# --- end of R/getDesc.R ---
##' @title Create \code{bammdata} object from MCMC output
##'
##' @description \code{getEventData} Reads shift configuration data (the
##' "event data" output) from a \code{BAMM} analysis and creates a
##' \code{bammdata} object. The \code{bammdata} object is fundamental
##' for extracting information about macroevolutionary rate variation
##' through time and among lineages.
##'
##' @param phy An object of class \code{phylo} - specifically, the
##' time-calibrated tree that was analyzed with \code{BAMM}.
##' Alternatively, a character string specifying the path to a
##' newick-formatted tree.
##' @param eventdata A character string specifying the path to a \code{BAMM}
##' event-data file. Alternatively, an object of class \code{data.frame}
##' that includes the event data from a \code{BAMM} run.
##' @param burnin A numeric indicating the fraction of posterior samples to
##' discard as burn-in.
##' @param nsamples An integer indicating the number of posterior samples to
##' include in the \code{bammdata} object. May be \code{NULL}.
##' @param verbose A logical. If \code{TRUE}, progress is printed to the
##' console. Defaults to \code{FALSE}.
##' @param type A character string. Either "diversification" or "trait"
##' depending on your \code{BAMM} analysis.
##'
##' @details In the \code{BAMM} framework, an "event" defines a
##' macroevolutionary process of diversification or trait evolution. Every
##' sample from the posterior includes at least one process, defined by
##' such an "event". If a given sample includes just a single event, then
##' the dynamics of diversification or trait evolution can be described
##' entirely by a single time-constant or time-varying process that begins
##' at the root of the tree. Any sample from the posterior distribution
##' may include a complex mixture of distinct processes. To represent
##' temporal heterogeneity in macroevolutionary rates, \code{BAMM} models
##' a rate \eqn{R}, e.g. speciation, as a function that changes
##' exponentially with time:
##'
##' \eqn{R(t) = R(0)*exp(b*t)}.
##'
##' Here \eqn{R(0)} is the initial rate and \eqn{b} is a parameter
##' determining how quickly that rate grows or decays with time.
##'
##' The \code{eventdata} file (or data frame) is a record of events and
##' associated parameters that were sampled with \code{BAMM} during
##' simulation of the posterior with reversible jump MCMC. This complex,
##' information-rich file is processed into a \code{bammdata} object,
##' which serves as the core data object for numerous downstream analyses.
##' From a \code{bammdata} object, you can summarize rate variation
##' through time, among clades, extract locations of rate shifts,
##' summarize clade-specific rates of speciation and extinction, and more.
##'
##' In general, the user does not need to be concerned with the details of
##' a \code{bammdata} object. The object is used as input by a number of
##' \code{BAMMtools} functions.
##'
##' The parameter \code{nsamples} can be used to reduce the total amount
##' of data included in the raw eventdata output from a \code{BAMM} run.
##' The final \code{bammdata} object will consist of all data for
##' \code{nsamples} from the posterior. These \code{nsamples} are equally
##' spaced after discarding some \code{burnin} fraction as "burn-in". If
##' \code{nsamples} is set to \code{NULL}, the \code{bammdata} object will
##' include all samples in the posterior after discarding the
##' \code{burnin} fraction.
##'
##' @return A list with many components:
##' \itemize{
##' \item edge: See documentation for class \code{phylo} in package ape.
##' \item Nnode: See documentation for class \code{phylo} in package
##' ape.
##' \item tip.label: See documentation for class \code{phylo} in package
##' ape.
##' \item edge.length: See documentation for class \code{phylo} in
##' package ape.
##' \item begin: The beginning time of each branch in absolute time (the
##' root is set to time zero)
##' \item end: The ending time of each branch in absolute time.
##' \item numberEvents: An integer vector with the number of events
##' contained in \code{phy} for each posterior sample. The length of
##' this vector is equal to the number of posterior samples in the
##' \code{bammdata} object.
##' \item eventData: A list of dataframes. Each element is a single
##' posterior sample. Each row in a dataframe holds the data for a
##' single event. Data associated with an event are: \code{node} - a
##' node number. This identifies the branch where the event
##' originates. \code{time} - this is the absolute time on that branch
##' where the event originates (with the root at time 0). \code{lam1}
##' - an initial rate of speciation or trait evolution. \code{lam2} -
##' a decay/growth parameter. \code{mu1} - an initial rate of
##' extinction. \code{mu2} - a decay/growth parameter. \code{index} -
##' a unique integer associated with the event. See 'Details'.
##' \item eventVectors: A list of integer vectors. Each element is a
##' single posterior sample. For each branch in \code{phy} the index
##' of the event that occurs along that branch. Branches are ordered
##' increasing here and elsewhere.
##' \item eventBranchSegs: A list of matrices. Each element is a single
##' posterior sample. Each matrix has four columns: \code{Column 1}
##' identifies a node in \code{phy}. \code{Column 2} identifies the
##' beginning time of the branch or segment of the branch that
##' subtends the node in \code{Column 1}. \code{Column 3} identifies
##' the ending time of the branch or segment of the branch that
##' subtends the node in \code{Column 1}. \code{Column 4} identifies
##' the index of the event that occurs along the branch or segment of
##' the branch that subtends the node in \code{Column 1}.
##' \item tipStates: A list of integer vectors. Each element is a single
##' posterior sample. For each tip the index of the event that occurs
##' along the branch subtending the tip. Tips are ordered increasing
##' here and elsewhere.
##' \item tipLambda: A list of numeric vectors. Each element is a single
##' posterior sample. For each tip the rate of speciation or trait
##' evolution at the end of the terminal branch subtending that tip.
##' \item tipMu: A list of numeric vectors. Each element is a single
##' posterior sample. For each tip the rate of extinction at the end
##' of the terminal branch subtending that tip. Meaningless if working
##' with \code{BAMM} trait results.
##' \item meanTipLambda: For each tip the mean of the marginal posterior
##' density of the rate of speciation or trait evolution at the end of
##' the terminal branch subtending that tip.
##' \item meanTipMu: For each tip the mean of the marginal posterior
##' density of the rate of extinction at the end of the terminal
##' branch subtending that tip. Meaningless if working with
##' \code{BAMM} trait results.
##' \item type: A character string. Either "diversification" or "trait"
##' depending on your \code{BAMM} analysis.
##' \item downseq: An integer vector holding the nodes of \code{phy}. The
##' order corresponds to the order in which nodes are visited by a
##' pre-order tree traversal.
##' \item lastvisit: An integer vector giving the index of the last node
##' visited by the node in the corresponding position in
##' \code{downseq}. \code{downseq} and \code{lastvisit} can be used to
##' quickly retrieve the descendants of any node. e.g. the descendants
##' of node 89 can be found by
##' 		\code{downseq[which(downseq==89):which(downseq==lastvisit[89])]}.
##' }
##'
##' @note Currently the function does not check for duplicate tip labels in
##' \code{phy}, which may cause the function to choke.
##'
##' @author Dan Rabosky, Mike Grundler
##'
##' @seealso \code{\link{summary.bammdata}}, \code{\link{plot.bammdata}},
##' \code{\link{dtRates}}.
##'
##' @references \url{http://bamm-project.org/}
##'
##' @examples
##' data(primates, events.primates)
##' xx <- getEventData(primates, events.primates, burnin=0.25, nsamples=500,
##' type = 'trait')
##'
##' # compute mean phenotypic rate for primate body size evolution:
##' brates <- getCladeRates(xx)
##' mean(brates$beta)
##'
##' # Plot rates:
##' plot(xx)
##' @keywords models
##' @export
getEventData <- function(phy, eventdata, burnin=0, nsamples = NULL, verbose=FALSE, type = 'diversification')
{
if (type != 'diversification' & type != 'trait') {
stop("Invalid 'type' specification. Should be 'diversification' or 'trait'");
}
if (inherits(phy, 'character')) {
phy <- read.tree(phy);
}
phy$node.label <- NULL;
# check that tree is binary and rooted.
if (!is.binary.phylo(phy)) {
stop('phy is not completely bifurcating.')
}
if (!is.rooted.phylo(phy)) {
stop('phy is not rooted.')
}
	# c() silently drops NULL elements, so test each component directly
	if (is.null(phy$begin) || is.null(phy$end)) {
phy <- getStartStopTimes(phy)
}
# Getting branching times direct from
# the begin and end components of phy
# should be able to now handle non-ultrametric trees.
maxbt <- max(phy$end)
nodes <- (length(phy$tip.label) + 1):(2*length(phy$tip.label) - 1)
bt <- numeric(length(nodes))
names(bt) <- nodes
for (i in 1:length(bt)){
tt <- phy$begin[phy$edge[,1] == nodes[i]][1]
bt[i] <- maxbt - tt
}
########
if (inherits(eventdata, 'data.frame')) {
cat("Processing event data from data.frame\n");
uniquegens <- sort(unique(eventdata[,1]));
}
else if (inherits(eventdata, 'character')) {
cat("Reading event datafile: ", eventdata, "\n\t\t...........");
eventdata <- read.csv(eventdata, header=TRUE, stringsAsFactors=FALSE);
uniquegens <- sort(unique(eventdata[,1]));
cat("\nRead a total of", length(uniquegens), "samples from posterior\n");
}
else {
		err.string <- paste0('eventdata arg invalid\n\nType is ', class(eventdata), '\n');
stop(err.string);
}
samplestart <- uniquegens[floor(burnin*length(uniquegens))];
if (!length(samplestart)) {
samplestart <- 0;
}
uniquegens <- uniquegens[uniquegens >= samplestart];
if (is.null(nsamples)) {
nsamples <- length(uniquegens);
}
else if (nsamples > length(uniquegens)) {
nsamples <- length(uniquegens);
}
eventVectors <- vector("list",nsamples);
eventData <- vector("list",nsamples);
tipStates <- vector("list",nsamples);
eventBranchSegs <- vector("list",nsamples);
tipLambda <- vector("list",nsamples);
tipMu <- vector("list",nsamples);
goodsamples <- uniquegens[seq.int(1, length(uniquegens), length.out=nsamples)];
cat('\nDiscarded as burnin: GENERATIONS < ', goodsamples[1]);
cat("\nAnalyzing ", length(goodsamples), " samples from posterior\n");
numberEvents <- length(goodsamples); # vector to hold number of events per sample
cat('\nSetting recursive sequence on tree...\n');
phy <- getRecursiveSequence(phy);
cat('\nDone with recursive sequence\n\n');
######### Get ancestors for each pair of taxa
if (verbose) {
cat("Start preprocessing MRCA pairs....\n");
}
x2 <- eventdata[eventdata$generation %in% goodsamples, ];
uniquePairSet <- matrix(NA, nrow=nrow(x2), ncol=2);
uniquePairNode <- numeric(nrow(x2));
uniquePairSet[,1] <- as.integer(match(x2$leftchild, phy$tip.label));
uniquePairSet[,2] <- as.integer(match(x2$rightchild, phy$tip.label, nomatch = 0L));
uniquePairNode <- getmrca(phy, uniquePairSet[,1],uniquePairSet[,2]);
if (verbose) {
cat("Done preprocessing MRCA pairs....\n");
}
	####### Done with risky stuff
meanTipMu <- numeric(length(phy$tip.label));
meanTipLambda <- numeric(length(phy$tip.label));
stoptime <- maxbt;
for (i in 1:length(goodsamples)) {
tmpEvents <- x2[x2[,1] == goodsamples[i], ];
if (verbose) cat('Processing event: ', i, '\n');
tm <- as.numeric(tmpEvents[,4]); # abs time of event
lam1 <- as.numeric(tmpEvents[,5]); # lambda parameter 1
lam2 <- as.numeric(tmpEvents[,6]); # lambda parameter 2
if (type == 'diversification') {
mu1 <- try(as.numeric(tmpEvents[, 7]),silent=TRUE); # mu parameter 1
if (inherits(mu1,"try-error")) {
stop("Unidentified column in event data file. Maybe try setting argument 'type = trait'");
}
mu2 <- as.numeric(tmpEvents[, 8]); #mu parameter 2
}
else { #for bamm trait data we set the mu columns to zero because those params don't exist
mu1 <- rep(0, nrow(tmpEvents));
mu2 <- rep(0, nrow(tmpEvents));
}
# Get subtending node for each event:
nodeVec <- uniquePairNode[x2[,1] == goodsamples[i]];
if (sum(nodeVec == 0) > 0) {
stop('Failed to assign event to node\n');
}
# make a dataframe:
dftemp <- data.frame(node=nodeVec, time=tm, lam1=lam1, lam2=lam2, mu1=mu1, mu2=mu2, stringsAsFactors=FALSE);
dftemp <- dftemp[order(dftemp$time), ];
dftemp$index <- 1:nrow(dftemp);
rownames(dftemp) <- NULL;
statevec <- rep(1, nrow(phy$edge));
if (nrow(dftemp) > 1) {
for (k in 2:nrow(dftemp)) {
s1 <- which(phy$downseq == dftemp$node[k]);
s2 <- which(phy$downseq == phy$lastvisit[dftemp$node[k]]);
descSet <- phy$downseq[s1:s2];
isDescendantNode <- phy$edge[,2] %in% descSet;
statevec[isDescendantNode] <- k;
}
}
tmpEventSegMat <- matrix(0, nrow=(max(phy$edge) + nrow(dftemp) - 2), ncol=4);
#non.root <- c(1:length(phy$tip.label), (length(phy$tip.label)+2):max(phy$edge));
non.root <- c(1:length(phy$tip.label), unique(phy$edge[,1]));
non.root <- non.root[-match(length(phy$tip.label)+1, non.root)];
pos <- 1;
is_noEventBranch = !(phy$edge[,2] %in% dftemp$node);
if (sum(is_noEventBranch) > 0) {
tmpEventSegMat[1:sum(is_noEventBranch), 1] <- phy$edge[,2][is_noEventBranch];
tmpEventSegMat[1:sum(is_noEventBranch), 2] <- phy$begin[is_noEventBranch];
tmpEventSegMat[1:sum(is_noEventBranch), 3] <- phy$end[is_noEventBranch];
tmpEventSegMat[1:sum(is_noEventBranch), 4] <- statevec[is_noEventBranch];
		}
		# when every branch carries at least one event, the loop over
		# eventnodeset below fills the entire preallocated matrix, so no
		# special case is needed here
eventnodeset <- intersect(non.root, dftemp$node);
pos <- 1 + sum(is_noEventBranch);
for (k in eventnodeset) {
events.on.branch <- dftemp[dftemp$node == k, ];
events.on.branch <- events.on.branch[order(events.on.branch$time), ];
fBranch <- phy$edge[,2] == k;
start.times <- c(phy$begin[fBranch], events.on.branch$time);
stop.times <- c(events.on.branch$time, phy$end[fBranch]);
parent <- phy$edge[,1][phy$edge[,2] == k];
if (parent == (length(phy$tip.label) + 1)) {
# Parent is root:
proc.set <- c(1, events.on.branch$index);
} else {
proc.set <- c(statevec[phy$edge[,2] == parent], events.on.branch$index);
}
zzindex = pos:(pos + nrow(events.on.branch));
tmpEventSegMat[zzindex, 1] <- rep(k, length(zzindex));
tmpEventSegMat[zzindex, 2] <- start.times;
tmpEventSegMat[zzindex, 3] <- stop.times;
tmpEventSegMat[zzindex, 4] <- proc.set;
pos <- pos + 1 + nrow(events.on.branch);
}
tmpEventSegMat <- tmpEventSegMat[order(tmpEventSegMat[,1]), ];
eventBranchSegs[[i]] <- tmpEventSegMat;
tipstates <- numeric(length(phy$tip.label));
tipstates <- statevec[phy$edge[,2] <= phy$Nnode + 1];
tipstates <- tipstates[order(phy$edge[phy$edge[,2] <= phy$Nnode + 1, 2])];
### Compute tip rates:
#tiplam <- dftemp$lam1[tipstates] * exp(dftemp$lam2[tipstates] * (stoptime - dftemp$time[tipstates]));
tiplam <- exponentialRate(stoptime - dftemp$time[tipstates], dftemp$lam1[tipstates], dftemp$lam2[tipstates]);
tipmu <- dftemp$mu1[tipstates];
meanTipMu <- meanTipMu + tipmu/nsamples;
meanTipLambda <- meanTipLambda + tiplam/nsamples;
### List assignments and metadata across all events:
eventData[[i]] <- dftemp;
eventVectors[[i]] <- statevec;
numberEvents[i] <- nrow(dftemp);
tipStates[[i]] <- tipstates;
tipLambda[[i]] <- tiplam;
tipMu[[i]] <- tipmu;
}
phy$numberEvents <- numberEvents;
phy$eventData <- eventData;
phy$eventVectors <- eventVectors;
phy$tipStates <- tipStates;
phy$tipLambda <- tipLambda;
phy$meanTipLambda <- meanTipLambda;
phy$eventBranchSegs <- eventBranchSegs;
phy$tipMu <- tipMu;
phy$meanTipMu = meanTipMu;
if (type == 'diversification') {
phy$type <- 'diversification';
}
else {
phy$type <- 'trait';
}
# adds new class: 'bammdata'
class(phy) <- 'bammdata';
return(phy);
}
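# The exponential rate model described in the documentation above,
# R(t) = R(0) * exp(b * t), written out as a standalone sketch. BAMMtools
# evaluates this internally (see the call to exponentialRate in the tip-rate
# calculation above); this copy is for illustration only.
rateAtTime <- function(R0, b, t) {
	R0 * exp(b * t);
}
# e.g. a process with initial rate 0.5 decaying at b = -0.1:
# rateAtTime(0.5, -0.1, 0:10)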
# --- end of R/getEventData.R ---
###############################
#
# getJenksBreaks <- function(...)
# Function returns the Jenks natural breaks for a vector of values
#
# var = numeric vector
#	k = number of breaks
# subset = if not NULL, this is the number of regularly sampled values to be taken from var
##' @title Jenks natural breaks classification
##'
##' @description Given a vector of numeric values and the number of desired
##' breaks, calculate the optimum breakpoints using Jenks natural
##' breaks optimization.
##'
##' @param var Numeric vector.
##' @param k Number of breaks.
##' @param subset Number of regularly spaced samples to subset from
##' \code{var}. Intended to improve runtime for large datasets. If
##' \code{NULL}, all values are used.
##'
##' @details \code{getJenksBreaks} is called by
##' \code{\link{assignColorBreaks}}.
##'
##' The values in \code{var} are binned into \code{k+1} categories,
##' according to the Jenks natural breaks classification method. This
##' method is borrowed from the field of cartography, and seeks to
##' minimize the variance within categories, while maximizing the variance
##' between categories. If \code{subset = NULL}, all values of \code{var}
##' are used for the optimization, however this can be a slow process with
##' very large datasets. If \code{subset} is set to some number, then
##' \code{subset} regularly spaced values of \code{var} will be sampled.
##' This is slightly less accurate than when using the entirety of
##' \code{var} but is unlikely to make much of a difference. If
##' \code{subset} is defined but \code{length(var) < subset}, then
##' \code{subset} has no effect.
##'
##' The Jenks natural breaks method was ported to C from code found in the
##' classInt R package.
##'
##' @return A numeric vector of intervals.
##'
##' @author Pascal Title
##'
##' @seealso See \code{\link{assignColorBreaks}} and
##' \code{\link{plot.bammdata}}.
##'
##' @examples
##' # load whales dataset
##' data(whales, events.whales)
##' ed <- getEventData(whales, events.whales, burnin=0.25, nsamples=500)
##'
##' # for demonstration purposes, extract the vector of speciation rates
##' ed <- dtRates(ed, tau=0.01)
##' vec <- ed$dtrates$rates[[1]]
##'
##' # Return breaks for the binning of speciation rates into 65 groups
##' # yielding 64 breaks
##' getJenksBreaks(vec, 64)
##' @keywords graphics
##' @export
getJenksBreaks <- function(var, k, subset = NULL) {
k <- k - 1;
#if more breaks than unique values, segfault, so avoid
if (k > length(unique(var))) {
k <- length(unique(var));
}
brks <- rep(1, k + 1);
#if requested, regularly sample subset values
if (!is.null(subset)) {
if (length(var) > subset) {
ind <- c(seq(from=1, to=length(var), by=floor(length(var)/subset)), length(var));
var <- var[ind];
}
}
d <- sort(var);
length_d <- length(d);
return(.C("jenksBrks", as.double(d), as.integer(k), as.integer(length_d), as.double(brks))[[4]]);
}
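# A minimal sketch of how the breaks returned by getJenksBreaks can be fed to
# cut() to bin a numeric vector into categories, mirroring what
# assignColorBreaks does downstream when mapping rates to colors. The helper
# name 'binByJenks' is hypothetical.
binByJenks <- function(var, k, subset = NULL) {
	brks <- getJenksBreaks(var, k, subset);
	# pad with the data range so every value falls inside some interval
	cut(var, breaks = unique(c(min(var), brks, max(var))), include.lowest = TRUE);
}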
# --- end of R/getJenksBreaks.R ---
#############################################################
#
# getMarginalBranchRateMatrix(....)
#
# get matrix of marginal rates on each branch for each sample from posterior
#
# This function can handle either a 'bammdata' object or a multiphylo object (i.e., list of trees)
##' @title Compute mean branch rates for \code{bammdata} object
##'
##' @description For each sample in the posterior, computes the mean rate for
##' each branch in the focal phylogeny (speciation, extinction, trait
##' evolution). If the \code{bammdata} object contains \emph{nsamples}
##' samples and the target phylogeny has \emph{nbranches} branches, the
##' function will compute a matrix of \emph{nbranches} x \emph{nsamples}.
##'
##' @param ephy An object of class \code{bammdata}.
##' @param verbose Print progress during processing of \code{bammdata} object.
##'
##' @details If a \code{type = 'diversification'} \code{bammdata} object is
##' passed as an argument, the function will return matrices for both
##' speciation and extinction. If \code{type = 'trait'} object, the matrix
##' will simply be the corresponding phenotypic rates. Branch-specific
##' rates are the mean rates computed by integrating the relevant
##' rate-through-time function along each branch, then dividing by the
##' length of the branch.
##'
##' @return Returns a list with the following components:
##' \itemize{
##' \item lambda_branch_matrix: A \code{nbranches x nsamples} matrix
##' of mean speciation rates for each branch.
##' \item mu_branch_matrix: A \code{nbranches x nsamples} matrix of
##' mean extinction rates for each branch.
##' \item beta_branch_matrix: A \code{nbranches x nsamples} matrix of
##' mean phenotypic rates for each branch.
##' }
##'
##' @author Dan Rabosky
##'
##' @examples
##' data(whales)
##' data(events.whales)
##' ed <- getEventData(whales, events.whales, nsamples = 10)
##' mbr <- getMarginalBranchRateMatrix(ed)
##' dim(mbr$lambda_branch_matrix)
##' @keywords models
##' @export
getMarginalBranchRateMatrix <- function(ephy, verbose = FALSE) {
if (!inherits(ephy, 'bammdata')) {
stop("Object must be of class bammdata\n");
}
lammat <- matrix(0, ncol=length(ephy$eventBranchSegs), nrow=nrow(ephy$edge));
mumat <- matrix(0, ncol=length(ephy$eventBranchSegs), nrow=nrow(ephy$edge));
for (i in 1:length(ephy$eventBranchSegs)) {
if (verbose) {
cat('Processing sample ', i, '\n');
}
esegs <- ephy$eventBranchSegs[[i]];
events <- ephy$eventData[[i]];
events <- events[order(events$index), ];
# relative start time of each seg, to event:
relsegmentstart <- esegs[,2] - ephy$eventData[[i]]$time[esegs[,4]];
relsegmentend <- esegs[,3] - ephy$eventData[[i]]$time[esegs[,4]];
lam1 <- ephy$eventData[[i]]$lam1[esegs[,4]];
lam2 <- ephy$eventData[[i]]$lam2[esegs[,4]];
mu1 <- ephy$eventData[[i]]$mu1[esegs[,4]];
mu2 <- ephy$eventData[[i]]$mu2[esegs[,4]];
lamint <- timeIntegratedBranchRate(relsegmentstart, relsegmentend, lam1, lam2);
muint <- timeIntegratedBranchRate(relsegmentstart, relsegmentend, mu1, mu2);
seglengths <- esegs[,3] - esegs[,2];
for (k in 1:nrow(ephy$edge)) {
isRightBranch <- esegs[,1] == ephy$edge[k,2];
lammat[k, i] <- sum(lamint[isRightBranch]) / sum(seglengths[isRightBranch]);
mumat[k, i] <- sum(muint[isRightBranch]) / sum(seglengths[isRightBranch]);
}
}
if (ephy$type == 'diversification') {
return(list(lambda_branch_matrix = lammat, mu_branch_matrix = mumat));
}
if (ephy$type == 'trait') {
return(list(beta_branch_matrix = lammat));
}
}
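# A short sketch (hypothetical helper, not part of the package API) that
# collapses the nbranches x nsamples matrix described above into a per-branch
# posterior mean and 90% interval, indexed by the node each branch subtends.
branchRateSummary <- function(ephy) {
	mbr <- getMarginalBranchRateMatrix(ephy);
	mat <- mbr[[1]]; # lambda_branch_matrix, or beta_branch_matrix for traits
	data.frame(node = ephy$edge[, 2],
	           mean = rowMeans(mat),
	           lwr = apply(mat, 1, quantile, probs = 0.05),
	           upr = apply(mat, 1, quantile, probs = 0.95));
}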
# --- end of R/getMarginalBranchRateMatrix.R ---
#############################################################
#
# getMeanBranchLengthTree(....)
#
##' @title Compute phylogeny with branch lengths equal to corresponding
##' macroevolutionary rate estimates
##'
##' @description Takes a \code{bammdata} object and computes a phylogenetic
##' tree where branch lengths are equal to the mean of the marginal
##' distributions of rates on each branch. This tree can be plotted to
##' visualize rate variation across a phylogeny.
##'
##' @param ephy An object of class \code{bammdata}.
##' @param rate The type of rate-tree to be computed. Options: "speciation"
##' (default), "extinction", "ndr" (net diversification), and "trait".
##'
##' @return A list with the following components:
##' \itemize{
##' \item phy: A phylogenetic tree, topologically identical to the
##' model tree, but with branch lengths replaced by the mean
##' (marginal) rates on each branch as estimated from the
##' posterior samples in the \code{bammdata} object.
##' \item mean: The mean rate over all branches.
##' \item median: the median rate over all branches.
##' }
##'
##' @author Dan Rabosky
##'
##' @seealso \code{\link{plot.bammdata}}
##'
##' @references \url{http://bamm-project.org/}
##'
##' @examples
##' data(whales)
##' data(events.whales)
##' ed <- getEventData(whales, events.whales, burnin=0.1, nsamples=500)
##' ed2 <- subsetEventData(ed, index = 1:20)
##' ratetree <- getMeanBranchLengthTree(ed2, rate='speciation')
##' plot(ratetree$phy, show.tip.label=FALSE)
##' @keywords models
##' @export
getMeanBranchLengthTree <- function(ephy, rate='speciation') {
	if (inherits(ephy, 'bammdata')) {
		v <- as.phylo.bammdata(ephy);
	} else {
		stop("Object ephy must be of class bammdata\n");
	}
obj <- getMarginalBranchRateMatrix(ephy,verbose=FALSE);
if (ephy$type == 'diversification'){
if (rate == 'speciation'){
el <- rowMeans(obj$lambda_branch_matrix);
}else if (rate == 'extinction'){
el <- rowMeans(obj$mu_branch_matrix);
}else if (rate == 'ndr'){
el <- rowMeans(obj$lambda_branch_matrix) - rowMeans(obj$mu_branch_matrix);
}else{
stop("invalid rate specification in getMeanBranchLengthTree");
}
}else if (ephy$type == 'trait'){
el <- rowMeans(obj$beta_branch_matrix);
}else{
stop("error in getMeanBranchLengthTree - \nproblem with supplied ephy object");
}
v$edge.length <- el;
tmp <- list();
tmp$phy <- v;
tmp$median <- median(el);
tmp$mean <- mean(el);
return(tmp);
}
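# A small usage sketch: because branch lengths of the returned tree are mean
# rates, they can also be mapped to edge widths on the original time tree for
# a quick visual check. 'plotRateWidths' is a hypothetical helper; it assumes
# ape is available for plotting.
plotRateWidths <- function(ephy, rate = 'speciation', scale = 5) {
	rt <- getMeanBranchLengthTree(ephy, rate = rate);
	wd <- 0.5 + scale * rt$phy$edge.length / max(rt$phy$edge.length);
	plot(as.phylo.bammdata(ephy), edge.width = wd, show.tip.label = FALSE);
}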
# --- end of R/getMeanBranchLengthTree.R ---
#############################################################
#
# getPathToRoot <- function(...)
#
# Internal function, gives node path from some node "node" to root
getPathToRoot <- function(phy, node){
root <- length(phy$tip.label) + 1;
nset <- node;
while (node != root){
node <- phy$edge[,1][phy$edge[,2] == node];
nset <- c(nset, node);
}
return(nset);
}
# --- end of R/getPathToRoot.R ---
#############################################################
#
# getRateThroughTimeMatrix(....)
#
# include or exclude options:
# start.time = start time (in units before present)
# if NULL, starts at root
# end.time = end time
# if NULL, ends at present
# nslices = number of time cuts
# Return
# list with components:
# $times = the time vector
#	 $lambda = speciation rate matrix
#	 $mu = extinction rate matrix
#	 $beta = trait rate matrix (for trait analyses)
#	 $type = diversification or trait
# returns object of class bamm-ratematrix
#
##' @title Generate rate-through-time matrix from \code{bammdata} object
##'
##' @description Computes a matrix of macroevolutionary rates at specified
##' timepoints from a \code{bammdata} object. These rates can be used for
##' plotting speciation rates (and other rates) through time.
##'
##' @param ephy An object of class \code{bammdata}.
##' @param start.time The start time (in units before present) for the time
##' sequence over which rates should be computed. If \code{NULL}, starts
##' at the root.
##' @param end.time The end time (in units before present) for the time
##' sequence over which rates should be computed. If \code{NULL}, ends in
##' the present.
##' @param nslices The number of time points at which to compute rate
##' estimates (between \code{start.time} and \code{end.time}).
##' @param node Allows user to extract rate-through-time information for the
##' subtree descended from a specific node. Alternatively, a specified
##' subtree can be excluded from the rate matrix calculations.
##' @param nodetype Two options: "include" and "exclude". If "include",
##' computes rate matrix only for the descendants of subtree defined by
##' \code{node}. If "exclude", computes rate matrix for all background
##' lineages in tree after excluding the subtree defined by \code{node}.
##' Ignored if \code{node = NULL}.
##'
##' @details Computes evolutionary rates for each sample in the posterior
##' included as part of the \code{bammdata} object. Rates are computed by
##' draping an imaginary grid over the phylogeny, where the grid begins at
##' \code{start.time} and ends at \code{end.time}, with \code{nslices}
##' vertical lines through the phylogeny. The mean rate at each point in
##' time (for a given sample from the posterior) is simply the mean rate
##' at that time for all branches that are intersected by the grid (see
##' the grid plot in the examples section).
##'
##' This function is used by \link{plotRateThroughTime}, but the user can
##' work directly with the \code{bamm-ratematrix} object for greater
##' control in plotting rate-through-time trajectories for individual
##' clades. See \code{examples} for an example of how this can be used to
##' plot confidence intervals on a rate trajectory using shaded polygons.
##'
##' The \code{node} options are particularly useful. If you have run
##' \code{BAMM} on a large phylogeny, you can easily generate the
##' rate-through-time data for a particular subtree by specifying the node
##' number along with \code{nodetype = "include"}. Likewise, if you want
##' to look at just the background rate - excluding some particular
##' lineage - just specify \code{nodetype = "exclude"}.
##'
##' @return An object of class \code{bamm-ratematrix} with the following
##' components:
##'
##' \item{lambda}{A \code{nsamples} x \code{nslices} matrix of speciation
##' rates, where \code{nsamples} is the number of posterior samples in
##' the \code{bammdata} object.}
##' \item{mu}{A \code{nsamples} x \code{nslices} matrix of extinction
##' rates.}
##' \item{beta}{A \code{nsamples} x \code{nslices} matrix of phenotypic
##' rates (if applicable).}
##' 	\item{times}{A vector of timepoints where rates were computed (see
##' 		Examples).}
##' \item{type}{Either "diversification" or "trait", depending on the
##' input data.}
##'
##' @author Dan Rabosky
##'
##' @seealso \code{\link{plotRateThroughTime}}
##'
##' @examples
##' \dontrun{
##' # Plot a rate-through-time curve with
##' # confidence intervals for the whale dataset:
##'
##' data(whales, events.whales)
##' ed <- getEventData(whales, events.whales)
##'
##' rmat <- getRateThroughTimeMatrix(ed)
##'
##' plot.new()
##' plot.window(xlim=c(0, 36), ylim=c(0, .7))
##'
##' ## Speciation quantiles: plot 90% CIs
##' qq <- apply(rmat$lambda, 2, quantile, c(0.05, 0.5, 0.95))
##'
##' xv <- c(rmat$times, rev(rmat$times))
##' yv <- c(qq[1,], rev(qq[3,]))
##'
##' ## Add the confidence polygon on rate distributions:
##' polygon(xv, yv, col='gray80', border=FALSE)
##'
##' ## Add the median rate line:
##' lines(rmat$times, qq[2,], lwd=3, col='red')
##'
##' ## Add axes
##' axis(1, at=seq(-5, 35, by=5))
##' axis(2, at=seq(-0.2, 1, by=0.2), las=1)
##'
##' ####### Now we will show the actual grid used for rate calculations:
##'
##' plot(whales, show.tip.label=FALSE)
##' axisPhylo()
##'
##' tvec <- rmat$times;
##'
##' for (i in 1:length(tvec)){
##' lines(x=c(tvec[i], tvec[i]), y=c(0, 90), lwd=0.7, col='gray70')
##' }
##'
##' ## This shows the grid of time slices over the phylogeny}
##' @keywords models
##' @export
getRateThroughTimeMatrix <- function(ephy, start.time=NULL, end.time=NULL, nslices=100, node=NULL, nodetype = 'include') {
if (!inherits(ephy, 'bammdata')) {
stop("Object ephy must be of class 'bammdata'\n");
}
if (is.null(node)) {
nodeset <- c(length(ephy$tip.label) + 1, ephy$edge[,2]);
} else if (!is.null(node) & nodetype == 'include') {
nodeset <- unlist(sapply(node, function(x) getDesc(ephy, x)$desc_set))
} else if (!is.null(node) & nodetype == 'exclude') {
nodeset <- setdiff( ephy$edge[,2], unlist(sapply(node, function(x) getDesc(ephy, x)$desc_set)));
nodeset <- c(length(ephy$tip.label) + 1, nodeset);
} else {
stop('error in getRateThroughTimeMatrix\n');
}
	phy <- as.phylo.bammdata(ephy);
	if (is.ultrametric(phy)) {
		bt <- branching.times(phy);
	} else {
		bt <- NU.branching.times(phy);
	}
maxpossible <- max(bt[as.character(intersect(nodeset, ephy$edge[,1]))]);
#convert from time before present to node heights
if (!is.null(start.time)) {
new.start.time <- max(bt) - start.time;
}
if (!is.null(end.time)) {
new.end.time <- max(bt) - end.time;
}
if (is.null(start.time)) {
new.start.time <- max(bt) - maxpossible;
start.time <- maxpossible;
}
if (is.null(end.time)) {
new.end.time <- max(bt);
end.time <- 0;
}
tvec <- seq(new.start.time, new.end.time, length.out = nslices);
names(tvec) <- seq(start.time, end.time, length.out = nslices);
#tol = 1*10^-decimals(ephy$eventBranchSegs[[1]][1,2]);
tol <- 0.00001
mm <- matrix(NA, nrow = length(ephy$eventBranchSegs), ncol = length(tvec));
mumat <- matrix(NA, nrow = length(ephy$eventBranchSegs), ncol = length(tvec));
for (i in 1:nrow(mm)){
es <- ephy$eventBranchSegs[[i]];
events <- ephy$eventData[[i]];
if (is.null(node)){
isGoodNode <- rep(TRUE, nrow(es));
} else {
# es[,1] are descendant nodes of branches.
# If es[,1] is in nodeset, then at least part of the branch is in the
# tree subset of interest
isGoodNode <- es[, 1] %in% nodeset;
}
# # plot branch segments that are to be included in time binned mean rate calc.
# plot(as.phylo.bammdata(ephy), cex = 0.5)
# nodelabels(node = node, frame = 'circle', cex = 0.4)
# abline(v = tvec, lty = 3, col = gray(0.5))
# axisPhylo()
# axis(1, line = 2)
for (k in 1:length(tvec)){
isGoodTime <- goodTime(es, tvec[k], tol = tol);
estemp <- matrix(es[isGoodTime & isGoodNode, ], ncol = 4);
# pp <- get("last_plot.phylo", envir = .PlotPhyloEnv)
# segments(x0 = estemp[,2], x1 = estemp[,3], y0 = pp$yy[estemp[,1]], y1 = pp$yy[estemp[,1]], lwd = 2, col = 'blue')
tvv <- tvec[k] - events$time[estemp[,4]];
rates <- exponentialRate(tvv, events$lam1[estemp[,4]], events$lam2[estemp[,4]]);
mm[i, k] <- mean(rates);
mumat[i,k] <- mean(events$mu1[estemp[,4]]);
}
}
obj <- list();
if (ephy$type == 'diversification') {
obj$lambda <- mm;
obj$mu <- mumat;
}
if (ephy$type == 'trait') {
obj$beta <- mm;
}
obj$times <- tvec;
class(obj) <- 'bamm-ratematrix';
if (ephy$type == 'diversification') {
obj$type <- 'diversification';
} else {
obj$type <- 'trait';
}
return(obj);
}
goodTime <- function (vec, val, tol) {
(vec[,2] - val <= tol) & (val - vec[,3] <= tol);
}
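# A minimal sketch of the clade-versus-background comparison described in the
# documentation above: compute one rate matrix for the subtree rooted at
# 'node' and one for the rest of the tree, then overlay the median speciation
# trajectories. 'compareCladeRTT' is a hypothetical helper and assumes a
# diversification-type bammdata object.
compareCladeRTT <- function(ephy, node, nslices = 100) {
	inc <- getRateThroughTimeMatrix(ephy, node = node, nodetype = 'include', nslices = nslices);
	exc <- getRateThroughTimeMatrix(ephy, node = node, nodetype = 'exclude', nslices = nslices);
	med.inc <- apply(inc$lambda, 2, median);
	med.exc <- apply(exc$lambda, 2, median);
	plot(inc$times, med.inc, type = 'l', col = 'red', ylim = range(med.inc, med.exc),
	     xlab = 'node height', ylab = 'median speciation rate');
	lines(exc$times, med.exc, col = 'blue');
}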
# --- end of R/getRateThroughTimeMatrix.R ---
#############################################################
#
# getRecursiveSequence(....)
#
# Private function, called by getEventDataDiversification
getRecursiveSequence = function(phy)
{
rootnd = as.integer(phy$Nnode+2);
anc = as.integer(phy$edge[,1]);
desc = as.integer(phy$edge[,2]);
ne = as.integer(dim(phy$edge)[1]);
L = .C('setrecursivesequence', anc, desc, rootnd, ne, integer(ne+1),integer(ne+1));
phy$downseq = as.integer(L[[5]]);
phy$lastvisit = as.integer(L[[6]]);
return(phy);
}
# --- end of R/getRecursiveSequence.R ---
#############################################################
#
# getSequenceForwardTraversal(....)
#
# Private function, called by getRecursiveSequence
getSequenceForwardTraversal <- function(phy, node){
if (node <= length(phy$tip.label)){
#phy$downseq <- c(phy$downseq, node);
}else{
dset <- phy$edge[,2][phy$edge[,1] == node];
phy$downseq <- c(phy$downseq, dset[1]);
phy <- getSequenceForwardTraversal(phy, dset[1]);
phy$downseq <- c(phy$downseq, dset[2]);
phy <- getSequenceForwardTraversal(phy, dset[2]);
}
phy$lastvisit[node] <- phy$downseq[length(phy$downseq)];
return(phy);
}
# --- end of R/getSequenceForwardTraversal.R ---
#############################################################
#
# getShiftNodesFromIndex (....)
#
# Args: ephy = object of class 'bammdata'
# index = the index of the sample you wish to view, e.g.,
# if index = 5, this will give you the nodes subtending
# all branches with rate shifts for the 5th sample
# from the posterior in your 'bammdata' object.
#
#
# Returns: - a vector of the nodes where shifts occurred, excluding the root.
#			Note: if NO shifts occurred, this will return a
# numeric object of length zero
#
##' @title Identify nodes associated with rate shifts from \code{bammdata}
##' object
##'
##' @description Find the node numbers associated with rate shifts for a
##' specified sample from the posterior distribution contained in a
##' \code{bammdata} object.
##'
##' @param ephy A \code{bammdata} object.
##' @param index The index value of the posterior sample from which you want
##' to identify shift nodes. This is \emph{not} the same as the actual
##' generation number of the MCMC sample. If your \code{bammdata} object
##' contains 100 samples from the posterior distribution, the value of
##' \code{index} must range from 1 to 100.
##'
##' @return A vector of nodes (excluding the root) that define branches on
##' which shifts occurred for the specified sample from the posterior.
##' Will return a numeric of length 0 if no non-root shifts occur in the
##' specified sample.
##'
##' @author Dan Rabosky
##'
##' @seealso \code{\link{addBAMMshifts}}, \code{\link{plot.bammdata}},
##' \code{\link{maximumShiftCredibility}}
##'
##' @examples
##' data(whales, events.whales)
##' ed <- getEventData(whales, events.whales, burnin=0.1, nsamples=500)
##'
##' # Get the maximum shift credibility configuration:
##' msc <- maximumShiftCredibility(ed)
##'
##' # Get the nodes at which shifts occurred in the
##' # maximum shift credibility configuration:
##'
##' getShiftNodesFromIndex(ed, index=msc$sampleindex)
##' @keywords models
##' @export
getShiftNodesFromIndex <- function(ephy, index) {
if (index > length(ephy$eventData)) {
cat("Error. Attempting to access non-existent element from 'bammdata' object\n");
		cat("You have << ", length(ephy$eventData), " >> samples in your 'bammdata' object\n");
cat("Value of index must be no greater than the number of samples\n\n");
stop();
}
root <- length(ephy$tip.label) + 1;
nodes <- ephy$eventData[[index]]$node;
nodes <- nodes[nodes != root];
return(nodes);
}
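# A small usage sketch: the shift nodes for a given posterior sample can be
# marked directly on the tree (assumes ape is available; 'ed' is a bammdata
# object as in the example above; illustrative only).
# plot(as.phylo.bammdata(ed), show.tip.label = FALSE)
# nodelabels(node = getShiftNodesFromIndex(ed, index = 1), pch = 21, bg = 'red')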
# --- end of R/getShiftNodesFromIndex.R ---
#############################################################
#
# getSpanningTaxonPair(....)
#
# returns pair of taxa that span a given taxon set
getSpanningTaxonPair <- function(phy, taxset){
	if (sum(taxset %in% phy$tip.label) < length(taxset)) {
		cat('Some species in taxset are not in the tree\n');
taxset <- taxset[taxset %in% phy$tip.label];
}
dt <- drop.tip(phy, setdiff(phy$tip.label, taxset));
return(c(dt$tip.label[1], dt$tip.label[length(dt$tip.label)]));
}
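# A minimal sketch (hypothetical helper): combined with getmrca, the spanning
# pair locates the MRCA node of an arbitrary taxon set in the full tree.
mrcaOfTaxset <- function(phy, taxset) {
	pr <- getSpanningTaxonPair(phy, taxset);
	getmrca(phy, pr[1], pr[2]);
}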
# --- end of R/getSpanningTaxonPair.R ---
#############################################################
#
# getStartStopTimes(....)
#
# adds begin and end times (absolute time) to each edge of
# phylogenetic tree
getStartStopTimes <- function(phy){
if (is.ultrametric(phy)) {
bmax <- max(branching.times(phy));
bt <- bmax - branching.times(phy);
begin <- bt[as.character(phy$edge[,1])];
end <- begin + phy$edge.length;
phy$begin <- as.numeric(begin);
phy$end <- as.numeric(end);
return(phy);
}
return( NU.branching.times(phy, "begin.end"));
}
# --- end of R/getStartStopTimes.R ---
#############################################################
#
# getTipRates(....)
#
# Returns a list with:
# lambda = matrix of tip rates where rows are species and columns are posterior samples,
# mu if ephy$type == 'diversification',
# beta if ephy$type='trait',
# lambda.avg, mu.avg, beta.avg: named vector of average tip rates.
# If returnNetDiv = TRUE, then a matrix and average vector for net div rates is returned.
##' @title Compute tip-specific macroevolutionary rates from \code{bammdata}
##' object
##'
##' @description Return speciation, extinction, net diversification, or
##' Brownian motion trait rates for all species in the phylogeny from
##' \code{BAMM} output.
##'
##' @param ephy An object of class \code{bammdata}.
##' @param returnNetDiv Logical. If \code{TRUE}, then net diversification
##' rates are returned, if \code{FALSE}, then both speciation and
##' extinction rates are returned. If \code{ephy} is of type \code{trait},
##' then this is ignored.
##' @param statistic Determines how the average tip rates should be
##' calculated. Can be either \code{mean} or \code{median}.
##'
##' @return Returns a list with the following elements:
##'
##' If \code{ephy} type is 'diversification':
##' \itemize{
##' \item lambda: A matrix of tip speciation rates with species as
##' rows, and posterior samples as columns.
##' \item mu: A matrix of tip extinction rates with species as rows,
##' and posterior samples as columns.
##' \item lambda.avg: A vector of average tip speciation rates,
##' averaged with mean or median, depending on selected option for
##' \code{statistic}. The vector is named with species names.
##' \item mu.avg: A vector of average tip extinction rates, averaged
##' with mean or median, depending on selected option for
##' \code{statistic}. The vector is named with species names.
##' }
##'
##' If \code{ephy} type is 'diversification' and
##' \code{returnNetDiv = TRUE}:
##' \itemize{
##' \item netdiv: A matrix of tip net diversification rates with
##' species as rows, and posterior samples as columns.
##'
##' \item netdiv.avg: A vector of average tip net diversification
##' rates, averaged with mean or median, depending on selected
##' option for \code{statistic}. The vector is named with species
##' names.
##' }
##'
##' If \code{ephy} type is 'trait':
##' \itemize{
##' \item beta: A matrix of tip phenotypic rates with species as
##' rows, and posterior samples as columns.
##' \item beta.avg: A vector of average tip phenotypic rates,
##' averaged with mean or median, depending on selected option for
##' \code{statistic}. The vector is named with species names.
##' }
##'
##' @author Pascal Title
##'
##' @seealso Requires an object of class \code{bammdata} as obtained with
##' \code{\link{getEventData}}.
##'
##' @examples
##' data(whales, events.whales)
##' ephy <- getEventData(whales, events.whales, burnin=0.25, nsamples = 500)
##'
##' # return a vector of average species-specific speciation rates.
##' meanlam <- getTipRates(ephy, returnNetDiv = FALSE,
##' statistic = 'mean')$lambda.avg
##' meanlam
##'
##' # return a vector of median species-specific net diversification rates.
##' ndr <- getTipRates(ephy, returnNetDiv = TRUE,
##' statistic = 'median')$netdiv.avg
##'
##' # Return mean species-specific speciation rates from all posterior
##' # samples in the \code{bamm-data} object.
##' lam <- getTipRates(ephy, returnNetDiv = FALSE, statistic = 'mean')$lambda
##' rowMeans(lam)
##' @keywords models
##' @export
getTipRates <- function(ephy, returnNetDiv = FALSE, statistic = 'mean') {
if (!inherits(ephy, 'bammdata')) {
stop("Object ephy must be of class bammdata\n");
}
if (!statistic %in% c('mean','median')) {
stop("statistic must be either 'mean' or 'median'.");
}
obj <- list();
if (ephy$type == 'diversification') {
if (returnNetDiv) {
obj$netdiv <- do.call(cbind, ephy$tipLambda) - do.call(cbind, ephy$tipMu);
rownames(obj$netdiv) <- as.phylo.bammdata(ephy)$tip.label;
if (statistic == 'mean') {
obj$netdiv.avg <- rowMeans(obj$netdiv);
}
if (statistic == 'median') {
obj$netdiv.avg <- apply(obj$netdiv, 1, median);
}
}
if (!returnNetDiv) {
obj$lambda <- do.call(cbind, ephy$tipLambda);
rownames(obj$lambda) <- as.phylo.bammdata(ephy)$tip.label;
obj$mu <- do.call(cbind, ephy$tipMu);
rownames(obj$mu) <- as.phylo.bammdata(ephy)$tip.label;
if (statistic == 'mean') {
obj$lambda.avg <- rowMeans(obj$lambda);
obj$mu.avg <- rowMeans(obj$mu);
}
if (statistic == 'median') {
obj$lambda.avg <- apply(obj$lambda, 1, median);
obj$mu.avg <- apply(obj$mu, 1, median);
}
}
}
if (ephy$type == 'trait') {
obj$beta <- do.call(cbind, ephy$tipLambda);
rownames(obj$beta) <- as.phylo.bammdata(ephy)$tip.label;
if (statistic == 'mean') {
obj$beta.avg <- rowMeans(obj$beta);
}
if (statistic == 'median') {
			obj$beta.avg <- apply(obj$beta, 1, median);
}
}
return(obj);
}
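# A short sketch (hypothetical helper, not part of the package API) computing
# per-species credible intervals from the species x samples matrix that
# getTipRates returns.
tipRateCI <- function(ephy, probs = c(0.05, 0.95)) {
	tr <- getTipRates(ephy);
	mat <- if (ephy$type == 'trait') tr$beta else tr$lambda;
	t(apply(mat, 1, quantile, probs = probs));
}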
# --- end of R/getTipRates.R ---
##' @title Find most recent common ancestors
##'
##' @description Calculates the most recent common ancestor for each pair of
##' tips. Used internally by \code{\link{getEventData}}.
##'
##' @param phy An object of class \code{phylo}.
##' @param t1 A vector of mode integer or character corresponding to tips in
##' \code{phy}.
##' @param t2 A vector of mode integer or character corresponding to tips in
##' \code{phy}.
##'
##' @details Finds the most recent common ancestor for each pair of tips where
##' pairs are defined as (\code{t1}[1], \code{t2}[1]), (\code{t1}[2],
##' \code{t2}[2]), ... , (\code{t1}[i], \code{t2}[i]), ... ,(\code{t1}[n],
##' \code{t2}[n]).
##'
##' @return A vector of node numbers of the common ancestor for each pair of
##' tips.
##'
##' @author Mike Grundler
##'
##' @seealso \code{\link{subtreeBAMM}}
##' @keywords manip
##' @export
getmrca <- function(phy,t1,t2)
{
if (mode(t1) == "character") {
t1 <- match(t1, phy$tip.label);
}
if (mode(t2) == "character") {
t2 <- match(t2, phy$tip.label);
}
ne <- as.integer(dim(phy$edge)[1]);
npair <- as.integer(length(t1));
anc <- as.integer(phy$edge[,1]);
desc <- as.integer(phy$edge[,2]);
root <- as.integer(length(phy$tip.label) + 1);
.C('fetchmrca',anc,desc,root,ne,npair,as.integer(t1),as.integer(t2),integer(npair))[[8]];
}
# --- end of R/getmrca.R ---
inv.logit <- function (x, min = 0, max = 1)
{
p <- exp(x)/(1 + exp(x))
p <- ifelse(is.na(p) & !is.na(x), 1, p)
p * (max - min) + min
}
# --- end of R/inv.logit.R ---
logit <- function (x, min = 0, max = 1)
{
p <- (x - min)/(max - min)
log(p/(1 - p))
}
# --- end of R/logit.R ---
# returns a phylogenetic tree where
# branch lengths are equal to the marginal
# odds ratio (posterior : prior) for a given branch.
# It is marginal in the sense that it is not independent
# of values for other branches.
##' @title Ratio of (marginal) posterior-to-prior probabilities on individual
##' branches
##'
##' @description Compute marginal posterior-to-prior odds ratio associated
##' with observing one or more rate shift on a given branch.
##'
##' @param ephy An object of class \code{bammdata}.
##' @param expectedNumberOfShifts Expected number of shifts under the prior
##' alone.
##'
##' @details This function returns a copy of a phylogenetic tree where each
##' branch length is equal to the marginal odds ratio in favor of a rate
##' shift on a particular branch. These cannot be interpreted as evidence
##' for a rate shift in an absolute sense. As explained on the website,
##' they are a marginal odds ratio. This function is provided primarily
##' for the purpose of distinguishing core and non-core shifts.
##'
##' @return A object of class \code{phylo} but where each branch length is
##' equal to the marginal shift odds on each branch.
##'
##' @author Dan Rabosky
##'
##' @seealso \code{\link{getBranchShiftPriors}},
##' \code{\link{distinctShiftConfigurations}},
##' \code{\link{credibleShiftSet}}
##'
##' @examples
##' data(whales, events.whales)
##' ed <- getEventData(whales, events.whales, burnin=0.1, nsamples=500)
##' marginalOddsRatioBranches(ed, expectedNumberOfShifts = 1)
##'
##' @export
marginalOddsRatioBranches <- function(ephy, expectedNumberOfShifts) {
tree_post <- marginalShiftProbsTree(ephy);
tree_prior <- getBranchShiftPriors(as.phylo.bammdata(ephy), expectedNumberOfShifts);
post_shift <- tree_post$edge.length;
prior_shift <- tree_prior$edge.length;
oddsratio <- post_shift / prior_shift;
tree_post$edge.length <- oddsratio;
return(tree_post);
}
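# A minimal sketch (hypothetical helper): branches whose marginal odds ratio
# exceeds some cutoff are candidates for the "core" shifts discussed above.
coreShiftBranches <- function(ephy, expectedNumberOfShifts, cutoff = 5) {
	tre <- marginalOddsRatioBranches(ephy, expectedNumberOfShifts);
	# nodes subtending branches with high posterior : prior odds of a shift
	tre$edge[tre$edge.length >= cutoff, 2];
}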
# --- end of R/marginalOddsRatioBranches.R ---
#############################################################
#
# marginalShiftProbsTree(....)
#
# Args: ephy = object of class 'bammdata'
#
# Returns: a phylogenetic tree, but where each
# branch length (edge length) is equal to the
#				marginal probability of a shift occurring
# on that particular branch.
#
##' @title Branch-specific rate shift probabilities
##'
##' @description \code{marginalShiftProbsTree} computes a version of a
##' phylogenetic tree where each branch length is equal to the marginal
##' probability that a shift occurred on a particular branch. The
##' \code{cumulativeShiftProbsTree} includes the cumulative probability
##' that a shift occurred on a given branch. See details.
##'
##' @param ephy An object of class \code{bammdata}.
##'
##' @details The \emph{marginal shift probability tree} is a copy of the
##' target phylogeny, but where each branch length is equal to the
##' branch-specific marginal probability that a rate-shift occurred on the
##' focal branch. For example, a branch length of 0.333 implies that 1/3
##' of all samples from the posterior had a rate shift on the focal branch.
##'
##' \bold{Note:} It is highly inaccurate to use marginal shift
##' probabilities as a measure of whether diversification rate
##' heterogeneity occurs within a given dataset. Consider the following
##' example. Suppose you have a tree with topology (A, (B, C)). You find a
##' marginal shift probability of 0.5 on the branch leading to clade C,
##' and also a marginal shift probability of 0.5 on the branch leading to
##' clade BC. Even though the marginal shift probabilities appear low, it
##' may be the case that the joint probability of a shift occurring on
##' \emph{either} the branch leading to C or BC is 1.0. Hence, you could
##' be extremely confident (posterior probabilities approaching 1.0) in
##' rate heterogeneity, yet find that no single branch has a particularly
##' high marginal shift probability. In fact, this is exactly what we
##' expect in most real datasets, because there is rarely enough signal to
##' strongly support the occurrence of a shift on any particular branch.
##'
##' The \emph{cumulative shift probability tree} is a copy of the target
##' phylogeny but where branch lengths are equal to the cumulative
##' probability that a rate shift occurred somewhere on the path between
##' the root and the focal branch. A branch length equal to 0.0 implies
##' that the branch in question has evolutionary rate dynamics that are
##' shared with the evolutionary process starting at the root of the tree.
##' A branch length of 1.0 implies that, with posterior probability 1.0,
##' the rate dynamics on a branch are decoupled from the "root process".
##'
##' @return An object of class \code{phylo}, but with branch lengths equal to
##' the marginal or cumulative shift probabilities.
##'
##' @author Dan Rabosky
##'
##' @seealso \code{\link{maximumShiftCredibility}}
##'
##' @references \url{http://bamm-project.org/}
##'
##' @examples
##' data(whales)
##' data(events.whales)
##' ed <- getEventData(whales, events.whales, nsamples = 500)
##'
##' # computing the marginal shift probs tree:
##' mst <- marginalShiftProbsTree(ed)
##'
##' # The cumulative shift probs tree:
##' cst <- cumulativeShiftProbsTree(ed)
##'
##' #compare the two types of shift trees side-by-side:
##' plot.new()
##' par(mfrow=c(1,2))
##' plot.phylo(mst, no.margin=TRUE, show.tip.label=FALSE)
##' plot.phylo(cst, no.margin=TRUE, show.tip.label=FALSE)
##' @rdname ShiftProbsTree
##' @keywords graphics
##' @export
marginalShiftProbsTree <- function(ephy) {
if (!inherits(ephy, 'bammdata')) {
stop("Object ephy must be of class bammdata\n");
}
shiftvec <- numeric(length(ephy$edge.length));
rootnode <- length(ephy$tip.label) + 1;
for (i in 1:length(ephy$eventData)) {
hasShift <- ephy$edge[,2] %in% ephy$eventData[[i]]$node;
shiftvec[hasShift] <- shiftvec[hasShift] + rep(1, sum(hasShift));
}
shiftvec <- shiftvec / length(ephy$eventData);
newphy <- as.phylo.bammdata(ephy);
newphy$edge.length <- shiftvec;
return(newphy);
}
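# A hedged toy illustration (invented samples, not package API) of the
# marginal-vs-joint caveat documented above: if every posterior sample
# places a shift on exactly one of two branches, each branch's marginal
# shift probability is only 0.5, yet the probability that a shift
# occurred on at least one of them is 1.0.
shiftNodesPerSample <- list("C", "BC", "C", "BC");
mean(sapply(shiftNodesPerSample, function(s) "C" %in% s));   # 0.5
mean(sapply(shiftNodesPerSample, function(s) "BC" %in% s));  # 0.5
mean(sapply(shiftNodesPerSample, function(s) any(c("C", "BC") %in% s))); # 1.0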
#############################################################
#
# maximumShiftCredibility(....)
#
# Args: ephy = object of class 'bammdata'
#
#
# maximize = 'sum', = sum of branch probabilities for each tree
# 'product', = product of branch probabilities for each tree
#
# Returns: - bestconfigs: a list of length equal to the number of
# unique shift configurations in the maximum shift
# credibility set. Each element is a vector of sample
# indices from the 'bammdata' object with identical
# shift configurations.
#
# - A vector of optimality scores for all other samples
# in posterior from the 'bammdata' object.
#
# - sampleindex: a representative index for samples from each
# set of unique shift configurations. The length of this vector
#		is equal to the length of the bestconfigs list. If this vector were
# sampleindex = c(2, 20, 50), this would mean that there are 3 distinct
# sets of shift configurations with equal credibility under the optimality
# criterion. More commonly, a single shift configuration will be dominant, and
# although the length of bestconfigs[[1]] may be greater than 1, the sampleindex
# vector will contain a single representative event from that set.
#
# See example file.
# This is analogous to the maximum clade credibility tree from a
# Bayesian phylogenetic analysis.
##' @title Estimate maximum shift credibility configuration
##'
##' @description This is one estimate of the "best" rate shift configuration,
##' considering only those shift configurations that were actually sampled
##' using \code{BAMM}'s reversible jump MCMC simulator. This is analogous
##' to the "maximum clade credibility tree" from a Bayesian phylogenetic
##' analysis. It is not necessarily the same as the shift configuration
##' with the maximum a posteriori probability.
##'
##' @param ephy An object of class \code{bammdata}.
##' @param maximize Maximize the marginal probability of the product or sum of
##' branch-specific shifts.
##'
##' @details This is one point estimate of the overall "best" rate shift
##' configuration. Following an MCMC simulation, the marginal shift
##' probabilities on each individual branch are computed using
##' \code{\link{marginalShiftProbsTree}}. The shift configuration that
##' maximizes the product (or sum, if specified) of these marginal
##' branch-specific shift probabilities is the \emph{maximum shift
##' credibility configuration}.
##'
##' This option is only recommended if you have no clear "winner" in your
##' credible set of shift configurations (see
##' \code{\link{credibleShiftSet}}). If you have a number of
##' largely-equiprobable shift configurations in your 95\% credible set,
##' you may wish to try this function as an alternative for identifying a
##' single best shift configuration. Otherwise, it is recommended that you
##' present the shift configuration with the maximum a posteriori
##' probability (see \code{\link{getBestShiftConfiguration}}).
##'
##' @return A list with the following components:
##' \itemize{
##' \item bestconfigs: A vector of the index values of MCMC samples
##' with shift configurations equal to the maximum. Usually, more
##' than one state sampled during the MCMC simulation will have an
##' identical (maximized) marginal probability. All samples given
##' in this vector will have an identical shift configuration.
##' \item scores: The optimality score (product or sum of marginal
##' shift probabilities) for all sampled shift configurations in
##' 			the \code{bammdata} object.
##' \item optimalityType: Whether the product or sum of marginal
##' shift probabilities was used to compute the maximum shift
##' credibility configuration.
##' \item sampleindex: A representative sample that is equal to the
##' maximum shift credibility configuration (e.g., this can be
##' plotted with \code{\link{addBAMMshifts}}).
##' }
##'
##' @author Dan Rabosky
##'
##' @seealso \code{\link{marginalShiftProbsTree}},
##' \code{\link{addBAMMshifts}}, \code{\link{cumulativeShiftProbsTree}},
##' \code{\link{credibleShiftSet}},
##' \code{\link{getBestShiftConfiguration}}
##'
##' @examples
##' data(whales, events.whales)
##' ed <- getEventData(whales, events.whales, burnin=0.25, nsamples=500)
##' best_config <- maximumShiftCredibility(ed)
##' plot(ed)
##' addBAMMshifts(ed, method='phylogram', index=best_config$sampleindex)
##' @keywords manip graphics
##' @export
maximumShiftCredibility <- function(ephy, maximize = 'product') {
if (!inherits(ephy, 'bammdata')) {
stop("Object ephy must be of class bammdata\n");
}
probvec <- numeric(length(ephy$eventData));
mtree <- marginalShiftProbsTree(ephy);
#mtree$edge.length[mtree$edge.length < threshold] <- 0;
px <- mtree$edge.length;
ttx <- table(ephy$numberEvents) / length(ephy$numberEvents);
for (i in 1:length(ephy$eventData)) {
# posterior probabilities here:
proc_prob <- ttx[as.character(ephy$numberEvents[i])];
hasShift <- ephy$edge[,2] %in% ephy$eventData[[i]]$node;
branchprobs <- (hasShift)*px + (!hasShift)*(1 - px) ;
if (maximize == 'product') {
probvec[i] <- log(proc_prob) + sum(log(branchprobs));
} else if (maximize == 'sum') {
probvec[i] <- proc_prob * sum(branchprobs);
} else {
stop("Unsupported optimize criterion in maximumShiftCredibilityTree");
}
}
best <- which(probvec == max(probvec));
# Now test for multiple trees with same log-prob:
bestconfigs <- list();
index <- 0;
while (length(best) > 0) {
index <- index + 1;
lv <- logical(length = length(best));
for (i in 1:length(best)) {
lv[i] <- areEventConfigurationsIdentical(ephy, best[1], best[i]);
}
bestconfigs[[index]] <- best[lv];
best <- best[!lv];
}
sampleindex <- numeric(length(bestconfigs));
for (i in 1:length(bestconfigs)) {
sampleindex[i] <- bestconfigs[[i]][1];
}
obj <- list();
obj$bestconfigs <- bestconfigs;
obj$scores <- probvec;
obj$optimalityType = maximize;
obj$sampleindex <- sampleindex;
return(obj);
}
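# Hedged sketch, with made-up numbers, of the 'product' optimality score
# computed above: the posterior probability of the sample's shift count
# times a per-branch term that equals the branch's marginal shift
# probability if the sample has a shift there and its complement
# otherwise, accumulated on the log scale.
px_demo <- c(0.70, 0.10, 0.05);          # marginal shift probs for 3 branches
hasShift_demo <- c(TRUE, FALSE, FALSE);  # this sample shifts on branch 1 only
proc_prob_demo <- 0.6;                   # posterior prob of this shift count
log(proc_prob_demo) + sum(log(hasShift_demo * px_demo + (!hasShift_demo) * (1 - px_demo)));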
mkdtsegsPhylo <- function(x, tau, edge) {
tol <- 0.0001;
xremainder <- numeric(max(edge[,1]));
dtsegs <- vector("list",nrow(edge));
for (i in 1:nrow(x)) {
if (xremainder[edge[i,1]] > 0) {
if (x[i,1]+xremainder[edge[i,1]] > x[i,3]) {
xremainder[edge[i,2]] = x[i,1]+xremainder[edge[i,1]]-x[i,3];
xx <- x[i,1];
yy <- x[i,2];
}
else {
xx <- seq(x[i,1]+xremainder[edge[i,1]],x[i,3],tau);
xx <- c(x[i,1],xx);
yy <- rep(x[i,2],length(xx));
}
}
else {
xx <- seq(x[i,1],x[i,3],tau);
yy <- rep(x[i,2],length(xx));
}
if (length(xx) > 1) {
if (x[i,3] - tail(xx,1) > tol) {
xremainder[edge[i,2]] <- tau - (x[i,3]-tail(xx,1));
xx <- c(xx,x[i,3]);
yy <- c(yy,x[i,2]);
}
xx <- rep(xx,each=2);
xx <- xx[-c(1,length(xx))];
xx <- matrix(xx,ncol=2,byrow=TRUE);
yy <- rep(yy,each=2);
yy <- yy[-c(1,length(yy))];
yy <- matrix(yy,ncol=2,byrow=TRUE);
segs <- cbind(xx,yy,rep(edge[i,2],nrow(xx)));
}
else {
if (xremainder[edge[i,1]] == 0) {
xremainder[edge[i,2]] <- tau - (x[i,3]-tail(xx,1));
}
segs <- matrix(c(x[i,1],x[i,3],x[i,2],x[i,4],edge[i,2]),nrow=1,ncol=5);
}
dtsegs[[i]] <- segs;
}
return(do.call(rbind,dtsegs));
}
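# A stand-alone sketch (hypothetical values) of the discretization idea
# above: a branch spanning [x0, x1] is cut into steps of width tau, a
# short terminal piece is appended, and the shortfall is stored so the
# descendant branch can start its grid mid-step, keeping segment widths
# uniform across nodes.
x0 <- 0; x1 <- 0.37; tau <- 0.1;
xx <- seq(x0, x1, tau);                  # 0.0 0.1 0.2 0.3
remainder <- tau - (x1 - tail(xx, 1));   # 0.03, carried to the child branch
cbind(start = head(c(xx, x1), -1), end = c(xx[-1], x1));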
# OLD VERSION
# mkdtsegs = function(x,tau,phy,tH)
# {
# #bn = sqrt((x[3]-x[1])^2 + (x[4]-x[2])^2);
# #len = bn/tau; if (len %% 1 == 0) len = len + 1;
# len = (phy$end[match(x[5],phy$edge[,2])]/tH-phy$begin[match(x[5],phy$edge[,2])]/tH)/tau; if (len %% 1 == 0) len = len + 1;
# j = seq(x[1],x[3],length.out=len);
# if(length(j) == 1) return(matrix(x[c(1,3,2,4,5)],nrow=1));
# k = seq(x[2],x[4],length.out = len);
# j = rep(j,each=2); j = j[-c(1,length(j))];
# j = matrix(j,ncol=2,byrow=TRUE);
# k = rep(k,each=2); k = k[-c(1,length(k))];
# k = matrix(k,ncol=2,byrow=TRUE);
# l = matrix(rep(x[5],nrow(j)),ncol=1);
# return(cbind(j,k,l));
# }
mkdtsegsPolar <- function(x, tau, edge) {
tol <- 0.0001;
xremainder <- numeric(max(edge[,1]));
dtsegs <- vector("list", nrow(edge));
for (i in 1:nrow(x)) {
xtol <- abs(tol*cos(x[i,5]));
d <- sqrt((x[i,3]-x[i,1])^2+(x[i,4]-x[i,2])^2);
if (xremainder[edge[i,1]] > 0) {
if ((x[i,5] > pi/2 && x[i,5] < 3*pi/2) && x[i,5] != pi) {
if (x[i,1]+xremainder[edge[i,1]]*cos(x[i,5]) < x[i,3]) {
xremainder[edge[i,2]] <- abs(xremainder[edge[i,1]]-d);
xx <- x[i,1];
yy <- x[i,2];
}
else {
xx <- seq(x[i,1]+xremainder[edge[i,1]]*cos(x[i,5]),x[i,3],tau*cos(x[i,5]));
yy <- xx*tan(x[i,5]);
xx <- c(x[i,1],xx);
yy <- c(x[i,2],yy);
}
}
else if ((x[i,5] < pi/2 || x[i,5] > 3*pi/2) && x[i,5] != 0) {
if (x[i,1]+xremainder[edge[i,1]]*cos(x[i,5]) > x[i,3]) {
xremainder[edge[i,2]] <- abs(xremainder[edge[i,1]]-d);
xx <- x[i,1];
yy <- x[i,2];
}
else {
xx <- seq(x[i,1]+xremainder[edge[i,1]]*cos(x[i,5]),x[i,3],tau*cos(x[i,5]));
yy <- xx*tan(x[i,5]);
xx <- c(x[i,1],xx);
yy <- c(x[i,2],yy);
}
}
else if (x[i,5] == pi/2) {
if (x[i,2]+xremainder[edge[i,1]] > x[i,4]) {
xremainder[edge[i,2]] <- abs(xremainder[edge[i,1]]-d);
xx <- x[i,1];
yy <- x[i,2];
}
else {
yy <- seq(x[i,2]+xremainder[edge[i,1]],x[i,4],tau);
yy <- c(x[i,2],yy);
xx <- rep(x[i,1],length(yy));
}
}
else if (x[i,5] == 3*pi/2) {
if (x[i,2]-xremainder[edge[i,1]] < x[i,4]) {
xremainder[edge[i,2]] <- abs(xremainder[edge[i,1]]-d);
xx <- x[i,1];
yy <- x[i,2];
}
else {
yy <- seq(x[i,2]-xremainder[edge[i,1]],x[i,4],-tau);
yy <- c(x[i,2],yy);
xx <- rep(x[i,1],length(yy));
}
}
else if (x[i,5] == pi) {
if (x[i,1]-xremainder[edge[i,1]] < x[i,3]) {
xremainder[edge[i,2]] <- abs(xremainder[edge[i,1]]-d);
xx <- x[i,1];
yy <- x[i,2];
}
else {
xx <- seq(x[i,1]-xremainder[edge[i,1]],x[i,3],-tau);
xx <- c(x[i,1],xx);
yy <- rep(x[i,2],length(xx));
}
}
else {
if (x[i,1]+xremainder[edge[i,1]] > x[i,3]) {
xremainder[edge[i,2]] <- abs(xremainder[edge[i,1]]-d);
xx <- x[i,1];
yy <- x[i,2];
}
else {
xx <- seq(x[i,1]+xremainder[edge[i,1]],x[i,3],tau);
xx <- c(x[i,1],xx);
yy <- rep(x[i,2],length(xx));
}
}
}
else {
if (x[i,5] == 0 || x[i,5] == pi) {
xx <- seq(x[i,1],x[i,3],tau*cos(x[i,5]));
yy <- rep(x[i,2],length(xx));
}
else if (x[i,5] == pi/2 || x[i,5] == 3*pi/2) {
yy <- seq(x[i,2],x[i,4],tau*sin(x[i,5]));
xx <- rep(x[i,1],length(yy));
}
else {
xx <- seq(x[i,1],x[i,3],tau*cos(x[i,5]));
yy <- xx*tan(x[i,5]);
}
}
if (length(xx) > 1) {
d <- sqrt((x[i,3]-tail(xx,1))^2+(x[i,4]-tail(yy,1))^2);
if (abs(x[i,3] - tail(xx,1)) > xtol) {
xremainder[edge[i,2]] <- abs(tau-d);
xx <- c(xx,x[i,3]);
yy <- c(yy,x[i,4]);
}
xx <- rep(xx,each=2);
xx <- xx[-c(1,length(xx))];
xx <- matrix(xx,ncol=2,byrow=TRUE);
yy <- rep(yy,each=2);
yy <- yy[-c(1,length(yy))];
yy <- matrix(yy,ncol=2,byrow=TRUE);
segs <- cbind(xx,yy,rep(edge[i,2],nrow(xx)));
}
else {
d <- sqrt((x[i,3]-tail(xx,1))^2+(x[i,4]-tail(yy,1))^2);
if (xremainder[edge[i,1]] == 0) {
xremainder[edge[i,2]] <- abs(tau-d);
}
segs <- matrix(c(x[i,1],x[i,3],x[i,2],x[i,4],edge[i,2]),nrow=1,ncol=5);
}
dtsegs[[i]] <- segs;
}
return(do.call(rbind, dtsegs));
}
phylogeneticMean <- function(traits, phy, lambda = 1){
if (!is.null(names(traits)))
traits <- traits[phy$tip.label];
if (inherits(phy, 'phylo')) {
vmat <- vcv.phylo(phy);
}else{
vmat <- phy;
}
dd <- diag(vmat);
vmat <- vmat * lambda;
diag(vmat) <- dd;
onev <- matrix(rep(1, length(traits)), nrow=length(traits), ncol = 1);
anc <- as.vector(solve(t(onev)%*% solve(vmat) %*% onev) %*% (t(onev)%*% solve(vmat) %*% traits));
beta <- as.vector((t(traits-anc) %*% solve(vmat) %*% (traits-anc))/length(traits));
return(list(anc = as.vector(anc), beta = as.vector(beta)));
}
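# A quick hedged check of phylogeneticMean() on simulated data; assumes
# the 'ape' package is attached so that rcoal() and vcv.phylo() are
# available. Under Brownian motion, 'anc' is the GLS estimate of the
# root state and 'beta' the ML rate parameter.
library(ape);
set.seed(1);
tree_demo <- rcoal(10);
traits_demo <- setNames(rnorm(10), tree_demo$tip.label);
phylogeneticMean(traits_demo, tree_demo, lambda = 1);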
redirect <- function(coord, theta) {
rot <- function(x, theta) {
R <- matrix(c(cos(theta),-sin(theta),sin(theta),cos(theta)),byrow=TRUE,2,2);
R%*%x;
}
tmp <- coord;
tmp[,1:2] <- t(apply(coord[,1:2],1,rot,theta));
tmp[,3:4] <- t(apply(coord[,3:4],1,rot,theta));
return (tmp);
}
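# Minimal demonstration of redirect() with toy coordinates: rotating the
# start and end points of two horizontal segments by pi/2 maps a
# rightward-growing tree onto an upward-growing one (values are
# approximate because cos(pi/2) is not exactly zero in floating point).
seg_demo <- matrix(c(0, 1, 2, 1,
                     0, 2, 3, 2), nrow = 2, byrow = TRUE); # (x0, y0, x1, y1)
redirect(seg_demo, pi/2); # row 1 is approximately (-1, 0, -1, 2)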
##' @title Plot \code{BAMM}-estimated macroevolutionary rates on a phylogeny
##'
##' @description \code{plot.bammdata} plots a phylogenetic tree from a
##' \code{bammdata} object and colors each branch by the estimated rate of
##' speciation, extinction, or trait evolution. Rates are not assumed to
##' be constant in time, and the function can plot continuously-varying
##' rates along individual branches.
##'
##' @param x An object of class \code{bammdata}.
##' @param tau A numeric indicating the grain size for the calculations. See
##' documentation for \code{\link{dtRates}}.
##' @param method A character string indicating the method for plotting the
##' phylogenetic tree. \code{method = "phylogram"} (default) plots the
##' phylogenetic tree using rectangular coordinates.
##' \code{method = "polar"} plots the phylogenetic tree using polar
##' coordinates.
##' @param xlim A numeric vector of coordinates for the x-axis endpoints.
##' Defaults to \code{NULL}, in which case they are calculated
##' automatically. The x-axis corresponds to time when the phylogeny is
##' plotted facing to the left or to the right. The time at the root
##' equals zero.
##' @param ylim A numeric vector of coordinates for the y-axis endpoints.
##' Defaults to \code{NULL}, in which case they are calculated
##' automatically. Tips are plotted at integer values beginning from zero
##' and stepping by one when the phylogeny is plotted facing to the left
##' or to the right.
##' @param vtheta A numeric indicating the angular separation (in degrees) of
##' the first and last terminal nodes. Ignored if
##' \code{method = "phylogram"}.
##' @param rbf A numeric indicating the length of the root branch as a
##' fraction of total tree height. Ignored if \code{method = "phylogram"}.
##' @param show A logical indicating whether or not to plot the tree. Defaults
##' to \code{TRUE}.
##' @param labels A logical indicating whether or not to plot the tip labels.
##' Defaults to \code{FALSE}.
##' @param legend A logical indicating whether or not to plot a legend for
##' interpreting the mapping of evolutionary rates to colors. Defaults to
##' \code{FALSE}.
##' @param spex A character string indicating what type of macroevolutionary
##' rates should be plotted. "s" (default) indicates speciation rates, "e"
##' indicates extinction rates, and "netdiv" indicates net diversification
##' rates. Ignored if \code{ephy$type = "trait"}.
##' @param lwd A numeric specifying the line width for branches.
##' @param cex A numeric specifying the size of tip labels.
##' @param pal A character string or vector of mode character that describes
##' the color palette. See Details for explanation of options.
##' @param mask An optional integer vector of node numbers specifying branches
##' that will be masked with \code{mask.color} when plotted.
##' @param mask.color The color for the mask.
##' @param colorbreaks A numeric vector of percentiles delimiting the bins for
##' mapping rates to colors. If \code{NULL} (default) bins are calculated
##' from the rates that are passed with the \code{bammdata} object.
##' @param logcolor Logical. Should colors be plotted on a log scale?
##' @param breaksmethod Method used for determining color breaks. See help
##' file for \code{\link{assignColorBreaks}}.
##' @param color.interval Min and max value for the mapping of rates. If
##' \code{NULL}, then min and max are inferred from the data. NA can also
##' be supplied for one of the two values. See details.
##' @param JenksSubset If \code{breaksmethod = "jenks"}, the number of
##' regularly spaced samples to subset from the full rates vector. Only
##' relevant for large datasets. See help file for
##' \code{\link{assignColorBreaks}}.
##' @param par.reset A logical indicating whether or not to reset the
##' graphical parameters when the function exits. Defaults to
##' \code{FALSE}.
##' @param direction A character string. Options are "rightwards",
##' "leftwards", "upwards", and "downwards", which determine the
##' orientation of the tips when the phylogeny plotted.
##' @param \dots Further arguments passed to \code{par}.
##'
##' @details To calculate rates, each branch of the phylogeny is discretized
##' into a number of small segments, and the mean of the marginal
##' posterior density of the rate of speciation/extinction or trait
##' evolution is calculated for each such segment. Rates are mapped to
##' colors such that cool colors represent slow rates and warm colors
##' represent fast rates. When the tree is plotted each of these small
##' segments is plotted so that changes in rates through time and shifts
##' in rates are visible as gradients of color. The \code{spex} argument
##' determines the type of rate that will be calculated. \code{spex = "s"}
##' will plot speciation rates, \code{spex = "e"} will plot extinction
##' rates, and \code{spex = "netdiv"} will plot diversification rates
##' (speciation - extinction). Note that if \code{x$type = "trait"} the
##' \code{spex} argument is ignored and rates of phenotypic evolution are
##' plotted instead. If \code{legend = TRUE} the function will plot a
##' legend that contains the mapping of colors to numerical values.
##'
##' A number of color palettes come built in with \code{BAMMtools}.
##' Color-blind friendly options include:
##' \itemize{
##' \item BrBG
##' \item PiYG
##' \item PRGn
##' \item PuOr
##' \item RdBu
##' \item RdYlBu
##' \item BuOr
##' \item BuOrRd
##' \item DkRdBu
##' \item BuDkOr
##' \item GnPu
##' }
##' Some color-blind unfriendly options include:
##' \itemize{
##' \item RdYlGn
##' \item Spectral
##' \item temperature
##' \item terrain
##' }
##' Some grayscale options include:
##' \itemize{
##' \item grayscale
##' \item revgray
##' }
##' For more information about these color palettes visit
##' \url{https://colorbrewer2.org/} and
##' \url{https://pjbartlein.github.io/datagraphics/color_scales.html} or
##' use the help files of the R packages \code{RColorBrewer} and
##' \code{dichromat}.
##'
##' Additionally, any vector of valid named colors may also be used. The
##' only restriction is that the length of this vector be greater than or
##' equal to three (you can provide a single color, but in this case the
##' entire tree will be assigned the same color). The colors should be
##' ordered from cool to warm as the colors will be mapped from low rates
##' to high rates in the order supplied (e.g. \code{pal=c("darkgreen",
##' "yellow2", "red")}). The option \code{pal = "temperature"} uses the
##' \code{rich.colors} function written by Arni Magnusson for the R
##' package \code{gplots}.
##'
##' Internally \code{plot.bammdata} checks whether or not rates have been
##' calculated by looking for a component named "dtrates" in the
##' \code{bammdata} object. If rates have not been calculated
##' \code{plot.bammdata} calls \code{dtRates} with \code{tau}. Specifying
##' smaller values for \code{tau} will result in smoother-looking rate
##' changes on the tree. Note that smaller values of \code{tau} require
##' more computation. If the \code{colorbreaks} argument
##' is \code{NULL} a map of rates to colors is also made by calling
##' \code{assignColorBreaks} with \code{NCOLORS = 64}. A user supplied
##' \code{colorbreaks} argument can be passed as well. This allows one to
##' plot parts of a tree while preserving the map of rates to colors that
##' was made using rates for the entire tree.
##'
##' If color.interval is defined, then those min and max values override
##' the automatic detection of min and max. This might be useful if some
##' small number of lineages have very high or very low rates, such that
##' the map of colors is being skewed towards these extremes, resulting in
##' other rate variation being drowned out. If specified, the color ramp
##' will be built between these two color.interval values, and the rates
##' outside of the color interval range will be set to the highest and
##' lowest color. The total number of colors will also be increased such
##' that 64 color bins are found within the color.interval.
##'
##' If \code{plot.bammdata} is called repeatedly with the same
##' \code{bammdata} object, computation can be reduced by first calling
##' \code{dtRates} in the global environment.
##'
##' @return Returns (invisibly) a list with three components.
##' \itemize{
##' \item coords: A matrix of plot coordinates. Rows correspond to
##' branches. Columns 1-2 are starting (x,y) coordinates of each
##' branch and columns 3-4 are ending (x,y) coordinates of each
##' branch. If \code{method = "polar"} a fifth column gives the
##' angle(in radians) of each branch.
##' \item colorbreaks: A vector of percentiles used to group
##' macroevolutionary rates into color bins.
##' \item colordens: A matrix of the kernel density estimates (column
##' 2) of evolutionary rates (column 1) and the color (column 3)
##' corresponding to each rate value.
##' }
##'
##' @source \url{https://colorbrewer2.org/},
##' \url{https://pjbartlein.github.io/datagraphics/color_scales.html}
##'
##' @author Mike Grundler, Pascal Title
##'
##' @seealso \code{\link{dtRates}}, \code{\link{addBAMMshifts}},
##' \code{\link{assignColorBreaks}}, \code{\link{subtreeBAMM}},
##' \code{\link{colorRampPalette}}
##'
##' @examples
##' data(whales, events.whales)
##' ed <- getEventData(whales, events.whales, burnin=0.25, nsamples=500)
##'
##' # The first call to plot.bammdata
##' # No calculations or assignments of rates have been made
##' plot(ed, lwd = 3, spex = "s") # calls dtRates & assignColorBreaks
##'
##' # Compare the different color breaks methods
##' par(mfrow=c(1,3))
##' plot(ed, lwd = 3, spex = "s", breaksmethod = "linear")
##' title(main="linear")
##' plot(ed, lwd = 3, spex = "s", breaksmethod = "quantile")
##' title(main="quantile")
##' plot(ed, lwd = 3, spex = "s", breaksmethod = "jenks")
##' title(main="jenks")
##'
##' \dontrun{
##' # now plot.bammdata no longer calls dtRates
##' ed <- dtRates(ed, tau = 0.01)
##' xx <- plot(ed, lwd = 3, spex = "s")
##'
##' # you can plot subtrees while preserving the original
##' # rates to colors map by passing the colorbreaks object as an argument
##' sed <- subtreeBAMM(ed, node = 103)
##' plot(sed, lwd = 3, colorbreaks = xx$colorbreaks)
##' sed <- subtreeBAMM(ed, node = 140)
##' plot(sed, lwd = 3, colorbreaks = xx$colorbreaks)
##' # note how if we do not pass colorbreaks the map is
##' # no longer relative to the rest of the tree and the plot is quite
##' # distinct from the original
##' plot(sed, lwd = 3)
##'
##' # if you want to change the value of tau and the rates to colors map for
##' # the entire tree
##' ed <- dtRates(ed, tau = 0.002)
##' xx <- plot(ed, lwd = 3, spex = "s")
##' # now you can re-plot the subtrees using this finer tau partition
##' sed <- subtreeBAMM(ed, node = 103)
##' sed <- dtRates(sed, 0.002)
##' plot(sed, lwd = 3, colorbreaks = xx$colorbreaks)
##' sed <- subtreeBAMM(ed, node = 140)
##' sed <- dtRates(sed, 0.002)
##' plot(sed, lwd = 3, colorbreaks = xx$colorbreaks)
##'
##' # multi-panel plotting and adding shifts of specific posterior samples
##' par(mfrow=c(2,3))
##' samples <- sample(1:length(ed$eventData), 6)
##' ed <- dtRates(ed, 0.005)
##' # individual plots will have a color map relative to the mean
##' xx <- plot(ed, show=FALSE)
##' for (i in 1:6) {
##' ed <- dtRates(ed, 0.005, samples[i])
##' plot(ed, colorbreaks=xx$colorbreaks)
##' addBAMMshifts(ed,index=samples[i],method="phylogram", par.reset=FALSE)
##' }
##' dev.off()
##'
##' # color options
##' ed <- dtRates(ed,0.01)
##' plot(ed, pal="temperature",lwd=3)
##' plot(ed, pal="terrain",lwd=3)
##' plot(ed, pal=c("darkgreen","yellow2","red"),lwd=3)
##' plot(ed,method="polar",pal="Spectral", lwd=3)
##' plot(ed,method="polar",pal="RdYlBu", lwd=3)}
##' @keywords models graphics
##' @rdname plot
##' @aliases plot.bammdata
##' @export
##' @export plot.bammdata
plot.bammdata <- function (x, tau = 0.01, method = "phylogram", xlim = NULL, ylim = NULL, vtheta = 5, rbf = 0.001, show = TRUE, labels = FALSE, legend = FALSE, spex = "s", lwd = 1, cex = 1, pal = "RdYlBu", mask = integer(0), mask.color = gray(0.5), colorbreaks = NULL, logcolor = FALSE, breaksmethod = "linear", color.interval = NULL, JenksSubset = 20000, par.reset = FALSE, direction = "rightwards", ...) {
if (inherits(x, "bammdata")) {
if (attributes(x)$order != "cladewise") {
stop("Function requires tree in 'cladewise' order");
}
phy <- as.phylo.bammdata(x);
}
	else stop("Object x must be of class bammdata");
if (!spex %in% c('s','e','netdiv')) {
stop("spex must be 's', 'e' or 'netdiv'.");
}
if (length(pal) == 1 && !pal %in% names(get("palettes", envir=.colorEnv)) && pal != "temperature" && pal != "terrain")
pal <- rep(pal, 3)
else if (length(pal) == 2)
pal <- c(pal, pal[2]);
	if (breaksmethod == 'linear' && !is.null(color.interval)) {
if (length(color.interval) != 2) {
stop("color.interval must be a vector of 2 numeric values.");
}
}
if (!is.binary.phylo(phy)) {
stop("Function requires fully bifurcating tree");
}
if (any(phy$edge.length == 0)) {
warning("Tree contains zero length branches. Rates for these will be NA and coerced to zero");
}
if (!("dtrates" %in% names(x))) {
x <- dtRates(x, tau);
}
NCOLORS <- 64;
if (!is.null(color.interval)) {
# change the number of breaks such that the range of color.interval
# is equivalent in terms of number of colors to the full range
# this way we preserve good resolution
# Here we will ensure that NCOLORS bins occur within the color.interval
if (x$type == "trait") {
ratesRange <- range(x$dtrates$rates);
} else if (x$type == "diversification") {
if (tolower(spex) == "s") {
ratesRange <- range(x$dtrates$rates[[1]]);
} else if (tolower(spex) == "e") {
ratesRange <- range(x$dtrates$rates[[2]]);
} else if (tolower(spex) == "netdiv") {
ratesRange <- range(x$dtrates$rates[[1]] - x$dtrates$rates[[2]]);
}
}
if (all(!is.na(color.interval))) {
brks <- seq(min(color.interval[1], ratesRange[1]), max(color.interval[2], ratesRange[2]), length.out = (NCOLORS+1));
intervalLength <- length(which.min(abs(color.interval[1] - brks)) : which.min(abs(color.interval[2] - brks)));
} else if (is.na(color.interval[1])) {
brks <- seq(ratesRange[1], max(color.interval[2], ratesRange[2]), length.out = (NCOLORS+1));
intervalLength <- length(1 : which.min(abs(color.interval[2] - brks)));
} else if (is.na(color.interval[2])) {
brks <- seq(min(color.interval[1], ratesRange[1]), ratesRange[2], length.out = (NCOLORS+1));
intervalLength <- length(which.min(abs(color.interval[1] - brks)) : length(brks));
}
NCOLORS <- round((NCOLORS ^ 2) / intervalLength)
}
if (is.null(colorbreaks)) {
colorbreaks <- assignColorBreaks(x$dtrates$rates, NCOLORS, spex, logcolor, breaksmethod, JenksSubset);
}
if (x$type == "trait") {
colorobj <- colorMap(x$dtrates$rates, pal, colorbreaks, logcolor, color.interval);
}
else if (x$type == "diversification") {
if (tolower(spex) == "s") {
colorobj <- colorMap(x$dtrates$rates[[1]], pal, colorbreaks, logcolor, color.interval);
}
else if (tolower(spex) == "e") {
colorobj <- colorMap(x$dtrates$rates[[2]], pal, colorbreaks, logcolor, color.interval);
}
else if (tolower(spex) == "netdiv") {
colorobj <- colorMap(x$dtrates$rates[[1]] - x$dtrates$rates[[2]], pal, colorbreaks, logcolor, color.interval);
}
}
else {
stop("Unrecognized/corrupt bammdata class. Type does not equal 'trait' or 'diversification'");
}
edge.color <- colorobj$cols;
# if (is.ultrametric(phy))
# tH <- max(branching.times(phy))
# else
# tH <- max(NU.branching.times(phy));
tH <- max(x$end);
phy$begin <- x$begin;
phy$end <- x$end;
tau <- x$dtrates$tau;
if (method == "polar") {
ret <- setPolarTreeCoords(phy, vtheta, rbf);
rb <- tH * rbf;
p <- mkdtsegsPolar(ret$segs[-1,], tau, x$edge);
}
else if (method == "phylogram") {
ret <- setPhyloTreeCoords(phy);
p <- mkdtsegsPhylo(ret$segs[-1,], tau, x$edge);
}
else {
stop("Unimplemented method");
}
x0 <- c(ret$segs[1,1], p[, 1]);
x1 <- c(ret$segs[1,3], p[, 2]);
y0 <- c(ret$segs[1,2], p[, 3]);
y1 <- c(ret$segs[1,4], p[, 4]);
offset <- table(p[, 5])[as.character(unique(p[, 5]))];
if (length(mask)) {
edge.color[p[,5] %in% mask] <- mask.color;
}
arc.color <- c(edge.color[1], edge.color[match(unique(p[, 5]), p[, 5]) + offset]);
edge.color <- c(edge.color[1], edge.color);
if (show) {
op <- par(no.readonly = TRUE);
if (length(list(...))) {
par(...);
}
if (legend) {
#par(fig=c(0,0.9,0,1));
par(mar = c(5, 4, 4, 5))
}
plot.new();
ofs <- 0;
if (labels) {
if (method == "phylogram")
ofs <- max(nchar(phy$tip.label) * 0.03 * cex * tH)
else
ofs <- max(nchar(phy$tip.label) * 0.03 * cex);
}
if (method == "polar") {
if (is.null(xlim) || is.null(ylim)) {
if (is.null(xlim))
xlim = c(-1, 1) + c(-rb, rb) + c(-ofs, ofs)
if (is.null(ylim))
ylim = c(-1, 1) + c(-rb, rb) + c(-ofs, ofs)
}
plot.window(xlim = xlim, ylim = ylim, asp = 1);
segments(x0, y0, x1, y1, col = edge.color, lwd = lwd, lend = 2);
arc(0, 0, ret$arcs[, 1], ret$arcs[, 2], c(rb, rb + phy$end/tH), border = arc.color, lwd = lwd);
if (labels) {
for (k in 1:length(phy$tip.label)) {
text(ret$segs[-1, ][phy$edge[, 2] == k, 3],ret$segs[-1, ][phy$edge[, 2] == k, 4], phy$tip.label[k],cex = cex, srt = (180/pi) * ret$arcs[-1,][phy$edge[, 2] == k, 1], adj = c(0, NA));
}
}
}
if (method == "phylogram") {
direction <- match.arg(direction, c("rightwards","leftwards","downwards","upwards"));
if (direction == "rightwards") {
bars <- redirect(cbind(x0,y0,x1,y1),0);
arcs <- redirect(ret$arcs,0);
bars[,c(1,3)] <- tH * bars[,c(1,3)];
arcs[,c(1,3)] <- tH * arcs[,c(1,3)];
# xlim <- c(0, 1 + ofs);
# ylim <- c(0, phy$Nnode * 1/(phy$Nnode + 1));
ret$segs[-1, c(1,3)] <- tH * ret$segs[-1, c(1,3)];
}
else if (direction == "leftwards") {
bars <- redirect(cbind(x0,y0,x1,y1),pi);
bars[,c(2,4)] <- abs(bars[,c(2,4)]);
arcs <- redirect(ret$arcs,pi);
arcs[,c(2,4)] <- abs(arcs[,c(2,4)]);
bars[,c(1,3)] <- tH * bars[,c(1,3)];
arcs[,c(1,3)] <- tH * arcs[,c(1,3)];
ret$segs[-1, c(1,3)] <- -tH * ret$segs[-1, c(1,3)];
# xlim <- rev(-1*c(0, 1 + ofs));
# ylim <- c(0, phy$Nnode * 1/(phy$Nnode + 1));
}
else if (direction == "downwards") {
bars <- redirect(cbind(x0,y0,x1,y1),-pi/2);
arcs <- redirect(ret$arcs,-pi/2);
bars[,c(2,4)] <- tH * bars[,c(2,4)];
arcs[,c(2,4)] <- tH * arcs[,c(2,4)];
ret$segs <- redirect(ret$segs, -pi/2);
ret$segs[,c(2,4)] <- tH * ret$segs[,c(2,4)];
# xlim <- c(0, phy$Nnode * 1/(phy$Nnode + 1));
# ylim <- rev(-1*c(0, 1 + ofs));
}
else if (direction == "upwards") {
bars <- redirect(cbind(x0,y0,x1,y1),pi/2);
bars[,c(1,3)] <- abs(bars[,c(1,3)]);
arcs <- redirect(ret$arcs,pi/2);
arcs[,c(1,3)] <- abs(arcs[,c(1,3)]);
bars[,c(2,4)] <- tH * bars[,c(2,4)];
arcs[,c(2,4)] <- tH * arcs[,c(2,4)];
ret$segs <- redirect(ret$segs, pi/2);
ret$segs[,c(1,3)] <- abs(ret$segs[,c(1,3)]);
ret$segs[,c(2,4)] <- tH * ret$segs[,c(2,4)];
# xlim <- c(0, phy$Nnode * 1/(phy$Nnode + 1));
# ylim <- c(0, 1 + ofs);
}
if (is.null(xlim) && direction == "rightwards") xlim <- c(0, tH + ofs);
if (is.null(xlim) && direction == "leftwards") xlim <- c(-(tH + ofs), 0);
if (is.null(ylim) && (direction == "rightwards" || direction == "leftwards")) ylim <- c(0, phy$Nnode);
if (is.null(xlim) && (direction == "upwards" || direction == "downwards")) xlim <- c(0, phy$Nnode);
if (is.null(ylim) && direction == "upwards") ylim <- c(0, tH + ofs);
if (is.null(ylim) && direction == "downwards") ylim <- c(-(tH + ofs), 0);
plot.window(xlim = xlim, ylim = ylim);
segments(bars[-1,1], bars[-1,2], bars[-1,3], bars[-1,4], col = edge.color[-1], lwd = lwd, lend = 2);
isTip <- phy$edge[, 2] <= phy$Nnode + 1;
isTip <- c(FALSE, isTip);
segments(arcs[!isTip, 1], arcs[!isTip, 2], arcs[!isTip, 3], arcs[!isTip, 4], col = arc.color[!isTip], lwd = lwd, lend = 2);
if (labels) {
if (direction == "rightwards")
text(ret$segs[isTip, 3], ret$segs[isTip, 4], phy$tip.label[phy$edge[isTip[-1],2]], cex = cex, pos = 4, offset = 0.25)
else if (direction == "leftwards")
text(ret$segs[isTip, 3], ret$segs[isTip, 4], phy$tip.label[phy$edge[isTip[-1],2]], cex = cex, pos = 2, offset = 0.25)
else if (direction == "upwards")
text(ret$segs[isTip, 3], ret$segs[isTip, 4], phy$tip.label[phy$edge[isTip[-1],2]], cex = cex, pos = 4, srt = 90, offset = 0)
else if (direction == "downwards")
text(ret$segs[isTip, 3], ret$segs[isTip, 4], phy$tip.label[phy$edge[isTip[-1],2]], cex = cex, pos = 2, srt = 90, offset = 0);
}
}
# if (legend) {
# #rateLegend(colorobj$colsdensity, logcolor);
# if (is.null(color.interval)) {
# barLegend(pal, colorbreaks, fig=c(0.9,1,0.25,0.75), side=2);
# } else {
# barLegend(pal, colorbreaks, fig=c(0.9,1,0.25,0.75), side=2, colpalette=colorobj$colpalette);
# }
# }
}
index <- order(as.numeric(rownames(ret$segs)));
if (show) {
if (method == "phylogram") {
assign("last_plot.phylo", list(type = "phylogram", direction = direction, Ntip = phy$Nnode + 1, Nnode = phy$Nnode, edge = phy$edge, xx = ret$segs[index, 3], yy = ret$segs[index, 4], pp = par(no.readonly = TRUE)), envir = .PlotPhyloEnv);
} else if (method == "polar") {
assign("last_plot.phylo", list(type = "fan", Ntip = phy$Nnode + 1, Nnode = phy$Nnode, edge = phy$edge, xx = ret$segs[index, 3], yy = ret$segs[index, 4], theta = ret$segs[index, 5], rb = rb, pp = par(no.readonly = TRUE)), envir = .PlotPhyloEnv);
}
if (legend) {
addBAMMlegend(x = list(coords = ret$segs[-1, ], colorbreaks = colorobj$breaks, palette = colorobj$colpalette, colordens = colorobj$colsdensity), location = 'right')
}
}
if (par.reset) {
par(op);
}
invisible(list(coords = ret$segs[-1, ], colorbreaks = colorobj$breaks, palette = colorobj$colpalette, colordens = colorobj$colsdensity));
}
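# Hedged arithmetic sketch, with invented numbers, of the color.interval
# rescaling inside plot.bammdata(): when the requested interval covers
# only part of the full rate range, the bin count is inflated so that
# roughly 64 of the bins still fall inside the interval itself.
NCOLORS_demo <- 64;
brks_demo <- seq(0, 1, length.out = NCOLORS_demo + 1); # full rate range
ci_demo <- c(0.25, 0.75);                              # color.interval
intervalLength_demo <- length(which.min(abs(ci_demo[1] - brks_demo)) : which.min(abs(ci_demo[2] - brks_demo)));
round(NCOLORS_demo^2 / intervalLength_demo);           # 124 total bins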
## show.all.nodes: should plot core and "floater" nodes with different colors
# that can be specified by the user.
# argument to specify layout , e.g., c(2,2) would specify a 2x2 panel?
# maybe print the number of shift configurations above the threshold that were
# not plotted along with their frequency?
#
# add.freq.text = argument to add text to each plot indicating the frequency of the
# shift configuration
##' @title Plot distinct rate shift configurations on a phylogeny
##'
##' @description Plots a random distinct rate shift configuration sampled by
##' \code{BAMM} on a phylogeny.
##'
##' @param x An object of class \code{bammshifts}.
##' @param ephy An object of class \code{bammdata}.
##' @param method A character string for which plotting method to use.
##' "phylogram" uses rectangular coordinates. "polar" uses polar
##' coordinates.
##' @param pal The color palette to use in \code{plot.bammdata}.
##' @param rank The rank of the core shift configuration to plot. For the
##' default (\code{NULL}) a random configuration is chosen.
##' @param index The posterior sample to plot. For the default (\code{NULL})
##' a random sample is chosen.
##' @param spex A character string indicating what type of macroevolutionary
##' rates should be plotted. "s" (default) indicates speciation rates, "e"
##' indicates extinction rates, and 'netdiv' indicates net diversification
##' rates. Ignored if \code{ephy$type = "trait"}.
##' @param legend Logical indicating whether to plot a legend.
##' @param add.freq.text A logical indicating whether the frequency of each
##' sampled shift configuration should be added to each plot.
##' @param logcolor Logical. Should colors be plotted on a log scale?
##' @param breaksmethod Method used for determining color breaks. See help
##' file for \code{\link{assignColorBreaks}}.
##' @param color.interval Min and max value for the mapping of rates. One of
##' the two values can be \code{NA}. See details in
##' \code{\link{plot.bammdata}} for further details.
##' @param JenksSubset If \code{breaksmethod = "jenks"}, the number of
##' regularly-spaced samples to subset from the full rates vector. Only
##' relevant for large datasets. See help file for
##' \code{\link{assignColorBreaks}}.
##' @param \dots Other arguments to \code{plot.bammdata}.
##'
##' @details A rate shift configuration is the set of nodes of the phylogeny
##' where a shift occurs in the macroevolutionary rate dynamic of
##' diversification or trait evolution. Each posterior sample is a
##' potentially distinct rate shift configuration. Different
##' configurations may imply different macroevolutionary scenarios. This
##' function helps visualize the different distinct rate shift
##' configurations sampled by \code{BAMM}.
##'
##' A core shift configuration is defined by a set of nodes that have
##' shift probabilities that are substantially elevated relative to what
##' you expect under the prior alone. These core configurations are
##' inferred in \code{\link{distinctShiftConfigurations}}. It is almost
##' certain that more than one core shift configuration will be sampled by
##' \code{BAMM}. Moreover, each core shift configuration may contain many
##' subconfigurations. A subconfiguration contains the core shift node
##' configuration and zero or more additional shift nodes that occur with
##' low marginal probability.
##'
##' Points are added to the branches subtending the nodes of each rate
##' configuration. The size of the point is proportional to the marginal
##' probability that a shift occurs on a specific branch. If the
##' instantaneous rate at a shift's origin represents an initial increase
##' above the ancestral instantaneous rate the point is red. If the
##' instantaneous rate at a shift's origin represents an initial decrease
##' below the ancestral instantaneous rate the point is blue.
##'
##' @author Mike Grundler, Dan Rabosky
##'
##' @seealso \code{\link{distinctShiftConfigurations}},
##' \code{\link{plot.bammdata}}
##'
##' @references \url{http://bamm-project.org/}
##'
##' @examples
##' data(whales, events.whales)
##'
##' ed <- getEventData(whales, events.whales, burnin=0.25, nsamples=500)
##'
##' sc <- distinctShiftConfigurations(ed, expectedNumberOfShifts = 1,
##' threshold = 5)
##'
##' plot(sc, ed)
##' @aliases plot.bammshifts
##' @export
##' @export plot.bammshifts
plot.bammshifts <- function(x, ephy, method="phylogram", pal="RdYlBu",
rank=NULL, index=NULL, spex="s", legend=TRUE, add.freq.text=TRUE, logcolor=FALSE, breaksmethod="linear", color.interval=NULL, JenksSubset=20000, ...)
{
if (!inherits(x, "bammshifts")) {
stop("arg sc must be of class 'bammshifts'");
}
if (!inherits(ephy, "bammdata")) {
stop("arg ephy must be of class 'bammdata'");
}
if (!spex %in% c('s', 'e', 'netdiv')) {
stop("arg spex must be 's', 'e' or 'netdiv'.")
}
if ((spex == "e" || spex == "netdiv") && ephy$type == "trait") {
warning("arg spex not meaningful for BAMMtrait");
spex <- "s";
}
if (is.null(rank) && is.null(index)) {
rank <- sample.int(length(x$shifts),1);
index <- x$samplesets[[rank]][sample.int(length(x$samplesets[[rank]]),1)];
}
else if (!is.null(rank) && is.null(index)) {
index <- x$samplesets[[rank]][sample.int(length(x$samplesets[[rank]]),1)];
}
else if (is.null(rank) && !is.null(index)) {
rank <- {
			for (i in 1:length(x$samplesets)) {
				if (index %in% x$samplesets[[i]]) {
break;
}
}
i;
}
}
else {
if (index > length(x$samplesets[[rank]])) {
warning("arg index is not relative to the set of posterior samples of the given core shift configuration");
index <- x$samplesets[[rank]][1];
}
else {
index <- x$samplesets[[rank]][index];
}
}
par.reset <- TRUE;
if (legend) {
par.reset <- FALSE;
m <- matrix(c(1,0,1,2,1,0),byrow=TRUE,nrow=3,ncol=2);
layout(m, widths=c(1,0.25));
par(mar=c(7.1,1,7.1,1));
}
ephy <- dtRates(ephy,0.01);
colorbreaks <- assignColorBreaks(ephy$dtrates$rates, spex=spex, logcolor=logcolor, method=breaksmethod, JenksSubset=JenksSubset);
sed <- subsetEventData(ephy, index);
plot.bammdata(sed, method=method, pal=pal, spex=spex, colorbreaks=colorbreaks, par.reset=par.reset, ...);
shiftnodes <- getShiftNodesFromIndex(ephy, index);
shiftnode_parents <- ephy$edge[match(shiftnodes,ephy$edge[,2],nomatch=0), 1];
root <- (shiftnode_parents == (ephy$Nnode + 2));
if (sum(root) > 0) {
isShiftNodeParent <- integer(length(shiftnodes));
isShiftNodeParent[root] <- 1;
isShiftNodeParent[!root] <- sed$eventVectors[[1]][match(shiftnode_parents[!root], ephy$edge[,2])];
}
else {
isShiftNodeParent <- sed$eventVectors[[1]][match(shiftnode_parents, ephy$edge[,2])];
}
isShiftNode <- match(shiftnodes, sed$eventData[[1]]$node);
time <- sed$eventData[[1]][isShiftNode, 2] - sed$eventData[[1]][isShiftNodeParent, 2];
if (spex == "s") {
lam1 <- sed$eventData[[1]][isShiftNodeParent, 3];
lam2 <- sed$eventData[[1]][isShiftNodeParent, 4];
AcDc <- exponentialRate(time, lam1, lam2) > sed$eventData[[1]][isShiftNode, 3];
}
else if (spex == "e") {
mu1 <- sed$eventData[[1]][isShiftNodeParent, 5];
mu2 <- sed$eventData[[1]][isShiftNodeParent, 6];
AcDc <- exponentialRate(time, mu1, mu2) > sed$eventData[[1]][isShiftNode, 5];
}
else if (spex == 'netdiv') {
lam1 <- sed$eventData[[1]][isShiftNodeParent, 3];
lam2 <- sed$eventData[[1]][isShiftNodeParent, 4];
mu1 <- sed$eventData[[1]][isShiftNodeParent, 5];
mu2 <- sed$eventData[[1]][isShiftNodeParent, 6];
AcDc <- (exponentialRate(time, lam1, lam2)-exponentialRate(time, mu1, mu2)) > (sed$eventData[[1]][isShiftNode, 3]-sed$eventData[[1]][isShiftNode, 5]);
}
bg <- rep("blue", length(AcDc));
bg[which(AcDc == FALSE)] <- "red";
cex <- 0.75 + 5 * x$marg.probs[as.character(getShiftNodesFromIndex(ephy, index))];
addBAMMshifts(sed, 1, method, cex=cex, bg=transparentColor(bg, 0.5),par.reset=par.reset);
if (add.freq.text) {
mtext(sprintf("core shift configuration: rank %i of %i", rank, length(x$shifts)),3,line=0);
mtext(sprintf("sampled with frequency f = %.2g",x$frequency[rank]),3,line=-1.25);
mtext(sprintf("showing subconfiguration %i of %i with this rank", match(index,x$samplesets[[rank]]), length(x$samplesets[[rank]])),1,line=-1.25);
}
if (legend) {
par(mar=c(5.1,1.1,2.1,2.1));
plot.new();
plot.window(xlim=c(0,1),ylim=c(0,1.25));
points(rep(0.5,8),rev(seq(0,1.125,length.out=8)),pch=21,bg="white",cex = rev(0.75 + 5*seq(0.125,1,0.125)));
mtext("marginal shift probability",cex=0.75,line=0);
text(x=rep(0.8,8),y=rev(seq(0,1.125,length.out=8)),labels=sprintf("%10.3f",rev(seq(0.125,1,0.125))));
}
}
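# Hedged sketch (invented numbers) of the shift-point coloring rule used
# in plot.bammshifts() above. exponentialRate() is BAMM's time-varying
# rate function; the lambda(t) = lam1 * exp(lam2 * t) form below is
# assumed purely for illustration.
ancestral_rate <- 0.40 * exp(-0.10 * 2.5); # parent process at the shift time
new_rate <- 0.90;                          # instantaneous rate at the shift's origin
if (ancestral_rate > new_rate) "blue (initial decrease)" else "red (initial increase)";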
# plot.bammshifts <- function(sc, ephy, plotmax=9, method='phylogram', pal = 'RdYlBu', spex = "s", add.arrows = TRUE, add.freq.text = TRUE, use.plot.bammdata = TRUE, send2pdf = FALSE, ...){
# if (class(sc) != 'bammshifts') {
# stop('arg sc must be of class "bammshifts"');
# }
# if (class(ephy) != 'bammdata') {
# stop('arg ephy must be of class "bammdata"');
# }
# if (plotmax > 9 && send2pdf == FALSE) {
# plotmax = 9;
# cat("arg plotmax coerced to 9\n");
# }
# mm <- min(c(length(sc$frequency), plotmax));
# if (send2pdf) {
# pdf("shiftconfig.pdf");
# }
# if (mm == 1) {
# par(mfrow=c(1,1));
# } else if (mm <= 2) {
# par(mfrow=c(1,2));
# } else if (mm <= 4) {
# par(mfrow = c(2,2));
# } else if (mm <= 6) {
# par(mfrow=c(2,3));
# } else {
# par(mfrow=c(3,3));
# }
# cat("Omitted", max(length(sc$frequency),mm) - min(length(sc$frequency),mm), "plots\n");
# if (use.plot.bammdata) {
# ephy = dtRates(ephy, 0.01);
# colorbreaks = assignColorBreaks(ephy$dtrates$rates,spex=spex);
# }
# tH = max(branching.times(as.phylo.bammdata(ephy)));
# for (i in 1:mm) {
# tmp <- subsetEventData(ephy, index=sc$sampleset[[i]]);
# par(mar = c(2,2,2,2));
# if (use.plot.bammdata) {
# plot.bammdata(tmp, method, pal=pal, colorbreaks=colorbreaks, ...);
# }
# else {
# if (method=="polar") method = "fan";
# plot.phylo(as.phylo.bammdata(ephy),type=method,show.tip.label=FALSE);
# }
# if (add.freq.text) {
# mtext(paste("f =",signif(sc$frequency[i],2)),3);
# mtext(paste(length(sc$sampleset[[i]]),"subconfigurations"),1,1.5);
# }
# box();
# cex = 2 + 8 * sc$marg.probs[as.character(getShiftNodesFromIndex(ephy,sc$sampleset[[i]][1]))];
# shiftnodes = getShiftNodesFromIndex(ephy,sc$sampleset[[i]][1]);
# shiftnode_parents = ephy$edge[which(ephy$edge[,2] %in% shiftnodes),1];
# isShiftNodeParent = integer(length(shiftnodes));
# root = (shiftnode_parents == ephy$Nnode+2);
# if (sum(root) > 0) {
# isShiftNodeParent[root] = 1;
# }
# if (sum(!root) > 0) {
# isShiftNodeParent[!root] = tmp$eventVectors[[1]][which(ephy$edge[,2] %in% shiftnode_parents[!root])];
# }
# isShiftNode = which(tmp$eventData[[1]]$node %in% shiftnodes);
# AcDc = exponentialRate(tmp$eventData[[1]][isShiftNode, 2]-tmp$eventData[[1]][isShiftNodeParent, 2],tmp$eventData[[1]][isShiftNodeParent,3],tmp$eventData[[1]][isShiftNodeParent,4]) > tmp$eventData[[1]][isShiftNode, 3];
# bg = rep("blue", length(AcDc));
# bg[which(AcDc == FALSE)] = "red";
# addBAMMshifts(tmp, method, 1, cex = cex, bg = transparentColor(bg,0.5), multi=TRUE);
# if (add.arrows) {
# lastPP <- get("last_plot.phylo", envir = .PlotPhyloEnv);
# XX = lastPP$xx[sc$shifts[[i]]];
# YY = lastPP$yy[sc$shifts[[i]]];
# arrows(XX + 0.15*cos(3*pi/4),YY + 0.15*sin(3*pi/4), XX, YY, length=0.1,lwd=2);
# #points(XX, YY, pch = 21, cex = cex, col = 1, bg = transparentColor(bg,0.5));
# }
# }
# if (send2pdf) dev.off();
# }
##' @title Plot credible set of rate shift configurations from \code{BAMM}
##' analysis
##'
##' @description Plots the credible set of rate shift configurations from a
##' \code{BAMM} analysis on a phylogeny.
##'
##' @param x An object of class \code{credibleshiftset}.
##' @param plotmax An integer number of plots to display.
##' @param method A coordinate method to use for plotting. Options are
##' "phylogram" or "polar".
##' @param pal A color palette to use with \code{plot.bammdata}.
##' @param shiftColor Color to use for shift points.
##' @param spex A character string indicating what type of macroevolutionary
##' rates should be plotted. "s" (default) indicates speciation rates, "e"
##' indicates extinction rates, and "netdiv" indicates net diversification
##' rates. Ignored if ephy$type = "trait".
##' @param add.freq.text A logical indicating whether to add the posterior
##' frequency of each shift configuration to the plotting region.
##' @param use.plot.bammdata A logical indicating whether to use
##' \code{plot.bammdata} (\code{TRUE}) or \code{plot.phylo}
##' (\code{FALSE}).
##' @param border A logical indicating whether to frame the plotting region.
##' @param legend A logical indicating whether to plot a legend.
##' @param send2pdf A logical indicating whether to print the figure to a PDF
##' file.
##' @param logcolor A logical indicating whether the rates should be
##' log-transformed.
##' @param breaksmethod Method used for determining color breaks. See help
##' file for \code{\link{assignColorBreaks}}.
##' @param color.interval Min and max value for the mapping of rates. One of
##' the two values can be \code{NA}. See details in
##' \code{\link{plot.bammdata}} for further details.
##' @param JenksSubset If \code{breaksmethod = "jenks"}, the number of
##' regularly spaced samples to subset from the full rates vector. Only
##' relevant for large datasets. See help file for
##' \code{\link{assignColorBreaks}}.
##' @param \dots Further arguments to pass to \code{plot.bammdata}.
##'
##' @details This produces phylorate plots for the \code{plotmax}
##' most-probable shift configurations sampled with \code{BAMM}. Shift
##' configurations are plotted in a single graphics window. The posterior
##' 	probability (frequency) of each rate shift configuration is shown
##' 	(omitted with argument \code{add.freq.text = FALSE}).
##'
##' Points are added to the branches subtending the nodes of each rate
##' configuration. The size of the point is proportional to the marginal
##' probability that a shift occurs on a specific branch.
##'
##' @author Mike Grundler
##'
##' @seealso \code{\link{credibleShiftSet}},
##' \code{\link{distinctShiftConfigurations}},
##' \code{\link{plot.bammdata}}, \code{\link{plot.bammshifts}}
##'
##' @references \url{http://bamm-project.org/}
##'
##' @examples
##' data(events.whales)
##' data(whales)
##' ed <- getEventData(whales, events.whales, nsamples=500)
##' cset <- credibleShiftSet(ed, expectedNumberOfShifts = 1, threshold = 5)
##' plot(cset)
##' @aliases plot.credibleshiftset
##' @export
##' @export plot.credibleshiftset
plot.credibleshiftset <- function(x, plotmax=9, method='phylogram', pal = 'RdYlBu', shiftColor = 'black', spex = "s", add.freq.text = TRUE, use.plot.bammdata = TRUE, border = TRUE, legend = FALSE, send2pdf = FALSE, logcolor=FALSE, breaksmethod='linear', color.interval=NULL, JenksSubset=20000, ...)
{
if (!inherits(x, "credibleshiftset")) {
stop('arg x must be of class "credibleshiftset"');
}
if (!spex %in% c('s', 'e', 'netdiv')) {
stop("arg spex must be 's', 'e' or 'netdiv'. ")
}
if ((spex == "e" || spex == "se") && x$type == "trait") {
warning("arg spex not meaningful for BAMMtrait");
spex <- "s";
}
cset.bamm <- as.bammdata(x);
if (plotmax > 9 && send2pdf == FALSE) {
plotmax <- 9;
cat("arg plotmax coerced to 9\n");
}
mm <- min(x$number.distinct, plotmax);
if (send2pdf) {
pdf("credibleshiftset.pdf");
}
if (mm == 1) {
if (legend) {
m <- matrix(c(1,2),byrow=TRUE,nrow=1,ncol=2);
layout(m,respect=TRUE);
} else {
par(mfrow=c(1,1));
}
} else if (mm <= 2) {
if (legend) {
m <- matrix(c(1,2,3),byrow=TRUE,nrow=1,ncol=3);
layout(m,respect=TRUE);
} else {
par(mfrow=c(1,2));
}
} else if (mm <= 4) {
if (legend) {
m <- matrix(c(1,2,0,3,4,5), byrow=TRUE, nrow=2,ncol=3);
layout(m,respect=TRUE);
} else {
par(mfrow = c(2,2));
}
} else if (mm <= 6) {
if (legend) {
m <- matrix(c(1,2,0,3,4,7,5,6,0),byrow=TRUE,nrow=3,ncol=3);
layout(m,respect=TRUE);
} else {
par(mfrow=c(2,3));
}
} else {
if (legend) {
m <- matrix(c(1,2,3,0,4,5,6,10,7,8,9,0),byrow=TRUE,nrow=3,ncol=4);
layout(m,respect=TRUE);
} else {
par(mfrow=c(3,3));
}
}
cat("Omitted", max(x$number.distinct,mm) - min(x$number.distinct,mm), "plots\n");
if (use.plot.bammdata) {
cset.bamm <- dtRates(cset.bamm, 0.01);
colorbreaks <- assignColorBreaks(cset.bamm$dtrates$rates,spex=spex, logcolor=logcolor, method=breaksmethod, JenksSubset=JenksSubset);
}
for (i in 1:mm) {
sed <- subsetEventData(cset.bamm, index=x$indices[[i]]);
par(mar = c(2,2,2,2));
if (use.plot.bammdata) {
plot.bammdata(sed, method=method, pal=pal, spex=spex, colorbreaks=colorbreaks, par.reset=FALSE, logcolor=logcolor, ...);
}
else {
if (method=="polar") method = "fan";
plot.phylo(as.phylo.bammdata(cset.bamm),type=method,show.tip.label=FALSE);
}
if (add.freq.text) mtext(sprintf("f = %.2g",x$frequency[i]),3);
if (border) box();
#shiftnodes <- getShiftNodesFromIndex(cset.bamm, i);
shiftnodes <- x$shiftnodes[[i]];
# shiftnode_parents <- cset.bamm$edge[match(shiftnodes, cset.bamm$edge[,2],nomatch=0), 1];
# root <- (shiftnode_parents == (cset.bamm$Nnode + 2));
# if (sum(root) > 0) {
# isShiftNodeParent <- integer(length(shiftnodes));
# isShiftNodeParent[root] <- 1;
# isShiftNodeParent[!root] <- sed$eventVectors[[1]][match(shiftnode_parents[!root], cset.bamm$edge[,2])];
# }
# else {
# isShiftNodeParent <- sed$eventVectors[[1]][match(shiftnode_parents, cset.bamm$edge[,2])];
# }
# isShiftNode <- match(shiftnodes, sed$eventData[[1]]$node);
# time <- sed$eventData[[1]][isShiftNode, 2] - sed$eventData[[1]][isShiftNodeParent, 2];
# if (spex == "s") {
# lam1 <- sed$eventData[[1]][isShiftNodeParent, 3];
# lam2 <- sed$eventData[[1]][isShiftNodeParent, 4];
# AcDc <- exponentialRate(time, lam1, lam2) > sed$eventData[[1]][isShiftNode, 3];
# }
# else if (spex == "e") {
# mu1 <- sed$eventData[[1]][isShiftNodeParent, 5];
# mu2 <- sed$eventData[[1]][isShiftNodeParent, 6];
# AcDc <- exponentialRate(time, mu1, mu2) > sed$eventData[[1]][isShiftNode, 5];
# }
# else {
# lam1 <- sed$eventData[[1]][isShiftNodeParent, 3];
# lam2 <- sed$eventData[[1]][isShiftNodeParent, 4];
# mu1 <- sed$eventData[[1]][isShiftNodeParent, 5];
# mu2 <- sed$eventData[[1]][isShiftNodeParent, 6];
# AcDc <- (exponentialRate(time, lam1, lam2)-exponentialRate(time, mu1, mu2)) > (sed$eventData[[1]][isShiftNode, 3]-sed$eventData[[1]][isShiftNode, 5]);
# }
# bg <- rep("blue", length(AcDc));
# bg[which(AcDc == FALSE)] <- "red";
bg <- rep(shiftColor, length(shiftnodes));
cex <- 0.75 + 5 * x$marg.probs[as.character(shiftnodes)];
if (use.plot.bammdata) {
cex <- cex[match(sed$eventData[[1]]$node, shiftnodes, nomatch=0)];
bg <- bg[match(sed$eventData[[1]]$node, shiftnodes, nomatch=0)];
shiftnodes <- shiftnodes[match(sed$eventData[[1]]$node, shiftnodes, nomatch=0)];
addBAMMshifts(sed, 1, method, cex=cex, bg=transparentColor(bg, 0.5), shiftnodes = shiftnodes, par.reset=FALSE);
}
else {
r <- cset.bamm$edge.length[match(shiftnodes,cset.bamm$edge[,2])];
lastPP <- get("last_plot.phylo",envir=.PlotPhyloEnv);
XX <- lastPP$xx[shiftnodes];
YY <- lastPP$yy[shiftnodes];
if (method == "phylogram") {
XX <- XX - 0.5*r;
}
else {
theta <- atan2(YY, XX);
XX <- XX - 0.5*r*cos(theta);
YY <- YY - 0.5*r*sin(theta);
}
points(XX, YY, pch = 21, cex = cex, col = 1, bg = transparentColor(bg,0.5));
}
}
if (legend) {
par(mar=c(2.1,1.1,2.1,2.1));
plot.new();
plot.window(xlim=c(0,1),ylim=c(0,1.25));
points(rep(0.5,8),rev(seq(0,1.125,length.out=8)),pch=21,bg="white",cex = rev(0.75 + 5*seq(0.125,1,0.125)));
mtext("marginal shift probability",cex=0.75,line=0);
text(x=rep(0.8,8),y=rev(seq(0,1.125,length.out=8)),labels=sprintf("%10.3f",rev(seq(0.125,1,0.125))));
}
if (send2pdf) dev.off();
}
##' @title Plot the prior and posterior distribution of shifts
##'
##' @description Generates a barplot of the prior and posterior distributions
##' of the number of shifts.
##'
##' @param mcmc A dataframe of the mcmc_out file from a \code{BAMM} run, or
##' the filename.
##' @param expectedNumberOfShifts Expected number of shifts under the prior.
##' @param burnin The fraction of samples to discard as burn-in.
##' @param priorCol Color for the prior distribution.
##' @param postCol Color for the posterior distribution.
##' @param legendPos Placement of the legend, see \code{\link{legend}}.
##' @param \dots Additional parameters that are passed to
##' \code{\link{barplot}}.
##'
##' @return Invisibly returns a matrix with the probability of each shift
##' number under the prior and the posterior.
##'
##' @author Pascal Title
##'
##' @examples
##' data(mcmc.whales)
##' plotPrior(mcmc.whales, expectedNumberOfShifts = 1, burnin = 0.15)
##' @export
plotPrior <- function(mcmc, expectedNumberOfShifts = 1, burnin = 0.15, priorCol = 'light blue', postCol = 'red', legendPos = 'topright', ...) {
if (!any(inherits(mcmc, c('character', 'data.frame', 'matrix')))) {
stop('mcmc must be either a dataframe or the path to the mcmc_out file.')
}
if (is.character(mcmc)) {
mcmc <- read.csv(mcmc, stringsAsFactors = FALSE)
}
#drop burnin
mcmc2 <- mcmc[floor(burnin * nrow(mcmc)):nrow(mcmc),]
#get prior distribution of shifts
obsK <- seq(from = 0, to = max(mcmc2[,"N_shifts"]), by = 1)
prior <- sapply(obsK, prob.k, poissonRatePrior = 1/expectedNumberOfShifts)
prior <- data.frame(N_shifts = obsK, prob = prior)
#get posterior distribution of shifts
posterior <- sapply(obsK, function(x) length(which(mcmc2[,'N_shifts'] == x))) / nrow(mcmc2)
names(posterior) <- obsK
posterior <- data.frame(N_shifts = names(posterior), prob = posterior)
barplot(prior[,2], names.arg = prior[,1], ylim = c(0, max(c(prior[,2], posterior[,2]))), border = 'black', col = priorCol, xlab = 'n shifts', ...)
barplot(posterior[,2], add = TRUE, border = 'black', col = BAMMtools::transparentColor(postCol, 0.4), axes=FALSE)
legend(x = legendPos, y = NULL, legend = c('prior','posterior'), fill = c(priorCol, BAMMtools::transparentColor(postCol, 0.4)), bty = 'n', cex=1.5)
invisible(cbind(N_shifts = prior$N_shifts, priorProbs = prior$prob, postProbs = posterior$prob))
}
prob.k <- function(k, poissonRatePrior) {
Denom <- (poissonRatePrior + 1) ^ (k + 1)
Prob <- poissonRatePrior / Denom
return(Prob)
}
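# A quick hedged check of prob.k(): with rate r = 1/expectedNumberOfShifts,
# P(k) = r / (r + 1)^(k + 1) is a geometric distribution whose probabilities
# sum to one and whose mean equals expectedNumberOfShifts.
r_demo <- 1;          # corresponds to expectedNumberOfShifts = 1
k_demo <- 0:1000;
p_demo <- sapply(k_demo, prob.k, poissonRatePrior = r_demo);
sum(p_demo)           # ~1
sum(k_demo * p_demo)  # ~1, the expected number of shifts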
#############################################################
#
# plotRateThroughTime <- function(...)
#
# ephy = object of class 'bammdata' or 'bamm-ratematrix'
# if bamm-ratematrix, start.time, end.time, node, nslices, nodetype are not used.
# useMedian = boolean, will plot median if TRUE, mean if FALSE.
# intervals if NULL, no intervals will be plotted, otherwise a vector of quantiles must be supplied (these will define shaded polygons)
# ratetype = autodetects diversification vs traits (based on input object 'type'), if 'auto', defaults to speciation (for diversification) or beta (for traits). Can alternatively specify 'netdiv' or 'extinction'.
# nBins = number of time slices used to generate rates through time
# smooth = boolean whether or not to apply loess smoothing
# smoothParam = loess smoothing parameter, ignored if smooth = F
# opacity = opacity of color for interval polygons
# intervalCol = transparent color for interval polygons
# avgCol = color for mean/median line
# start.time = start time in time before present to be fed to getRateThroughTimeMatrix
# end.time = end time in time before present to be fed to getRateThroughTimeMatrix
# node = if supplied, the clade descended from this node will be used.
# nodetype = supplied to getRateThroughTimeMatrix
# plot = boolean: if TRUE, a plot will be returned, if FALSE, the data for the plot will be returned.
# xticks = number of ticks on the x-axis, automatically inferred if NULL.
# yticks = number of ticks on the y-axis, automatically inferred if NULL.
# xlim = vector of length 2 with min and max times for x axis. X axis is time since present, so if plotting till the present, xlim[2]==0. Can also be 'auto'.
# ylim = vector of length 2 with min and max rates for y axis. Can also be 'auto'.
# add = boolean: should rates be added to an existing plot
#
# + several undocumented args to set plot parameters: mar, cex, xline, yline, etc.
#
##' @title Plot rates through time
##'
##' @description Generates a plot of diversification or phenotypic rate through
##' time with confidence intervals.
##'
##' @param ephy Object of class \code{bammdata} or \code{bamm-ratematrix}.
##' @param useMedian A logical: will plot median if \code{TRUE}, mean if
##' \code{FALSE}.
##' @param intervals If \code{NULL}, no intervals will be plotted, otherwise a
##' vector of quantiles must be supplied (these will define shaded
##' polygons).
##' @param ratetype If 'auto', defaults to speciation (for diversification) or
##' beta (for traits). Can alternatively specify 'netdiv' or 'extinction'.
##' @param nBins Number of time slices used to generate rates through time.
##' @param smooth A logical: whether or not to apply loess smoothing.
##' @param smoothParam Loess smoothing parameter, ignored if
##' \code{smooth = FALSE}.
##' @param opacity Opacity of color for interval polygons.
##' @param intervalCol Color for interval polygons.
##' @param avgCol Color for mean/median line (line will not be plotted if
##' \code{avgCol = NULL}).
##' @param start.time Start time (in units before present). If \code{NULL},
##' starts at root.
##' @param end.time End time (in units before present). If \code{NULL}, ends
##' at present.
##' @param node If supplied, the clade descended from this node will be used
##' or ignored, depending on \code{nodetype}.
##' @param nodetype If 'include', rates will be plotted only for the clade
##' descended from \code{node}. If 'exclude', the clade descended from
##' \code{node} will be left out of the calculation of rates.
##' @param plot A logical: if \code{TRUE}, a plot will be returned, if
##' \code{FALSE}, the data for the plot will be returned.
##' @param cex.axis Size of axis tick labels.
##' @param cex.lab Size of axis labels.
##' @param lwd Line width of the average rate.
##' @param xline Margin line for placing x-axis label.
##' @param yline Margin line for placing y-axis label.
##' @param mar Passed to \code{par()} to set plot margins.
##' @param xticks Number of ticks on the x-axis, automatically inferred if
##' \code{NULL}.
##' @param yticks Number of ticks on the y-axis, automatically inferred if
##' \code{NULL}.
##' @param xlim Vector of length 2 with min and max times for x axis. X axis
##' is time since present, so if plotting till the present,
##' \code{xlim[2] == 0}. Can also be 'auto'.
##' @param ylim Vector of length 2 with min and max rates for y axis. Can also
##' be 'auto'.
##' @param add A logical: should rates be added to an existing plot.
##' @param axis.labels A logical: if \code{TRUE}, axis labels will be plotted.
##'
##' @details If the input \code{ephy} object has been generated by
##' \code{\link{getEventData}} and is of class \code{bammdata}, then
##' \code{start.time}, \code{end.time}, \code{node}, and \code{nodetype}
##' can be specified. If the input \code{ephy} object has been generated
##' by \code{\link{getRateThroughTimeMatrix}} and is of class
##' \code{bamm-ratematrix}, then those arguments cannot be specified
##' because they are needed to generate the rate matrix, which in this
##' case has already happened.
##'
##' The user has complete control of the plotting of the confidence
##' intervals. Confidence intervals will not be plotted at all if
##' \code{intervals=NULL}. If a single confidence interval polygon is
##' desired, rather than overlapping polygons, then \code{intervals} can
##' specify the confidence interval bounds, and \code{opacity} should be
##' set to 1 (see examples).
##'
##' If working with a large dataset, we recommend first creating a
##' \code{bamm-ratematrix} object with
##' \code{\link{getRateThroughTimeMatrix}} and then using that object as
##' input for \code{plotRateThroughTime}. This way, the computation of
##' rates has already happened and will not slow the plotting function
##' down, making it easier to adjust plotting parameters.
##'
##' @return If \code{plot = FALSE}, then a list is returned with the following
##' components:
##' \itemize{
##' \item poly: A list of matrices, where each matrix contains the
##' coordinates that define each overlapping confidence interval
##' polygon.
##' \item avg: A vector of y-coordinates for mean or median rates
##' used to plot the average rates line.
##' \item times: A vector of time values, used as x-coordinates in
##' this plot function.
##' }
##'
##' @author Pascal Title
##'
##' @seealso See \code{\link{getEventData}} and
##' \code{\link{getRateThroughTimeMatrix}} to generate input data.
##'
##' @examples
##' \dontrun{
##' data(events.whales)
##' data(whales)
##' ephy <- getEventData(whales,events.whales)
##'
##' # Simple plot of rates through time with default settings
##' plotRateThroughTime(ephy)
##'
##' # Plot two processes separately with 90% CI and loess smoothing
##' plotRateThroughTime(ephy, intervals = seq(from = 0, 0.9, by = 0.01), smooth = TRUE,
##' node = 141, nodetype = 'exclude', ylim = c(0, 1.2))
##'
##' plotRateThroughTime(ephy, intervals = seq(from = 0, 0.9, by = 0.01), smooth = TRUE,
##' node = 141, nodetype = 'include', add = TRUE,
##' intervalCol = 'orange')
##'
##' legend('topleft', legend = c('Dolphins','Whales'), col = 'red',
##' fill = c('orange', 'blue'), border = FALSE, lty = 1, lwd = 2, merge = TRUE,
##' seg.len=0.6)
##'
##' # Same plot, but from bamm-ratematrix objects
##' rmat1 <- getRateThroughTimeMatrix(ephy, node = 141, nodetype = 'exclude')
##' rmat2 <- getRateThroughTimeMatrix(ephy, node = 141, nodetype = 'include')
##'
##' plotRateThroughTime(rmat1, intervals=seq(from = 0, 0.9, by = 0.01),
##' smooth = TRUE, ylim = c(0, 1.2))
##'
##' plotRateThroughTime(rmat2, intervals = seq(from = 0, 0.9, by = 0.01),
##' smooth = TRUE, add = TRUE, intervalCol = 'orange')
##'
##' # To plot the mean rate without the confidence envelope
##' plotRateThroughTime(ephy, useMedian = FALSE, intervals = NULL)
##'
##' # To plot the mean rate, with a single 95% confidence envelope, grayscale
##' plotRateThroughTime(ephy, useMedian = FALSE, intervals = c(0.05, 0.95),
##' intervalCol = 'gray70', avgCol = 'black', opacity = 1)
##'
##' # To not plot, but instead return the plotting data generated in this
##' # function, we can make plot = FALSE
##' plotRateThroughTime(ephy, plot = FALSE)}
##' @export
plotRateThroughTime <- function(ephy, useMedian = TRUE, intervals = seq(from = 0, to = 1, by = 0.01), ratetype = 'auto', nBins = 100, smooth = FALSE, smoothParam = 0.20, opacity = 0.01, intervalCol = 'blue', avgCol = 'red', start.time = NULL, end.time = NULL, node = NULL, nodetype = 'include', plot = TRUE, cex.axis = 1, cex.lab = 1.3, lwd = 3, xline = 3.5, yline = 3.5, mar = c(6, 6, 1, 1), xticks = NULL, yticks = NULL, xlim = 'auto', ylim = 'auto',add = FALSE, axis.labels = TRUE) {
if (!any(inherits(ephy, c('bammdata', 'bamm-ratematrix')))) {
stop("ERROR: Object ephy must be of class 'bammdata' or 'bamm-ratematrix'.\n");
}
if (!is.logical(useMedian)) {
stop('ERROR: useMedian must be either TRUE or FALSE.');
}
if (!any(inherits(intervals, c('numeric', 'NULL')))) {
stop("ERROR: intervals must be either 'NULL' or a vector of quantiles.");
}
if (!is.logical(smooth)) {
stop('ERROR: smooth must be either TRUE or FALSE.');
}
if (inherits(ephy, 'bammdata')) {
#get rates through binned time
rmat <- getRateThroughTimeMatrix(ephy, start.time = start.time, end.time = end.time, node = node, nslices = nBins, nodetype=nodetype);
}
if (inherits(ephy, 'bamm-ratematrix')) {
if (!any(is.null(c(start.time, end.time, node)))) {
stop('ERROR: You cannot specify start.time, end.time or node if the rate matrix is being provided. Please either provide the bammdata object instead or specify start.time, end.time or node in the creation of the bamm-ratematrix.')
}
#use existing rate matrix
rmat <- ephy;
}
#set appropriate rates
if (ratetype == 'speciation') {
ratetype <- 'auto';
}
if (ratetype != 'auto' & ratetype != 'extinction' & ratetype != 'netdiv') {
stop("ERROR: ratetype must be 'auto', 'extinction', or 'netdiv'.\n");
}
if (ephy$type == 'trait' & ratetype != 'auto') {
stop("ERROR: If input object is of type 'trait', ratetype can only be 'auto'.")
}
if (ratetype == 'auto' & ephy$type == 'diversification') {
rate <- rmat$lambda;
ratelabel <- 'speciation rate';
}
if (ratetype == 'auto' & ephy$type == 'trait') {
rate <- rmat$beta;
ratelabel <- 'trait rate';
}
if (ratetype == 'extinction') {
rate <- rmat$mu;
ratelabel <- 'extinction rate';
}
if (ratetype == 'netdiv') {
rate <- rmat$lambda - rmat$mu;
ratelabel <- 'net diversification rate';
}
# times in rate matrix are in terms of node heights (where root age = 0 and present = max divergence time)
## Now reorganize in terms of time before present
timeBP <- as.numeric(names(rmat$times))
#remove NaN columns
nanCol <- apply(rate, 2, function(x) any(is.nan(x)));
rate <- rate[,which(nanCol == FALSE)];
timeBP <- timeBP[which(nanCol == FALSE)];
#generate coordinates for polygons
if (!is.null(intervals)) {
mm <- apply(rate, MARGIN = 2, quantile, intervals);
poly <- list();
q1 <- 1;
q2 <- nrow(mm);
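		# pair the outermost quantiles first (1st with last, 2nd with
		# next-to-last, ...); each pair traces one nested polygon, and
		# overplotting these with a transparent color builds the CI gradient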
repeat {
if (q1 >= q2) {break}
a <- as.data.frame(cbind(timeBP, mm[q1,]));
b <- as.data.frame(cbind(timeBP, mm[q2,]));
b <- b[rev(rownames(b)),];
colnames(a) <- colnames(b) <- c('x','y');
poly[[q1]] <- rbind(a,b);
q1 <- q1 + 1;
q2 <- q2 - 1;
}
}
#Calculate averaged data line
if (!useMedian) {
avg <- colMeans(rate);
} else {
avg <- unlist(apply(rate, 2, median));
}
#apply loess smoothing to intervals
if (smooth) {
for (i in 1:length(poly)) {
p <- poly[[i]];
rows <- nrow(p);
			# smooth each polygon boundary separately: rows 1:(rows/2) trace the
			# lower quantile, (rows/2 + 1):rows the (reversed) upper quantile;
			# note ':' binds tighter than '/', so the halves must be parenthesized
			p[1:(rows/2), 2] <- loess(p[1:(rows/2), 2] ~ p[1:(rows/2), 1], span = smoothParam)$fitted;
			p[(rows/2 + 1):rows, 2] <- loess(p[(rows/2 + 1):rows, 2] ~ p[(rows/2 + 1):rows, 1], span = smoothParam)$fitted;
poly[[i]] <- p;
}
avg <- loess(avg ~ timeBP,span = smoothParam)$fitted;
}
#begin plotting
if (plot) {
if (!add) {
plot.new();
par(mar = mar);
if (unique(xlim == 'auto')) {
xMin <- max(timeBP);
xMax <- min(timeBP);
} else {
xMin <- xlim[1];
xMax <- xlim[2];
}
if (unique(ylim == 'auto')) {
if (!is.null(intervals)){
yMin <- min(poly[[1]][, 2]);
yMax <- max(poly[[1]][, 2]);
} else {
yMin <- min(avg);
yMax <- max(avg);
}
if (yMin >= 0) {
yMin <- 0;
}
} else {
yMin <- ylim[1];
yMax <- ylim[2];
}
plot.window(xlim = c(xMin, xMax), ylim = c(yMin, yMax));
if (is.null(xticks)) {
axis(side = 1, cex.axis = cex.axis, lwd = 0, lwd.ticks = 1);
}
if (!is.null(xticks)) {
axis(side = 1, at = seq(xMin, xMax, length.out = xticks + 1), labels = signif(seq(xMin, xMax, length.out = xticks + 1),digits = 2), cex.axis = cex.axis, lwd = 0, lwd.ticks = 1);
}
if (is.null(yticks)) {
axis(side = 2, cex.axis = cex.axis, las = 1, lwd = 0, lwd.ticks = 1);
}
if (!is.null(yticks)) {
axis(side = 2, at = seq(yMin, 1.2 * yMax, length.out = yticks + 1), labels = signif(seq(yMin, 1.2 * yMax, length.out = yticks + 1),digits = 2), las=1, cex.axis = cex.axis, lwd = 0, lwd.ticks = 1);
}
if (axis.labels) {
mtext(side = 1, text = 'time before present', line = xline, cex = cex.lab);
mtext(side = 2, text = ratelabel, line = yline, cex = cex.lab);
}
box(which = "plot", bty = "l");
}
#plot intervals
if (!is.null(intervals)) {
for (i in 1:length(poly)) {
polygon(x = poly[[i]][, 1],y = poly[[i]][, 2], col = BAMMtools::transparentColor(intervalCol, opacity), border = NA);
}
}
		if (!is.null(avgCol)) {
			lines(x = timeBP, y = avg, lwd = lwd, col = avgCol);
		}
	} else {
		# poly is only defined when intervals were requested
		if (is.null(intervals)) poly <- NULL;
		return(list(poly = poly, avg = avg, times = timeBP));
	}
}
# End of file: R/plotRateThroughTime.R
##' @export
print.bammdata = function(x, ...)
{
print.phylo(as.phylo.bammdata(x));
nsamples <- length(x$eventData);
cat(paste("\nPosterior samples:", nsamples,"\n\n"));
cat("List elements:\n");
cat("\t",names(x)[1:10]);
cat("\t",names(x)[11:length(x)]);
cat('\n');
}
# End of file: R/print.bammdata.R
############################################
# Internal function called by plot.bammdata(...)
#
#
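# Draws an inset density-of-rates legend at a user-chosen (clicked) location.
# colobj is the 'colordens' matrix from plot.bammdata: column 1 = rate,
# column 2 = density, column 3 = color.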
rateLegend = function(colobj, log = FALSE) {
opar = par(no.readonly = TRUE);
cat("Click once where you want the lower left corner of the figure\n");
cxy = locator(n = 1);
xc = (grconvertX(cxy$x, to = "ndc"));
yc = (grconvertY(cxy$y, to = "ndc"));
ofs = min(1 - xc, 1 - yc);
fig = c(xc, xc + ofs, yc, yc + ofs);
par(fig = fig, new = TRUE, xpd = TRUE, mar = c(1.5, 1.5, 0.25, 0.25));
plot.new();
x = colobj[,1];
y = colobj[,2];
plot.window(xlim = c(min(0,min(x)), max(x)), ylim = c(0, max(y)));
segments(x, y, x, 0, lend = 2, col = colobj[,3]);
axis(1, signif(seq(min(0,min(x)), max(x), length.out = 5), 2), xaxs = "i", cex.axis = 0.75, tcl = NA, mgp = c(0, 0.25, 0));
axis(2, round(seq(0, max(y), length.out = 3), 0), las = 1, yaxs = "i", cex.axis = 0.75, tcl = NA, mgp = c(0, 0.25, 0));
if (log == FALSE) mtext("Evolutionary Rate", 1, line = 1, cex = 0.75)
else mtext("Evolutionary Rate (log)", 1, line = 1, cex = 0.75);
mtext("Density", 2, line = 1, cex = 0.75);
par(opar);
}
# histRates = function(rates,pal,NCOLORS) {
# opar = par(no.readonly = TRUE);
# fx = density(rates);
# dpal = c('BrBG','PiYG','PuOr','RdBu','RdGy','RdYlBu','RdYlGn','Spectral');
# if(length(pal) == 3) {
# rate.colors = colorRampPalette(pal,space='Lab')(NCOLORS);
# }
# else if(pal %in% dpal) {
# rate.colors = colorRampPalette(rev(brewer.pal(3,pal)),space='Lab')(NCOLORS);
# }
# else if(pal == 'temperature') {
# rate.colors = gplots::rich.colors(NCOLORS);
# }
# qx = quantile(rates,seq(0,1,length.out = NCOLORS+1));
# cat("Click once where you want the lower left corner of the figure\n");
# cxy = locator(n=1);
# xc = (grconvertX(cxy$x,to='ndc'));
# yc = (grconvertY(cxy$y,to='ndc'));
# ofs = min(1 - xc, 1 - yc);
# fig = c(xc,xc+ofs,yc,yc+ofs);
# par(fig = fig, new=TRUE, xpd=TRUE, mar=c(1.5,1.5,0.25,0.25));
# plot.new();
# plot.window(xlim=c(min(0,min(fx$x)),max(fx$x)),ylim=c(0,max(fx$y)));
# for(i in 1:length(fx$x)) {
# index = which(qx > fx$x[i])[1];
# if(is.na(index)) break;
# if(index > 1) index = index - 1;
# bcol = rate.colors[index];
# segments(fx$x[i],fx$y[i],fx$x[i],0,lend=2,col=bcol);
# }
# axis(1,signif(seq(min(0,min(fx$x)),max(fx$x),length.out=5),2),pos=0,cex.axis=0.75,tcl=NA,mgp=c(0,0.25,0));
# axis(2,round(seq(0,max(fx$y),length.out=3),0),las=1,pos=0,cex.axis=0.75,tcl=NA,mgp=c(0,0.25,0));
# mtext('Evolutionary Rate',1,line=1,cex=0.75);
# mtext('Density',2,line=1,cex=0.75);
# par(opar);
# }
# End of file: R/rateLegend.R
# ratesHistogram <- function(...)
# phylorates = a plot.bammdata object
# plotBrks = boolean, should breaks be plotted
# xlab = x-axis label
# ylab = y-axis label
# lwd = lwd for breaks lines
# lty = lty for breaks lines
# brksCol = color for breaks lines
# ... additional arguments passed on to mtext for axis labels
##' @title Histogram of \code{BAMM} rate frequencies
##'
##' @description Plots a histogram of the frequency of rate values across the
##' phylogeny.
##'
##' @param phylorates A saved \code{\link{plot.bammdata}} object.
##' @param plotBrks Boolean, should breaks be plotted over the histogram.
##' @param xlab x-axis label.
##' @param ylab y-axis label.
##' @param lwd Line width for breaks.
##' @param lty Line style for breaks.
##' @param brksCol Color of breaks lines.
##' @param \dots Additional arguments passed on to \code{mtext} for axis
##' labels.
##'
##' @details With this function, a histogram is plotted that shows the
##' frequency of rates present in the dataset. The color scheme plotted
##' is taken from the saved \code{plot.bammdata} object that is the main
##' input. Therefore, the mapping of colors to rates in the histogram
##' corresponds exactly to what is plotted in the phylorate plot. If
##' \code{plotBrks = TRUE}, then the color breaks used for the phylorates
##' plot are shown.
##'
##' This function can be a useful tool for exploring different
##' \code{plot.bammdata} options. Please see
##' \url{http://bamm-project.org/colorbreaks.html} on the bamm-project
##' website for more information on the utility of this function.
##'
##' @author Pascal Title
##'
##' @seealso \code{\link{plot.bammdata}}
##'
##' @references \url{http://bamm-project.org/}
##'
##' @examples
##' data(primates, events.primates)
##' ed <- getEventData(primates, events.primates, burnin=0.25, nsamples=500,
##' type = 'trait')
##'
##' # create phylorate plot with the jenks breaks method to generate output
##' phylorates <- plot(ed, breaksmethod='jenks', show = FALSE)
##'
##' ratesHistogram(phylorates, plotBrks = TRUE, xlab = 'trait rates')
##' @export
ratesHistogram <- function(phylorates, plotBrks = TRUE, xlab = 'speciation rate', ylab = 'density', lwd = 0.2, lty = 1, brksCol = 'black', ...) {
if (!identical(names(phylorates), c("coords", "colorbreaks", "palette", "colordens"))) {
stop("phylorates must be a saved plot.bammdata object.")
}
plot.new();
x <- phylorates$colordens[,1];
y <- phylorates$colordens[,2];
plot.window(xlim = c(min(x), max(x)), ylim = c(0, max(y)));
segments(x, y, x, 0, lend = 2, col = phylorates$colordens[,3], lwd = 3);
axis(1, signif(seq(min(0, min(x)), max(x), length.out = 5), 2), xaxs = "i", cex.axis = 0.75, tcl = NA, mgp = c(0, 0.25, 0), lwd = 0, lwd.ticks = 1);
axis(2, round(seq(0, max(y), length.out = 4)), las = 1, yaxs = "i", cex.axis = 0.75, tcl = NA, mgp = c(0, 0.35, 0), lwd = 0, lwd.ticks = 1);
box(which = "plot", bty = "l");
mtext(xlab, side = 1, line = 1.5, ...);
mtext(ylab, side = 2, line = 1.5, ...);
#add breaks as vertical lines
if (plotBrks) {
abline(v = phylorates$colorbreaks, lwd = lwd, lty = lty, col = brksCol);
}
}
# End of file: R/ratesHistogram.R
#############################################################
#
# recursivelySetNodeStates(....)
#
# Function to recursively assign states to nodes going from root-to-tip
# takes args of tree plus vector of node event assignments
# returns vector.
# phySV is tree with "statevec" component
# node is the focal node
# state is the state (probably "event" in this context)
recursivelySetNodeStates <- function(phySV, node, state) {
phySV$statevec[phySV$edge[,2] == node] <- state;
if (sum(phySV$edge[,1] == node) > 0){
# node is internal
dset <- phySV$edge[,2][phySV$edge[,1] == node];
phySV <- recursivelySetNodeStates(phySV, dset[1], state);
phySV <- recursivelySetNodeStates(phySV, dset[2], state);
}
return(phySV);
}
# End of file: R/recursivelySetNodeStates.R
####################################################
#
# samplingProbs <- function(...)
#
# tree = object of class phylo
# cladeTable = a dataframe with 1 column of species names and a second column of group assignment
# Must either be a table for all species in tree, or a table of greater species richness, including those species in the tree.
# cladeRichness = either NULL or a vector of species counts, named by group names.
# globalSampling = percent sampling of the backbone of the phylogeny
# output = path + output file name (.txt)
# writeToDisk = boolean, should the table be written to disk, defaults to TRUE
##' @title Creates clade-specific sampling fractions
##'
##' @description If the user would like to specify species sampling on a
##' clade-by-clade basis, a sampling probability table can be provided to
##' \code{BAMM}.
##'
##' @param tree An object of class \code{phylo}.
##' @param cladeTable A dataframe with one column of species names and a
##' second column of group assignment.
##' @param cladeRichness Either \code{NULL} or a vector of species counts,
##' named by clade names.
##' @param globalSampling percent sampling for the backbone of the phylogeny.
##' @param output Path + output file name.
##' @param writeToDisk A logical, should the table be written to disk,
##' defaults to \code{TRUE}.
##'
##' @details This function handles two types of input: The cladeTable can
##' either contain the species found in the phylogeny, along with clade
##' assignment of those species, or it can contain more species than found
##' in the phylogeny. If the table only contains those species in the
##' phylogeny, then a vector \code{cladeRichness} must be provided that
##' contains known clade richness. If the cladeTable contains more than
##' the species in the phylogeny, then cladeRichness should be set to
##' \code{NULL}. The \code{globalSampling} value should represent the
##' overall completeness of species sampling in terms of major clades. See
##' \url{http://bamm-project.org/} for additional details.
##'
##' @return If \code{writeToDisk = TRUE}, then no object is returned. If
##' \code{writeToDisk = FALSE}, then a dataframe is returned. The
##' resultant table must contain one row for each species in the
##' phylogeny, along with clade assignment, and sampling fraction. The
##' first line must contain the overall sampling fraction for the
##' phylogeny and must be written as tab-delimited, with no headers.
##'
##' @author Pascal Title
##'
##' @examples
##' # Generate dummy data
##' tree <- read.tree(text="(((t1:2,(t2:1,t3:1):1):1,((t4:1,t5:1):1,t6:2):1)
##' :1,(t7:3,(t8:2,t9:2):1):1);")
##' tree$tip.label <- paste(rep('Species',9),1:9,sep='')
##'
##' spTable <- as.data.frame(matrix(nrow=9,ncol=2))
##' spTable[,1] <- tree$tip.label
##' spTable[1:3,2] <- 'cladeA'
##' spTable[4:6,2] <- 'cladeB'
##' spTable[7:9,2] <- 'cladeC'
##' richnessVec <- c(cladeA=5, cladeB=4, cladeC=12)
##'
##' # Option 1: We have a table of clade assignment for the species in the
##' # tree, along with a vector of known clade richness
##' spTable
##' richnessVec
##' samplingProbs(tree, cladeTable = spTable, cladeRichness = richnessVec,
##' globalSampling = 1, writeToDisk = FALSE)
##'
##' # Option 2: We have a table of known species, beyond the sampling in the
##' # phylogeny
##' spTable <- rbind(spTable, c('Species10','cladeA'),c('Species11','cladeA'),
##' c('Species12','cladeC'), c('Species13','cladeC'),
##' c('Species14','cladeC'))
##'
##' spTable
##'
##' samplingProbs(tree, cladeTable = spTable, cladeRichness = NULL,
##' globalSampling = 0.9, writeToDisk = FALSE)
##' @export
samplingProbs <- function(tree, cladeTable, cladeRichness = NULL, globalSampling, output, writeToDisk = TRUE) {
if (length(intersect(tree$tip.label,cladeTable[,1])) != length(tree$tip.label)) {
stop("Not all species from tree are in cladeTable.");
}
if (nrow(cladeTable) == length(tree$tip.label)) {
if (is.null(cladeRichness)) {
stop("If cladeTable only contains species from tree, then cladeRichness must be provided.");
}
}
if (nrow(cladeTable) > length(tree$tip.label)) {
if (!is.null(cladeRichness)) {
warning("cladeTable contains more species than in tree, so cladeRichness vector will be ignored.");
}
}
if (!is.null(cladeRichness)) {
if (length(cladeRichness) != length(unique(cladeTable[,2]))) {
stop("The cladeRichness vector must contain the same number of clades as are described in the cladeTable.");
}
}
if (!is.vector(cladeRichness) & !is.null(cladeRichness)) {
stop("Error: cladeRichness must either be NULL or an integer vector named with clade names.");
}
if (ncol(cladeTable) > 2) {
stop("cladeTable must contain 2 columns: one of species, and one of clade assignment.");
}
if (is.matrix(cladeTable)) {
cladeTable <- as.data.frame(cladeTable, stringsAsFactors=FALSE);
}
if (nrow(cladeTable) > length(tree$tip.label)) {
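		# table covers more species than the tree: each species' sampling
		# fraction = (its clade's species present in tree) / (its clade's
		# species listed in cladeTable)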
probs <- as.data.frame(matrix(nrow=length(tree$tip.label),ncol=3));
colnames(probs) <- c('sp','clade','prob');
for (i in 1:length(tree$tip.label)) {
probs[i,1] <- tree$tip.label[i];
clade <- cladeTable[cladeTable[,1] == tree$tip.label[i],2];
inTree <- intersect(cladeTable[cladeTable[,2] == clade,1],tree$tip.label);
probs[i,2] <- clade;
probs[i,3] <- length(inTree) / length(cladeTable[cladeTable[,2] == clade,1]);
}
probs <- rbind(c(globalSampling,'',''),probs);
}
if (nrow(cladeTable) == length(tree$tip.label) & !is.null(cladeRichness)) {
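		# table covers only the tree's species: sampling fraction =
		# (clade's species in tree) / (known clade richness)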
probs <- as.data.frame(matrix(nrow=length(tree$tip.label),ncol=3));
colnames(probs) <- c('sp','clade','prob');
for (i in 1:length(tree$tip.label)) {
probs[i,1] <- tree$tip.label[i];
clade <- cladeTable[cladeTable[,1] == tree$tip.label[i],2];
probs[i,2] <- clade;
probs[i,3] <- nrow(cladeTable[cladeTable[,2] == clade,]) / cladeRichness[clade];
}
probs <- rbind(c(globalSampling,'',''),probs);
}
if (writeToDisk) {
		write.table(probs, file = output, quote = FALSE, col.names = FALSE, row.names = FALSE, sep = '\t');
} else {
return(probs);
}
}
# End of file: R/samplingProbs.R
######################################
# Internal function called by dtRates(...)
#
#
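# Splits each branch into segments of relative length tau (a fraction of tree
# height); a partial segment left at the end of a branch is carried over to
# its daughter branches via 'remainder'. Returns a matrix with one row per
# segment: node id, segment start time, segment end time (absolute units).
#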
segMap = function(ephy, tau) {
tol <- 0.0001;
# tH <- max(branching.times(as.phylo.bammdata(ephy)));
tH <- max(ephy$end);
remainder <- numeric(max(ephy$edge[,1]));
dtsegs <- vector("list",nrow(ephy$edge));
for (i in 1:nrow(ephy$edge)) {
if (remainder[ephy$edge[i,1]] > 0) {
if (ephy$begin[i]/tH+remainder[ephy$edge[i,1]] > ephy$end[i]/tH) {
remainder[ephy$edge[i,2]] <- ephy$begin[i]/tH + remainder[ephy$edge[i,1]] - ephy$end[i]/tH;
segs <- ephy$begin[i]/tH;
}
else {
segs <- seq(ephy$begin[i]/tH+remainder[ephy$edge[i,1]], ephy$end[i]/tH, tau);
segs <- c(ephy$begin[i]/tH,segs);
}
}
else {
segs <- seq(ephy$begin[i]/tH, ephy$end[i]/tH, tau);
}
if (length(segs) > 1) {
if (ephy$end[i]/tH - tail(segs,1) > tol) {
remainder[ephy$edge[i,2]] <- tau - (ephy$end[i]/tH-tail(segs,1));
segs <- c(segs,ephy$end[i]/tH);
}
segs <- rep(segs,each=2);
segs <- segs[-c(1,length(segs))];
segs <- matrix(segs,ncol=2,byrow=TRUE);
segs <- cbind(rep(ephy$edge[i,2],nrow(segs)), segs);
}
else {
if (remainder[ephy$edge[i,1]] == 0) {
remainder[ephy$edge[i,2]] <- tau - (ephy$end[i]/tH-tail(segs,1));
}
segs <- matrix(c(ephy$edge[i,2], ephy$begin[i]/tH, ephy$end[i]/tH),nrow=1,ncol=3);
}
dtsegs[[i]] <- segs;
}
dtsegs <- do.call(rbind,dtsegs);
dtsegs[,2] <- dtsegs[,2]*tH;
dtsegs[,3] <- dtsegs[,3]*tH;
return(dtsegs);
}
# segMap = function(nodes,begin,end,tau)
# {
# foo = function(x,tau)
# {
# len = (x[3] - x[2])/tau; if (len%%1 == 0) len = len+1;
# ret = seq(x[2],x[3],length.out=len);
# if(length(ret) == 1) return(matrix(x,nrow=1));
# #ret = seq(x[2],x[3],length.out=length(ret));
# ret = rep(ret,each=2); ret=ret[-c(1,length(ret))];
# ret = matrix(ret,ncol=2,byrow=TRUE);
# return(cbind(matrix(rep(as.integer(x[1]),nrow(ret)),ncol=1), ret));
# }
# times = cbind(nodes,begin,end);
# ret = apply(times,1,foo,tau);
# return(do.call(rbind,ret));
# }
# End of file: R/segMap.R
# phy is a phylogenetic tree
# traits is a file name to your BAMM formatted data.
# total.taxa is only if you have incomplete sampling
#
#
##' @title Set BAMM Priors
##'
##' @description Set priors for \code{BAMM} analysis.
##'
##' @param phy An object of class \code{phylo}, e.g., the phylogenetic tree
##' you will analyze with \code{BAMM}.
##' @param total.taxa If doing speciation-extinction analysis, the total
##' number of taxa in your clade. If your tree contains all taxa in the
##' clade (100\% sampling), then leave this as \code{NULL}.
##' @param traits A filename where the trait data (\code{BAMM} format) are
##' stored, or a numeric vector named according to the tips in \code{phy}.
##' Leave \code{NULL} if doing a speciation-extinction analysis.
##' @param outfile Filename for outputting the sample priors block. If
##' \code{NULL}, then a vector is returned instead.
##' @param Nmax If analyzing a very large tree for phenotypic evolution, uses
##' only this many taxa to estimate priors for your dataset. Avoid matrix
##' inversion issues with large numbers of tips.
##' @param suppressWarning Logical. If \code{TRUE}, then the warning about
##' setting the Poisson rate prior is suppressed. Only applies if
##' \code{outfile = NULL}.
##'
##' @details This is a "quick and dirty" tool for identifying approximately
##' acceptable priors for a \code{BAMM} analysis. We have found that
##' choice of prior can have a substantial impact on \code{BAMM} analyses.
##' It is difficult to simply set a default prior that applies across
##' datasets, because users often have trees with branch lengths in very
##' different units (e.g., numbers of substitutions versus millions of
##' years). Hence, without some careful attention, you can inadvertently
##' specify some very bad prior distributions. This function is designed
##' to at least put you in the right ballpark for decent prior
##' distributions, but there are no guarantees that these are most
##' appropriate for your data.
##'
##' The general rules applied here are:
##'
##' For the \code{lambdaInitPrior}, we estimate the speciation rate of the
##' data under a pure birth model. We then set this prior to give an
##' exponential distribution with a mean five times greater than this
##' computed pure birth speciation estimate.
##'
##' The \code{lambdaShiftPrior} is the standard deviation of the normal
##' prior on the exponential change parameter k. We set the prior
##' distribution based on the age of the root of the tree. We set the
##' standard deviation of this distribution such that 2 standard
##' deviations give a parameter that will yield a 90\% decline in the
##' initial speciation rate between the root of the tree and the tips.
##' The basic model is lambda(t) = lambda_0 * exp(k * t). This is a
##' straightforward calculation: let x = -log(0.1) / TMAX, where TMAX is
##' the age of the tree. Then set the standard deviation equal to (x / 2).
##'
##' We set \code{muInitPrior} equal to \code{lambdaInitPrior}.
##'
##' For trait evolution, we first compute the maximum likelihood estimate
##' of the variance of a Brownian motion process of trait evolution. The
##' prior \code{betaInitPrior} is then set to an exponential distribution
##' with a mean 5 times greater than this value (similar to what is done
##' for lambda and mu, above).
##'
##' This function generates an output file containing a prior parameters
##' block that can be pasted directly into the priors section of your
##' \code{BAMM} control file.
##'
##' @return The function does not return anything. It simply performs some
##' calculations and writes formatted output to a file. However, if
##' \code{outfile = NULL}, then a named vector is returned.
##'
##' @author Dan Rabosky
##'
##' @examples
##' # for diversification analyses
##' data(whales)
##' setBAMMpriors(phy = whales, total.taxa = 89, outfile = NULL)
##'
##' # for trait analyses
##' data("primates")
##' data("mass.primates")
##'
##' ## create a named vector of trait values
##' mass <- setNames(mass.primates[,2], mass.primates[,1])
##'
##' setBAMMpriors(phy = primates, traits = mass, outfile = NULL)
##' @keywords models
##' @export
setBAMMpriors <- function(phy, total.taxa = NULL, traits=NULL, outfile = 'myPriors.txt', Nmax = 1000, suppressWarning = FALSE){
if (is.ultrametric(phy)) {
mbt <- max(branching.times(phy));
} else {
mbt <- max(NU.branching.times(phy));
}
if (is.null(total.taxa)){
total.taxa <- length(phy$tip.label);
}
if (!is.null(total.taxa) & total.taxa <= 1) {
stop("total.taxa is a count, not a percent.");
}
if (length(phy$tip.label) > total.taxa) {
stop("Your tree has more tips than total.taxa...");
}
if (is.null(traits)){
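		# pure-birth (Yule) estimate of speciation: lambda = (log(N) - log(2)) / root age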
pb <- (log(total.taxa) - log(2)) / mbt;
lamprior <- 1 / (pb * 5);
lamrootprior <- 1 / (pb * 1);
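		# sd of the normal prior on the rate-change parameter k: two sd gives a
		# 90% decline in the initial rate between the root and the tips (see Details)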
k1 <- log(0.1) / mbt;
kprior <- -1 * (k1 / 2);
s1 <- '###############################################';
s2 <- '# Prior block chosen by BAMMtools::setBAMMpriors';
s3 <- 'expectedNumberOfShifts = 1.0';
s4 <- paste('lambdaInitPrior = ', lamprior, sep='');
s5 <- paste('lambdaShiftPrior = ', kprior, sep='');
s6 <- paste('muInitPrior = ', lamprior, sep='');
s7 <- paste('#### End Prior block\n######################\n\n');
ss <- paste(s1,s2,s3,s4,s5,s6,s7, sep='\n\n');
if (!is.null(outfile)) {
write(ss, file = outfile, sep='');
} else {
res <- as.data.frame(cbind(c('expectedNumberOfShifts', 'lambdaInitPrior', 'lambdaShiftPrior', 'muInitPrior'), c(1.0, lamprior, kprior, lamprior)), stringsAsFactors=FALSE);
res[,2] <- as.numeric(res[,2]);
colnames(res) <- c('param','value');
}
} else {
if (is.character(traits)) {
x <- read.table(file = traits, sep = '\t', stringsAsFactors = FALSE, header = FALSE);
tvec <- x[,2];
names(tvec) <- x[,1];
} else {
tvec <- traits;
}
not.in.tree <- setdiff(phy$tip.label, names(tvec));
not.in.traits <- setdiff(names(tvec), phy$tip.label);
bad <- c(not.in.tree, not.in.traits);
if (length(bad) > 0){
cat('Taxon names do not match between tree and trait dataset\n');
cat('Printing mismatched names:\n\n');
for (i in bad){
cat(i, '\n');
}
stop('Names in trait dataset must match those in tree\n');
}
bad <- which(is.na(tvec)) #check for missing data
if (length(bad) > 0) {
cat('\nSome taxa are missing data:\n\n');
for (i in bad) {
cat(names(tvec)[i], '\n');
}
stop('All taxa must have trait data.');
}
tvec <- tvec[phy$tip.label];
if (length(phy$tip.label) > Nmax){
ss <- sample(phy$tip.label, size=Nmax);
drop <- setdiff(phy$tip.label, ss);
phy <- drop.tip(phy, tip = drop);
tvec <- tvec[phy$tip.label];
}
pmean <- phylogeneticMean(tvec, phy)$beta;
betaprior <- 1/(pmean * 5);
betarootprior <- 1/(pmean * 1);
k1 <- log(0.1) / mbt;
kprior <- -1 * (k1 / 2);
s1 <- '###############################################';
s2 <- '# Prior block chosen by BAMMtools::setBAMMpriors';
s3 <- 'expectedNumberOfShifts = 1.0';
s4 <- paste('betaInitPrior = ', betaprior, sep='');
s5 <- paste('betaShiftPrior = ', kprior, sep='');
s6 <- paste('useObservedMinMaxAsTraitPriors = 1');
s7 <- paste('#### End Prior block\n######################\n\n');
ss <- paste(s1,s2,s3,s4,s5,s6,s7,sep='\n\n');
if (!is.null(outfile)) {
write(ss, file = outfile, sep='');
} else {
res <- as.data.frame(cbind(c('expectedNumberOfShifts', 'betaInitPrior', 'betaShiftPrior', 'useObservedMinMaxAsTraitPriors'), c(1.0, betaprior, kprior, 1)), stringsAsFactors=FALSE);
res[,2] <- as.numeric(res[,2]);
colnames(res) <- c('param','value');
}
}
if (!is.null(outfile)) {
cat('\nPrior block written to file << ', outfile, " >>\n", sep='');
cat('Copy and paste the contents of the file into the\n');
cat('priors block of your BAMM input file\n');
}
if (!suppressWarning & !is.null(outfile)) {
cat('\nThis function simply sets the expectedNumberOfShifts to 1;\n');
cat('This is a parameter you may need to vary to achieve good convergence\n');
cat('with your data.\n');
}
if (is.null(outfile)) {
res <- setNames(res[,2], res[,1])
return(res);
}
}
| /scratch/gouwar.j/cran-all/cranData/BAMMtools/R/setBAMMpriors.R |
#######################################
# Internal function called by plot.bammdata(...)
#
#
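# Computes branch coordinates for a rectangular (phylogram) layout, with time
# scaled by tree height. Returns 'segs', a matrix of horizontal branch
# segments (x0, y0, x1, y1; rows named by node), and 'arcs', the vertical
# connectors drawn at internal nodes.
#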
setPhyloTreeCoords = function(phy)
{
phy <- getStartStopTimes(phy);
# tH <- as.numeric(max(branching.times(phy)))
tH <- max(phy$end);
rootnd <- as.integer(phy$Nnode+2);
ntip <- as.integer(phy$Nnode+1);
anc <- as.integer(phy$edge[,1]);
desc <- as.integer(phy$edge[,2]);
nnode <- as.integer(dim(phy$edge)[1] + 1);
bl <- as.numeric(phy$edge.length/tH);
begin <- as.numeric(phy$end/tH);
ndorder <- .C('postorder_tree_traverse', anc, desc, rootnd, nnode, integer(nnode), PACKAGE="BAMMtools")[[5]];
ndorder <- as.integer(ndorder);
L <- .C('setphylotreecoords', anc, desc, ndorder, begin, bl, ntip, rootnd, nnode, numeric(nrow(phy$edge)*4), numeric(nrow(phy$edge)*4), numeric(4), PACKAGE="BAMMtools");
root <- matrix(L[[11]],nrow=1);
xy <- matrix(L[[10]],nrow=nrow(phy$edge),ncol=4);
bar <- matrix(L[[9]],nrow=nrow(phy$edge),ncol=4);
xy <- rbind(c(xy[1,1],sum(xy[1:2,4])/2,xy[1,1],sum(xy[1:2,4])/2),xy);
bar <- rbind(root,bar);
rownames(xy) <- c(phy$edge[1,1],phy$edge[,2]); colnames(xy) = c('x0','y0','x1','y1');
return(list (segs = xy, arcs = bar ) );
}
# End of file: R/setPhyloTreeCoords.R
###############################################
# Internal function called by plot.bammdata(...)
#
#
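# Computes branch coordinates for a circular (polar) layout. vtheta is the
# angular gap (in degrees) left open at the root; rbf is the root's radius as
# a fraction of tree height. Returns 'segs' (x0, y0, x1, y1, theta per branch)
# and 'arcs', the start/end angles of the arcs at internal nodes.
#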
setPolarTreeCoords = function(phy,vtheta,rbf)
{
phy <- getStartStopTimes(phy);
# tH = max(branching.times(phy))
tH <- max(phy$end);
rootnd <- as.integer(phy$Nnode+2);
ntip <- as.integer(phy$Nnode+1);
anc <- as.integer(phy$edge[,1]);
desc <- as.integer(phy$edge[,2]);
nnode <- as.integer(dim(phy$edge)[1] + 1);
vtheta <- as.numeric(vtheta*(pi/180));
ths <- as.numeric((2*pi-vtheta)/phy$Nnode);
ndorder <- .C('postorder_tree_traverse', anc, desc, rootnd, nnode, integer(nnode), PACKAGE="BAMMtools")[[5]];
ndorder <- as.integer(ndorder);
L <- .C('setpolartreecoords', anc, desc, ndorder, ntip, rootnd, nnode, ths, numeric(nrow(phy$edge)*3), numeric(3), PACKAGE="BAMMtools");
root <- matrix(L[[9]],nrow=1);
theta <- matrix(L[[8]],nrow=nrow(phy$edge),ncol=3);
theta <- rbind(root, theta);
rb <- tH * rbf;
x0 <- c(rb,rb+(phy$begin/tH))*cos(theta[,1]);
y0 <- c(rb,rb+(phy$begin/tH))*sin(theta[,1]);
x1 <- c(rb,rb+(phy$end/tH))*cos(theta[,1]);
y1 <- c(rb,rb+(phy$end/tH))*sin(theta[,1]);
ret <- cbind(x0,y0,x1,y1,theta[,1]);
rownames(ret) <- c(phy$edge[1,1],phy$edge[,2]); colnames(ret) = c('x0','y0','x1','y1','theta');
return(list(segs = ret, arcs = theta[,2:3]) );
}
# End of file: R/setPolarTreeCoords.R
##' @title Compute species-specific rate through time trajectories
##'
##' @description Computes the mean of the marginal posterior density of
##' speciation/extinction or phenotypic rates for equally spaced points
##' along the root to tip path for each species.
##'
##' @param ephy An object of class \code{bammdata}.
##' @param nslices An integer number of time slices. This determines the
##' number of equally spaced points in time at which rates are computed
##' for each species.
##' @param index An integer or vector of mode integer indicating which
##' posterior samples to use in the calculation. If \code{NULL} (default)
##' all samples are used.
##' @param spex A character string. "s" (default) calculates speciation rates;
##' "e" calculates extinction rates; "netdiv" calculates diversification
##' rates. Ignored if \code{ephy$type = "trait"}.
##'
##' @return A list with two components:
##' \itemize{
##' \item times: A vector of time points where rates were
##' calculated.
##' \item rates: A species X times matrix of rate through time
##' trajectories.
##' }
##'
##' @author Mike Grundler
##'
##' @seealso \code{\link{getRateThroughTimeMatrix}}
##'
##' @references \url{http://bamm-project.org/}
##'
##' @examples
##' data(whales, events.whales)
##' ed <- getEventData(whales, events.whales, burnin=0.25, nsamples=500)
##' ratemat <- speciesByRatesMatrix(ed, nslices = 100)
##'
##' dolphins <- extract.clade(whales, 140)$tip.label
##' plot.new()
##' plot.window(xlim = c(0, 35), ylim = c(0, 0.8))
##' for (i in 1:nrow(ratemat$rates)) {
##' if (whales$tip.label[i] %in% dolphins) {
##' lines(ratemat$times, ratemat$rates[i,], lwd = 2, col = 4)
##' } else {
##' lines(ratemat$times, ratemat$rates[i,], lwd = 2, col = 8)
##' }
##' }
##' axis(1, seq(-5, 35, 5))
##' axis(2, seq(-0.2, 0.8, 0.2), las = 1)
##' mtext("Time since root", 1, line = 2.5)
##' mtext("Speciation rate", 2, line = 2.5)
##'
##' @keywords models
##' @export
speciesByRatesMatrix = function(ephy, nslices, index = NULL, spex = "s") {
if (!spex %in% c('s', 'e', 'netdiv')) {
stop("arg spex must be 's', 'e' or 'netdiv'.")
}
seq.nod <- .Call("seq_root2tip", ephy$edge, length(ephy$tip.label), ephy$Nnode, PACKAGE = "BAMMtools");
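	# rates are computed on a fixed-width dt grid; tvec holds the midpoints of
	# the grid cells, thinned down to match nslices (finer grids are used as
	# nslices grows)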
if (nslices <= 100) {
tvec <- (seq(0, 1, 0.01)+0.005) * max(ephy$end);
tvec <- tvec[seq.int(1,length(tvec),length.out=nslices+1)];
ephy <- dtRates(ephy, 0.01, index, tmat = TRUE);
}
else if (nslices > 100 && nslices <= 500) {
tvec <- (seq(0, 1, 0.002)+0.001) * max(ephy$end);
tvec <- tvec[seq.int(1,length(tvec),length.out=nslices+1)];
ephy <- dtRates(ephy, 0.002, index, tmat = TRUE);
}
else if (nslices > 500 && nslices <= 1000) {
tvec <- (seq(0, 1, 0.001)+0.0005) * max(ephy$end);
tvec <- tvec[seq.int(1,length(tvec),length.out=nslices+1)];
ephy <- dtRates(ephy, 0.001, index, tmat = TRUE);
}
else {
stop("Max slices (1000) exceeded. Choose a smaller number of slices");
}
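	# for each tip, walk its root-to-tip path and, at each time point in tvec,
	# pull the rate of the dt segment whose interval contains that point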
ret <- lapply(seq.nod, function(x) {
path = which(ephy$dtrates$tmat[,1] %in% x);
ids = sapply(tvec[-length(tvec)], function(y) which(ephy$dtrates$tmat[path,2] <= y & ephy$dtrates$tmat[path,3] > y));
if (is.list(ids))
ids = unlist(ids);
if (ephy$type == "trait") {
rts = ephy$dtrates$rates[path][ids];
}
else {
if (tolower(spex) == "s") {
rts = ephy$dtrates$rates[[1]][path][ids];
}
else if (tolower(spex) == "e") {
rts = ephy$dtrates$rates[[2]][path][ids];
}
else if (tolower(spex) == "netdiv") {
rts = ephy$dtrates$rates[[1]][path][ids] - ephy$dtrates$rates[[2]][path][ids];
}
}
if (length(rts) < (length(tvec)-1))
rts = c(rts, rep(NA, length(tvec)-1-length(rts)));
rts;
});
ret <- do.call(rbind, ret);
rownames(ret) <- ephy$tip.label;
return(list(times = tvec[-length(tvec)],rates = ret));
}
# End of file: R/speciesByRatesMatrix.R
##' @title Identify the optimal number of shifts using Bayes factors
##'
##' @description stepBF is a function to determine the overall best fitting number of shifts
##' using Bayes factor evidence.
##'
##' @param BFmat square Bayes factor matrix or a named vector of posterior probabilities
##' @param step.size how much Bayes factor support is required to accept a more complex model, see Details
##' @param expectedNumberOfShifts expected number of shifts under the prior (only needed for \code{inputType = 'postProb'})
##' @param inputType describes the input: \code{'matrix'} or \code{'postProb'}
##'
##' @details
##' stepBF takes either a square Bayes factor matrix (such as output by \code{\link{computeBayesFactors}}) or a named
##' vector of posterior probabilities. If posterior probabilities are supplied, the model prior
##' (\code{expectedNumberOfShifts}) must also be provided.
##' If the input is a Bayes factor matrix, specify \code{inputType = 'matrix'}, otherwise if the input is
##' a named vector of posterior probabilities, specify \code{inputType = 'postProb'}.
##'
##' The \code{step.size} argument is how much Bayes factor support is needed to accept a more complex model.
##' By default, this value is 1, so any more complex model that has a better Bayes factor than the previous model
##' will be accepted. Increasing the step size greatly reduces the Type I error at the cost of inflating Type II
##' error. So, with increasing step.size, you will infer fewer shifts.
##'
##' @return
##' a list of 3 items: the number of shifts for the best model, the number of shifts for the second best model,
##' and the Bayes factor support for the best model over the second best.
##' @author Jonathan Mitchell
##'
##' @references \url{http://bamm-project.org/}
##' @seealso \code{\link{computeBayesFactors}}
##'
##' @examples
##' data(mcmc.whales)
##' # remove 10% burnin
##' mcmc.whales <- mcmc.whales[floor(0.1 * nrow(mcmc.whales)):nrow(mcmc.whales), ]
##' # from a square matrix of Bayes factor values (inputType = 'matrix')
##' bfmat <- computeBayesFactors(mcmc.whales, expectedNumberOfShifts = 1, burnin = 0)
##' stepBF(bfmat, step.size = 1, inputType = 'matrix')
##' # or from a vector of posterior probabilities (inputType = 'postProb')
##' postProb <- table(mcmc.whales$N_shifts) / nrow(mcmc.whales)
##' stepBF(postProb, step.size = 1, inputType = 'postProb')
##'
##' @export
stepBF <- function(BFmat, step.size = 20, expectedNumberOfShifts = 1, inputType = 'matrix') {
inputType <- match.arg(inputType, c('matrix', 'postProb'))
if (inherits(BFmat, "table")) {
BFmat <- setNames(as.vector(BFmat), names(BFmat))
}
if (inputType == 'postProb') {
if (inherits(BFmat, "matrix")) {
stop("If inputType is 'postProb', please provide a vector of posterior probabilities.")
}
post <- BFmat
prior <- dgeom(as.numeric(names(post)), prob = 1 / (1 + expectedNumberOfShifts))
BFmat <- matrix(0, nrow = length(post), ncol = length(post))
rownames(BFmat) <- names(post)
colnames(BFmat) <- names(post)
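		# Bayes factor of model i over model j = posterior odds / prior odds
		#   = (post[i]/post[j]) / (prior[i]/prior[j])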
for (i in 1:length(post)) {
for (j in 1:length(post)) {
if (post[j] == 0) {
BFmat[i,j] <- NA
} else {
prior_odds <- prior[i] / prior[j]
post_odds <- post[i] / post[j]
BFmat[i,j] <- post_odds * (1/prior_odds)
}
}
}
}
if (inherits(BFmat, 'numeric') | !identical(ncol(BFmat), nrow(BFmat))) {
stop("Bayes factor matrix, BFmat, must be square, with each cell (i,j) representing the BF of model i relative to model j.")
}
if (step.size < 1) {
stop("Step size is less than 1! This means you will move to more complex models even when they aren't supported by any evidence!")
}
if (ncol(BFmat) == 1) {
bestModel <- as.numeric(colnames(BFmat)[1])
secondModel <- NA
BF <- NA
} else if (ncol(BFmat) > 1) {
Stop <- ncol(BFmat) - 1
Out <- vector('numeric', length = Stop)
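		# step up from i to i+1 shifts only while BF(i+1 vs i) exceeds
		# step.size; Out[i] stores the BF where the step succeeds, 0 where it fails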
for (i in 1:Stop) {
Out[i] <- ifelse(BFmat[i + 1, i] > step.size, BFmat[i + 1, i], 0)
}
		best <- which(Out == 0)[1]
		# if every step-up was accepted, the most complex model is best
		if (is.na(best)) best <- length(Out) + 1
		second <- best - 1
bestModel <- as.integer(colnames(BFmat)[best])
secondModel <- as.integer(colnames(BFmat)[second])
BF <- as.numeric(BFmat[best, second])
}
return(list(bestModel = bestModel, secondModel = secondModel, BF = BF))
}
# End of file: R/stepBF.R