plotCI.UnlogCI<-function(x,...)
{
alt<-x$alternative
CIEs<-x$conf.int
rnames<-rownames(CIEs)
est<-CIEs[,"Estimate"]
lwr<-CIEs[,"lwr"]
upr<-CIEs[,"upr"]
names(est)<-rnames
args<-list(...)
args$estimate<-est
args$lower<-lwr
args$upper<-upr
args$alternative<-alt
do.call("plotCII", args)
}
|
/scratch/gouwar.j/cran-all/cranData/BSagri/R/plotCI.UnlogCI.R
|
`print.CInp` <-
function(x, ...)
{
args<-list(...)
alt<-x$alternative
cl<-x$conf.level
N<-x$N
switch(alt,
two.sided={ALT="Two-sided local "; BD<-"intervals "},
less={ALT="Upper local "; BD<-"bounds "},
greater={ALT="Lower local "; BD<-"bounds "}
)
Text<-paste(ALT, round(cl*100,2), "% confidence ", BD, "\n", "based on ",N, " simulation runs","\n", sep="")
cat(Text)
dat<-data.frame(x$estimate, x$conf.int)
names(dat)<-c("Estimate","Lower","Upper")
pargs<-args
pargs$x<-dat
if(is.null(args$digits))
pargs$digits<-4
do.call("print.data.frame", pargs)
invisible(x)
}
|
/scratch/gouwar.j/cran-all/cranData/BSagri/R/print.CInp.R
|
`print.SCSnp` <-
function(x, ...)
{
args<-list(...)
alt<-x$alternative
cl<-x$conf.level
N<-x$N
switch(alt,
two.sided={ALT="Two-sided simultaneous "; BD<-"intervals "},
less={ALT="Upper simultaneous "; BD<-"bounds "},
greater={ALT="Lower simultaneous "; BD<-"bounds "}
)
Text<-paste(ALT, round(cl*100,2), "% credible ", BD, "\n", "based on ",N, " simulation runs","\n", sep="")
cat(Text)
dat<-data.frame(x$estimate, x$conf.int)
names(dat)<-c("Estimate","Lower","Upper")
pargs<-args
pargs$x<-dat
if(is.null(args$digits))
pargs$digits<-4
do.call("print.data.frame", pargs)
invisible(x)
}
|
/scratch/gouwar.j/cran-all/cranData/BSagri/R/print.SCSnp.R
|
###########
checkargssim<-function(coef, vcov, cmat)
{
if(!is.matrix(vcov))
{stop("'vcov' must be a matrix")}
if(!is.numeric(vcov))
{stop("'vcov' must be a numeric matrix")}
if(!is.matrix(cmat))
{stop("'cmat' must be a matrix")}
if(!is.numeric(cmat))
{stop("'cmat' must be a numeric matrix")}
if(!is.numeric(coef))
{stop("'coef' must be a numeric vector")}
k<-length(coef)
if(k<=2)
{warning("Applying this function makes hardly sense for only two parameters!")}
if(any(dim(vcov)!=k))
{stop("'vcov' must be a k-times-k matrix if 'coef' has length k")}
if(ncol(cmat)!=k)
{stop("'cmat' must be a matrix with k columns, if 'coef' has length k")}
}
#################
simplesimint<-function(coef, vcov, cmat, df=NULL, conf.level = 0.95,
alternative = c("two.sided", "less", "greater") )
{
checkargssim(coef=coef, vcov=vcov, cmat=cmat)
alternative<-match.arg(alternative)
if(length(conf.level)!=1)
{stop("'conf.level' must be a single numeric value")}
if(!is.numeric(conf.level))
{stop("'conf.level' must be a single numeric value")}
if(conf.level<=0.5 | conf.level>=1)
{stop("'conf.level' must be a single numeric value between 0.5 and 1")}
estC <- cmat %*% coef
vcovC <- cmat %*% vcov %*% t(cmat)
corrC <- cov2cor(vcovC)
stderr <- sqrt(diag(vcovC))
M<-nrow(cmat)
if(is.null(df))
{
switch(alternative,
two.sided={ quanti <- qmvnorm(p=conf.level, corr=corrC, tail="both.tails")$quantile
lCI <- estC-quanti*stderr
uCI <- estC+quanti*stderr
},
less={ quanti <- qmvnorm(p=conf.level, corr=corrC, tail="lower.tail")$quantile
lCI <- rep(-Inf, M)
uCI <- estC+quanti*stderr
},
greater={quanti <- qmvnorm(p=conf.level, corr=corrC, tail="upper.tail")$quantile
lCI <- estC+quanti*stderr
uCI <- rep(Inf, M)
})
attr(quanti, which="dist") <- paste(M, "-variate normal distribution", collapse="")
}
else{
if(length(df)!=1)
{stop("'df' must be a single number")}
if(!is.numeric(df))
{stop("'df' must be numeric!")}
if(df<2)
{warning("degree of freedom less than 2 leads to computational problems!")}
switch(alternative,
two.sided={ quanti <- qmvt(p=conf.level, corr=corrC, tail="both.tails", df=df)$quantile
lCI <- estC-quanti*stderr
uCI <- estC+quanti*stderr
},
less={ quanti <- qmvt(p=conf.level, corr=corrC, tail="lower.tail", df=df)$quantile
lCI <- rep(-Inf, M)
uCI <- estC+quanti*stderr
},
greater={quanti <- qmvt(p=conf.level, corr=corrC, tail="upper.tail", df=df)$quantile
lCI <- estC+quanti*stderr
uCI <- rep(Inf, M)
})
attr(quanti, which="dist")<-paste(M, "-variate t-distribution", collapse="")
}
out<-list(
estimate=estC,
lower=lCI,
upper=uCI,
cmat=cmat,
alternative=alternative,
conf.level=conf.level,
quantile=quanti,
df=df,
stderr=stderr,
vcovC=vcovC)
class(out)<-"simplesimint"
return(out)
}
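## Usage sketch (illustrative addition, not part of the original package source):
## simultaneous confidence intervals for two comparisons against a reference level,
## built from hypothetical estimates. Assumes the 'mvtnorm' package (providing
## qmvnorm/qmvt) is attached, as required by simplesimint() above.
if (FALSE) {
  library(mvtnorm)
  coefs <- c(A = 1.2, B = 0.8, C = 1.5)              # hypothetical estimates
  vc <- diag(c(0.04, 0.05, 0.03))                    # hypothetical covariance matrix
  cm <- rbind("B-A" = c(-1, 1, 0),
              "C-A" = c(-1, 0, 1))                   # contrasts: comparisons to A
  ci <- simplesimint(coef = coefs, vcov = vc, cmat = cm,
                     conf.level = 0.95, alternative = "two.sided")
  summary(ci)   # intervals, quantile used, contrast and covariance matrices
}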
print.simplesimint <- function(x,...)
{
pargs<-list(...)
LEVEL<-round(x$conf.level*100,2)
CI<-data.frame(x$estimate, x$lower, x$upper)
colnames(CI)<-c("Estimate","Lower","Upper")
cat("\n Simultaneous", LEVEL, "% confidence intervals: \n")
pargs$x<-CI
do.call("print", pargs)
invisible(x)
}
##########
summary.simplesimint <- function(object,...)
{
pargs<-list(...)
LEVEL<-round(object$conf.level*100,2)
CI<-data.frame(object$estimate, object$lower, object$upper)
colnames(CI)<-c("Estimate","Lower","Upper")
CMAT<-object$cmat
QUANT<-round(object$quantile, 4)
DIST<-attr(object$quantile, which="dist")
DF<-object$df
VCOV<-object$vcovC
cat("\n Simultaneous ", LEVEL, "% confidence intervals: \n", sep="")
pargs$x<-CI
do.call("print", pargs)
cat("\n Used quantile: ", QUANT, ",\n", sep="")
cat("obtained from a ",DIST, "\n", sep="")
if(!is.null(DF)){cat( "with ", DF, " degrees of freedom.\n", sep="")}
#
cat("\n Used contrast matrix: \n")
pargs$x<-CMAT
do.call("print", pargs)
#
cat("\n Resulting variance covariance matrix of the contrasts: \n")
pargs$x<-VCOV
do.call("print", pargs)
#
cat("\n Corresponding correlation matrix of the contrasts: \n")
pargs$x<-cov2cor(VCOV)
do.call("print", pargs)
invisible(object)
}
##########
plotCI.simplesimint<-function(x, ...)
{
pargs<-list(...)
estimate<-as.numeric(x$estimate)
lower<-as.numeric(x$lower)
upper<-as.numeric(x$upper)
names(estimate)<-rownames(x$cmat)
pargs$estimate <- estimate
pargs$lower<-lower
pargs$upper<-upper
pargs$alternative<-x$alternative
do.call("plotCII", pargs)
}
####
|
/scratch/gouwar.j/cran-all/cranData/BSagri/R/simplesimint.R
|
#' (Adaptive) Boosting Trees (ABT/BT) Algorithm.
#'
#' Performs the (Adaptive) Boosting Trees algorithm. This code prepares the inputs and calls the function \code{\link{BT_call}}.
#' Each tree in the process is built thanks to the \code{\link{rpart}} function.
#' In case of cross-validation, this function prepares the folds and performs multiple calls to the fitting function \code{\link{BT_call}}.
#'
#' @param formula a symbolic description of the model to be fit. Note that the offset isn't supported in this algorithm.
#' Instead, everything is performed with a log-link function and a direct relationship exists between the response, the offset and the weights.
#'
#' @param data an optional data frame containing the variables in the model. By default the variables are taken from \code{environment(formula)}, typically the environment from which
#' \code{BT} is called. If \code{keep.data=TRUE} in the initial call to \code{BT} then \code{BT} stores a copy with the object (up to the variables used).
#'
#' @param tweedie.power Experimental parameter currently not used - Set to 1 referring to Poisson distribution.
#'
#' @param ABT a boolean parameter. If \code{ABT=TRUE} an adaptive boosting tree algorithm is built whereas if \code{ABT=FALSE} a classical boosting tree algorithm is run.
#' By default, it is set to \code{TRUE}.
#'
#' @param n.iter the total number of iterations to fit. This is equivalent to the number of trees and the number of basis functions in the additive expansion.
#' Please note that the initialization is not taken into account in the \code{n.iter}. More explicitly, a weighted average initializes the algorithm and then \code{n.iter} trees
#' are built. Moreover, note that the \code{bag.fraction}, \code{colsample.bytree}, ... are not used for this initializing phase.
#' By default, it is set to 100.
#'
#' @param train.fraction the first \code{train.fraction * nrows(data)} observations are used to fit the \code{BT} and the remainder are used for
#' computing out-of-sample estimates (also known as validation error) of the loss function. By default, it is set to 1 meaning no out-of-sample estimates.
#'
#' @param interaction.depth the maximum depth of variable interactions: 1 builds an additive model, 2 builds a model with up to two-way interactions, etc.
#' This parameter can also be interpreted as the maximum number of non-terminal nodes. By default, it is set to 4.
#' Please note that if this parameter is \code{NULL}, all the trees in the expansion are built based on the \code{tree.control} parameter only, independently
#' of the \code{ABT} value.
#' This option is devoted to advanced users only and allows them to benefit from the full flexibility of the implemented algorithm.
#'
#' @param shrinkage a shrinkage parameter (in the interval (0,1]) applied to each tree in the expansion. Also known as the learning rate or step-size reduction. By default, it is set to 1.
#'
#' @param bag.fraction the fraction of independent training observations randomly selected to propose the next tree in the expansion.
#' This introduces randomness into the model fit. If \code{bag.fraction}<1 then running the same model twice will result in similar but different fits.
#' Please note that if this parameter is used the \code{BTErrors$training.error} corresponds to the normalized in-bag error and the out-of-bag improvements
#' are computed and stored in \code{BTErrors$oob.improvement}. See \code{\link{BTFit}} for more details.
#' By default, it is set to 1.
#'
#' @param colsample.bytree each tree will be trained on a random subset of \code{colsample.bytree} number of features. Each tree will consider a new
#' random subset of features from the formula, adding variability to the algorithm and reducing computation time. \code{colsample.bytree} will be bounded between
#' 1 and the number of features considered in the formula. By default, it is set to \code{NULL} meaning no effect.
#'
#' @param keep.data a boolean variable indicating whether to keep the data frames. This is particularly useful if one wants to keep track of the initial data frames
#' and is further used for predicting in case any data frame is specified.
#' Note that in case of cross-validation, if \code{keep.data=TRUE} the initial data frames are saved whereas the cross-validation samples are not.
#' By default, it is set to \code{FALSE}.
#'
#' @param is.verbose if \code{is.verbose=TRUE}, the \code{BT} will print out the algorithm progress. By default, it is set to \code{FALSE}.
#'
#' @param cv.folds a positive integer representing the number of cross-validation folds to perform. If \code{cv.folds}>1 then \code{BT}, in addition to the usual fit,
#' will perform a cross-validation and calculate an estimate of generalization error returned in \code{BTErrors$cv.error}. By default, it is set to 1 meaning no cross-validation.
#'
#' @param folds.id an optional vector of values identifying what fold each observation is in. If supplied, this parameter prevails over \code{cv.folds}.
#' By default, \code{folds.id = NULL} meaning that no folds are defined.
#'
#' @param n.cores the number of cores to use for parallelization. This parameter is used during the cross-validation.
#' This parameter is bounded between 1 and the maximum number of available cores.
#' By default, it is set to 1 leading to a sequential approach.
#'
#' @param tree.control for advanced users only. It allows defining additional tree parameters that will be used at each iteration.
#' See \code{\link{rpart.control}} for more information.
#'
#' @param weights optional vector of weights used in the fitting process. These weights must be positive but do not need to be normalized.
#' By default, it is set to \code{NULL} which corresponds to a uniform weight of 1 for each observation.
#'
#' @param seed optional number used as seed. Please note that if \code{cv.folds}>1, the \code{parLapply} function is called.
#' Therefore, the seed (if defined) used inside each fold will be a multiple of the \code{seed} parameter.
#'
#' @param \dots not currently used.
#'
#' @return a \code{\link{BTFit}} object.
#'
#' @details The NA values are currently dropped using \code{na.omit}.
#'
#' @author Gireg Willame \email{gireg.willame@@gmail.com}
#'
#' \emph{This package is inspired by the \code{gbm3} package. For more details, see \url{https://github.com/gbm-developers/gbm3/}}.
#'
#' @seealso \code{\link{BTFit}}, \code{\link{BTCVFit}}, \code{\link{BT_call}}, \code{\link{BT_perf}}, \code{\link{predict.BTFit}},
#' \code{\link{summary.BTFit}}, \code{\link{print.BTFit}}, \code{\link{.BT_cv_errors}}.
#'
#' @references M. Denuit, D. Hainaut and J. Trufin (2019). \strong{Effective Statistical Learning Methods for Actuaries I: GLMs and Extensions}, \emph{Springer Actuarial}.
#'
#' M. Denuit, D. Hainaut and J. Trufin (2019). \strong{Effective Statistical Learning Methods for Actuaries II: Tree-Based Methods and Extensions}, \emph{Springer Actuarial}.
#'
#' M. Denuit, D. Hainaut and J. Trufin (2019). \strong{Effective Statistical Learning Methods for Actuaries III: Neural Networks and Extensions}, \emph{Springer Actuarial}.
#'
#' M. Denuit, D. Hainaut and J. Trufin (2022). \strong{Response versus gradient boosting trees, GLMs and neural networks under Tweedie loss and log-link}.
#' Accepted for publication in \emph{Scandinavian Actuarial Journal}.
#'
#' M. Denuit, J. Huyghe and J. Trufin (2022). \strong{Boosting cost-complexity pruned trees on Tweedie responses: The ABT machine for insurance ratemaking}.
#' Paper submitted for publication.
#'
#' M. Denuit, J. Trufin and T. Verdebout (2022). \strong{Boosting on the responses with Tweedie loss functions}. Paper submitted for publication.
#'
#' @examples
#' \donttest{
#' ## Load dataset.
#' dataset <- BT::BT_Simulated_Data
#'
#' ## Fit a Boosting Tree model.
#' BT_algo <- BT(formula = Y_normalized ~ Age + Sport + Split + Gender, # formula
#' data = dataset, # data
#' ABT = FALSE, # Classical Boosting Tree
#' n.iter = 200,
#' train.fraction = 0.8,
#' interaction.depth = 3,
#' shrinkage = 0.01,
#' bag.fraction = 0.5,
#' colsample.bytree = 2, # 2 explanatory variables used at each iteration.
#' keep.data = FALSE, # Do not keep a data copy.
#' is.verbose = FALSE, # Do not print progress.
#' cv.folds = 3, # 3-cv will be performed.
#' folds.id = NULL ,
#' n.cores = 1,
#' weights = ExpoR, # <=> Poisson model on response Y with ExpoR in offset.
#' seed = NULL)
#'
#' ## Determine the model performance and plot results.
#' best_iter_val <- BT_perf(BT_algo, method='validation')
#' best_iter_oob <- BT_perf(BT_algo, method='OOB', oobag.curve = TRUE)
#' best_iter_cv <- BT_perf(BT_algo, method ='cv', oobag.curve = TRUE)
#'
#' best_iter <- best_iter_val
#'
#' ## Variable influence and plot results.
#' # Based on the first iteration.
#' variable_influence1 <- summary(BT_algo, n.iter = 1)
#' # Using all iterations up to best_iter.
#' variable_influence_best_iter <- summary(BT_algo, n.iter = best_iter)
#'
#' ## Print results : call, best_iters and summarized relative influence.
#' print(BT_algo)
#'
#' ## Model predictions.
#' # Predict on the link scale, using only the best_iter tree.
#' pred_single_iter <- predict(BT_algo, newdata = dataset,
#' n.iter = best_iter, type = 'link', single.iter = TRUE)
#' # Predict on the response scale, using the first best_iter.
#' pred_best_iter <- predict(BT_algo, newdata = dataset,
#' n.iter = best_iter, type = 'response')
#' }
#'
#' @export
#'
BT <-
function(formula = formula(data),
data = list(),
tweedie.power = 1,
ABT = TRUE,
n.iter = 100,
train.fraction = 1,
interaction.depth = 4,
shrinkage = 1,
bag.fraction = 1,
colsample.bytree = NULL,
keep.data = TRUE,
is.verbose = FALSE,
cv.folds = 1,
folds.id = NULL,
n.cores = 1,
tree.control = rpart.control(
xval = 0,
maxdepth = (if (!is.null(interaction.depth)) {
interaction.depth
} else{
10
}),
cp = -Inf,
minsplit = 2
),
weights = NULL,
seed = NULL,
...) {
if (!is.null(seed))
set.seed(seed)
the_call <- match.call()
mf <- match.call(expand.dots = FALSE)
m <- match(c("formula", "data", "weights"), names(mf), 0)
mf <- mf[c(1, m)]
mf$drop.unused.levels <- TRUE
mf$na.action <-
na.omit # To be reset to na.pass once NAs are properly handled.
mf[[1]] <- as.name("model.frame")
m <- mf
mf <- eval(mf, parent.frame())
Terms <- attr(mf, "terms")
respVar <-
as.character(attr(Terms, "variables"))[-1][attr(Terms, "response")]
explVar <- attr(Terms, "term.labels")
#mf$originalRespVar <- mf[,respVar] # Keep the original variable -> Do not need to modify the formula that way.
#originalFormula <- formula # Keep a track of the original formula if there's any change with variable subsampling.
if (!is.null(attr(Terms, "offset"))) {
stop(
"Offset are not supported. For Tweedie model with log-link function, weights (=offset)
and response rate variable (=Original response variable/Offset) can instead be used."
)
}
if (is.null(model.weights(mf))) {
mf$w <- rep(1, nrow(mf))
}
else{
colnames(mf)[names(mf) == "(weights)"] <- "w"
}
w <- "w"
.check_tweedie_power(tweedie.power)
.check_ABT(ABT)
.check_n_iter(n.iter)
.check_train_fraction(train.fraction)
.check_interaction_depth(interaction.depth)
.check_shrinkage(shrinkage)
.check_bag_fraction(bag.fraction)
.check_colsample_bytree(colsample.bytree, length(explVar))
.check_keep_data(keep.data)
.check_is_verbose(is.verbose)
.check_cv_folds(cv.folds)
.check_folds_id(folds.id)
.check_n_cores(n.cores)
.check_weights(mf$w)
if (!is.null(interaction.depth) &&
tree.control$maxdepth != interaction.depth) {
stop(
"interaction.depth and maxdepth defined. If interaction.depth is not null it has to be set to maxdepth."
)
}
setList <- .create_validation_set(mf, train.fraction)
training.set <- setList$training.set
validation.set <- setList$validation.set
rm(setList)
rm(mf)
gc()
# Fit full model.
if (is.verbose)
message("Fit the model on the whole training set. \n")
BT_full_results <-
BT_call(
training.set,
validation.set,
tweedie.power,
respVar,
w,
explVar,
ABT,
tree.control,
train.fraction,
interaction.depth,
bag.fraction,
shrinkage,
n.iter,
colsample.bytree,
keep.data,
is.verbose
)
if (!is.null(folds.id)) {
numFolds <- length(unique(folds.id))
if (cv.folds != numFolds)
warning("CV folds changed from ",
cv.folds,
" to ",
numFolds,
" because of levels in folds.id")
cv.folds <- numFolds
# Transform folds.id into a numeric vector of fold indices, ascending from 1.
folds.id <- as.numeric(as.factor(folds.id))
}
if (cv.folds == 1) {
BT_full_results$cv.folds <- cv.folds
BT_full_results$call <- the_call
BT_full_results$Terms <- Terms
BT_full_results$seed <- seed
return(BT_full_results)
}
# Else : cv.folds > 1 (or folds.id defined).
if (is.verbose)
message("Fit the model on the different CV folds. \n")
folds <- .create_cv_folds(training.set, cv.folds, folds.id, seed)
if (n.cores > 1) {
cl <- makeCluster(n.cores)
clusterExport(
cl,
varlist = c(
"training.set",
"tweedie.power",
"respVar",
"w",
"explVar",
"ABT",
"tree.control",
"train.fraction",
"interaction.depth",
"bag.fraction",
"shrinkage",
"n.iter",
"colsample.bytree",
"keep.data",
"is.verbose"
),
envir = environment()
)
BT_cv_results <-
parLapply(cl, seq_len(cv.folds), function(xx) {
if (!is.null(seed))
set.seed(seed * (xx + 1))
valIndex <- which(folds == xx)
trainIndex <- setdiff(1:length(folds), valIndex)
BT_call(
training.set[trainIndex,],
training.set[valIndex,],
tweedie.power,
respVar,
w,
explVar,
ABT,
tree.control,
train.fraction,
interaction.depth,
bag.fraction,
shrinkage,
n.iter,
colsample.bytree,
FALSE,
is.verbose
) # We don't keep a copy of each dataset in case of cross-validation (keep.data = FALSE).
})
on.exit(stopCluster(cl))
} else{
# n.cores = 1
BT_cv_results <- lapply(seq_len(cv.folds), function(xx) {
if (!is.null(seed))
set.seed(seed * (xx + 1))
valIndex <- which(folds == xx)
trainIndex <- setdiff(1:length(folds), valIndex)
BT_call(
training.set[trainIndex,],
training.set[valIndex,],
tweedie.power,
respVar,
w,
explVar,
ABT,
tree.control,
train.fraction,
interaction.depth,
bag.fraction,
shrinkage,
n.iter,
colsample.bytree,
FALSE,
is.verbose
) # We don't keep a copy of each dataset in case of cross-validation (keep.data = FALSE).
})
}
# Different folds -> result object is from a different class.
class(BT_cv_results) <- "BTCVFit"
cv_errors <- .BT_cv_errors(BT_cv_results, cv.folds, folds)
# Best number of iterations/trees.
bestIterCV <- which.min(cv_errors)
# Prediction on each OOF for the optimal number of iterations.
predictions <-
predict(BT_cv_results, training.set, cv.folds, folds, bestIterCV)
# Extract relevant part - all data model.
BT_full_results$cv.folds <- cv.folds
BT_full_results$folds <- folds
BT_full_results$call <- the_call
BT_full_results$Terms <- Terms
BT_full_results$seed <- seed
BT_full_results$BTErrors$cv.error <- cv_errors
BT_full_results$cv.fitted <- predictions
return(BT_full_results)
}
|
/scratch/gouwar.j/cran-all/cranData/BT/R/BT.R
|
#' BTCVFit
#'
#' CV (Adaptive) Boosting Tree Model Object.
#'
#' @description These are objects representing CV fitted boosting trees.
#'
#' @return a list of \code{\link{BTFit}} objects with each element corresponding to a specific BT fit on a particular fold
#'
#' @section Structure : The following components must be included in a legitimate \code{BTCVFit} object.
#'
#' @author Gireg Willame \email{gireg.willame@@gmail.com}
#'
#' \emph{This package is inspired by the \code{gbm3} package. For more details, see \url{https://github.com/gbm-developers/gbm3/}}.
#'
#' @seealso \code{\link{BT}}.
#'
#' @references M. Denuit, D. Hainaut and J. Trufin (2019). \strong{Effective Statistical Learning Methods for Actuaries I: GLMs and Extensions}, \emph{Springer Actuarial}.
#'
#' M. Denuit, D. Hainaut and J. Trufin (2019). \strong{Effective Statistical Learning Methods for Actuaries II: Tree-Based Methods and Extensions}, \emph{Springer Actuarial}.
#'
#' M. Denuit, D. Hainaut and J. Trufin (2019). \strong{Effective Statistical Learning Methods for Actuaries III: Neural Networks and Extensions}, \emph{Springer Actuarial}.
#'
#' M. Denuit, D. Hainaut and J. Trufin (2022). \strong{Response versus gradient boosting trees, GLMs and neural networks under Tweedie loss and log-link}.
#' Accepted for publication in \emph{Scandinavian Actuarial Journal}.
#'
#' M. Denuit, J. Huyghe and J. Trufin (2022). \strong{Boosting cost-complexity pruned trees on Tweedie responses: The ABT machine for insurance ratemaking}.
#' Paper submitted for publication.
#'
#' M. Denuit, J. Trufin and T. Verdebout (2022). \strong{Boosting on the responses with Tweedie loss functions}. Paper submitted for publication.
#'
#' @keywords methods
#' @name BTCVFit
#'
NULL
|
/scratch/gouwar.j/cran-all/cranData/BT/R/BTCVFit.R
|
#' BTFit
#'
#' Boosting Tree Model Object.
#'
#' @description These are objects representing fitted boosting trees.
#'
#' @return
#' \item{BTInit}{an object of class \code{BTInit} containing the initial fitted value \code{initFit}, the initial \code{training.error} and the initial \code{validation.error} if any.}
#'
#' \item{BTErrors}{an object of class \code{BTErrors} containing the vectors of errors for each iteration performed (excl. the initialization). More precisely, it contains the \code{training.error},
#' \code{validation.error} if \code{train.fraction}<1 and the \code{oob.improvement} if \code{bag.fraction} < 1.
#' Moreover, if a cross-validation approach was performed, a vector of cross-validation errors \code{cv.error} as a function of boosting iteration is also stored in this object.}
#'
#' \item{BTIndivFits}{an object of class \code{BTIndivFits} containing the list of each individual tree fitted at each boosting iteration.}
#'
#' \item{distribution}{the Tweedie power (and hence the distribution) that has been used to perform the algorithm. It is currently always equal to 1.}
#'
#' \item{var.names}{a vector containing the names of the explanatory variables.}
#'
#' \item{response}{the name of the target/response variable.}
#'
#' \item{w}{a vector containing the weights used.}
#'
#' \item{seed}{the used seed, if any.}
#'
#' \item{BTData}{if \code{keep.data=TRUE}, an object of class \code{BTData} containing the \code{training.set} and \code{validation.set} (can be NULL if not used). These data frames are reduced
#' to the used variables, that are the response and explanatory variables. Note that in case of cross-validation, even if \code{keep.data=TRUE} the folds will not be kept. In fact, only the data
#' frames related to the original fit (i.e. on the whole training set) will be saved.}
#'
#' \item{BTParams}{an object of class \code{BTParams} containing all the (Adaptive) boosting tree parameters. More precisely, it contains the \code{ABT}, \code{train.fraction},
#' \code{shrinkage}, \code{interaction.depth}, \code{bag.fraction}, \code{n.iter}, \code{colsample.bytree} and \code{tree.control} parameter values.}
#'
#' \item{keep.data}{the \code{keep.data} parameter value.}
#'
#' \item{is.verbose}{the \code{is.verbose} parameter value.}
#'
#' \item{fitted.values}{the training set fitted values on the score scale using all the \code{n.iter} (and initialization) iterations.}
#'
#' \item{cv.folds}{the number of cross-validation folds. Set to 1 if no cross-validation performed.}
#'
#' \item{call}{the original call to the \code{BT} algorithm.}
#'
#' \item{Terms}{the \code{model.frame} terms argument.}
#'
#' \item{folds}{a vector of values identifying the fold to which each observation belongs. This argument is not present if no cross-validation was performed. It corresponds
#' to \code{folds.id} if the latter was initially defined by the user.}
#'
#' \item{cv.fitted}{a vector containing the cross-validation fitted values, if a cross-validation was performed. More precisely, for a given observation, the prediction will be furnished by the cv-model
#' for which this specific observation was out-of-fold. See \code{\link{predict.BTCVFit}} for more details.}
#'
#' @section Structure : The following components must be included in a legitimate \code{BTFit} object.
#'
#' @author Gireg Willame \email{gireg.willame@@gmail.com}
#'
#' \emph{This package is inspired by the \code{gbm3} package. For more details, see \url{https://github.com/gbm-developers/gbm3/}}.
#'
#' @seealso \code{\link{BT}}.
#'
#' @references M. Denuit, D. Hainaut and J. Trufin (2019). \strong{Effective Statistical Learning Methods for Actuaries I: GLMs and Extensions}, \emph{Springer Actuarial}.
#'
#' M. Denuit, D. Hainaut and J. Trufin (2019). \strong{Effective Statistical Learning Methods for Actuaries II: Tree-Based Methods and Extensions}, \emph{Springer Actuarial}.
#'
#' M. Denuit, D. Hainaut and J. Trufin (2019). \strong{Effective Statistical Learning Methods for Actuaries III: Neural Networks and Extensions}, \emph{Springer Actuarial}.
#'
#' M. Denuit, D. Hainaut and J. Trufin (2022). \strong{Response versus gradient boosting trees, GLMs and neural networks under Tweedie loss and log-link}.
#' Accepted for publication in \emph{Scandinavian Actuarial Journal}.
#'
#' M. Denuit, J. Huyghe and J. Trufin (2022). \strong{Boosting cost-complexity pruned trees on Tweedie responses: The ABT machine for insurance ratemaking}.
#' Paper submitted for publication.
#'
#' M. Denuit, J. Trufin and T. Verdebout (2022). \strong{Boosting on the responses with Tweedie loss functions}. Paper submitted for publication.
#'
#' @keywords methods
#' @name BTFit
#'
NULL
|
/scratch/gouwar.j/cran-all/cranData/BT/R/BTFit.R
|
#' Cross-validation errors.
#'
#' Function to compute the cross-validation error.
#'
#' @param BT_cv_fit a \code{\link{BTCVFit}} object.
#' @param cv.folds a numeric corresponding to the number of folds.
#' @param folds a numerical vector containing the different \code{folds.id}. Note that if the latter was not defined by the user, those are randomly generated based on the \code{cv.folds} input.
#'
#' @return Vector containing the cross-validation errors w.r.t. the boosting iteration.
#'
#' @details
#' This function computes the global cross-validation error as a function of the boosting iteration. In other words, this measure is obtained by
#' averaging the out-of-fold errors, weighted by the fold sizes.
#'
#' @author Gireg Willame \email{gireg.willame@@gmail.com}
#'
#' \emph{This package is inspired by the \code{gbm3} package. For more details, see \url{https://github.com/gbm-developers/gbm3/}}.
#'
#' @seealso \code{\link{BT}}.
#'
#' @references M. Denuit, D. Hainaut and J. Trufin (2019). \strong{Effective Statistical Learning Methods for Actuaries I: GLMs and Extensions}, \emph{Springer Actuarial}.
#'
#' M. Denuit, D. Hainaut and J. Trufin (2019). \strong{Effective Statistical Learning Methods for Actuaries II: Tree-Based Methods and Extensions}, \emph{Springer Actuarial}.
#'
#' M. Denuit, D. Hainaut and J. Trufin (2019). \strong{Effective Statistical Learning Methods for Actuaries III: Neural Networks and Extensions}, \emph{Springer Actuarial}.
#'
#' M. Denuit, D. Hainaut and J. Trufin (2022). \strong{Response versus gradient boosting trees, GLMs and neural networks under Tweedie loss and log-link}.
#' Accepted for publication in \emph{Scandinavian Actuarial Journal}.
#'
#' M. Denuit, J. Huyghe and J. Trufin (2022). \strong{Boosting cost-complexity pruned trees on Tweedie responses: The ABT machine for insurance ratemaking}.
#' Paper submitted for publication.
#'
#' M. Denuit, J. Trufin and T. Verdebout (2022). \strong{Boosting on the responses with Tweedie loss functions}. Paper submitted for publication.
#'
#' @rdname BT_cv_errors
#' @keywords internal
.BT_cv_errors <- function(BT_cv_fit, cv.folds, folds) {
UseMethod("BT_cv_errors", BT_cv_fit)
}
#' @keywords internal
BT_cv_errors.BTCVFit <- function(BT_cv_fit, cv.folds, folds) {
.check_if_BTCV_fit(BT_cv_fit)
in_group <- tabulate(folds, nbins = cv.folds)
cv_error <- vapply(seq_len(cv.folds),
function(xx) {
model <- BT_cv_fit[[xx]]
model$BTErrors$validation.error * in_group[xx]
}, double(BT_cv_fit[[1]]$BTParams$n.iter)) # Similar structure for each BT_cv_fit.
## this is now a (num_trees, cv.folds) matrix
## and now a n.trees vector
return(rowSums(cv_error) / length(folds))
}
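## Illustrative sketch (not part of the original package source) of the weighting
## logic above: with two folds of sizes 60 and 40, the per-iteration CV error is the
## size-weighted average of the two out-of-fold validation error curves.
if (FALSE) {
  folds <- c(rep(1, 60), rep(2, 40))
  in_group <- tabulate(folds, nbins = 2)              # fold sizes: 60 40
  val_err <- cbind(c(1.00, 0.90, 0.85),               # fold 1: validation error per iteration
                   c(1.10, 0.95, 0.92))               # fold 2: validation error per iteration
  cv_error <- sweep(val_err, 2, in_group, "*")        # weight each fold curve by its size
  rowSums(cv_error) / length(folds)                   # global CV error per iteration
}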
|
/scratch/gouwar.j/cran-all/cranData/BT/R/BT_CV_Errors.R
|
#' Predictions for CV fitted BT models.
#'
#' Compute predictions from cross-validated Boosting Trees model.
#'
#' @param object a \code{\link{BTCVFit}} object containing CV BT models.
#' @param data the database on which one wants to predict the different CV BT models.
#' @param cv.folds a positive integer specifying the number of folds to be used in cross-validation of the BT fit.
#' @param folds vector of integers specifying which row of data belongs to which cv.folds.
#' @param best.iter.cv the optimal number of trees with a CV approach.
#' @param \dots not currently used.
#'
#' @return Returns a vector of predictions for each cv folds.
#'
#' @details
#' This function is not intended for public use; it is mainly used internally to assess the cross-validation performance.
#'
#' @author Gireg Willame \email{gireg.willame@@gmail.com}
#'
#' \emph{This package is inspired by the \code{gbm3} package. For more details, see \url{https://github.com/gbm-developers/gbm3/}}.
#'
#' @seealso \code{\link{BT}}, \code{\link{BTFit}}.
#'
#' @references M. Denuit, D. Hainaut and J. Trufin (2019). \strong{Effective Statistical Learning Methods for Actuaries I: GLMs and Extensions}, \emph{Springer Actuarial}.
#'
#' M. Denuit, D. Hainaut and J. Trufin (2019). \strong{Effective Statistical Learning Methods for Actuaries II: Tree-Based Methods and Extensions}, \emph{Springer Actuarial}.
#'
#' M. Denuit, D. Hainaut and J. Trufin (2019). \strong{Effective Statistical Learning Methods for Actuaries III: Neural Networks and Extensions}, \emph{Springer Actuarial}.
#'
#' M. Denuit, D. Hainaut and J. Trufin (2022). \strong{Response versus gradient boosting trees, GLMs and neural networks under Tweedie loss and log-link}.
#' Accepted for publication in \emph{Scandinavian Actuarial Journal}.
#'
#' M. Denuit, J. Huyghe and J. Trufin (2022). \strong{Boosting cost-complexity pruned trees on Tweedie responses: The ABT machine for insurance ratemaking}.
#' Paper submitted for publication.
#'
#' M. Denuit, J. Trufin and T. Verdebout (2022). \strong{Boosting on the responses with Tweedie loss functions}. Paper submitted for publication.
#'
#' @rdname predict.BTCVFit
#' @keywords internal
predict.BTCVFit <-
function(object,
data,
cv.folds,
folds,
best.iter.cv,
...) {
# We directly apply the predict on the dataset -> no database extraction.
# Check match between data and folds.
if (nrow(data) != length(folds)) {
stop('Error in predict.BTCVFit - folds and data should have the same number of records.')
}
# Flatten data for prediction.
result <- rep(NA, length = nrow(data))
for (index in seq_len(cv.folds)) {
currModel <- object[[index]]
flag <- which(folds == index)
result[flag] <- predict(currModel,
newdata = data[flag, currModel$var.names, drop =
FALSE],
n.iter = best.iter.cv)
}
return(result)
}
|
/scratch/gouwar.j/cran-all/cranData/BT/R/BT_CV_Predict.R
|
#' (Adaptive) Boosting Trees (ABT/BT) fit.
#'
#' Fit a (Adaptive) Boosting Trees algorithm. This is for "power" users who have a large number of variables and wish to avoid calling
#' \code{model.frame} which can be slow in this instance. This function is in particular called by \code{\link{BT}}.
#' It is mainly split in two parts, the first one considers the initialization (see \code{BT_callInit}) whereas the second performs all the boosting iterations (see \code{BT_callBoosting}).
#' By default, this function does not perform input checks (those are all done in \code{\link{BT}}) and all the parameters should be given in the right format. We therefore
#' suppose that the user is aware of all the choices made.
#'
#' @param training.set a data frame containing all the related variables on which one wants to fit the algorithm.
#'
#' @param validation.set a held-out data frame containing all the related variables on which one wants to assess the algorithm performance. This can be NULL.
#'
#' @param tweedie.power Experimental parameter currently not used - Set to 1 referring to Poisson distribution.
#'
#' @param respVar the name of the target/response variable.
#'
#' @param w a vector of weights.
#'
#' @param explVar a vector containing the name of explanatory variables.
#'
#' @param ABT a boolean parameter. If \code{ABT=TRUE} an adaptive boosting tree algorithm is built whereas if \code{ABT=FALSE} a classical boosting tree algorithm is run.
#'
#' @param tree.control allows defining additional tree parameters that will be used at each iteration. See \code{\link{rpart.control}} for more information.
#'
#' @param train.fraction the first \code{train.fraction * nrows(data)} observations are used to fit the \code{BT} and the remainder are used for
#' computing out-of-sample estimates (also known as validation error) of the loss function. It is mainly used to report the value in the \code{BTFit} object.
#'
#' @param interaction.depth the maximum depth of variable interactions: 1 builds an additive model, 2 builds a model with up to two-way interactions, etc.
#' This parameter can also be interpreted as the maximum number of non-terminal nodes. By default, it is set to 4.
#' Please note that if this parameter is \code{NULL}, all the trees in the expansion are built based on the \code{tree.control} parameter only.
#' This option is devoted to advanced users only and allows them to benefit from the full flexibility of the implemented algorithm.
#'
#' @param bag.fraction the fraction of independent training observations randomly selected to propose the next tree in the expansion.
#' This introduces randomness into the model fit. If \code{bag.fraction}<1 then running the same model twice will result in similar but different fits.
#' \code{BT} uses the R random number generator, so \code{set.seed} ensures the same model can be reconstructed. Please note that if this parameter is used the \code{BTErrors$training.error}
#' corresponds to the normalized in-bag error.
#'
#' @param shrinkage a shrinkage parameter applied to each tree in the expansion. Also known as the learning rate or step-size reduction.
#'
#' @param n.iter the total number of iterations to fit. This is equivalent to the number of trees and the number of basis functions in the additive expansion.
#' Please note that the initialization is not taken into account in the \code{n.iter}. More explicitly, a weighted average initializes the algorithm and then \code{n.iter} trees
#' are built. Moreover, note that the \code{bag.fraction}, \code{colsample.bytree}, ... are not used for this initializing phase.
#'
#' @param colsample.bytree each tree will be trained on a random subset of \code{colsample.bytree} number of features. Each tree will consider a new
#' random subset of features from the formula, adding variability to the algorithm and reducing computation time. \code{colsample.bytree} will be bounded between
#' 1 and the number of features considered.
#'
#' @param keep.data a boolean variable indicating whether to keep the data frames. This is particularly useful if one wants to keep track of the initial data frames
#' and is further used for predicting in case any data frame is specified.
#' Note that in case of cross-validation, if \code{keep.data=TRUE} the initial data frames are saved whereas the cross-validation samples are not.
#'
#' @param is.verbose if \code{is.verbose=TRUE}, the \code{BT} will print out the algorithm progress.
#'
#' @return a \code{\link{BTFit}} object.
#'
#' @author Gireg Willame \email{gireg.willame@@gmail.com}
#'
#' \emph{This package is inspired by the \code{gbm3} package. For more details, see \url{https://github.com/gbm-developers/gbm3/}}.
#'
#' @seealso \code{\link{BTFit}}, \code{\link{BTCVFit}}, \code{\link{BT_perf}}, \code{\link{predict.BTFit}},
#' \code{\link{summary.BTFit}}, \code{\link{print.BTFit}}, \code{\link{.BT_cv_errors}}.
#'
#' @references M. Denuit, D. Hainaut and J. Trufin (2019). \strong{Effective Statistical Learning Methods for Actuaries I: GLMs and Extensions}, \emph{Springer Actuarial}.
#'
#' M. Denuit, D. Hainaut and J. Trufin (2019). \strong{Effective Statistical Learning Methods for Actuaries II: Tree-Based Methods and Extensions}, \emph{Springer Actuarial}.
#'
#' M. Denuit, D. Hainaut and J. Trufin (2019). \strong{Effective Statistical Learning Methods for Actuaries III: Neural Networks and Extensions}, \emph{Springer Actuarial}.
#'
#' M. Denuit, D. Hainaut and J. Trufin (2022). \strong{Response versus gradient boosting trees, GLMs and neural networks under Tweedie loss and log-link}.
#' Accepted for publication in \emph{Scandinavian Actuarial Journal}.
#'
#' M. Denuit, J. Huyghe and J. Trufin (2022). \strong{Boosting cost-complexity pruned trees on Tweedie responses: The ABT machine for insurance ratemaking}.
#' Paper submitted for publication.
#'
#' M. Denuit, J. Trufin and T. Verdebout (2022). \strong{Boosting on the responses with Tweedie loss functions}. Paper submitted for publication.
#'
#' @rdname BT_Call
#' @export
#'
BT_call <-
function(training.set,
validation.set,
tweedie.power,
respVar,
w,
explVar,
ABT,
tree.control,
train.fraction,
interaction.depth,
bag.fraction,
shrinkage,
n.iter,
colsample.bytree,
keep.data,
is.verbose) {
# Create storage objects.
BT <- list()
# Init GLM + Init error.
if (is.verbose)
message('bag.fraction is not used for the initialization fit.')
init <-
BT_callInit(training.set, validation.set, tweedie.power, respVar, w)
initF <-
list(
initFit = init$initFit,
training.error = init$trainingError,
validation.error = init$validationError
)
currTrainScore <- init$currTrainScore
currValScore <- init$currValScore
rm(init)
gc()
# Boosting algorithm.
BT <-
BT_callBoosting(
cbind(training.set, currTrainScore),
cbind(validation.set, currValScore),
tweedie.power,
ABT,
tree.control,
interaction.depth,
bag.fraction,
shrinkage,
n.iter,
colsample.bytree,
train.fraction,
keep.data,
is.verbose,
respVar,
w,
explVar
)
# Add parameters in the list and init.
BT$BTInit <- structure(initF, class = "BTInit")
class(BT) <- "BTFit"
return(BT)
}
#' @rdname BT_Call
#' @export
#'
BT_callInit <-
function(training.set,
validation.set,
tweedie.power,
respVar,
w) {
initFit <-
sum(training.set[, w] * training.set[, respVar]) / sum(training.set[, w])
currTrainScore <-
rep(log(initFit), nrow(training.set)) # Return value on score scale.
trainingError <-
sum(BT_devTweedie(
training.set[, respVar],
exp(currTrainScore),
tweedieVal = tweedie.power,
w = training.set[, w]
)) / nrow(training.set)#sum(mf$originalWeights)
currValScore <- NULL
validationError <- NULL
if (!is.null(validation.set)) {
currValScore <-
rep(log(initFit), nrow(validation.set)) # Return value on score scale.
validationError <-
sum(
BT_devTweedie(
validation.set[, respVar],
exp(currValScore),
tweedieVal = tweedie.power,
w = validation.set[, w]
)
) / nrow(validation.set)
}
return(
list(
initFit = initFit,
currTrainScore = currTrainScore,
currValScore = currValScore,
trainingError = trainingError,
validationError = validationError
)
)
}
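## Numerical sketch of the initialization step above (illustrative only): the model
## is initialized with the weighted mean of the response and the initial score is its
## log; BT_devTweedie() is the package's Tweedie deviance helper used above.
if (FALSE) {
  y <- c(0, 1, 2, 0, 1)                               # hypothetical response
  w <- c(1, 1, 2, 1, 1)                               # hypothetical weights
  initFit <- sum(w * y) / sum(w)                      # weighted average, here 6/6 = 1
  score <- rep(log(initFit), length(y))               # initial score on the log scale
  sum(BT_devTweedie(y, exp(score), tweedieVal = 1, w = w)) / length(y)  # initial training error
}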
#' @rdname BT_Call
#' @export
#'
BT_callBoosting <-
function(training.set,
validation.set,
tweedie.power,
ABT,
tree.control,
interaction.depth,
bag.fraction,
shrinkage,
n.iter,
colsample.bytree,
train.fraction,
keep.data,
is.verbose,
respVar,
w,
explVar) {
sampRow <- 1:nrow(training.set)
currFormula <-
as.formula(paste("residuals ~ ", paste(explVar, collapse = " + ")))
training.error <- NULL
validation.error <- NULL
oob.improvement <- NULL
listFits <- list()
for (iTree in seq_len(n.iter)) {
if (is.verbose) {
if ((iTree <= 10) ||
(iTree <= 100 &&
iTree %% 10 == 0) ||
(iTree %% 100 == 0))
message("Iteration: ", iTree)
}
training.set[, "residuals"] <-
training.set[, respVar] / exp(training.set[, "currTrainScore"])
training.set[, "iWeights"] <-
training.set[, w] * (exp(training.set[, "currTrainScore"]) ^ (2 - tweedie.power))
if (bag.fraction < 1) {
sampRow <-
sample(1:nrow(training.set), bag.fraction * nrow(training.set))
oobRow <- setdiff(1:nrow(training.set), sampRow)
oldOOBError <-
(sum(
BT_devTweedie(
training.set[oobRow, respVar],
exp(training.set[oobRow, "currTrainScore"]),
tweedieVal = tweedie.power,
w = training.set[oobRow, w]
)
) / length(oobRow))
if (iTree == 1)
initOOB <- oldOOBError
}
if ((!is.null(colsample.bytree)) &&
(colsample.bytree != length(explVar))) {
sampVar <- explVar[sample(1:length(explVar), colsample.bytree)]
currFormula <-
as.formula(paste("residuals ~ ", paste(sampVar, collapse = " + ")))
}
# Fit the current tree, update score and store the fit.
if (tweedie.power == 1) {
currFit <-
rpart(
currFormula,
training.set[sampRow, ],
weights = training.set[sampRow, "iWeights"],
method = "poisson",
control = tree.control,
y = FALSE
) # iWeights also works here but is not the best option.
} else{
stop("Currently implemented for Poisson distribution only.")
# currFit <- rpart(currFormula, training.set[sampRow,], weights = iWeights, method = list(eval=evalTweedie, split=splitTweedie, init=initTweedie),
# parms = list(tweedieVal=tweedie.power), control=tree.control, y=FALSE) # y = FALSE : do not keep a copy of the response variable.
}
# We need to prune the tree. If interaction.depth is NULL then the maxdepth approach is chosen and no pruning needed.
if (!is.null(interaction.depth)) {
if (!ABT) {
# interaction.depth defined and BT approach chosen.
splittingStrategy <-
.BT_splittingStrategy(currFit, interaction.depth)
if (!is.null(splittingStrategy) &&
length(splittingStrategy) > 0)
currFit <- snip.rpart(currFit, toss = splittingStrategy)
} else{
# interaction.depth defined and ABT approach chosen.
currFit <-
prune(currFit, cp = currFit$cptable[, "CP"][max(which(currFit$cptable[, "nsplit"] <= interaction.depth))])
}
}
# Delete the where object resulting from rpart. Not needed for the predict afterwards.
currFit$where <- NULL
training.set[, "currTrainScore"] <-
training.set[, "currTrainScore"] + shrinkage * log(predict(currFit, newdata =
training.set, type = "vector"))
listFits[[iTree]] <- currFit
# Compute errors.
training.error[iTree] <-
sum(BT_devTweedie(
training.set[sampRow, respVar],
exp(training.set[sampRow, "currTrainScore"]),
tweedieVal = tweedie.power,
w = training.set[sampRow, w]
)) / length(sampRow) # In-bag error.
if (bag.fraction < 1) {
oob.improvement[iTree] <-
(oldOOBError - (sum(
BT_devTweedie(
training.set[oobRow, respVar],
exp(training.set[oobRow, "currTrainScore"]),
tweedieVal = tweedie.power,
w = training.set[oobRow, w]
)
) / length(oobRow))) # OOB Improvement.
}
if (!is.null(validation.set)) {
validation.set[, "currValScore"] <-
validation.set[, "currValScore"] + shrinkage * log(predict(currFit, newdata =
validation.set, type = "vector"))
validation.error[iTree] <-
sum(
BT_devTweedie(
validation.set[, respVar],
exp(validation.set[, "currValScore"]),
tweedieVal = tweedie.power,
w = validation.set[, w]
)
) / nrow(validation.set) # Validation error.
}
} # End loop
# Return errors, fitted trees, misc.
BT_CallBoosting <- list()
BT_CallBoosting$BTErrors <-
structure(
list(
training.error = training.error,
validation.error = validation.error,
oob.improvement = oob.improvement
),
class = "BTErrors"
)
class(listFits) <- "BTIndivFits"
BT_CallBoosting$BTIndivFits <- listFits
BT_CallBoosting$distribution <- tweedie.power
BT_CallBoosting$var.names <- explVar
BT_CallBoosting$response <- respVar
BT_CallBoosting$w <- w
if (keep.data)
BT_CallBoosting$BTData <-
structure(list(training.set = training.set[, !(colnames(training.set) %in% c("iWeights", "residuals"))],
validation.set = validation.set),
class = "BTData")
BT_CallBoosting$BTParams <- structure(
list(
ABT = ABT,
train.fraction = train.fraction,
shrinkage = shrinkage,
interaction.depth = interaction.depth,
bag.fraction = bag.fraction,
n.iter = n.iter,
colsample.bytree = colsample.bytree,
tree.control = tree.control
),
class = "BTParams"
)
BT_CallBoosting$keep.data <- keep.data
BT_CallBoosting$is.verbose <- is.verbose
BT_CallBoosting$fitted.values <- training.set[, "currTrainScore"]
return(BT_CallBoosting)
}
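## Sketch of the per-iteration working data built above (illustrative only): the
## algorithm boosts on the ratio y / exp(score) with working weights
## w * exp(score)^(2 - p), which for p = 1 (Poisson) reduces to w * exp(score).
if (FALSE) {
  p <- 1                                              # Tweedie power (Poisson)
  y <- c(0, 2, 1)
  w <- c(1, 1, 1)
  score <- c(0.1, -0.2, 0.3)                          # current score on the log scale
  residuals <- y / exp(score)                         # working response passed to rpart()
  iWeights <- w * exp(score)^(2 - p)                  # working weights passed to rpart()
}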
|
/scratch/gouwar.j/cran-all/cranData/BT/R/BT_Call.R
|
#' @keywords internal
.create_validation_set <- function(data, train.fraction) {
if (train.fraction != 1) {
trainIndex <- seq(1, train.fraction * nrow(data))
valIndex <- setdiff(1:nrow(data), trainIndex)
training.set <- data[trainIndex, ]
validation.set <- data[valIndex, ]
} else{
training.set <- data
validation.set <- NULL
}
return(list(training.set = training.set, validation.set = validation.set))
}
#' @keywords internal
.create_cv_folds <- function(data, cv.folds, folds.id, seed = NULL) {
if (!is.null(folds.id)) {
if (length(folds.id) != nrow(data))
stop("length(folds.id) differs from the number of rows in the data set.")
return(folds.id)
} else{
if (!is.null(seed))
set.seed(seed)
cv_index <- sample(seq(1, cv.folds),
size = nrow(data),
replace = T)
return(cv_index)
}
}
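## Usage sketch (illustrative only) of the two helpers above.
if (FALSE) {
  d <- data.frame(y = rnorm(10))
  .create_validation_set(d, train.fraction = 0.8)     # rows 1-8 for training, 9-10 for validation
  .create_cv_folds(d, cv.folds = 3, folds.id = NULL, seed = 42)  # random fold labels in 1..3
}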
|
/scratch/gouwar.j/cran-all/cranData/BT/R/BT_Data_Split.R
|
#' Perform additional boosting iterations.
#'
#' Method to perform additional iterations of the Boosting Tree algorithm, starting from an initial \code{\link{BTFit}} object.
#' This does not support further cross-validation. Moreover, this approach is only allowed if \code{keep.data=TRUE} in the original call.
#'
#' @param BTFit_object a \code{\link{BTFit}} object.
#' @param new.n.iter number of new boosting iterations to perform.
#' @param is.verbose a logical specifying whether or not the additional fitting should run "noisily" with feedback on progress provided to the user.
#' @param seed optional seed used to perform the new iterations. By default, no seed is set.
#'
#' @return Returns a new \code{\link{BTFit}} object containing the initial call as well as the new iterations performed.
#'
#' @author Gireg Willame \email{gireg.willame@@gmail.com}
#'
#' \emph{This package is inspired by the \code{gbm3} package. For more details, see \url{https://github.com/gbm-developers/gbm3/}}.
#'
#' @seealso \code{\link{BT}}, \code{\link{BTFit}}.
#'
#' @references M. Denuit, D. Hainaut and J. Trufin (2019). \strong{Effective Statistical Learning Methods for Actuaries I: GLMs and Extensions}, \emph{Springer Actuarial}.
#'
#' M. Denuit, D. Hainaut and J. Trufin (2019). \strong{Effective Statistical Learning Methods for Actuaries II: Tree-Based Methods and Extensions}, \emph{Springer Actuarial}.
#'
#' M. Denuit, D. Hainaut and J. Trufin (2019). \strong{Effective Statistical Learning Methods for Actuaries III: Neural Networks and Extensions}, \emph{Springer Actuarial}.
#'
#' M. Denuit, D. Hainaut and J. Trufin (2022). \strong{Response versus gradient boosting trees, GLMs and neural networks under Tweedie loss and log-link}.
#' Accepted for publication in \emph{Scandinavian Actuarial Journal}.
#'
#' M. Denuit, J. Huyghe and J. Trufin (2022). \strong{Boosting cost-complexity pruned trees on Tweedie responses: The ABT machine for insurance ratemaking}.
#' Paper submitted for publication.
#'
#' M. Denuit, J. Trufin and T. Verdebout (2022). \strong{Boosting on the responses with Tweedie loss functions}. Paper submitted for publication.
#'
#' @rdname BT_more
#' @export
#'
BT_more <-
function(BTFit_object,
new.n.iter = 100,
is.verbose = FALSE,
seed = NULL) {
# Set the seed (if any) and store the call.
if (!is.null(seed))
set.seed(seed)
the_call <- match.call()
# Check inputs
.check_if_BT_fit(BTFit_object)
.check_n_iter(new.n.iter)
.check_is_verbose(is.verbose)
if (is.null(BTFit_object$BTData)) {
stop("keep.data was set to FALSE on original BT call - Impossible to continue the training.")
}
if (.has_cross_validation(BTFit_object)) {
warning("BT.more is incompatible with cross-validation; losing cv results.")
}
# Call BT package
BT_more_fit <- BT_callBoosting(
BTFit_object$BTData$training.set,
BTFit_object$BTData$validation.set,
BTFit_object$distribution,
BTFit_object$BTParams$ABT,
BTFit_object$BTParams$tree.control,
BTFit_object$BTParams$interaction.depth,
BTFit_object$BTParams$bag.fraction,
BTFit_object$BTParams$shrinkage,
new.n.iter,
BTFit_object$BTParams$colsample.bytree,
BTFit_object$BTParams$train.fraction,
BTFit_object$keep.data,
is.verbose,
BTFit_object$response,
BTFit_object$w,
BTFit_object$var.names
)
# Set class
class(BT_more_fit) <- "BTFit"
# Store correct parameters
BT_more_fit$cv.folds <- BTFit_object$cv.folds
BT_more_fit$call <- the_call
BT_more_fit$Terms <- BTFit_object$Terms
BT_more_fit$seed <- seed
# Transfer old results across
BT_more_fit$BTInit <- BTFit_object$BTInit
BT_more_fit$BTErrors <-
structure(list(
training.error = c(
BTFit_object$BTErrors$training.error,
BT_more_fit$BTErrors$training.error
),
validation.error = c(
BTFit_object$BTErrors$validation.error,
BT_more_fit$BTErrors$validation.error
),
oob.improvement = c(
BTFit_object$BTErrors$oob.improvement,
BT_more_fit$BTErrors$oob.improvement
)
),
class = "BTErrors")
BT_more_fit$BTIndivFits <-
structure(c(BTFit_object$BTIndivFits, BT_more_fit$BTIndivFits),
class = "BTIndivFits")
BT_more_fit$BTParams$n.iter <-
length(BT_more_fit$BTIndivFits) # is equal to BTFit_object$n.iter + new.n.iter.
return(BT_more_fit)
}
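## Usage sketch (illustrative only): extend an existing fit built with keep.data = TRUE
## by 50 extra iterations; 'BT_algo' refers to a fit such as the one in the BT() example.
if (FALSE) {
  extended_fit <- BT_more(BT_algo, new.n.iter = 50, is.verbose = FALSE, seed = 4)
  length(extended_fit$BTIndivFits)                    # original n.iter + 50 trees
}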
|
/scratch/gouwar.j/cran-all/cranData/BT/R/BT_More.R
|
#'
#' @import rpart
#' @import statmod
#' @import stats
#' @import grDevices
#' @import graphics
#' @import parallel
NULL
|
/scratch/gouwar.j/cran-all/cranData/BT/R/BT_Packages.R
|
#' Performance assessment.
#'
#' Function to compute the performances of a fitted boosting tree.
#'
#' @param BTFit_object a \code{\link{BTFit}} object resulting from an initial call to \code{\link{BT}}
#' @param plot.it a boolean indicating whether to plot the performance measure. Setting \code{plot.it = TRUE} creates two plots.
#' The first one plots the \code{object$BTErrors$training.error} (in black) as well as the \code{object$BTErrors$validation.error} (in red) and/or the \code{object$BTErrors$cv.error} (in green) depending on the \code{method} and
#' parametrization. These values are plotted as a function of the iteration number. The scale of the error measurement, shown on the left vertical axis, depends on the arguments used in the
#' initial call to \code{\link{BT}} and the chosen \code{method}.
#' @param oobag.curve indicates whether to plot the out-of-bag performance measures in a second plot. Note that this option makes sense if the \code{bag.fraction} was properly defined in the
#' initial call to \code{\link{BT}}.
#' @param overlay if set to \code{TRUE} and \code{oobag.curve=TRUE} then a right y-axis is added and the estimated cumulative improvement in the loss function is
#' plotted versus the iteration number.
#' @param method indicates the method used to estimate the optimal number of boosting iterations. Setting \code{method = "OOB"} computes the out-of-bag estimate and \code{method = "validation"}
#' uses the validation dataset to compute an out-of-sample estimate. Finally, setting \code{method = "cv"} extracts the optimal number of iterations using cross-validation, if
#' \code{\link{BT}} was called with \code{cv.folds > 1}. If missing, a guessing method is applied.
#' @param main optional parameter that allows the user to define specific plot title.
#'
#' @return Returns the estimated optimal number of iterations. The method of computation depends on the \code{method} argument.
#'
#' @author Gireg Willame \email{g.willame@@detralytics.eu}
#'
#' \emph{This package is inspired by the \code{gbm3} package. For more details, see \url{https://github.com/gbm-developers/gbm3/}}.
#'
#' @seealso \code{\link{BT}}, \code{\link{BT_call}}.
#'
#' @references M. Denuit, D. Hainaut and J. Trufin (2019). \strong{Effective Statistical Learning Methods for Actuaries I: GLMs and Extensions}, \emph{Springer Actuarial}.
#'
#' M. Denuit, D. Hainaut and J. Trufin (2019). \strong{Effective Statistical Learning Methods for Actuaries II: Tree-Based Methods and Extensions}, \emph{Springer Actuarial}.
#'
#' M. Denuit, D. Hainaut and J. Trufin (2019). \strong{Effective Statistical Learning Methods for Actuaries III: Neural Networks and Extensions}, \emph{Springer Actuarial}.
#'
#' M. Denuit, D. Hainaut and J. Trufin (2022). \strong{Response versus gradient boosting trees, GLMs and neural networks under Tweedie loss and log-link}.
#' Accepted for publication in \emph{Scandinavian Actuarial Journal}.
#'
#' M. Denuit, J. Huyghe and J. Trufin (2022). \strong{Boosting cost-complexity pruned trees on Tweedie responses: The ABT machine for insurance ratemaking}.
#' Paper submitted for publication.
#'
#' M. Denuit, J. Trufin and T. Verdebout (2022). \strong{Boosting on the responses with Tweedie loss functions}. Paper submitted for publication.
#'
#' @rdname BT_perf
#' @export
#'
BT_perf <- function(BTFit_object,
plot.it = TRUE,
oobag.curve = FALSE,
overlay = TRUE,
method,
main = "") {
if (!is.logical(plot.it) || (length(plot.it)) > 1 || is.na(plot.it))
stop("plot.it must be a logical - excluding NA")
performance <- .BT_callPerformance(BTFit_object, method)
if (plot.it) {
plot(
performance,
out_of_bag_curve = oobag.curve,
overlay = overlay,
main = main
)
}
as.numeric(performance)
}
#' @keywords internal
.BT_callPerformance <- function(BTFit_object, method) {
# Check inputs
.check_if_BT_fit(BTFit_object)
## Guess the method
if (missing(method)) {
method <- .guess_error_method(BTFit_object)
message("Using ", method, " method...")
}
result <- switch(
method,
OOB = .best_iter_out_of_bag(BTFit_object),
validation = .best_iter_validation(BTFit_object),
cv = .best_iter_cv(BTFit_object),
stop("method must be validation, cv or OOB")
)
attr(result, "info") <-
list(method = method, BTFit_object = BTFit_object)
class(result) <- "BTPerformance"
return(result)
}
#' @keywords internal
as.double.BTPerformance <- function(x, ...) {
as.double(unclass(x))
}
#' @keywords internal
print.BTPerformance <- function(x, ...) {
info <- attr(x, 'info')
method_descriptor <-
switch(
info$method,
cv = "cross-validation",
validation = "validation-set",
OOB = "out-of-bag",
stop("Unknown method.")
)
cat("The best ", method_descriptor, " iteration was ", x, ".\n",
sep = "")
invisible(x)
}
#' @keywords internal
plot.BTPerformance <-
function(x,
out_of_bag_curve = FALSE,
overlay = TRUE,
main = "",
...) {
info <- attr(x, 'info')
.perf_plot(info$BTFit_object,
x,
out_of_bag_curve,
overlay,
info$method,
main)
}
#' @keywords internal
.best_iter_validation <- function(BTFit_object) {
.check_if_BT_fit(BTFit_object)
if (!.has_train_validation_split(BTFit_object)) {
stop(
'In order to use method = "validation" BT must be called with a properly defined train.fraction parameter.'
)
}
best_iter_val <-
which.min(.iteration_error(BTFit_object, 'validation'))
return(best_iter_val)
}
#' @keywords internal
.best_iter_cv <- function(BTFit_object) {
.check_if_BT_fit(BTFit_object)
if (!.has_cross_validation(BTFit_object)) {
stop('In order to use method="cv" BT must be called with cv_folds>1.')
}
best_iter_cv <- which.min(.iteration_error(BTFit_object, 'cv'))
return(best_iter_cv)
}
#' @keywords internal
.best_iter_out_of_bag <- function(BTFit_object) {
.check_if_BT_fit(BTFit_object)
if (BTFit_object$BTParams$bag.fraction == 1)
stop("Cannot compute OOB estimate or the OOB curve when bag.fraction=1")
if (all(!is.finite(BTFit_object$BTErrors$oob.improvement)))
stop("Cannot compute OOB estimate or the OOB curve. No finite OOB estimates of improvement")
  message(
    "OOB generally underestimates the optimal number of iterations although predictive performance is reasonably competitive. ",
    "Using cv_folds>1 when calling BT usually results in improved predictive performance."
  )
smoother <- .generate_smoother_oobag(BTFit_object)
best_iter_oob <- smoother$x[which.min(-cumsum(smoother$y))]
return(best_iter_oob)
}
#' @keywords internal
.generate_smoother_oobag <- function(BTFit_object) {
.check_if_BT_fit(BTFit_object)
smoother <- NULL
x <- seq_len(BTFit_object$BTParams$n.iter)
smoother <- loess(BTFit_object$BTErrors$oob.improvement ~ x,
enp.target = min(max(4, length(x) / 10), 50))
smoother$y <- smoother$fitted
smoother$x <- x
return(smoother)
}
#' @keywords internal
.guess_error_method <- function(BTFit_object) {
if (.has_train_validation_split(BTFit_object)) {
"validation"
} else if (.has_cross_validation(BTFit_object)) {
"cv"
} else{
"OOB"
}
}
|
/scratch/gouwar.j/cran-all/cranData/BT/R/BT_Perf.R
|
#' @keywords internal
.perf_plot <- function(BTFit_object,
best_iter,
out_of_bag_curve,
overlay,
method,
main) {
# Check inputs
.check_if_BT_fit(BTFit_object)
if (!is.logical(overlay) ||
(length(overlay)) > 1 || is.na(overlay))
stop("overlay must be a logical - excluding NA")
if (!is.logical(out_of_bag_curve) ||
(length(out_of_bag_curve)) > 1 || is.na(out_of_bag_curve))
stop("out_of_bag_curve must be a logical - excluding NA")
#Restore old parameters on exit.
oldpar <- par(no.readonly = TRUE)
on.exit(par(oldpar))
par(mar = c(5, 4, 4, 4) + .1)
# Get y-axis label and limits
ylab <-
paste("Tweedie deviance (power=",
BTFit_object$distribution,
")",
sep = "")
if (!.has_train_validation_split(BTFit_object)) {
ylim <- switch(
method,
cv = range(
.iteration_error(BTFit_object, 'train'),
.iteration_error(BTFit_object, 'cv')
),
validation = range(
.iteration_error(BTFit_object, 'train'),
.iteration_error(BTFit_object, 'validation')
),
OOB = range(.iteration_error(BTFit_object, 'train'))
) # Those are the only 3 possibilities allowed by the main BT_callPerformance function, no further test needed.
} else {
ylim <- range(
.iteration_error(BTFit_object, 'train'),
.iteration_error(BTFit_object, 'validation')
)
}
# Initial plot
plot(
.iteration_error(BTFit_object, 'train'),
ylim = ylim,
type = "l",
xlab = "Iteration",
ylab = ylab,
main = main
)
if (.has_train_validation_split(BTFit_object)) {
lines(.iteration_error(BTFit_object, 'validation'), col = "red")
}
if (method == "cv") {
lines(.iteration_error(BTFit_object, 'cv'), col = "green")
}
if (!is.na(best_iter))
abline(
v = best_iter,
col = "blue",
lwd = 2,
lty = 2
)
# Plot out of bag curve
if (out_of_bag_curve) {
if (BTFit_object$BTParams$bag.fraction == 1)
stop("Cannot compute OOB estimate or the OOB curve when bag.fraction=1")
if (all(!is.finite(BTFit_object$BTErrors$oob.improvement)))
stop("Cannot compute OOB estimate or the OOB curve. No finite OOB estimates of improvement")
.plot_oobag(BTFit_object, best_iter, overlay, ylab)
}
}
#' @keywords internal
.plot_oobag <- function(BTFit_object, best_iter, overlay, ylab) {
# Get smoother
smoother <- .generate_smoother_oobag(BTFit_object)
# Plot smoothed out of bag improvement
if (overlay) {
#Restore old parameters on exit.
oldpar <- par(no.readonly = TRUE)
on.exit(par(oldpar))
par(new = TRUE)
plot(
smoother$x,
cumsum(smoother$y),
col = "blue",
type = "l",
xlab = "",
ylab = "",
axes = FALSE
)
axis(4, srt = 0)
at <- mean(range(smoother$y))
mtext(
paste("OOB improvement in", ylab),
side = 4,
srt = 270,
line = 2
)
abline(h = 0,
col = "blue",
lwd = 2)
}
# Plot original out of bag improvement
plot(
BTFit_object$BTErrors$oob.improvement,
type = "l",
xlab = "Iteration",
ylab = paste("OOB change in", ylab)
)
lines(smoother, col = "red", lwd = 2)
abline(h = 0,
col = "blue",
lwd = 1)
abline(v = best_iter,
col = "blue",
lwd = 1)
}
|
/scratch/gouwar.j/cran-all/cranData/BT/R/BT_Perf_Plot.R
|
#' Predict method for BT Model fits.
#'
#' Predicted values based on a boosting tree model object.
#'
#' @param object a \code{\link{BTFit}} object.
#' @param newdata data frame of observations for which to make predictions. If missing or not a data frame, and if \code{keep.data=TRUE} was used in the initial fit, the original training set will be used.
#' @param n.iter number of boosting iterations used for the prediction. This parameter can be a vector in which case predictions are returned for each iteration specified.
#' @param type the scale on which the BT makes the predictions. Can either be "link" or "response". Note that, by construction, a log-link function is used during the fit.
#' @param single.iter if \code{single.iter=TRUE} then \code{predict.BTFit} returns the predictions from the single tree \code{n.iter}.
#' @param \dots not currently used.
#'
#' @return Returns a vector of predictions. By default, the predictions are on the score scale.
#' If \code{type = "response"}, then \code{BT} converts back to the same scale as the outcome. Note that a log-link is assumed by construction.
#'
#' @details
#' \code{predict.BTFit} produces predicted values for each observation in \code{newdata} using the first \code{n.iter} boosting iterations.
#' If \code{n.iter} is a vector then the result is a matrix with each column corresponding to the \code{BT} predictions with \code{n.iter[1]} boosting iterations, \code{n.iter[2]} boosting
#' iterations, and so on.
#'
#' As for the fit, the predictions do not include any offset term.
#' In the Poisson case, please keep in mind that a weighted approach is favored over the offset one.
#'
#' @author Gireg Willame \email{gireg.willame@@gmail.com}
#'
#' \emph{This package is inspired by the \code{gbm3} package. For more details, see \url{https://github.com/gbm-developers/gbm3/}}.
#'
#' @seealso \code{\link{BT}}, \code{\link{BTFit}}.
#'
#' @references M. Denuit, D. Hainaut and J. Trufin (2019). \strong{Effective Statistical Learning Methods for Actuaries I: GLMs and Extensions}, \emph{Springer Actuarial}.
#'
#' M. Denuit, D. Hainaut and J. Trufin (2019). \strong{Effective Statistical Learning Methods for Actuaries II: Tree-Based Methods and Extensions}, \emph{Springer Actuarial}.
#'
#' M. Denuit, D. Hainaut and J. Trufin (2019). \strong{Effective Statistical Learning Methods for Actuaries III: Neural Networks and Extensions}, \emph{Springer Actuarial}.
#'
#' M. Denuit, D. Hainaut and J. Trufin (2022). \strong{Response versus gradient boosting trees, GLMs and neural networks under Tweedie loss and log-link}.
#' Accepted for publication in \emph{Scandinavian Actuarial Journal}.
#'
#' M. Denuit, J. Huyghe and J. Trufin (2022). \strong{Boosting cost-complexity pruned trees on Tweedie responses: The ABT machine for insurance ratemaking}.
#' Paper submitted for publication.
#'
#' M. Denuit, J. Trufin and T. Verdebout (2022). \strong{Boosting on the responses with Tweedie loss functions}. Paper submitted for publication.
#'
#' @rdname predict.BTFit
#' @export
#'
predict.BTFit <-
function(object,
newdata,
n.iter,
type = "link",
single.iter = FALSE,
...) {
# Check inputs
if (!is.element(type, c("link", "response"))) {
stop("type must be either 'link' or 'response'")
}
if (missing(newdata) || !is.data.frame(newdata)) {
if (object$keep.data) {
message(
"As newdata is missing or is not a data frame, the training set has been used thanks to the keep.data = TRUE parameter."
)
newdata <- object$BTData$training.set
} else{
stop("newdata must be provided as a data frame.")
}
}
if (!all(object$var.names %in% colnames(newdata))) {
stop("newdata must contain the same explanatory variable as the original fitted BT object.")
}
if (missing(n.iter)) {
stop("Number of iterations to be used in prediction must be provided.")
}
if (length(n.iter) == 0) {
stop("n.iter cannot be NULL or a vector of zero length.")
}
if (any(n.iter != as.integer(n.iter)) ||
is.na(all(n.iter == as.integer(n.iter)))
||
any(n.iter <= 0)) {
# at least one iteration - not only the init considered to avoid problem.
stop("n.iter must be a vector of non-negative integers.")
}
if (!(single.iter %in% c(TRUE, FALSE))) {
stop("single.iter should be either TRUE or FALSE.")
}
if (any(n.iter > object$BTParams$n.iter)) {
n.iter[n.iter > object$BTParams$n.iter] <- object$BTParams$n.iter
warning("Number of trees exceeded number fit so far. Using ",
paste(n.iter, collapse = " "),
".")
}
outMatrix <- matrix(NA, nrow = nrow(newdata), ncol = length(n.iter))
if (single.iter) {
for (i in seq(1, length(n.iter))) {
iIter <- n.iter[i]
# Link-scale output.
outMatrix[, i] <-
log(predict(
object$BTIndivFits[[iIter]],
newdata = newdata,
type = "vector"
))
}
} else{
# Compute cumulative results for each iteration in the vector n.iter
lastIter <- max(n.iter)
shrinkage <- object$BTParams$shrinkage
currPred <-
rep(log(object$BTInit$initFit), nrow(newdata)) # GLM used as first prediction.
for (iIter in seq(1, lastIter)) {
currPred <-
currPred + shrinkage * log(predict(
object$BTIndivFits[[iIter]],
newdata = newdata,
type = "vector"
))
if (iIter %in% n.iter) {
outMatrix[, which(n.iter == iIter)] <- currPred
}
}
}
if (type == "response")
outMatrix <- exp(outMatrix) # Exponential link-function.
if (length(n.iter) == 1)
outMatrix <- as.vector(outMatrix)
return(outMatrix)
}
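# Illustrative usage of predict.BTFit, kept as comments since package source files
# are not meant to execute examples. `fit` and `newdata` are hypothetical objects:
# a fitted BTFit object and a data frame containing the model's explanatory variables.
# predict(fit, newdata, n.iter = c(50, 100), type = "response")  # matrix, one column per value of n.iter
# predict(fit, newdata, n.iter = 75, type = "link")              # vector of scores
# predict(fit, newdata, n.iter = 40, single.iter = TRUE)         # prediction of the 40th tree only (link scale)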
|
/scratch/gouwar.j/cran-all/cranData/BT/R/BT_Predict.R
|
#' Printing function.
#'
#' Function to print the BT results.
#'
#' @param x a \code{\link{BTFit}} object.
#' @param \dots arguments passed to \code{print.default}.
#'
#' @return No value returned.
#'
#' @details Print the different input parameters as well as obtained results (best iteration/performance & relative influence) given the chosen approach.
#'
#' @author Gireg Willame \email{gireg.willame@@gmail.com}
#'
#' \emph{This package is inspired by the \code{gbm3} package. For more details, see \url{https://github.com/gbm-developers/gbm3/}}.
#'
#' @seealso \code{\link{BT}}, \code{\link{.BT_relative_influence}}, \code{\link{BT_perf}}.
#'
#' @references M. Denuit, D. Hainaut and J. Trufin (2019). \strong{Effective Statistical Learning Methods for Actuaries I: GLMs and Extensions}, \emph{Springer Actuarial}.
#'
#' M. Denuit, D. Hainaut and J. Trufin (2019). \strong{Effective Statistical Learning Methods for Actuaries II: Tree-Based Methods and Extensions}, \emph{Springer Actuarial}.
#'
#' M. Denuit, D. Hainaut and J. Trufin (2019). \strong{Effective Statistical Learning Methods for Actuaries III: Neural Networks and Extensions}, \emph{Springer Actuarial}.
#'
#' M. Denuit, D. Hainaut and J. Trufin (2022). \strong{Response versus gradient boosting trees, GLMs and neural networks under Tweedie loss and log-link}.
#' Accepted for publication in \emph{Scandinavian Actuarial Journal}.
#'
#' M. Denuit, J. Huyghe and J. Trufin (2022). \strong{Boosting cost-complexity pruned trees on Tweedie responses: The ABT machine for insurance ratemaking}.
#' Paper submitted for publication.
#'
#' M. Denuit, J. Trufin and T. Verdebout (2022). \strong{Boosting on the responses with Tweedie loss functions}. Paper submitted for publication.
#'
#' @rdname print.BTFit
#' @export
#'
print.BTFit <- function(x, ...) {
# Print call
if (!is.null(x$call))
print(x$call)
# Print out number of iterations and distribution used
.print_iters_and_dist(x)
# Print out performance measures
best_iter <- .print_perf_measures(x)
# Print out relative influence of variables
ri <- .BT_relative_influence(x, n.iter = best_iter)
cat(
"There were",
length(x$var.names),
"predictors of which",
sum(ri > 0),
"had non-zero influence.\n"
)
return(invisible(x))
}
#### Helper Functions ####
#' @keywords internal
.print_iters_and_dist <- function(x) {
.check_if_BT_fit(x)
if (x$BTParams$ABT) {
cat(
"An adaptive boosting tree model with Tweedie parameter :",
x$distribution,
" has been fitted.\n",
length(.iteration_error(x, 'train')),
"iterations were performed.\n"
)
} else{
cat(
"A boosting tree model with Tweedie parameter :",
x$distribution,
" has been fitted.\n",
length(.iteration_error(x, 'train')),
"iterations were performed.\n"
)
}
}
#' @keywords internal
.print_perf_measures <- function(x) {
# Calculate the best number of iterations - returns test set if possible
.check_if_BT_fit(x)
# Set default answer - final iteration
best_iter <- length(.iteration_error(x, 'train'))
# OOB best iteration.
if (.has_bagging(x)) {
best_iter <- print(.BT_callPerformance(x, method = "OOB"))
}
# CV best iteration
if (.has_cross_validation(x)) {
best_iter <- print(.BT_callPerformance(x, method = "cv"))
}
# Validation set best iteration
if (.has_train_validation_split(x)) {
best_iter <- print(.BT_callPerformance(x, method = "validation"))
}
return(best_iter)
}
|
/scratch/gouwar.j/cran-all/cranData/BT/R/BT_Print.R
|
#' Method for estimating the relative influence.
#'
#' Helper function for computing the relative influence of each variable in the BT object.
#'
#' @param BTFit_object a \code{\link{BTFit}} object.
#' @param n.iter number of boosting iterations used for computation. If not provided, the function will perform a best guess approach to determine the optimal number of iterations. In fact,
#' if a validation set was used during the fitting, the retained number of iterations is the one corresponding to the lowest validation set error; otherwise, if cross-validation was performed, the
#' number of iterations resulting in the lowest cross-validation error will be used; otherwise, if the out-of-bag parameter was defined, the OOB error will be used to determine the optimal
#' number of iterations; otherwise, all iterations will be used.
#' @param rescale whether or not the results should be rescaled (divided by the maximum observation). Default set to \code{FALSE}.
#' @param sort.it whether or not the results should be (reverse) sorted. Default set to \code{FALSE}.
#' @param consider.competing whether or not competing split should be considered in the relative influence computation. Default set to \code{FALSE}.
#' @param consider.surrogates whether or not surrogates should be considered in the relative influence computation. Default set to \code{FALSE}.
#'
#' @return Returns by default an unprocessed vector of estimated relative influences. If the \code{rescale} and \code{sort.it} arguments are used, it returns
#' a processed version of the same vector.
#'
#' @details
#' This function is not intended for end-user use. It performs the relative influence computation and is called during the summary function.
#' Note that a permutation approach is not yet implemented.
#'
#' @author Gireg Willame \email{gireg.willame@@gmail.com}
#'
#' \emph{This package is inspired by the \code{gbm3} package. For more details, see \url{https://github.com/gbm-developers/gbm3/}}.
#'
#' @seealso \code{\link{BT}}, \code{\link{BTFit}}, \code{\link{BT_perf}}.
#'
#' @references M. Denuit, D. Hainaut and J. Trufin (2019). \strong{Effective Statistical Learning Methods for Actuaries I: GLMs and Extensions}, \emph{Springer Actuarial}.
#'
#' M. Denuit, D. Hainaut and J. Trufin (2019). \strong{Effective Statistical Learning Methods for Actuaries II: Tree-Based Methods and Extensions}, \emph{Springer Actuarial}.
#'
#' M. Denuit, D. Hainaut and J. Trufin (2019). \strong{Effective Statistical Learning Methods for Actuaries III: Neural Networks and Extensions}, \emph{Springer Actuarial}.
#'
#' M. Denuit, D. Hainaut and J. Trufin (2022). \strong{Response versus gradient boosting trees, GLMs and neural networks under Tweedie loss and log-link}.
#' Accepted for publication in \emph{Scandinavian Actuarial Journal}.
#'
#' M. Denuit, J. Huyghe and J. Trufin (2022). \strong{Boosting cost-complexity pruned trees on Tweedie responses: The ABT machine for insurance ratemaking}.
#' Paper submitted for publication.
#'
#' M. Denuit, J. Trufin and T. Verdebout (2022). \strong{Boosting on the responses with Tweedie loss functions}. Paper submitted for publication.
#'
#' @rdname BT_relative_influence
#' @keywords internal
.BT_relative_influence <- function(BTFit_object,
n.iter,
rescale = FALSE,
sort.it = FALSE,
consider.competing = FALSE,
consider.surrogates = FALSE) {
# Initial checks
.check_if_BT_fit(BTFit_object)
if (!is.logical(rescale) ||
(length(rescale) > 1) || is.na(rescale))
stop("rescale argument must be a logical")
if (!is.logical(sort.it) ||
(length(sort.it) > 1) || is.na(sort.it))
stop("sort.it must be a logical")
# Fill in missing values
if (missing(n.iter)) {
if (.has_train_validation_split(BTFit_object)) {
n.iter <- .BT_callPerformance(BTFit_object, method = "validation")
}
else if (.has_cross_validation(BTFit_object)) {
n.iter <- .BT_callPerformance(BTFit_object, method = "cv")
}
else if (.has_bagging(BTFit_object)) {
n.iter <- .BT_callPerformance(BTFit_object, method = "OOB")
}
else{
n.iter <- BTFit_object$BTParams$n.iter
}
message("n.iter not given. Using ", n.iter, " trees.")
}
else{
.check_n_iter(n.iter) # Additional checks on n.iter
if (n.iter > length(BTFit_object$BTIndivFits)) {
stop("n.iter exceeds number in fit")
}
}
# Create relative influence for every variable
rel_inf_verbose <-
unlist(lapply(BTFit_object$BTIndivFits[seq(1, n.iter)],
function(xx) {
.get_rel_inf_of_vars(xx,
considerCompeting = consider.competing,
considerSurrogates = consider.surrogates)
}))
# Sum across trees
rel_inf_compact <-
unlist(lapply(split(
rel_inf_verbose, names(rel_inf_verbose)
), sum))
# rel_inf_compact excludes variables that never entered the model
# insert 0's for the excluded variables
if (length(BTFit_object$var.names) != length(names(rel_inf_compact))) {
varToAdd <-
BTFit_object$var.names[!(BTFit_object$var.names %in% names(rel_inf_compact))]
rel_inf <- c(rel_inf_compact, rep(0, length(varToAdd)))
names(rel_inf)[(length(rel_inf_compact) + 1):length(BTFit_object$var.names)] <-
varToAdd
} else{
rel_inf <- rel_inf_compact
}
# Rescale and sort
if (rescale)
rel_inf <- rel_inf / max(rel_inf)
if (sort.it)
rel_inf <- rev(sort(rel_inf))
return(rel_inf)
}
#### Helper function ####
#' @keywords internal
.get_rel_inf_of_vars <-
function(rpart_object,
considerCompeting,
considerSurrogates) {
if (!is.null(rpart_object$splits) &
(nrow(rpart_object$splits) > 0)) {
frameWithoutLeafs <-
rpart_object$frame[rpart_object$frame$var != "<leaf>", ]
generateVec <- function(ncompete, nsurrogate) {
c(
"PrimarySplit",
rep("CompetingSplit", ncompete),
rep("SurrogateSplit", nsurrogate)
)
}
typeOfSplitList <-
mapply(
generateVec,
frameWithoutLeafs$ncompete,
frameWithoutLeafs$nsurrogate,
SIMPLIFY = FALSE
)
primarySplitRef <-
unlist(sapply(seq(1, length(
typeOfSplitList
)), function(xx)
rep(xx, length(
typeOfSplitList[[xx]]
))))
typeOfSplitVec <- unlist(typeOfSplitList)
filterVec <- c("PrimarySplit")
## According to rpart doc, need to rescale for anova method.
scaledImportance <- rpart_object$splits[, "improve"]
indexSurrogate <- (typeOfSplitVec == "SurrogateSplit")
if (rpart_object$method == "anova") {
scaledImportance[!indexSurrogate] <-
rpart_object$splits[!indexSurrogate, "improve"] * frameWithoutLeafs[primarySplitRef[!indexSurrogate], "dev"]
}
## According to rpart doc, need to adjust the surrogates as well.
if (considerSurrogates) {
scaledImportance[indexSurrogate] <-
(scaledImportance[typeOfSplitVec == "PrimarySplit"][primarySplitRef[indexSurrogate]] *
rpart_object$splits[indexSurrogate, "adj"])
filterVec <- c(filterVec, "SurrogateSplit")
}
filterVec <-
if (considerCompeting)
c(filterVec, "CompetingSplit")
else
filterVec
indexFilter <- which(typeOfSplitVec %in% filterVec)
return(lapply(split(
scaledImportance[indexFilter], names(scaledImportance[indexFilter])
), sum))
}
else
(return(list()))
}
#.get_rel_inf_of_vars <- function(rpart_object) {
# if (!is.null(rpart_object$splits)) return(lapply(split(rpart_object$splits[,3], names(rpart_object$splits[,3])), sum)) # 3 - Improvement
# else (return(list())) # With rpart : splits isn't returned if we've a single node (i.e. no splits).
#}
|
/scratch/gouwar.j/cran-all/cranData/BT/R/BT_Relative_Influence.R
|
#' Summary of a BTFit object.
#'
#' Computes the relative influence of each variable in the BTFit object.
#'
#' @param object a \code{\link{BTFit}} object.
#' @param cBars the number of bars to plot. If \code{order=TRUE} only the variables with the \code{cBars} largest relative influence will appear in the barplot.
#' If \code{order=FALSE} then the first \code{cBars} variables will appear in the barplot.
#' @param n.iter the number of trees used to compute the relative influence. Only the first \code{n.iter} trees will be used.
#' @param plot_it an indicator as to whether the plot is generated.
#' @param order_it an indicator as to whether the plotted and/or returned relative influences are sorted.
#' @param method the function used to compute the relative influence. Currently, only \code{\link{.BT_relative_influence}} is available (default value as well).
#' @param normalize if \code{TRUE} returns the normalized relative influence.
#' @param ... additional argument passed to the plot function.
#'
#' @return Returns a data frame where the first component is the variable name and the second one is the computed relative influence, normalized to sum up to 100.
#' Depending on the \code{plot_it} value, the relative influence plot will be performed.
#'
#' @details Please note that the relative influence for variables having an original \strong{negative} relative influence is forced to 0.
#'
#' @author Gireg Willame \email{gireg.willame@@gmail.com}
#'
#' \emph{This package is inspired by the \code{gbm3} package. For more details, see \url{https://github.com/gbm-developers/gbm3/}}.
#'
#' @seealso \code{\link{BT}}, \code{\link{.BT_relative_influence}}.
#'
#' @references M. Denuit, D. Hainaut and J. Trufin (2019). \strong{Effective Statistical Learning Methods for Actuaries I: GLMs and Extensions}, \emph{Springer Actuarial}.
#'
#' M. Denuit, D. Hainaut and J. Trufin (2019). \strong{Effective Statistical Learning Methods for Actuaries II: Tree-Based Methods and Extensions}, \emph{Springer Actuarial}.
#'
#' M. Denuit, D. Hainaut and J. Trufin (2019). \strong{Effective Statistical Learning Methods for Actuaries III: Neural Networks and Extensions}, \emph{Springer Actuarial}.
#'
#' M. Denuit, D. Hainaut and J. Trufin (2022). \strong{Response versus gradient boosting trees, GLMs and neural networks under Tweedie loss and log-link}.
#' Accepted for publication in \emph{Scandinavian Actuarial Journal}.
#'
#' M. Denuit, J. Huyghe and J. Trufin (2022). \strong{Boosting cost-complexity pruned trees on Tweedie responses: The ABT machine for insurance ratemaking}.
#' Paper submitted for publication.
#'
#' M. Denuit, J. Trufin and T. Verdebout (2022). \strong{Boosting on the responses with Tweedie loss functions}. Paper submitted for publication.
#'
#' @rdname summary.BTFit
#' @export
#'
summary.BTFit <- function(object,
cBars = length(object$var.names),
n.iter = object$BTParams$n.iter,
plot_it = TRUE,
order_it = TRUE,
method = .BT_relative_influence,
normalize = TRUE,
...)
{
# Initial checks
.check_n_iter(n.iter)
.check_if_BT_fit(object)
if (is.null(cBars) ||
!(.check_if_natural_number(cBars) ||
(cBars == 0)) ||
(length(cBars) > 1)) {
stop("cBars should be an integer.")
}
if (!is.logical(plot_it) ||
(length(plot_it) > 1) ||
is.na(plot_it)) {
stop("argument plot_it must be a logical - excluding NA")
}
if (!is.logical(order_it) ||
(length(order_it) > 1) ||
is.na(order_it)) {
stop("argument order_it must be a logical - excluding NA")
}
if (!is.logical(normalize) ||
(length(normalize) > 1) ||
is.na(normalize)) {
stop("argument normalize must be a logical - excluding NA")
}
# Set inputs (if required)
if (cBars == 0)
cBars <- min(10, length(object$var.names))
if (cBars > length(object$var.names))
cBars <- length(object$var.names)
if (n.iter > object$BTParams$n.iter)
warning(
"Exceeded total number of BT terms. Results use n.iter=",
object$BTParams$n.iter,
" terms.\n"
)
n.iter <- min(n.iter, object$BTParams$n.iter)
# Calculate relative influence and order/normalize
rel_inf <- method(object, n.iter = n.iter)
rel_inf[rel_inf < 0] <- 0
if (normalize)
rel_inf <- 100 * rel_inf / sum(rel_inf)
ordering <- seq_len(length(rel_inf))
if (order_it) {
ordering <- order(-rel_inf)
}
# Bar plot of relative influence
if (plot_it) {
barplot(
rel_inf[ordering[cBars:1]],
horiz = TRUE,
col = rainbow(cBars, start = 3 / 6, end = 4 / 6),
names = object$var.names[ordering[cBars:1]],
xlab = "Relative influence",
las = 1,
...
)
}
return(data.frame(var = object$var.names[ordering],
rel_inf = rel_inf[ordering]))
}
|
/scratch/gouwar.j/cran-all/cranData/BT/R/BT_Summary.R
|
############################
# Accessors.
############################
#' @keywords internal
.iteration_error <-
function(object,
which = c('train', 'validation', 'cv')) {
.check_if_BT_fit(object)
switch(
match.arg(which),
train = object$BTErrors$training.error,
validation = object$BTErrors$validation.error,
cv = object$BTErrors$cv.error,
stop("Unknown error measure")
)
}
############################
# Checks different BT arguments.
############################
#' @keywords internal
.check_if_rpart_params <- function(params) {
.assertInherits(params, "list")
}
#' @keywords internal
.check_tweedie_power <- function(tweedie.power) {
if (is.null(tweedie.power))
stop("Please defined a tweedie power.")
if (!is.double(tweedie.power) ||
(length(tweedie.power) > 1) ||
is.infinite(tweedie.power))
stop("tweedie.power should be a finite numeric")
if (tweedie.power > 0 &&
tweedie.power < 1)
stop("tweedie.power is not defined between 0 and 1.")
}
#' @keywords internal
.check_n_iter <- function(n.iter) {
if (is.null(n.iter) ||
!.check_if_natural_number(n.iter) ||
(length(n.iter) > 1))
stop("n.iter should be a positive integer.")
}
#' @keywords internal
.check_interaction_depth <- function(interaction.depth) {
if (!is.null(interaction.depth) &&
(!.check_if_natural_number(interaction.depth) ||
(length(interaction.depth) > 1)))
stop("When defined interaction.depth should be a positive integer.")
}
#' @keywords internal
.check_shrinkage <- function(shrinkage) {
if (is.null(shrinkage) ||
(length(shrinkage) > 1))
stop("Please define a shrinkage parameter.")
if (shrinkage <= 0 ||
shrinkage > 1)
stop("Shrinkage parameter should be > 0 and <=1.")
}
#' @keywords internal
.check_bag_fraction <- function(bag.fraction) {
if (is.null(bag.fraction) ||
(length(bag.fraction) > 1))
stop("Please define a bag.fraction parameter.")
if (bag.fraction <= 0 ||
bag.fraction > 1)
stop("bag.fraction parameter should be > 0 and <=1.")
}
#' @keywords internal
.check_colsample_bytree <- function(colsample.bytree, numExplVar) {
if (!is.null(colsample.bytree)) {
if (length(colsample.bytree) > 1)
stop("colsample.bytree should be a positive integer.")
if (colsample.bytree > numExplVar)
stop("colsample.bytree should be lower than the number of explanatory variables.")
if (!.check_if_natural_number(colsample.bytree))
stop("colsample.bytree should be a positive integer.")
}
}
#' @keywords internal
.check_train_fraction <- function(train.fraction) {
if (is.null(train.fraction) ||
(length(train.fraction) > 1))
stop("Please define a train.fraction parameter.")
if (train.fraction <= 0 ||
train.fraction > 1)
stop("When defined train.fraction should be > 0 and <= 1.")
}
#' @keywords internal
.check_keep_data <- function(keep.data) {
if (!is.logical(keep.data) ||
(length(keep.data) > 1) ||
is.na(keep.data))
stop("keep.data should be a boolean.")
}
#' @keywords internal
.check_is_verbose <- function(is.verbose) {
if (!is.logical(is.verbose) ||
(length(is.verbose) > 1) ||
is.na(is.verbose))
stop("is.verbose should be a boolean.")
}
#' @keywords internal
.check_cv_folds <- function(cv.folds) {
if (is.null(cv.folds))
stop("cv.folds should be defined.")
if (!.check_if_natural_number(cv.folds) ||
(length(cv.folds) > 1))
stop("cv.folds should be a positive integer.")
}
#' @keywords internal
.check_folds_id <- function(folds.id) {
if (!is.null(folds.id) &&
(!is.vector(folds.id) ||
any(is.na(folds.id))))
stop("When defined folds.id should be a vector of CV index.")
}
#' @keywords internal
.check_n_cores <- function(n.cores) {
if (!.check_if_natural_number(n.cores) ||
(length(n.cores) > 1))
stop("n.cores should be a positive integer.")
detectedCores <- parallel::detectCores()
if (n.cores > detectedCores)
stop(paste0(
"n.cores is higher than maximum available cores (",
detectedCores,
")."
))
if (n.cores == detectedCores)
warning(
"n.cores is equal to maximum available cores. System might become unresponsive and crash in case of insufficient memory.",
      immediate. = TRUE
)
}
#' @keywords internal
.check_weights <- function(weights) {
if (!is.double(weights) ||
any(weights <= 0))
stop("Non-double and negative weights not allowed.")
}
#' @keywords internal
.check_ABT <- function(ABT) {
if (!is.logical(ABT) ||
(length(ABT) > 1) || is.na(ABT))
stop("ABT should be a boolean.")
}
############################
# Check different outputs.
############################
#' @keywords internal
.has_train_validation_split <- function(object) {
(object$BTParams$train.fraction != 1) # Previously, !is.null(...)
}
#' @keywords internal
.has_bagging <- function(object) {
object$BTParams$bag.fraction < 1
}
#' @keywords internal
.has_cross_validation <- function(object) {
!is.null(object$BTErrors$cv.error)
}
#' @keywords internal
.check_if_natural_number <-
function(x, tol = .Machine$double.eps ^ 0.5) {
x > tol & abs(x - round(x)) < tol
}
############################
# Check different classes.
############################
#' @keywords internal
.assertInherits <- function(object, class.name) {
if (!isTRUE(inherits(object, class.name))) {
stop("Function requires a ", class.name, " object.")
}
}
#' @keywords internal
.check_if_BT_fit <- function(object) {
.assertInherits(object, "BTFit")
}
#' @keywords internal
.check_if_BTCV_fit <- function(object) {
.assertInherits(object, "BTCVFit")
}
############################
# Splitting strategy.
############################
#' @keywords internal
.BT_splittingStrategy <- function(rpart_object, interaction.depth) {
ff <- rpart_object$frame
# No split available - rootnode.
if (is.null(rpart_object$splits) ||
nrow(rpart_object$splits) == 0) {
return()
}
# Points to primary splits in ff
fpri <- which(ff$var != "<leaf>")
# Points to primaries in the splits matrix
spri <-
1 + cumsum(c(0, 1 + ff$ncompete[fpri] + ff$nsurrogate[fpri]))
spri <- spri[seq_along(fpri)]
# Add improvements to primary splits in ff and special treatment for anova.
ff <-
cbind(ff[fpri, ], "improve" = rpart_object$splits[spri, "improve"])
if (rpart_object$method == "anova")
ff$improve <- ff$improve * ff$dev
ff$node <- as.numeric(rownames(ff))
  ff <- ff[order(ff$improve, decreasing = TRUE), c("node", "improve")]
for (i in seq(1, interaction.depth)) {
if (i == 1) {
# Initialization
nodeToKeep <- c(1)
nodeCandidates <- c(2, 3)
} else{
# Be sure we consider only positive improvement (normally not needed, managed by rpart)
nodeIndex <- match(nodeCandidates, ff[ff$improve > 0, "node"])
if (all(is.na(nodeIndex))) {
# No further splits possible.
return(setdiff(ff$node, nodeToKeep))
}
bestSplittingNode <- nodeCandidates[which.min(nodeIndex)]
nodeToKeep <- c(nodeToKeep, bestSplittingNode)
nodeCandidates <-
c(
setdiff(nodeCandidates, bestSplittingNode),
c(2 * bestSplittingNode, 2 * bestSplittingNode + 1)
)
}
}
return(setdiff(ff$node, nodeToKeep))
}
|
/scratch/gouwar.j/cran-all/cranData/BT/R/BT_Utilities.R
|
# Deviance functions for Tweedie family.
#' Deviance function for the Tweedie family.
#'
#' Compute the deviance for the Tweedie family case.
#'
#' @param y a vector containing the observed values.
#' @param mu a vector containing the fitted values.
#' @param w an optional vector of weights.
#' @param tweedieVal a numeric representing the Tweedie power. It has to be a non-negative number lying outside the open interval ]0,1[ (the value 0 corresponds to the Gaussian case).
#'
#' @return A vector of individual deviance contribution.
#'
#' @details
#' This function computes the Tweedie related deviance. The latter is defined as:
#'
#' \deqn{d(y, mu, w) = w (y - mu)^2, if p = 0;}
#' \deqn{d(y, mu, w) = 2 w (y log(y/mu) + mu - y), if p = 1;}
#' \deqn{d(y, mu, w) = 2 w (log(mu/y) + y/mu - 1), if p = 2;}
#' \deqn{d(y, mu, w) = 2 w (max(y,0)^(2-p)/((1-p)(2-p)) - y mu^(1-p)/(1-p) + mu^(2-p)/(2-p)), otherwise,}
#' where p denotes the \code{tweedieVal} parameter.
#'
#'
#' @author Gireg Willame \email{gireg.willame@@gmail.com}
#'
#' \emph{This package is inspired by the \code{gbm3} package. For more details, see \url{https://github.com/gbm-developers/gbm3/}}.
#'
#' @seealso \code{\link{BT}}, \code{\link{BT_call}}.
#'
#' @references M. Denuit, D. Hainaut and J. Trufin (2019). \strong{Effective Statistical Learning Methods for Actuaries I: GLMs and Extensions}, \emph{Springer Actuarial}.
#'
#' M. Denuit, D. Hainaut and J. Trufin (2019). \strong{Effective Statistical Learning Methods for Actuaries II: Tree-Based Methods and Extensions}, \emph{Springer Actuarial}.
#'
#' M. Denuit, D. Hainaut and J. Trufin (2019). \strong{Effective Statistical Learning Methods for Actuaries III: Neural Networks and Extensions}, \emph{Springer Actuarial}.
#'
#' M. Denuit, D. Hainaut and J. Trufin (2022). \strong{Response versus gradient boosting trees, GLMs and neural networks under Tweedie loss and log-link}.
#' Accepted for publication in \emph{Scandinavian Actuarial Journal}.
#'
#' M. Denuit, J. Huyghe and J. Trufin (2022). \strong{Boosting cost-complexity pruned trees on Tweedie responses: The ABT machine for insurance ratemaking}.
#' Paper submitted for publication.
#'
#' M. Denuit, J. Trufin and T. Verdebout (2022). \strong{Boosting on the responses with Tweedie loss functions}. Paper submitted for publication.
#'
#' @export
#'
BT_devTweedie <- function(y, mu, tweedieVal, w = NULL) {
.check_tweedie_power(tweedieVal)
if (any(is.logical(y) |
is.character(y) | (y != as.double(y)) | is.na(y))) {
stop("Responses must be doubles")
}
if (any(is.logical(mu) |
is.character(mu) | (mu != as.double(mu)) | is.na(mu))) {
stop("Predictions must be doubles")
}
if (is.null(w)) {
w <- rep(1, length(y))
}
if (any(is.logical(w) |
is.character(w) | (w != as.double(w)) | is.na(w) | (w < 0))) {
stop("Weights must be positive doubles")
}
if (any(length(y) != length(mu) | length(y) != length(w))) {
stop("Responses, predictions and weights should have the same length")
}
if (tweedieVal == 0) {
# Gaussian case.
dev <- w * (y - mu) ** 2
} else if (tweedieVal == 1) {
# Poisson case.
r <- mu
p <- which(y > 0)
r[p] <- (y * log(y / mu) - (y - mu))[p]
dev <- 2 * r * w
} else if (tweedieVal == 2) {
# Gamma case.
dev <-
2 * w * (-log(ifelse(y == 0, 1, y / mu)) + (y / mu) - 1) # Support Gamma : ]0; +Inf[
} else{
    # pmax() applies the max(y, 0) of the deviance formula element-wise.
    dev <-
      2 * w * (((pmax(y, 0) ^ (2 - tweedieVal)) / ((1 - tweedieVal) * (2 - tweedieVal))) - (y *
        (mu ^ (1 - tweedieVal)) / (1 - tweedieVal)) + ((mu ^ (2 - tweedieVal)) /
        (2 - tweedieVal)))
}
return(dev)
}
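# Illustrative usage of BT_devTweedie, kept as comments since package source files
# are not meant to execute examples; the numeric values below are arbitrary.
# y  <- c(0, 1, 3)
# mu <- c(0.5, 1.2, 2.4)
# BT_devTweedie(y, mu, tweedieVal = 1)                    # Poisson deviance contributions
# BT_devTweedie(y, mu, tweedieVal = 1.5, w = c(1, 2, 1))  # Tweedie (p = 1.5) with weights
# sum(BT_devTweedie(y, mu, tweedieVal = 1))               # total deviance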
|
/scratch/gouwar.j/cran-all/cranData/BT/R/BT_devTweedie.R
|
#' Simulated Database.
#'
#' A simulated database used for examples and vignettes.
#' The variables are related to a motor insurance pricing context.
#'
#' @format
#' A simulated data frame with 50,000 rows and 7 columns, containing simulated records for different policyholders:
#' \describe{
#'   \item{Gender}{Gender, either male or female.}
#'   \item{Age}{Age, varying from 18 to 65 years old.}
#'   \item{Split}{Noisy variable, not used to simulate the response variable. It allows assessing how the algorithm handles such features.}
#'   \item{Sport}{Car type, either yes (sports car) or no.}
#'   \item{ExpoR}{Yearly exposure-to-risk, varying between 0 and 1.}
#'   \item{Y}{Yearly claim number, simulated from a Poisson distribution.}
#' \item{Y_normalized}{Yearly claim frequency, corresponding to the ratio between Y and ExpoR.}
#' }
"BT_Simulated_Data"
|
/scratch/gouwar.j/cran-all/cranData/BT/R/data.R
|
## ---- echo=FALSE--------------------------------------------------------------
# write("TMPDIR = 'C:\\Users\\Gireg Willame\\Desktop\\TMP'", file=file.path(Sys.getenv('R_USER'), '.Renviron'))
# knitr::opts_chunk$set(
# fig.path = "c:/Users/Gireg Willame/Desktop/TMP/Figures"
# )
## -----------------------------------------------------------------------------
library(BT)
## ---- tidy=TRUE---------------------------------------------------------------
db <- BT::BT_Simulated_Data
## ---- tidy=TRUE---------------------------------------------------------------
str(db)
head(db)
## ---- tidy=TRUE---------------------------------------------------------------
summary(db)
## ---- tidy=TRUE---------------------------------------------------------------
sum(db$Y)/sum(db$ExpoR)
## ---- tidy=TRUE---------------------------------------------------------------
set.seed(404)
trainObs <- sample(seq(1, nrow(db)), 0.8*nrow(db))
trainSet <- db[trainObs,]
testSet <- db[setdiff(seq(1, nrow(db)), trainObs),]
sum(trainSet$Y)/sum(trainSet$ExpoR)
sum(testSet$Y)/sum(testSet$ExpoR)
## ---- tidy=TRUE---------------------------------------------------------------
formFreq <- Y_normalized ~ Gender + Age + Split + Sport
## ---- tidy=TRUE---------------------------------------------------------------
bt0 <- BT(formula = formFreq,
data = trainSet,
tweedie.power = 1,
ABT = FALSE,
n.iter = 50,
train.fraction = 0.8,
interaction.depth = 3,
shrinkage = 0.01,
bag.fraction = 0.5,
colsample.bytree = NULL,
keep.data = TRUE,
is.verbose = FALSE,
cv.folds = 1,
folds.id = NULL,
n.cores = 1,
weights = ExpoR,
seed = 4)
## ---- tidy=TRUE---------------------------------------------------------------
bt0$call
bt0$distribution
bt0$BTParams
bt0$keep.data
bt0$is.verbose
bt0$seed
#bt0$w / bt0$response / bt0$var.name
## ---- tidy=TRUE---------------------------------------------------------------
print(bt0)
## ---- tidy=TRUE---------------------------------------------------------------
str(bt0$BTInit)
## ---- tidy=TRUE---------------------------------------------------------------
str(bt0$BTData)
## ---- tidy=TRUE---------------------------------------------------------------
head(bt0$fitted.values, 5)
str(bt0$BTErrors)
## ---- tidy=TRUE---------------------------------------------------------------
length(bt0$BTIndivFits)
# First tree in the expansion.
bt0$BTIndivFits[[1]]
bt0$BTIndivFits[[1]]$frame
## ---- tidy=TRUE, fig.align='center'-------------------------------------------
perfbt0_OOB <- BT_perf(bt0, method="OOB", oobag.curve = TRUE)
perfbt0_OOB
## ---- tidy=TRUE, fig.align='center'-------------------------------------------
perfbt0_val <- BT_perf(bt0, method="validation")
perfbt0_val
## ---- tidy=TRUE---------------------------------------------------------------
perfbt0_BG <- BT_perf(bt0, plot.it = FALSE)
perfbt0_BG
## ---- tidy=TRUE---------------------------------------------------------------
bt1 <- BT_more(bt0, new.n.iter = 150, seed = 4)
# See parameters and different inputs.
bt1$BTParams$n.iter
## ---- tidy=TRUE---------------------------------------------------------------
perfbt1_OOB <- BT_perf(bt1, method = 'OOB', plot.it = FALSE)
perfbt1_val <- BT_perf(bt1, method = 'validation', plot.it = FALSE)
perfbt1_OOB
perfbt1_val
## ---- tidy=TRUE---------------------------------------------------------------
bt2 <- BT(formula = formFreq,
data = trainSet,
tweedie.power = 1,
ABT = FALSE,
n.iter = 200,
train.fraction = 1,
interaction.depth = 3,
shrinkage = 0.01,
bag.fraction = 0.5,
colsample.bytree = NULL,
keep.data = TRUE,
is.verbose = FALSE,
cv.folds = 3,
folds.id = NULL,
n.cores = 1,
weights = ExpoR,
seed = 4)
## ---- tidy=TRUE---------------------------------------------------------------
bt2$cv.folds
str(bt2$folds)
str(bt2$cv.fitted)
str(bt2$BTErrors)
## ---- tidy=TRUE, fig.align='center'-------------------------------------------
perfbt2_cv <- BT_perf(bt2, method = 'cv')
## ---- tidy=TRUE---------------------------------------------------------------
bt3 <- BT(formula = formFreq,
data = trainSet,
tweedie.power = 1,
ABT = FALSE,
n.iter = 225,
train.fraction = 1,
interaction.depth = 2,
shrinkage = 0.01,
bag.fraction = 0.5,
colsample.bytree = NULL,
keep.data = TRUE,
is.verbose = FALSE,
cv.folds = 3,
folds.id = NULL,
n.cores = 1,
weights = ExpoR,
seed = 4)
## ---- tidy=TRUE---------------------------------------------------------------
indexMin <- which.min(c(min(bt2$BTErrors$cv.error), min(bt3$BTErrors$cv.error)))
btOpt <- if(indexMin==1) bt2 else bt3
perfbtOpt_cv <- BT_perf(btOpt, method='cv', plot.it=FALSE)
btOpt
perfbtOpt_cv
## ---- tidy=TRUE---------------------------------------------------------------
summary(btOpt, n.iter = perfbtOpt_cv)
## ---- tidy=TRUE---------------------------------------------------------------
head(predict(btOpt, n.iter = c(BT_perf(btOpt, method='OOB', plot.it=FALSE), perfbtOpt_cv), type = 'link'), 10)
head(predict(btOpt, n.iter = c(BT_perf(btOpt, method='OOB', plot.it=FALSE), perfbtOpt_cv), type = 'response'), 10)
## ---- tidy=TRUE---------------------------------------------------------------
head(predict(btOpt, n.iter = 40, type = 'response', single.iter = TRUE), 10)
## ---- tidy=TRUE---------------------------------------------------------------
nIterVec <- 225
interactionDepthVec <- c(2, 3)
shrinkageVec <- 0.01
bagFractionVec <- 0.5
gridSearch <- expand.grid(n.iter = nIterVec,
interaction.depth = interactionDepthVec,
shrinkage = shrinkageVec,
bag.fraction = bagFractionVec)
gridSearch
## ---- tidy=TRUE---------------------------------------------------------------
abtRes_cv <- list()
for (iGrid in seq(1, nrow(gridSearch)))
{
currABT <- BT(formula = formFreq,
data = trainSet,
tweedie.power = 1,
ABT = TRUE,
n.iter = gridSearch[iGrid, "n.iter"],
train.fraction = 1,
interaction.depth = gridSearch[iGrid, "interaction.depth"],
shrinkage = gridSearch[iGrid, "shrinkage"],
bag.fraction = gridSearch[iGrid, "bag.fraction"],
colsample.bytree = NULL,
keep.data = FALSE,
is.verbose = FALSE,
cv.folds = 3,
folds.id = NULL,
n.cores = 1,
weights = ExpoR,
seed = 4)
abtRes_cv[[iGrid]] <- currABT
}
## ---- tidy=TRUE, fig.align='center'-------------------------------------------
perfabt1_cv <- BT_perf(abtRes_cv[[1]], method='cv', plot.it=TRUE)
perfabt2_cv <- BT_perf(abtRes_cv[[2]], method='cv', plot.it=TRUE)
## ---- tidy=TRUE---------------------------------------------------------------
indexMin <- which.min(c(min(abtRes_cv[[1]]$BTErrors$cv.error), min(abtRes_cv[[2]]$BTErrors$cv.error)))
abtOpt <- if (indexMin==1) abtRes_cv[[1]] else abtRes_cv[[2]]
perfabtOpt_cv <- if (indexMin==1) perfabt1_cv else perfabt2_cv
abtOpt
abtOpt$BTParams$interaction.depth
perfabtOpt_cv
## ---- tidy=TRUE---------------------------------------------------------------
table(sapply(seq(1, perfbtOpt_cv), function(xx){nrow(btOpt$BTIndivFits[[xx]]$frame[btOpt$BTIndivFits[[xx]]$frame$var != "<leaf>",])}))
table(sapply(seq(1, perfabtOpt_cv), function(xx){nrow(abtOpt$BTIndivFits[[xx]]$frame[abtOpt$BTIndivFits[[xx]]$frame$var != "<leaf>",])}))
## ---- tidy=TRUE---------------------------------------------------------------
btPredTest <- predict(btOpt, newdata = testSet, n.iter = perfbtOpt_cv, type = "response") * testSet$ExpoR
abtPredTest <- predict(abtOpt, newdata = testSet, n.iter = perfabtOpt_cv, type = "response") * testSet$ExpoR
## ---- tidy=TRUE---------------------------------------------------------------
devPoisson <- function(obs, pred) {
2 * (sum(dpois(x = obs, lambda = obs, log = TRUE)) - sum(dpois(x = obs, lambda = pred, log = TRUE)))
}
## ---- tidy=TRUE---------------------------------------------------------------
devPoisson(testSet$Y, btPredTest)
devPoisson(testSet$Y, abtPredTest)
|
/scratch/gouwar.j/cran-all/cranData/BT/inst/doc/BT-usage-example.R
|
---
title: "Getting started with the BT package"
author: "Gireg Willame"
date: "`r Sys.Date()`"
output:
rmarkdown::html_vignette:
toc: true
vignette: >
%\VignetteIndexEntry{BT-usage-example}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
header-includes:
- \usepackage{amsmath}
- \DeclareMathOperator*{\argmin}{argmin}
---
```{r, echo=FALSE}
# write("TMPDIR = 'C:\\Users\\Gireg Willame\\Desktop\\TMP'", file=file.path(Sys.getenv('R_USER'), '.Renviron'))
# knitr::opts_chunk$set(
# fig.path = "c:/Users/Gireg Willame/Desktop/TMP/Figures"
# )
```
# Introduction
The `BT` package implements (Adaptive) Boosting Trees for *Poisson* distributed response variables, using the log-link function.
When presented with data, the `BT` package offers the user the ability to build predictive models and explore the influence of different variables on the response, akin to a data mining or exploration task.
The package is based on the original idea proposed by D. Hainaut, J. Trufin and M. Denuit. For more theoretical details, we refer to the following references:
* M. Denuit, D. Hainaut and J. Trufin (2019). **Effective Statistical Learning Methods for Actuaries I: GLMs and Extensions**, *Springer Actuarial*.
* M. Denuit, D. Hainaut and J. Trufin (2020). **Effective Statistical Learning Methods for Actuaries II: Tree-Based Methods and Extensions**, *Springer Actuarial*.
* M. Denuit, D. Hainaut and J. Trufin (2019). **Effective Statistical Learning Methods for Actuaries III: Neural Networks and Extensions**, *Springer Actuarial*.
* M. Denuit, D. Hainaut and J. Trufin (2022). **Response versus gradient boosting trees, GLMs and neural networks under Tweedie loss and log-link**, *Scandinavian Actuarial Journal 2022, 841-866*.
* M. Denuit, J. Huyghe and J. Trufin (2022). **Boosting cost-complexity pruned trees on Tweedie responses: The ABT machine for insurance ratemaking**. Paper submitted for publication.
* M. Denuit, J. Trufin and T. Verdebout (2022). **Boosting on the responses with Tweedie loss functions**. Paper submitted for publication.
We'll now show how to use the `BT` package on a classical machine learning problem.
In particular, an insurance-related model will be investigated in the following.
Let's start by importing the `BT` package.
```{r}
library(BT)
```
# Important modeling remark
As previously mentioned, the `BT` package is only available for a *Poisson* distributed response variable $Y$, in a log-link context.
Using an *offset* in the Poisson framework is often required.
In insurance modelling for example, the offset allows taking into account the time exposure-to-risk. It therefore helps to align the premium with the contract duration.
Regarding the `BT` package, the weighted approach was favored in place of the offset one. In fact, the two are equivalent given some adjustments.
More precisely, the modeler is allowed to work either with
* the observed claim count $Y$ with offset $ln(d)$ under log link, where $d$ is the exposure to risk, generally measured in time units, sometimes in distance traveled or other meaningful unit depending on the application, or
* the observed claim frequency (also called claim rate) $\tilde{Y} = Y/d$ provided the weight $\nu = d$ enters the analysis. In fact, the distribution of the claim frequency $\tilde{Y}$ still belongs to the Tweedie family and is called the Poisson rate distribution.
We refer to the first book aforementioned (p. 123) for a detailed proof.
We now focus on the impact of such implementation choice on the Boosting Tree Algorithm.
First of all, let us remind the algorithm in our Poisson log-link framework.
Given a training set
$$\mathcal{D} = \Bigl\{ (d_i, y_i, \mathbf{x}_i), i \in \mathcal{I} \Bigr\},$$
where
* $d_i$ the exposure-to-risk (e.g. offset term).
* $y_i$ is the observed count variable.
* $\mathbf{x}_i$ the corresponding features vector.
* $\mathcal{I}$ corresponds to the set of all observations,
the following steps are performed:
1. Initialize the score to
$$
\widehat{\text{score}}_0(x) = \argmin_{\beta} \sum_{i \in \mathcal{I}} L\Bigl(y_i, d_i \exp(\beta) \Bigr).
$$
2. **For** $m = 1$ to $M$ **do**
2.1. Fit a weak learner, regression tree in our context, $T(\mathbf{x}; \widehat{\mathbf{a}_m})$ with
$$
\widehat{\mathbf{a}_m} = \argmin_{\mathbf{a}_m} \sum_{i \in \mathcal{I}} L\biggl(y_i, d_i \exp\Bigl(\widehat{\text{score}}_{m-1}(\mathbf{x}_i) + T(\mathbf{x}_i; \mathbf{a}_m)\Bigr)\biggr),
$$
where
* $\mathbf{a}_m$ gathers the splitting variables and their split values as well as the corresponding observed averages in the terminal nodes, i.e. describes the built tree.
* $L$ is the loss function, defined as the Poisson deviance in our approach.
2.2. Update $\widehat{\text{score}}_m(\mathbf{x}) = \widehat{\text{score}}_{m-1}(\mathbf{x}) + T(\mathbf{x}; \widehat{\mathbf{a}_m})$.
3. Output $\widehat{\text{score}}(\mathbf{x}) = \widehat{\text{score}}_M(\mathbf{x}).$
Suppose that we're at the $m$th boosting iteration, the algorithm then fits a weak learner $T(\mathbf{x}; \widehat{\mathbf{a}_m})$.
Using the above trick, for a given observation $i$ one can rewrite the optimization step 2.1 as
$$
L\biggl(y_i, d_i \exp\Bigl(\widehat{\text{score}}_{m-1}(\mathbf{x}_i) + T(\mathbf{x}_i; \mathbf{a}_m)\Bigr)\biggr) =
\nu_i L\biggl(\tilde{y}_i, \exp\Bigl(\widehat{\text{score}}_{m-1}(\mathbf{x}_i) + T(\mathbf{x}_i; \mathbf{a}_m)\Bigr)\biggr),
$$
where $\nu_i = d_i$ and $\tilde{y_i} = \frac{y_i}{d_i}$.
Using the definition of the Poisson deviance $L$, one can easily rewrite the second term as:
$$
\nu_i L\biggl(\tilde{y}_i, \exp\Bigl(\widehat{\text{score}}_{m-1}(\mathbf{x}_i) + T(\mathbf{x}_i; \mathbf{a}_m)\Bigr)\biggr) = \nu_{mi} L(\tilde{r}_{mi}, \exp(T(\mathbf{x}_i; \mathbf{a}_m))),
$$
with
$$
\nu_{mi} = \nu_i \exp\Bigl(\widehat{\text{score}}_{m-1}(\mathbf{x}_i) \Bigr)
$$
and
$$
\tilde{r}_{mi} = \frac{\tilde{y}_i}{\exp \Bigl( \widehat{\text{score}}_{m-1}(\mathbf{x}_i) \Bigr)}.
$$
The $m$th iteration of the boosting procedure therefore reduces to build a single weak learner, on the working training set
$$
$$\mathcal{D}^{(m)} = \Bigl\{ (\nu_{mi}, \tilde{r}_{mi}, \mathbf{x}_i), i \in \mathcal{I} \Bigr\},$$
$$
using the Poisson deviance loss and the log-link function.
Going through iterations, the weights are each time updated together with the responses that are assumed to follow Poisson rate distributions.
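To make this equivalence concrete, the following chunk (not evaluated) sketches it on a plain Poisson GLM: fitting the claim counts with an offset or the claim frequencies with weights yields the same estimated coefficients. The variable names refer to the simulated database introduced in the next section.
```{r, eval=FALSE}
# Offset parametrization: observed counts with log-exposure offset.
fitOffset <- glm(Y ~ Gender + Age + Sport + offset(log(ExpoR)),
                 family = poisson(link = "log"), data = db)
# Weighted parametrization: observed frequencies with exposure as weight.
# (R may warn about non-integer responses; the fit itself is valid.)
fitWeights <- glm(Y_normalized ~ Gender + Age + Sport,
                  family = poisson(link = "log"), weights = ExpoR, data = db)
# Both parametrizations lead to the same estimated coefficients.
all.equal(coef(fitOffset), coef(fitWeights))
```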
# Use-case
The goal of this section is to show how the user can work with `BT` functions and define an optimal model, according to the selected criteria.
We also underline that this use-case is a toy example with some limitations such as running-time constraints.
However, the same concepts can easily be extended to real-world problems.
## Import database
Let us import the simulated database `BT::BT_Simulated_Data`.
We refer to the specific database documentation for more details.
```{r, tidy=TRUE}
db <- BT::BT_Simulated_Data
```
One can then have a look at this database.
```{r, tidy=TRUE}
str(db)
head(db)
```
One can also perform a quick summary
```{r, tidy=TRUE}
summary(db)
```
We leave potential descriptive analysis to the interested reader but we note that the global average claim frequency is
```{r, tidy=TRUE}
sum(db$Y)/sum(db$ExpoR)
```
## Create working datasets
As we're dealing with machine learning models, a classical approach consists in splitting the dataset into two parts, namely:
* A **training set** which will be heavily used to train the different models and will serve for model selection.
* A **testing set** which will be held out and used at the end to assess generalization performances.
In our example, 80\% of the total dataset will be placed in the training set and the remaining part in the testing set.
One can note that the claim frequency is approximately the same across the two sets.
```{r, tidy=TRUE}
set.seed(404)
trainObs <- sample(seq(1, nrow(db)), 0.8*nrow(db))
trainSet <- db[trainObs,]
testSet <- db[setdiff(seq(1, nrow(db)), trainObs),]
sum(trainSet$Y)/sum(trainSet$ExpoR)
sum(testSet$Y)/sum(testSet$ExpoR)
```
## Boosting Tree (BT)
The basic idea behind this algorithm consists in building weak learners to explain the remaining error, using all the past iterations. It differs from Gradient Boosting Methods as we boost the ratios (as previously explained) rather than the pseudo-residuals, using the assumed underlying distribution rather than a Gaussian approach.
In particular, let us recall that the package does not support offsets. However, the problem reformulation explained before can be used.
We want to make use of all explanatory variables. We then define the following model formula, which will be used repeatedly.
```{r, tidy=TRUE}
formFreq <- Y_normalized ~ Gender + Age + Split + Sport
```
### `BT` fit and outputs
We propose to begin this section by looking at a simple example resulting from a first run.
We can then discuss the different features available in the package.
We refer to the package documentation `?BT::BT` for more details about the arguments of this function.
A first `BT` can be fitted without cross-validation
```{r, tidy=TRUE}
bt0 <- BT(formula = formFreq,
data = trainSet,
tweedie.power = 1,
ABT = FALSE,
n.iter = 50,
train.fraction = 0.8,
interaction.depth = 3,
shrinkage = 0.01,
bag.fraction = 0.5,
colsample.bytree = NULL,
keep.data = TRUE,
is.verbose = FALSE,
cv.folds = 1,
folds.id = NULL,
n.cores = 1,
weights = ExpoR,
seed = 4)
```
One can first have a look at the returned object.
Almost all the parameters that have been used during the call are stored.
```{r, tidy=TRUE}
bt0$call
bt0$distribution
bt0$BTParams
bt0$keep.data
bt0$is.verbose
bt0$seed
#bt0$w / bt0$response / bt0$var.name
```
A built-in `print` function is also available. This method prints some of the already presented values.
```{r, tidy=TRUE}
print(bt0)
```
One can have a specific look at the initialization that has been performed via
```{r, tidy=TRUE}
str(bt0$BTInit)
```
If `keep.data=TRUE`, the different databases with the last evaluation are returned
```{r, tidy=TRUE}
str(bt0$BTData)
```
The fitted values (on the score scale) as well as the computed errors across the iterations are available
```{r, tidy=TRUE}
head(bt0$fitted.values, 5)
str(bt0$BTErrors)
```
Finally, the weak learners (trees) built in the expansion are stored within the following object. Each element corresponds to a specific `rpart` object.
```{r, tidy=TRUE}
length(bt0$BTIndivFits)
# First tree in the expansion.
bt0$BTIndivFits[[1]]
bt0$BTIndivFits[[1]]$frame
```
### Optimal iterations number
The `BT_perf` function allows the user to determine the best number of iterations to retain. This choice depends on the type of errors that are available, i.e. that have been computed during the training phase.
Depending on the chosen approach, the following methods can be applied to compute the best number of iterations:
* If the user wants to use the `validation.error`, the `argmin(BT$BTErrors$validation.error)` will be returned as the optimal iteration.
* If the user wants to use the `oob.improvement`, the `argmin(-cumsum(BT$BTErrors$oob.improvement))` will be returned as the optimal iteration. To be precise, the `oob.improvement` values are not used as such; a smoothed version of them is used instead.
* If the user wants to use the `cv.error`, the `argmin(BT$BTErrors$cv.error)` will be returned as the optimal iteration.
We refer to the package documentation `?BT::BT_perf` for a thorough presentation of this function arguments.
In our specific context, only the OOB improvements and validation errors are available for the given run (no cross-validation performed).
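Before turning to the actual calls, note that these optima can also be retrieved by hand from the stored error vectors, as sketched below (not evaluated, assuming a fitted object such as `bt0`).
```{r, eval=FALSE}
# Manual counterparts of BT_perf (illustration only).
which.min(bt0$BTErrors$validation.error)  # method = "validation"
which.min(bt0$BTErrors$cv.error)          # method = "cv", requires cv.folds > 1
# For method = "OOB", BT_perf first smooths oob.improvement (via loess) and then
# selects the iteration minimizing -cumsum of the smoothed values.
```
Let us now use `BT_perf` directly.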
```{r, tidy=TRUE, fig.align='center'}
perfbt0_OOB <- BT_perf(bt0, method="OOB", oobag.curve = TRUE)
perfbt0_OOB
```
```{r, tidy=TRUE, fig.align='center'}
perfbt0_val <- BT_perf(bt0, method="validation")
perfbt0_val
```
Using the implemented "best guess" approach
```{r, tidy=TRUE}
perfbt0_BG <- BT_perf(bt0, plot.it = FALSE)
perfbt0_BG
```
### Continue training
It clearly seems that our model does not contain enough weak learners. In fact, the optimal number of iterations equals the total number of iterations performed, meaning that the minimal error (and the related iteration) has not been reached yet.
It's therefore interesting to continue the training.
This training continuation can be performed thanks to the `BT_more` function.
The arguments of this function are explained in `?BT::BT_more` and we therefore refer to it for more details.
It will then return a `BTFit` object (as the `BT` function does) augmented by the new boosting iterations.
We emphasize that the call to this function can only be made if the original `BT` call:
* has no cross-validation;
* has been computed with `keep.data` parameter set to `TRUE`.
```{r, tidy=TRUE}
bt1 <- BT_more(bt0, new.n.iter = 150, seed = 4)
# See parameters and different inputs.
bt1$BTParams$n.iter
```
It clearly seems that we have now reached an optimum.
```{r, tidy=TRUE}
perfbt1_OOB <- BT_perf(bt1, method = 'OOB', plot.it = FALSE)
perfbt1_val <- BT_perf(bt1, method = 'validation', plot.it = FALSE)
perfbt1_OOB
perfbt1_val
```
### Cross-validation
We often favor doing cross-validation to find the optimal number of iterations.
That being said, this approach can be time-consuming and a balance has to be found by the modeler.
Let's see the results if a 3-fold cross-validation is performed.
Please note that the `train.fraction` is now set to 1 as creating a validation set is less meaningful in the cross-validation context.
```{r, tidy=TRUE}
bt2 <- BT(formula = formFreq,
data = trainSet,
tweedie.power = 1,
ABT = FALSE,
n.iter = 200,
train.fraction = 1,
interaction.depth = 3,
shrinkage = 0.01,
bag.fraction = 0.5,
colsample.bytree = NULL,
keep.data = TRUE,
is.verbose = FALSE,
cv.folds = 3,
folds.id = NULL,
n.cores = 1,
weights = ExpoR,
seed = 4)
```
Different objects are now available within the new `BT` results
```{r, tidy=TRUE}
bt2$cv.folds
str(bt2$folds)
str(bt2$cv.fitted)
str(bt2$BTErrors)
```
One can also find the optimal number of iterations via
```{r, tidy=TRUE, fig.align='center'}
perfbt2_cv <- BT_perf(bt2, method = 'cv')
```
### Hyperparameter Optimization
We have only worked with one parameter set up to now. In practice, this set has to be found.
A usual approach consists in performing a grid search and assessing the performances via cross-validation. Please note that a validation set can also be used instead, depending on the computation time.
For this presentation, only one extra boosting tree model will be fitted. In particular, only one different value for `interaction.depth` will be investigated.
In reality the grid search should be much broader, trying multiple multidimensional combinations involving different parameters.
```{r, tidy=TRUE}
bt3 <- BT(formula = formFreq,
data = trainSet,
tweedie.power = 1,
ABT = FALSE,
n.iter = 225,
train.fraction = 1,
interaction.depth = 2,
shrinkage = 0.01,
bag.fraction = 0.5,
colsample.bytree = NULL,
keep.data = TRUE,
is.verbose = FALSE,
cv.folds = 3,
folds.id = NULL,
n.cores = 1,
weights = ExpoR,
seed = 4)
```
We generally select the best model by finding the one with the lowest cross-validation error.
```{r, tidy=TRUE}
indexMin <- which.min(c(min(bt2$BTErrors$cv.error), min(bt3$BTErrors$cv.error)))
btOpt <- if(indexMin==1) bt2 else bt3
perfbtOpt_cv <- BT_perf(btOpt, method='cv', plot.it=FALSE)
btOpt
perfbtOpt_cv
```
### Relative influence
Now that the optimal model has been found, one can compute the relative influence. It corresponds to the gain made by splitting over the features, normalized so that the values sum up to 100\%.
The `summary` function allows one to compute and plot these values. We refer to this function's documentation `?BT:::summary.BTFit` for a thorough presentation.
The computation of the relative influence isn't currently available for the permutation approach; this one has yet to be developed.
```{r, tidy=TRUE}
summary(btOpt, n.iter = perfbtOpt_cv)
```
### Prediction
Once a `BT` object has been created, we can use it to predict on a new database, using the `predict` function.
To this end, the optimal number of iterations is generally a desirable input.
We also underline that the model fitted on the whole training set is used to perform these predictions.
The interested reader can find a description of the function arguments in the related documentation `?BT:::predict.BTFit`.
Please note that if the `keep.data` argument was set to `TRUE` and if the `newdata` is not specified, the prediction will be achieved on the original training set.
We illustrate two usages below:
* Prediction (on the link/response scale) using all weak learners up to the best iterations obtained via OOB and CV (this provides a 2-dimensional matrix).
This is one of the most commonly used options.
```{r, tidy=TRUE}
head(predict(btOpt, n.iter = c(BT_perf(btOpt, method='OOB', plot.it=FALSE), perfbtOpt_cv), type = 'link'), 10)
head(predict(btOpt, n.iter = c(BT_perf(btOpt, method='OOB', plot.it=FALSE), perfbtOpt_cv), type = 'response'), 10)
```
* Prediction (on the response scale) using only the 40th weak learner (tree).
```{r, tidy=TRUE}
head(predict(btOpt, n.iter = 40, type = 'response', single.iter = TRUE), 10)
```
## Adaptive Boosting Tree (ABT)
All the functions available on the classical Boosting Tree side are also available in the Adaptive Boosting Tree context.
The only difference lies in the way the number of internal nodes is defined.
For a given `interaction.depth`, ABT will in fact look for the biggest optimal tree having at most `interaction.depth` internal nodes (i.e. the built weak learner). This idea basically relies on the `rpart` complexity parameter. Put differently, the trees in the expansion won't necessarily all contain `interaction.depth` internal nodes.
By construction, it's interesting to note that the built trees will converge to a single root node. This could therefore act as a natural stopping criterion, helping to reduce the computation time.
However, this option is not implemented in the `BT` package. Moreover, this behavior is not necessarily observed when random effects (e.g. bag fraction) are used.
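As a quick illustration of this difference (a sketch reusing the `btOpt` model fitted above; the Miscellaneous section below performs the same kind of check on the whole expansion), the number of internal nodes of a single tree can be read off its `rpart` frame:
```{r, tidy=TRUE}
# Number of internal nodes (splits) of the first tree in the BT expansion fitted above;
# for an ABT expansion this count may vary from one tree to another.
sum(btOpt$BTIndivFits[[1]]$frame$var != "<leaf>")
```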
### Hyperparameter Optimization
As we did on the BT side, we'll test two parameter combinations and assess their performances via cross-validation.
Precisely, the values 2 and 3 will be tried out for the `interaction.depth` parameter.
Let's start by defining the parameter grid thanks to the `base::expand.grid` function.
Once again, we acknowledge that this corresponds to a small-scale representation of real-world problems.
```{r, tidy=TRUE}
nIterVec <- 225
interactionDepthVec <- c(2, 3)
shrinkageVec <- 0.01
bagFractionVec <- 0.5
gridSearch <- expand.grid(n.iter = nIterVec,
interaction.depth = interactionDepthVec,
shrinkage = shrinkageVec,
bag.fraction = bagFractionVec)
gridSearch
```
We can now loop through all the different scenarios.
```{r, tidy=TRUE}
abtRes_cv <- list()
for (iGrid in seq(1, nrow(gridSearch)))
{
currABT <- BT(formula = formFreq,
data = trainSet,
tweedie.power = 1,
ABT = TRUE,
n.iter = gridSearch[iGrid, "n.iter"],
train.fraction = 1,
interaction.depth = gridSearch[iGrid, "interaction.depth"],
shrinkage = gridSearch[iGrid, "shrinkage"],
bag.fraction = gridSearch[iGrid, "bag.fraction"],
colsample.bytree = NULL,
keep.data = FALSE,
is.verbose = FALSE,
cv.folds = 3,
folds.id = NULL,
n.cores = 1,
weights = ExpoR,
seed = 4)
abtRes_cv[[iGrid]] <- currABT
}
```
Let's check that we have enough iterations.
```{r, tidy=TRUE, fig.align='center'}
perfabt1_cv <- BT_perf(abtRes_cv[[1]], method='cv', plot.it=TRUE)
perfabt2_cv <- BT_perf(abtRes_cv[[2]], method='cv', plot.it=TRUE)
```
We can finally define the best ABT model.
```{r, tidy=TRUE}
indexMin <- which.min(c(min(abtRes_cv[[1]]$BTErrors$cv.error), min(abtRes_cv[[2]]$BTErrors$cv.error)))
abtOpt <- if (indexMin==1) abtRes_cv[[1]] else abtRes_cv[[2]]
perfabtOpt_cv <- if (indexMin==1) perfabt1_cv else perfabt2_cv
abtOpt
abtOpt$BTParams$interaction.depth
perfabtOpt_cv
```
## Miscellaneous
Let's have a look at the resulting weak learners (trees) from BT and ABT expansions.
In the BT case, all the trees contain exactly `interaction.depth` internal nodes (or splits), whereas in the ABT case one can notice the variation in the number of internal nodes (and thus in the trees' shapes).
```{r, tidy=TRUE}
table(sapply(seq(1, perfbtOpt_cv), function(xx){nrow(btOpt$BTIndivFits[[xx]]$frame[btOpt$BTIndivFits[[xx]]$frame$var != "<leaf>",])}))
table(sapply(seq(1, perfabtOpt_cv), function(xx){nrow(abtOpt$BTIndivFits[[xx]]$frame[abtOpt$BTIndivFits[[xx]]$frame$var != "<leaf>",])}))
```
## Models comparison
Once the optimal competing models have been defined, one can assess their generalization performances (i.e. on the test set). To do so, multiple criteria might be used, such as deviance, lift curves or concordance measures.
Only the first criterion will be investigated in this presentation.
**Please note that usually only one model is retained beforehand; the test set is not used for model selection. Our specific example remains a case study!**
Let’s start by computing the different model predictions on the test set.
```{r, tidy=TRUE}
btPredTest <- predict(btOpt, newdata = testSet, n.iter = perfbtOpt_cv, type = "response") * testSet$ExpoR
abtPredTest <- predict(abtOpt, newdata = testSet, n.iter = perfabtOpt_cv, type = "response") * testSet$ExpoR
```
### Deviance
The deviance is defined as twice the log-likelihood ratio of the saturated model to the reduced (fitted) one.
In other words, it measures the gap between the optimal model and the current one.
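In the Poisson case, this leads to the following expression for observations $y_i$ and predictions $\widehat{\mu}_i$ (with the convention $y \ln(y) = 0$ when $y = 0$), which the helper function below implements through `dpois`:
$$
D\bigl(\mathbf{y}, \widehat{\boldsymbol{\mu}}\bigr) = 2 \sum_{i} \biggl( y_i \ln\Bigl(\frac{y_i}{\widehat{\mu}_i}\Bigr) - \bigl(y_i - \widehat{\mu}_i\bigr) \biggr).
$$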
```{r, tidy=TRUE}
devPoisson <- function(obs, pred) {
2 * (sum(dpois(x = obs, lambda = obs, log = TRUE)) - sum(dpois(x = obs, lambda = pred, log = TRUE)))
}
```
One can now assess the deviance of the different models.
```{r, tidy=TRUE}
devPoisson(testSet$Y, btPredTest)
devPoisson(testSet$Y, abtPredTest)
```
For this simulated use-case, it therefore seems that the usual boosting tree approach performs better.
|
/scratch/gouwar.j/cran-all/cranData/BT/inst/doc/BT-usage-example.Rmd
|
---
title: "Getting started with the BT package"
author: "Gireg Willame"
date: "`r Sys.Date()`"
output:
rmarkdown::html_vignette:
toc: true
vignette: >
%\VignetteIndexEntry{BT-usage-example}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
header-includes:
- \usepackage{amsmath}
- \DeclareMathOperator*{\argmin}{argmin}
---
# Introduction
The `BT` package implements (Adaptive) Boosting Trees for *Poisson* distributed response variables, using the log-link function.
When presented with data, the `BT` package offers the user the ability to build predictive models and explore the influence of different variables on the response, akin to a data mining or exploration task.
The built package is based on the original idea proposed by D. Hainaut, J. Trufin and M. Denuit. For more theoretical details, we refer to the following references:
* M. Denuit, D. Hainaut and J. Trufin (2019). **Effective Statistical Learning Methods for Actuaries I: GLMs and Extensions**, *Springer Actuarial*.
* M. Denuit, D. Hainaut and J. Trufin (2020). **Effective Statistical Learning Methods for Actuaries II: Tree-Based Methods and Extensions**, *Springer Actuarial*.
* M. Denuit, D. Hainaut and J. Trufin (2019). **Effective Statistical Learning Methods for Actuaries III: Neural Networks and Extensions**, *Springer Actuarial*.
* M. Denuit, D. Hainaut and J. Trufin (2022). **Response versus gradient boosting trees, GLMs and neural networks under Tweedie loss and log-link**, *Scandinavian Actuarial Journal 2022, 841-866*.
* M. Denuit, J. Huyghe and J. Trufin (2022). **Boosting cost-complexity pruned trees on Tweedie responses: The ABT machine for insurance ratemaking**. Paper submitted for publication.
* M. Denuit, J. Trufin and T. Verdebout (2022). **Boosting on the responses with Tweedie loss functions**. Paper submitted for publication.
We'll now show how to use the `BT` package on a classical machine learning problem.
In particular, an insurance-related model will be investigated in the following.
Let's start by importing the `BT` package.
```{r}
library(BT)
```
# Important modeling remark
As previously mentioned, the `BT` package is only available for a *Poisson* distributed response variable $Y$, in a log-link context.
Using *offset* in the Poisson framework is often required.
In insurance modelling for example, the offset allows one to take into account the time exposure-to-risk. It therefore helps to align the premium asked with the contract duration.
Regarding the `BT` package, the weighted approach was favored in place of the offset one. In fact, the two are equivalent given some adjustments.
More precisely, the modeler is allowed to work either with
* the observed claim count $Y$ with offset $ln(d)$ under log link, where $d$ is the exposure to risk, generally measured in time units, sometimes in distance traveled or other meaningful unit depending on the application, or
* the observed claim frequency (also called claim rate) $\tilde{Y} = Y/d$ provided the weight $\nu = d$ enters the analysis. In fact, the distribution of the claim frequency $\tilde{Y}$ still belongs to the Tweedie family and is called the Poisson rate distribution.
We refer to the first book aforementioned (p. 123) for a detailed proof.
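As a minimal sketch of this reformulation (assuming a data frame containing a claim count `Y` and an exposure-to-risk `ExpoR`, as in the simulated data used later on), one simply models the claim frequency and passes the exposure as weights:
```{r, eval=FALSE}
# Sketch only: the offset formulation is replaced by the frequency/weights one.
db <- BT::BT_Simulated_Data
db$Y_normalized <- db$Y / db$ExpoR # claim frequency (Poisson rate response)
# The BT call then uses the frequency as response and the exposure as weights, e.g.
# BT(formula = Y_normalized ~ ., data = db, weights = ExpoR, ...)
```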
We now focus on the impact of such implementation choice on the Boosting Tree Algorithm.
First of all, let us remind the algorithm in our Poisson log-link framework.
Given a training set
$$\mathcal{D} = \Bigl\{ (d_i, y_i, \mathbf{x}_i), i \in \mathcal{I} \Bigr\},$$
where
* $d_i$ is the exposure-to-risk (i.e. the offset term).
* $y_i$ is the observed count variable.
* $\mathbf{x}_i$ is the corresponding feature vector.
* $\mathcal{I}$ corresponds to the set of all observations,
the following steps are performed:
1. Initialize the score to
$$
\widehat{\text{score}}_0(x) = \argmin_{\beta} \sum_{i \in \mathcal{I}} L\Bigl(y_i, d_i \exp(\beta) \Bigr).
$$
2. **For** $m = 1$ to $M$ **do**
2.1. Fit a weak learner, regression tree in our context, $T(\mathbf{x}; \widehat{\mathbf{a}_m})$ with
$$
\widehat{\mathbf{a}_m} = \argmin_{\mathbf{a}_m} \sum_{i \in \mathcal{I}} L\biggl(y_i, d_i \exp\Bigl(\widehat{\text{score}}_{m-1}(\mathbf{x}_i) + T(\mathbf{x}_i; \mathbf{a}_m)\Bigr)\biggr),
$$
where
* $\mathbf{a}_m$ gathers the splitting variables and their split values as well as the corresponding observed averages in the terminal nodes, i.e. describes the built tree.
* $L$ is the loss function, defined as the Poisson deviance in our approach.
2.2. Update $\widehat{\text{score}}_m(\mathbf{x}) = \widehat{\text{score}}_{m-1}(\mathbf{x}) + T(\mathbf{x}; \widehat{\mathbf{a}_m})$.
3. Output $\widehat{\text{score}}(\mathbf{x}) = \widehat{\text{score}}_M(\mathbf{x}).$
Suppose that we're at the $m$th boosting iteration; the algorithm then fits a weak learner $T(\mathbf{x}; \widehat{\mathbf{a}_m})$.
Using the above trick, for a given observation $i$ one can rewrite the optimization step 2.1 as
$$
L\biggl(y_i, d_i \exp\Bigl(\widehat{\text{score}}_{m-1}(\mathbf{x}_i) + T(\mathbf{x}_i; \mathbf{a}_m)\Bigr)\biggr) =
\nu_i L\biggl(\tilde{y}_i, \exp\Bigl(\widehat{\text{score}}_{m-1}(\mathbf{x}_i) + T(\mathbf{x}_i; \mathbf{a}_m)\Bigr)\biggr),
$$
where $\nu_i = d_i$ and $\tilde{y_i} = \frac{y_i}{d_i}$.
Using the definition of the Poisson deviance $L$, one can easily rewrite the second term as:
$$
\nu_i L\biggl(\tilde{y}_i, \exp\Bigl(\widehat{\text{score}}_{m-1}(\mathbf{x}_i) + T(\mathbf{x}_i; \mathbf{a}_m)\Bigr)\biggr) = \nu_{mi} L(\tilde{r}_{mi}, \exp(T(\mathbf{x}_i; \mathbf{a}_m))),
$$
with
$$
\nu_{mi} = \nu_i \exp\Bigl(\widehat{\text{score}}_{m-1}(\mathbf{x}_i) \Bigr)
$$
and
$$
\tilde{r}_{mi} = \frac{\tilde{y}_i}{\exp \Bigl( \widehat{\text{score}}_{m-1}(\mathbf{x}_i) \Bigr)}.
$$
The $m$th iteration of the boosting procedure therefore reduces to building a single weak learner on the working training set
$$
\mathcal{D}^{(m)} = \Bigl\{ (\nu_{mi}, \tilde{r}_{mi}, \mathbf{x}_i), i \in \mathcal{I} \Bigr\},
$$
using the Poisson deviance loss and the log-link function.
Across the iterations, the weights are thus updated at each step together with the responses, which are assumed to follow Poisson rate distributions.
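As a small numerical illustration of this update (toy numbers, not taken from the package):
```{r, tidy=TRUE}
# Toy illustration of the working weights/responses at iteration m.
y_tilde <- c(0, 2, 1) # observed claim frequencies
nu <- c(0.5, 1, 0.25) # exposures used as weights
score_prev <- c(-0.1, 0.2, 0) # score from iteration m-1 evaluated at x_i
nu_m <- nu * exp(score_prev) # updated weights nu_mi
r_m <- y_tilde / exp(score_prev) # updated responses r_mi
nu_m
r_m
```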
# Use-case
The goal of this section is to show how the user can work with `BT` functions and define an optimal model, according to the selected criteria.
We also underline that this use-case is a toy example with some limitations such as running-time constraints.
However, the same concepts can easily be extended to real-world problems.
## Import database
Let us import the simulated database `BT::BT_Simulated_Data`.
We refer to the specific database documentation for more details.
```{r, tidy=TRUE}
db <- BT::BT_Simulated_Data
```
One can then have a look at this database.
```{r, tidy=TRUE}
str(db)
head(db)
```
One can also perform a quick summary
```{r, tidy=TRUE}
summary(db)
```
We leave potential descriptive analysis to the interested reader but we note that the global average claim frequency is
```{r, tidy=TRUE}
sum(db$Y)/sum(db$ExpoR)
```
## Create working datasets
As we're dealing with machine learning models, a classical approach consists in splitting the dataset into two parts, namely:
* A **training set** which will be heavily used to train the different models and will serve for model selection.
* A **testing set** which will be held out and used at the end to assess the generalization performances.
In our example, 80\% of the total dataset will be placed in the training set and the remaining part in the testing set.
One can note that the claim frequency is approximately similar across the resulting datasets.
```{r, tidy=TRUE}
set.seed(404)
trainObs <- sample(seq(1, nrow(db)), 0.8*nrow(db))
trainSet <- db[trainObs,]
testSet <- db[setdiff(seq(1, nrow(db)), trainObs),]
sum(trainSet$Y)/sum(trainSet$ExpoR)
sum(testSet$Y)/sum(testSet$ExpoR)
```
## Boosting Tree (BT)
The basic idea behind this algorithm consists in building weak learners to explain the remaining error, using all the past iterations. It differs from Gradient Boosting Methods as we're here boosting the ratios (as previously explained) rather than the pseudo-residuals, using the defined underlying distribution rather than a Gaussian approach.
In particular, let us recall that the package does not support offsets. However, the problem reformulation explained before can be used.
We want to make use of all the explanatory variables. We therefore define the following model formula, which will be used extensively.
```{r, tidy=TRUE}
formFreq <- Y_normalized ~ Gender + Age + Split + Sport
```
### `BT` fit and outputs
We propose to begin this section by looking at a simple example resulting from a first run.
We can then discuss the different features available in the package.
We refer to the package documentation `?BT::BT` for more details about the arguments of this function.
A first `BT` can be fitted without cross-validation
```{r, tidy=TRUE}
bt0 <- BT(formula = formFreq,
data = trainSet,
tweedie.power = 1,
ABT = FALSE,
n.iter = 50,
train.fraction = 0.8,
interaction.depth = 3,
shrinkage = 0.01,
bag.fraction = 0.5,
colsample.bytree = NULL,
keep.data = TRUE,
is.verbose = FALSE,
cv.folds = 1,
folds.id = NULL,
n.cores = 1,
weights = ExpoR,
seed = 4)
```
One can first have a look at the returned object.
Almost all the parameters that have been used during the call are stored.
```{r, tidy=TRUE}
bt0$call
bt0$distribution
bt0$BTParams
bt0$keep.data
bt0$is.verbose
bt0$seed
#bt0$w / bt0$response / bt0$var.name
```
A built-in `print` function is also available. This method prints some of the already presented values.
```{r, tidy=TRUE}
print(bt0)
```
One can have a specific look at the initialization that has been performed via
```{r, tidy=TRUE}
str(bt0$BTInit)
```
If `keep.data=TRUE`, the different databases with the last evaluation are returned
```{r, tidy=TRUE}
str(bt0$BTData)
```
The fitted values (on the score scale) as well as the computed errors across the iterations are available
```{r, tidy=TRUE}
head(bt0$fitted.values, 5)
str(bt0$BTErrors)
```
Finally, the weak learners (trees) built in the expansion are stored within the following object; each element corresponds to a specific `rpart` object.
```{r, tidy=TRUE}
length(bt0$BTIndivFits)
# First tree in the expansion.
bt0$BTIndivFits[[1]]
bt0$BTIndivFits[[1]]$frame
```
### Optimal iterations number
The `BT_perf` function allows the user to determine the best number of iterations to perform. This choice also depends on the type of errors that are available, i.e. that have been computed during the training phase.
Depending on the chosen approach, the following methods can be applied to compute the best number of iterations:
* If the user wants to use the `validation.error`, the `argmin(BT$BTErrors$validation.error)` will be returned as the optimal iteration.
* If the user wants to use the `oob.improvement`, the `argmin(-cumsum(BT$BTErrors$oob.improvement))` will be returned as the optimal iteration. To be precise, the `oob.improvement` values are not used as such but rather a smoothed version of them.
* If the user wants to use the `cv.error`, the `argmin(BT$BTErrors$cv.error)` will be returned as the optimal iteration.
We refer to the package documentation `?BT::BT_perf` for a thorough presentation of this function's arguments.
In our specific context, only the OOB improvements and validation errors are available for the given run (no cross-validation performed).
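For intuition, these two criteria roughly correspond to the following base R computations (a sketch only, not the exact `BT_perf` internals; in particular, `BT_perf` works on a smoothed version of the OOB improvements):
```{r, tidy=TRUE}
# Rough equivalents of the validation-based and (unsmoothed) OOB-based optima.
which.min(bt0$BTErrors$validation.error)
which.max(cumsum(bt0$BTErrors$oob.improvement))
```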
```{r, tidy=TRUE, fig.align='center'}
perfbt0_OOB <- BT_perf(bt0, method="OOB", oobag.curve = TRUE)
perfbt0_OOB
```
```{r, tidy=TRUE, fig.align='center'}
perfbt0_val <- BT_perf(bt0, method="validation")
perfbt0_val
```
Using the implemented "best guess" approach
```{r, tidy=TRUE}
perfbt0_BG <- BT_perf(bt0, plot.it = FALSE)
perfbt0_BG
```
### Continue training
Our model clearly does not seem to contain enough weak learners: the optimal number of iterations equals the total number of iterations performed, meaning that the minimal error (and the related iteration) has presumably not been reached yet.
It's therefore interesting to continue the training.
This training continuation can be performed thanks to the `BT_more` function.
The arguments of this function are explained in `?BT::BT_more` and we therefore refer to it for more details.
It will then return a `BTFit` object (as the `BT` function does) augmented by the new boosting iterations.
We emphasize that the call to this function can only be made if the original `BT` call:
* has no cross-validation;
* has been computed with the `keep.data` parameter set to `TRUE`.
```{r, tidy=TRUE}
bt1 <- BT_more(bt0, new.n.iter = 150, seed = 4)
# See parameters and different inputs.
bt1$BTParams$n.iter
```
It clearly seems that we have now reached an optimum.
```{r, tidy=TRUE}
perfbt1_OOB <- BT_perf(bt1, method = 'OOB', plot.it = FALSE)
perfbt1_val <- BT_perf(bt1, method = 'validation', plot.it = FALSE)
perfbt1_OOB
perfbt1_val
```
### Cross-validation
We often favor doing cross-validation to find the optimal number of iterations.
That being said, this approach can be time-consuming and a balance has to be found by the modeler.
Let's see the results if a 3-fold cross-validation is performed.
Please note that the `train.fraction` is now set to 1 as creating a validation set is less meaningful in the cross-validation context.
```{r, tidy=TRUE}
bt2 <- BT(formula = formFreq,
data = trainSet,
tweedie.power = 1,
ABT = FALSE,
n.iter = 200,
train.fraction = 1,
interaction.depth = 3,
shrinkage = 0.01,
bag.fraction = 0.5,
colsample.bytree = NULL,
keep.data = TRUE,
is.verbose = FALSE,
cv.folds = 3,
folds.id = NULL,
n.cores = 1,
weights = ExpoR,
seed = 4)
```
Different objects are now available within the new `BT` results
```{r, tidy=TRUE}
bt2$cv.folds
str(bt2$folds)
str(bt2$cv.fitted)
str(bt2$BTErrors)
```
One can also find the optimal number of iterations via
```{r, tidy=TRUE, fig.align='center'}
perfbt2_cv <- BT_perf(bt2, method = 'cv')
```
### Hyperparameter Optimization
We have only worked with one parameter set up to now. In practice, this set has to be found.
A usual approach consists in performing a grid search and assessing the performances via cross-validation. Please note that a validation set can also be used instead, depending on the computation time.
For this presentation, only one extra boosting tree model will be fitted. In particular, only one different value for `interaction.depth` will be investigated.
In reality the grid search should be much broader, trying multiple multidimensional combinations involving different parameters.
```{r, tidy=TRUE}
bt3 <- BT(formula = formFreq,
data = trainSet,
tweedie.power = 1,
ABT = FALSE,
n.iter = 225,
train.fraction = 1,
interaction.depth = 2,
shrinkage = 0.01,
bag.fraction = 0.5,
colsample.bytree = NULL,
keep.data = TRUE,
is.verbose = FALSE,
cv.folds = 3,
folds.id = NULL,
n.cores = 1,
weights = ExpoR,
seed = 4)
```
We generally select the best model by finding the one with the lowest cross-validation error.
```{r, tidy=TRUE}
indexMin <- which.min(c(min(bt2$BTErrors$cv.error), min(bt3$BTErrors$cv.error)))
btOpt <- if(indexMin==1) bt2 else bt3
perfbtOpt_cv <- BT_perf(btOpt, method='cv', plot.it=FALSE)
btOpt
perfbtOpt_cv
```
### Relative influence
Now that the optimal model has been found, one can compute the relative influence. It corresponds to the gain made by splitting over the features, normalized so that the values sum up to 100\%.
The `summary` function allows one to compute and plot these values. We refer to this function's documentation `?BT:::summary.BTFit` for a thorough presentation.
The computation of the relative influence isn't currently available for the permutation approach; this one has yet to be developed.
```{r, tidy=TRUE}
summary(btOpt, n.iter = perfbtOpt_cv)
```
### Prediction
Once a `BT` object has been created, we can use it to predict on a new database, using the `predict` function.
To this end, the optimal number of iterations is generally a desirable input.
We also underline that the model fitted on the whole training set is used to perform these predictions.
The interested reader can find a description of the function arguments in the related documentation `?BT:::predict.BTFit`.
Please note that if the `keep.data` argument was set to `TRUE` and if the `newdata` is not specified, the prediction will be achieved on the original training set.
We illustrate two usages below:
* Prediction (on the link/response scale) using all weak learners up to the best iterations obtained via OOB and CV (this provides a 2-dimensional matrix).
This is one of the most commonly used options.
```{r, tidy=TRUE}
head(predict(btOpt, n.iter = c(BT_perf(btOpt, method='OOB', plot.it=FALSE), perfbtOpt_cv), type = 'link'), 10)
head(predict(btOpt, n.iter = c(BT_perf(btOpt, method='OOB', plot.it=FALSE), perfbtOpt_cv), type = 'response'), 10)
```
* Prediction (on the response scale) using only the 40th weak learner (tree).
```{r, tidy=TRUE}
head(predict(btOpt, n.iter = 40, type = 'response', single.iter = TRUE), 10)
```
## Adaptive Boosting Tree (ABT)
All the functions available on the classical Boosting Tree side are also available in the Adaptive Boosting Tree context.
The only difference lies in the way the number of internal nodes is defined.
For a given `interaction.depth`, ABT will in fact look for the biggest optimal tree having at most `interaction.depth` internal nodes (i.e. the built weak learner). This idea basically relies on the `rpart` complexity parameter. Put differently, the trees in the expansion won't necessarily all contain `interaction.depth` internal nodes.
By construction, it's interesting to note that the built trees will converge to a single root node. This could therefore act as a natural stopping criterion, helping to reduce the computation time.
However, this option is not implemented in the `BT` package. Moreover, this behavior is not necessarily observed when random effects (e.g. bag fraction) are used.
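As a quick illustration of this difference (a sketch reusing the `btOpt` model fitted above; the Miscellaneous section below performs the same kind of check on the whole expansion), the number of internal nodes of a single tree can be read off its `rpart` frame:
```{r, tidy=TRUE}
# Number of internal nodes (splits) of the first tree in the BT expansion fitted above;
# for an ABT expansion this count may vary from one tree to another.
sum(btOpt$BTIndivFits[[1]]$frame$var != "<leaf>")
```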
### Hyperparameter Optimization
As we did on the BT side, we'll test two parameter combinations and assess their performances via cross-validation.
Precisely, the values 2 and 3 will be tried out for the `interaction.depth` parameter.
Let's start by defining the parameter grid thanks to the `base::expand.grid` function.
Once again, we acknowledge that this corresponds to a small-scale representation of real-world problems.
```{r, tidy=TRUE}
nIterVec <- 225
interactionDepthVec <- c(2, 3)
shrinkageVec <- 0.01
bagFractionVec <- 0.5
gridSearch <- expand.grid(n.iter = nIterVec,
interaction.depth = interactionDepthVec,
shrinkage = shrinkageVec,
bag.fraction = bagFractionVec)
gridSearch
```
We can now loop through all the different scenarios.
```{r, tidy=TRUE}
abtRes_cv <- list()
for (iGrid in seq(1, nrow(gridSearch)))
{
currABT <- BT(formula = formFreq,
data = trainSet,
tweedie.power = 1,
ABT = TRUE,
n.iter = gridSearch[iGrid, "n.iter"],
train.fraction = 1,
interaction.depth = gridSearch[iGrid, "interaction.depth"],
shrinkage = gridSearch[iGrid, "shrinkage"],
bag.fraction = gridSearch[iGrid, "bag.fraction"],
colsample.bytree = NULL,
keep.data = FALSE,
is.verbose = FALSE,
cv.folds = 3,
folds.id = NULL,
n.cores = 1,
weights = ExpoR,
seed = 4)
abtRes_cv[[iGrid]] <- currABT
}
```
Let's check that we have enough iterations.
```{r, tidy=TRUE, fig.align='center'}
perfabt1_cv <- BT_perf(abtRes_cv[[1]], method='cv', plot.it=TRUE)
perfabt2_cv <- BT_perf(abtRes_cv[[2]], method='cv', plot.it=TRUE)
```
We can finally define the best ABT model.
```{r, tidy=TRUE}
indexMin <- which.min(c(min(abtRes_cv[[1]]$BTErrors$cv.error), min(abtRes_cv[[2]]$BTErrors$cv.error)))
abtOpt <- if (indexMin==1) abtRes_cv[[1]] else abtRes_cv[[2]]
perfabtOpt_cv <- if (indexMin==1) perfabt1_cv else perfabt2_cv
abtOpt
abtOpt$BTParams$interaction.depth
perfabtOpt_cv
```
## Miscellaneous
Let's have a look at the resulting weak learners (trees) from BT and ABT expansions.
In the BT case, all the trees contain exactly `interaction.depth` internal nodes (or splits), whereas in the ABT case one can notice the variation in the number of internal nodes (and thus in the trees' shapes).
```{r, tidy=TRUE}
table(sapply(seq(1, perfbtOpt_cv), function(xx){nrow(btOpt$BTIndivFits[[xx]]$frame[btOpt$BTIndivFits[[xx]]$frame$var != "<leaf>",])}))
table(sapply(seq(1, perfabtOpt_cv), function(xx){nrow(abtOpt$BTIndivFits[[xx]]$frame[abtOpt$BTIndivFits[[xx]]$frame$var != "<leaf>",])}))
```
## Models comparison
Once the optimal competing models have been defined, one can assess their generalization performances (i.e. on the test set). To do so, multiple criteria might be used, such as deviance, lift curves or concordance measures.
Only the first criterion will be investigated in this presentation.
**Please note that usually only one model is retained beforehand; the test set is not used for model selection. Our specific example remains a case study!**
Let’s start by computing the different model predictions on the test set.
```{r, tidy=TRUE}
btPredTest <- predict(btOpt, newdata = testSet, n.iter = perfbtOpt_cv, type = "response") * testSet$ExpoR
abtPredTest <- predict(abtOpt, newdata = testSet, n.iter = perfabtOpt_cv, type = "response") * testSet$ExpoR
```
### Deviance
The deviance is defined as twice the log-likelihood ratio of the saturated model to the reduced (fitted) one.
In other words, it measures the gap between the optimal model and the current one.
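In the Poisson case, this leads to the following expression for observations $y_i$ and predictions $\widehat{\mu}_i$ (with the convention $y \ln(y) = 0$ when $y = 0$), which the helper function below implements through `dpois`:
$$
D\bigl(\mathbf{y}, \widehat{\boldsymbol{\mu}}\bigr) = 2 \sum_{i} \biggl( y_i \ln\Bigl(\frac{y_i}{\widehat{\mu}_i}\Bigr) - \bigl(y_i - \widehat{\mu}_i\bigr) \biggr).
$$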
```{r, tidy=TRUE}
devPoisson <- function(obs, pred) {
2 * (sum(dpois(x = obs, lambda = obs, log = TRUE)) - sum(dpois(x = obs, lambda = pred, log = TRUE)))
}
```
One can now assess the deviance of the different models.
```{r, tidy=TRUE}
devPoisson(testSet$Y, btPredTest)
devPoisson(testSet$Y, abtPredTest)
```
For this simulated use-case, it therefore seems that the usual boosting tree approach performs better.
|
/scratch/gouwar.j/cran-all/cranData/BT/vignettes/BT-usage-example.Rmd
|
#' BTLLasso
#'
#' Performs BTLLasso, a method to model heterogeneity in paired comparison
#' data. Different types of covariates are allowd to have an influence on the
#' attractivity/strength of the objects. Covariates can be subject-specific,
#' object-specific or subject-object-specific. L1 penalties are used to reduce the
#' complexity of the model by enforcing clusters of equal effects or by elimination of irrelevant
#' covariates. Several additional functions are provided, such as
#' cross-validation, bootstrap intervals, and plot functions.
#'
#'
#' @name BTLLasso-package
#' @docType package
#' @author Gunther Schauberger\cr \email{gunther.schauberger@@tum.de}
#' @seealso \code{\link{BTLLasso}}, \code{\link{cv.BTLLasso}}
#' @references Schauberger, Gunther and Tutz, Gerhard (2019): BTLLasso - A Common Framework and Software
#' Package for the Inclusion and Selection of Covariates in Bradley-Terry Models, \emph{Journal of
#' Statistical Software}, 88(9), 1-29, \doi{10.18637/jss.v088.i09}
#'
#' Schauberger, Gunther and Tutz, Gerhard (2017): Subject-specific modelling
#' of paired comparison data: A lasso-type penalty approach, \emph{Statistical Modelling},
#' 17(3), 223 - 243
#'
#' Schauberger, Gunther, Groll Andreas and Tutz, Gerhard (2018):
#' Analysis of the importance of on-field covariates in the German Bundesliga,
#' \emph{Journal of Applied Statistics}, 45(9), 1561 - 1578
#' @keywords package BTL Bradley-Terry BTLLasso
#' @examples
#'
#' \dontrun{
#' op <- par(no.readonly = TRUE)
#'
#' ##############################
#' ##### Example with simulated data set containing X, Z1 and Z2
#' ##############################
#' data(SimData)
#'
#' ## Specify control argument
#' ## -> allow for object-specific order effects and penalize intercepts
#' ctrl <- ctrl.BTLLasso(penalize.intercepts = TRUE, object.order.effect = TRUE,
#' penalize.order.effect.diffs = TRUE)
#'
#' ## Simple BTLLasso model for tuning parameters lambda
#' m.sim <- BTLLasso(Y = SimData$Y, X = SimData$X, Z1 = SimData$Z1,
#' Z2 = SimData$Z2, control = ctrl)
#' m.sim
#'
#' par(xpd = TRUE)
#' plot(m.sim)
#'
#'
#' ## Cross-validate BTLLasso model for tuning parameters lambda
#' set.seed(1860)
#' m.sim.cv <- cv.BTLLasso(Y = SimData$Y, X = SimData$X, Z1 = SimData$Z1,
#' Z2 = SimData$Z2, control = ctrl)
#' m.sim.cv
#' coef(m.sim.cv)
#' logLik(m.sim.cv)
#'
#' head(predict(m.sim.cv, type="response"))
#' head(predict(m.sim.cv, type="trait"))
#'
#' plot(m.sim.cv, plots_per_page = 4)
#'
#'
#' ## Example for bootstrap intervals for illustration only
#' ## Don't calculate bootstrap intervals with B = 20!!!!
#' set.seed(1860)
#' m.sim.boot <- boot.BTLLasso(m.sim.cv, B = 20, cores = 20)
#' m.sim.boot
#' plot(m.sim.boot, plots_per_page = 4)
#'
#'
#' ##############################
#' ##### Example with small version from GLES data set
#' ##############################
#' data(GLESsmall)
#'
#' ## extract data and center covariates for better interpretability
#' Y <- GLESsmall$Y
#' X <- scale(GLESsmall$X, scale = FALSE)
#' Z1 <- scale(GLESsmall$Z1, scale = FALSE)
#'
#' ## vector of subtitles, containing the coding of the X covariates
#' subs.X <- c('', 'female (1); male (0)')
#'
#' ## Cross-validate BTLLasso model
#' m.gles.cv <- cv.BTLLasso(Y = Y, X = X, Z1 = Z1)
#' m.gles.cv
#'
#' coef(m.gles.cv)
#' logLik(m.gles.cv)
#'
#' head(predict(m.gles.cv, type="response"))
#' head(predict(m.gles.cv, type="trait"))
#'
#' par(xpd = TRUE, mar = c(5,4,4,6))
#' plot(m.gles.cv, subs.X = subs.X, plots_per_page = 4, which = 2:5)
#' paths(m.gles.cv, y.axis = 'L2')
#'
#'
#' ##############################
#' ##### Example with Bundesliga data set
#' ##############################
#' data(Buli1516)
#'
#' Y <- Buli1516$Y5
#'
#' Z1 <- scale(Buli1516$Z1, scale = FALSE)
#'
#' ctrl.buli <- ctrl.BTLLasso(object.order.effect = TRUE,
#' name.order = "Home",
#' penalize.order.effect.diffs = TRUE,
#' penalize.order.effect.absolute = FALSE,
#' order.center = TRUE, lambda2 = 1e-2)
#'
#' set.seed(1860)
#' m.buli <- cv.BTLLasso(Y = Y, Z1 = Z1, control = ctrl.buli)
#' m.buli
#'
#' par(xpd = TRUE, mar = c(5,4,4,6))
#' plot(m.buli)
#'
#'
#' ##############################
#' ##### Example with Topmodel data set
#' ##############################
#' data("Topmodel2007", package = "psychotree")
#'
#' Y.models <- response.BTLLasso(Topmodel2007$preference)
#' X.models <- scale(model.matrix(preference~., data = Topmodel2007)[,-1])
#' rownames(X.models) <- paste0("Subject",1:nrow(X.models))
#' colnames(X.models) <- c("Gender","Age","KnowShow","WatchShow","WatchFinal")
#'
#' set.seed(5)
#' m.models <- cv.BTLLasso(Y = Y.models, X = X.models)
#' plot(m.models, plots_per_page = 6)
#'
#' par(op)
#' }
NULL
#' Bundesliga Data 2015/16 (Buli1516)
#'
#' Data from the German Bundesliga from the season 2015/16.
#' The data contain all 306 matches of the season treated as paired comparisons with 5 (Y5) or 3 (Y3) different
#' response categories. Additionally, different match-specific covariates are given as, for example,
#' the percentage of ball possession or the total running distance per team and per match.
#'
#' @name Buli1516
#' @docType data
#' @format A list containing data from the German Bundesliga with 306 observations.
#' The list contains both information on the response (paired comparisons) and different covariates.
#' \describe{
#' \item{Y5}{A response.BTLLasso object with 5 response categories for the Buli1516 data including
#' \itemize{
#' \item{response: Ordinal paired comparison response vector}
#' \item{first.object: Vector containing the first-named team per paired comparison (home team)}
#' \item{second.object: Vector containing the second-named team per paired comparison (away team)}
#' \item{subject: Vector containing a match-day identifier per paired comparison}
#' \item{with.order: Vector indicating that each match has to be considered as including an order effect.}
#' }}
#' \item{Y3}{A response.BTLLasso object with 3 response categories for the Buli1516 data including
#' \itemize{
#' \item{response: Ordinal paired comparison response vector}
#' \item{first.object: Vector containing the first-named team per paired comparison (home team)}
#' \item{second.object: Vector containing the second-named team per paired comparison (away team)}
#' \item{subject: Vector containing a match-day identifier per paired comparison}
#' \item{with.order: Vector indicating that each match has to be considered as including an order effect.}
#' }}
#' \item{Z1}{Matrix containing all team-match-specific covariates
#' \itemize{
#' \item{Distance: Total amount of km run}
#' \item{BallPossession: Percentage of ball possession}
#' \item{TacklingRate: Rate of won tacklings}
#' \item{ShotsonGoal: Total number of shots on goal}
#' \item{CompletionRate: Percentage of passes reaching teammates}
#' \item{FoulsSuffered: Number of fouls suffered}
#' \item{Offside: Number of offsides (in attack)}
#' }
#' }
#' \item{Z2}{Matrix containing all the average market values of the teams as a team-specific covariate}
#' }
#' @references Schauberger, Gunther and Tutz, Gerhard (2019): BTLLasso - A Common Framework and Software
#' Package for the Inclusion and Selection of Covariates in Bradley-Terry Models, \emph{Journal of
#' Statistical Software}, 88(9), 1-29, \doi{10.18637/jss.v088.i09}
#'
#' Schauberger, Gunther and Tutz, Gerhard (2017): Subject-specific modelling
#' of paired comparison data: A lasso-type penalty approach, \emph{Statistical Modelling},
#' 17(3), 223 - 243
#'
#' Schauberger, Gunther, Groll Andreas and Tutz, Gerhard (2018):
#' Analysis of the importance of on-field covariates in the German Bundesliga,
#' \emph{Journal of Applied Statistics}, 45(9), 1561 - 1578
#' @source
#' \url{https://www.kicker.de/}
#' @keywords datasets
#' @seealso \code{\link{Buli1415}}, \code{\link{Buli1617}}, \code{\link{Buli1718}}
#' @examples
#' \dontrun{
#' op <- par(no.readonly = TRUE)
#'
#' data(Buli1516)
#'
#' Y <- Buli1516$Y5
#' Z1 <- scale(Buli1516$Z1, scale = FALSE)
#'
#' ctrl.buli <- ctrl.BTLLasso(object.order.effect = TRUE,
#' name.order = "Home",
#' penalize.order.effect.diffs = TRUE,
#' penalize.order.effect.absolute = FALSE,
#' order.center = TRUE, lambda2 = 1e-2)
#'
#' set.seed(1860)
#' m.buli <- cv.BTLLasso(Y = Y, Z1 = Z1, control = ctrl.buli)
#' m.buli
#'
#' par(xpd = TRUE, mar = c(5,4,4,6))
#' plot(m.buli)
#'
#' par(op)
#' }
NULL
#' Bundesliga Data 2014/15 (Buli1415)
#'
#' Data from the German Bundesliga from the season 2014/15.
#' The data contain all 306 matches of the season treated as paired comparisons with 5 (Y5) or 3 (Y3) different
#' response categories. Additionally, different match-specific covariates are given as, for example,
#' the percentage of ball possession or the total running distance per team and per match.
#'
#' @name Buli1415
#' @docType data
#' @format A list containing data from the German Bundesliga with 306 observations.
#' The list contains both information on the response (paired comparisons) and different covariates.
#' \describe{
#' \item{Y5}{A response.BTLLasso object with 5 response categories for the Buli1516 data including
#' \itemize{
#' \item{response: Ordinal paired comparison response vector}
#' \item{first.object: Vector containing the first-named team per paired comparison (home team)}
#' \item{second.object: Vector containing the second-named team per paired comparison (away team)}
#' \item{subject: Vector containing a match-day identifier per paired comparison}
#' \item{with.order: Vector indicating that each match has to be considered as including an order effect.}
#' }}
#' \item{Y3}{A response.BTLLasso object with 3 response categories for the Buli1516 data including
#' \itemize{
#' \item{response: Ordinal paired comparison response vector}
#' \item{first.object: Vector containing the first-named team per paired comparison (home team)}
#' \item{second.object: Vector containing the second-named team per paired comparison (away team)}
#' \item{subject: Vector containing a match-day identifier per paired comparison}
#' \item{with.order: Vector indicating that each match has to be considered as including an order effect.}
#' }}
#' \item{Z1}{Matrix containing all team-match-specific covariates
#' \itemize{
#' \item{Distance: Total amount of km run}
#' \item{BallPossession: Percentage of ball possession}
#' \item{TacklingRate: Rate of won tacklings}
#' \item{ShotsonGoal: Total number of shots on goal}
#' \item{CompletionRate: Percentage of passes reaching teammates}
#' \item{FoulsSuffered: Number of fouls suffered}
#' \item{Offside: Number of offsides (in attack)}
#' }
#' }
#' }
#' @references Schauberger, Gunther and Tutz, Gerhard (2019): BTLLasso - A Common Framework and Software
#' Package for the Inclusion and Selection of Covariates in Bradley-Terry Models, \emph{Journal of
#' Statistical Software}, 88(9), 1-29, \doi{10.18637/jss.v088.i09}
#'
#' Schauberger, Gunther and Tutz, Gerhard (2017): Subject-specific modelling
#' of paired comparison data: A lasso-type penalty approach, \emph{Statistical Modelling},
#' 17(3), 223 - 243
#'
#' Schauberger, Gunther, Groll Andreas and Tutz, Gerhard (2018):
#' Analysis of the importance of on-field covariates in the German Bundesliga,
#' \emph{Journal of Applied Statistics}, 45(9), 1561 - 1578
#' @source
#' \url{https://www.kicker.de/}
#' @keywords datasets
#' @seealso \code{\link{Buli1516}}, \code{\link{Buli1617}}, \code{\link{Buli1718}}
#' @examples
#' \dontrun{
#' op <- par(no.readonly = TRUE)
#'
#' data(Buli1415)
#'
#' Y <- Buli1415$Y5
#' Z1 <- scale(Buli1415$Z1, scale = FALSE)
#'
#' ctrl.buli <- ctrl.BTLLasso(object.order.effect = TRUE,
#' name.order = "Home",
#' penalize.order.effect.diffs = TRUE,
#' penalize.order.effect.absolute = FALSE,
#' order.center = TRUE, lambda2 = 1e-2)
#'
#' set.seed(1860)
#' m.buli <- cv.BTLLasso(Y = Y, Z1 = Z1, control = ctrl.buli)
#' m.buli
#'
#' par(xpd = TRUE, mar = c(5,4,4,6))
#' plot(m.buli)
#'
#' par(op)
#' }
NULL
#' Bundesliga Data 2016/17 (Buli1617)
#'
#' Data from the German Bundesliga from the season 2016/17.
#' The data contain all 306 matches of the season treated as paired comparisons with 5 (Y5) or 3 (Y3) different
#' response categories. Additionally, different match-specific covariates are given as, for example,
#' the percentage of ball possession or the total running distance per team and per match.
#'
#' @name Buli1617
#' @docType data
#' @format A list containing data from the German Bundesliga with 306 observations.
#' The list contains both information on the response (paired comparisons) and different covariates.
#' \describe{
#' \item{Y5}{A response.BTLLasso object with 5 response categories for the Buli1516 data including
#' \itemize{
#' \item{response: Ordinal paired comparison response vector}
#' \item{first.object: Vector containing the first-named team per paired comparison (home team)}
#' \item{second.object: Vector containing the second-named team per paired comparison (away team)}
#' \item{subject: Vector containing a match-day identifier per paired comparison}
#' \item{with.order: Vector indicating that each match has to be considered as including an order effect.}
#' }}
#' \item{Y3}{A response.BTLLasso object with 3 response categories for the Buli1516 data including
#' \itemize{
#' \item{response: Ordinal paired comparison response vector}
#' \item{first.object: Vector containing the first-named team per paired comparison (home team)}
#' \item{second.object: Vector containing the second-named team per paired comparison (away team)}
#' \item{subject: Vector containing a match-day identifier per paired comparison}
#' \item{with.order: Vector indicating that each match has to be considered as including an order effect.}
#' }}
#' \item{Z1}{Matrix containing all team-match-specific covariates
#' \itemize{
#' \item{Distance: Total amount of km run}
#' \item{BallPossession: Percentage of ball possession}
#' \item{TacklingRate: Rate of won tacklings}
#' \item{ShotsonGoal: Total number of shots on goal}
#' \item{CompletionRate: Percentage of passes reaching teammates}
#' \item{FoulsSuffered: Number of fouls suffered}
#' \item{Offside: Number of offsides (in attack)}
#' \item{Corners: Number of corners (in attack)}
#' }
#' }
#' }
#' @references Schauberger, Gunther and Tutz, Gerhard (2019): BTLLasso - A Common Framework and Software
#' Package for the Inclusion and Selection of Covariates in Bradley-Terry Models, \emph{Journal of
#' Statistical Software}, 88(9), 1-29, \doi{10.18637/jss.v088.i09}
#'
#' Schauberger, Gunther and Tutz, Gerhard (2017): Subject-specific modelling
#' of paired comparison data: A lasso-type penalty approach, \emph{Statistical Modelling},
#' 17(3), 223 - 243
#'
#' Schauberger, Gunther, Groll Andreas and Tutz, Gerhard (2018):
#' Analysis of the importance of on-field covariates in the German Bundesliga,
#' \emph{Journal of Applied Statistics}, 45(9), 1561 - 1578
#' @source
#' \url{https://www.kicker.de/}
#' @keywords datasets
#' @seealso \code{\link{Buli1415}}, \code{\link{Buli1516}}, \code{\link{Buli1718}}
#' @examples
#' \dontrun{
#' op <- par(no.readonly = TRUE)
#'
#' data(Buli1617)
#'
#' Y <- Buli1617$Y5
#' Z1 <- scale(Buli1617$Z1, scale = FALSE)
#'
#' ctrl.buli <- ctrl.BTLLasso(object.order.effect = TRUE,
#' name.order = "Home",
#' penalize.order.effect.diffs = TRUE,
#' penalize.order.effect.absolute = FALSE,
#' order.center = TRUE, lambda2 = 1e-2)
#'
#' set.seed(1860)
#' m.buli <- cv.BTLLasso(Y = Y, Z1 = Z1, control = ctrl.buli)
#' m.buli
#'
#' par(xpd = TRUE, mar = c(5,4,4,6))
#' plot(m.buli)
#'
#' par(op)
#' }
NULL
#' Bundesliga Data Response Data (BuliResponse)
#'
#' Data from the German Bundesliga from the season 2015/16. The data contain all
#' variables from the 306 matches that are necessary to create the respective
#' \code{response.BTLLasso} object from the data set \code{\link{Buli1516}}. The purpose
#' of the data set is to provide an example how \code{response.BTLLasso} objects can be created.
#'
#' @name BuliResponse
#' @docType data
#' @format A data set containing all information that is necessary to create a response object
#' for the Bundesliga data \code{\link{Buli1516}}
#' \describe{
#' \item{Result}{Ordinal, 5-categorical results from Bundesliga season 2015/16.}
#' \item{TeamHome}{Abbreviation of home team.}
#' \item{TeamAway}{Abbreviation of away team.}
#' \item{Matchday}{Matchdays from 1 to 34.}
#' }
#' @references Schauberger, Gunther and Tutz, Gerhard (2019): BTLLasso - A Common Framework and Software
#' Package for the Inclusion and Selection of Covariates in Bradley-Terry Models, \emph{Journal of
#' Statistical Software}, 88(9), 1-29, \doi{10.18637/jss.v088.i09}
#'
#' Schauberger, Gunther and Tutz, Gerhard (2017): Subject-specific modelling
#' of paired comparison data: A lasso-type penalty approach, \emph{Statistical Modelling},
#' 17(3), 223 - 243
#'
#' Schauberger, Gunther, Groll Andreas and Tutz, Gerhard (2018):
#' Analysis of the importance of on-field covariates in the German Bundesliga,
#' \emph{Journal of Applied Statistics}, 45(9), 1561 - 1578
#' @source
#' \url{https://www.kicker.de/}
#' @keywords datasets
#' @examples
#' \dontrun{
#' data(BuliResponse)
#'
#' Y.Buli <- response.BTLLasso(response = BuliResponse$Result,
#' first.object = BuliResponse$TeamHome,
#' second.object = BuliResponse$TeamAway,
#' subject = BuliResponse$Matchday)
#' }
NULL
#' German Longitudinal Election Study (GLES)
#'
#' Data from the German Longitudinal Election Study (GLES), see Rattinger et
#' al. (2014). The GLES is a long-term study of the German electoral process.
#' It collects pre- and post-election data for several federal elections; the
#' data used here originate from the pre-election study for 2013.
#'
#' @name GLES
#' @docType data
#' @format A list containing data from the German Longitudinal Election Study with 2003
#' (partly incomplete) observations.
#' The list contains both information on the response (paired comparisons) and different covariates.
#' \describe{
#' \item{Y}{A response.BTLLasso object for the GLES data including
#' \itemize{
#' \item{response: Ordinal paired comparison response vector}
#' \item{first.object: Vector containing the first-named party per paired comparison}
#' \item{second.object: Vector containing the second-named party per paired comparison}
#' \item{subject: Vector containing a person identifier per paired comparison}
#' \item{with.order: Automatically generated vector containing information on the order effect. Irrelevant, because
#' no order effect needs to be included in the analysis of the GLES data.}
#' }}
#' \item{X}{Matrix containing all eight person-specific covariates
#' \itemize{
#' \item{Age: Age in years}
#' \item{Gender (0: male, 1: female)}
#' \item{EastWest (0: West Germany, 1: East Germany)}
#' \item{PersEcon: Personal economic situation, 1: good or very good,
#' 0: else}
#' \item{Abitur: School leaving certificate, 1: Abitur/A
#' levels, 0: else}
#' \item{Unemployment: 1: currently unemployed, 0:
#' else}
#' \item{Church: Frequency of attendence in a
#' church/synagogue/mosque/..., 1: at least once a month, 0: else}
#' \item{Migration: Are you a migrant / not German since birth? 1: yes,
#' 0: no}
#' }
#' }
#' \item{Z1}{Matrix containing all four person-party-specific covariates
#' \itemize{
#' \item{Climate: Self-perceived distance of each person to all five parties with respect to
#' one's attitude towards climate change.}
#' \item{SocioEcon: Self-perceived distance of each person to all five parties with respect to
#' one's attitude towards socio-economic issues.}
#' \item{Immigration: Self-perceived distance of each person to all five parties with respect to
#' one's attitude towards immigration.}
#' }
#' }
#' }
#' @references Rattinger, H., S. Rossteutscher, R. Schmitt-Beck, B. Wessels,
#' and C. Wolf (2014): Pre-election cross section (GLES 2013). \emph{GESIS Data
#' Archive, Cologne ZA5700 Data file Version 2.0.0.}
#'
#' Schauberger, Gunther and Tutz, Gerhard (2019): BTLLasso - A Common Framework and Software
#' Package for the Inclusion and Selection of Covariates in Bradley-Terry Models, \emph{Journal of
#' Statistical Software}, 88(9), 1-29, \doi{10.18637/jss.v088.i09}
#'
#' Schauberger, Gunther and Tutz, Gerhard (2017): Subject-specific modelling
#' of paired comparison data: A lasso-type penalty approach, \emph{Statistical Modelling},
#' 17(3), 223 - 243
#' @source
#' \url{https://www.gesis.org/en/gles/about-gles}
#' @keywords datasets
#' @examples
#' \dontrun{
#' op <- par(no.readonly = TRUE)
#'
#' data(GLES)
#' Y <- GLES$Y
#' X <- scale(GLES$X, scale = FALSE)
#'
#' subs <- c("(in years)","female (1); male (0)","East Germany (1); West Germany (0)",
#' "(very) good (1); else (0)", "Abitur/A levels (1); else (0)",
#' "currently unemployed (1); else (0)","at least once a month (1); else (0)",
#' "yes (1); no (0)")
#'
#' set.seed(5)
#' m.gles <- cv.BTLLasso(Y = Y, X = X, control = ctrl.BTLLasso(l.lambda = 50))
#'
#' par(xpd = TRUE, mar = c(5,4,4,6))
#' plot(m.gles, subs.X = subs)
#'
#' par(op)
#' }
#'
NULL
#' Subset of the GLES data set with 200 observations and 4 covariates.
#'
#' This is a subset of the \code{\link{GLES}} data set from the German
#' Longitudinal Election Study (GLES), see Rattinger et al. (2014). The subset contains
#' only 200 of the 2003 observations and only a small part of the covariates. The GLES is
#' a long-term study of the German electoral process. It collects pre- and
#' post-election data for several federal elections, the data used here
#' originate from the pre-election study for 2013.
#'
#' @name GLESsmall
#' @docType data
#' @format A list containing data from the German Longitudinal Election Study with 200 observations.
#' The list contains both information on the response (paired comparisons) and different covariates.
#' \describe{
#' \item{Y}{A response.BTLLasso object for the GLES data including
#' \itemize{
#' \item{response: Ordinal paired comparison response vector}
#' \item{first.object: Vector containing the first-named party per paired comparison}
#' \item{second.object: Vector containing the second-named party per paired comparison}
#' \item{subject: Vector containing a person identifier per paired comparison}
#' \item{with.order: Automatically generated vector containing information on the order effect. Irrelevant, because
#' no order effect needs to be included in the analysis of the GLES data.}
#' }}
#' \item{X}{Matrix containing both person-specific covariates
#' \itemize{
#' \item{Age: Age in years}
#' \item{Gender (0: male, 1: female)}
#' }
#' }
#' \item{Z1}{Matrix containing both person-party-specific covariates
#' \itemize{
#' \item{Climate: Self-perceived distance of each person to all five parties with respect to
#' one's attitude towards climate change.}
#' \item{Immigration: Self-perceived distance of each person to all five parties with respect to
#' one's attitude towards immigration.}
#' }
#' }
#' }
#' @references Rattinger, H., S. Rossteutscher, R. Schmitt-Beck, B. Wessels,
#' and C. Wolf (2014): Pre-election cross section (GLES 2013). \emph{GESIS Data
#' Archive, Cologne ZA5700 Data file Version 2.0.0.}
#'
#' Schauberger, Gunther and Tutz, Gerhard (2019): BTLLasso - A Common Framework and Software
#' Package for the Inclusion and Selection of Covariates in Bradley-Terry Models, \emph{Journal of
#' Statistical Software}, 88(9), 1-29, \doi{10.18637/jss.v088.i09}
#'
#' Schauberger, Gunther and Tutz, Gerhard (2017): Subject-specific modelling
#' of paired comparison data: A lasso-type penalty approach, \emph{Statistical Modelling},
#' 17(3), 223 - 243
#' @source
#' \url{https://www.gesis.org/en/gles/about-gles}
#' @keywords datasets
#' @seealso \code{\link{GLES}}
#' @examples
#'
#' \dontrun{
#' op <- par(no.readonly = TRUE)
#'
#' data(GLESsmall)
#'
#' ## extract data and center covariates for better interpretability
#' Y <- GLESsmall$Y
#' X <- scale(GLESsmall$X, scale = FALSE)
#' Z1 <- scale(GLESsmall$Z1, scale = FALSE)
#'
#' ## vector of subtitles, containing the coding of the X covariates
#' subs.X <- c('', 'female (1); male (0)')
#'
#' ## Cross-validate BTLLasso model
#' m.gles.cv <- cv.BTLLasso(Y = Y, X = X, Z1 = Z1)
#' m.gles.cv
#'
#' coef(m.gles.cv)
#' logLik(m.gles.cv)
#'
#' head(predict(m.gles.cv, type="response"))
#' head(predict(m.gles.cv, type="trait"))
#'
#' par(xpd = TRUE, mar = c(5,4,4,6))
#' plot(m.gles.cv, subs.X = subs.X, plots_per_page = 4, which = 2:5)
#' paths(m.gles.cv, y.axis = 'L2')
#'
#' par(op)
#' }
#'
NULL
#' Simulated data set for illustration
#'
#' This data set is a simulated data set including all possible types of covariates (X, Z1 and Z2)
#' and is intended to serve for illustration purposes. The data set contains paired comparisons between
#' four objects with five different response categories from 200 subjects.
#'
#' @name SimData
#' @docType data
#' @format A list containing simulated data for 200 observations.
#' The list contains both information on the response (paired comparisons) and different covariates.
#' \describe{
#' \item{Y}{A response.BTLLasso object with simulated responses including
#' \itemize{
#' \item{response: Ordinal paired comparison response vector}
#' \item{first.object: Vector containing the first-named object per paired comparison}
#' \item{second.object: Vector containing the second-named object per paired comparison}
#' \item{subject: Vector containing a subject identifier per paired comparison}
#' \item{with.order: Automatically generated vector containing information on the order effect. Each paired
#' comparison is associated with an order effect.}
#' }}
#' \item{X}{Matrix containing both subject-specific covariates
#' \itemize{
#' \item{X_var1}
#' \item{X_var2}
#' }
#' }
#' \item{Z1}{Matrix containing both subject-object-specific covariates
#' \itemize{
#' \item{Z1_var1}
#' \item{Z1_var2}
#' }
#' }
#' \item{Z2}{Matrix containing both object-specific covariates
#' \itemize{
#' \item{Z2_var1}
#' \item{Z2_var2}
#' }
#' }
#' }
#' @keywords datasets
#' @examples
#'
#' \dontrun{
#' op <- par(no.readonly = TRUE)
#'
#' data(SimData)
#'
#' ## Specify control argument
#' ## -> allow for object-specific order effects and penalize intercepts
#' ctrl <- ctrl.BTLLasso(penalize.intercepts = TRUE, object.order.effect = TRUE,
#' penalize.order.effect.diffs = TRUE)
#'
#' ## Simple BTLLasso model for tuning parameters lambda
#' m.sim <- BTLLasso(Y = SimData$Y, X = SimData$X, Z1 = SimData$Z1,
#' Z2 = SimData$Z2, control = ctrl)
#' m.sim
#'
#' par(xpd = TRUE)
#' plot(m.sim)
#'
#'
#' ## Cross-validate BTLLasso model for tuning parameters lambda
#' set.seed(1860)
#' m.sim.cv <- cv.BTLLasso(Y = SimData$Y, X = SimData$X, Z1 = SimData$Z1,
#' Z2 = SimData$Z2, control = ctrl)
#' m.sim.cv
#' coef(m.sim.cv)
#' logLik(m.sim.cv)
#'
#' head(predict(m.sim.cv, type="response"))
#' head(predict(m.sim.cv, type="trait"))
#'
#' plot(m.sim.cv, plots_per_page = 4)
#'
#'
#' ## Example for bootstrap intervals for illustration only
#' ## Don't calculate bootstrap intervals with B = 20!!!!
#' set.seed(1860)
#' m.sim.boot <- boot.BTLLasso(m.sim.cv, B = 20, cores = 20)
#' m.sim.boot
#' plot(m.sim.boot, plots_per_page = 4)
#'
#' par(op)
#' }
NULL
#' Bundesliga Data 2017/18 (Buli1718)
#'
#' Data from the German Bundesliga from the season 2017/18.
#' The data contain all 306 matches of the season treated as paired comparisons with 5 (Y5) or 3 (Y3) different
#' response categories. Additionally, different match-specific covariates are given, for example
#' the percentage of ball possession or the total running distance per team and per match.
#'
#' @name Buli1718
#' @docType data
#' @format A list containing data from the German Bundesliga with 306 observations.
#' The list contains both information on the response (paired comparisons) and different covariates.
#' \describe{
#'   \item{Y5}{A response.BTLLasso object with 5 response categories for the Buli1718 data including
#' \itemize{
#' \item{response: Ordinal paired comparison response vector}
#' \item{first.object: Vector containing the first-named team per paired comparison (home team)}
#' \item{second.object: Vector containing the second-named team per paired comparison (away team)}
#' \item{subject: Vector containing a match-day identifier per paired comparison}
#'   \item{with.order: Vector indicating that each match is considered to include an order (home) effect.}
#' }}
#'   \item{Y3}{A response.BTLLasso object with 3 response categories for the Buli1718 data including
#' \itemize{
#' \item{response: Ordinal paired comparison response vector}
#' \item{first.object: Vector containing the first-named team per paired comparison (home team)}
#' \item{second.object: Vector containing the second-named team per paired comparison (away team)}
#' \item{subject: Vector containing a match-day identifier per paired comparison}
#'   \item{with.order: Vector indicating that each match is considered to include an order (home) effect.}
#' }}
#' \item{Z1}{Matrix containing all team-match-specific covariates
#' \itemize{
#' \item{Distance: Total amount of km run}
#' \item{BallPossession: Percentage of ball possession}
#' \item{TacklingRate: Rate of won tacklings}
#' \item{ShotsonGoal: Total number of shots on goal}
#' \item{CompletionRate: Percentage of passes reaching teammates}
#' \item{FoulsSuffered: Number of fouls suffered}
#' \item{Offside: Number of offsides (in attack)}
#' \item{Corners: Number of corners (in attack)}
#' }
#' }
#' }
#' @references Schauberger, Gunther and Tutz, Gerhard (2019): BTLLasso - A Common Framework and Software
#' Package for the Inclusion and Selection of Covariates in Bradley-Terry Models, \emph{Journal of
#' Statistical Software}, 88(9), 1-29, \doi{10.18637/jss.v088.i09}
#'
#' Schauberger, Gunther and Tutz, Gerhard (2017): Subject-specific modelling
#' of paired comparison data: A lasso-type penalty approach, \emph{Statistical Modelling},
#' 17(3), 223 - 243
#'
#' Schauberger, Gunther, Groll Andreas and Tutz, Gerhard (2018):
#' Analysis of the importance of on-field covariates in the German Bundesliga,
#' \emph{Journal of Applied Statistics}, 45(9), 1561 - 1578
#' @source
#' \url{https://www.kicker.de/}
#' @keywords datasets
#' @seealso \code{\link{Buli1415}}, \code{\link{Buli1516}}, \code{\link{Buli1617}}
#' @examples
#' \dontrun{
#' op <- par(no.readonly = TRUE)
#'
#' data(Buli1718)
#'
#' Y <- Buli1718$Y5
#' Z1 <- scale(Buli1718$Z1, scale = FALSE)
#'
#' ctrl.buli <- ctrl.BTLLasso(object.order.effect = TRUE,
#' name.order = "Home",
#' penalize.order.effect.diffs = TRUE,
#' penalize.order.effect.absolute = FALSE,
#' order.center = TRUE, lambda2 = 1e-2)
#'
#' set.seed(1860)
#' m.buli <- cv.BTLLasso(Y = Y, Z1 = Z1, control = ctrl.buli)
#' m.buli
#'
#' par(xpd = TRUE, mar = c(5,4,4,6))
#' plot(m.buli)
#'
#' par(op)
#' }
NULL
## ---- end of file /scratch/gouwar.j/cran-all/cranData/BTLLasso/R/BTLLasso-package.R ----
#' Control function for BTLLasso
#'
#' Control parameters for different penalty terms and for tuning the fitting algorithm.
#'
#' @param l.lambda Number of tuning parameters. Applies only if \code{lambda = NULL} in the
#' main function.
#' @param log.lambda Should the grid of tuning parameters be created on a logarithmic scale
#' rather than equidistant. Applies only if \code{lambda = NULL} in the main function.
#' @param lambda.min Minimal value for tuning parameter. Applies only if \code{lambda = NULL} in the
#' main function.
#' @param adaptive Should adaptive lasso be used? Default is TRUE.
#' @param scale Should the covariates be scaled so that they are on comparable scales? Default is TRUE.
#' Variables will be scaled by their standard deviations. Please note that results will refer to scaled covariates.
#' If \code{adaptive = TRUE} scaling is not necessary to keep penalties comparable.
#' @param norm Specifies the norm used in the penalty term. Currently, only
#' 'L1' and 'L2' are possible. Default is 'L1'; only 'L1' allows for
#' clustering and variable selection.
#' @param epsilon Threshold value for convergence of the algorithm.
#' @param lambda2 Tuning parameter for ridge penalty on all coefficients.
#' Should be small, only used to stabilize results.
#' @param c Internal parameter for the quadratic approximation of the L1
#' penalty. Should be sufficiently small.
#' @param precision Precision for final parameter estimates, specifies number of decimals.
#' @param weight.penalties Should the penalties across the different model components
#' (i.e. intercepts, order effects, X, Z1, Z2) be weighted according to the number of
#' penalties included? Default is \code{TRUE} to minimize the risk of selection bias
#' across different model components.
#' @param include.intercepts Should intercepts be included in the model?
#' @param order.effect Should a global order effect (corresponding to home effect
#' in sports applications) be included in the model?
#' @param object.order.effect Should object-specific order effects (corresponding to home effects
#' in sports applications) be included in the model?
#' @param order.center Should (in case of object-specific order effects) the order effects be centered
#' in the design matrix? Centering is equivalent to the coding scheme of effect coding instead of
#' dummy coding.
#' @param name.order Name used to label the order effect(s) in plots and prints.
#' @param penalize.intercepts Should intercepts be penalized? If \code{TRUE},
#' all pairwise differences between intercepts are penalized.
#' @param penalize.X Should effects from X matrix be penalized? If \code{TRUE},
#' all pairwise differences corresponding to one covariate are penalized. Can also be used with
#' a character vector as input. Then, the character vector contains the names of the variables
#' from X whose parameters should be penalized.
#' @param penalize.Z2 Should absolute values of effects from Z2 matrix be
#' penalized? Can also be used with
#' a character vector as input. Then, the character vector contains the names of the variables
#' from Z2 whose parameters should be penalized.
#' @param penalize.Z1.absolute Should absolute values of effects from Z1 matrix
#' be penalized? Can also be used with
#' a character vector as input. Then, the character vector contains the names of the variables
#' from Z1 whose parameters should be penalized.
#' @param penalize.Z1.diffs Should differences of effects from Z1 matrix be
#' penalized? If \code{TRUE}, all pairwise differences corresponding to one
#' covariate are penalized. Can also be used with
#' a character vector as input. Then, the character vector contains the names of the variables
#' from Z1 whose parameters should be penalized.
#' @param penalize.order.effect.absolute Should absolute values of order effect(s) be penalized?
#' Only relevant if either \code{object.order.effect = TRUE} or \code{order.effect = TRUE}.
#' @param penalize.order.effect.diffs Should differences of order effects be
#' penalized? If \code{TRUE}, all pairwise differences are penalized. Only relevant if
#' \code{object.order.effect = TRUE}
#' @author Gunther Schauberger\cr \email{gunther.schauberger@@tum.de}
#' @seealso \code{\link{BTLLasso}}, \code{\link{cv.BTLLasso}}
#' @references Schauberger, Gunther and Tutz, Gerhard (2019): BTLLasso - A Common Framework and Software
#' Package for the Inclusion and Selection of Covariates in Bradley-Terry Models, \emph{Journal of
#' Statistical Software}, 88(9), 1-29, \doi{10.18637/jss.v088.i09}
#'
#' Schauberger, Gunther and Tutz, Gerhard (2017): Subject-specific modelling
#' of paired comparison data: A lasso-type penalty approach, \emph{Statistical Modelling},
#' 17(3), 223 - 243
#'
#' Schauberger, Gunther, Groll Andreas and Tutz, Gerhard (2018):
#' Analysis of the importance of on-field covariates in the German Bundesliga,
#' \emph{Journal of Applied Statistics}, 45(9), 1561 - 1578
#' @keywords BTLLasso control
#' @examples
#'
#' \dontrun{
#' op <- par(no.readonly = TRUE)
#'
#' ##############################
#' ##### Example with simulated data set containing X, Z1 and Z2
#' ##############################
#' data(SimData)
#'
#' ## Specify control argument
#' ## -> allow for object-specific order effects and penalize intercepts
#' ctrl <- ctrl.BTLLasso(penalize.intercepts = TRUE, object.order.effect = TRUE,
#' penalize.order.effect.diffs = TRUE)
#'
#' ## Simple BTLLasso model for tuning parameters lambda
#' m.sim <- BTLLasso(Y = SimData$Y, X = SimData$X, Z1 = SimData$Z1,
#' Z2 = SimData$Z2, control = ctrl)
#' m.sim
#'
#' par(xpd = TRUE)
#' plot(m.sim)
#'
#'
#' ## Cross-validate BTLLasso model for tuning parameters lambda
#' set.seed(1860)
#' m.sim.cv <- cv.BTLLasso(Y = SimData$Y, X = SimData$X, Z1 = SimData$Z1,
#' Z2 = SimData$Z2, control = ctrl)
#' m.sim.cv
#' coef(m.sim.cv)
#' logLik(m.sim.cv)
#'
#' head(predict(m.sim.cv, type="response"))
#' head(predict(m.sim.cv, type="trait"))
#'
#' plot(m.sim.cv, plots_per_page = 4)
#'
#'
#' ## Example for bootstrap intervals for illustration only
#' ## Don't calculate bootstrap intervals with B = 20!!!!
#' set.seed(1860)
#' m.sim.boot <- boot.BTLLasso(m.sim.cv, B = 20, cores = 20)
#' m.sim.boot
#' plot(m.sim.boot, plots_per_page = 4)
#'
#'
#' ##############################
#' ##### Example with small version from GLES data set
#' ##############################
#' data(GLESsmall)
#'
#' ## extract data and center covariates for better interpretability
#' Y <- GLESsmall$Y
#' X <- scale(GLESsmall$X, scale = FALSE)
#' Z1 <- scale(GLESsmall$Z1, scale = FALSE)
#'
#' ## vector of subtitles, containing the coding of the X covariates
#' subs.X <- c('', 'female (1); male (0)')
#'
#' ## Cross-validate BTLLasso model
#' m.gles.cv <- cv.BTLLasso(Y = Y, X = X, Z1 = Z1)
#' m.gles.cv
#'
#' coef(m.gles.cv)
#' logLik(m.gles.cv)
#'
#' head(predict(m.gles.cv, type="response"))
#' head(predict(m.gles.cv, type="trait"))
#'
#' par(xpd = TRUE, mar = c(5,4,4,6))
#' plot(m.gles.cv, subs.X = subs.X, plots_per_page = 4, which = 2:5)
#' paths(m.gles.cv, y.axis = 'L2')
#'
#'
#' ##############################
#' ##### Example with Bundesliga data set
#' ##############################
#' data(Buli1516)
#'
#' Y <- Buli1516$Y5
#'
#' Z1 <- scale(Buli1516$Z1, scale = FALSE)
#'
#' ctrl.buli <- ctrl.BTLLasso(object.order.effect = TRUE,
#' name.order = "Home",
#' penalize.order.effect.diffs = TRUE,
#' penalize.order.effect.absolute = FALSE,
#' order.center = TRUE, lambda2 = 1e-2)
#'
#' set.seed(1860)
#' m.buli <- cv.BTLLasso(Y = Y, Z1 = Z1, control = ctrl.buli)
#' m.buli
#'
#' par(xpd = TRUE, mar = c(5,4,4,6))
#' plot(m.buli)
#'
#'
#' ##############################
#' ##### Example with Topmodel data set
#' ##############################
#' data("Topmodel2007", package = "psychotree")
#'
#' Y.models <- response.BTLLasso(Topmodel2007$preference)
#' X.models <- scale(model.matrix(preference~., data = Topmodel2007)[,-1])
#' rownames(X.models) <- paste0("Subject",1:nrow(X.models))
#' colnames(X.models) <- c("Gender","Age","KnowShow","WatchShow","WatchFinal")
#'
#' set.seed(5)
#' m.models <- cv.BTLLasso(Y = Y.models, X = X.models)
#' plot(m.models, plots_per_page = 6)
#'
#' par(op)
#' }
ctrl.BTLLasso <- function(l.lambda = 30, log.lambda = TRUE, lambda.min = 0.05,
adaptive = TRUE, scale = TRUE, norm = c("L1",
"L2"), epsilon = 1e-04, lambda2 = 1e-04, c = 1e-09, precision = 3,
weight.penalties = TRUE, include.intercepts = TRUE, order.effect = FALSE,
object.order.effect = FALSE, order.center = FALSE, name.order = "Order",
penalize.intercepts = FALSE, penalize.X = TRUE, penalize.Z2 = FALSE,
penalize.Z1.absolute = TRUE, penalize.Z1.diffs = TRUE, penalize.order.effect.absolute = TRUE,
penalize.order.effect.diffs = FALSE) {
norm <- match.arg(norm)
RET <- list(l.lambda = l.lambda, log.lambda = log.lambda, lambda.min = lambda.min,
adaptive = adaptive, scale = scale, norm = norm,
epsilon = epsilon, lambda2 = lambda2, c = c, penalize.X = penalize.X,
penalize.Z1.diffs = penalize.Z1.diffs, penalize.Z2 = penalize.Z2,
penalize.Z1.absolute = penalize.Z1.absolute, penalize.intercepts = penalize.intercepts,
include.intercepts = include.intercepts, order.effect = order.effect,
object.order.effect = object.order.effect, order.center = order.center,
penalize.order.effect.diffs = penalize.order.effect.diffs,
penalize.order.effect.absolute = penalize.order.effect.absolute,
name.order = name.order, precision = precision, weight.penalties = weight.penalties)
RET
}
#' Function to perform BTLLasso
#'
#' Performs BTLLasso, a method to model heterogeneity in paired comparison
#' data. Different types of covariates are allowed to have an influence on the
#' attractivity/strength of the objects. Covariates can be subject-specific,
#' object-specific or subject-object-specific. L1 penalties are used to reduce the
#' complexity of the model by enforcing clusters of equal effects or by eliminating irrelevant
#' covariates.
#'
#'
#' @param Y A \code{response.BTLLasso} object created by
#' \code{\link{response.BTLLasso}}.
#' @param X Matrix containing all \bold{subject-specific covariates} that are
#' to be included with \bold{object-specific effects}. One row represents one
#' subject, one column represents one covariate. X has to be standardized.
#' @param Z1 Matrix containing all \bold{object-subject-specific covariates}
#' that are to be included with \bold{object-specific effects}. One row
#' represents one subject, one column represents one combination between
#' covariate and object. Column names have to follow the scheme
#' 'firstvar.object1',...,'firstvar.objectm',...,'lastvar.objectm'. The object
#' names 'object1',...,'objectm' have to be identical to the object names used
#' in the \code{response.BTLLasso} object \code{Y}. The variable names and the
#' object names have to be separated by '.'. The rownames of the matrix Z1
#' have to be equal to the subjects specified in the response object.
#' Z1 has to be standardized.
#' @param Z2 Matrix containing all \bold{object-subject-specific covariates or
#' object-specific covariates} that are to be included with \bold{global
#' effects}. One row represents one subject, one column represents one
#' combination between covariate and object. Column names have to follow the
#' scheme 'firstvar.object1',...,'firstvar.objectm',...,'lastvar.objectm'. The
#' object names 'object1',...,'objectm' have to be identical to the object
#' names used in the \code{response.BTLLasso} object \code{Y}. The variable
#' names and the object names have to be separated by '.'. The rownames of the
#' matrix Z2 have to be equal to the subjects specified in the response
#' object. Z2 has to be standardized.
#' @param lambda Vector of tuning parameters. If \code{NULL}, automatically a grid
#' of tuning parameters is created.
#' @param control Function for control arguments, mostly for internal use. See
#' also \code{\link{ctrl.BTLLasso}}.
#' @param trace Should the trace of the BTLLasso algorithm be printed?
#' @return
#' \item{coefs}{Matrix containing all (original) coefficients, one row
#' per tuning parameter, one column per coefficient.}
#' \item{coefs.repar}{Matrix
#' containing all reparameterized (for symmetric side constraint) coefficients,
#' one row per tuning parameter, one column per coefficient.}
#' \item{logLik}{Vector of log-likelihoods, one value per tuning parameter.}
#' \item{design}{List containing design matrix and several additional information like,
#' e.g., number and names of covariates.}
#' \item{Y}{Response object.}
#' \item{penalty}{List containing all penalty matrices and some further information on penalties.}
#' \item{response}{Vector containing 0-1 coded
#' response.}
#' \item{X}{X matrix containing subject-specific covariates.}
#' \item{Z1}{Z1 matrix containing subject-object-specific covariates.}
#' \item{Z2}{Z2 matrix containing (subject)-object-specific covariates.}
#' \item{lambda}{Vector of tuning parameters.}
#' \item{control}{Control argument, specified by \code{\link{ctrl.BTLLasso}}.}
#' \item{df}{Vector containing degrees of freedom for all models along the grid
#' of tuning parameters.}
#' @author Gunther Schauberger\cr \email{gunther.schauberger@@tum.de}
#' @seealso \code{\link{cv.BTLLasso}}, \code{\link{boot.BTLLasso}}, \code{\link{ctrl.BTLLasso}},
#' \code{\link{plot.BTLLasso}}, \code{\link{paths}}, \code{\link{print.BTLLasso}},
#' \code{\link{predict.BTLLasso}}, \code{\link{coef}}
#' @references Schauberger, Gunther and Tutz, Gerhard (2019): BTLLasso - A Common Framework and Software
#' Package for the Inclusion and Selection of Covariates in Bradley-Terry Models, \emph{Journal of
#' Statistical Software}, 88(9), 1-29, \doi{10.18637/jss.v088.i09}
#'
#' Schauberger, Gunther and Tutz, Gerhard (2017): Subject-specific modelling
#' of paired comparison data: A lasso-type penalty approach, \emph{Statistical Modelling},
#' 17(3), 223 - 243
#'
#' Schauberger, Gunther, Groll Andreas and Tutz, Gerhard (2018):
#' Analysis of the importance of on-field covariates in the German Bundesliga,
#' \emph{Journal of Applied Statistics}, 45(9), 1561 - 1578
#' @keywords BTLLasso
#' @examples
#'
#' \dontrun{
#' op <- par(no.readonly = TRUE)
#'
#' ##############################
#' ##### Example with simulated data set containing X, Z1 and Z2
#' ##############################
#' data(SimData)
#'
#' ## Specify control argument
#' ## -> allow for object-specific order effects and penalize intercepts
#' ctrl <- ctrl.BTLLasso(penalize.intercepts = TRUE, object.order.effect = TRUE,
#' penalize.order.effect.diffs = TRUE)
#'
#' ## Simple BTLLasso model for tuning parameters lambda
#' m.sim <- BTLLasso(Y = SimData$Y, X = SimData$X, Z1 = SimData$Z1,
#' Z2 = SimData$Z2, control = ctrl)
#' m.sim
#'
#' par(xpd = TRUE)
#' plot(m.sim)
#'
#'
#' ## Cross-validate BTLLasso model for tuning parameters lambda
#' set.seed(1860)
#' m.sim.cv <- cv.BTLLasso(Y = SimData$Y, X = SimData$X, Z1 = SimData$Z1,
#' Z2 = SimData$Z2, control = ctrl)
#' m.sim.cv
#' coef(m.sim.cv)
#' logLik(m.sim.cv)
#'
#' head(predict(m.sim.cv, type="response"))
#' head(predict(m.sim.cv, type="trait"))
#'
#' plot(m.sim.cv, plots_per_page = 4)
#'
#'
#' ## Example for bootstrap intervals for illustration only
#' ## Don't calculate bootstrap intervals with B = 20!!!!
#' set.seed(1860)
#' m.sim.boot <- boot.BTLLasso(m.sim.cv, B = 20, cores = 20)
#' m.sim.boot
#' plot(m.sim.boot, plots_per_page = 4)
#'
#'
#' ##############################
#' ##### Example with small version from GLES data set
#' ##############################
#' data(GLESsmall)
#'
#' ## extract data and center covariates for better interpretability
#' Y <- GLESsmall$Y
#' X <- scale(GLESsmall$X, scale = FALSE)
#' Z1 <- scale(GLESsmall$Z1, scale = FALSE)
#'
#' ## vector of subtitles, containing the coding of the X covariates
#' subs.X <- c('', 'female (1); male (0)')
#'
#' ## Cross-validate BTLLasso model
#' m.gles.cv <- cv.BTLLasso(Y = Y, X = X, Z1 = Z1)
#' m.gles.cv
#'
#' coef(m.gles.cv)
#' logLik(m.gles.cv)
#'
#' head(predict(m.gles.cv, type="response"))
#' head(predict(m.gles.cv, type="trait"))
#'
#' par(xpd = TRUE, mar = c(5,4,4,6))
#' plot(m.gles.cv, subs.X = subs.X, plots_per_page = 4, which = 2:5)
#' paths(m.gles.cv, y.axis = 'L2')
#'
#'
#' ##############################
#' ##### Example with Bundesliga data set
#' ##############################
#' data(Buli1516)
#'
#' Y <- Buli1516$Y5
#'
#' Z1 <- scale(Buli1516$Z1, scale = FALSE)
#'
#' ctrl.buli <- ctrl.BTLLasso(object.order.effect = TRUE,
#' name.order = "Home",
#' penalize.order.effect.diffs = TRUE,
#' penalize.order.effect.absolute = FALSE,
#' order.center = TRUE, lambda2 = 1e-2)
#'
#' set.seed(1860)
#' m.buli <- cv.BTLLasso(Y = Y, Z1 = Z1, control = ctrl.buli)
#' m.buli
#'
#' par(xpd = TRUE, mar = c(5,4,4,6))
#' plot(m.buli)
#'
#'
#' ##############################
#' ##### Example with Topmodel data set
#' ##############################
#' data("Topmodel2007", package = "psychotree")
#'
#' Y.models <- response.BTLLasso(Topmodel2007$preference)
#' X.models <- scale(model.matrix(preference~., data = Topmodel2007)[,-1])
#' rownames(X.models) <- paste0("Subject",1:nrow(X.models))
#' colnames(X.models) <- c("Gender","Age","KnowShow","WatchShow","WatchFinal")
#'
#' set.seed(5)
#' m.models <- cv.BTLLasso(Y = Y.models, X = X.models)
#' plot(m.models, plots_per_page = 6)
#'
#' par(op)
#' }
BTLLasso <- function(Y, X = NULL, Z1 = NULL, Z2 = NULL, lambda = NULL,
control = ctrl.BTLLasso(), trace = TRUE) {
## create design matrix
get.design <- design.BTLLasso(Y = Y, X = X, Z1 = Z1, Z2 = Z2,
control = control)
## exclude missing values
na.response <- is.na(Y$response)
na.design <- colSums(matrix(is.na(rowSums(get.design$design)),
nrow = Y$q)) != 0
na.total <- na.response | na.design
Y$response <- Y$response[!na.total]
Y$first.object <- Y$first.object[!na.total]
Y$second.object <- Y$second.object[!na.total]
Y$subject <- Y$subject[!na.total]
Y$subject.names <- levels(as.factor(Y$subject))
Y$n <- length(Y$subject.names)
get.design$design <- get.design$design[!rep(na.total, each = Y$q),
]
get.design$design.repar <- get.design$design.repar[!rep(na.total, each = Y$q),
]
## create response vector
if(identical(levels(Y$response),c("0","1"))){
response <- as.numeric(Y$response) -1
} else {
response <- cumul.response(Y)
}
## create penalty matrix
get.penalties <- penalties.BTLLasso(Y = Y, X = X, Z1 = Z1,
Z2 = Z2, control = control, get.design = get.design)
## create sequence of tuning parameters if not pre-specified
if(is.null(lambda)){
lambda <- find.lambda(response = response,
design = get.design$design, penalties = get.penalties,
k = Y$k, m = Y$m, control = control, trace = trace)
}
## fit BTLLasso model, with coefficients and degrees of
## freedom
fit <- fit.BTLLasso(response, get.design$design, get.penalties,
lambda, Y$k, Y$m, control, trace)
coefs <- fit$coefs
## reparameterize coefficients, from reference object to
## symmetric side constraint
coefs.repar <- round(expand.coefs(coefs, get.design, Y, name.order = control$name.order),
control$precision)
## calculate log likelihood
logLik <- c()
for (j in 1:nrow(coefs)) {
logLik[j] <- loglik(coefs[j, ], Y$response, get.design$design,
Y$k)
}
coefs <- round(coefs, control$precision)
df <- df.BTLLasso(coefs.repar, get.design, Y$m)
## return stuff
ret.list <- list(coefs = coefs, coefs.repar = coefs.repar,
logLik = logLik, design = get.design, Y = Y, penalty = get.penalties,
response = response, X = X, Z1 = Z1, Z2 = Z2, lambda = lambda,
control = control, df = df)
class(ret.list) <- "BTLLasso"
return(ret.list)
}
## ---- end of file /scratch/gouwar.j/cran-all/cranData/BTLLasso/R/BTLLasso.R ----
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
cumfit <- function(betanew2, epsilon, maxiter, acoefs2, lambda, weight2, control, design2, N, n, q, resp2, index, c, gama, norm, hatmatrix, lambda2, checktheta2) {
.Call(`_BTLLasso_cumfit`, betanew2, epsilon, maxiter, acoefs2, lambda, weight2, control, design2, N, n, q, resp2, index, c, gama, norm, hatmatrix, lambda2, checktheta2)
}
binfit <- function(betanew2, epsilon, maxiter, acoefs2, lambda, weight2, control, design2, N, n, q, resp2, index, c, gama, norm, hatmatrix, lambda2) {
.Call(`_BTLLasso_binfit`, betanew2, epsilon, maxiter, acoefs2, lambda, weight2, control, design2, N, n, q, resp2, index, c, gama, norm, hatmatrix, lambda2)
}
## ---- end of file /scratch/gouwar.j/cran-all/cranData/BTLLasso/R/RcppExports.R ----
bin.fit.Cpp <- function(resp, design, kat, epsilon = 1e-05, penalty,
lambda, max.iter = 200, start = NULL, adaptive = NULL, norm = "L1",
control = list(c = 1e-06, gama = 20, index = NULL), m, hat.matrix = FALSE,
lambda2 = 1e-04) {
N <- length(resp)
q <- kat - 1
n <- N/q
acoefs <- penalty$acoefs
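  ## starting values: 0 for penalized parameters, ML estimates from an
  ## unpenalized binomial GLM for parameters not affected by any penalty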
if (is.null(start)) {
start <- rep(0,ncol(design))
if(any(which(rowSums(abs(acoefs)) == 0))){
start[which(rowSums(abs(acoefs)) == 0)] <- coef(glm.fit(y = resp, x = design[,which(rowSums(abs(acoefs)) == 0)], family = binomial()))
}
if (any(is.na(start))) {
start[which(is.na(start))] <- 0
}
}
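  ## adaptive lasso weights: inverse absolute value of each penalty term
  ## evaluated at the unpenalized estimates (zero terms are replaced by epsilon)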
if (is.null(adaptive)) {
weight <- as.vector(rep(1, ncol(acoefs)))
} else {
weight <- abs(t(acoefs) %*% adaptive)
if (any(weight == 0))
weight[which(weight == 0)] <- epsilon
weight <- as.vector(weight^(-1))
}
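  ## if weight.penalties = TRUE, rescale the weights per model component
  ## (order effects, intercepts, X, Z1, Z2) to balance the penalties across components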
pen.nums <- c(penalty$numpen.order, penalty$numpen.intercepts,
penalty$numpen.X, penalty$numpen.Z1, penalty$numpen.Z2)
if (sum(pen.nums) > 0) {
if (penalty$weight.penalties) {
pen.nums.scaled <- c(penalty$numpen.order/penalty$n.order,
penalty$numpen.intercepts/(m - 1), penalty$numpen.X/penalty$p.X/(m -
1), penalty$numpen.Z1/penalty$p.Z1/m, penalty$numpen.Z2/penalty$p.Z2)
weight <- weight/rep(pen.nums.scaled, pen.nums)
}
}
beta.old <- beta.new <- start
diff <- 1
delta.new <- delta.old <- 1
rcpp.out <- binfit(matrix(beta.new, ncol = 1), epsilon, max.iter,
acoefs, lambda, matrix(weight, ncol = 1), control, design,
N, n, q, matrix(resp, ncol = 1), control$index, control$c,
control$gama, norm, as.numeric(hat.matrix), lambda2)
beta.new <- rcpp.out$beta.new
start <- rcpp.out$start
df <- rcpp.out$df
df2 <- rcpp.out$df2
rownames(beta.new) <- names(start)
if (norm == "grouped") {
beta.pen <- matrix(beta.new[rowSums(acoefs) != 0], nrow = m -
1)
norm.col <- sqrt(colSums((beta.pen)^2))/(m - 1)
beta.pen[, norm.col < epsilon] <- 0
beta.new[rowSums(acoefs) != 0] <- c(beta.pen)
}
return(list(coefficients = beta.new, start = start, df = df, weight = weight, df2 = df2))
}
## ---- end of file /scratch/gouwar.j/cran-all/cranData/BTLLasso/R/bin.fit.Cpp.R ----
#' Bootstrap function for BTLLasso
#'
#' Performs bootstrap for BTLLasso to get bootstrap intervals. Main
#' input argument is a \code{cv.BTLLasso} object. The bootstrap is (recommended to be)
#' performed on the level of the cross-validation. Therefore, within every bootstrap iteration
#' the complete cross-validation procedure from the \code{cv.BTLLasso} object
#' is performed. A \code{\link[=plot.boot.BTLLasso]{plot}} function can be applied
#' to the resulting \code{boot.BTLLasso} object to plot bootstrap intervals.
#'
#' The method can be highly time-consuming for high numbers of tuning
#' parameters, high numbers of folds in the cross-validation and a high number of
#' bootstrap iterations B. The number of tuning parameters can be reduced by
#' specifying \code{lambda} in the \code{boot.BTLLasso} function. You can check whether
#' the range of prespecified tuning parameters was too small by looking at the
#' output values \code{lambda.max.alert} and \code{lambda.min.alert}. They are
#' set to \code{TRUE} if the smallest or largest of the specified lambda values was
#' chosen in at least one bootstrap iteration.
#'
#' @param model A \code{cv.BTLLasso} object.
#' @param B Number of bootstrap iterations.
#' @param lambda Vector of tuning parameters. If not specified (default),
#' tuning parameters from the \code{cv.BTLLasso} object are used. See also details.
#' @param cores Number of cores for (parallelized) computation.
#' @param trace Should the trace of the BTLLasso algorithm be printed?
#' @param trace.cv Should the trace of the cross-validation be printed? If
#' parallelized, the trace is not working on Windows machines.
#' @param with.cv Should cross-validation be performed separately on every
#' bootstrap sample? If \code{FALSE}, the tuning parameter is fixed to the value chosen
#' in the \code{cv.BTLLasso} object.
#' @return \item{cv.model}{\code{cv.BTLLasso} object} \item{estimatesB}{Matrix
#' containing all B estimates for original parameters. For internal use.}
#' \item{estimatesBrepar}{Matrix containing all B estimates for reparameterized
#' (symmetric side constraints) parameters.} \item{lambdaB}{vector of used
#' tuning parameters} \item{lambda.max.alert}{Was the largest value of lambda chosen
#' in at least one bootstrap iteration?} \item{lambda.min.alert}{Was the
#' smallest value of lambda chosen in at least one bootstrap iteration?} \item{number.na}{Total number
#' of failed bootstrap iterations.}
#' @author Gunther Schauberger\cr \email{gunther.schauberger@@tum.de}
#' @seealso \code{\link{BTLLasso}}, \code{\link{cv.BTLLasso}},
#' \code{\link{plot.boot.BTLLasso}}
#' @references Schauberger, Gunther and Tutz, Gerhard (2019): BTLLasso - A Common Framework and Software
#' Package for the Inclusion and Selection of Covariates in Bradley-Terry Models, \emph{Journal of
#' Statistical Software}, 88(9), 1-29, \doi{10.18637/jss.v088.i09}
#'
#' Schauberger, Gunther and Tutz, Gerhard (2017): Subject-specific modelling
#' of paired comparison data: A lasso-type penalty approach, \emph{Statistical Modelling},
#' 17(3), 223 - 243
#'
#' Schauberger, Gunther, Groll Andreas and Tutz, Gerhard (2018):
#' Analysis of the importance of on-field covariates in the German Bundesliga,
#' \emph{Journal of Applied Statistics}, 45(9), 1561 - 1578
#' @keywords BTLLasso interval bootstrap
#' @examples
#'
#' \dontrun{
#' op <- par(no.readonly = TRUE)
#'
#' ##############################
#' ##### Example with simulated data set containing X, Z1 and Z2
#' ##############################
#' data(SimData)
#'
#' ## Specify control argument
#' ## -> allow for object-specific order effects and penalize intercepts
#' ctrl <- ctrl.BTLLasso(penalize.intercepts = TRUE, object.order.effect = TRUE,
#' penalize.order.effect.diffs = TRUE)
#'
#' ## Simple BTLLasso model for tuning parameters lambda
#' m.sim <- BTLLasso(Y = SimData$Y, X = SimData$X, Z1 = SimData$Z1,
#' Z2 = SimData$Z2, control = ctrl)
#' m.sim
#'
#' par(xpd = TRUE)
#' plot(m.sim)
#'
#'
#' ## Cross-validate BTLLasso model for tuning parameters lambda
#' set.seed(1860)
#' m.sim.cv <- cv.BTLLasso(Y = SimData$Y, X = SimData$X, Z1 = SimData$Z1,
#' Z2 = SimData$Z2, control = ctrl)
#' m.sim.cv
#' coef(m.sim.cv)
#' logLik(m.sim.cv)
#'
#' head(predict(m.sim.cv, type="response"))
#' head(predict(m.sim.cv, type="trait"))
#'
#' plot(m.sim.cv, plots_per_page = 4)
#'
#'
#' ## Example for bootstrap intervals for illustration only
#' ## Don't calculate bootstrap intervals with B = 20!!!!
#' set.seed(1860)
#' m.sim.boot <- boot.BTLLasso(m.sim.cv, B = 20, cores = 20)
#' m.sim.boot
#' plot(m.sim.boot, plots_per_page = 4)
#'
#'
#' ##############################
#' ##### Example with small version from GLES data set
#' ##############################
#' data(GLESsmall)
#'
#' ## extract data and center covariates for better interpretability
#' Y <- GLESsmall$Y
#' X <- scale(GLESsmall$X, scale = FALSE)
#' Z1 <- scale(GLESsmall$Z1, scale = FALSE)
#'
#' ## vector of subtitles, containing the coding of the X covariates
#' subs.X <- c('', 'female (1); male (0)')
#'
#' ## Cross-validate BTLLasso model
#' m.gles.cv <- cv.BTLLasso(Y = Y, X = X, Z1 = Z1)
#' m.gles.cv
#'
#' coef(m.gles.cv)
#' logLik(m.gles.cv)
#'
#' head(predict(m.gles.cv, type="response"))
#' head(predict(m.gles.cv, type="trait"))
#'
#' par(xpd = TRUE, mar = c(5,4,4,6))
#' plot(m.gles.cv, subs.X = subs.X, plots_per_page = 4, which = 2:5)
#' paths(m.gles.cv, y.axis = 'L2')
#'
#'
#' ##############################
#' ##### Example with Bundesliga data set
#' ##############################
#' data(Buli1516)
#'
#' Y <- Buli1516$Y5
#'
#' Z1 <- scale(Buli1516$Z1, scale = FALSE)
#'
#' ctrl.buli <- ctrl.BTLLasso(object.order.effect = TRUE,
#' name.order = "Home",
#' penalize.order.effect.diffs = TRUE,
#' penalize.order.effect.absolute = FALSE,
#' order.center = TRUE, lambda2 = 1e-2)
#'
#' set.seed(1860)
#' m.buli <- cv.BTLLasso(Y = Y, Z1 = Z1, control = ctrl.buli)
#' m.buli
#'
#' par(xpd = TRUE, mar = c(5,4,4,6))
#' plot(m.buli)
#'
#'
#' ##############################
#' ##### Example with Topmodel data set
#' ##############################
#' data("Topmodel2007", package = "psychotree")
#'
#' Y.models <- response.BTLLasso(Topmodel2007$preference)
#' X.models <- scale(model.matrix(preference~., data = Topmodel2007)[,-1])
#' rownames(X.models) <- paste0("Subject",1:nrow(X.models))
#' colnames(X.models) <- c("Gender","Age","KnowShow","WatchShow","WatchFinal")
#'
#' set.seed(5)
#' m.models <- cv.BTLLasso(Y = Y.models, X = X.models)
#' plot(m.models, plots_per_page = 6)
#'
#' par(op)
#' }
boot.BTLLasso <- function(model, B = 500, lambda = NULL, cores = 1,
trace = TRUE, trace.cv = TRUE, with.cv = TRUE){
cv.crit <- model$cv.crit
design <- model$design$design
response <- model$response
penalty <- model$penalty
control <- model$control
q <- model$Y$q
k <- model$Y$k
n.design <- nrow(design)/q
m <- model$Y$m
folds <- model$folds
if (is.null(lambda)) {
lambda <- model$lambda
}
boot.fun <- function(b) {
cat("Bootstrap sample:", b, "out of", B, "\n")
sample.b <- sample(x = 1:n.design, size = n.design, replace = TRUE)
id.vec <- c(t(matrix(1:nrow(design), ncol = q, byrow = TRUE)[sample.b,
]))
design.b <- design[id.vec, ]
response.b <- response[id.vec]
if(with.cv){
model.b <- try(fit.cv.BTLLasso(response = response.b,
design = design.b, penalty = penalty, q = q, m = m,
folds = folds, lambda = lambda, control = control,
cores = 1, trace = trace, trace.cv = trace.cv, cv.crit = cv.crit))
if (inherits(model.b, "try-error")) {
coef.b <- NA
lambda.b <- NA
} else {
coef.b <- model.b$coefs[which.min(model.b$criterion),
]
lambda.b <- lambda[which.min(model.b$criterion)]
}
}else{
lambda.b <- model$lambda[which.min(model$criterion)]
model.b <- try(fit.BTLLasso(response = response.b,
design = design.b, penalty = penalty,
lambda = lambda.b, k = k, m = m,
control = control, trace = trace))
if (inherits(model.b, "try-error")) {
coef.b <- NA
lambda.b <- NA
} else {
coef.b <- c(model.b$coefs)
}
}
return(list(coef.b = coef.b, lambda.b = lambda.b))
}
if (cores > 1) {
cl <- makeCluster(cores, outfile = "")
clusterSetRNGStream(cl, NULL)
clusterExport(cl, varlist = c("design", "response", "penalty",
"q", "m", "control", "folds", "lambda", "cores",
"B", "n.design", "trace", "trace.cv", "cv.crit"),
envir = sys.frame(sys.nframe()))
outputB <- parLapply(cl, seq(B), boot.fun)
stopCluster(cl)
} else {
outputB <- lapply(seq(B), boot.fun)
}
estimatesB <- matrix(0, ncol = ncol(design), nrow = B)
lambdaB <- c()
for (b in 1:B) {
if (any(is.na(outputB[[b]]$coef.b))) {
estimatesB[b, ] <- rep(NA, ncol(estimatesB))
lambdaB[b] <- NA
} else {
estimatesB[b, ] <- outputB[[b]]$coef.b
lambdaB[b] <- outputB[[b]]$lambda.b
}
}
number.na <- sum(rowSums(is.na(estimatesB))>0)
if(number.na>0){
warning(number.na, " out of ", B, " bootstrap samples did not converge!")
}
estimatesBrepar <- round(expand.coefs(estimatesB, model$design,
model$Y, symmetric = TRUE, model$control$name.order), model$control$precision)
estimatesB <- round(estimatesB, model$control$precision)
# conf.ints <- apply(estimatesB, 2, quantile, probs = quantiles,
# type = 1, na.rm = TRUE)
# conf.ints.repar <- apply(estimatesBrepar, 2, quantile, probs = quantiles,
# type = 1, na.rm = TRUE)
  lambda.min.alert <- any(lambdaB == min(lambda), na.rm = TRUE)
  lambda.max.alert <- any(lambdaB == max(lambda), na.rm = TRUE)
returns <- list(cv.model = model, estimatesB = estimatesB,
estimatesBrepar = estimatesBrepar, lambdaB = lambdaB,
lambda.max.alert = lambda.max.alert, lambda.min.alert = lambda.min.alert,
number.na = number.na)
class(returns) <- "boot.BTLLasso"
return(returns)
}
## ---- end of file /scratch/gouwar.j/cran-all/cranData/BTLLasso/R/boot.BTLLasso.R ----
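## S3 extractor methods: coef() returns the reparameterized coefficients
## (symmetric side constraint); for cv.BTLLasso objects only the row at the
## cross-validation optimum is returned. logLik() returns the log-likelihood
## at the optimum together with the corresponding degrees of freedom.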
coef.cv.BTLLasso <- function(object, ...){
object$coefs.repar[which.min(object$criterion),]
}
coef.BTLLasso <- function(object, ...){
object$coefs.repar
}
logLik.cv.BTLLasso <- function(object, ...){
ll <- object$logLik[which.min(object$criterion)]
class(ll) <- "logLik"
attr(ll, "df") <- object$df[which.min(object$criterion)]
ll
}
## ---- end of file /scratch/gouwar.j/cran-all/cranData/BTLLasso/R/coef.cv.BTLLasso.R ----
#### function to create complete design matrix
create.design <- function(X, Z1, Z2, first.object, second.object,
m, subject, control, order.Z1, order.Z2, with.order) {
design.X <- design.X.repar <- design.Z1 <- design.Z2 <- design.order <- c()
I <- m * (m - 1)/2
n <- length(first.object)
design.help <- matrix(0, ncol = m, nrow = n)
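  ## incidence matrix of the comparisons: +1 for the first-named object,
  ## -1 for the second-named object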
for (i in 1:n) {
design.help[i, first.object[i]] <- 1
design.help[i, second.object[i]] <- -1
}
if (!is.null(X)) {
design.X <- create.design.X(design.help[, -m], X[subject,,drop = FALSE])
design.X.repar <- create.design.X(design.help, X[subject,,drop = FALSE])
}
if (!is.null(Z1)) {
p.Z1 <- ncol(Z1)/m
index.Z1 <- (0:(m - 1)) * (m) + (1:(m))
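    ## from the full expansion (covariate of object j times design column of
    ## object l), keep only the matching combinations j = l via index.Z1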
for (pp in 1:p.Z1) {
Z1.help <- Z1[, ((pp - 1) * m) + (1:m)]
design.help.Z1 <- create.design.X(design.help, Z1.help[subject,
order.Z1[, pp]])
design.Z1 <- cbind(design.Z1, design.help.Z1[, index.Z1])
}
}
if (!is.null(Z2)) {
p.Z2 <- ncol(Z2)/m
for (pp in 1:p.Z2) {
Z2.help <- rowSums(design.help * (Z2[subject, ((pp -
1) * m) + (1:m)])[, order.Z2[, pp]])
design.Z2 <- cbind(design.Z2, Z2.help)
}
}
### inclusion of order (home) effects
order.effect <- control$order.effect
object.order.effect <- control$object.order.effect
if (order.effect | object.order.effect) {
if (object.order.effect) {
design.order <- c(design.help)
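      ## object-specific order effects: dummy coding (1 for the first-named object)
      ## or, if centered, both objects of a comparison receive weight 0.5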
if (control$order.center) {
design.order[design.order == -1] <- 1
design.order <- design.order * 0.5
} else {
design.order[design.order == -1] <- 0
}
design.order <- matrix(design.order, ncol = m)
} else {
design.order <- matrix(1, nrow = n)
}
design.order[!with.order,] <- 0
}
## inclusion of intercepts
if (control$include.intercepts) {
design.help.repar <- design.help
design.help <- design.help[, -m]
} else {
design.help <- design.help.repar <- c()
}
design <- cbind(design.order, design.help, design.X, design.Z1,
design.Z2)
design.repar <- cbind(design.order, design.help.repar, design.X.repar, design.Z1,
design.Z2)
return(list(design = design, design.repar = design.repar))
}
create.design.X <- function(design.help, X) {
design <- matrix(c(apply(X, 2, function(xx) {
xx * c(design.help)
})), nrow = nrow(design.help))
return(design)
}
## ---- end of file /scratch/gouwar.j/cran-all/cranData/BTLLasso/R/create.design.R ----
cum.fit.Cpp <- function(resp, design, kat, epsilon = 1e-05, penalty,
lambda, max.iter = 200, start = NULL, adaptive = NULL, norm = "L1",
control = list(c = 1e-06, gama = 20, index = NULL), m, hat.matrix = FALSE,
lambda2 = 1e-04) {
N <- length(resp)
q <- kat - 1
n <- N/q
acoefs <- penalty$acoefs
if (is.null(start)) {
start <- rep(0,ncol(design))
if(any(which(rowSums(abs(acoefs)) == 0))){
start[which(rowSums(abs(acoefs)) == 0)] <- coef(glm.fit(y = resp, x = design[,which(rowSums(abs(acoefs)) == 0)], family = binomial()))
}
if (any(is.na(start))) {
start[which(is.na(start))] <- 0
}
}
if (is.null(adaptive)) {
weight <- as.vector(rep(1, ncol(acoefs)))
} else {
weight <- abs(t(acoefs) %*% adaptive)
if (any(weight == 0))
weight[which(weight == 0)] <- epsilon
weight <- as.vector(weight^(-1))
}
pen.nums <- c(penalty$numpen.order, penalty$numpen.intercepts,
penalty$numpen.X, penalty$numpen.Z1, penalty$numpen.Z2)
if (sum(pen.nums) > 0) {
if (penalty$weight.penalties) {
pen.nums.scaled <- c(penalty$numpen.order/penalty$n.order,
penalty$numpen.intercepts/(m - 1), penalty$numpen.X/penalty$p.X/(m -
1), penalty$numpen.Z1/penalty$p.Z1/m, penalty$numpen.Z2/penalty$p.Z2)
weight <- weight/rep(pen.nums.scaled, pen.nums)
}
}
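  ## the threshold starting values of the cumulative model have to be increasing
  ## (up to the fixed 0); if not, fall back to an equidistant negative sequence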
check.theta <- c(start[1:floor(q/2)], 0)
if (!all(diff(check.theta) > 0)) {
start[1:floor(q/2)] <- check.theta[1:floor(q/2)] <- (floor(q/2):1) *
(-0.5)
}
rcpp.out <- cumfit(matrix(start, ncol = 1), epsilon, max.iter,
acoefs, lambda, matrix(weight, ncol = 1), control, design,
N, n, q, matrix(resp, ncol = 1), control$index, control$c,
control$gama, norm, as.numeric(hat.matrix), lambda2,
matrix(check.theta, ncol = 1))
beta.new <- rcpp.out$beta.new
df <- rcpp.out$df
rownames(beta.new) <- names(start)
return(list(coefficients = beta.new, start = start, df = df, weight = weight))
}
## ---- end of file /scratch/gouwar.j/cran-all/cranData/BTLLasso/R/cum.fit.Cpp.R ----
#' Cross-validation function for BTLLasso
#'
#' Performs cross-validation of BTLLasso, including the BTLLasso algorithm for
#' the whole data set.
#'
#' Cross-validation can be performed in parallel; the default is 10-fold
#' cross-validation on 10 cores. Output is a cv.BTLLasso object which can then be
#' used for bootstrap intervals using \code{\link{boot.BTLLasso}}.
#'
#' @param Y A \code{response.BTLLasso} object created by
#' \code{\link{response.BTLLasso}}.
#' @param X Matrix containing all \bold{subject-specific covariates} that are
#' to be included with \bold{object-specific effects}. One row represents one
#' subject, one column represents one covariate. X has to be standardized.
#' @param Z1 Matrix containing all \bold{object-subject-specific covariates}
#' that are to be included with \bold{object-specific effects}. One row
#' represents one subject, one column represents one combination between
#' covariate and object. Column names have to follow the scheme
#' 'firstvar.object1',...,'firstvar.objectm',...,'lastvar.objectm'. The object
#' names 'object1',...,'objectm' have to be identical to the object names used
#' in the \code{response.BTLLasso} object \code{Y}. The variable names and the
#' object names have to be separated by '.'. The rownames of the matrix',
#' Z.name, 'have to be equal to the subjects specified in the response object.
#' Z1 has to be standardized.
#' @param Z2 Matrix containing all \bold{object-subject-specific covariates or
#' object-specific covariates} that are to be included with \bold{global
#' effects}. One row represents one subject, one column represents one
#' combination between covariate and object. Column names have to follow the
#' scheme 'firstvar.object1',...,'firstvar.objectm',...,'lastvar.objectm'. The
#' object names 'object1',...,'objectm' have to be identical to the object
#' names used in the \code{response.BTLLasso} object \code{Y}. The variable
#' names and the object names have to be separated by '.'. The rownames of the
#' matrix Z2 have to be equal to the subjects specified in the response
#' object. Z2 has to be standardized.
#' @param folds Number of folds for the cross-validation. Default is 10.
#' @param lambda Vector of tuning parameters. If \code{NULL}, automatically a grid
#' of tuning parameters is created.
#' @param control Function for control arguments, mostly for internal use. See
#' also \code{\link{ctrl.BTLLasso}}.
#' @param cores Number of cores used for (parallelized) cross-validation. By
#' default, equal to the number of folds.
#' @param trace Should the trace of the BTLLasso algorithm be printed?
#' @param trace.cv Should the trace of the cross-validation be printed? If
#' parallelized, the trace is not working on Windows machines.
#' @param cv.crit Which criterion should be used to evaluate cross-validation. Choice is
#' between Ranked probability score and deviance. Only \code{RPS} considers the ordinal
#' structure of the response.
#' @return
#' \item{coefs}{Matrix containing all (original) coefficients, one row
#' per tuning parameter, one column per coefficient.}
#' \item{coefs.repar}{Matrix
#' containing all reparameterized (for symmetric side constraint) coefficients,
#' one row per tuning parameter, one column per coefficient.}
#' \item{logLik}{Vector of log-likelihoods, one value per tuning parameter.}
#' \item{design}{List containing design matrix and several additional information like,
#' e.g., number and names of covariates.}
#' \item{Y}{Response object.}
#' \item{penalty}{List containing all penalty matrices and some further information on penalties}
#' \item{response}{Vector containing 0-1 coded
#' response.}
#' \item{X}{X matrix containing subject-specific covariates.}
#' \item{Z1}{Z1 matrix containing subject-object-specific covariates.}
#' \item{Z2}{Z2 matrix containing (subject)-object-specific covariates.}
#' \item{lambda}{Vector of tuning parameters.}
#' \item{control}{Control argument, specified by \code{\link{ctrl.BTLLasso}}.}
#' \item{criterion}{Vector containing values of the chosen cross-validation criterion,
#' one value per tuning parameter.}
#' \item{folds}{Number of folds in cross validation.}
#' \item{cv.crit}{Cross-validation criterion, either \code{RPS} or \code{Deviance}.}
#' \item{df}{Vector containing degrees of freedom for all models along the grid
#' of tuning parameters.}
#' @author Gunther Schauberger\cr \email{gunther.schauberger@@tum.de}
#' @seealso \code{\link{BTLLasso}}, \code{\link{boot.BTLLasso}}, \code{\link{ctrl.BTLLasso}},
#' \code{\link{plot.BTLLasso}}, \code{\link{paths}}, \code{\link{print.cv.BTLLasso}},
#' \code{\link{predict.BTLLasso}}, \code{\link{coef}}
#' @references Schauberger, Gunther and Tutz, Gerhard (2019): BTLLasso - A Common Framework and Software
#' Package for the Inclusion and Selection of Covariates in Bradley-Terry Models, \emph{Journal of
#' Statistical Software}, 88(9), 1-29, \doi{10.18637/jss.v088.i09}
#'
#' Schauberger, Gunther and Tutz, Gerhard (2017): Subject-specific modelling
#' of paired comparison data: A lasso-type penalty approach, \emph{Statistical Modelling},
#' 17(3), 223 - 243
#'
#' Schauberger, Gunther, Groll Andreas and Tutz, Gerhard (2018):
#' Analysis of the importance of on-field covariates in the German Bundesliga,
#' \emph{Journal of Applied Statistics}, 45(9), 1561 - 1578
#' @keywords BTLLasso cross validation
#' @examples
#'
#' \dontrun{
#' op <- par(no.readonly = TRUE)
#'
#' ##############################
#' ##### Example with simulated data set containing X, Z1 and Z2
#' ##############################
#' data(SimData)
#'
#' ## Specify control argument
#' ## -> allow for object-specific order effects and penalize intercepts
#' ctrl <- ctrl.BTLLasso(penalize.intercepts = TRUE, object.order.effect = TRUE,
#' penalize.order.effect.diffs = TRUE)
#'
#' ## Simple BTLLasso model for tuning parameters lambda
#' m.sim <- BTLLasso(Y = SimData$Y, X = SimData$X, Z1 = SimData$Z1,
#' Z2 = SimData$Z2, control = ctrl)
#' m.sim
#'
#' par(xpd = TRUE)
#' plot(m.sim)
#'
#'
#' ## Cross-validate BTLLasso model for tuning parameters lambda
#' set.seed(1860)
#' m.sim.cv <- cv.BTLLasso(Y = SimData$Y, X = SimData$X, Z1 = SimData$Z1,
#' Z2 = SimData$Z2, control = ctrl)
#' m.sim.cv
#' coef(m.sim.cv)
#' logLik(m.sim.cv)
#'
#' head(predict(m.sim.cv, type="response"))
#' head(predict(m.sim.cv, type="trait"))
#'
#' plot(m.sim.cv, plots_per_page = 4)
#'
#'
#' ## Example for bootstrap intervals for illustration only
#' ## Don't calculate bootstrap intervals with B = 20!!!!
#' set.seed(1860)
#' m.sim.boot <- boot.BTLLasso(m.sim.cv, B = 20, cores = 20)
#' m.sim.boot
#' plot(m.sim.boot, plots_per_page = 4)
#'
#'
#' ##############################
#' ##### Example with small version from GLES data set
#' ##############################
#' data(GLESsmall)
#'
#' ## extract data and center covariates for better interpretability
#' Y <- GLESsmall$Y
#' X <- scale(GLESsmall$X, scale = FALSE)
#' Z1 <- scale(GLESsmall$Z1, scale = FALSE)
#'
#' ## vector of subtitles, containing the coding of the X covariates
#' subs.X <- c('', 'female (1); male (0)')
#'
#' ## Cross-validate BTLLasso model
#' m.gles.cv <- cv.BTLLasso(Y = Y, X = X, Z1 = Z1)
#' m.gles.cv
#'
#' coef(m.gles.cv)
#' logLik(m.gles.cv)
#'
#' head(predict(m.gles.cv, type="response"))
#' head(predict(m.gles.cv, type="trait"))
#'
#' par(xpd = TRUE, mar = c(5,4,4,6))
#' plot(m.gles.cv, subs.X = subs.X, plots_per_page = 4, which = 2:5)
#' paths(m.gles.cv, y.axis = 'L2')
#'
#'
#' ##############################
#' ##### Example with Bundesliga data set
#' ##############################
#' data(Buli1516)
#'
#' Y <- Buli1516$Y5
#'
#' Z1 <- scale(Buli1516$Z1, scale = FALSE)
#'
#' ctrl.buli <- ctrl.BTLLasso(object.order.effect = TRUE,
#' name.order = "Home",
#' penalize.order.effect.diffs = TRUE,
#' penalize.order.effect.absolute = FALSE,
#' order.center = TRUE, lambda2 = 1e-2)
#'
#' set.seed(1860)
#' m.buli <- cv.BTLLasso(Y = Y, Z1 = Z1, control = ctrl.buli)
#' m.buli
#'
#' par(xpd = TRUE, mar = c(5,4,4,6))
#' plot(m.buli)
#'
#'
#' ##############################
#' ##### Example with Topmodel data set
#' ##############################
#' data("Topmodel2007", package = "psychotree")
#'
#' Y.models <- response.BTLLasso(Topmodel2007$preference)
#' X.models <- scale(model.matrix(preference~., data = Topmodel2007)[,-1])
#' rownames(X.models) <- paste0("Subject",1:nrow(X.models))
#' colnames(X.models) <- c("Gender","Age","KnowShow","WatchShow","WatchFinal")
#'
#' set.seed(5)
#' m.models <- cv.BTLLasso(Y = Y.models, X = X.models)
#' plot(m.models, plots_per_page = 6)
#'
#' par(op)
#' }
cv.BTLLasso <- function(Y, X = NULL, Z1 = NULL, Z2 = NULL, folds = 10,
lambda = NULL, control = ctrl.BTLLasso(), cores = folds, trace = TRUE,
trace.cv = TRUE, cv.crit = c("RPS", "Deviance")) {
cv.crit <- match.arg(cv.crit)
get.design <- design.BTLLasso(Y = Y, X = X, Z1 = Z1, Z2 = Z2,
control = control)
## exclude missing values
na.response <- is.na(Y$response)
na.design <- colSums(matrix(is.na(rowSums(get.design$design)),
nrow = Y$q)) != 0
na.total <- na.response | na.design
Y$response <- Y$response[!na.total]
Y$first.object <- Y$first.object[!na.total]
Y$second.object <- Y$second.object[!na.total]
Y$subject <- Y$subject[!na.total]
Y$subject.names <- levels(as.factor(Y$subject))
Y$n <- length(Y$subject.names)
get.design$design <- get.design$design[!rep(na.total, each = Y$q),]
get.design$design.repar <- get.design$design.repar[!rep(na.total, each = Y$q),]
## create response vector
if(identical(levels(Y$response),c("0","1"))){
response <- as.numeric(Y$response) -1
} else {
response <- cumul.response(Y)
}
get.penalties <- penalties.BTLLasso(Y = Y, X = X, Z1 = Z1,
Z2 = Z2, control = control, get.design = get.design)
## create sequence of tuning parameters if not pre-specified
if(is.null(lambda)){
lambda <- find.lambda(response = response,
design = get.design$design, penalties = get.penalties,
k = Y$k, m = Y$m, control = control, trace = trace)
}
fit <- fit.cv.BTLLasso(response = response, design = get.design$design,
penalty = get.penalties, q = Y$q, m = Y$m, folds = folds,
lambda = lambda, control = control, cores = cores, trace = trace,
trace.cv = trace.cv, cv.crit = cv.crit)
coefs <- fit$coefs
## reparameterize coefficients, from reference object to
## symmetric side constraint
coefs.repar <- round(expand.coefs(coefs, get.design, Y, name.order = control$name.order),
control$precision)
## calculate log likelihood
logLik <- c()
for (j in 1:nrow(coefs)) {
logLik[j] <- loglik(coefs[j, ], Y$response, get.design$design,
Y$k)
}
coefs <- round(coefs, control$precision)
df <- df.BTLLasso(coefs.repar, get.design, Y$m)
ret.list <- list(coefs = coefs, coefs.repar = coefs.repar,
logLik = logLik, design = get.design, Y = Y, penalty = get.penalties,
response = response, X = X, Z1 = Z1, Z2 = Z2, lambda = lambda,
control = control, criterion = fit$criterion, folds = folds,
cv.crit = cv.crit, df = df)
class(ret.list) <- c("cv.BTLLasso", "BTLLasso")
return(ret.list)
}
|
/scratch/gouwar.j/cran-all/cranData/BTLLasso/R/cv.BTLLasso.R
|
design.BTLLasso <- function(Y, X = NULL, Z1 = NULL, Z2 = NULL,
control = ctrl.BTLLasso(), only.first = FALSE, sd.X = NULL,
sd.Z1 = NULL, sd.Z2 = NULL) {
#### get all arguments from responseBTLLasso object
y.ord <- Y$response
first.object <- Y$first.object
second.object <- Y$second.object
if(only.first){
second.object <- rep(NULL, length(second.object))
}
subject <- Y$subject
withS <- Y$withS
subject.names <- Y$subject.names
object.names <- Y$object.names
with.order <- Y$with.order
n <- Y$n
m <- Y$m
k <- Y$k
q <- Y$q
## get some control arguments
include.intercepts <- control$include.intercepts
order.effect <- control$order.effect
object.order.effect <- control$object.order.effect
#### check X, Z1 and Z2 and initialize p.X, p.Z1 and p.Z2
withX <- withZ1 <- withZ2 <- FALSE
p.X <- p.Z1 <- p.Z2 <- 0
par.names.X <- par.names.X.repar <- par.names.Z1 <- par.names.Z2 <- c()
vars.X <- vars.Z1 <- vars.Z2 <- c()
acoefs.X <- acoefs.Z1 <- acoefs.Z2 <- c()
order.Z1 <- order.Z2 <- order(object.names)
if (!is.null(X)) {
withX <- TRUE
p.X <- ncol(X)
vars.X <- colnames(X)
if (!is.matrix(X))
stop("X has to be a matrix")
if (control$scale) {
if(is.null(sd.X)){
sd.X <- apply(X, 2, sd, na.rm = TRUE)
}
X <- t(t(X)/sd.X)
}
par.names.X <- paste(rep(vars.X, each = m - 1), object.names[1:(m -
1)], sep = ".")
par.names.X.repar <- paste(rep(vars.X, each = m), object.names[1:m], sep = ".")
}
if (!is.null(Z1)) {
withZ1 <- TRUE
p.Z1 <- ncol(Z1)/m
if (ncol(Z1)%%m != 0)
stop("Number of columns of Z1 has to be a multiple of the number of objects")
if (!is.matrix(Z1))
stop("Z1 has to be a matrix")
if (control$scale) {
Z1.help <- matrix(c(Z1), ncol = p.Z1)
if(is.null(sd.Z1)){
sd.Z1 <- apply(Z1.help, 2, sd, na.rm = TRUE)
}
Z1 <- matrix(c(t(t(Z1.help)/sd.Z1)), ncol = ncol(Z1),
dimnames = list(rownames(Z1), colnames(Z1)))
Z1.help <- NULL
}
check.Z1 <- check(Z = Z1, object.names = object.names,
subject)
vars.Z1 <- check.Z1$vars.Z
order.Z1 <- check.Z1$order.Z
par.names.Z1 <- paste(rep(vars.Z1, each = m), object.names[1:m],
sep = ".")
}
if (!is.null(Z2)) {
withZ2 <- TRUE
p.Z2 <- ncol(Z2)/m
check.Z2 <- check(Z = Z2, object.names = object.names,
subject)
vars.Z2 <- check.Z2$vars.Z
order.Z2 <- check.Z2$order.Z
if (control$scale) {
if (all(apply(Z2, 2, var) == 0)) {
Z2.help <- matrix(c(Z2[1, ]), ncol = p.Z2)
if(is.null(sd.Z2)){
sd.Z2 <- apply(Z2.help, 2, sd, na.rm = TRUE)
}
Z2 <- Z2/rep(sd.Z2, each = m)
Z2.help <- NULL
} else {
Z2.help <- matrix(c(Z2), ncol = p.Z2)
if(is.null(sd.Z2)){
sd.Z2 <- apply(Z2.help, 2, sd, na.rm = TRUE)
}
Z2 <- matrix(c(t(t(Z2.help)/sd.Z2)), ncol = ncol(Z2),
dimnames = list(rownames(Z2), colnames(Z2)))
Z2.help <- NULL
}
}
par.names.Z2 <- vars.Z2
}
## number of intercepts
n.intercepts <- 0
par.names.intercepts <- par.names.intercepts.repar <- c()
if (include.intercepts) {
n.intercepts <- m - 1
par.names.intercepts <- object.names[1:(m - 1)]
par.names.intercepts.repar <- object.names
}
## number of order effects
n.order <- 0
par.names.order <- c()
if (order.effect) {
n.order <- 1
par.names.order <- control$name.order
}
if (object.order.effect) {
n.order <- m
par.names.order <- paste(control$name.order, object.names,
sep = ".")
}
#### make design matrix design matrix
design <- create.design(X, Z1, Z2, first.object, second.object,
m, subject, control, order.Z1, order.Z2, with.order)
design.repar <- design$design.repar
design <- design$design
## enlarge design matrix so that it fits to the dichotomized
## cumulative response
design <- t(matrix(rep(c(design), each = q), nrow = ncol(design),
byrow = TRUE))
colnames(design) <- c(par.names.order, par.names.intercepts,
par.names.X, par.names.Z1, par.names.Z2)
design.repar <- t(matrix(rep(c(design.repar), each = q), nrow = ncol(design.repar),
byrow = TRUE))
colnames(design.repar) <- c(par.names.order, par.names.intercepts.repar,
par.names.X.repar, par.names.Z1, par.names.Z2)
n.theta <- floor(q/2)
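  ## for ordinal responses (k > 2), add design columns for the thresholds;
  ## thresholds are treated symmetrically, so only floor(q/2) free parameters
  ## are needed (each column carries +1/-1 for the mirrored categories)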
if (k > 2) {
theta.design <- matrix(0, ncol = n.theta, nrow = nrow(design))
colnames(theta.design) <- paste("theta", 1:n.theta, sep = ".")
for (i in 1:n.theta) {
vec1 <- rep(0, q)
vec1[c(i, q - i + 1)] <- c(1, -1)
theta.design[, i] <- rep(vec1, length(y.ord))
}
design <- cbind(theta.design, design)
design.repar <- cbind(theta.design, design.repar)
}
RET <- list(design = design, p.X = p.X, p.Z1 = p.Z1, p.Z2 = p.Z2,
vars.X = vars.X, vars.Z1 = vars.Z1, vars.Z2 = vars.Z2,
n.theta = n.theta, n.intercepts = n.intercepts, n.order = n.order,
sd.X = sd.X, sd.Z1 = sd.Z1, sd.Z2 = sd.Z2, design.repar = design.repar)
return(RET)
}
## ---- end of file /scratch/gouwar.j/cran-all/cranData/BTLLasso/R/design.BTLLasso.R ----
df.BTLLasso <- function(coefs, design, m){
df <- c()
for(l in 1:nrow(coefs)){
df.l <- design$n.theta
coefs.l <- coefs[l,]
start <- design$n.theta+1
if(design$n.order>0){
end <- start+design$n.order-1
xhelp <- coefs.l[start:end]
df.l <- df.l + length(unique(xhelp[xhelp!=0]))
start <- end+1
}
if(design$n.intercepts>0){
end <- start+design$n.intercepts
xhelp <- coefs.l[start:end]
df.l <- df.l + length(unique(xhelp)) - 1
start <- end+1
}
if(design$p.X>0){
for(ll in 1:design$p.X){
end <- start+m-1
xhelp <- coefs.l[start:end]
df.l <- df.l + length(unique(xhelp)) - 1
start <- end+1
}
}
if(design$p.Z1>0){
for(ll in 1:design$p.Z1){
end <- start+m-1
xhelp <- coefs.l[start:end]
df.l <- df.l + length(unique(xhelp[xhelp!=0]))
start <- end+1
}
}
if(design$p.Z2>0){
end <- start+design$p.Z2-1
xhelp <- coefs.l[start:end]
df.l <- df.l + length(unique(xhelp[xhelp!=0]))
start <- end+1
}
df[l] <- df.l
}
df
}
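
## Illustration (sketch, not run): the degrees of freedom count the distinct
## (non-zero) parameter values per covariate, i.e. the clusters created by the
## fusion penalty; the vector below is a hypothetical block of m = 4 estimates.
if (FALSE) {
  xhelp <- c(0.3, 0.3, 0, 0.3)
  length(unique(xhelp[xhelp != 0]))  # 1, all non-zero values are fused
}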
|
/scratch/gouwar.j/cran-all/cranData/BTLLasso/R/df.BTLLasso.R
|
find.lambda <- function(response, design, penalties,
k, m, control, trace){
if(trace){
cat("Find maximal lambda...\n")
}
cur.low <- 0
cur.up <- lambda.ratio <- Inf
cur.lambda <- 100
while(lambda.ratio>0.15){
m.cur <- try(fit.BTLLasso(response, design, penalties,
cur.lambda, k, m, control, trace = FALSE))
# coefs.cur <- m.cur$coefs[rowSums(abs(penalties$acoefs))!=0]
coefs.cur <-m.cur$coefs%*%penalties$acoefs
coefs.cur[abs(coefs.cur) < 1/(10^control$precision)] <-0
if(sum(abs(coefs.cur))==0){
cur.up <- cur.lambda
}else{
cur.low <- cur.lambda
}
if(is.finite(cur.up)){
cur.lambda <- (cur.up-cur.low)*0.7+cur.low
lambda.ratio <- (cur.up-cur.low)/cur.up
}else{
cur.lambda <- cur.lambda*2
}
##end of while loop
}
if(control$log.lambda){
lambda <- exp(seq(log(cur.up+0.01*cur.up),
log(control$lambda.min+0.01*cur.up), length = control$l.lambda))-0.01*cur.up
lambda[control$l.lambda] <- control$lambda.min
}else{
lambda <- seq(cur.up,control$lambda.min,length=control$l.lambda)
}
return(lambda)
}
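
## Sketch of the grid construction used above (not run), assuming the search
## ended at cur.up = 50 and the control settings are l.lambda = 5 and
## lambda.min = 0: the log scale places more grid points at small lambda values.
if (FALSE) {
  cur.up <- 50; lambda.min <- 0; l.lambda <- 5
  lambda <- exp(seq(log(cur.up + 0.01 * cur.up), log(lambda.min + 0.01 * cur.up),
                    length = l.lambda)) - 0.01 * cur.up
  lambda[l.lambda] <- lambda.min
  lambda  # roughly 50, 15.4, 4.5, 1.1, 0
}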
|
/scratch/gouwar.j/cran-all/cranData/BTLLasso/R/find.lambda.R
|
fit.BTLLasso <- function(response, design, penalty, lambda, k,
m, control, trace) {
adaptive <- control$adaptive
norm <- control$norm
epsilon <- control$epsilon
lambda2 <- control$lambda2
c <- control$c
#### initialize for estimation
coefs <- matrix(0, nrow = length(lambda), ncol = ncol(design))
colnames(coefs) <- colnames(design)
df <- c()
start <- NULL
## calculate adaptive if needed
if (adaptive) {
if (k > 2) {
m0 <- cum.fit.Cpp(response, design, kat = k, epsilon = epsilon,
start = start, penalty = penalty, lambda = 0,
max.iter = 100, norm = norm, adaptive = NULL,
control = list(c = c, gama = 20, index = 1),
m = m, hat.matrix = FALSE, lambda2 = lambda2)
} else {
m0 <- bin.fit.Cpp(response, design, kat = k, epsilon = epsilon,
start = start, penalty = penalty, lambda = 0,
max.iter = 100, norm = norm, adaptive = NULL,
control = list(c = c, gama = 20, index = 1),
m = m, hat.matrix = FALSE, lambda2 = lambda2)
}
adaptive <- m0$coef
if (any(is.nan(adaptive))) {
stop("Unpenalized parameters for adaptive weights can not be estimated! Please increase lambda2 in
the control argument or set adaptive = FALSE!")
}
} else {
adaptive <- NULL
}
## start estimation
for (i in seq_along(lambda)) {
if (trace) {
cat("lambda =", lambda[i], "\n")
}
if (k > 2) {
m1 <- cum.fit.Cpp(response, design, kat = k, epsilon = epsilon,
start = start, penalty = penalty, lambda = lambda[i],
max.iter = 100, norm = norm, adaptive = adaptive,
control = list(c = c, gama = 20, index = 1),
m = m, hat.matrix = FALSE, lambda2 = lambda2)
} else {
m1 <- bin.fit.Cpp(response, design, kat = k, epsilon = epsilon,
start = start, penalty = penalty, lambda = lambda[i],
max.iter = 100, norm = norm, adaptive = adaptive,
control = list(c = c, gama = 20, index = 1),
m = m, hat.matrix = FALSE, lambda2 = lambda2)
}
coefs[i, ] <- m1$coef
start <- m1$coef
df[i] <- m1$df
}
return(list(coefs = coefs, df = df))
}
|
/scratch/gouwar.j/cran-all/cranData/BTLLasso/R/fit.BTLLasso.R
|
fit.cv.BTLLasso <- function(response, design, penalty, q, m,
folds = 10, lambda, control = ctrl.BTLLasso(), cores = folds,
trace = TRUE, trace.cv = TRUE, cv.crit) {
k <- q + 1
n.design <- nrow(design)/q
if (trace.cv) {
cat("Full model", "\n")
}
m.all <- fit.BTLLasso(response = response, design = design,
penalty = penalty, lambda = lambda, k = k, m = m, control = control,
trace = trace)
### cross validation
n.cv <- rep(floor(n.design/folds), folds)
rest <- n.design%%folds
if (rest > 0) {
n.cv[1:rest] <- n.cv[1:rest] + 1
}
which.fold <- rep(1:folds, n.cv)
id.fold <- rep(sample(which.fold, n.design, replace = FALSE),
each = q)
cv.fun <- function(ff) {
if (trace.cv) {
cat("CV-fold:", ff, "out of", folds, "\n")
}
design.train <- design[which(id.fold != ff), , drop = FALSE]
design.test <- design[which(id.fold == ff), , drop = FALSE]
if(any(apply(design.train,2,var)==0)){
stop("In cross-validation one of the parameters is not estimable,
           probably because all corresponding observations were eliminated from the training data.
Please change your seed and/or increase the number of folds!")
}
response.train <- response[which(id.fold != ff)]
response.test <- response[which(id.fold == ff)]
fit.fold <- fit.BTLLasso(response.train, design.train,
penalty = penalty, lambda = lambda, k = k, m = m,
control = control, trace = trace)
coef.fold <- fit.fold$coefs
if (cv.crit == "Deviance") {
y.test <- t(cbind(matrix(response.test, ncol = q,
byrow = TRUE), 1)) * (1:k)
y.test[y.test == 0] <- k + 1
y.test <- apply(y.test, 2, min)
yhelp <- rep(y.test, each = k)
yhelp <- as.numeric(yhelp == rep(1:k, length(y.test)))
preds <- c()
for (u in 1:length(lambda)) {
                preds <- cbind(preds, predBTLLasso(coef.fold[u,
                  ], q, design.test))
}
criterion <- -2 * colSums(yhelp * log(preds))
} else {
pi.test <- c()
for (u in 1:length(lambda)) {
eta.test <- design.test %*% coef.fold[u, ]
pi.test <- cbind(pi.test, exp(eta.test)/(1 +
exp(eta.test)))
}
criterion <- colSums((pi.test - response.test)^2)
}
criterion
}
cat("Cross-Validation...", "\n")
if (cores > 1) {
cl <- makeCluster(cores, outfile = "")
clusterExport(cl, varlist = c("response", "design", "id.fold",
"lambda", "control", "trace.cv", "trace", "k", "m",
"cv.crit"), envir = sys.frame(sys.nframe()))
criterion <- rowSums(parSapply(cl, seq(folds), cv.fun))
stopCluster(cl)
} else {
criterion <- rowSums(sapply(seq(folds), cv.fun))
}
ret.list <- list(coefs = m.all$coefs, criterion = criterion)
return(ret.list)
}
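
## Sketch (not run): fold labels are drawn once per paired comparison and then
## repeated q times, so all q dichotomized rows of one comparison stay in the
## same cross-validation fold; the small numbers below are hypothetical.
if (FALSE) {
  folds <- 2; n.design <- 3; q <- 2
  n.cv <- rep(floor(n.design/folds), folds)
  n.cv[1:(n.design %% folds)] <- n.cv[1:(n.design %% folds)] + 1
  which.fold <- rep(1:folds, n.cv)
  rep(sample(which.fold, n.design, replace = FALSE), each = q)
  # e.g. 2 2 1 1 1 1 -- consecutive pairs carry identical fold labels
}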
|
/scratch/gouwar.j/cran-all/cranData/BTLLasso/R/fit.cv.BTLLasso.R
|
#### create cumulative response vector
cumul.response <- function(Y) {
get.resp <- function(x) {
as.numeric(as.numeric(x) <= 1:(Y$q))
}
cum.resp <- c()
for (i in 1:length(Y$response)) {
cum.resp <- c(cum.resp, get.resp(Y$response[i]))
}
cum.resp
}
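
## Sketch (not run): an ordinal response with k = 3 categories (q = 2) is
## dichotomized into the cumulative 0/1 indicators I(y <= r), r = 1, ..., q.
## Y.toy below only mimics the two slots of a response object that are used here.
if (FALSE) {
  Y.toy <- list(response = c(1, 3, 2), q = 2)
  cumul.response(Y.toy)  # 1 1  0 0  0 1
}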
#### check functions for matrices Z1 and Z2
fun.check <- function(x, object.names) {
identical(sort(x), sort(object.names))
}
check <- function(Z, object.names, subject) {
## which Z matrix are we talking about
Z.name <- deparse(substitute(Z))
## names of the objects
m <- length(object.names)
## check if ncol(Z) is a multiple of m
if (!is.matrix(Z))
stop(paste(Z.name, "has to be a matrix"))
if (ncol(Z)%%m != 0)
stop(paste("Number of columns of", Z.name, "has to be a multiple of the number of objects"))
## names of objects in Z matrix
objects.Z <- matrix(word(colnames(Z), 2, sep = fixed(".")),
ncol = m, byrow = TRUE)
## number of Z-covariates
p.Z <- nrow(objects.Z)
## order of the objects compared to general order from
## response
order.Z <- apply(objects.Z, 1, order)
## names of the Z-covariates
var.Z <- matrix(word(colnames(Z), 1, sep = fixed(".")), ncol = m,
byrow = TRUE)
## 1. names of the objects have to be identical 2. within Z,
## objects have to be equal for all covariates, in the same
## order
if (!all(apply(objects.Z, 1, fun.check, object.names)) |
!all(apply(var.Z[, -1, drop = FALSE], 2, function(x) {
identical(unique(c(x)), unique(c(var.Z[, 1, drop = FALSE])))
}))) {
text <- paste("The matrix", Z.name, "has to be arranged so that all columns
corresponding to one covariate are put next to each other.\n
The colnames of",
Z.name, "have to be named according to the scheme
'firstvar.object1',...,'firstvar.objectm',...,'lastvar.objectm'.\n The object names
'object1',...,'objectm' have to be identical to the object names used in the response object.\n
The variable names and the object names have to be separated by '.'.")
stop(text)
}
## every subject needs to have its individual line in the Z
## matrix, identified with rownames
if (!all(subject %in% rownames(Z))) {
text <- paste("The rownames of the matrix", Z.name, "have to be equal to the subjects specified in the response object.")
stop(text)
}
list(vars.Z = unique(c(var.Z)), order.Z = order.Z)
}
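
## Sketch (not run): for m = 3 objects A, B, C and one covariate 'econ' the
## columns of an object-specific matrix have to be named 'econ.A', 'econ.B',
## 'econ.C', and each subject gets its own row identified via rownames.
## 'econ' and the toy data are purely hypothetical; word() and fixed() come from stringr.
if (FALSE) {
  Z1.toy <- matrix(rnorm(6), nrow = 2,
                   dimnames = list(c("Subject1", "Subject2"),
                                   c("econ.A", "econ.B", "econ.C")))
  check(Z = Z1.toy, object.names = c("A", "B", "C"),
        subject = c("Subject1", "Subject2"))
  # $vars.Z is "econ", $order.Z gives the column order relative to object.names
}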
#### log likelihood function
loglik <- function(coef, y, design, kat) {
eta <- matrix(design %*% coef, ncol = kat - 1, byrow = TRUE)
pi.help <- matrix(exp(eta)/(1 + exp(eta)), ncol = kat - 1)
# print(pi.help)
pi <- pi.help
if (kat > 2) {
for (i in 2:(kat - 1)) {
pi[, i] <- pi.help[, i] - pi.help[, i - 1]
}
}
pi <- cbind(pi, 1 - pi.help[, kat - 1])
yhelp <- rep(y, each = kat)
yhelp <- matrix(as.numeric(yhelp == rep(1:kat, length(y))),
byrow = T, ncol = kat)
loglik <- sum(yhelp * log(pi))
loglik
}
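
## Sketch (not run): for a binary response (kat = 2) the log-likelihood reduces
## to the usual sum of log Bernoulli probabilities; design.toy is hypothetical.
if (FALSE) {
  design.toy <- matrix(c(1, -1), ncol = 1)  # two comparisons, one coefficient
  loglik(coef = 0.5, y = c(1, 2), design = design.toy, kat = 2)
  # equals log(plogis(0.5)) + log(1 - plogis(-0.5))
}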
#### predict function
predBTLLasso <- function(coef, q, design) {
k <- q + 1
eta <- matrix(design %*% coef, ncol = q, byrow = TRUE)
pi.help <- matrix(exp(eta)/(1 + exp(eta)), ncol = q)
# print(pi.help)
pi <- pi.help
if (k > 2) {
for (i in 2:(q)) {
pi[, i] <- pi.help[, i] - pi.help[, i - 1]
}
}
pi <- cbind(pi, 1 - pi.help[, q])
c(t(pi))
}
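
## Sketch (not run): with q = 2 cumulative splits (k = 3 response categories)
## the cumulative probabilities P(Y <= 1) and P(Y <= 2) are differenced into
## three category probabilities that sum to one; the numbers are hypothetical.
if (FALSE) {
  design.toy <- matrix(c(0.5, 1.5), ncol = 1)  # one comparison, q = 2 rows
  predBTLLasso(coef = 1, q = 2, design = design.toy)
  # roughly 0.62 0.20 0.18
}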
#### function to reparameterize from reference category to
#### symmetric side constraints
reparam <- function(x) {
z <- ncol(x) + 1
K <- matrix((-1/z), ncol = z - 1, nrow = z - 1)
diag(K) <- (z - 1)/z
x.sym <- x %*% K
x2 <- x.sym %*% matrix(rep(-1, z - 1), nrow = z - 1)
x.sym <- cbind(x.sym, x2)
x.sym
}
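
## Sketch (not run): reference-category estimates (reference coefficient fixed
## to zero) are transformed to symmetric side constraints, i.e. the expanded
## coefficients of all m objects sum to zero; the input row is hypothetical.
if (FALSE) {
  x.ref <- matrix(c(0.6, -0.3), nrow = 1)  # m = 3 objects, reference omitted
  reparam(x.ref)  # 0.5 -0.4 -0.1, sums to zero
}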
#### function to create complete coefficient matrix either
#### together with zero-columns for reference categories or with
#### symmetric side constraints
expand.coefs <- function(coef, D, Y, symmetric = TRUE, name.order = "Order") {
n.theta <- D$n.theta
n.order <- D$n.order
n.intercepts <- D$n.intercepts
p.X <- D$p.X
p.Z1 <- D$p.Z1
p.Z2 <- D$p.Z2
m <- Y$m
object.names <- Y$object.names
vars.X <- D$vars.X
## initialize new coefficient vector
coef.new <- c()
## if threshold parameters exist, leave them as they are
if (n.theta > 0) {
coef.new <- coef[, 1:n.theta, drop = FALSE]
colnames(coef.new) <- paste("theta", 1:n.theta, sep = ".")
}
## if order parameters exist, leave them as they are
if (n.order > 0) {
order.effects <- coef[, (n.theta + 1):(n.theta + n.order),
drop = FALSE]
if(n.order==m){
colnames(order.effects) <- paste(name.order, object.names, sep=".")
}else{
colnames(order.effects) <- name.order
}
coef.new <- cbind(coef.new, order.effects)
}
## if intercepts parameters exist, reparameterize them
if (n.intercepts > 0) {
intercepts <- coef[, (n.theta + n.order + 1):(n.theta +
n.order + n.intercepts), drop = FALSE]
if (symmetric) {
intercepts.new <- reparam(intercepts)
} else {
intercepts.new <- cbind(intercepts, 0)
}
colnames(intercepts.new) <- object.names
coef.new <- cbind(coef.new, intercepts.new)
}
## if parameters for subject-specific covariates exist,
## reparameterize them
if (p.X > 0) {
gamma <- coef[, (n.theta + n.intercepts + n.order + 1):(n.theta +
n.intercepts + n.order + p.X * (m - 1)), drop = FALSE]
index <- 1
for (i in 1:p.X) {
if (symmetric) {
coef.X <- reparam(gamma[, index:(index + m -
2), drop = FALSE])
} else {
coef.X <- cbind(0, gamma[, index:(index + m -
2), drop = FALSE])
}
colnames(coef.X) <- paste(rep(vars.X[i], each = m),
object.names, sep = ".")
coef.new <- cbind(coef.new, coef.X)
index <- index + m - 1
}
}
## if parameters for object-specific covariates exist, leave
## them as they are
if (p.Z1 + p.Z2 > 0) {
rest <- coef[, (n.theta + n.intercepts + n.order + p.X *
(m - 1) + 1):ncol(coef), drop = FALSE]
if (p.Z1 > 0) {
colnames(rest) <- c(paste(rep(D$vars.Z1, each = m),
object.names, sep = "."), D$vars.Z2)
} else {
colnames(rest) <- D$vars.Z2
}
coef.new <- cbind(coef.new, rest)
}
## return reparameterized parameter vector
coef.new
}
#### create subset of a response object for cross validation
subsetY <- function(Y, id.ex) {
Y$response <- Y$response[-id.ex]
Y$first.object <- Y$first.object[-id.ex]
Y$second.object <- Y$second.object[-id.ex]
Y$subject <- Y$subject[-id.ex]
Y$subject.names <- levels(as.factor(Y$subject))
# Y$subject.names <- Y$subject.names[-id.ex]
Y$n <- length(Y$subject.names)
Y
}
bootY <- function(Y, id.vec) {
Y$response <- Y$response[id.vec]
Y$first.object <- Y$first.object[id.vec]
Y$second.object <- Y$second.object[id.vec]
Y$subject <- Y$subject[id.vec]
Y$subject.names <- levels(as.factor(Y$subject))
Y$n <- length(Y$subject.names)
Y
}
|
/scratch/gouwar.j/cran-all/cranData/BTLLasso/R/functions.R
|
#' Plot covariate paths for BTLLasso
#'
#' Plots paths for every covariate of a BTLLasso object or a cv.BTLLasso
#' object. In contrast to \code{\link{plot.BTLLasso}}, only one plot is created;
#' every covariate is illustrated by one path. For \code{cv.BTLLasso} objects, the
#' optimal model according to the cross-validation is marked by a vertical
#' dashed line.
#'
#' @param model \code{BTLLasso} or \code{cv.BTLLasso} object
#' @param y.axis Two possible values for the y-axis. Variables can either be plotted
#' with regard to their contribution to the total penalty term (\code{y.axis='penalty'}) or
#' with regard to the $L_2$ norm of the corresponding parameter vector (\code{y.axis='L2'}).
#' @param x.axis Should the paths be plotted against log(lambda+1) or against lambda?
#' @author Gunther Schauberger\cr \email{gunther.schauberger@@tum.de}
#' @seealso \code{\link{BTLLasso}}, \code{\link{cv.BTLLasso}},
#' \code{\link{plot.BTLLasso}}
#' @references Schauberger, Gunther and Tutz, Gerhard (2019): BTLLasso - A Common Framework and Software
#' Package for the Inclusion and Selection of Covariates in Bradley-Terry Models, \emph{Journal of
#' Statistical Software}, 88(9), 1-29, \doi{10.18637/jss.v088.i09}
#'
#' Schauberger, Gunther and Tutz, Gerhard (2017): Subject-specific modelling
#' of paired comparison data: A lasso-type penalty approach, \emph{Statistical Modelling},
#' 17(3), 223 - 243
#'
#' Schauberger, Gunther, Groll Andreas and Tutz, Gerhard (2018):
#' Analysis of the importance of on-field covariates in the German Bundesliga,
#' \emph{Journal of Applied Statistics}, 45(9), 1561 - 1578
#' @keywords BTLLasso covariate paths
#' @examples
#'
#' \dontrun{
#' op <- par(no.readonly = TRUE)
#'
#' ##############################
#' ##### Example with simulated data set containing X, Z1 and Z2
#' ##############################
#' data(SimData)
#'
#' ## Specify control argument
#' ## -> allow for object-specific order effects and penalize intercepts
#' ctrl <- ctrl.BTLLasso(penalize.intercepts = TRUE, object.order.effect = TRUE,
#' penalize.order.effect.diffs = TRUE)
#'
#' ## Simple BTLLasso model for tuning parameters lambda
#' m.sim <- BTLLasso(Y = SimData$Y, X = SimData$X, Z1 = SimData$Z1,
#' Z2 = SimData$Z2, control = ctrl)
#' m.sim
#'
#' par(xpd = TRUE)
#' plot(m.sim)
#'
#'
#' ## Cross-validate BTLLasso model for tuning parameters lambda
#' set.seed(1860)
#' m.sim.cv <- cv.BTLLasso(Y = SimData$Y, X = SimData$X, Z1 = SimData$Z1,
#' Z2 = SimData$Z2, control = ctrl)
#' m.sim.cv
#' coef(m.sim.cv)
#' logLik(m.sim.cv)
#'
#' head(predict(m.sim.cv, type="response"))
#' head(predict(m.sim.cv, type="trait"))
#'
#' plot(m.sim.cv, plots_per_page = 4)
#'
#'
#' ## Example for bootstrap intervals for illustration only
#' ## Don't calculate bootstrap intervals with B = 20!!!!
#' set.seed(1860)
#' m.sim.boot <- boot.BTLLasso(m.sim.cv, B = 20, cores = 20)
#' m.sim.boot
#' plot(m.sim.boot, plots_per_page = 4)
#'
#'
#' ##############################
#' ##### Example with small version from GLES data set
#' ##############################
#' data(GLESsmall)
#'
#' ## extract data and center covariates for better interpretability
#' Y <- GLESsmall$Y
#' X <- scale(GLESsmall$X, scale = FALSE)
#' Z1 <- scale(GLESsmall$Z1, scale = FALSE)
#'
#' ## vector of subtitles, containing the coding of the X covariates
#' subs.X <- c('', 'female (1); male (0)')
#'
#' ## Cross-validate BTLLasso model
#' m.gles.cv <- cv.BTLLasso(Y = Y, X = X, Z1 = Z1)
#' m.gles.cv
#'
#' coef(m.gles.cv)
#' logLik(m.gles.cv)
#'
#' head(predict(m.gles.cv, type="response"))
#' head(predict(m.gles.cv, type="trait"))
#'
#' par(xpd = TRUE, mar = c(5,4,4,6))
#' plot(m.gles.cv, subs.X = subs.X, plots_per_page = 4, which = 2:5)
#' paths(m.gles.cv, y.axis = 'L2')
#'
#'
#' ##############################
#' ##### Example with Bundesliga data set
#' ##############################
#' data(Buli1516)
#'
#' Y <- Buli1516$Y5
#'
#' Z1 <- scale(Buli1516$Z1, scale = FALSE)
#'
#' ctrl.buli <- ctrl.BTLLasso(object.order.effect = TRUE,
#' name.order = "Home",
#' penalize.order.effect.diffs = TRUE,
#' penalize.order.effect.absolute = FALSE,
#' order.center = TRUE, lambda2 = 1e-2)
#'
#' set.seed(1860)
#' m.buli <- cv.BTLLasso(Y = Y, Z1 = Z1, control = ctrl.buli)
#' m.buli
#'
#' par(xpd = TRUE, mar = c(5,4,4,6))
#' plot(m.buli)
#'
#'
#' ##############################
#' ##### Example with Topmodel data set
#' ##############################
#' data("Topmodel2007", package = "psychotree")
#'
#' Y.models <- response.BTLLasso(Topmodel2007$preference)
#' X.models <- scale(model.matrix(preference~., data = Topmodel2007)[,-1])
#' rownames(X.models) <- paste0("Subject",1:nrow(X.models))
#' colnames(X.models) <- c("Gender","Age","KnowShow","WatchShow","WatchFinal")
#'
#' set.seed(5)
#' m.models <- cv.BTLLasso(Y = Y.models, X = X.models)
#' plot(m.models, plots_per_page = 6)
#'
#' par(op)
#' }
paths <- function(model, y.axis = c("penalty", "L2"), x.axis = c("loglambda",
"lambda")) {
x.axis <- match.arg(x.axis)
y.axis <- match.arg(y.axis)
if (y.axis == "penalty") {
y.text <- "penalty size"
}
if (y.axis == "L2") {
y.text <- "L2 norm"
}
coefs <- model$coefs
covar <- c(model$design$vars.X, model$design$vars.Z1, model$design$vars.Z2)
if (x.axis == "lambda") {
norm <- model$lambda
norm.range <- rev(range(norm))
x.axis.name <- expression(lambda)
}
if (x.axis == "loglambda") {
norm <- log(model$lambda + 1)
norm.range <- rev(range(norm))
x.axis.name <- expression(log(lambda + 1))
}
m <- model$Y$m
n.theta <- model$design$n.theta
n.order <- model$design$n.order
n.intercepts <- model$design$n.intercepts
acoefs <- model$penalty$acoefs
p.X <- model$design$p.X
p.Z1 <- model$design$p.Z1
p.Z2 <- model$design$p.Z2
numpen.order <- model$penalty$numpen.order
numpen.intercepts <- model$penalty$numpen.intercepts
numpen.X <- model$penalty$numpen.X
numpen.Z1 <- model$penalty$numpen.Z1
numpen.Z2 <- model$penalty$numpen.Z2
labels <- model$Y$object.names
criterion <- model$criterion
order.effects <- intercepts <- gamma.X <- gamma.Z1 <- gamma.Z2 <- c()
index.cols.X <- index.cols.Z1 <- index.cols.Z2 <- c()
index.rows.X <- index.rows.Z1 <- index.rows.Z2 <- c()
if (n.order > 0) {
order.effects <- coefs[, (n.theta + 1):(n.theta + n.order)]
}
if (n.intercepts > 0) {
intercepts <- coefs[, (n.theta + n.order + 1):(n.theta +
n.order + n.intercepts)]
}
p <- p.X + p.Z1 + p.Z2
paths <- c()
start.row <- n.theta + n.intercepts + n.order
if (p.X > 0) {
index <- rep((1:p.X), each = m - 1)
for (i in 1:p.X) {
if (y.axis == "penalty") {
paths <- cbind(paths, rowSums(abs(coefs[, start.row +
which(index == i), drop = FALSE] %*% acoefs[start.row +
which(index == i), , drop = FALSE])))
} else {
paths <- cbind(paths, sqrt(rowSums(coefs[, start.row +
which(index == i), drop = FALSE]^2)))
}
}
start.row <- start.row + length(index)
}
if (p.Z1 > 0) {
index <- rep(1:p.Z1, each = m)
for (i in 1:p.Z1) {
if (y.axis == "penalty") {
paths <- cbind(paths, rowSums(abs(coefs[, start.row +
which(index == i), drop = FALSE] %*% acoefs[start.row +
which(index == i), , drop = FALSE])))
} else {
paths <- cbind(paths, sqrt(rowSums(coefs[, start.row +
which(index == i), drop = FALSE]^2)))
}
}
start.row <- start.row + length(index)
}
if (p.Z2 > 0) {
index <- 1:p.Z2
for (i in 1:p.Z2) {
if (y.axis == "penalty") {
paths <- cbind(paths, rowSums(abs(coefs[, start.row +
which(index == i), drop = FALSE] %*% acoefs[start.row +
which(index == i), , drop = FALSE])))
} else {
paths <- cbind(paths, sqrt(rowSums(coefs[, start.row +
which(index == i), drop = FALSE]^2)))
}
}
}
if (!is.null(criterion)) {
x.axis.min <- norm[which.min(criterion)]
}
if (numpen.intercepts > 0) {
if (y.axis == "penalty") {
paths <- cbind(rowSums(abs(intercepts %*% acoefs[(n.theta +
n.order + 1):(n.theta + n.order + n.intercepts),
(numpen.order + 1):(numpen.order + numpen.intercepts)])),
paths)
} else {
paths <- cbind(sqrt(rowSums(intercepts^2)), paths)
}
p <- p + 1
covar <- c("Intercept", covar)
}
if (numpen.order > 0) {
if (y.axis == "penalty") {
paths <- cbind(rowSums(abs(order.effects %*% acoefs[(n.theta +
1):(n.theta + n.order), 1:numpen.order])), paths)
} else {
paths <- cbind(sqrt(rowSums(order.effects^2)), paths)
}
p <- p + 1
covar <- c(model$control$name.order, covar)
}
plot(norm, paths[, 1], type = "l", ylim = range(paths), ylab = y.text,
xlab = x.axis.name, xlim = norm.range, las = 1,lwd=par()$lwd,
frame.plot = FALSE)
for (o in 2:p) {
lines(norm, paths[, o],lwd=par()$lwd)
}
if (!is.null(criterion)) {
segments( x.axis.min, min(paths),
x.axis.min, max(paths) ,
col=2,lty=2,lwd=par()$lwd)
}
x.lab1 <- norm[length(norm)]-abs(diff(range(norm)))*0.02
x.lab2 <- norm[length(norm)]-abs(diff(range(norm)))*0.005
y.lab1 <- paths[nrow(paths), ]
y.lab2 <- spread.labs(y.lab1, 1.2*strheight("A"))
text( x.lab1, y.lab2, covar ,pos=4)
segments( x.lab2, y.lab1,
x.lab1, y.lab2 ,col="gray")
}
|
/scratch/gouwar.j/cran-all/cranData/BTLLasso/R/paths.R
|
penalties.BTLLasso <- function(Y, X = NULL, Z1 = NULL, Z2 = NULL, get.design = get.design,
control = ctrl.BTLLasso()) {
#### get arguments from responseBTLLasso object
n <- Y$n
m <- Y$m
k <- Y$k
q <- Y$q
object.names <- Y$object.names
#### extract all control arguments
penalize.X <- control$penalize.X
penalize.Z1.diffs <- control$penalize.Z1.diffs
penalize.Z1.absolute <- control$penalize.Z1.absolute
penalize.Z2 <- control$penalize.Z2
penalize.intercepts <- control$penalize.intercepts
include.intercepts <- control$include.intercepts
order.effect <- control$order.effect
object.order.effect <- control$object.order.effect
penalize.order.effect.diffs <- control$penalize.order.effect.diffs
penalize.order.effect.absolute <- control$penalize.order.effect.absolute
## create which.pen vector for X
if(!is.logical(penalize.X)){
if(all(penalize.X %in% get.design$vars.X)){
which.pen.X <- rep(FALSE,get.design$p.X)
which.pen.X[which(get.design$vars.X %in% penalize.X)] <- TRUE
penalize.X <- TRUE
}else{
stop("The argument penalize.X must either be logical or a character vector containing variable names of X which should be penalized!")
}
}else{
which.pen.X <- rep(TRUE,get.design$p.X)
}
## create which.pen vector for Z1 absolute
if(!is.logical(penalize.Z1.absolute)){
if(all(penalize.Z1.absolute %in% get.design$vars.Z1)){
which.pen.Z1.absolute <- rep(FALSE,get.design$p.Z1)
which.pen.Z1.absolute[which(get.design$vars.Z1 %in% penalize.Z1.absolute)] <- TRUE
penalize.Z1.absolute <- TRUE
}else{
stop("The argument penalize.Z1.absolute must either be logical or a character vector containing variable names of Z1 which should be penalized with respect to absolute values!")
}
}else{
which.pen.Z1.absolute <- rep(TRUE,get.design$p.Z1)
}
## create which.pen vector for Z1 diffs
if(!is.logical(penalize.Z1.diffs)){
if(all(penalize.Z1.diffs %in% get.design$vars.Z1)){
which.pen.Z1.diffs <- rep(FALSE,get.design$p.Z1)
which.pen.Z1.diffs[which(get.design$vars.Z1 %in% penalize.Z1.diffs)] <- TRUE
penalize.Z1.diffs <- TRUE
}else{
stop("The argument penalize.Z1.diffs must either be logical or a character vector containing variable names of Z1 which should be penalized with respect to absolute differences!")
}
}else{
which.pen.Z1.diffs <- rep(TRUE,get.design$p.Z1)
}
## create which.pen vector for Z2
if(!is.logical(penalize.Z2)){
if(all(penalize.Z2 %in% get.design$vars.Z2)){
which.pen.Z2 <- rep(FALSE,get.design$p.Z2)
which.pen.Z2[which(get.design$vars.Z2 %in% penalize.Z2)] <- TRUE
penalize.Z2 <- TRUE
}else{
stop("The argument penalize.Z2 must either be logical or a character vector containing variable names of Z2 which should be penalized!")
}
}else{
which.pen.Z2 <- rep(TRUE,get.design$p.Z2)
}
## number of intercepts
n.intercepts <- 0
par.names.intercepts <- c()
if (include.intercepts) {
n.intercepts <- m - 1
par.names.intercepts <- object.names[1:(m - 1)]
}
## number of order effects
n.order <- 0
if (order.effect) {
n.order <- 1
}
if (object.order.effect) {
n.order <- m
}
#### create penalty matrices
numpen.intercepts <- numpen.X <- numpen.Z1 <- numpen.Z2 <- numpen.order <- 0
p.X <- p.Z1 <- p.Z2 <- 0
## penalty matrix for intercepts
if (include.intercepts & penalize.intercepts) {
acoefs.intercepts <- diag(m - 1)
help.pen <- matrix(0, ncol = choose(m - 1, 2), nrow = m -
1)
combis <- combn(m - 1, 2)
for (ff in 1:ncol(combis)) {
help.pen[combis[1, ff], ff] <- 1
help.pen[combis[2, ff], ff] <- -1
}
acoefs.intercepts <- cbind(acoefs.intercepts, help.pen)
numpen.intercepts <- ncol(acoefs.intercepts)
}
## penalty matrix for X
if (!is.null(X)) {
p.X <- ncol(X)
if (penalize.X) {
acoefs.X <- diag(x = as.numeric(rep(which.pen.X,each=m-1)), p.X * (m - 1))
help.pen <- help.pen2 <- matrix(0, ncol = choose(m - 1, 2), nrow = m -
1)
combis <- combn(m - 1, 2)
for (ff in 1:ncol(combis)) {
help.pen[combis[1, ff], ff] <- 1
help.pen[combis[2, ff], ff] <- -1
}
for (pp in 1:p.X) {
m.above <- matrix(rep(matrix(0, ncol = choose(m -
1, 2), nrow = m - 1), pp - 1), ncol = choose(m -
1, 2))
m.below <- matrix(rep(matrix(0, ncol = choose(m -
1, 2), nrow = m - 1), p.X - pp), ncol = choose(m -
1, 2))
if(which.pen.X[pp]){
acoefs.X <- cbind(acoefs.X, rbind(m.above, help.pen,
m.below))
}else{
acoefs.X <- cbind(acoefs.X, rbind(m.above, help.pen2,
m.below))
}
}
acoefs.X <- acoefs.X[,colSums(abs(acoefs.X))>0, drop = FALSE]
numpen.X <- ncol(acoefs.X)
}
}
## penalty matrix for Z1
if (!is.null(Z1)) {
p.Z1 <- ncol(Z1)/m
if (penalize.Z1.diffs | penalize.Z1.absolute) {
acoefs.Z1 <- c()
if (penalize.Z1.absolute) {
acoefs.Z1 <- diag(x = as.numeric(rep(which.pen.Z1.absolute,each=m)), p.Z1 * m)
}
if (penalize.Z1.diffs) {
help.pen <- help.pen2 <- matrix(0, ncol = choose(m, 2), nrow = m)
combis <- combn(m, 2)
for (ff in 1:ncol(combis)) {
help.pen[combis[1, ff], ff] <- 1
help.pen[combis[2, ff], ff] <- -1
}
for (pp in 1:p.Z1) {
m.above <- matrix(rep(matrix(0, ncol = choose(m,
2), nrow = m), pp - 1), ncol = choose(m,
2))
m.below <- matrix(rep(matrix(0, ncol = choose(m,
2), nrow = m), p.Z1 - pp), ncol = choose(m,
2))
if(which.pen.Z1.diffs[pp]){
acoefs.Z1 <- cbind(acoefs.Z1, rbind(m.above,
help.pen, m.below))
}else{
acoefs.Z1 <- cbind(acoefs.Z1, rbind(m.above,
help.pen2, m.below))
}
}
}
acoefs.Z1 <- acoefs.Z1[,colSums(abs(acoefs.Z1))>0, drop = FALSE]
numpen.Z1 <- ncol(acoefs.Z1)
}
}
## penalty matrix for Z2
if (!is.null(Z2)) {
p.Z2 <- ncol(Z2)/m
if (penalize.Z2) {
acoefs.Z2 <- diag(x = as.numeric(which.pen.Z2), p.Z2)
acoefs.Z2 <- acoefs.Z2[,colSums(abs(acoefs.Z2))>0, drop = FALSE]
numpen.Z2 <- ncol(acoefs.Z2)
}
}
## penalty matrix for order effects if global order effect
if (order.effect & penalize.order.effect.absolute) {
acoefs.order <- matrix(1, ncol = 1, nrow = 1)
numpen.order <- 1
}
#### if object-specific order effects
if (object.order.effect & (penalize.order.effect.diffs |
penalize.order.effect.absolute)) {
acoefs.order <- c()
if (penalize.order.effect.absolute) {
acoefs.order <- diag(m)
}
if (penalize.order.effect.diffs) {
help.pen <- matrix(0, ncol = choose(m, 2), nrow = m)
combis <- combn(m, 2)
for (ff in 1:ncol(combis)) {
help.pen[combis[1, ff], ff] <- 1
help.pen[combis[2, ff], ff] <- -1
}
acoefs.order <- cbind(acoefs.order, help.pen)
}
numpen.order <- ncol(acoefs.order)
}
  ## total number of penalty terms
numpen <- numpen.intercepts + numpen.X + numpen.Z1 + numpen.Z2 +
numpen.order
## initalize total penalty matrix
acoefs <- matrix(0, ncol = numpen, nrow = n.intercepts +
p.X * (m - 1) + p.Z1 * m + p.Z2 + n.order)
current.row <- 1
current.col <- 1
## add penalties for order effects
if (n.order > 0) {
if (numpen.order > 0) {
acoefs[current.row:(current.row + n.order - 1), current.col:(current.col +
numpen.order - 1)] <- acoefs.order
}
current.row <- current.row + n.order
current.col <- current.col + numpen.order
}
## add penalties for intercepts
if (include.intercepts) {
if (penalize.intercepts) {
acoefs[current.row:(current.row + m - 2), current.col:(current.col +
numpen.intercepts - 1)] <- acoefs.intercepts
}
current.row <- current.row + m - 1
current.col <- current.col + numpen.intercepts
}
## add penalties for X
if (!is.null(X)) {
if (penalize.X) {
acoefs[current.row:(current.row + p.X * (m - 1) -
1), current.col:(current.col + numpen.X - 1)] <- acoefs.X
}
current.row <- current.row + p.X * (m - 1)
current.col <- current.col + numpen.X
}
## add penalties for Z1
if (!is.null(Z1)) {
if (penalize.Z1.diffs | penalize.Z1.absolute) {
acoefs[current.row:(current.row + p.Z1 * m - 1),
current.col:(current.col + numpen.Z1 - 1)] <- acoefs.Z1
}
current.row <- current.row + p.Z1 * m
current.col <- current.col + numpen.Z1
}
  ## add penalties for Z2
if (!is.null(Z2)) {
if (penalize.Z2) {
acoefs[current.row:(current.row + p.Z2 - 1), current.col:(current.col +
numpen.Z2 - 1)] <- acoefs.Z2
}
current.row <- current.row + p.Z2
current.col <- current.col + numpen.Z2
}
## additional rows for thetas, thetas (thresholds) are never
## penalized
acoefs <- rbind(matrix(0, nrow = floor(q/2), ncol = ncol(acoefs)),
acoefs)
RET <- list(acoefs = acoefs, numpen.intercepts = numpen.intercepts,
numpen.X = numpen.X, numpen.Z1 = numpen.Z1, numpen.Z2 = numpen.Z2,
numpen.order = numpen.order, n.order = n.order, p.X = p.X,
p.Z1 = p.Z1, p.Z2 = p.Z2, weight.penalties = control$weight.penalties)
return(RET)
}
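
## Sketch (not run): the fused-lasso building block used above; every column of
## the small matrix selects one pairwise difference between the m object-specific
## parameters of a covariate, here shown for m = 3.
if (FALSE) {
  m <- 3
  help.pen <- matrix(0, ncol = choose(m, 2), nrow = m)
  combis <- combn(m, 2)
  for (ff in 1:ncol(combis)) {
    help.pen[combis[1, ff], ff] <- 1
    help.pen[combis[2, ff], ff] <- -1
  }
  help.pen
  # entry (i, j) is +1/-1 if parameter i is part of pairwise difference j
}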
|
/scratch/gouwar.j/cran-all/cranData/BTLLasso/R/penalties.BTLLasso.R
|
#' Plot parameter paths for BTLLasso
#'
#' Plots single paths for every parameter of a \code{BTLLasso} object or a \code{cv.BTLLasso}
#' object. In contrast to \code{\link{paths}}, one plot per covariate is
#' created; every single parameter is illustrated by one path. For \code{cv.BTLLasso}
#' objects, the optimal model according to the cross-validation is marked by a
#' vertical dashed line.
#'
#' @param x BTLLasso or cv.BTLLasso object
#' @param plots_per_page Number of plots per page, internally specified by \code{par(mfrow=...)}.
#' @param ask_new If TRUE, the user is asked before each plot.
#' @param rescale Should the parameter estimates be rescaled for plotting? Only
#' applies if \code{scale = TRUE} was specified in \code{BTLLasso} or \code{cv.BTLLasso}.
#' @param which Integer vector to specify which parameters/variables to plot.
#' @param equal.ranges Should all single plots (for different covariates) have
#' equal ranges on the y-axes? FALSE by default.
#' @param x.axis Should the paths be plotted against log(lambda+1) or against lambda?
#' @param rows Optional argument for the number of rows in the plot.
#' Only applies if \code{plots_per_page>1}.
#' @param subs.X Optional vector of subtitles for variables in \code{X}. Can be used
#' to note the encoding of the single covariates, especially for dummy
#' variables.
#' @param subs.Z1 Optional vector of subtitles for variables in \code{Z1}. Can be used
#' to note the encoding of the single covariates, especially for dummy
#' variables.
#' @param main.Z2 Optional character string containing the main title for the plot
#' of the Z2 parameters.
#' @param ... Further plot arguments.
#' @author Gunther Schauberger\cr \email{gunther.schauberger@@tum.de}
#' @seealso \code{\link{BTLLasso}}, \code{\link{cv.BTLLasso}},
#' \code{\link{paths}}
#' @references Schauberger, Gunther and Tutz, Gerhard (2019): BTLLasso - A Common Framework and Software
#' Package for the Inclusion and Selection of Covariates in Bradley-Terry Models, \emph{Journal of
#' Statistical Software}, 88(9), 1-29, \doi{10.18637/jss.v088.i09}
#'
#' Schauberger, Gunther and Tutz, Gerhard (2017): Subject-specific modelling
#' of paired comparison data: A lasso-type penalty approach, \emph{Statistical Modelling},
#' 17(3), 223 - 243
#'
#' Schauberger, Gunther, Groll Andreas and Tutz, Gerhard (2018):
#' Analysis of the importance of on-field covariates in the German Bundesliga,
#' \emph{Journal of Applied Statistics}, 45(9), 1561 - 1578
#' @keywords BTLLasso paths parameter paths
#' @examples
#'
#' \dontrun{
#' op <- par(no.readonly = TRUE)
#'
#' ##############################
#' ##### Example with simulated data set containing X, Z1 and Z2
#' ##############################
#' data(SimData)
#'
#' ## Specify control argument
#' ## -> allow for object-specific order effects and penalize intercepts
#' ctrl <- ctrl.BTLLasso(penalize.intercepts = TRUE, object.order.effect = TRUE,
#' penalize.order.effect.diffs = TRUE)
#'
#' ## Simple BTLLasso model for tuning parameters lambda
#' m.sim <- BTLLasso(Y = SimData$Y, X = SimData$X, Z1 = SimData$Z1,
#' Z2 = SimData$Z2, control = ctrl)
#' m.sim
#'
#' par(xpd = TRUE)
#' plot(m.sim)
#'
#'
#' ## Cross-validate BTLLasso model for tuning parameters lambda
#' set.seed(1860)
#' m.sim.cv <- cv.BTLLasso(Y = SimData$Y, X = SimData$X, Z1 = SimData$Z1,
#' Z2 = SimData$Z2, control = ctrl)
#' m.sim.cv
#' coef(m.sim.cv)
#' logLik(m.sim.cv)
#'
#' head(predict(m.sim.cv, type="response"))
#' head(predict(m.sim.cv, type="trait"))
#'
#' plot(m.sim.cv, plots_per_page = 4)
#'
#'
#' ## Example for bootstrap intervals for illustration only
#' ## Don't calculate bootstrap intervals with B = 20!!!!
#' set.seed(1860)
#' m.sim.boot <- boot.BTLLasso(m.sim.cv, B = 20, cores = 20)
#' m.sim.boot
#' plot(m.sim.boot, plots_per_page = 4)
#'
#'
#' ##############################
#' ##### Example with small version from GLES data set
#' ##############################
#' data(GLESsmall)
#'
#' ## extract data and center covariates for better interpretability
#' Y <- GLESsmall$Y
#' X <- scale(GLESsmall$X, scale = FALSE)
#' Z1 <- scale(GLESsmall$Z1, scale = FALSE)
#'
#' ## vector of subtitles, containing the coding of the X covariates
#' subs.X <- c('', 'female (1); male (0)')
#'
#' ## Cross-validate BTLLasso model
#' m.gles.cv <- cv.BTLLasso(Y = Y, X = X, Z1 = Z1)
#' m.gles.cv
#'
#' coef(m.gles.cv)
#' logLik(m.gles.cv)
#'
#' head(predict(m.gles.cv, type="response"))
#' head(predict(m.gles.cv, type="trait"))
#'
#' par(xpd = TRUE, mar = c(5,4,4,6))
#' plot(m.gles.cv, subs.X = subs.X, plots_per_page = 4, which = 2:5)
#' paths(m.gles.cv, y.axis = 'L2')
#'
#'
#' ##############################
#' ##### Example with Bundesliga data set
#' ##############################
#' data(Buli1516)
#'
#' Y <- Buli1516$Y5
#'
#' Z1 <- scale(Buli1516$Z1, scale = FALSE)
#'
#' ctrl.buli <- ctrl.BTLLasso(object.order.effect = TRUE,
#' name.order = "Home",
#' penalize.order.effect.diffs = TRUE,
#' penalize.order.effect.absolute = FALSE,
#' order.center = TRUE, lambda2 = 1e-2)
#'
#' set.seed(1860)
#' m.buli <- cv.BTLLasso(Y = Y, Z1 = Z1, control = ctrl.buli)
#' m.buli
#'
#' par(xpd = TRUE, mar = c(5,4,4,6))
#' plot(m.buli)
#'
#'
#' ##############################
#' ##### Example with Topmodel data set
#' ##############################
#' data("Topmodel2007", package = "psychotree")
#'
#' Y.models <- response.BTLLasso(Topmodel2007$preference)
#' X.models <- scale(model.matrix(preference~., data = Topmodel2007)[,-1])
#' rownames(X.models) <- paste0("Subject",1:nrow(X.models))
#' colnames(X.models) <- c("Gender","Age","KnowShow","WatchShow","WatchFinal")
#'
#' set.seed(5)
#' m.models <- cv.BTLLasso(Y = Y.models, X = X.models)
#' plot(m.models, plots_per_page = 6)
#'
#' par(op)
#' }
plot.BTLLasso <- function(x, plots_per_page = 1, ask_new = TRUE,
rescale = FALSE, which = "all",
equal.ranges = FALSE, x.axis = c("loglambda", "lambda"),
rows = NULL, subs.X = NULL, subs.Z1 = NULL,
main.Z2 = "Obj-spec. Covariates", ...) {
op <- par(no.readonly = TRUE)
if(length(x$lambda)==1){
stop("Only one tuning parameter, nothing to plot!")
}
## get correct x axis
x.axis <- match.arg(x.axis)
# browser()
if (x.axis == "lambda") {
norm <- x$lambda
norm.range <- rev(range(norm))
x.axis.name <- expression(lambda)
}
if (x.axis == "loglambda") {
norm <- log(x$lambda + 1)
norm.range <- rev(range(norm))
x.axis.name <- expression(log(lambda + 1))
}
## get basic parameters
m <- x$Y$m
n.theta <- x$design$n.theta
n.order <- x$design$n.order
n.intercepts <- x$design$n.intercepts
if (n.intercepts > 0) {
n.intercepts <- n.intercepts + 1
}
p.X <- x$design$p.X
p.Z1 <- x$design$p.Z1
p.Z2 <- x$design$p.Z2
## get object labels
labels <- x$Y$object.names
## get all coefficients
coefs <- x$coefs.repar
## get range of all coefs if necessary
y.range <- NA
if(equal.ranges){
y.range <- range(coefs)
}
## initialize matrix of all parameters to plot
coef.plot <- c()
  ## index.plots contains, for each column of coef.plot, the number of the plot it belongs to
index.plots <- c()
## running index for current plot
index.num <- 1
  ## will contain all labels for the right-hand side annotation
all.labs <- c()
## will contain all mains
all.mains <- c()
all.subs <- c()
## go through all possible model components
if (n.order > 0) {
order.effects <- coefs[, (n.theta + 1):(n.theta + n.order)]
coef.plot <- cbind(coef.plot, order.effects)
index.plots <- c(index.plots, rep(index.num,n.order))
index.num <- index.num+1
if(n.order>1){
all.labs <- c(all.labs,labels)
}else{
all.labs <- c(all.labs,"")
}
all.mains <- c(all.mains, x$control$name.order)
all.subs <- c(all.subs, "")
}
if (n.intercepts > 0) {
intercepts <- coefs[, (n.theta + n.order + 1):(n.theta +
n.order + n.intercepts), drop = FALSE]
coef.plot <- cbind(coef.plot, intercepts)
index.plots <- c(index.plots, rep(index.num, n.intercepts))
index.num <- index.num+1
all.labs <- c(all.labs,labels)
all.mains <- c(all.mains, "Intercepts")
all.subs <- c(all.subs, "")
}
if (p.X > 0) {
gamma.X <- coefs[, (n.theta + n.order + n.intercepts +
1):(n.theta + n.order + n.intercepts + p.X * m),
drop = FALSE]
if (rescale) {
gamma.X <- t(t(gamma.X)/rep(x$design$sd.X, each = m))
}
coef.plot <- cbind(coef.plot, gamma.X)
index.plots <- c(index.plots, rep(index.num:(index.num+p.X-1), each=m))
index.num <- index.num+p.X
all.labs <- c(all.labs,rep(labels,p.X))
all.mains <- c(all.mains, x$design$vars.X)
all.subs <- c(all.subs, subs.X)
}
if (p.Z1 > 0) {
gamma.Z1 <- coefs[, (n.theta + n.order + n.intercepts +
p.X * m + 1):(n.theta + n.order + n.intercepts +
p.X * m + p.Z1 * m), drop = FALSE]
if (rescale) {
gamma.Z1 <- t(t(gamma.Z1)/rep(x$design$sd.Z1,
each = m))
}
coef.plot <- cbind(coef.plot, gamma.Z1)
index.plots <- c(index.plots, rep(index.num:(index.num+p.Z1-1), each=m))
index.num <- index.num+p.Z1
all.labs <- c(all.labs,rep(labels,p.Z1))
all.mains <- c(all.mains, x$design$vars.Z1)
all.subs <- c(all.subs, subs.Z1)
}
if (p.Z2 > 0) {
gamma.Z2 <- coefs[, (n.theta + n.order + n.intercepts +
p.X * m + p.Z1 * m + 1):(n.theta + n.order + n.intercepts +
p.X * m + p.Z1 * m + p.Z2), drop = FALSE]
if (rescale) {
gamma.Z2 <- t(t(gamma.Z2)/x$design$sd.Z2)
}
coef.plot <- cbind(coef.plot, gamma.Z2)
index.plots <- c(index.plots, rep(index.num, p.Z2))
index.num <- index.num+1
all.labs <- c(all.labs,x$design$vars.Z2)
all.mains <- c(all.mains, main.Z2)
all.subs <- c(all.subs, "")
}
n.plots <- index.num-1
suppressWarnings(if(identical(which,"all")){
which <- 1:n.plots
})
pages <- ceiling(length(which)/plots_per_page)
if (is.null(rows)) {
rows <- floor(sqrt(plots_per_page))
}
cols <- ceiling(plots_per_page/rows)
plots_on_page <- 0
pages_done <- 0
par(mfrow=c(rows, cols))
for(u in 1:n.plots){
if(u %in% which){
plothelp.comp(u, norm, coef.plot, index.plots,
all.labs, all.mains, all.subs, y.range,
x.axis.name, x$criterion, norm.range, ...)
plots_on_page <- plots_on_page+1
if(plots_on_page==plots_per_page & pages_done<(pages-1)){
plots_on_page <- 0
pages_done <- pages_done+1
if(interactive() & ask_new)
{readline("Press enter for next plot!")}
par(mfrow=c(rows, cols))
}
}
}
par(op)
}
plothelp.comp <- function(u, norm, coef.plot, index.plots,
all.labs, all.mains, all.subs, y.range,
x.axis.name, criterion, norm.range, ...){
if (!is.null(criterion)) {
x.axis.min <- norm[which.min(criterion)]
}
index.u <- which(index.plots==u)
l.u <- length(index.u)
cur.coef <- coef.plot[,index.u,drop=FALSE]
final.u <- cur.coef[nrow(cur.coef),]
  if(all(is.na(y.range))){
y.range.u <- range(cur.coef)
}else{
y.range.u <- y.range
}
plot(norm, cur.coef[,1], ylim = y.range.u, type = "l",
main = "", ylab = "estimates", xlab = x.axis.name,
xlim = norm.range, frame.plot = FALSE,
lwd=par()$lwd, ...)
if(l.u>1){
for (uu in 2:l.u) {
lines(norm, cur.coef[, uu], lwd=par()$lwd)
}
}
title(main = all.mains[u], line = 1.2)
mtext(all.subs[u], side = 3, line = 0.2, cex = par()$cex)
if (!is.null(criterion)) {
segments( x.axis.min, min(y.range.u),
x.axis.min, max(y.range.u) ,
col=2,lty=2,lwd=par()$lwd)
}
x.lab1 <- norm[length(norm)]-abs(diff(range(norm)))*0.02
x.lab2 <- norm[length(norm)]-abs(diff(range(norm)))*0.005
y.lab1 <- final.u
y.lab2 <- spread.labs(y.lab1, 1.2*strheight("A"))
text( x.lab1, y.lab2, all.labs[index.u],pos=4)
segments( x.lab2, y.lab1,
x.lab1, y.lab2 ,col="gray")
}
|
/scratch/gouwar.j/cran-all/cranData/BTLLasso/R/plot.BTLLasso.R
|
#' Plot bootstrap intervals for BTLLasso
#'
#' Plots bootstrap intervals for every single coefficient based on bootstrap estimates
#' calculated by \code{\link{boot.BTLLasso}}. Bootstrap
#' intervals are separated by covariates, every covariate is plotted
#' separately.
#'
#' @param x boot.BTLLasso object
#' @param quantiles Which empirical quantiles of the bootstrap estimates should be plotted?
#' @param plots_per_page Number of plots per page, internally specified by \code{par(mfrow=...)}.
#' @param ask_new If TRUE, the user is asked before each plot.
#' @param rescale Should the parameter estimates be rescaled for plotting? Only
#' applies if \code{scale = TRUE} was specified in \code{cv.BTLLasso}.
#' @param which Integer vector to specify which parameters/variables to plot.
#' @param include.zero Should all plots contain zero?
#' @param rows Optional argument for the number of rows in the plot.
#' Only applies if \code{plots_per_page>1}.
#' @param subs.X Optional vector of subtitles for variables in \code{X}. Can be used
#' to note the encoding of the single covariates, especially for dummy
#' variables.
#' @param subs.Z1 Optional vector of subtitles for variables in \code{Z1}. Can be used
#' to note the encoding of the single covariates, especially for dummy
#' variables.
#' @param main.Z2 Optional character string containing the main title for the plot
#' of the bootstrap intervals for the Z2 parameters.
#' @param ... other parameters to be passed through to plot function.
#' @author Gunther Schauberger\cr \email{gunther.schauberger@@tum.de}
#' @seealso \code{\link{boot.BTLLasso}}, \code{\link{BTLLasso}},
#' \code{\link{cv.BTLLasso}}
#' @references Schauberger, Gunther and Tutz, Gerhard (2019): BTLLasso - A Common Framework and Software
#' Package for the Inclusion and Selection of Covariates in Bradley-Terry Models, \emph{Journal of
#' Statistical Software}, 88(9), 1-29, \doi{10.18637/jss.v088.i09}
#'
#' Schauberger, Gunther and Tutz, Gerhard (2017): Subject-specific modelling
#' of paired comparison data: A lasso-type penalty approach, \emph{Statistical Modelling},
#' 17(3), 223 - 243
#'
#' Schauberger, Gunther, Groll Andreas and Tutz, Gerhard (2018):
#' Analysis of the importance of on-field covariates in the German Bundesliga,
#' \emph{Journal of Applied Statistics}, 45(9), 1561 - 1578
#' @keywords BTLLasso interval bootstrap
#' @examples
#'
#' \dontrun{
#' op <- par(no.readonly = TRUE)
#'
#' ##############################
#' ##### Example with simulated data set containing X, Z1 and Z2
#' ##############################
#' data(SimData)
#'
#' ## Specify control argument
#' ## -> allow for object-specific order effects and penalize intercepts
#' ctrl <- ctrl.BTLLasso(penalize.intercepts = TRUE, object.order.effect = TRUE,
#' penalize.order.effect.diffs = TRUE)
#'
#' ## Simple BTLLasso model for tuning parameters lambda
#' m.sim <- BTLLasso(Y = SimData$Y, X = SimData$X, Z1 = SimData$Z1,
#' Z2 = SimData$Z2, control = ctrl)
#' m.sim
#'
#' par(xpd = TRUE)
#' plot(m.sim)
#'
#'
#' ## Cross-validate BTLLasso model for tuning parameters lambda
#' set.seed(1860)
#' m.sim.cv <- cv.BTLLasso(Y = SimData$Y, X = SimData$X, Z1 = SimData$Z1,
#' Z2 = SimData$Z2, control = ctrl)
#' m.sim.cv
#' coef(m.sim.cv)
#' logLik(m.sim.cv)
#'
#' head(predict(m.sim.cv, type="response"))
#' head(predict(m.sim.cv, type="trait"))
#'
#' plot(m.sim.cv, plots_per_page = 4)
#'
#'
#' ## Example for bootstrap intervals for illustration only
#' ## Don't calculate bootstrap intervals with B = 20!!!!
#' set.seed(1860)
#' m.sim.boot <- boot.BTLLasso(m.sim.cv, B = 20, cores = 20)
#' m.sim.boot
#' plot(m.sim.boot, plots_per_page = 4)
#'
#'
#' ##############################
#' ##### Example with small version from GLES data set
#' ##############################
#' data(GLESsmall)
#'
#' ## extract data and center covariates for better interpretability
#' Y <- GLESsmall$Y
#' X <- scale(GLESsmall$X, scale = FALSE)
#' Z1 <- scale(GLESsmall$Z1, scale = FALSE)
#'
#' ## vector of subtitles, containing the coding of the X covariates
#' subs.X <- c('', 'female (1); male (0)')
#'
#' ## Cross-validate BTLLasso model
#' m.gles.cv <- cv.BTLLasso(Y = Y, X = X, Z1 = Z1)
#' m.gles.cv
#'
#' coef(m.gles.cv)
#' logLik(m.gles.cv)
#'
#' head(predict(m.gles.cv, type="response"))
#' head(predict(m.gles.cv, type="trait"))
#'
#' par(xpd = TRUE, mar = c(5,4,4,6))
#' plot(m.gles.cv, subs.X = subs.X, plots_per_page = 4, which = 2:5)
#' paths(m.gles.cv, y.axis = 'L2')
#'
#'
#' ##############################
#' ##### Example with Bundesliga data set
#' ##############################
#' data(Buli1516)
#'
#' Y <- Buli1516$Y5
#'
#' Z1 <- scale(Buli1516$Z1, scale = FALSE)
#'
#' ctrl.buli <- ctrl.BTLLasso(object.order.effect = TRUE,
#' name.order = "Home",
#' penalize.order.effect.diffs = TRUE,
#' penalize.order.effect.absolute = FALSE,
#' order.center = TRUE, lambda2 = 1e-2)
#'
#' set.seed(1860)
#' m.buli <- cv.BTLLasso(Y = Y, Z1 = Z1, control = ctrl.buli)
#' m.buli
#'
#' par(xpd = TRUE, mar = c(5,4,4,6))
#' plot(m.buli)
#'
#'
#' ##############################
#' ##### Example with Topmodel data set
#' ##############################
#' data("Topmodel2007", package = "psychotree")
#'
#' Y.models <- response.BTLLasso(Topmodel2007$preference)
#' X.models <- scale(model.matrix(preference~., data = Topmodel2007)[,-1])
#' rownames(X.models) <- paste0("Subject",1:nrow(X.models))
#' colnames(X.models) <- c("Gender","Age","KnowShow","WatchShow","WatchFinal")
#'
#' set.seed(5)
#' m.models <- cv.BTLLasso(Y = Y.models, X = X.models)
#' plot(m.models, plots_per_page = 6)
#'
#' par(op)
#' }
plot.boot.BTLLasso <- function(x, quantiles = c(0.025, 0.975),
plots_per_page = 1, ask_new = TRUE, rescale = FALSE,
which = "all", include.zero = TRUE, rows = NULL,
subs.X = NULL, subs.Z1 = NULL,
main.Z2 = "Obj-spec. Covariates", ...){
op <- par(no.readonly = TRUE)
## extract important things from the cv.BTLLasso object
model <- x$cv.model
epsilon <- model$control$epsilon
accuracy <- -log10(epsilon)
covariates <- c(model$design$vars.X, model$design$vars.Z1,
model$design$vars.Z2)
## running index for current plot
index.num <- 1
## create matrix containing bootstrap intervals
conf.ints <- apply(x$estimatesBrepar, 2, quantile, probs = quantiles,
type = 1, na.rm = TRUE)
## some more important parameters
m <- model$Y$m
labels <- model$Y$object.names
n.theta <- model$design$n.theta
n.order <- model$design$n.order
n.intercepts <- model$design$n.intercepts
if (n.intercepts > 0) {
n.intercepts <- n.intercepts + 1
}
p.X <- model$design$p.X
p.Z1 <- model$design$p.Z1
p.Z2 <- model$design$p.Z2
estimates <- model$coefs.repar[which.min(model$criterion),]
estimates <- round(estimates, accuracy)
conf.ints <- round(conf.ints, accuracy)
p <- p.global <- 0
gamma <- gamma.ci <- global <- global.ci <- c()
covar <- covar.global <- c()
start <- n.theta + 1
all.subs <- c()
if (n.order > 0) {
end <- start + n.order - 1
if (n.order == 1) {
global <- c(global, estimates[start:end])
global.ci <- cbind(global.ci, conf.ints[, start:end])
covar.global <- c(covar.global, model$control$name.order)
p.global <- p.global + 1
}
if (n.order > 1) {
gamma <- c(gamma, estimates[start:end])
gamma.ci <- cbind(gamma.ci, conf.ints[, start:end])
covar <- c(covar, model$control$name.order)
p <- p + 1
all.subs <- c(all.subs, "")
}
}
start <- n.theta + n.order + 1
if (n.intercepts > 0) {
end <- start + n.intercepts - 1
covar <- c(covar, "Intercept")
gamma <- c(gamma, estimates[start:end])
gamma.ci <- cbind(gamma.ci, conf.ints[, start:end])
p <- 1 + p
all.subs <- c(all.subs, "")
}
start <- n.theta + n.order + n.intercepts + 1
if (p.X > 0) {
end <- start + p.X * m - 1
covar <- c(covar, model$design$vars.X)
if (rescale) {
est <- estimates[start:end]/rep(model$design$sd.X,
each = m)
est.ci <- t(t(conf.ints[, start:end])/rep(model$design$sd.X,
each = m))
} else {
est <- estimates[start:end]
est.ci <- conf.ints[, start:end]
}
gamma <- c(gamma, est)
gamma.ci <- cbind(gamma.ci, est.ci)
p <- p + p.X
if (is.null(subs.X)) {
subs.X <- rep("", p.X)
}
all.subs <- c(all.subs, subs.X)
}
start <- n.theta + n.order + n.intercepts + p.X * m + 1
if (p.Z1 > 0) {
end <- start + p.Z1 * m - 1
covar <- c(covar, model$design$vars.Z1)
if (rescale) {
est <- estimates[start:end]/rep(model$design$sd.Z1,
each = m)
est.ci <- t(t(conf.ints[, start:end])/rep(model$design$sd.Z1,
each = m))
} else {
est <- estimates[start:end]
est.ci <- conf.ints[, start:end]
}
gamma <- c(gamma, est)
gamma.ci <- cbind(gamma.ci, est.ci)
p <- p + p.Z1
if (is.null(subs.Z1)) {
subs.Z1 <- rep("", p.Z1)
}
all.subs <- c(all.subs, subs.Z1)
}
start <- n.theta + n.order + n.intercepts + p.X * m + p.Z1 *
m + 1
if (p.Z2 > 0) {
end <- start + p.Z2 - 1
covar.global <- c(covar.global, model$design$vars.Z2)
if (rescale) {
est <- estimates[start:end]/model$design$sd.Z2
est.ci <- t(t(conf.ints[, start:end])/model$design$sd.Z2)
} else {
est <- estimates[start:end]
est.ci <- conf.ints[, start:end]
}
global <- c(global, est)
global.ci <- cbind(global.ci, est.ci)
p.global <- p.global + p.Z2
}
p.tot <- p
if (p.global > 0) {
p.tot <- p.tot + 1
}
  suppressWarnings(if(identical(which, "all")){
which <- 1:p.tot
})
pages <- ceiling(length(which)/plots_per_page)
if (is.null(rows)) {
rows <- floor(sqrt(plots_per_page))
}
cols <- ceiling(plots_per_page/rows)
plots_on_page <- 0
pages_done <- 0
par(mfrow=c(rows, cols))
index <- 1
for (i in 1:p) {
if(i %in% which){
xlim <- range(gamma.ci[, index:(index + m - 1)])
if (include.zero) {
xlim <- range(0, xlim)
}
plot(gamma[index:(index + m - 1)], 1:m, xlim = xlim,
pch = 16, yaxt = "n", xlab = "", ylab = "", main = "", ...)
segments(y0 = 1:m, x0 = gamma.ci[1, index:(index + m -
1)], x1 = gamma.ci[2, index:(index + m - 1)])
axis(2, at = 1:m, labels = labels, las = 2)
title(covar[i], line = 1.2)
mtext(all.subs[i], side = 3, line = 0.2, cex = par()$cex)
segments( 0, 1, 0, m , col="lightgray",lty=2,lwd=par()$lwd)
plots_on_page <- plots_on_page+1
if(plots_on_page==plots_per_page & pages_done<(pages-1)){
plots_on_page <- 0
pages_done <- pages_done+1
if(interactive() & ask_new)
{readline("Press enter for next plot!")}
par(mfrow=c(rows, cols))
}
}
index <- index + m
}
if (p.global > 0 & (p+1) %in% which) {
xlim <- range(global.ci)
if (include.zero) {
xlim <- range(0, xlim)
}
plot(global, 1:p.global, xlim = xlim, pch = 16, yaxt = "n",
xlab = "", ylab = "", main = "Global Parameters", ...)
segments(y0 = 1:p.global, x0 = global.ci[1, ], x1 = global.ci[2,])
axis(2, at = 1:p.global, labels = covar.global, las = 2)
segments( 0, 1, 0, p.global , col="lightgray",lty=2,lwd=par()$lwd)
}
par(op)
invisible(conf.ints)
}
|
/scratch/gouwar.j/cran-all/cranData/BTLLasso/R/plot.boot.BTLLasso.R
|
#' Predict function for BTLLasso
#'
#' Predict function for a \code{BTLLasso} object or a \code{cv.BTLLasso}
#' object. Predictions can be linear predictors, probabilities or the values
#' of the latent traits for both competitors in the paired comparisons.
#'
#' For \code{BTLLasso} objects, the result is a list of matrices containing predictions
#' for every single tuning parameter; for \code{cv.BTLLasso} objects, a single
#' matrix is returned.
#'
#' @param object \code{BTLLasso} or \code{cv.BTLLasso} object
#' @param newdata List possibly containing slots Y, X, Z1 and Z2 to use new data for prediction.
#' @param type Type "link" gives the linear predictors for separate categories,
#' type "response" gives the respective probabilities. Type "trait" gives the estimated latent traits
#' of both competitors/objects in the paired comparisons.
#' @param ... Further predict arguments.
#' @author Gunther Schauberger\cr \email{gunther.schauberger@@tum.de}
#' @seealso \code{\link{BTLLasso}}, \code{\link{cv.BTLLasso}}
#' @references Schauberger, Gunther and Tutz, Gerhard (2019): BTLLasso - A Common Framework and Software
#' Package for the Inclusion and Selection of Covariates in Bradley-Terry Models, \emph{Journal of
#' Statistical Software}, 88(9), 1-29, \doi{10.18637/jss.v088.i09}
#'
#' Schauberger, Gunther and Tutz, Gerhard (2017): Subject-specific modelling
#' of paired comparison data: A lasso-type penalty approach, \emph{Statistical Modelling},
#' 17(3), 223 - 243
#'
#' Schauberger, Gunther, Groll Andreas and Tutz, Gerhard (2018):
#' Analysis of the importance of on-field covariates in the German Bundesliga,
#' \emph{Journal of Applied Statistics}, 45(9), 1561 - 1578
#' @keywords BTLLasso paths parameter paths
#' @examples
#'
#' \dontrun{
#' op <- par(no.readonly = TRUE)
#'
#' ##############################
#' ##### Example with simulated data set containing X, Z1 and Z2
#' ##############################
#' data(SimData)
#'
#' ## Specify control argument
#' ## -> allow for object-specific order effects and penalize intercepts
#' ctrl <- ctrl.BTLLasso(penalize.intercepts = TRUE, object.order.effect = TRUE,
#' penalize.order.effect.diffs = TRUE)
#'
#' ## Simple BTLLasso model for tuning parameters lambda
#' m.sim <- BTLLasso(Y = SimData$Y, X = SimData$X, Z1 = SimData$Z1,
#' Z2 = SimData$Z2, control = ctrl)
#' m.sim
#'
#' par(xpd = TRUE)
#' plot(m.sim)
#'
#'
#' ## Cross-validate BTLLasso model for tuning parameters lambda
#' set.seed(1860)
#' m.sim.cv <- cv.BTLLasso(Y = SimData$Y, X = SimData$X, Z1 = SimData$Z1,
#' Z2 = SimData$Z2, control = ctrl)
#' m.sim.cv
#' coef(m.sim.cv)
#' logLik(m.sim.cv)
#'
#' head(predict(m.sim.cv, type="response"))
#' head(predict(m.sim.cv, type="trait"))
#'
#' plot(m.sim.cv, plots_per_page = 4)
#'
#'
#' ## Example for bootstrap intervals for illustration only
#' ## Don't calculate bootstrap intervals with B = 20!!!!
#' set.seed(1860)
#' m.sim.boot <- boot.BTLLasso(m.sim.cv, B = 20, cores = 20)
#' m.sim.boot
#' plot(m.sim.boot, plots_per_page = 4)
#'
#'
#' ##############################
#' ##### Example with small version from GLES data set
#' ##############################
#' data(GLESsmall)
#'
#' ## extract data and center covariates for better interpretability
#' Y <- GLESsmall$Y
#' X <- scale(GLESsmall$X, scale = FALSE)
#' Z1 <- scale(GLESsmall$Z1, scale = FALSE)
#'
#' ## vector of subtitles, containing the coding of the X covariates
#' subs.X <- c('', 'female (1); male (0)')
#'
#' ## Cross-validate BTLLasso model
#' m.gles.cv <- cv.BTLLasso(Y = Y, X = X, Z1 = Z1)
#' m.gles.cv
#'
#' coef(m.gles.cv)
#' logLik(m.gles.cv)
#'
#' head(predict(m.gles.cv, type="response"))
#' head(predict(m.gles.cv, type="trait"))
#'
#' par(xpd = TRUE, mar = c(5,4,4,6))
#' plot(m.gles.cv, subs.X = subs.X, plots_per_page = 4, which = 2:5)
#' paths(m.gles.cv, y.axis = 'L2')
#'
#'
#' ##############################
#' ##### Example with Bundesliga data set
#' ##############################
#' data(Buli1516)
#'
#' Y <- Buli1516$Y5
#'
#' Z1 <- scale(Buli1516$Z1, scale = FALSE)
#'
#' ctrl.buli <- ctrl.BTLLasso(object.order.effect = TRUE,
#' name.order = "Home",
#' penalize.order.effect.diffs = TRUE,
#' penalize.order.effect.absolute = FALSE,
#' order.center = TRUE, lambda2 = 1e-2)
#'
#' set.seed(1860)
#' m.buli <- cv.BTLLasso(Y = Y, Z1 = Z1, control = ctrl.buli)
#' m.buli
#'
#' par(xpd = TRUE, mar = c(5,4,4,6))
#' plot(m.buli)
#'
#'
#' ##############################
#' ##### Example with Topmodel data set
#' ##############################
#' data("Topmodel2007", package = "psychotree")
#'
#' Y.models <- response.BTLLasso(Topmodel2007$preference)
#' X.models <- scale(model.matrix(preference~., data = Topmodel2007)[,-1])
#' rownames(X.models) <- paste0("Subject",1:nrow(X.models))
#' colnames(X.models) <- c("Gender","Age","KnowShow","WatchShow","WatchFinal")
#'
#' set.seed(5)
#' m.models <- cv.BTLLasso(Y = Y.models, X = X.models)
#' plot(m.models, plots_per_page = 6)
#'
#' par(op)
#' }
predict.BTLLasso <- function(object, newdata = list(),
type = c("link", "response", "trait"), ...){
type <- match.arg(type)
if(inherits(object,"cv.BTLLasso")){
coef.cv <- object$coefs.repar[which.min(object$criterion),,drop=FALSE]
ret.list <- predict.help(coef.cv, object = object, newdata = newdata,
type = type)
ret.list <- ret.list[[1]]
}else{
ret.list <- predict.help(object$coefs.repar, object = object, newdata = newdata,
type = type)
}
ret.list
}
|
/scratch/gouwar.j/cran-all/cranData/BTLLasso/R/predict.BTLLasso.R
|
predict.help <- function(coefs, object, newdata , type){
q <- object$Y$q
n.theta <- object$design$n.theta
if(length(newdata)==0){
des.mat <- object$design$design.repar
if(type=="trait"){
des.first <- design.BTLLasso(Y = object$Y, X = object$X, Z1 = object$Z1,
Z2 = object$Z2, control = object$control,
only.first = TRUE)$design.repar
des.mat <- des.mat[seq(1,nrow(des.mat)-1,by=q),]
des.mat[,1:n.theta] <- 0
des.first <- des.first[seq(1,nrow(des.first)-1,by=q),]
des.first[,1:n.theta] <- 0
}
}else{
des.mat <- design.BTLLasso(Y = newdata$Y, X = newdata$X, Z1 = newdata$Z1,
Z2 = newdata$Z2, control = object$control,
sd.X = object$design$sd.X, sd.Z1 = object$design$sd.Z1,
sd.Z2 = object$design$sd.Z2)$design.repar
if(type=="trait"){
des.first <- design.BTLLasso(Y = newdata$Y, X = newdata$X, Z1 = newdata$Z1,
Z2 = newdata$Z2, control = object$control,
sd.X = object$design$sd.X, sd.Z1 = object$design$sd.Z1,
sd.Z2 = object$design$sd.Z2, only.first = TRUE)$design.repar
des.mat <- des.mat[seq(1,nrow(des.mat)-1,by=q),]
des.mat[,1:n.theta] <- 0
des.first <- des.first[seq(1,nrow(des.first)-1,by=q),]
des.first[,1:n.theta] <- 0
}
}
ncoef <- nrow(coefs)
ret.list <- list()
for(l in 1:ncoef){
coefs.l <- coefs[l,]
if(type!="trait"){
eta <- des.mat%*%coefs.l
ret.mat <- matrix(eta, byrow=TRUE, ncol=object$Y$q)
if(type=="response"){
ret.mat <- exp(ret.mat)/(1+exp(ret.mat))
}
}else{
eta.both <- des.mat%*%coefs.l
eta.first <- des.first%*%coefs.l
eta.second <- eta.first-eta.both
ret.mat <- cbind(eta.first, eta.second)
colnames(ret.mat) <- c("first.object", "second.object")
}
ret.list[[l]] <- ret.mat
}
ret.list
}
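
## Illustrative sketch (hypothetical numbers, not part of the package API) of the two
## transformations applied above: type = "response" maps the linear predictor eta to a
## probability via the inverse logit, and type = "trait" splits eta into the two objects'
## latent strengths with eta.second = eta.first - eta.
local({
  eta <- c(-1.2, 0.4, 2.0)                  # hypothetical linear predictors for three comparisons
  prob <- exp(eta)/(1 + exp(eta))           # what type = "response" returns
  stopifnot(isTRUE(all.equal(prob, plogis(eta))))  # identical to the logistic CDF
  eta.first <- c(0.3, 0.9, 1.5)             # hypothetical strengths of the first objects
  cbind(first.object = eta.first, second.object = eta.first - eta)  # type = "trait" layout
})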
|
/scratch/gouwar.j/cran-all/cranData/BTLLasso/R/predict.help.R
|
#' Print function for BTLLasso objects
#'
#' Prints the most important output of \code{BTLLasso} objects.
#'
#' @method print BTLLasso
#' @param x \code{BTLLasso} object
#' @param \dots possible further arguments for print command
#' @author Gunther Schauberger\cr \email{gunther.schauberger@@tum.de}
#' @seealso \code{\link{BTLLasso}}
#' @references Schauberger, Gunther and Tutz, Gerhard (2019): BTLLasso - A Common Framework and Software
#' Package for the Inclusion and Selection of Covariates in Bradley-Terry Models, \emph{Journal of
#' Statistical Software}, 88(9), 1-29, \doi{10.18637/jss.v088.i09}
#'
#' Schauberger, Gunther and Tutz, Gerhard (2017): Subject-specific modelling
#' of paired comparison data: A lasso-type penalty approach, \emph{Statistical Modelling},
#' 17(3), 223 - 243
#'
#' Schauberger, Gunther, Groll, Andreas and Tutz, Gerhard (2018):
#' Analysis of the importance of on-field covariates in the German Bundesliga,
#' \emph{Journal of Applied Statistics}, 45(9), 1561 - 1578
#' @keywords BTLLasso
#' @examples
#'
#' \dontrun{
#' op <- par(no.readonly = TRUE)
#'
#' ##############################
#' ##### Example with simulated data set containing X, Z1 and Z2
#' ##############################
#' data(SimData)
#'
#' ## Specify control argument
#' ## -> allow for object-specific order effects and penalize intercepts
#' ctrl <- ctrl.BTLLasso(penalize.intercepts = TRUE, object.order.effect = TRUE,
#' penalize.order.effect.diffs = TRUE)
#'
#' ## Simple BTLLasso model for tuning parameters lambda
#' m.sim <- BTLLasso(Y = SimData$Y, X = SimData$X, Z1 = SimData$Z1,
#' Z2 = SimData$Z2, control = ctrl)
#' m.sim
#'
#' par(xpd = TRUE)
#' plot(m.sim)
#'
#'
#' ## Cross-validate BTLLasso model for tuning parameters lambda
#' set.seed(1860)
#' m.sim.cv <- cv.BTLLasso(Y = SimData$Y, X = SimData$X, Z1 = SimData$Z1,
#' Z2 = SimData$Z2, control = ctrl)
#' m.sim.cv
#' coef(m.sim.cv)
#' logLik(m.sim.cv)
#'
#' head(predict(m.sim.cv, type="response"))
#' head(predict(m.sim.cv, type="trait"))
#'
#' plot(m.sim.cv, plots_per_page = 4)
#'
#'
#' ## Example for bootstrap intervals for illustration only
#' ## Don't calculate bootstrap intervals with B = 20!!!!
#' set.seed(1860)
#' m.sim.boot <- boot.BTLLasso(m.sim.cv, B = 20, cores = 20)
#' m.sim.boot
#' plot(m.sim.boot, plots_per_page = 4)
#'
#'
#' ##############################
#' ##### Example with small version from GLES data set
#' ##############################
#' data(GLESsmall)
#'
#' ## extract data and center covariates for better interpretability
#' Y <- GLESsmall$Y
#' X <- scale(GLESsmall$X, scale = FALSE)
#' Z1 <- scale(GLESsmall$Z1, scale = FALSE)
#'
#' ## vector of subtitles, containing the coding of the X covariates
#' subs.X <- c('', 'female (1); male (0)')
#'
#' ## Cross-validate BTLLasso model
#' m.gles.cv <- cv.BTLLasso(Y = Y, X = X, Z1 = Z1)
#' m.gles.cv
#'
#' coef(m.gles.cv)
#' logLik(m.gles.cv)
#'
#' head(predict(m.gles.cv, type="response"))
#' head(predict(m.gles.cv, type="trait"))
#'
#' par(xpd = TRUE, mar = c(5,4,4,6))
#' plot(m.gles.cv, subs.X = subs.X, plots_per_page = 4, which = 2:5)
#' paths(m.gles.cv, y.axis = 'L2')
#'
#'
#' ##############################
#' ##### Example with Bundesliga data set
#' ##############################
#' data(Buli1516)
#'
#' Y <- Buli1516$Y5
#'
#' Z1 <- scale(Buli1516$Z1, scale = FALSE)
#'
#' ctrl.buli <- ctrl.BTLLasso(object.order.effect = TRUE,
#' name.order = "Home",
#' penalize.order.effect.diffs = TRUE,
#' penalize.order.effect.absolute = FALSE,
#' order.center = TRUE, lambda2 = 1e-2)
#'
#' set.seed(1860)
#' m.buli <- cv.BTLLasso(Y = Y, Z1 = Z1, control = ctrl.buli)
#' m.buli
#'
#' par(xpd = TRUE, mar = c(5,4,4,6))
#' plot(m.buli)
#'
#'
#' ##############################
#' ##### Example with Topmodel data set
#' ##############################
#' data("Topmodel2007", package = "psychotree")
#'
#' Y.models <- response.BTLLasso(Topmodel2007$preference)
#' X.models <- scale(model.matrix(preference~., data = Topmodel2007)[,-1])
#' rownames(X.models) <- paste0("Subject",1:nrow(X.models))
#' colnames(X.models) <- c("Gender","Age","KnowShow","WatchShow","WatchFinal")
#'
#' set.seed(5)
#' m.models <- cv.BTLLasso(Y = Y.models, X = X.models)
#' plot(m.models, plots_per_page = 6)
#'
#' par(op)
#' }
print.BTLLasso <- function(x, ...) {
m <- x$Y$m
n <- x$Y$n
k <- x$Y$q + 1
n.theta <- x$design$n.theta
n.intercepts <- x$design$n.intercepts
if (n.intercepts != 0) {
n.intercepts <- n.intercepts + 1
}
n.order <- x$design$n.order
p.X <- x$design$p.X
p.Z1 <- x$design$p.Z1
p.Z2 <- x$design$p.Z2
lambda <- x$lambda
vars.X <- x$design$vars.X
vars.Z1 <- x$design$vars.Z1
vars.Z2 <- x$design$vars.Z2
labels <- x$Y$object.names
cat("Output of BTLLasso estimation:", "\n")
cat("---", "\n")
cat("Setting:")
cat("\n", n, "subjects")
cat("\n", m, "objects")
cat("\n", k, "response categories")
cat("\n", p.X, "subject-specific covariate(s)")
cat("\n", p.Z1, "subject-object-specific covariate(s) with object-specific effects")
cat("\n", p.Z2, "(subject-)object-specific covariate(s) with global effects")
if (n.order == m) {
cat("\n", n.order, "subject-specific order effects")
}
if (n.order == 1) {
cat("\n", "Global order effect")
}
if (n.order == 0) {
cat("\n", "No order effect")
}
cat("\n", length(lambda), "different tuning parameters",
"\n")
invisible(x)
}
|
/scratch/gouwar.j/cran-all/cranData/BTLLasso/R/print.BTLLasso.R
|
#' Print function for boot.BTLLasso objects
#'
#' Prints the most important output of \code{boot.BTLLasso} objects.
#'
#' @method print boot.BTLLasso
#' @param x \code{boot.BTLLasso} object
#' @param quantiles Which empirical quantiles of the bootstrap estimates should be printed?
#' @param rescale Should the parameter estimates be rescaled for plotting? Only
#' applies if \code{scale = TRUE} was specified in \code{BTLLasso} or \code{cv.BTLLasso}.
#' @param \dots possible further arguments for print command
#' @author Gunther Schauberger\cr \email{gunther.schauberger@@tum.de}
#' @seealso \code{\link{boot.BTLLasso}}
#' @references Schauberger, Gunther and Tutz, Gerhard (2019): BTLLasso - A Common Framework and Software
#' Package for the Inclusion and Selection of Covariates in Bradley-Terry Models, \emph{Journal of
#' Statistical Software}, 88(9), 1-29, \doi{10.18637/jss.v088.i09}
#'
#' Schauberger, Gunther and Tutz, Gerhard (2017): Subject-specific modelling
#' of paired comparison data: A lasso-type penalty approach, \emph{Statistical Modelling},
#' 17(3), 223 - 243
#'
#' Schauberger, Gunther, Groll, Andreas and Tutz, Gerhard (2018):
#' Analysis of the importance of on-field covariates in the German Bundesliga,
#' \emph{Journal of Applied Statistics}, 45(9), 1561 - 1578
#' @keywords BTLLasso
#' @examples
#'
#' \dontrun{
#' op <- par(no.readonly = TRUE)
#'
#' ##############################
#' ##### Example with simulated data set containing X, Z1 and Z2
#' ##############################
#' data(SimData)
#'
#' ## Specify control argument
#' ## -> allow for object-specific order effects and penalize intercepts
#' ctrl <- ctrl.BTLLasso(penalize.intercepts = TRUE, object.order.effect = TRUE,
#' penalize.order.effect.diffs = TRUE)
#'
#' ## Simple BTLLasso model for tuning parameters lambda
#' m.sim <- BTLLasso(Y = SimData$Y, X = SimData$X, Z1 = SimData$Z1,
#' Z2 = SimData$Z2, control = ctrl)
#' m.sim
#'
#' par(xpd = TRUE)
#' plot(m.sim)
#'
#'
#' ## Cross-validate BTLLasso model for tuning parameters lambda
#' set.seed(1860)
#' m.sim.cv <- cv.BTLLasso(Y = SimData$Y, X = SimData$X, Z1 = SimData$Z1,
#' Z2 = SimData$Z2, control = ctrl)
#' m.sim.cv
#' coef(m.sim.cv)
#' logLik(m.sim.cv)
#'
#' head(predict(m.sim.cv, type="response"))
#' head(predict(m.sim.cv, type="trait"))
#'
#' plot(m.sim.cv, plots_per_page = 4)
#'
#'
#' ## Example for bootstrap intervals for illustration only
#' ## Don't calculate bootstrap intervals with B = 20!!!!
#' set.seed(1860)
#' m.sim.boot <- boot.BTLLasso(m.sim.cv, B = 20, cores = 20)
#' m.sim.boot
#' plot(m.sim.boot, plots_per_page = 4)
#'
#'
#' ##############################
#' ##### Example with small version from GLES data set
#' ##############################
#' data(GLESsmall)
#'
#' ## extract data and center covariates for better interpretability
#' Y <- GLESsmall$Y
#' X <- scale(GLESsmall$X, scale = FALSE)
#' Z1 <- scale(GLESsmall$Z1, scale = FALSE)
#'
#' ## vector of subtitles, containing the coding of the X covariates
#' subs.X <- c('', 'female (1); male (0)')
#'
#' ## Cross-validate BTLLasso model
#' m.gles.cv <- cv.BTLLasso(Y = Y, X = X, Z1 = Z1)
#' m.gles.cv
#'
#' coef(m.gles.cv)
#' logLik(m.gles.cv)
#'
#' head(predict(m.gles.cv, type="response"))
#' head(predict(m.gles.cv, type="trait"))
#'
#' par(xpd = TRUE, mar = c(5,4,4,6))
#' plot(m.gles.cv, subs.X = subs.X, plots_per_page = 4, which = 2:5)
#' paths(m.gles.cv, y.axis = 'L2')
#'
#'
#' ##############################
#' ##### Example with Bundesliga data set
#' ##############################
#' data(Buli1516)
#'
#' Y <- Buli1516$Y5
#'
#' Z1 <- scale(Buli1516$Z1, scale = FALSE)
#'
#' ctrl.buli <- ctrl.BTLLasso(object.order.effect = TRUE,
#' name.order = "Home",
#' penalize.order.effect.diffs = TRUE,
#' penalize.order.effect.absolute = FALSE,
#' order.center = TRUE, lambda2 = 1e-2)
#'
#' set.seed(1860)
#' m.buli <- cv.BTLLasso(Y = Y, Z1 = Z1, control = ctrl.buli)
#' m.buli
#'
#' par(xpd = TRUE, mar = c(5,4,4,6))
#' plot(m.buli)
#'
#'
#' ##############################
#' ##### Example with Topmodel data set
#' ##############################
#' data("Topmodel2007", package = "psychotree")
#'
#' Y.models <- response.BTLLasso(Topmodel2007$preference)
#' X.models <- scale(model.matrix(preference~., data = Topmodel2007)[,-1])
#' rownames(X.models) <- paste0("Subject",1:nrow(X.models))
#' colnames(X.models) <- c("Gender","Age","KnowShow","WatchShow","WatchFinal")
#'
#' set.seed(5)
#' m.models <- cv.BTLLasso(Y = Y.models, X = X.models)
#' plot(m.models, plots_per_page = 6)
#'
#' par(op)
#' }
print.boot.BTLLasso <- function(x, quantiles = c(0.025, 0.975), rescale = FALSE, ...) {
model <- x$cv.model
epsilon <- model$control$epsilon
accuracy <- -log10(epsilon)
covariates <- c(model$design$vars.X, model$design$vars.Z1,
model$design$vars.Z2)
# conf.ints <- apply(x$estimatesB, 2, quantile, probs = quantiles,
# type = 1, na.rm = TRUE)
conf.ints <- apply(x$estimatesBrepar, 2, quantile, probs = quantiles,
type = 1, na.rm = TRUE)
m <- model$Y$m
labels <- model$Y$x.names
n.theta <- model$design$n.theta
n.order <- model$design$n.order
n.intercepts <- model$design$n.intercepts
if (n.intercepts > 0) {
n.intercepts <- n.intercepts + 1
}
p.X <- model$design$p.X
p.Z1 <- model$design$p.Z1
p.Z2 <- model$design$p.Z2
estimates <- model$coefs.repar[which.min(model$criterion),
]
estimates <- round(estimates, accuracy)
conf.ints <- round(conf.ints, accuracy)
gamma.total <- c()
start <- 1
end <- n.theta
cat("Bootstrap intervals:\n")
cat("---", "\n")
if (n.theta > 0) {
cat("Thresholds:", "\n")
gamma <- matrix(NA, nrow = 3, ncol = n.theta)
rownames(gamma)[c(1, 3)] <- rownames(conf.ints)
rownames(gamma)[2] <- "estimate"
gamma[c(1, 3), ] <- conf.ints[, start:end]
gamma[2, ] <- estimates[start:end]
colnames(gamma) <- names(estimates[start:end])
print(gamma, ...)
cat("\n")
gamma.total <- cbind(gamma.total, gamma)
}
start <- n.theta + 1
if (n.order > 0) {
end <- start + n.order - 1
gamma <- matrix(NA, nrow = 3, ncol = n.order)
rownames(gamma)[c(1, 3)] <- rownames(conf.ints)
rownames(gamma)[2] <- "estimate"
cat(paste0(model$control$name.order, ":"), "\n")
gamma[c(1, 3), ] <- conf.ints[, start:end]
gamma[2, ] <- estimates[start:end]
colnames(gamma) <- names(estimates[start:end])
print(gamma, ...)
cat("\n")
gamma.total <- cbind(gamma.total, gamma)
}
start <- n.theta + n.order + 1
if (n.intercepts > 0) {
end <- start + n.intercepts - 1
gamma <- matrix(NA, nrow = 3, ncol = n.intercepts)
rownames(gamma)[c(1, 3)] <- rownames(conf.ints)
rownames(gamma)[2] <- "estimate"
cat("Intercepts:\n")
gamma[c(1, 3), ] <- conf.ints[, start:end]
gamma[2, ] <- estimates[start:end]
colnames(gamma) <- names(estimates[start:end])
print(gamma, ...)
cat("\n")
gamma.total <- cbind(gamma.total, gamma)
}
start <- n.theta + n.order + n.intercepts + 1
if (p.X > 0) {
end <- start + p.X * m - 1
if (rescale) {
est <- estimates[start:end]/rep(model$design$sd.X,
each = m)
est.ci <- t(t(conf.ints[, start:end])/rep(model$design$sd.X,
each = m))
} else {
est <- estimates[start:end]
est.ci <- conf.ints[, start:end]
}
gamma <- matrix(NA, nrow = 3, ncol = p.X * m)
rownames(gamma)[c(1, 3)] <- rownames(conf.ints)
rownames(gamma)[2] <- "estimate"
cat("Object-specific effects for subject-specific covariate(s):",
"\n")
gamma[c(1, 3), ] <- est.ci
gamma[2, ] <- est
colnames(gamma) <- names(estimates[start:end])
print(gamma, ...)
cat("\n")
gamma.total <- cbind(gamma.total, gamma)
}
start <- n.theta + n.order + n.intercepts + p.X * m + 1
if (p.Z1 > 0) {
end <- start + p.Z1 * m - 1
if (rescale) {
est <- estimates[start:end]/rep(model$design$sd.Z1,
each = m)
est.ci <- t(t(conf.ints[, start:end])/rep(model$design$sd.Z1,
each = m))
} else {
est <- estimates[start:end]
est.ci <- conf.ints[, start:end]
}
gamma <- matrix(NA, nrow = 3, ncol = p.Z1 * m)
rownames(gamma)[c(1, 3)] <- rownames(conf.ints)
rownames(gamma)[2] <- "estimate"
cat("Object-specific effects for subject-object-specific covariate(s):",
"\n")
gamma[c(1, 3), ] <- est.ci
gamma[2, ] <- est
colnames(gamma) <- names(estimates[start:end])
print(gamma, ...)
cat("\n")
gamma.total <- cbind(gamma.total, gamma)
}
start <- n.theta + n.order + n.intercepts + p.X * m + p.Z1 *
m + 1
if (p.Z2 > 0) {
end <- start + p.Z2 - 1
if (rescale) {
est <- estimates[start:end]/model$design$sd.Z2
est.ci <- t(t(conf.ints[, start:end])/model$design$sd.Z2)
} else {
est <- estimates[start:end]
est.ci <- conf.ints[, start:end]
}
gamma <- matrix(NA, nrow = 3, ncol = p.Z2)
rownames(gamma)[c(1, 3)] <- rownames(conf.ints)
rownames(gamma)[2] <- "estimate"
cat("Global effects for (subject-)object-specific covariate(s):",
"\n")
gamma[c(1, 3), ] <- est.ci
gamma[2, ] <- est
colnames(gamma) <- names(estimates[start:end])
print(gamma, ...)
gamma.total <- cbind(gamma.total, gamma)
}
invisible(gamma.total)
}
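
## Sketch of the percentile intervals computed above (hypothetical bootstrap draws, for
## illustration only): each column of the bootstrap estimate matrix is reduced to its
## empirical quantiles, exactly as done with apply() and quantile() in the print method.
local({
  set.seed(1)
  estimatesB <- matrix(rnorm(200), nrow = 50,
                       dimnames = list(NULL, paste0("coef", 1:4)))  # 50 hypothetical bootstrap draws
  apply(estimatesB, 2, quantile, probs = c(0.025, 0.975), type = 1, na.rm = TRUE)
})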
|
/scratch/gouwar.j/cran-all/cranData/BTLLasso/R/print.boot.BTLLasso.R
|
#' Print function for cv.BTLLasso objects
#'
#' Prints the most important output of \code{cv.BTLLasso} objects.
#'
#' @method print cv.BTLLasso
#' @param x \code{cv.BTLLasso} object
#' @param rescale Should the parameter estimates be rescaled for plotting? Only
#' applies if \code{scale = TRUE} was specified in \code{BTLLasso} or \code{cv.BTLLasso}.
#' @param \dots possible further arguments for print command
#' @author Gunther Schauberger\cr \email{gunther.schauberger@@tum.de}
#' @seealso \code{\link{cv.BTLLasso}}
#' @references Schauberger, Gunther and Tutz, Gerhard (2019): BTLLasso - A Common Framework and Software
#' Package for the Inclusion and Selection of Covariates in Bradley-Terry Models, \emph{Journal of
#' Statistical Software}, 88(9), 1-29, \doi{10.18637/jss.v088.i09}
#'
#' Schauberger, Gunther and Tutz, Gerhard (2017): Subject-specific modelling
#' of paired comparison data: A lasso-type penalty approach, \emph{Statistical Modelling},
#' 17(3), 223 - 243
#'
#' Schauberger, Gunther, Groll, Andreas and Tutz, Gerhard (2018):
#' Analysis of the importance of on-field covariates in the German Bundesliga,
#' \emph{Journal of Applied Statistics}, 45(9), 1561 - 1578
#' @keywords BTLLasso
#' @examples
#'
#' \dontrun{
#' op <- par(no.readonly = TRUE)
#'
#' ##############################
#' ##### Example with simulated data set containing X, Z1 and Z2
#' ##############################
#' data(SimData)
#'
#' ## Specify control argument
#' ## -> allow for object-specific order effects and penalize intercepts
#' ctrl <- ctrl.BTLLasso(penalize.intercepts = TRUE, object.order.effect = TRUE,
#' penalize.order.effect.diffs = TRUE)
#'
#' ## Simple BTLLasso model for tuning parameters lambda
#' m.sim <- BTLLasso(Y = SimData$Y, X = SimData$X, Z1 = SimData$Z1,
#' Z2 = SimData$Z2, control = ctrl)
#' m.sim
#'
#' par(xpd = TRUE)
#' plot(m.sim)
#'
#'
#' ## Cross-validate BTLLasso model for tuning parameters lambda
#' set.seed(1860)
#' m.sim.cv <- cv.BTLLasso(Y = SimData$Y, X = SimData$X, Z1 = SimData$Z1,
#' Z2 = SimData$Z2, control = ctrl)
#' m.sim.cv
#' coef(m.sim.cv)
#' logLik(m.sim.cv)
#'
#' head(predict(m.sim.cv, type="response"))
#' head(predict(m.sim.cv, type="trait"))
#'
#' plot(m.sim.cv, plots_per_page = 4)
#'
#'
#' ## Example for bootstrap intervals for illustration only
#' ## Don't calculate bootstrap intervals with B = 20!!!!
#' set.seed(1860)
#' m.sim.boot <- boot.BTLLasso(m.sim.cv, B = 20, cores = 20)
#' m.sim.boot
#' plot(m.sim.boot, plots_per_page = 4)
#'
#'
#' ##############################
#' ##### Example with small version from GLES data set
#' ##############################
#' data(GLESsmall)
#'
#' ## extract data and center covariates for better interpretability
#' Y <- GLESsmall$Y
#' X <- scale(GLESsmall$X, scale = FALSE)
#' Z1 <- scale(GLESsmall$Z1, scale = FALSE)
#'
#' ## vector of subtitles, containing the coding of the X covariates
#' subs.X <- c('', 'female (1); male (0)')
#'
#' ## Cross-validate BTLLasso model
#' m.gles.cv <- cv.BTLLasso(Y = Y, X = X, Z1 = Z1)
#' m.gles.cv
#'
#' coef(m.gles.cv)
#' logLik(m.gles.cv)
#'
#' head(predict(m.gles.cv, type="response"))
#' head(predict(m.gles.cv, type="trait"))
#'
#' par(xpd = TRUE, mar = c(5,4,4,6))
#' plot(m.gles.cv, subs.X = subs.X, plots_per_page = 4, which = 2:5)
#' paths(m.gles.cv, y.axis = 'L2')
#'
#'
#' ##############################
#' ##### Example with Bundesliga data set
#' ##############################
#' data(Buli1516)
#'
#' Y <- Buli1516$Y5
#'
#' Z1 <- scale(Buli1516$Z1, scale = FALSE)
#'
#' ctrl.buli <- ctrl.BTLLasso(object.order.effect = TRUE,
#' name.order = "Home",
#' penalize.order.effect.diffs = TRUE,
#' penalize.order.effect.absolute = FALSE,
#' order.center = TRUE, lambda2 = 1e-2)
#'
#' set.seed(1860)
#' m.buli <- cv.BTLLasso(Y = Y, Z1 = Z1, control = ctrl.buli)
#' m.buli
#'
#' par(xpd = TRUE, mar = c(5,4,4,6))
#' plot(m.buli)
#'
#'
#' ##############################
#' ##### Example with Topmodel data set
#' ##############################
#' data("Topmodel2007", package = "psychotree")
#'
#' Y.models <- response.BTLLasso(Topmodel2007$preference)
#' X.models <- scale(model.matrix(preference~., data = Topmodel2007)[,-1])
#' rownames(X.models) <- paste0("Subject",1:nrow(X.models))
#' colnames(X.models) <- c("Gender","Age","KnowShow","WatchShow","WatchFinal")
#'
#' set.seed(5)
#' m.models <- cv.BTLLasso(Y = Y.models, X = X.models)
#' plot(m.models, plots_per_page = 6)
#'
#' par(op)
#' }
print.cv.BTLLasso <- function(x, rescale = FALSE, ...) {
m <- x$Y$m
n <- x$Y$n
k <- x$Y$q + 1
n.theta <- x$design$n.theta
n.intercepts <- x$design$n.intercepts
if (n.intercepts != 0) {
n.intercepts <- n.intercepts + 1
}
n.order <- x$design$n.order
p.X <- x$design$p.X
p.Z1 <- x$design$p.Z1
p.Z2 <- x$design$p.Z2
lambda <- x$lambda
vars.X <- x$design$vars.X
vars.Z1 <- x$design$vars.Z1
vars.Z2 <- x$design$vars.Z2
labels <- x$Y$object.names
cv.crit <- x$cv.crit
cat("Output of BTLLasso estimation:", "\n")
cat("---", "\n")
cat("Setting:")
cat("\n", n, "subjects")
cat("\n", m, "objects")
cat("\n", k, "response categories")
cat("\n", p.X, "subject-specific covariate(s)")
cat("\n", p.Z1, "subject-object-specific covariate(s) with object-specific effects")
cat("\n", p.Z2, "(subject-)object-specific covariate(s) with global effects")
if (n.order == m) {
cat("\n", n.order, "subject-specific order effects")
}
if (n.order == 1) {
cat("\n", "Global order effect")
}
if (n.order == 0) {
cat("\n", "No order effect")
}
cat("\n", length(lambda), "different tuning parameters",
"\n")
cat("\n Cross-validation criterion:", cv.crit, "\n")
cat("---", "\n")
cat("Parameter estimates after", x$folds, "-", "fold cross-validation",
"\n")
cat("\n")
coefs <- x$coefs.repar[which.min(x$criterion), ]
theta <- intercepts <- order.effects <- gamma.X <- gamma.Z1 <- gamma.Z2 <- c()
if (n.theta > 0) {
cat("Thresholds:", "\n")
theta <- coefs[1:n.theta]
names(theta) <- paste0("theta", 1:n.theta)
print(theta, ...)
cat("\n")
}
if (n.order > 0) {
cat(paste0(x$control$name.order, ":"), "\n")
orders <- coefs[(n.theta + 1):(n.theta + n.order)]
if (n.order == m) {
names(orders) <- labels
}
if (n.order == 1) {
names(orders) <- NULL
}
print(orders, ...)
cat("\n")
}else{
orders <- NULL
}
if (n.intercepts > 0) {
cat("Intercepts:", "\n")
intercepts <- coefs[(n.theta + n.order + 1):(n.theta +
n.order + n.intercepts)]
names(intercepts) <- labels
print(intercepts, ...)
cat("\n")
}
if (p.X > 0) {
cat("Object-specific effects for subject-specific covariate(s):",
"\n")
gamma.X <- matrix(coefs[(n.theta + n.order + n.intercepts +
1):(n.theta + n.order + n.intercepts + p.X * m)],
nrow = p.X, byrow = TRUE)
if (rescale) {
gamma.X <- t(t(gamma.X)/rep(x$design$sd.X, each = m))
}
colnames(gamma.X) <- labels
rownames(gamma.X) <- vars.X
print(gamma.X, ...)
cat("\n")
}
if (p.Z1 > 0) {
cat("Object-specific effects for subject-object-specific covariate(s):",
"\n")
gamma.Z1 <- matrix(coefs[(n.theta + n.order + n.intercepts +
p.X * m + 1):(n.theta + n.order + n.intercepts +
p.X * m + p.Z1 * m)], nrow = p.Z1, byrow = TRUE)
if (rescale) {
gamma.Z1 <- t(t(gamma.Z1)/rep(x$design$sd.Z1, each = m))
}
colnames(gamma.Z1) <- labels
rownames(gamma.Z1) <- vars.Z1
print(gamma.Z1, ...)
cat("\n")
}
if (p.Z2 > 0) {
cat("Global effects for (subject-)object-specific covariate(s):",
"\n")
gamma.Z2 <- coefs[(n.theta + n.order + n.intercepts +
p.X * m + p.Z1 * m + 1):(n.theta + n.order + n.intercepts +
p.X * m + p.Z1 * m + p.Z2)]
if (rescale) {
gamma.Z2 <- t(t(gamma.Z2)/x$design$sd.Z2)
}
names(gamma.Z2) <- vars.Z2
print(gamma.Z2, ...)
cat("\n")
}
cat("---", "\n")
cat("\n")
cat("Optimal lambda:", x$lambda[which.min(x$criterion)],
"\n")
cat("\n")
cat("Log likelihood:", x$logLik[which.min(x$criterion)],
"\n")
coef.opt <- list(theta = theta, intercepts = intercepts,
order.effects = orders, gamma.X = gamma.X, gamma.Z1 = gamma.Z1,
gamma.Z2 = gamma.Z2)
invisible(coef.opt)
}
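
## The rescaling above relies on the t(t(M)/v) idiom: dividing the transposed coefficient
## matrix by a vector of scaling factors divides each column of t(M), i.e. each covariate,
## by its own standard deviation. A minimal sketch with hypothetical values:
local({
  gamma <- matrix(1:6, nrow = 2,
                  dimnames = list(c("cov1", "cov2"), c("obj1", "obj2", "obj3")))  # hypothetical estimates
  sd.cov <- c(2, 4)                                    # hypothetical standard deviations, one per covariate
  t(t(gamma)/rep(sd.cov, each = ncol(gamma)))          # row i of gamma divided by sd.cov[i]
})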
|
/scratch/gouwar.j/cran-all/cranData/BTLLasso/R/print.cv.BTLLasso.R
|
#' Create response object for BTLLasso
#'
#' Create a response object for \code{BTLLasso} and \code{cv.BTLLasso}
#'
#'
#' @param response Vector containing results (binary or ordinal) of single paired
#' comparisons. Alternatively, also a \code{\link[psychotools]{paircomp}} object as defined
#' in the package \code{psychotools} could be used. In this case, none of the further
#' arguments are needed.
#' @param first.object Vector (character or factor, same length as response) indicating the first
#' object of the respective paired comparison from response.
#' @param second.object Vector (character or factor, same length as response) indicating the second
#' object of the respective paired comparison from response.
#' @param subject Vector (character, same length as response) indicating the subject that
#' generated the respective paired comparison from response.
#' @param with.order Boolean vector containing indicators for each paired comparison if an order effect was
#' present. By default, an order effect is assumed for each comparison. This option is relevant whenever
#' only some of the paired comparisons had an order effect and others did not, for example if some matches are
#' played on neutral ground. This option is only effective if either \code{order.effect = TRUE} or \code{object.order.effect = TRUE}.
#' @return Object of class \code{response.BTLLasso}
#' @author Gunther Schauberger\cr \email{gunther.schauberger@@tum.de}
#' @seealso \code{\link{BTLLasso}}, \code{\link{cv.BTLLasso}}
#' @references Schauberger, Gunther and Tutz, Gerhard (2019): BTLLasso - A Common Framework and Software
#' Package for the Inclusion and Selection of Covariates in Bradley-Terry Models, \emph{Journal of
#' Statistical Software}, 88(9), 1-29, \doi{10.18637/jss.v088.i09}
#'
#' Schauberger, Gunther and Tutz, Gerhard (2017): Subject-specific modelling
#' of paired comparison data: A lasso-type penalty approach, \emph{Statistical Modelling},
#' 17(3), 223 - 243
#'
#' Schauberger, Gunther, Groll, Andreas and Tutz, Gerhard (2018):
#' Analysis of the importance of on-field covariates in the German Bundesliga,
#' \emph{Journal of Applied Statistics}, 45(9), 1561 - 1578
#' @examples
#'
#' \dontrun{
#' ##############################
#' ##### Example how response object for Bundesliga data Buli1516 was created
#' ##############################
#'
#' data(BuliResponse)
#'
#' Y.Buli <- response.BTLLasso(response = BuliResponse$Result,
#' first.object = BuliResponse$TeamHome,
#' second.object = BuliResponse$TeamAway,
#' subject = BuliResponse$Matchday)
#'
#'
#' ##############################
#' ##### Example to create response object from paircomp object
#' ##############################
#' data("Topmodel2007", package = "psychotree")
#'
#' Y.models <- response.BTLLasso(Topmodel2007$preference)
#' X.models <- scale(model.matrix(preference~., data = Topmodel2007)[,-1])
#' rownames(X.models) <- paste0("Subject",1:nrow(X.models))
#' colnames(X.models) <- c("Gender","Age","KnowShow","WatchShow","WatchFinal")
#'
#' set.seed(5)
#' m.models <- cv.BTLLasso(Y = Y.models, X = X.models)
#' }
response.BTLLasso <- function(response, first.object = NULL, second.object = NULL,
subject = NULL, with.order = rep(TRUE, length(response))) {
if(inherits(response, "paircomp")){
response <- as.matrix(response)
model_names <- str_split(colnames(response),pattern=":")
model_names <- matrix(unlist(model_names),nrow=2)
subject <- paste0("Subject",rep(1:nrow(response),ncol(response)))
first.object <- rep(model_names[1,],each=nrow(response))
second.object <- rep(model_names[2,],each=nrow(response))
}
withS <- FALSE
if (!is.null(subject)) {
withS <- TRUE
if (!is.character(subject))
stop("Argument subject has to be a character vector")
}
if (!withS) {
subject <- 1:length(response)
}
ly <- length(response)
lo1 <- length(first.object)
lo2 <- length(second.object)
ls <- length(subject)
lorder <- length(with.order)
if (!all(sapply(list(lo1, lo2, ls,lorder), identical, ly)))
stop("The arguments response, first.object, second.object and (if specified) subject and with.order
have to be of the same length")
all.objects <- as.factor(as.character(unlist(list(first.object,
second.object))))
object.names <- levels(all.objects)
first.object <- as.numeric(all.objects[1:ly])
second.object <- as.numeric(all.objects[(ly + 1):(2 * ly)])
m <- length(object.names)
## make response ordered
response <- as.ordered(response)
# number of response categories
q <- length(levels(response)) - 1
k <- q + 1
## everything about the subjects
subject.names <- levels(as.factor(subject))
n <- length(subject.names)
RET <- list(response = response, first.object = first.object,
second.object = second.object, subject = subject, withS = withS,
subject.names = subject.names, object.names = object.names,
n = n, m = m, k = k, q = q, with.order = with.order)
class(RET) <- "responseBTLLasso"
RET
}
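
## Sketch (hypothetical labels, illustration only) of how the paircomp branch above turns
## column names of the form "first:second" into the first.object / second.object vectors;
## base strsplit() is used here instead of stringr::str_split for a self-contained example.
local({
  cn <- c("Anja:Birgit", "Anja:Carla", "Birgit:Carla")        # hypothetical comparison labels
  model_names <- matrix(unlist(strsplit(cn, ":")), nrow = 2)  # row 1: first object, row 2: second object
  list(first.object = model_names[1, ], second.object = model_names[2, ])
})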
|
/scratch/gouwar.j/cran-all/cranData/BTLLasso/R/response.BTLLasso.R
|
spread.labs <- function(x, mindiff, maxiter=1000, stepsize=1/10,
min=-Inf, max=Inf) {
unsort <- order(order(x))
x <- sort(x)
df <- x[-1] - x[ -length(x) ]
stp <- mindiff * stepsize
i <- 1
while( any( df < mindiff ) ) {
tmp <- c( df < mindiff, FALSE )
if( tmp[1] && (x[1] - stp) < min ) { # don't move bottom set
tmp2 <- as.logical( cumprod(tmp) )
tmp <- tmp & !tmp2
}
x[ tmp ] <- x[ tmp ] - stp
tmp <- c( FALSE, df < mindiff )
if( tmp[length(tmp)] && (x[length(x)] + stp) > max ) { # don't move top
tmp2 <- rev( as.logical( cumprod( rev(tmp) ) ) )
tmp <- tmp & !tmp2
}
x[ tmp ] <- x[ tmp] + stp
df <- x[-1] - x[-length(x)]
i <- i + 1
if( i > maxiter ) {
warning("Maximum iterations reached")
break
}
}
x[unsort]
}
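
## Usage sketch (hypothetical label positions): values that are closer together than
## mindiff are pushed apart step by step while their original ordering is preserved,
## which is useful to avoid overlapping text labels in the plotting functions.
local({
  y <- c(0.10, 0.12, 0.13, 0.60)   # hypothetical, partly overlapping label positions
  spread.labs(y, mindiff = 0.05)   # positions at least 0.05 apart, returned in input order
})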
|
/scratch/gouwar.j/cran-all/cranData/BTLLasso/R/spread.labs.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
btm <- function(biterms, x, K, W, alpha, beta, iter, win = 15L, background = FALSE, trace = 0L) {
.Call('_BTM_btm', PACKAGE = 'BTM', biterms, x, K, W, alpha, beta, iter, win, background, trace)
}
btm_infer <- function(model, x, type) {
.Call('_BTM_btm_infer', PACKAGE = 'BTM', model, x, type)
}
btm_biterms <- function(btm_model) {
.Call('_BTM_btm_biterms', PACKAGE = 'BTM', btm_model)
}
btm_biterms_text <- function(x, W, win = 15L) {
.Call('_BTM_btm_biterms_text', PACKAGE = 'BTM', x, W, win)
}
|
/scratch/gouwar.j/cran-all/cranData/BTM/R/RcppExports.R
|
#' @title Construct a Biterm Topic Model on Short Text
#' @description
#' The Biterm Topic Model (BTM) is a word co-occurrence based topic model that learns topics by modeling word-word co-occurrence patterns (e.g., biterms)
#'
#' \itemize{
#' \item A biterm consists of two words co-occurring in the same context, for example, in the same short text window.
#' \item BTM models the biterm occurrences in a corpus (unlike LDA models which model the word occurrences in a document).
#' \item It's a generative model. In the generation procedure, a biterm is generated by drawing two words independently from the same topic z.
#' In other words, the distribution of a biterm \eqn{b=(wi,wj)} is defined as: \eqn{P(b) = \sum_k{P(wi|z)*P(wj|z)*P(z)}}
#' where k is the number of topics you want to extract.
#' \item Estimation of the topic model is done with the Gibbs sampling algorithm. Where estimates are provided for \eqn{P(w|k)=phi} and \eqn{P(z)=theta}.
#' }
#' @references Xiaohui Yan, Jiafeng Guo, Yanyan Lan, Xueqi Cheng. A Biterm Topic Model For Short Text. WWW2013,
#' \url{https://github.com/xiaohuiyan/BTM}, \url{https://github.com/xiaohuiyan/xiaohuiyan.github.io/blob/master/paper/BTM-WWW13.pdf}
#' @param data a tokenised data frame containing one row per token with 2 columns
#' \itemize{
#' \item the first column is a context identifier (e.g. a tweet id, a document id, a sentence id, an identifier of a survey answer, an identifier of a part of a text)
#' \item the second column is a column of type character containing the sequence of words occurring within the context identifier
#' }
#' @param k integer with the number of topics to identify
#' @param alpha numeric, indicating the symmetric Dirichlet prior probability of a topic P(z). Defaults to 50/k.
#' @param beta numeric, indicating the symmetric Dirichlet prior probability of a word given the topic P(w|z). Defaults to 0.01.
#' @param iter integer with the number of iterations of Gibbs sampling
#' @param window integer with the window size for biterm extraction. Defaults to 15.
#' @param background logical if set to \code{TRUE}, the first topic is set to a background topic that
#' equals the empirical word distribution. This can be used to filter out common words. Defaults to FALSE.
#' @param trace logical indicating to print out evolution of the Gibbs sampling iterations. Defaults to FALSE.
#' @param biterms optionally, your own set of biterms to use for modelling.\cr
#' This argument should be a data.frame with column names doc_id, term1, term2 and cooc, indicating how many times each biterm (as indicated by terms term1 and term2)
#' occurs within a certain doc_id. The field cooc indicates how often this biterm occurs within that doc_id. \cr
#' Note that doc_id's which are not in \code{data} are not allowed, as well as terms (in term1 and term2) which are not also in \code{data}.
#' See the examples.\cr
#' If provided, the \code{window} argument is ignored and the \code{data} argument will only be used to calculate the background word frequency distribution.
#' @param detailed logical indicating to return detailed output, also containing the vocabulary and the biterms used to construct the model. Defaults to FALSE.
#' @note
#' A biterm is defined as a pair of words co-occurring in the same text window.
#' Take, for example, a document with the word sequence \code{'A B C B'} and a window size of 3.
#' This yields two text windows which can generate biterms, namely
#' text window \code{'A B C'} with biterms \code{'A B', 'B C', 'A C'} and text window \code{'B C B'} with biterms \code{'B C', 'C B', 'B B'}.
#' A biterm is an unordered word pair where \code{'B C' = 'C B'}. Thus, the document \code{'A B C B'} will have the following biterm frequencies: \cr
#' \itemize{
#' \item 'A B': 1
#' \item 'B C': 3
#' \item 'A C': 1
#' \item 'B B': 1
#' }
#' These biterms are used to create the model.
#' @return an object of class BTM which is a list containing
#' \itemize{
#' \item{model: a pointer to the C++ BTM model}
#' \item{K: the number of topics}
#' \item{W: the number of tokens in the data}
#' \item{alpha: the symmetric dirichlet prior probability of a topic P(z)}
#' \item{beta: the symmetric dirichlet prior probability of a word given the topic P(w|z)}
#' \item{iter: the number of iterations of Gibbs sampling}
#' \item{background: indicator if the first topic is set to the background topic that equals the empirical word distribution.}
#' \item{theta: a vector with the topic probabilities p(z), determined by the overall proportions of biterms in each topic}
#' \item{phi: a matrix of dimension W x K with one row for each token in the data. This matrix contains the probability of the token given the topic P(w|z).
#' the rownames of the matrix indicate the token w}
#' \item{vocab: a data.frame with columns token and freq indicating the frequency of occurrence of the tokens in \code{data}. Only provided in case argument \code{detailed} is set to \code{TRUE}}
#' \item{biterms: the result of a call to \code{terms} with type set to biterms, containing all the biterms used in the model. Only provided in case argument \code{detailed} is set to \code{TRUE}}
#' }
#' @export
#' @seealso \code{\link{predict.BTM}}, \code{\link{terms.BTM}}, \code{\link{logLik.BTM}}
#' @examples
#' \dontshow{if(require(udpipe) & require(data.table))\{}
#' library(udpipe)
#' data("brussels_reviews_anno", package = "udpipe")
#' x <- subset(brussels_reviews_anno, language == "nl")
#' x <- subset(x, xpos %in% c("NN", "NNP", "NNS"))
#' x <- x[, c("doc_id", "lemma")]
#' model <- BTM(x, k = 5, alpha = 1, beta = 0.01, iter = 10, trace = TRUE)
#' model
#' terms(model)
#' scores <- predict(model, newdata = x)
#'
#' ## Another small run with first topic the background word distribution
#' set.seed(123456)
#' model <- BTM(x, k = 5, beta = 0.01, iter = 10, background = TRUE)
#' model
#' terms(model)
#'
#' ##
#' ## You can also provide your own set of biterms to cluster upon
#' ## Example: cluster nouns and adjectives in the neighbourhood of one another
#' ##
#' library(data.table)
#' library(udpipe)
#' x <- subset(brussels_reviews_anno, language == "nl")
#' x <- head(x, 5500) # take a sample to speed things up on CRAN
#' biterms <- as.data.table(x)
#' biterms <- biterms[, cooccurrence(x = lemma,
#' relevant = xpos %in% c("NN", "NNP", "NNS", "JJ"),
#' skipgram = 2),
#' by = list(doc_id)]
#' head(biterms)
#' set.seed(123456)
#' x <- subset(x, xpos %in% c("NN", "NNP", "NNS", "JJ"))
#' x <- x[, c("doc_id", "lemma")]
#' model <- BTM(x, k = 5, beta = 0.01, iter = 10, background = TRUE,
#' biterms = biterms, trace = 10, detailed = TRUE)
#' model
#' terms(model)
#' bitermset <- terms(model, "biterms")
#' head(bitermset$biterms, 100)
#'
#' bitermset$n
#' sum(biterms$cooc)
#'
#' \dontshow{\} # End of main if statement running only if the required packages are installed}
#' \dontrun{
#' ##
#' ## Visualisation either using the textplot or the LDAvis package
#' ##
#' library(textplot)
#' library(ggraph)
#' library(concaveman)
#' plot(model, top_n = 4)
#'
#' library(LDAvis)
#' docsize <- table(x$doc_id)
#' scores <- predict(model, x)
#' scores <- scores[names(docsize), ]
#' json <- createJSON(
#' phi = t(model$phi),
#' theta = scores,
#' doc.length = as.integer(docsize),
#' vocab = model$vocabulary$token,
#' term.frequency = model$vocabulary$freq)
#' serVis(json)
#' }
BTM <- function(data, k = 5, alpha = 50/k, beta = 0.01, iter = 1000, window = 15, background = FALSE, trace = FALSE,
biterms, detailed = FALSE){
trace <- as.integer(trace)
background <- as.integer(as.logical(background))
stopifnot(k >= 1)
stopifnot(iter >= 1)
stopifnot(window >= 1)
iter <- as.integer(iter)
window <- as.integer(window)
stopifnot(inherits(data, "data.frame"))
if(ncol(data) == 2){
data <- data.frame(doc_id = data[[1]], token = data[[2]], stringsAsFactors = FALSE)
}else{
if(!all(c("doc_id", "token") %in% colnames(data))){
stop("please provide in data a data.frame with 2 columns as indicated in the help of BTM")
}
}
data <- data[!is.na(data$doc_id) & !is.na(data$token), ]
## Convert tokens to integer numbers which need to be pasted into a string separated by spaces
data$word <- factor(data$token)
if(detailed){
freq <- table(data$word)
freq <- as.data.frame(freq, responseName = "freq", stringsAsFactors = FALSE)
vocabulary <- data.frame(id = seq_along(levels(data$word)) - 1L,
token = levels(data$word),
freq = freq$freq[match(levels(data$word), freq$Var1)],
stringsAsFactors = FALSE)
}else{
vocabulary <- data.frame(id = seq_along(levels(data$word)) - 1L,
token = levels(data$word),
stringsAsFactors = FALSE)
}
data$word <- as.integer(data$word) - 1L
voc <- max(data$word) + 1
context <- split(data$word, data$doc_id)
context <- sapply(context, FUN=function(x) paste(x, collapse = " "))
## Handle manual set of biterms provided by user
if(missing(biterms)){
biterms <- data.frame(doc_id = character(), term1 = integer(), term2 = integer(), cooc = integer(), stringsAsFactors = FALSE)
biterms <- split(biterms, biterms$doc_id)
}else{
stopifnot(is.data.frame(biterms))
if(anyNA(biterms)){
stop("make sure there are no missing data in biterms")
}
if(!all(c("doc_id", "term1", "term2") %in% colnames(biterms))){
stop("please provide in biterms a data.frame with at least 3 columns: doc_id, term1, term2, cooc - see the example in the help of BTM")
}
if(!all("cooc" %in% colnames(biterms))){
biterms$cooc <- 1L
}else{
biterms$cooc <- as.integer(biterms$cooc)
}
recode <- function(x, from, to){
to[match(x, from)]
}
biterms$term1 <- recode(biterms$term1, from = vocabulary$token, to = vocabulary$id)
biterms$term2 <- recode(biterms$term2, from = vocabulary$token, to = vocabulary$id)
if(anyNA(biterms$term1) || anyNA(biterms$term2)){
stop("all terms in biterms should at least be available in data as well")
}
if(!all(biterms$doc_id %in% names(context))){
stop("all doc_id's of the biterms should at least be available data as well")
}
biterms <- split(biterms, factor(biterms$doc_id, levels = names(context)), drop = FALSE)
biterms <- lapply(biterms, FUN=function(x) as.list(x))
}
## build the model
model <- btm(biterms = biterms, x = context, K = k, W = voc, alpha = alpha, beta = beta, iter = iter, win = window, background = background, trace = as.integer(trace))
## make sure integer numbers are back tokens again
rownames(model$phi) <- vocabulary$token
## also include vocabulary
class(model) <- "BTM"
if(detailed){
model$vocabulary <- vocabulary[c("token", "freq")]
model$biterms <- terms.BTM(model, type = "biterms")
}
model
}
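
## Toy illustration (hypothetical numbers, independent of any fitted model) of the biterm
## probability stated in the description above: P(b = (wi, wj)) = sum_k P(wi|z_k) P(wj|z_k) P(z_k).
## The rows of phi play the role of model$phi and theta plays the role of model$theta.
local({
  phi <- matrix(c(0.7, 0.1,
                  0.2, 0.3,
                  0.1, 0.6),
                nrow = 3, byrow = TRUE,
                dimnames = list(c("apple", "juice", "server"), NULL))  # P(w|z): 3 tokens x 2 topics
  theta <- c(0.4, 0.6)                                                 # P(z): topic probabilities
  sum(phi["apple", ] * phi["juice", ] * theta)                         # P(biterm ("apple", "juice"))
})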
#' @export
print.BTM <- function(x, ...){
cat("Biterm Topic Model", sep = "\n")
cat(sprintf(" trained with %s Gibbs iterations, alpha: %s, beta: %s", x$iter, x$alpha, x$beta), sep = "\n")
cat(sprintf(" topics: %s", x$K), sep = "\n")
cat(sprintf(" size of the token vocabulary: %s", x$W), sep = "\n")
cat(sprintf(" topic distribution theta: %s", paste(round(x$theta, 3), collapse = " ")), sep = "\n")
}
#' @title Predict function for a Biterm Topic Model
#' @description Classify new text alongside the biterm topic model.\cr
#'
#' To infer the topics in a document, it is assumed that the topic proportions of a document
#' is driven by the expectation of the topic proportions of biterms generated from the document.
#' @param object an object of class BTM as returned by \code{\link{BTM}}
#' @param newdata a tokenised data frame containing one row per token with 2 columns
#' \itemize{
#' \item the first column is a context identifier (e.g. a tweet id, a document id, a sentence id, an identifier of a survey answer, an identifier of a part of a text)
#' \item the second column is a column called of type character containing the sequence of words occurring within the context identifier
#' }
#' @param type character string with the type of prediction.
#' Either one of 'sum_b', 'sub_w' or 'mix'. Default is set to 'sum_b' as indicated in the paper,
#' indicating to sum over the expectation of the topic proportions of biterms generated from the document. For the other approaches, please inspect the paper.
#' @param ... not used
#' @references Xiaohui Yan, Jiafeng Guo, Yanyan Lan, Xueqi Cheng. A Biterm Topic Model For Short Text. WWW2013,
#' \url{https://github.com/xiaohuiyan/BTM}, \url{https://github.com/xiaohuiyan/xiaohuiyan.github.io/blob/master/paper/BTM-WWW13.pdf}
#' @seealso \code{\link{BTM}}, \code{\link{terms.BTM}}, \code{\link{logLik.BTM}}
#' @return a matrix containing P(z|d) - the probability of the topic given the biterms.\cr
#' The matrix has one row for each unique doc_id (context identifier)
#' which contains words part of the dictionary of the BTM model and has K columns,
#' one for each topic.
#' @export
#' @examples
#' \dontshow{if(require(udpipe))\{}
#' library(udpipe)
#' data("brussels_reviews_anno", package = "udpipe")
#' x <- subset(brussels_reviews_anno, language == "nl")
#' x <- subset(x, xpos %in% c("NN", "NNP", "NNS"))
#' x <- x[, c("doc_id", "lemma")]
#' model <- BTM(x, k = 5, iter = 5, trace = TRUE)
#' scores <- predict(model, newdata = x, type = "sum_b")
#' scores <- predict(model, newdata = x, type = "sub_w")
#' scores <- predict(model, newdata = x, type = "mix")
#' head(scores)
#' \dontshow{\} # End of main if statement running only if the required packages are installed}
predict.BTM <- function(object, newdata, type = c("sum_b", "sub_w", "mix"), ...){
type <- match.arg(type)
stopifnot(inherits(newdata, "data.frame"))
if(ncol(newdata) == 2){
newdata <- data.frame(doc_id = newdata[[1]], token = newdata[[2]], stringsAsFactors = FALSE)
}else{
if(!all(c("doc_id", "token") %in% colnames(newdata))){
stop("please provide in newdata a data.frame with 2 columns as indicated in the help of BTM")
}
}
newdata <- newdata[newdata$token %in% rownames(object$phi), ]
from <- rownames(object$phi)
to <- seq_along(rownames(object$phi))-1L
newdata$word <- to[match(newdata$token, from)]
context <- split(newdata$word, newdata$doc_id)
context <- sapply(context, FUN=function(x) paste(x, collapse = " "))
scores <- btm_infer(object, context, type)
rownames(scores) <- names(context)
scores
}
#' @title Get highest token probabilities for each topic or get biterms used in the model
#' @description Get highest token probabilities for each topic or get biterms used in the model
#' @param x an object of class BTM as returned by \code{\link{BTM}}
#' @param type a character string, either 'tokens' or 'biterms'. Defaults to 'tokens'.
#' @param threshold threshold in 0-1 range. Only the terms which are more likely than the threshold are returned for each topic. Only used in case type = 'tokens'.
#' @param top_n integer indicating to return the top n tokens for each topic only. Only used in case type = 'tokens'.
#' @param ... not used
#' @return
#' Depending if type is set to 'tokens' or 'biterms' the following is returned:
#' \itemize{
#' \item{If \code{type='tokens'}: }{Get the probability of the token given the topic P(w|z).
#' It returns a list of data.frames (one for each topic) where each data.frame contains columns token and probability ordered from high to low.
#' The list is the same length as the number of topics.}
#' \item{If \code{type='biterms'}: }{a list containing 2 elements:
#' \itemize{
#' \item \code{n} which indicates the number of biterms used to train the model
#' \item \code{biterms} which is a data.frame with columns term1, term2 and topic,
#' indicating for all biterms found in the data the topic to which the biterm is assigned to
#' }
#' Note that a biterm is unordered; in the output of \code{type='biterms'} term1 is always smaller than or equal to term2.}
#' }
#' @export
#' @seealso \code{\link{BTM}}, \code{\link{predict.BTM}}, \code{\link{logLik.BTM}}
#' @examples
#' \dontshow{if(require(udpipe))\{}
#' library(udpipe)
#' data("brussels_reviews_anno", package = "udpipe")
#' x <- subset(brussels_reviews_anno, language == "nl")
#' x <- subset(x, xpos %in% c("NN", "NNP", "NNS"))
#' x <- x[, c("doc_id", "lemma")]
#' model <- BTM(x, k = 5, iter = 5, trace = TRUE)
#' terms(model)
#' terms(model, top_n = 10)
#' terms(model, threshold = 0.01, top_n = +Inf)
#' bi <- terms(model, type = "biterms")
#' str(bi)
#' \dontshow{\} # End of main if statement running only if the required packages are installed}
terms.BTM <- function(x, type = c("tokens", "biterms"), threshold = 0, top_n = 5, ...){
type <- match.arg(type)
if(type %in% "biterms"){
from <- seq_along(rownames(x$phi))
to <- rownames(x$phi)
bit <- btm_biterms(x$model)
bit$biterms$term1 <- to[match(bit$biterms$term1, from)]
bit$biterms$term2 <- to[match(bit$biterms$term2, from)]
bit$biterms <- data.frame(term1 = bit$biterms$term1,
term2 = bit$biterms$term2,
topic = bit$biterms$topic, stringsAsFactors = FALSE)
bit <- bit[c("n", "biterms")]
bit
}else if(type == "tokens"){
apply(x$phi, MARGIN=2, FUN=function(x){
x <- data.frame(token = names(x), probability = x)
x <- x[x$probability >= threshold, ]
x <- x[order(x$probability, decreasing = TRUE), ]
rownames(x) <- NULL
head(x, top_n)
})
}
}
#' @title Get the set of Biterms from a tokenised data frame
#' @description
#' This extracts words occurring in the neighbourhood of one another, within a certain window range.
#' The default setting provides the biterms used when fitting \code{\link{BTM}} with the default window parameter.
#' @param x a tokenised data frame containing one row per token with 2 columns
#' \itemize{
#' \item the first column is a context identifier (e.g. a tweet id, a document id, a sentence id, an identifier of a survey answer, an identifier of a part of a text)
#' \item the second column is a column called of type character containing the sequence of words occurring within the context identifier
#' }
#' @param type a character string, either 'tokens' or 'biterms'. Defaults to 'tokens'.
#' @param window integer with the window size for biterm extraction. Defaults to 15.
#' @param ... not used
#' @return
#' Depending if type is set to 'tokens' or 'biterms' the following is returned:
#' \itemize{
#' \item{If \code{type='tokens'}: }{a list containing 2 elements:
#' \itemize{
#' \item \code{n} which indicates the number of tokens
#' \item \code{tokens} which is a data.frame with columns id, token and freq,
#' indicating for all tokens found in the data the frequency of occurrence
#' }
#' }
#' \item{If \code{type='biterms'}: }{a list containing 2 elements:
#' \itemize{
#' \item \code{n} which indicates the number of biterms used to train the model
#' \item \code{biterms} which is a data.frame with columns term1 and term2,
#' indicating all biterms found in the data. The same biterm combination can occur several times.
#' }
#' Note that a biterm is unordered; in the output of \code{type='biterms'} term1 is always smaller than or equal to term2.}
#' }
#' @note If \code{x} is a data.frame which has an attribute called 'terms', it just returns that \code{'terms'} attribute
#' @export
#' @seealso \code{\link{BTM}}, \code{\link{predict.BTM}}, \code{\link{logLik.BTM}}
#' @examples
#' \dontshow{if(require(udpipe))\{}
#' library(udpipe)
#' data("brussels_reviews_anno", package = "udpipe")
#' x <- subset(brussels_reviews_anno, language == "nl")
#' x <- subset(x, xpos %in% c("NN", "NNP", "NNS"))
#' x <- x[, c("doc_id", "lemma")]
#' biterms <- terms(x, window = 15, type = "biterms")
#' str(biterms)
#' tokens <- terms(x, type = "tokens")
#' str(tokens)
#' \dontshow{\} # End of main if statement running only if the required packages are installed}
terms.data.frame <- function(x, type = c("tokens", "biterms"), window = 15, ...){
v <- attr(x, "terms")
if(!is.null(v)){
return(v)
}
type <- match.arg(type)
stopifnot(window >= 1)
window <- as.integer(window)
data <- x
stopifnot(inherits(data, "data.frame"))
if(ncol(data) == 2){
data <- data.frame(doc_id = data[[1]], token = data[[2]], stringsAsFactors = FALSE)
}else{
if(!all(c("doc_id", "token") %in% colnames(data))){
stop("please provide in data a data.frame with 2 columns as indicated in the help of BTM")
}
}
data <- data[!is.na(data$doc_id) & !is.na(data$token), ]
## Convert tokens to integer numbers which need to be pasted into a string separated by spaces
data$word <- factor(data$token)
freq <- table(data$word)
freq <- as.data.frame(freq, responseName = "freq", stringsAsFactors = FALSE)
vocabulary <- data.frame(id = seq_along(levels(data$word)) - 1L,
token = levels(data$word),
freq = freq$freq[match(levels(data$word), freq$Var1)],
stringsAsFactors = FALSE)
if(type == "tokens"){
return(list(n = nrow(vocabulary), tokens = vocabulary))
}
data$word <- as.integer(data$word) - 1L
voc <- max(data$word) + 1
context <- split(data$word, data$doc_id)
context <- sapply(context, FUN=function(x) paste(x, collapse = " "))
from <- vocabulary$id + 1L
to <- vocabulary$token
bit <- btm_biterms_text(x = context, W = voc, win = window)
bit$biterms$term1 <- to[match(bit$biterms$term1, from)]
bit$biterms$term2 <- to[match(bit$biterms$term2, from)]
bit$biterms <- data.frame(term1 = bit$biterms$term1,
term2 = bit$biterms$term2,
stringsAsFactors = FALSE)
bit <- bit[c("n", "biterms")]
bit
}
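
## Conceptual sketch (plain R, for illustration only; the package itself extracts biterms in
## C++ via btm_biterms_text) reproducing the 'A B C B' example from the BTM help page:
## enumerate all unordered word pairs within each sliding text window of the given size.
local({
  tokens <- c("A", "B", "C", "B")
  window <- 3
  starts <- seq_len(max(1, length(tokens) - window + 1))
  pairs <- do.call(rbind, lapply(starts, function(i) {
    win <- tokens[i:min(i + window - 1, length(tokens))]
    t(combn(win, 2))                    # all word pairs within this text window
  }))
  pairs <- t(apply(pairs, 1, sort))     # biterms are unordered: sort each pair
  table(paste(pairs[, 1], pairs[, 2]))  # biterm frequencies: 'A B' 1, 'A C' 1, 'B B' 1, 'B C' 3
})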
#' @title Get the likelihood of biterms in a BTM model
#' @description Get the likelihood of how well the biterms are fitted by the BTM model
#' @param object an object of class BTM as returned by \code{\link{BTM}}
#' @param data a data.frame with 2 columns term1 and term2 containing biterms. Defaults to the
#' biterms used to construct the model.
#' @param ... other arguments not used
#' @seealso \code{\link{BTM}}, \code{\link{predict.BTM}}, \code{\link{terms.BTM}}
#' @return a list with elements
#' \itemize{
#' \item likelihood: a vector with one element per row of \code{data} containing the likelihood
#' of the biterms alongside the BTM model. Calculated as \code{sum(phi[term1, ] * phi[term2, ] * theta)}.
#' \item \code{ll} the sum of the log of the biterm likelihoods
#' }
#' @export
#' @examples
#' \dontshow{if(require(udpipe))\{}
#' library(udpipe)
#' data("brussels_reviews_anno", package = "udpipe")
#' x <- subset(brussels_reviews_anno, language == "nl")
#' x <- subset(x, xpos %in% c("NN", "NNP", "NNS"))
#' x <- x[, c("doc_id", "lemma")]
#'
#' model <- BTM(x, k = 5, iter = 5, trace = TRUE, detailed = TRUE)
#' fit <- logLik(model)
#' fit$ll
#' \dontshow{\} # End of main if statement running only if the required packages are installed}
logLik.BTM <- function(object, data = terms.BTM(object, type = 'biterms')$biterms, ...){
stopifnot(inherits(data, "data.frame"))
stopifnot(all(c(data[[1]], data[[2]]) %in% rownames(object$phi)))
lik <- mapply(w1 = data[[1]],
w2 = data[[2]],
FUN = function(w1, w2){
sum(object$phi[w1, ] * object$phi[w2, ] * object$theta)
})
list(likelihood = lik, ll = sum(log(lik)))
}
|
/scratch/gouwar.j/cran-all/cranData/BTM/R/btm.R
|
#' @importFrom Rcpp evalCpp
#' @importFrom utils head
#' @useDynLib BTM
NULL
|
/scratch/gouwar.j/cran-all/cranData/BTM/R/pkg.R
|
#' @rdname PredictivePosterior.TSPDE
#' @import ggplot2 gridExtra
# 2020-12-15 CJS fixed problem where arrangeGrob() cannot be used in parallel. Switched to faceting.
# 2020-12-15 CJS Fixed problem where missing discrepancies will cause plots to fail
# 2015-06-10 CJS converted to ggplot()
# 2014-09-01 CJS change any Inf in desrep to NA
# 2012-01-22 CJS made X/Y axis limits the same so that p-value prints properly
# 2011-06-13 CJS returned bayesian p-values
PredictivePosteriorPlot.TSPDE <- function( discrep ) {
# Given the discrepancy measures, creates a set of panel plots.
  # It is assumed that the discrepancy matrix has 12 columns
  #    (1-2)   o,s Freeman-Tukey measures for m2
  #    (3-4)   o,s Deviance for m2
  #    (5-6)   o,s Freeman-Tukey measures for u2
  #    (7-8)   o,s Deviance for u2
  #    (9-10)  o,s Freeman-Tukey for m2+u2
  #    (11-12) o,s Deviance for m2+u2
# Change any Inf to NA
temp <- is.infinite(discrep) & !is.na(discrep)
if(sum(temp, na.rm=TRUE)>0){cat(sum(temp, na.rm=TRUE), " infinite discrepancy measures set to NA\n")}
discrep[ temp ] <- NA
discrep.long <- data.table::melt( data.table::as.data.table(discrep),
measure.vars=list(seq(1,ncol(discrep),2), seq(2,ncol(discrep),2)),
value.name=c("Observed","Simulated"),
variable.name="Statistic",
variable.factor=FALSE)
titles <- data.frame(Statistic=as.character(1:6), Title=c(
"Freeman-Tukey for m2",
"Deviance for m2",
"Freeman-Tukey for u2",
"Deviance for u2",
"Total Freeman-Tukey",
"Total Deviance"), stringsAsFactors=FALSE)
discrep.long <- merge(discrep.long, titles)
# compute the bayesian p-values
p_values <-plyr::ddply(discrep.long, c("Statistic","Title"), function(x){
p.value=mean(x$Observed < x$Simulated, na.rm=TRUE)
data.frame(p.value=p.value)
})
p_values$label = paste("Bayesian GOF P:",formatC(p_values$p.value, digits=2, format="f"))
gof.plot <-ggplot(data=discrep.long, aes_(x=~Simulated, y=~Observed))+
geom_point()+
geom_abline(intercept=0, slope=1)+
geom_text(data=p_values, x=Inf,y=-Inf, hjust=1.05, vjust=-0.2, label=p_values$label)+
facet_wrap(~Title, ncol=2, nrow=3, scales="free")
gof <- list(bp.plot=gof.plot, bp.values=data.frame(test.names=titles, p.value=p_values, stringsAsFactors=FALSE))
gof
}
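
## Sketch of the Bayesian goodness-of-fit p-value computed above (hypothetical discrepancy
## draws, for illustration only): the p-value is the posterior predictive proportion of
## draws in which the simulated discrepancy exceeds the observed one; values close to 0
## or 1 indicate lack of fit, values near 0.5 indicate an adequate fit.
local({
  set.seed(2)
  observed  <- rchisq(1000, df = 10)        # hypothetical observed-data discrepancies
  simulated <- rchisq(1000, df = 10)        # hypothetical replicated-data discrepancies
  mean(observed < simulated, na.rm = TRUE)  # Bayesian p-value, near 0.5 here
})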
|
/scratch/gouwar.j/cran-all/cranData/BTSPAS/R/PredictivePosteriorPlot_TSPDE.R
|
# 2020-12-15 CJS Fixed problem with u2==NA in the plots
# 2015-06-10 CJS Updated to produce ggplot2 plots
# 2014-09-21 CJS change all Inf to NA
# 2012-01-22 CJS made X/Y axis limits the same so p-value prints properly
# 2011-06-13 CJS returned bayesian p-values
PredictivePosteriorPlot.TSPDE.WHCH <- function( discrep ) {
# Given the discrepancy measures, creates a set of panel plots.
# It is assumed that the discrep matrix has the following columns
# (1-2) o,s Freeman-Tukey measures for m2
# (3-4) o,s Freeman-Tukey measures for u2.A
# (5-6) o,s Freeman-Tukey measures for u2.N
# (7-8) o,s Freeman-Tukey for m2+u2.A+u2.N
# Change any Inf to NA
temp <- is.infinite(discrep) & !is.na(discrep)
if(sum(temp, na.rm=TRUE)>0){cat(sum(temp, na.rm=TRUE), " infinite discrepancy measures set to NA\n")}
discrep[ temp ] <- NA
#browser()
discrep.long <- data.table::melt( data.table::as.data.table(discrep),
measure.vars=list(seq(1,ncol(discrep),2), seq(2,ncol(discrep),2)),
value.name=c("Observed","Simulated"),
variable.name="Statistic",
variable.factor=FALSE)
  titles <- data.frame(Statistic=as.character(1:(ncol(discrep)/2)), Title=c(
"Freeman-Tukey for m2",
"Freeman-Tukey for u2.A",
"Freeman-Tukey for u2.N",
"Total Freeman-Tukey"), stringsAsFactors=FALSE)
discrep.long <- merge(discrep.long, titles)
# compute the bayesian p-values
p_values <-plyr::ddply(discrep.long, c("Statistic","Title"), function(x){
p.value=mean(x$Observed < x$Simulated, na.rm=TRUE)
data.frame(p.value=p.value)
})
p_values$label = paste("Bayesian GOF P:",formatC(p_values$p.value, digits=2, format="f"))
gof.plot <-ggplot(data=discrep.long, aes_(x=~Simulated, y=~Observed))+
geom_point()+
geom_abline(intercept=0, slope=1)+
geom_text(data=p_values, x=Inf,y=-Inf, hjust=1.05, vjust=-0.2, label=p_values$label)+
facet_wrap(~Title, ncol=2, nrow=3, scales="free")
gof <- list(bp.plot=gof.plot, bp.values=data.frame(test.names=titles, p.value=p_values, stringsAsFactors=FALSE))
gof
}
|
/scratch/gouwar.j/cran-all/cranData/BTSPAS/R/PredictivePosteriorPlot_TSPDE_WHChinook.R
|
#' @rdname PredictivePosterior.TSPDE
#' @import ggplot2 ggforce plyr
#' @importFrom data.table setDT
# We need to use the importFrom here to get the data.table package in the namespace but to avoid warning messages
# from R CMD check about melt() and dcast() being overwritten from the reshape2 package.
# 2020-12-15 CJS Fixed problem with u2==NA causing plots to fail
# 2018-12-03 CJS convert to using facets rather than marrangeGrob as the latter had problems
# 2015-06-10 CJS Converted to ggplot()
# 2014-09-01 CJS Change Inf to NA
# 2012-01-22 CJS Made X/Y axis limits the same so that Bayesian p-value prints properly
# 2011-06-13 CJS returned p-values
# 2010-03-29 CJS First creation of routine
PredictivePosteriorPlot.TSPDE.WHCH2 <- function( discrep, ncol=2, nrow=2 ) {
# Given the discrepancy measures, creates a set of panel plots.
# It is assumed that the discrepancy measure has 16 columns for the Bayesian p-value plot
# ( 1- 2) o,s Freeman-Tukey measures for m2
# ( 3- 4) o,s Freeman-Tukey measures for u2.A.YoY
# ( 5- 6) o,s Freeman-Tukey measures for u2.N.YoY
# ( 7- 8) o,s Freeman-Tukey measures for u2.A.1
# ( 9-10) o,s Freeman-Tukey measures for u2.N.1
# (11-12) o,s Freeman-Tukey for u2.A.YoY+u2.N.YoY
# (13-14) o,s Freeman-Tukey for u2.A.1 +u2.N.1
  #   (15-16) o,s Freeman-Tukey for all data (m2, YoY and Age 1)
# Change any Inf to NA
temp <- is.infinite(discrep) & !is.na(discrep)
if(sum(temp, na.rm=TRUE)>0){cat(sum(temp, na.rm=TRUE), " infinite discrepancy measures set to NA\n")}
discrep[ temp ] <- NA
# Convert from wide to long format and add labels
discrep.long <- data.table::melt( data.table::as.data.table(discrep),
measure.vars=list(seq(1,ncol(discrep),2), seq(2,ncol(discrep),2)),
value.name=c("Observed","Simulated"),
variable.name="Statistic",
variable.factor=FALSE)
titles <- data.frame(Statistic=as.character(1:8),
Title=c("Freeman-Tukey for m2",
"Freeman-Tukey for u2.A.YoY",
"Freeman-Tukey for u2.N.YoY",
"Freeman-Tukey for u2.A.1",
"Freeman-Tukey for u2.N.1",
"Freeman-Tukey for YoY",
"Freeman-Tukey for Age 1",
"Total Freeman-Tukey"), stringsAsFactors=FALSE)
discrep.long <- merge(discrep.long, titles)
# compute the bayesian p-values
p_values <-plyr::ddply(discrep.long, c("Statistic","Title"), function(x){
p.value=mean(x$Observed < x$Simulated, na.rm=TRUE)
data.frame(p.value)
})
p_values$label = paste("Bayesian GOF P:",formatC(p_values$p.value, digits=2, format="f"))
#browser()
gof <- plyr::llply(1:2, function (page){
ggplot(data=discrep.long, aes_(x=~Simulated, y=~Observed))+
geom_point()+
geom_abline(intercept=0, slope=1)+
geom_text(data=p_values, x=Inf,y=-Inf, hjust=1, vjust=0, label=p_values$label)+
ggforce::facet_wrap_paginate(~Title, ncol=ncol, nrow=nrow, page=page, scales="free")
})
gof
} # end of function
|
/scratch/gouwar.j/cran-all/cranData/BTSPAS/R/PredictivePosteriorPlot_TSPDE_WHChinook2.r
|
#' @rdname PredictivePosterior.TSPDE
# 2020-12-15 CJS Fixed problem with u2==NA causing plots to fail
# Removed arrangeGrob to organize plots
# 2015-06-10 CJS converted to ggplot()
# 2014-09-01 CJS Inf in discrep set to NA
# 2012-01-22 CJS made X/Y axis limits the same so p-value prints properly
# 2011-06-13 CJS returned bayesian p-values
PredictivePosteriorPlot.TSPDE.WHSteel <- function( discrep ) {
# Given the discrepancy measures, creates a set of panel plots.
#    It is assumed that the discrepancy matrix has 10 columns
# (1-2) o,s Freeman-Tukey measures for m2
# (3-4) o,s Freeman-Tukey measures for u2.W.YoY
# (5-6) o,s Freeman-Tukey measures for u2.W.1
# (7-8) o,s Freeman-Tukey measures for U2.H.1
# (9-10) o,s combined Freeman-Tukey
# Change any Inf to NA
temp <- is.infinite(discrep) & !is.na(discrep)
if(sum(temp, na.rm=TRUE)>0){cat(sum(temp, na.rm=TRUE), " infinite discrepancy measures set to NA\n")}
discrep[ temp ] <- NA
discrep.long <- data.table::melt( data.table::as.data.table(discrep),
measure.vars=list(seq(1,ncol(discrep),2), seq(2,ncol(discrep),2)),
value.name=c("Observed","Simulated"),
variable.name="Statistic",
variable.factor=FALSE)
  titles <- data.frame(Statistic=as.character(1:(ncol(discrep)/2)), Title=c(
"Freeman-Tukey for m2",
"Freeman-Tukey for u2.W.YoY",
"Freeman-Tukey for u2.W.1",
"Freeman-Tukey for u2.H.1",
"Total Freeman-Tukey"), stringsAsFactors=FALSE)
discrep.long <- merge(discrep.long, titles)
# compute the bayesian p-values
p_values <-plyr::ddply(discrep.long, c("Statistic","Title"), function(x){
p.value=mean(x$Observed < x$Simulated, na.rm=TRUE)
data.frame(p.value=p.value)
})
p_values$label = paste("Bayesian GOF P:",formatC(p_values$p.value, digits=2, format="f"))
gof.plot <-ggplot(data=discrep.long, aes_(x=~Simulated, y=~Observed))+
geom_point()+
geom_abline(intercept=0, slope=1)+
geom_text(data=p_values, x=Inf,y=-Inf, hjust=1.05, vjust=-0.2, label=p_values$label)+
facet_wrap(~Title, ncol=2, nrow=3, scales="free")
gof <- list(bp.plot=gof.plot, bp.values=data.frame(test.names=titles, p.value=p_values, stringsAsFactors=FALSE))
gof
}
|
/scratch/gouwar.j/cran-all/cranData/BTSPAS/R/PredictivePosteriorPlot_TSPDE_WHSteel.R
|
#' @rdname PredictivePosterior.TSPDE
# 2020-12-15 CJS Fixed problem with u2=NA that causes the plot fail
# 2019-02-13 CJS convert from arrangeGrob to facet_wrap because arrangeGrob doesn't work in parallel.
# 2015-06-10 CJS convert to ggplot()
# 2014-09-01 CJS dealing with Inf and -Inf in the discrepancy measures
# 2012-01-22 CJS made X/Y axis limits the same so p-value prints properly
# 2011-06-13 CJS returned bayesian p-values
PredictivePosteriorPlot.TSPNDE <- function( discrep ) {
# Given the discrepancy measures, creates a set of panel plots.
# It is assumed that the bp has 12 columns
# (1-2) o,s Freeman-Tukey measures for m2
# (3-4) o,s Deviance for m2
# (5-6) o,s Freeman-Tukey measures for u2
# (7-8) o,s Deviance for u2
# 9-10 o,s Freeman-Tukey for m2+u2
# 11-12 o,s Deviance for m2+u2
# Change any Inf to NA
#browser()
temp <- is.infinite(discrep) & !is.na(discrep)
if(sum(temp, na.rm=TRUE)>0){cat(sum(temp, na.rm=TRUE), " infinite discrepancy measures set to NA\n")}
discrep[ temp ] <- NA
discrep.long <- data.table::melt( data.table::as.data.table(discrep),
measure.vars=list(seq(1,ncol(discrep),2), seq(2,ncol(discrep),2)),
value.name=c("Observed","Simulated"),
variable.name="Statistic",
variable.factor=FALSE)
titles <- data.frame(Statistic=as.character(1:6), Title=c(
"Freeman-Tukey for m2",
"Deviance for m2",
"Freeman-Tukey for u2",
"Deviance for u2",
"Total Freeman-Tukey",
"Total Deviance"), stringsAsFactors=FALSE)
discrep.long <- merge(discrep.long, titles)
# compute the bayesian p-values
p_values <-plyr::ddply(discrep.long, c("Statistic","Title"), function(x){
p.value=mean(x$Observed < x$Simulated, na.rm=TRUE)
data.frame(p.value=p.value)
})
p_values$label = paste("Bayesian GOF P:",formatC(p_values$p.value, digits=2, format="f"))
gof.plot <-ggplot(data=discrep.long, aes_(x=~Simulated, y=~Observed))+
geom_point()+
geom_abline(intercept=0, slope=1)+
geom_text(data=p_values, x=Inf,y=-Inf, hjust=1, vjust=0, label=p_values$label)+
facet_wrap(~Title, ncol=2, nrow=3, scales="free")
gof <- list(bp.plot=gof.plot, bp.values=data.frame(test.names=titles, p.value=p_values, stringsAsFactors=FALSE))
#browser()
gof
}
|
/scratch/gouwar.j/cran-all/cranData/BTSPAS/R/PredictivePosteriorPlot_TSPNDE.R
|
#' Generate Predictive Posterior Plots (Bayesian p-values) for a number of models.
#'
#' This is an internal function, not normally of use to users.
#' @aliases PredictivePosterior_TSPDE_WHChinook
#' @aliases PredictivePosterior.TSPDE.WHCH2
#' @aliases PredictivePosterior.TSPDE.WHSteel
#' @aliases PredictivePosterior.TSPNDE
#' @aliases PredictivePosterior.TSPNDENP
#' @aliases PredictivePosteriorPlot.TSPDE
#' @aliases PredictivePosteriorPlot.TSPDE.WHCH
#' @aliases PredictivePosteriorPlot.TSPDE.WHCH2
#' @aliases PredictivePosteriorPlot.TSPDE.WHSteel
#' @aliases PredictivePosteriorPlot.TSPNDE
#' @importFrom stats sd dbinom rbinom
#' @import plyr
#' @keywords internal
# 2018-12-15 CJS added in logitP fixed entries.
PredictivePosterior.TSPDE <- function (n1, m2, u2,
logitP.fixed, p,
U) {
# Generate Predictive Posterior Plot (Bayesian p-value) given the data
# for a TimeStratified Petersen with Diagonal Elements and error
# n1, m2, u2 = vectors of input data
# p, U = matrix of values (rows=number of posterior samples, columns=strata)
# These are returned from the call to JAGS
#
#cat("Call to PredictivePosterior\n")
#browser()
discrep <- matrix(0, nrow=0, ncol=12)
select.m2 <- !is.na(m2)
select.u2 <- !is.na(u2)
for(i in 1:nrow(p)){
# generate sample data
gen.m2 <- stats::rbinom(ncol(p), n1, p[i,])
gen.u2 <- stats::rbinom(ncol(p), U[i,], p[i,])
# compute a discrepancy measure
# Observed vs expected values for recaptures of marked fish
temp <- sqrt(m2) - sqrt(n1*p[i,])
d1.m2.o <- sum( temp[select.m2]^2, na.rm=TRUE)
temp <- sqrt(gen.m2) - sqrt(n1*p[i,])
d1.m2.s <- sum( temp[select.m2]^2, na.rm=TRUE)
# Observed vs expected values for captures of unmarked fish
temp <- sqrt(u2) - sqrt(U[i,]*p[i,])
d1.u2.o <- sum( temp[select.u2]^2, na.rm=TRUE)
temp <- sqrt(gen.u2) - sqrt(U[i,]*p[i,])
d1.u2.s <- sum( temp[select.u2]^2, na.rm=TRUE)
# Deviance (-2*log-likelihood )
temp <- stats::dbinom(m2, n1, p[i,], log=TRUE)
d2.m2.o <- -2*sum(temp[select.m2])
temp <- stats::dbinom(gen.m2, n1, p[i,], log=TRUE)
d2.m2.s <- -2*sum(temp[select.m2])
temp <- stats::dbinom(u2, U[i,], p[i,], log=TRUE)
d2.u2.o <- -2*sum(temp[select.u2])
temp <- stats::dbinom(gen.u2, U[i,], p[i,], log=TRUE)
d2.u2.s <- -2*sum(temp[select.u2])
# combined discrepancy measures
d1.o <- d1.m2.o + d1.u2.o
d1.s <- d1.m2.s + d1.u2.s
d2.o <- d2.m2.o + d2.u2.o
d2.s <- d2.m2.s + d2.u2.s
# update the array
discrep <- rbind(discrep,
c(d1.m2.o, d1.m2.s, d2.m2.o, d2.m2.s,
d1.u2.o, d1.u2.s, d2.u2.o, d2.u2.s,
d1.o , d1.s, d2.o, d2.s))
}
#browser()
discrep
}
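
# Illustrative sketch (not part of the package): a toy call to the internal
# routine above and how a Bayesian GOF p-value is read from the returned
# discrepancy matrix. All counts and posterior draws below are made up; in real
# use p and U come from the JAGS posterior sample.
if (FALSE) {
  set.seed(1)
  n1 <- c(100, 120, 90); m2 <- c(10, 15, 9); u2 <- c(300, 350, 280)
  p  <- matrix(runif(30, 0.05, 0.15), nrow = 10, ncol = 3)          # 10 draws x 3 strata
  U  <- matrix(round(runif(30, 2000, 4000)), nrow = 10, ncol = 3)   # 10 draws x 3 strata
  discrep <- PredictivePosterior.TSPDE(n1, m2, u2, logitP.fixed = rep(NA, 3), p = p, U = U)
  mean(discrep[, 1] < discrep[, 2])   # Bayesian p-value for the m2 Freeman-Tukey measure
}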
|
/scratch/gouwar.j/cran-all/cranData/BTSPAS/R/PredictivePosterior_TSPDE.R
|
#' @rdname PredictivePosterior.TSPDE
#' @importFrom stats sd rbinom
#' @import plyr
PredictivePosterior.TSPDE.WHCH <- function (time, n1, m2, u2.A, u2.N, clip.frac.H, p, U.W, U.H, hatch.after) {
# 2015-06-10 CJS Fixed bug. When I converted to JAGS, I had to add U.H for the time before hatch.after because
# JAGS does not monitor partial arrays properly. In the call, I had expanded the U.H matrix
# for this behaviour, but this is no longer needed.
# Also, the select.u2.A was not computed properly. It should be a combination of >hatch.after and not missing
# Generate Predictive Posterior Plot (Bayesian p-value) given the data
# for a TimeStratified Petersen with Diagonal Elements and error
# n1, m2, u2.A, u2.N = vectors of input data
# p, U.W. U.H = matrix of values (rows=number of posterior samples, columns=strata)
# These are returned from the call to JAGS
#
#cat("Call to PredictivePosterior\n")
#browser()
discrep <- matrix(0, nrow=0, ncol=8)
select.m2 <- !is.na(m2)
select.u2.A <- !is.na(u2.A) & (time>hatch.after)
select.u2.N <- !is.na(u2.N)
#browser()
for(i in 1:nrow(p)){
# generate sample data
gen.m2 <- stats::rbinom(ncol(p), n1, p[i,])
gen.u2.A <- stats::rbinom(ncol(p), U.H[i,], p[i,]*clip.frac.H)*(time>hatch.after) # only hatchery fish can generate adipose clipped fish
gen.u2.N <- stats::rbinom(ncol(p), U.W[i,], p[i,]) +
stats::rbinom(ncol(p), U.H[i,], p[i,]*(1-clip.frac.H))*(time>hatch.after) # wild and hatchery fish generate non-clipped fish
# compute a discrepancy measure
# Observed vs expected values for recaptures of marked fish
temp <- sqrt(m2) - sqrt(n1*p[i,])
d1.m2.o <- sum( temp[select.m2]^2, na.rm=TRUE)
temp <- sqrt(gen.m2) - sqrt(n1*p[i,])
d1.m2.s <- sum( temp[select.m2]^2, na.rm=TRUE)
# Observed vs expected values for captures of unmarked but clipped fish
# These are hatchery fish and only available after 'hatch.after'
temp <- sqrt(u2.A) - sqrt(U.H[i,]*p[i,]*clip.frac.H)
d1.u2.A.o <- sum( temp[select.u2.A]^2, na.rm=TRUE)
temp <- sqrt(gen.u2.A) - sqrt(U.H[i,]*p[i,]*clip.frac.H)
d1.u2.A.s <- sum( temp[select.u2.A]^2, na.rm=TRUE)
# Observed vs expected values for captures of unmarked fish with NO adipose clips (a mixture of wild and hatchery fish)
# Notice that hatchery fish are only available after hatch.after
      temp <- sqrt(u2.N) - sqrt(U.W[i,]*p[i,] + U.H[i,]*p[i,]*(1-clip.frac.H)*(time>hatch.after))
      d1.u2.N.o <- sum( temp[select.u2.N]^2, na.rm=TRUE)
      temp <- sqrt(gen.u2.N) - sqrt(U.W[i,]*p[i,] + U.H[i,]*p[i,]*(1-clip.frac.H)*(time>hatch.after))
d1.u2.N.s <- sum( temp[select.u2.N]^2, na.rm=TRUE)
# combined discrepancy measures
d1.o <- d1.m2.o + d1.u2.A.o + d1.u2.N.o # observed data total discrepancy
d1.s <- d1.m2.s + d1.u2.A.s + d1.u2.N.s # simulated data total discrepancy
# update the array
discrep <- rbind(discrep,
c(d1.m2.o, d1.m2.s,
d1.u2.A.o, d1.u2.A.s,
d1.u2.N.o, d1.u2.N.s,
d1.o , d1.s
))
}
#browser()
discrep
}
|
/scratch/gouwar.j/cran-all/cranData/BTSPAS/R/PredictivePosterior_TSPDE_WHChinook.R
|
#' @rdname PredictivePosterior.TSPDE
#' @importFrom stats sd rbinom
#' @import plyr
# 2015-06-10 CJS BUG fix in creating the GOF values. The selection was not done properly.
# d1.o.s was not computed properly either.
# 2010-03-28 CJS initial creation of function for the second Wild-Hatchery Chinook problem of Eric Logan
PredictivePosterior.TSPDE.WHCH2 <- function (time, n1, m2,
u2.A.YoY, u2.N.YoY, u2.A.1, u2.N.1, clip.frac.H.YoY, clip.frac.H.1, p,
U.W.YoY, U.H.YoY, U.W.1, U.H.1, hatch.after.YoY) {
# Generate Predictive Posterior Plot (Bayesian p-value)
# n1, m2, u2.A.YoY, u2.N.YoY, u2.A.1, u2.N.1 = vectors of input data
# p, U.W.YoY, U.H.YoY, U.W.1, U.H.1 = matrix of values (rows=number of posterior samples, columns=strata)
# These are returned from the call to JAGS
#
#cat("Call to PredictivePosterior for Wild vs Hatchery and YoY vs Age1 \n")
#browser()
discrep <- matrix(0, nrow=0, ncol=16)
select.m2 <- !is.na(m2)
select.u2.A.YoY <- !is.na(u2.A.YoY) & (time>hatch.after.YoY)
select.u2.N.YoY <- !is.na(u2.N.YoY)
select.u2.A.1 <- !is.na(u2.A.1) # These could residualize and be available even before hatch.after the next year.
select.u2.N.1 <- !is.na(u2.N.1)
for(i in 1:nrow(p)){
# generate sample data
gen.m2 <- stats::rbinom(ncol(p), n1, p[i,])
gen.u2.A.YoY <- stats::rbinom(ncol(p), U.H.YoY[i,], p[i,]*clip.frac.H.YoY)*(time>hatch.after.YoY) # only hatchery fish can generate adipose clipped fish
gen.u2.N.YoY <- stats::rbinom(ncol(p), U.W.YoY[i,], p[i,]) +
stats::rbinom(ncol(p), U.H.YoY[i,], p[i,]*(1-clip.frac.H.YoY))*(time>hatch.after.YoY) # wild and hatchery fish generate non-clipped fish
gen.u2.A.1 <- stats::rbinom(ncol(p), U.H.1 [i,], p[i,]*clip.frac.H.1)
gen.u2.N.1 <- stats::rbinom(ncol(p), U.W.1 [i,], p[i,]) +
stats::rbinom(ncol(p), U.H.1 [i,], p[i,]*(1-clip.frac.H.1)) # wild and hatchery fish generate non-clipped fish
# compute a discrepancy measure
# Observed vs expected values for recaptures of marked fish
temp <- sqrt(m2) - sqrt(n1*p[i,])
d1.m2.o <- sum( temp[select.m2]^2, na.rm=TRUE)
temp <- sqrt(gen.m2) - sqrt(n1*p[i,])
d1.m2.s <- sum( temp[select.m2]^2, na.rm=TRUE)
# Observed vs expected values for captures of unmarked but clipped fish
temp <- sqrt(u2.A.YoY) - sqrt(U.H.YoY[i,]*p[i,]*clip.frac.H.YoY)*(time>hatch.after.YoY) # YoY fish. Recall clipped YoY only come after hatch.after
d1.u2.A.YoY.o <- sum( temp[select.u2.A.YoY]^2, na.rm=TRUE)
temp <- sqrt(gen.u2.A.YoY) - sqrt(U.H.YoY[i,]*p[i,]*clip.frac.H.YoY)*(time>hatch.after.YoY)
d1.u2.A.YoY.s <- sum( temp[select.u2.A.YoY]^2, na.rm=TRUE)
temp <- sqrt(u2.A.1 ) - sqrt(U.H.1[i,]*p[i,]*clip.frac.H.1) # age 1 fish. These may be residualized and available prior to hatch.after
d1.u2.A.1.o <- sum( temp[select.u2.A.1]^2, na.rm=TRUE)
temp <- sqrt(gen.u2.A.1) - sqrt(U.H.1[i,]*p[i,]*clip.frac.H.1)
d1.u2.A.1.s <- sum( temp[select.u2.A.1]^2, na.rm=TRUE)
# Observed vs expected values for captures of unmarked fish with NO adipose clips (a mixture of wild and hatchery fish)
      temp <- sqrt(u2.N.YoY) - sqrt(U.W.YoY[i,]*p[i,]+U.H.YoY[i,]*p[i,]*(1-clip.frac.H.YoY))*(time>hatch.after.YoY)
      d1.u2.N.YoY.o <- sum( temp[select.u2.N.YoY]^2, na.rm=TRUE)
      temp <- sqrt(gen.u2.N.YoY) - sqrt(U.W.YoY[i,]*p[i,] + U.H.YoY[i,]*p[i,]*(1-clip.frac.H.YoY))*(time>hatch.after.YoY)
      d1.u2.N.YoY.s <- sum( temp[select.u2.N.YoY]^2, na.rm=TRUE)
      temp <- sqrt(u2.N.1) - sqrt(U.W.1[i,]*p[i,]+U.H.1[i,]*p[i,]*(1-clip.frac.H.1))*(time>hatch.after.YoY)
      d1.u2.N.1.o <- sum( temp[select.u2.N.1]^2, na.rm=TRUE)
      temp <- sqrt(gen.u2.N.1) - sqrt(U.W.1[i,]*p[i,]+U.H.1[i,]*p[i,]*(1-clip.frac.H.1))*(time>hatch.after.YoY)
d1.u2.N.1.s <- sum( temp[select.u2.N.1]^2, na.rm=TRUE)
# combined discrepancy measures
d1.YoY.o <- d1.u2.A.YoY.o + d1.u2.N.YoY.o # observed data total discrepancy for YoY
d1.YoY.s <- d1.u2.A.YoY.s + d1.u2.N.YoY.s # simulated data total discrepancy for YoY
d1.1.o <- d1.u2.A.1.o + d1.u2.N.1.o # observed data total discrepancy for Age1
d1.1.s <- d1.u2.A.1.s + d1.u2.N.1.s # simulated data total discrepancy for Age1
d1.o <- d1.m2.o + d1.YoY.o + d1.1.o # observed data total discrepancy all data
d1.s <- d1.m2.s + d1.YoY.s + d1.1.s # simulated data total discrepancy all data
# update the array
discrep <- rbind(discrep,
c(d1.m2.o, d1.m2.s,
d1.u2.A.YoY.o, d1.u2.A.YoY.s,
d1.u2.N.YoY.o, d1.u2.N.YoY.s,
d1.u2.A.1.o, d1.u2.A.1.s,
d1.u2.N.1.o, d1.u2.N.1.s,
d1.YoY.o, d1.YoY.s,
d1.1.o, d1.1.s,
d1.o , d1.s
))
}
#browser()
discrep
}
|
/scratch/gouwar.j/cran-all/cranData/BTSPAS/R/PredictivePosterior_TSPDE_WHChinook2.r
|
#' @rdname PredictivePosterior.TSPDE
#' @importFrom stats sd rbinom
#' @import plyr
# 2015-06-10 CJS Bug fix on selecting after hatch after
PredictivePosterior.TSPDE.WHSteel <- function (time, n1, m2, u2.W.YoY, u2.W.1, u2.H.1, p, U.W.YoY, U.W.1, U.H.1, hatch.after) {
# Generate Predictive Posterior Plot (Bayesian p-value) given the data
# for a TimeStratified Petersen with Diagonal Elements and error
# n1, m2, u2.* = vectors of input data
# p, U.* = matrix of values (rows=number of posterior samples, columns=strata)
# These are returned from the call to JAGS
#
#cat("Call to PredictivePosterior\n")
#browser()
discrep <- matrix(0, nrow=0, ncol=10)
select.m2 <- !is.na(m2)
select.u2.W.YoY <- !is.na(u2.W.YoY)
select.u2.W.1 <- !is.na(u2.W.1)
select.u2.H.1 <- !is.na(u2.H.1) & (time>hatch.after)
for(i in 1:nrow(p)){
# generate sample data
gen.m2 <- stats::rbinom(ncol(p), n1, p[i,])
gen.u2.W.YoY <- stats::rbinom(ncol(p), U.W.YoY[i,], p[i,])
gen.u2.W.1 <- stats::rbinom(ncol(p), U.W.1 [i,], p[i,])
gen.u2.H.1 <- stats::rbinom(ncol(p), U.H.1 [i,], p[i,])*(time>hatch.after)
# compute a discrepancy measure
# Observed vs expected values for recaptures of marked fish
temp <- sqrt(m2) - sqrt(n1*p[i,])
d1.m2.o <- sum( temp[select.m2]^2, na.rm=TRUE)
temp <- sqrt(gen.m2) - sqrt(n1*p[i,])
d1.m2.s <- sum( temp[select.m2]^2, na.rm=TRUE)
# Observed vs expected values for observed data
temp <- sqrt(u2.W.YoY) - sqrt(U.W.YoY[i,]*p[i,])
d1.u2.W.YoY.o <- sum( temp[select.u2.W.YoY]^2, na.rm=TRUE)
temp <- sqrt(u2.W.1) - sqrt(U.W.1[i,]*p[i,])
d1.u2.W.1.o <- sum( temp[select.u2.W.1]^2, na.rm=TRUE)
temp <- sqrt(u2.H.1) - sqrt(U.H.1[i,]*p[i,])
d1.u2.H.1.o <- sum( temp[select.u2.H.1]^2, na.rm=TRUE)
# Observed vs expected values for simulated data
temp <- sqrt(gen.u2.W.YoY) - sqrt(U.W.YoY[i,]*p[i,])
d1.u2.W.YoY.s <- sum( temp[select.u2.W.YoY]^2, na.rm=TRUE)
temp <- sqrt(gen.u2.W.1) - sqrt(U.W.1[i,]*p[i,])
d1.u2.W.1.s <- sum( temp[select.u2.W.1]^2, na.rm=TRUE)
temp <- sqrt(gen.u2.H.1) - sqrt(U.H.1[i,]*p[i,])*(time>hatch.after)
d1.u2.H.1.s <- sum( temp[select.u2.H.1]^2, na.rm=TRUE)
# combined discrepancy measures
d1.o <- d1.m2.o + d1.u2.W.YoY.o + d1.u2.W.1.o + d1.u2.H.1.o # observed data total discrepancy
d1.s <- d1.m2.s + d1.u2.W.YoY.s + d1.u2.W.1.s + d1.u2.H.1.s # simulated data total discrepancy
# update the array
discrep <- rbind(discrep,
c(d1.m2.o, d1.m2.s,
d1.u2.W.YoY.o, d1.u2.W.YoY.s,
d1.u2.W.1.o, d1.u2.W.1.s,
d1.u2.H.1.o, d1.u2.H.1.s,
d1.o , d1.s
))
}
#browser()
discrep
}
|
/scratch/gouwar.j/cran-all/cranData/BTSPAS/R/PredictivePosterior_TSPDE_WHSteel.R
|
# 2020-12-15 CJS If u2 is missing, then some of the test statistics must be modified to exclude
# any contribution from the simulated data when u2 is missing
#' @rdname PredictivePosterior.TSPDE
#' @importFrom stats sd dbinom dmultinom pnorm rbinom rmultinom
#' @import plyr
PredictivePosterior.TSPNDE <- function (n1,
m2,
u2,
logitP.fixed,
p,
U,
mu,
sigma) {
# Generate Predictive Posterior Plot (Bayesian p-value) given the data
# for a TimeStratified Petersen with NonDiagonal Elements and error
# n1, m2, u2 = vectors of input data
# p, U,mu,sigma = matrix of values (rows=number of posterior samples, columns=strata)
# These are returned from the call to JAGS
#
s <- length(n1)
t <- length(u2)
## Interleave p and logitP.fixed, ignoring extra p's added at end
  ## CJS - 2014-09-01 In JAGS, this is not needed because the logitP matrix is passed already padded
  #    to the correct dimension, so the interleaving code below is left commented out.
p.bkp <- p
# 2014-09-01. Fixed a problem when fixed p is in first position and 1:0 doesn't work properly
# if(any(!is.na(logitP.fixed[1:t])) & tolower(engine)=="openbugs" ){ # the second condition is for JAGS
# for(j in which(!is.na(logitP.fixed[1:t]))){
# if(j==1){ p <- cbind(expit(logitP.fixed[1]), p)} # code below fails when j==1
# if(j> 1){ p <- cbind(p[,1:(j-1)], expit(logitP.fixed[j]), p[,-(1:(j-1))]) }
# }
# }
## Compute matrices of movement probabilities for each iteration
Theta <- lapply(1:nrow(p),function(i) lnTheta(mu[i,],sigma[i,],s,t))
## Simulate data for each iteration
simData <- lapply(1:nrow(p),function(i) simTSPNDE(n1,U[i,],p[i,],Theta[[i]]))
## Compute discrepancy measures
discrep <- t(sapply(1:nrow(p),function(k){
## 1) Observed vs expected values for recaptures of marked fish
## a) Observed data
temp1.o <- sqrt(m2[,1:t]) - sqrt(n1 * t(t(Theta[[k]]) * p[k,1:t]))
notmissing <- !is.na(temp1.o)
d1.m2.o <- sum(temp1.o^2,na.rm=TRUE)
## b) Simulated data
temp1.s <- sqrt(simData[[k]]$m2[,1:t]) - sqrt(n1 * t(t(Theta[[k]]) * p[k,1:t]))
d1.m2.s <- sum(temp1.s[notmissing]^2,na.rm=TRUE)
## 2) Observed vs expected values for captures of unmarked fish
## a) Observed data
#browser()
temp2.o <- sqrt(u2) - sqrt(U[k,] * p[k,1:t])
notmissing <- !is.na(temp2.o)
d1.u2.o <- sum(temp2.o^2,na.rm=TRUE)
## b) Simulated data
temp2.s <- sqrt(simData[[k]]$u2) - sqrt(U[k,] * p[k,1:t])
d1.u2.s <- sum(temp2.s[notmissing]^2,na.rm=TRUE)
## 3) Deviance (-2*log-likelihood)
## a) Observed data
d2.m2.o <- -2 * sum(sapply(1:s,function(i){
cellProbs <- Theta[[k]][i,] * p[k,1:t]
cellProbs <- c(cellProbs,1-sum(cellProbs))
stats::dmultinom(m2[i,],n1[i],cellProbs,log=TRUE)
}))
notmissing <- !is.na(u2) # need to ignore contributions from missing u2 values
d2.u2.o <- -2 * sum(stats::dbinom(u2[notmissing],U[k,notmissing],p[k,notmissing],log=TRUE))
d2.o <- d2.m2.o + d2.u2.o
## b) Simulated data
d2.m2.s <- -2 * sum(sapply(1:s,function(i){
cellProbs <- Theta[[k]][i,] * p[k,1:t]
cellProbs <- c(cellProbs,1-sum(cellProbs))
stats::dmultinom(simData[[k]]$m2[i,],n1[i],cellProbs,log=TRUE)
}))
d2.u2.s <- -2 * sum(stats::dbinom(simData[[k]]$u2[notmissing],U[k,notmissing],p[k,notmissing],log=TRUE))
d2.s <- d2.m2.s + d2.u2.s
c(d1.m2.o, d1.m2.s, d2.m2.o, d2.m2.s,
d1.u2.o, d1.u2.s, d2.u2.o, d2.u2.s,
d1.m2.o+d1.u2.o, d1.m2.s+d1.u2.s, d2.o, d2.s)
}))
discrep
}
lnTheta <- function(mu,sigma,s,t){
## Constructs the matrix of transition probabilities using
## log-normal distributions with the supplied means and variances.
## mu = vectors of log travel-time means for each strata
## sigma = vector of log travel times std devs for each strata
## s,t = number of strata at site 1,2
tmp <- t(sapply(1:s,function(i){
tmp1 <- stats::pnorm(log(1:(t-i+1)),mu[i],sigma[i]) -
stats::pnorm(log(1:(t-i+1)-1),mu[i],sigma[i])
c(rep(0,i-1),tmp1)
}))
}
simTSPNDE <- function(n1,U,p,Theta){
## Simulate data from the TSPNDE model conditional on values of n and U.
s <- length(n1)
t <- length(U)
## 1) Simulate matrix of recoveries
m2 <- t(sapply(1:length(n1),function(i){
cellProbs <- Theta[i,] * p
cellProbs <- c(cellProbs,1-sum(cellProbs))
    stats::rmultinom(1,n1[i],cellProbs)[1:t]
}))
## 2) Compute number of marked individuals not recaptured
m2 <- cbind(m2,n1-apply(m2,1,sum))
## 3) Simulate captures of unmarked fish
u2 <- stats::rbinom(t,U,p)
return(list(m2=m2,u2=u2))
}
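
# Illustrative sketch (not part of the package): building a log-normal travel
# time (movement) matrix with lnTheta() and simulating one non-diagonal data
# set with simTSPNDE(). All release numbers, run sizes, and capture rates are
# made up for illustration only.
if (FALSE) {
  s <- 3; t <- 5
  Theta <- lnTheta(mu = rep(0.5, s), sigma = rep(0.4, s), s = s, t = t)
  rowSums(Theta)   # each row sums to (at most) 1: probabilities of recovery strata
  simTSPNDE(n1 = c(100, 120, 80), U = rep(1000, t), p = rep(0.1, t), Theta = Theta)
}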
|
/scratch/gouwar.j/cran-all/cranData/BTSPAS/R/PredictivePosterior_TSPNDE.R
|
#' @rdname PredictivePosterior.TSPDE
#' @importFrom stats sd dbinom dmultinom rbinom rmultinom
# 2021-10-05 CJS Fixed computation of discrepancy measure when some m2.expanded are missing
# 2020-12-15 CJS Fixed computation of discrepancy measures when u2 is missing
# 2018-11-30 CJS Changed definition of m2.expanded; propagates down here
# 2018-11-27 CJS Removed openbugs stuff
# 2014-09-01 CJS Fixed bug when logitP.fixed is fixed in first position
# 2014-09-01
# There was also a subtle bug in dealing with the multinomial distribution where the length of p
#                 (that had to be padded to deal with an OpenBugs problem)
#                 had to have the indices explicitly stated.
PredictivePosterior.TSPNDENP <- function (n1,
m2.expanded,
u2,
logitP.fixed,
p,
U,
Theta,
Delta.max) {
# Generate Predictive Posterior Plot (Bayesian p-value) given the data
# for a TimeStratified Petersen with Non-Diagonal entries and a non-parametric movement model
# n1 - vector of number of releases
# m2.expanded - matrix of number of marks recovered from each value of n1
# u2           - vector of recoveries of unmarked fish
# logitP.fixed - which values of p(i) are fixed
# Delta.max - maximum strata involved in movement from diagonal
# U, Theta = matrix of values (rows=number of posterior samples, columns=strata)
# These are returned from the call to JAGS
#
s <- length(n1)
t <- length(u2)
select.u2 <- !is.na(u2) # which terms involving u2 to use?
## Transform saved iterations for theta from vectors to full movement matrices
Theta.bkp <- Theta
Theta <- lapply(1:nrow(Theta.bkp),function(k){
M <- Theta.bkp[k,,]
tmp <- matrix(0,nrow=s,ncol=t)
for(i in 1:length(n1)){
tmp[i,i:min(t,i+Delta.max)] <- M[i,1:min(t-i+1,Delta.max+1)]
}
tmp
})
## Simulate data for each iteration
#browser()
simData <- lapply(1:nrow(p),function(k) simTSPNDE(n1,U[k,],p[k,],Theta[[k]]))
#browser()
## Compute discrepancy measures
#browser()
discrep <- t(sapply(1:nrow(p),function(k){
## 1) Observed vs expected values for recaptures of marked fish
## a) Observed data
temp1.o <- sqrt(m2.expanded[,1:t]) - sqrt(n1 * t(t(Theta[[k]]) * p[k,1:t]))
d1.m2.o <- sum(temp1.o^2,na.rm=TRUE)
## b) Simulate data
temp1.s <- sqrt(simData[[k]]$m2[,1:t]) - sqrt(n1 * t(t(Theta[[k]]) * p[k,1:t]))
d1.m2.s <- sum(temp1.s^2,na.rm=TRUE)
## 2) Observed vs expected values for captures of unmarked fish
## a) Observed data
temp2.o <- sqrt(u2) - sqrt(U[k,] * p[k,1:t])
d1.u2.o <- sum(temp2.o[select.u2]^2,na.rm=TRUE)
## b) Simulate data
temp2.s <- sqrt(simData[[k]]$u2) - sqrt(U[k,] * p[k,1:t])
d1.u2.s <- sum(temp2.s[select.u2]^2,na.rm=TRUE)
## 3) Deviance (-2*log-likelihood)
## a) Observed data
#browser()
d2.m2.o <- -2 * sum(sapply(1:length(n1),function(i){
cellProbs <- Theta[[k]][i,] * p[k,1:t] # 2014-09-01 need to ignore extra p's at end which were needed for OPENbugs quirk
cellProbs <- c(cellProbs,1-sum(cellProbs))
res <- 0
if(!is.na(sum(m2.expanded[i,1:t]))){ # data is present, so compute the deviance; otherwise nothing
res<- stats::dmultinom(c(m2.expanded[i,1:t], n1[i]-sum(m2.expanded[i,1:t])),n1[i],cellProbs,log=TRUE)
}
res
}))
d2.u2.o <- -2 * sum(stats::dbinom(u2[select.u2],U[k,select.u2],p[k,select.u2],log=TRUE), na.rm=TRUE)
d2.o <- d2.m2.o + d2.u2.o
## b) Simulated data
d2.m2.s <- -2 * sum(sapply(1:length(n1),function(i){
cellProbs <- Theta[[k]][i,] * p[k,1:t] # 2014-09-01 ditto to previous fix
cellProbs <- c(cellProbs,1-sum(cellProbs))
res <- 0
if(!is.na(sum(m2.expanded[i,1:t]))){ # data is present, so compute the deviance on sim data; otherwise nothing
res<- stats::dmultinom(simData[[k]]$m2[i,],n1[i],cellProbs,log=TRUE)
}
res
}))
d2.u2.s <- -2 * sum(stats::dbinom(simData[[k]]$u2[select.u2],U[k,select.u2],p[k,select.u2],log=TRUE), na.rm=TRUE)
d2.s <- d2.m2.s + d2.u2.s
c(d1.m2.o, d1.m2.s, d2.m2.o, d2.m2.s,
d1.u2.o, d1.u2.s, d2.u2.o, d2.u2.s,
d1.m2.o+d1.u2.o, d1.m2.s+d1.u2.s, d2.o, d2.s)
}))
discrep
}
simTSPNDE <- function(n1,U,p,Theta){
## Simulate data from the TSPNDE model conditional on values of n and U.
s <- length(n1)
t <- length(U)
## 1) Simulate matrix of recoveries
m2 <- t(sapply(1:length(n1),function(i){
cellProbs <- Theta[i,] * p[1:t]
cellProbs <- c(cellProbs,1-sum(cellProbs))
if( any(cellProbs < 0)){browser()}
stats::rmultinom(1,n1[i],cellProbs)[1:t]
}))
## 2) Add number of individuals not recovered to last column of m2
m2 <- cbind(m2,n1-apply(m2,1,sum))
## 3) Simulate captures of unmarked fish
u2 <- stats::rbinom(t,U,p)
return(list(m2=m2,u2=u2))
}
|
/scratch/gouwar.j/cran-all/cranData/BTSPAS/R/PredictivePosterior_TSPNDENP.R
|
#' @rdname PredictivePosterior.TSPDE
#' @param ma.p Proportion of marks available, i.e. 1 - fallback probability
#' @importFrom stats sd dbinom dmultinom rbinom rmultinom
# 2018-12-14 CJS First edition based on PredictivePosterior.TSPNDENP
# 2014-09-01
# There was also a subtle bug in dealing with the multinomial distribution where the length of p
# (that had to be padded to deal with an OpenBugs problem
# had to have the indicies explicitly stated.
PredictivePosterior.TSPNDENPMarkAvail <- function (n1,
m2.expanded,
u2,
logitP.fixed,
p,
U,
Theta,
ma.p,
Delta.max) {
# Generate Predictive Posterior Plot (Bayesian p-value) given the data
# for a TimeStratified Petersen with Non-Diagonal entries and a non-parametric movement model
# n1 - vector of number of releases
# m2.expanded - matrix of number of marks recovered from each value of n1
# u2           - vector of recoveries of unmarked fish
# logitP.fixed - which values of p(i) are fixed
# Delta.max - maximum strata involved in movement from diagonal
# U, Theta = matrix of values (rows=number of posterior samples, columns=strata)
# These are returned from the call to OpenBugs/ JAGS
# ma.p - mark availability proportion
s <- length(n1)
t <- length(u2)
select.u2 <- !is.na(u2)
## Transform saved iterations for theta from vectors to full movement matrices
Theta.bkp <- Theta
Theta <- lapply(1:nrow(Theta.bkp),function(k){
M <- Theta.bkp[k,,]
tmp <- matrix(0,nrow=s,ncol=t)
for(i in 1:length(n1)){
tmp[i,i:min(t,i+Delta.max)] <- M[i,1:min(t-i+1,Delta.max+1)]
}
tmp
})
## Simulate data for each iteration
#browser()
simData <- lapply(1:nrow(p),function(k) simTSPNDENPMarkAvail(n1,U[k,],p[k,],Theta[[k]],ma.p[k]))
#browser()
## Compute discrepancy measures
discrep <- t(sapply(1:nrow(p),function(k){
## 1) Observed vs expected values for recaptures of marked fish
## a) Observed data
temp1.o <- sqrt(m2.expanded[,1:t]) - sqrt(n1 * t(t(Theta[[k]]) * p[k,1:t]))
d1.m2.o <- sum(temp1.o^2,na.rm=TRUE)
## b) Simulate data
temp1.s <- sqrt(simData[[k]]$m2[,1:t]) - sqrt(n1 * t(t(Theta[[k]]) * p[k,1:t]))
d1.m2.s <- sum(temp1.s^2,na.rm=TRUE)
## 2) Observed vs expected values for captures of unmarked fish
## a) Observed data
temp2.o <- sqrt(u2) - sqrt(U[k,] * p[k,1:t])
d1.u2.o <- sum(temp2.o[select.u2]^2,na.rm=TRUE)
## b) Simulate data
temp2.s <- sqrt(simData[[k]]$u2) - sqrt(U[k,] * p[k,1:t])
d1.u2.s <- sum(temp2.s[select.u2]^2,na.rm=TRUE)
## 3) Deviance (-2*log-likelihood)
## a) Observed data
#browser()
d2.m2.o <- -2 * sum(sapply(1:length(n1),function(i){
cellProbs <- Theta[[k]][i,] * p[k,1:t] # 2014-09-01 need to ignore extra p's at end which were needed for OPENbugs quirk
cellProbs <- c(cellProbs,1-sum(cellProbs))
stats::dmultinom(c(m2.expanded[i,1:t], n1[i]-sum(m2.expanded[i,1:t])),n1[i],cellProbs,log=TRUE)
}))
d2.u2.o <- -2 * sum(stats::dbinom(u2[select.u2],U[k,select.u2],p[k,select.u2],log=TRUE), na.rm=TRUE)
d2.o <- d2.m2.o + d2.u2.o
## b) Simulated data
d2.m2.s <- -2 * sum(sapply(1:length(n1),function(i){
cellProbs <- Theta[[k]][i,] * p[k,1:t] # 2014-09-01 ditto to previous fix
cellProbs <- c(cellProbs,1-sum(cellProbs))
stats::dmultinom(simData[[k]]$m2[i,],n1[i],cellProbs,log=TRUE)
}))
d2.u2.s <- -2 * sum(stats::dbinom(simData[[k]]$u2[select.u2],U[k,select.u2],p[k,select.u2],log=TRUE), na.rm=TRUE)
d2.s <- d2.m2.s + d2.u2.s
c(d1.m2.o, d1.m2.s, d2.m2.o, d2.m2.s,
d1.u2.o, d1.u2.s, d2.u2.o, d2.u2.s,
d1.m2.o+d1.u2.o, d1.m2.s+d1.u2.s, d2.o, d2.s)
}))
discrep
}
simTSPNDENPMarkAvail <- function(n1,U,p,Theta, ma.p){
## Simulate data from the TSPNDENPMarkAvail model conditional on values of n and U.
## and the proportion of marks available (ma.p)
s <- length(n1)
t <- length(U)
## 1) Simulate matrix of recoveries
m2 <- t(sapply(1:length(n1),function(i){
cellProbs <- Theta[i,] * p[1:t] * ma.p
cellProbs <- c(cellProbs,1-sum(cellProbs))
if( any(cellProbs < 0)){browser()}
stats::rmultinom(1,n1[i],cellProbs)[1:t]
}))
## 2) Add number of individuals not recovered to last column of m2
m2 <- cbind(m2,n1-apply(m2,1,sum))
## 3) Simulate captures of unmarked fish
u2 <- stats::rbinom(t,U,p)
return(list(m2=m2,u2=u2))
}
|
/scratch/gouwar.j/cran-all/cranData/BTSPAS/R/PredictivePosterior_TSPNDENPMarkAvail.R
|
#' Compute percentiles of the run timing distribution.
#'
#' Take the posterior sample of U[1,...nstrata] and compute the percentiles of the run
#' timing.
#' This uses the quantile() function from the "actuar" package which is designed to compute
#' quantiles of grouped data.
#' It is assumed that there are no fish in the system prior to the first point
#' in time, and after the last point in time.
#' @template time
#' @param U matrix of posterior samples. Each row is a sample from the posterior.
#' Columns correspond to U[1]...U[nstrata].
#' @param prob Quantiles of the run timing to estimate.
#' @return An MCMC object with samples from the posterior distribution. A
#' series of graphs and text file are also created in the working directory.
#' This information is now added to the fit object as well and so it is unlikely
#' that you will use this function.
#' @template author
#' @template references
#' @export RunTime
#' @import plyr
#' @importFrom actuar grouped.data
#' @importFrom stats quantile
# 2018-12-14 CJS converted from a for() loop to adply()
RunTime <- function(time, U, prob=seq(0,1,.1)) {
timing <- c(min(time):(1+max(time)))
q.U <- plyr::adply(U, 1, function(U.sample, timing){
quant <- stats::quantile(actuar::grouped.data(Group=timing, Frequency=U.sample), prob=prob, na.rm=TRUE)
quant
}, timing=timing, .id=NULL)
q.U
}
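
# Illustrative sketch (not part of the package): run-timing quantiles computed
# from a small fabricated posterior sample of U. In real use U is the matrix of
# posterior draws returned by the fitting functions; the values below are made
# up for illustration only.
if (FALSE) {
  set.seed(42)
  time <- 1:5
  U <- matrix(rpois(3 * 5, lambda = c(100, 400, 800, 300, 50)), nrow = 3, byrow = TRUE)
  RunTime(time = time, U = U, prob = c(0.25, 0.50, 0.75))  # one row of quantiles per posterior draw
}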
|
/scratch/gouwar.j/cran-all/cranData/BTSPAS/R/RunTime.R
|
# 2010-05-25 CJS fixed error in se of Petersen estimator
#' Simple Petersen Estimator and test if pooling can be done
#'
#' Computes the Petersen estimator (Chapman correction applied) for the number of UNMARKED animals (U)
#' and total population (N) given n1, m2, and u2.
#'
#' @aliases TestIfPool
#' @param n1 Number of animals tagged and released. Can be a vector in which the estimate is formed for each element of the vector
#' @param m2 Number of animals from n1 that are recaptured.
#' @param u2 Number of unmarked animals in the second sample.
#'
#' @return Data frame with variables U.est, U.se, N.est, and N.se.
#' @template author
#' @examples
#'
#' SimplePetersen( 200, 10, 300)
#' SimplePetersen(c(200,400), c(10,20), c(300,600))
#'
#' @importFrom stats chisq.test
#' @export SimplePetersen TestIfPool
#'
#'
SimplePetersen <- function( n1, m2, u2) {
#
# Estimate abundance of unmarked fish at second sample occasion using the SimplePetersen estimator
# n1 - number of animals marked
# m2 - number of recaptures of those in n1
# u2 - number of animals unmarked
# All three elements can be vectors in which case a vector of estimates (and se) are returned.
#
# NOTE that the estimate is for UNMARKED at the second sample occasion. Many people want
# the total abundance at the second occasion in which case you need to add the n1 to the total.
#
# The Chapman estimator is returned with its se
#
# Output: a list with elements
# $est and $se
#
U.est <- (n1+1)*(u2+1)/(m2+1) - 1
U.se <- sqrt((n1+1)*(m2+u2+1)*(n1-m2)*(u2)/(m2+1)^2/(m2+2))
N.est <- (n1+1)*(u2+m2+1)/(m2+1) - 1
N.se <- U.se
data.frame(U.est=U.est, U.se=U.se, N.est=N.est, N.se=N.se, stringsAsFactors=FALSE)
} #end of function
TestIfPool <- function(n1, m2){
#
# Test if can use a pooled Petersen experiment by seeing if the recapture rate is
# equal across all strata. This is a simple chi-square test to see if the proportion
# recovered are equal.
#
# Input:
# n1 - vector of marked fish released
# m2 - vector of marks recovered
#
# Output
# $chi - an object of type "htest" with components $chi$statistic with the X2 statistic
#              and $chi$p.value containing the p.value.  [More components are returned.]
# $fisher - an object of type "htest" with components $fisher$p.value with the p.value (not currently done)
#
#
# browser()
  options(warn=-1)        # turn off warning display about small Chi-square values
temp <- cbind(n1-m2, m2)
chi <- stats::chisq.test(temp)
fisher <- NULL # fisher.test(temp, simulate.p.value=TRUE)
list(chi=chi, fisher=fisher)
} # end of function
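
# Illustrative sketch (not part of the package): checking whether recapture
# rates look homogeneous enough to pool strata before applying the pooled
# Petersen estimator. The release/recovery counts are made up for illustration.
if (FALSE) {
  n1 <- c(200, 400, 150)
  m2 <- c( 10,  22,   7)
  pool.check <- TestIfPool(n1, m2)
  pool.check$chi$p.value                       # a large p-value suggests pooling may be reasonable
  SimplePetersen(sum(n1), sum(m2), u2 = 900)   # pooled estimate if pooling is justified
}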
|
/scratch/gouwar.j/cran-all/cranData/BTSPAS/R/SimplePetersen.R
|
## 2020-11-07 CJS Allow user to specify prior for beta parameters for covariates on logitP
## 2018-11-26 CJS Removed all OpenBugs stuff
## 2014-09-01 CJS Converted to JAGS
## 2013-12-31 CJS Tried adding u2copy to get back Matts fix for mixing
## 2013-09-22 SJB Changes to model for JAGS compatability:
## -- Removed model name.
## -- Changed C(,20) to T(,20).
## -- Replace dflat() with dnorm(0.0,1.0E-6).
## -- Remove Matt's fix to improve mixing.
# 2013-09-04 CJS removed all references to WinBugs. Fixed problem with initial values for NA in n1, m2, or u2
# 2011-05-15 CJS limited etaU to 20 or less
# 2011-01-24 SB added call to run.windows.openbugs and run.windows.winbugs
# 2010-11-25 CJS add output to show progress of sampling through burnin and post-burnin phases
# 2010-04-26 CJS fixed problem in computing logitPguess when m2=n1 and you get infinite logit value
# 2009-12-05 CJS added title to argument list
# 2009-12-01 CJS added WinBugs/OpenBugs directory to the argument list
#' @import graphics grDevices splines
#' @importFrom stats lm spline var sd
#' @keywords internal
TimeStratPetersenDiagError <- function(
title,
prefix,
time,
n1,
m2,
u2,
jump.after=NULL,
logitP.cov=as.matrix(rep(1,length(u2))),
logitP.fixed,
n.chains=3,
n.iter=200000,
n.burnin=100000,
n.sims=2000,
tauU.alpha=1, tauU.beta=.05,
taueU.alpha=1, taueU.beta=.05,
prior.beta.logitP.mean = c(logit(sum(m2,na.rm=TRUE)/sum(n1,na.rm=TRUE)),rep(0, ncol(as.matrix(logitP.cov))-1)),
prior.beta.logitP.sd = c(stats::sd(logit((m2+.5)/(n1+1)),na.rm=TRUE), rep(10, ncol(as.matrix(logitP.cov))-1)),
tauP.alpha=.001, tauP.beta=.001,
debug=FALSE,
debug2=FALSE,
InitialSeed,
save.output.to.files=TRUE){
set.seed(InitialSeed) # set prior to initial value computations
#
# Fit the smoothed time-Stratified Petersen estimator with Diagonal recoveries (i.e. no recoveries
# outside stratum of release) and error in the smoothed U curve
#
# This routine assumes that the strata are time (e.g. weeks).
# In each stratum n1 fish are released (with marks). These are usually
# captured fish that are marked, transported upstream, and released.
# These fish are used only to estimate the recapture rate downstream.
# Of the n1 fish released, m2 fish are recaptured in the same stratum (e.g. week) of release.
# There is a related function that allows fish to be recaptured in subsequent weeks.
# At the same time, u2 other (unmarked) fish are newly captured in stratum i.
# These EXCLUDE recaptures of marked fish. These are the fish that are "expanded"
# to estimate the population size of fish in stratum i.
#
# Input
# prefix - prefix for file name for initial plot of U's
# time- the stratum number
# n1 - vector of number of fish released in stratum i
# m2 - vector of number of fish recovered in stratum i (EXCLUDING recaps)
# u2 - vector of number of unmarked fish captured in stratum i
# jump.after - points after which the spline is allowed to jump. Specify as a list of integers in the
# range of 1:Nstrata. If jump.after[i]=k, then the spline is split between strata k and k+1
# logitP.cov - covariates for logit(P)=X beta.logitP
# logitP.fixed - indicator if this logitP is fixed. If NA, then not fixed; else fixed to the particular value
# This routine makes a call to the MCMC sampler to fit the model and then gets back the
# coda files for the posterior distribution.
## Set working directory to current directory (we should allow users to select this)
working.directory <- getwd()
## Define paths for the model, data, and initial value files
model.file <- file.path(working.directory, "model.txt")
data.file <- file.path(working.directory,"data.txt")
init.files <- file.path(working.directory,
paste("inits", 1:n.chains,".txt", sep = ""))
# Save the Bugs progam to the model.txt file
#
sink(model.file) # NOTE: NO " allowed in model as this confuses the cat command
cat("
model{
# Time Stratified Petersen with Diagonal recapture (no spillover in subsequent weeks or marked fish)
# and allowing for error in the smoothed U curve.
#
# Data input:
# Nstrata - number of strata
# n1 - number of marked fish released
# m2 - number of marked fish recaptured
# u2 - number of unmarked fish captured (To be expanded to population).
# logitP.cov - covariates for logitP
# NlogitP.cov - number of logitP covariates
# Nfree.logitP - number of free logitP parameters
# free.logitP.index - vector of length(Nfree.logitP) for the free logitP parameters
# Nfixed.logitP - number of fixed logitP parameters
#    fixed.logitP.index - vector of length(Nfixed.logitP) for the fixed logitP parameters
# fixed.logitP.value - value of fixed logit entries
# SplineDesign- spline design matrix of size [Nstrata, maxelement of n.b.notflat]
# This is set up prior to the call.
# b.flat - vector of strata indices where the prior for the b's will be flat.
# this is normally the first two of each spline segment
# n.b.flat - number of b coefficients that have a flat prior
# b.notflat- vector of strata indices where difference in coefficients is modelled
# n.b.notflat- number of b coefficients that do not have a flat prior
# tauU.alpha, tauU.beta - parameters for prior on tauU
# taueU.alpha, taueU.beta - parameters for prior on taueU
# prior.beta.logitP.mean, prior.beta.logitP.sd - parameters for prior of coefficient of covariates for logitP
# tauP.alpha, tauP.beta - parameter for prior on tauP (residual variance of logit(P)'s after adjusting for
# covariates)
#
# Parameters of the model are:
# p[i]
# logitP[i] = logit(p[i]) = logitP.cov*beta.logitP
# The beta coefficients have a prior that is N(mean= prior.beta.logitP.mean, sd= prior.beta.logitP.sd)
# U[i]
# etaU[i] = log(U[i])
# which comes from spline with parameters bU[1... Knots+q]
# + error term eU[i]
##### Fit the spline and specify hierarchial model for the logit(P)'s ######
for(i in 1:Nstrata){
logUne[i] <- inprod(SplineDesign[i,1:n.bU],bU[1:n.bU]) # spline design matrix * spline coeff
etaU[i] ~ dnorm(logUne[i], taueU)T(,20) # add random error
eU[i] <- etaU[i] - logUne[i]
}
for(i in 1:Nfree.logitP){ # model the free capture rates using covariates
mu.logitP[free.logitP.index[i]] <- inprod(logitP.cov[free.logitP.index[i],1:NlogitP.cov], beta.logitP[1:NlogitP.cov])
## Matt's fix to improve mixing. Use u2copy to break the cycle (this doesn't work??)
mu.epsilon[free.logitP.index[i]] <- mu.logitP[free.logitP.index[i]] - log(u2copy[free.logitP.index[i]] + 1) + etaU[free.logitP.index[i]]
epsilon[free.logitP.index[i]] ~ dnorm(mu.epsilon[free.logitP.index[i]],tauP)
logitP[free.logitP.index[i]] <- max(-10, min(10,log(u2copy[free.logitP.index[i]] + 1) - etaU[free.logitP.index[i]] + epsilon[free.logitP.index[i]]))
}
for(i in 1:Nfixed.logitP){ # logit P parameters are fixed so we need to force epsilon to be defined.
epsilon[fixed.logitP.index[i]] <- 0
}
##### Hyperpriors #####
## Run size - flat priors
for(i in 1:n.b.flat){
bU[b.flat[i]] ~ dnorm(0.0,1.0E-6)
}
## Run size - priors on the difference
for(i in 1:n.b.notflat){
xiU[b.notflat[i]] <- 2*bU[b.notflat[i]-1] - bU[b.notflat[i]-2]
bU [b.notflat[i]] ~ dnorm(xiU[b.notflat[i]],tauU)
}
tauU ~ dgamma(tauU.alpha,tauU.beta) # Notice reduction from .0005 (in thesis) to .05
sigmaU <- 1/sqrt(tauU)
taueU ~ dgamma(taueU.alpha,taueU.beta) # dgamma(100,.05) # Notice reduction from .0005 (in thesis) to .05
sigmaeU <- 1/sqrt(taueU)
## Capture probabilities covariates
for(i in 1:NlogitP.cov){
beta.logitP[i] ~ dnorm(prior.beta.logitP.mean[i], 1/prior.beta.logitP.sd[i]^2) # rest of beta terms are normal 0 and a large variance
}
beta.logitP[NlogitP.cov+1] ~ dnorm(0, .01) # dummy so that covariates of length 1 function properly
tauP ~ dgamma(tauP.alpha,tauP.beta)
sigmaP <- 1/sqrt(tauP)
##### Likelihood contributions #####
for(i in 1:Nstrata){
logit(p[i]) <- logitP[i] # convert from logit scale
U[i] <- round(exp(etaU[i])) # convert from log scale
m2[i] ~ dbin(p[i],n1[i]) # recovery of marked fish
u2[i] ~ dbin(p[i],U [i]) # capture of newly unmarked fish
}
##### Derived Parameters #####
Utot <- sum( U[1:Nstrata]) # Total number of unmarked fish
Ntot <- sum(n1[1:Nstrata]) + Utot # Total population size including those fish marked and released
} # end of model
", fill=TRUE)
sink() # End of saving the Bugs program
# create the B-spline design matrix
# Each set of strata separated at the jump.after[i] points forms a separate spline with a separate basis
# We need to keep track of the breaks as the first two spline coefficients will have a flat
# prior and the others are then related to the previous values.
Nstrata <- length(n1)
ext.jump <- c(0, jump.after, Nstrata) # add the first and last breakpoints to the jump sets
SplineDesign <- matrix(0, nrow=0, ncol=0)
SplineDegree <- 3 # Degree of spline between occasions
b.flat <- NULL # index of spline coefficients with a flat prior distribution -first two of each segment
b.notflat <- NULL # index of spline coefficients where difference is modelled
all.knots <- NULL
for (i in 1:(length(ext.jump)-1)){
nstrata.in.set <- ext.jump[i+1]-ext.jump[i]
if(nstrata.in.set > 7)
{ knots <- seq(5,nstrata.in.set-1,4)/(nstrata.in.set+1) # a knot roughly every 4th stratum
} else{
        knots <- .5        # a single knot at the midpoint for short segments
}
all.knots <- c(all.knots, knots)
# compute the design matrix for this set of strata
z <- bs((1:nstrata.in.set)/(nstrata.in.set+1), knots=knots, degree=SplineDegree,
intercept=TRUE, Boundary.knots=c(0,1))
# first two elements of b coeffients have a flat prior
b.flat <- c(b.flat, ncol(SplineDesign)+(1:2))
b.notflat <- c(b.notflat, ncol(SplineDesign)+3:(ncol(z)))
# add to the full design matrix which is block diagonal
SplineDesign <- cbind(SplineDesign, matrix(0, nrow=nrow(SplineDesign), ncol=ncol(z)))
SplineDesign <- rbind(SplineDesign,
cbind( matrix(0,nrow=nrow(z),ncol=ncol(SplineDesign)-ncol(z)), z) )
} # end of for loop
n.b.flat <- length(b.flat)
n.b.notflat <- length(b.notflat)
n.bU <- n.b.flat + n.b.notflat
# get the logitP=logit(P) covariate matrix ready
logitP.cov <- as.matrix(logitP.cov)
NlogitP.cov <- ncol(as.matrix(logitP.cov))
# get the logitP's ready to allow for fixed values
logitP <- as.numeric(logitP.fixed)
storage.mode(logitP) <- "double" # if there are no fixed logits, the default class will be logical which bombs
free.logitP.index <- (1:Nstrata)[ is.na(logitP.fixed)] # free values are those where NA is specifed
Nfree.logitP <- length(free.logitP.index)
fixed.logitP.index <- (1:Nstrata)[!is.na(logitP.fixed)]
fixed.logitP.value <- logitP.fixed[ fixed.logitP.index]
Nfixed.logitP <- length(fixed.logitP.index)
# create a copy of the u2 to improve mixing in the MCMC model
u2copy <- exp(stats::spline(x = 1:Nstrata, y = log(u2+1), xout = 1:Nstrata)$y)-1 # on log scale to avoid negative values
u2copy <- pmax(0,round(u2copy)) # round to integers and avoid negative values
#browser()
datalist <- list("Nstrata", "n1", "m2", "u2", "u2copy",
"logitP", "Nfree.logitP", "free.logitP.index", "Nfixed.logitP", "fixed.logitP.index", "fixed.logitP.value", # those indices that are fixed and free to vary
"logitP.cov", "NlogitP.cov",
"SplineDesign",
"b.flat", "n.b.flat", "b.notflat", "n.b.notflat", "n.bU",
"tauU.alpha", "tauU.beta", "taueU.alpha", "taueU.beta",
"prior.beta.logitP.mean", "prior.beta.logitP.sd",
"tauP.alpha", "tauP.beta")
## Generate best guess initial values
## These initial values are used only to draw an initial fitted plot
## and are not used as initial values in the MCMC.
avgP <- sum(m2,na.rm=TRUE)/sum(n1,na.rm=TRUE)
Uguess <- pmax((u2+1)*(n1+2)/(m2+1), u2/avgP, 1, na.rm=TRUE) # try and keep Uguess larger than observed values
Uguess[which(is.na(Uguess))] <- mean(Uguess,na.rm=TRUE)
init.bU <- stats::lm(log(Uguess+1) ~ SplineDesign-1)$coefficients # initial values for spline coefficients
if(debug2) {
cat("compute init.bU \n")
browser() # Stop here to examine the spline design matrix function
}
logitPguess <- pmax(-10, pmin(10, logit( (m2+1)/(n1+1))))
init.beta.logitP <- as.vector(stats::lm( logitPguess ~ logitP.cov-1)$coefficients)
if(debug2) {
cat(" obtained initial values of beta.logitP\n")
browser()
}
# create an initial plot of the fit
plot.data <- data.frame(time=time,
logUguess=log(Uguess),
spline=SplineDesign %*% init.bU, stringsAsFactors=FALSE)
init.plot <- ggplot(data=plot.data, aes_(x=~time, y=~logUguess))+
ggtitle(title, subtitle="Initial spline fit to estimated log U[i]")+
geom_point()+
geom_line(aes_(y=~spline))+
xlab("Stratum")+ylab("log(U[i])")+
scale_x_continuous(breaks=seq(min(plot.data$time, na.rm=TRUE),max(plot.data$time, na.rm=TRUE),2))
if(save.output.to.files)ggsave(init.plot, filename=paste(prefix,"-initialU.pdf",sep=""), height=4, width=6, units="in")
#results$plots$plot.init <- init.plot # do this after running the MCMC chain (see end of function)
parameters <- c("logitP", "beta.logitP", "tauP", "sigmaP",
"bU", "tauU", "sigmaU",
"eU", "taueU", "sigmaeU",
"Ntot", "Utot", "logUne", "etaU", "U")
if( any(is.na(m2))) {parameters <- c(parameters,"m2")} # monitor in case some bad data where missing values present
if( any(is.na(u2))) {parameters <- c(parameters,"u2")}
## init.vals <- function(){
## init.logitP <- logit((m2+1)/(n1+2)) # initial capture rates based on observed recaptures
## init.logitP[is.na(init.logitP)] <- -2 # those cases where initial probability is unknown
## init.beta.logitP <- as.vector(stats::lm( init.logitP ~ logitP.cov-1)$coefficients)
## init.beta.logitP[is.na(init.beta.logitP)] <- 0
## init.beta.logitP <- c(init.beta.logitP, 0) # add one extra element so that single beta is still written as a
## # vector in the init files etc.
## init.tauP <- 1/stats::var(init.logitP, na.rm=TRUE) # 1/variance of logit(p)'s (ignoring the covariates for now)
## init.bU <- stats::lm(log(Uguess+1) ~ SplineDesign-1)$coefficients # initial values for spline coefficients
## init.eU <- as.vector(log(Uguess)-SplineDesign%*%init.bU) # error terms set as differ between obs and pred
## init.etaU <- log(Uguess)
## # variance of spline difference
## sigmaU <- sd( init.bU[b.notflat]-2*init.bU[b.notflat-1]+init.bU[b.notflat-2], na.rm=TRUE)
## init.tauU <- 1/sigmaU^2
## # variance of error in the U' over and above the spline fit
## sigmaeU <- sd(init.eU, na.rm=TRUE)
## init.taueU <- 1/sigmaeU^2
## # initialize the u2 where missing
## init.u2 <- u2
## init.u2[ is.na(u2)] <- 100
## init.u2[!is.na(u2)] <- NA
## list(logitP=init.logitP, beta.logitP=init.beta.logitP, tauP=init.tauP,
## bU=init.bU, tauU=init.tauU, taueU=init.taueU, etaU=init.etaU)
## }
## Generate initial values
init.vals <- genInitVals(model="TSPDE",
n1=n1,
m2=m2,
u2=u2,
logitP.cov=logitP.cov,
logitP.fixed=logitP.fixed,
SplineDesign=SplineDesign,
n.chains=n.chains)
## Generate data list
data.list <- list()
for(i in 1:length(datalist)){
data.list[[length(data.list)+1]] <-get(datalist[[i]])
}
names(data.list) <- as.list(datalist)
# Make the call to the MCMC sampler
results <- run.MCMC(modelFile=model.file,
dataFile=data.file,
dataList=data.list,
initFiles=init.files,
initVals=init.vals,
parameters=parameters,
nChains=n.chains,
nIter=n.iter,
nBurnin=n.burnin,
nSims=n.sims,
overRelax=FALSE,
initialSeed=InitialSeed,
working.directory=working.directory,
debug=debug)
results$plots$init.plot <- init.plot
return(results)
}
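
# Illustrative sketch (not part of the package): the B-spline basis built for a
# single segment of strata inside the routine above, shown stand-alone. The
# segment length is made up; with 12 strata the knot rule places a knot roughly
# every 4th stratum.
if (FALSE) {
  nstrata.in.set <- 12
  knots <- seq(5, nstrata.in.set - 1, 4) / (nstrata.in.set + 1)
  z <- splines::bs((1:nstrata.in.set) / (nstrata.in.set + 1), knots = knots, degree = 3,
                   intercept = TRUE, Boundary.knots = c(0, 1))
  dim(z)   # Nstrata rows; length(knots) + degree + 1 basis columns
}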
|
/scratch/gouwar.j/cran-all/cranData/BTSPAS/R/TimeStratPetersenDiagError.R
|
## 2020-11-07 CJS Allow user to specify prior for beta parameters for covariates on logitP
# 2018-12-06 CJS converted initial plot to ggplot2 format
# 2014-09-01 CJS Converted to JAGS
# - no model name
# - C(,20) -> T(,20)
# - dflat() to dnorm(0, 1E-6)
# - added u2.Ncopy to improve mixing based on Matt S. suggestion
# - added u2.Acopy to improve mixing based on Matt S. suggestion
# - fixed monitoring of *H parameters that are only present in hatch.after or later
# JAGS won't monitor these variables unless entries from 1:hatch.after are defined
# 2013-09-04 CJS Add initialization for missing values; removed references to WinBugs
# 2011-05-15 CJS limited etaU to 20 or less
# 2011-01-24 SB added call to run.windows.openbugs and run.windows.winbugs
# 2013-12-31 CJS updated for JAGS
# 2010-11-25 CJS add output to track progress of burnin and post-burnin phases
# 2010-04-26 CJS fixed problem with initial values for logitP when n1=m2 (+infinite) which crashed stats::lm()
# 2009-12-05 CJS added title to argument list
# 2009-12-01 CJS Added open/win bugs path names to argument list
#' @keywords internal
#' @importFrom stats lm spline sd
TimeStratPetersenDiagErrorWHChinook <-
function(title, prefix, time, n1, m2, u2.A, u2.N,
hatch.after=NULL, clip.frac.H=.25,
logitP.cov=as.matrix(rep(1,length(u2.A))),
n.chains=3, n.iter=200000, n.burnin=100000, n.sims=2000,
tauU.alpha=1, tauU.beta=.05, taueU.alpha=1, taueU.beta=.05,
prior.beta.logitP.mean = c(logit(sum(m2,na.rm=TRUE)/sum(n1,na.rm=TRUE)),rep(0, ncol(as.matrix(logitP.cov))-1)),
prior.beta.logitP.sd = c(stats::sd(logit((m2+.5)/(n1+1)),na.rm=TRUE), rep(10, ncol(as.matrix(logitP.cov))-1)),
tauP.alpha=.001, tauP.beta=.001,
debug=FALSE, debug2=FALSE,
InitialSeed,
save.output.to.files=TRUE){
set.seed(InitialSeed) # set prior to initial value computations
#
# Fit the smoothed time-Stratified Petersen estimator with Diagonal recoveries (i.e. no recoveries
# outside stratum of release), error in the smoothed U curve, and separating wild vs hatchery stocks
#
# This routine assumes that the strata are time (e.g. weeks).
# In each stratum n1 fish are released (with marks). These are usually
# captured fish that are marked, transported upstream, and released.
# These fish are used only to estimate the recapture rate downstream.
# Of the n1 fish released, m2 fish are recaptured in the same stratum (e.g. week) of release.
# There is a related function that allows fish to be recaptured in subsequent weeks.
#
# At the same time, u2.A (adipose fin clipped) and u2.N (not clipped)
# other (unmarked) fish are newly captured in stratum i.
# These EXCLUDE recaptures of marked fish. These are the fish that are "expanded"
# to estimate the population size of fish in stratum i.
# All wild fish are NOT ad-clipped.
# Only a fraction of hatchery fish are ad-clipped. It is assumed that the fraction of ad-clipped
# hatchery fish is constant over the life of the run.
#
# Input
# prefix - prefix for file name for initial plot of U's
# time - the stratum number
# n1 - vector of number of fish released in stratum i
# m2 - vector of number of fish recovered in stratum i (EXCLUDING recaps)
# u2.A - vector of number of adclipped unmarked fish captured in stratum i
# u2.N - vector of number of non-clipped unmarked fish captured in stratum i
# hatch.after - point AFTER which the hatchery fish are released.
# clip.frac.H - what fraction of hatchery fish are clipped
# logitP.cov - covariates for logit(P)=X beta.logitP
# This routine makes a call to the MCMC sampler to fit the model and then gets back the
# coda files for the posterior distribution.
## Set working directory to current directory (we should allow users to select this)
working.directory <- getwd()
## Define paths for the model, data, and initial value files
model.file <- file.path(working.directory, "model.txt")
data.file <- file.path(working.directory,"data.txt")
init.files <- file.path(working.directory,
paste("inits", 1:n.chains,".txt", sep = ""))
## Save the Bugs program to the model.txt file
##
sink(model.file) # NOTE: NO " allowed in model as this confuses the cat command
cat("
model {
# Time Stratified Petersen with Diagonal recapture (no spillover in subsequent weeks or marked fish)
# and allowing for error in the smoothed U curve with separation of wild and hatchery fish
# Each of the wild and hatchery populations is fit using a SINGLE spline curve as this should be flexible
# enough to capture the individual behaviours
# Data input:
# Nstrata - number of strata
# n1 - number of marked fish released
# m2 - number of marked fish recaptured
# u2.A - number of adclipped unmarked fish captured (must be hatchery fish).
# u2.N - number of non-clipped unmarked fish captured (wild + hatchery fish)
# clip.frac.H- what fraction of hatchery fish are clipped
# logitP.cov - covariates for logitP
# NlogitP.cov - number of logitP covariates
# SplineDesign.W- wildfish spline design matrix of size [Nstrata, maxelement of n.b.notflat.W]
# SplineDesign.H- hatchery spline design matrix of size [Nstrata, maxelement of n.b.notflat.H]
# This is set up prior to the call.
# b.flat.W - vector of strata indices where the prior for the b's will be flat for wild fish
# b.flat.H - vector of strata indices where the prior for the b's will be flat for hatchery fish
# this is normally the first two weeks of each spline segment
# n.b.flat.W - number of b coefficients that have a flat prior - wild fish
# n.b.flat.H - number of b coefficients that have a flat prior - hatchery fish
# b.notflat.W - vector of strata indices where difference in coefficients is modelled - wild fish
# b.notflat.H - vector of strata indices where difference in coefficients is modelled - hatchery fish
# n.b.notflat.W - number of b coefficients that do not have a flat prior - wild fish
# n.b.notflat.H - number of b coefficients that do not have a flat prior - hatchery fish
# tauU.alpha, tauU.beta - parameters for prior on tauU
# taueU.alpha, taueU.beta - parameters for prior on taueU
# prior.beta.logitP.mean, prior.beta.logitP.sd - parameters for prior of coefficient of covariates for logitP
# tauP.alpha, tauP.beta - parameter for prior on tauP (residual variance of logit(P)'s after adjusting for
# covariates)
# clip.frac.H - what fraction of hatchery fish are clipped (KNOWN in advance)
#
# Parameters of the model are:
# p[i]
# logitP[i] = logit(p[i]) = logitP.cov*beta.logitP
# The beta coefficients have a prior that is N(mean= prior.beta.logitP.mean, sd= prior.beta.logitP.sd)
# U.W[i] - number of unmarked wild fish passing stratum i in the population
# U.H[i] - number of unmarked hatchery fish passing stratum i in the population
# etaU.W[i] = log(U.W[i])
# etaU.H[i] = log(U.H[i])
# which comes from spline with parameters bU.W[1... Knots+q] or bU.H[1... knots+q]
# + error term eU.W[i] or eU.H[i]
##### Fit the spline for wildfish - this covers the entire experiment ######
for(i in 1:Nstrata){
logUne.W[i] <- inprod(SplineDesign.W[i,1:n.bU.W],bU.W[1:n.bU.W]) # spline design matrix * spline coeff
etaU.W[i] ~ dnorm(logUne.W[i], taueU)T(,20) # add random error
eU.W[i] <- etaU.W[i] - logUne.W[i]
}
##### Fit the spline for hatchery fish - these fish only enter AFTER hatch.after ######
for(i in (hatch.after+1):Nstrata){
logUne.H[i] <- inprod(SplineDesign.H[i,1:n.bU.H],bU.H[1:n.bU.H]) # spline design matrix * spline coeff
etaU.H[i] ~ dnorm(logUne.H[i], taueU)T(,20) # add random error
eU.H[i] <- etaU.H[i] - logUne.H[i]
}
##### Model the capture probabilities #####
for(i in 1:hatch.after){
mu.logitP[i] <- inprod(logitP.cov[i,1:NlogitP.cov], beta.logitP[1:NlogitP.cov])
## logitP[i] ~ dnorm(mu.logitP[i],tauP)
# use the u2.Ncopy to break the cycle (in OpenBugs) and improve mixing (see Matt S.)
mu.epsilon[i] <- mu.logitP[i] - log(u2.Ncopy[i] + 1) + etaU.W[i]
epsilon[i] ~ dnorm(mu.epsilon[i],tauP)
logitP[i] <- log(u2.Ncopy[i] + 1) - etaU.W[i] + epsilon[i] # Matts trick to speed mixing
}
for(i in (hatch.after+1):Nstrata){
mu.logitP[i] <- inprod(logitP.cov[i,1:NlogitP.cov], beta.logitP[1:NlogitP.cov])
## logitP[i] ~ dnorm(mu.logitP[i],tauP)
# use the u2.Ncopy and u2.Acopy to break the cycle (in OpenBugs) and improve mixing (see Matt S.)
mu.epsilon[i] <- mu.logitP[i] - log(u2.Ncopy[i] + u2.Acopy[i] + 1) + (etaU.W[i] + etaU.H[i]) # Matts trick to speed mixing
epsilon[i] ~ dnorm(mu.epsilon[i],tauP)
logitP[i] <- log(u2.Ncopy[i] + u2.Acopy[i] + 1) - log(exp(etaU.W[i]) + exp(etaU.H[i])) + epsilon[i]
}
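# Note on the reparameterization above (Matt S. trick), spelled out for clarity:
# rather than sampling logitP[i] ~ dnorm(mu.logitP[i], tauP) directly (the commented-out lines),
# the model samples epsilon[i] and recovers logitP[i] as a data-dependent offset
# (roughly the log of the observed capture fraction, log(u2+1) - log(U)) plus epsilon[i].
# Because the prior mean of epsilon[i] is shifted by that same offset, the implied prior for
# logitP[i] is still centred at mu.logitP[i] with precision tauP, while the offset reduces the
# posterior correlation between logitP[i] and etaU[i] and so improves mixing.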
##### Hyperpriors #####
## Run size - wild and hatchery fish - flat priors
for(i in 1:n.b.flat.W){
bU.W[b.flat.W[i]] ~ dnorm(0, 1E-6)
}
for(i in 1:n.b.flat.H){
bU.H[b.flat.H[i]] ~ dnorm(0, 1E-6)
}
## Run size - priors on the difference for wild and hatchery fish
for(i in 1:n.b.notflat.W){
xiU.W[b.notflat.W[i]] <- 2*bU.W[b.notflat.W[i]-1] - bU.W[b.notflat.W[i]-2]
bU.W [b.notflat.W[i]] ~ dnorm(xiU.W[b.notflat.W[i]],tauU)
}
for(i in 1:n.b.notflat.H){
xiU.H[b.notflat.H[i]] <- 2*bU.H[b.notflat.H[i]-1] - bU.H[b.notflat.H[i]-2]
bU.H [b.notflat.H[i]] ~ dnorm(xiU.H[b.notflat.H[i]],tauU)
}
tauU ~ dgamma(tauU.alpha,tauU.beta) # Notice reduction from .0005 (in thesis) to .05
sigmaU <- 1/sqrt(tauU)
taueU ~ dgamma(taueU.alpha,taueU.beta) # dgamma(100,.05) # Notice reduction from .0005 (in thesis) to .05
sigmaeU <- 1/sqrt(taueU)
## Capture probabilities covariates
for(i in 1:NlogitP.cov){
beta.logitP[i] ~ dnorm(prior.beta.logitP.mean[i], 1/prior.beta.logitP.sd[i]^2) # rest of beta terms are normal 0 and a large variance
}
beta.logitP[NlogitP.cov+1] ~ dnorm(0, .01) # dummy so that covariates of length 1 function properly
tauP ~ dgamma(tauP.alpha,tauP.beta)
sigmaP <- 1/sqrt(tauP)
##### Likelihood contributions #####
## Number of marked fish recovered ##
for(i in 1:Nstrata){
logit(p[i]) <- logitP[i] # convert from logit scale
m2[i] ~ dbin(p[i],n1[i]) # recovery of marked fish
}
## captures of wild (unclipped fish) - these are the only fish available up to (and including) hatch.after
for(i in 1:hatch.after){
U.W[i] <- round(exp(etaU.W[i])) # convert from log scale
u2.N[i] ~ dbin(p[i],U.W[i])
}
## captures of hatchery (clipped fish) - these can only occur AFTER hatch.after
for(i in (hatch.after+1):Nstrata){
U.W[i] <- round(exp(etaU.W[i])) # convert from log scale
U.H[i] <- round(exp(etaU.H[i])) # convert from log scale
U.clip[i] ~ dbin(clip.frac.H, U.H[i])
p.temp[i] <- p[i]*clip.frac.H
u2.A[i] ~ dbin(p.temp[i], U.H[i]) # must be hatchery and clipped
}
## captures of wild+hatchery unclipped fish - these can only occur AFTER hatch.after
for(i in (hatch.after+1):Nstrata){
U.noclip[i] <- U.W[i] + U.H[i] - U.clip[i]
u2.N[i] ~ dbin(p[i], U.noclip[i])
}
##### Derived Parameters #####
Utot.W <- sum( U.W[1:Nstrata]) # Total number of unmarked fish - wild
Utot.H <- sum( U.H[(hatch.after+1):Nstrata])# Total number of unmarked fish - hatchery
Utot <- Utot.W + Utot.H # Grand total number of fish
# Because JAGS does not properly monitor partially defined vectors (see Section 2.5 of the JAGS user manual)
# we need to add a dummy distribution for the parameters of interest prior to the hatchery fish arriving.
# This is not needed in OpenBUGS, which returns the subset actually monitored, but we add this to be consistent
# between the two programs
for(i in 1:hatch.after){
U.H[i] ~ dnorm(0,1) # These are completely arbitrary and never get updated
etaU.H[i] ~ dnorm(0,1)
logUne.H[i] ~ dnorm(0,1)
eU.H[i] ~ dnorm(0,1)
}
} # end of model
", fill=TRUE)
sink() # End of saving the Bugs program
Nstrata <- length(n1)
# make a copy of u2.N to improve mixing in the MCMC model
u2.Ncopy <- stats::spline(x=1:Nstrata, y=u2.N, xout=1:Nstrata)$y
u2.Ncopy <- round(u2.Ncopy) # round to integers
# similarly make a copy of u2.A to improve mixing in the MCMC model
# notice that Adipose clips only occur at hatch.after or later
u2.Acopy <- u2.A * 0
u2.Acopy[hatch.after:Nstrata] <- stats::spline(x=hatch.after:Nstrata, y=u2.A[hatch.after:Nstrata], xout=hatch.after:Nstrata)$y
u2.Acopy <- round(u2.Acopy) # round to integers
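# Note: the *copy vectors are passed to JAGS as smoothed, data-derived constants. Using them
# in the logitP offset (rather than u2.N and u2.A themselves, which also appear in dbin()
# likelihoods) avoids a direct functional link between logitP and an observed stochastic node,
# which is the cycle mentioned in the model comments above, and the smoothing keeps the
# offset reasonably well behaved when individual stratum counts are small.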
datalist <- list("Nstrata", "n1", "m2",
"u2.A", "u2.Acopy",
"u2.N", "u2.Ncopy",
"hatch.after", "clip.frac.H",
"logitP.cov", "NlogitP.cov",
"SplineDesign.W",
"b.flat.W", "n.b.flat.W", "b.notflat.W", "n.b.notflat.W", "n.bU.W",
"SplineDesign.H",
"b.flat.H", "n.b.flat.H", "b.notflat.H", "n.b.notflat.H", "n.bU.H",
"tauU.alpha", "tauU.beta", "taueU.alpha", "taueU.beta",
"prior.beta.logitP.mean", "prior.beta.logitP.sd",
"tauP.alpha", "tauP.beta")
parameters <- c("logitP", "beta.logitP", "tauP", "sigmaP",
"bU.W", "bU.H", "tauU", "sigmaU",
"eU.W", "eU.H", "taueU", "sigmaeU",
"Utot.W", "Utot.H", "Utot", "logUne.W", "logUne.H",
"etaU.W", "etaU.H", "U.W", "U.H")
if( any(is.na(m2))) {parameters <- c(parameters,"m2")} # monitor in case of bad data where missing values are present
if( any(is.na(u2.A))) {parameters <- c(parameters,"u2.A")}
if( any(is.na(u2.N))) {parameters <- c(parameters,"u2.N")}
## Now to create the initial values, and the data prior to call to the MCMC sampler
# Estimate number of wild and hatchery fish based on clip rate
u2.H <- u2.A/clip.frac.H # only a portion of the hatchery fish are clipped
u2.W <- pmax(u2.N - u2.H*(1-clip.frac.H),0) # subtract the guesstimated number of hatchery fish
u2.H[is.na(u2.H)] <- 1 # in case of missing values
u2.W[is.na(u2.W)] <- 1 # in case of missing values
avg.P <- sum(m2,na.rm=TRUE)/sum(n1, na.rm=TRUE)
Uguess.W <- pmax((u2.W+1)*(n1+2)/(m2+1), u2.W/avg.P, 1, na.rm=TRUE) # try and keep Uguess larger than observed values
Uguess.H <- pmax((u2.H+1)*(n1+2)/(m2+1), u2.H/avg.P, 1, na.rm=TRUE)
Uguess.H[1:hatch.after] <- 0 # no hatchery fish prior to release from hatchery
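# The Uguess values are only rough starting points for the spline fit and initial values:
# (u2+1)*(n1+2)/(m2+1) is a Chapman-style Petersen expansion of the unmarked catch,
# u2/avg.P expands by the pooled capture rate, and pmax() keeps the guess at least 1 and
# no smaller than either expansion. For example (hypothetical numbers), with n1=100, m2=10
# and u2.W=200 the stratum guess is (200+1)*(100+2)/(10+1), which is about 1864 wild fish.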
## create the B-spline design matrix for wild and hatchery fish
## The design matrix for hatchery fish will still have rows corresponding to entries PRIOR to
## the hatchery release but these are never used in the winbugs fitting routines
## There is a separate (single) spline for hatchery and wild fish with NO breakpoints
## The first two coefficients have a flat prior and the rest of the coefficients are modelled using
## differences between the successive coefficients
## Wild fish. This covers the entire experiment.
SplineDegree <- 3 # Degree of spline between occasions
knots <- seq(4,Nstrata,4)/(Nstrata+1) # a knot roughly every 4th stratum
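# For example (hypothetical), with Nstrata=30 the line above places interior knots at
# 4/31, 8/31, ..., 28/31 of the scaled stratum axis, i.e. roughly every fourth stratum.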
SplineDesign.W <- bs((1:Nstrata)/(Nstrata+1), knots=knots, degree=SplineDegree, intercept=TRUE, Boundary.knots=c(0,1))
b.flat.W <- c(1,2)
b.notflat.W <- 3:(ncol(SplineDesign.W))
n.b.flat.W <- length(b.flat.W)
n.b.notflat.W <- length(b.notflat.W)
n.bU.W <- n.b.flat.W + n.b.notflat.W
init.bU.W <- stats::lm(log(Uguess.W+1) ~ SplineDesign.W-1)$coefficients # initial values for spline coefficients
## hatchery fish. Notice they can only enter AFTER hatch.after, The spline design matrix still has rows
## of zero for 1:hatch.after to make it easier in Bugs
SplineDegree <- 3 # Degree of spline between occasions
knots <- (seq((hatch.after+4),Nstrata-1,4)-hatch.after)/(Nstrata-hatch.after+1) # a knot roughly every 4th stratum
SplineDesign.H <- bs((1:(Nstrata-hatch.after))/(Nstrata-hatch.after+1), knots=knots, degree=SplineDegree, intercept=TRUE, Boundary.knots=c(0,1))
b.flat.H <- c(1,2)
b.notflat.H <- 3:(ncol(SplineDesign.H))
n.b.flat.H <- length(b.flat.H)
n.b.notflat.H <- length(b.notflat.H)
n.bU.H <- n.b.flat.H + n.b.notflat.H
init.bU.H <- stats::lm(log(Uguess.H[(hatch.after+1):Nstrata]+1) ~ SplineDesign.H-1)$coefficients # initial values for spline coefficients
# patch up the initial rows of the spline design matrix
SplineDesign.H <- rbind(matrix(0,nrow=hatch.after, ncol=ncol(SplineDesign.H)), SplineDesign.H)
## create an initial plot of the fit to the number of unmarked fish
plot.data <- data.frame(time=time,
logUguess.H = log(Uguess.H+1),
logUguess.W = log(Uguess.W+1),
spline.H=SplineDesign.H %*% init.bU.H,
spline.W=SplineDesign.W %*% init.bU.W, stringsAsFactors=FALSE)
init.plot <- ggplot(data=plot.data, aes_(x=~time))+
ggtitle(title, subtitle="Initial spline fit to estimated log U[i]")+
geom_point(aes_(y=~logUguess.H), pch="H", color="green")+
geom_point(aes_(y=~logUguess.W), pch="W", color="blue")+
geom_line(aes_(y=~spline.H), color="green")+
geom_line(aes_(y=~spline.W), color="blue")+
xlab("Stratum")+ylab("log(U[i])")+
scale_x_continuous(breaks=seq(min(plot.data$time, na.rm=TRUE),max(plot.data$time, na.rm=TRUE),2))
if(save.output.to.files)ggsave(init.plot, filename=paste(prefix,"-initialU.pdf",sep=""), height=4, width=6, units="in")
#results$plots$plot.init <- init.plot # do this after running the MCMC chain (see end of function)
# get the logitP=logit(P) covariate matrix ready
logitP.cov <- as.matrix(logitP.cov)
NlogitP.cov <- ncol(as.matrix(logitP.cov))
## initial values for the parameters of the model
init.vals <- genInitVals(model="TSPDE-WHchinook",
n1=n1,
m2=m2,
u2=list(W=u2.W,H=u2.H, A=u2.A, N=u2.N),
logitP.cov=logitP.cov,
SplineDesign=list(W=SplineDesign.W,H=SplineDesign.H),
hatch.after=hatch.after,
n.chains=n.chains)
## Generate data list
data.list <- list()
for(i in 1:length(datalist)){
data.list[[length(data.list)+1]] <-get(datalist[[i]])
}
names(data.list) <- as.list(datalist)
## Call MCMC sampler
results <- run.MCMC(modelFile=model.file,
dataFile=data.file,
dataList=data.list,
initFiles=init.files,
initVals=init.vals,
parameters=parameters,
nChains=n.chains,
nIter=n.iter,
nBurnin=n.burnin,
nSims=n.sims,
overRelax=FALSE,
initialSeed=InitialSeed,
working.directory=working.directory,
debug=debug)
results$plots$plot.init <- init.plot # save initial plot to results object
return(results)
} # end of function
|
/scratch/gouwar.j/cran-all/cranData/BTSPAS/R/TimeStratPetersenDiagErrorWHChinook.R
|
## 2020-11-07 CJS Allow user to specify prior for beta parameters for covariates on logitP
# 2018-12-06 CJS converted initial plot to ggplot2
# 2018-11-25 CJS removed all openbugs references
# 2013-12-31 CJS conversion to JAGS
# - no model name
# - C(,20) -> T(,20)
# - dflat() to dnorm(0, 1E-6)
# - added u2.N.1copy to improve mixing based on Matt S. suggestion ????? - not done here?
# - added u2.A.1copy to improve mixing based on Matt S. suggestion
# - added u2.N.YoYcopy to improve mixing based on Matt S. suggestion
# - added u2.A.YoYcopy to improve mixing based on Matt S. suggestion
# - fixed monitoring of *H parameters that are only present in hatch.after or later
# JAGS won't monitor these variables unless entries from 1:hatch.after are defined
# 2011-05-15 CJS limited etaU to 20 or less
# 2011-01-24 SB added call to run.windows.openbugs and run.windows.winbugs
# 2010-11-25 CJS add output to track progress of sampling through burnin and post-burnin
# 2010-04-26 CJS fixed problem where init.logitP failed when n1=m2 (logit=infinite) and lm() failed.
# 2010-03-29 CJS Created first release
#' @keywords internal
#' @importFrom stats lm var sd
# This DIFFERS from the TimeStratPetersenDiagErrorWHChinook routine in the following ways.
# YoY chinook are separated from age 1 chinook
# The wild YoY chinook are present in the stream with NO AD clips until the hatchery fish arrive
# The Age 1 chinook (from last year) are still present for the entire experiment with some of them
# having ad-fin clips using the clip-rate from last year.
# The n1/m2 recapture portion is assumed to be common to both ages of fish (a doubtful assumption?)
TimeStratPetersenDiagErrorWHChinook2 <-
function(title, prefix, time, n1, m2,
u2.A.YoY, u2.N.YoY, u2.A.1, u2.N.1,
hatch.after.YoY=NULL,
clip.frac.H.YoY=.25, clip.frac.H.1 = .25,
logitP.cov=as.matrix(rep(1,length(u2.A.YoY))),
n.chains=3, n.iter=200000, n.burnin=100000, n.sims=2000,
tauU.alpha=1, tauU.beta=.05, taueU.alpha=1, taueU.beta=.05,
prior.beta.logitP.mean = c(logit(sum(m2,na.rm=TRUE)/sum(n1,na.rm=TRUE)),rep(0, ncol(as.matrix(logitP.cov))-1)),
prior.beta.logitP.sd = c(stats::sd(logit((m2+.5)/(n1+1)),na.rm=TRUE), rep(10, ncol(as.matrix(logitP.cov))-1)),
tauP.alpha=.001, tauP.beta=.001,
debug=FALSE, debug2=FALSE,
InitialSeed,
save.output.to.files=TRUE){
set.seed(InitialSeed) # set prior to initial value computations
#
# Fit the smoothed time-Stratified Petersen estimator with Diagonal recoveries (i.e. no recoveries
# outside stratum of release), error in the smoothed U curve, and separating wild vs hatchery stocks
# for chinook.
#
# This routine assumes that the strata are time (e.g. weeks).
# In each stratum n1 fish are released (with marks). These are usually
# captured fish that are marked, transported upstream, and released.
# These fish are used only to estimate the recapture rate downstream.
# Of the n1 fish released, m2 fish are recaptured in the same stratum (e.g. week) of release.
# There is a related function that allows fish to be recaptured in subsequent weeks.
#
# Both YoY and age 1 fish are present in the stream.
# The traps capture u2.A.YoY (YoY with adipose fin clipped) and u2.N.YoY (YoY not clipped)
# fish that are newly captured in stratum i.
# Prior to the hatch.after.YoY, there are no ad-fin clipped YoY fish and all YoY fish captured
# are assumed to be wild. The clip-fraction of the hatchery fish is clip.frac.H.YoY
# The traps also capture u2.A.1 (age 1 with ad-fin clipped) and u2.N.1 (age 1 with no ad-fin clipped)
# which represent fish from LAST year that have residualized in the stream. These are a mixture of
# wild and hatchery fish. The clip rate for these fish from last year is clip.frac.H.1
#
# All wild fish are NOT ad-clipped.
# Only a fraction of hatchery fish are ad-clipped. It is assumed that the fraction of ad-clipped
# hatchery fish is constant over the life of the run.
#
# Input
# prefix - prefix for file name for initial plot of U's
# time - the stratum number
# n1 - vector of number of fish released in stratum i
# m2 - vector of number of fish recovered in stratum i (EXCLUDING recaps)
# u2.A.YoY - vector of number of ad-clipped YoY unmarked fish captured in stratum i
# u2.N.YoY - vector of number of non-clipped YoY unmarked fish captured in stratum i
# u2.A.1 - vector of number of ad-clipped Age1 unmarked fish captured in stratum i
# u2.N.1 - vector of number of non-clipped Age1 unmarked fish captured in stratum i
# hatch.after.YoY - point AFTER which the YoY hatchery fish are released.
# clip.frac.H.YoY - what fraction of hatchery fish are clipped at YoY
# clip.frac.H.1 - what fraction of hatchery fish are clipped as YoY LAST YEAR who are now age 1
# logitP.cov - covariates for logit(P)=X beta.logitP
# This routine makes a call to the MCMC sampler to fit the model and then gets back the
# coda files for the posterior distribution.
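# Illustrative sketch only (all numbers invented, not from any real data set): a hypothetical
# call to this internal routine with 8 weekly strata might look roughly like:
# TimeStratPetersenDiagErrorWHChinook2(
#    title="Example", prefix="example-",
#    time=1:8,
#    n1=c(80, 90, 100, 95, 85, 90, 100, 95),
#    m2=c( 8, 10,   9, 11,  7,  9,  12, 10),
#    u2.A.YoY=c(  0,   0,   0,  30,  50,  60,  40,  20),   # YoY clips appear only after hatch.after.YoY
#    u2.N.YoY=c(150, 180, 200, 220, 240, 230, 200, 170),
#    u2.A.1  =c(  5,   6,   4,   5,   3,   4,   2,   3),   # residualized Age1 fish are present all season
#    u2.N.1  =c( 40,  45,  35,  30,  25,  20,  15,  10),
#    hatch.after.YoY=3, clip.frac.H.YoY=0.25, clip.frac.H.1=0.25,
#    n.chains=3, n.iter=5000, n.burnin=2500, n.sims=500,    # deliberately short chains, for illustration only
#    InitialSeed=234323, save.output.to.files=FALSE)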
## Set working directory to current directory (we should allow users to select this)
working.directory <- getwd()
## Define paths for the model, data, and initial value files
model.file <- file.path(working.directory, "model.txt")
data.file <- file.path(working.directory,"data.txt")
init.files <- file.path(working.directory,
paste("inits", 1:n.chains,".txt", sep = ""))
# Save the Bugs program to the model.txt file
#
sink(model.file) # NOTE: NO " allowed in model as this confuses the cat command
cat("
model {
# Time Stratified Petersen with Diagonal recapture (no spillover in subsequent weeks or marked fish)
# and allowing for error in the smoothed U curve with separation of wild and hatchery fish
# Each of the wild and hatchery populations is fit using a SINGLE spline curve as this should be flexible
# enough to capture the individual behaviours
# Data input:
# Nstrata - number of strata
# n1 - number of marked fish released
# m2 - number of marked fish recaptured
# u2.A.YoY - number of adclipped YoY unmarked fish captured (must be hatchery fish).
# u2.N.YoY - number of non-clipped YoY unmarked fish captured (wild + hatchery fish)
# u2.A.1 - number of adclipped Age1 unmarked fish captured (must be hatchery fish from last year)
# u2.N.1 - number of non-clipped Age1 unmarked fish captured (wild + hatchery fish from last year)
# clip.frac.H.YoY- what fraction of YoY hatchery fish are clipped
# clip.frac.H.1 - what fraction of Age1 hatchery fish are clipped (from last year's hatchery release)
# logitP.cov - covariates for logitP
# NlogitP.cov - number of logitP covariates
# SplineDesign.W.YoY- YoY wildfish spline design matrix of size [Nstrata, maxelement of n.b.notflat.W.YoY]
# SplineDesign.H.YoY- YoY hatchery spline design matrix of size [Nstrata, maxelement of n.b.notflat.H.YoY]
# SplineDesign.W.1 - Age1 wildfish spline design matrix of size [Nstrata, maxelement of n.b.notflat.W.1]
# SplineDesign.H.1 - Age1 hatchery spline design matrix of size [Nstrata, maxelement of n.b.notflat.H.1]
# These design matrices are set up prior to the call.
# b.flat.W.YoY - vector of strata indices where the prior for the b's will be flat for YoY wild fish
# b.flat.H.YoY - vector of strata indices where the prior for the b's will be flat for YoY hatchery fish
# b.flat.W.1 - vector of strata indices where the prior for the b's will be flat for Age1 wild fish
# b.flat.H.1 - vector of strata indices where the prior for the b's will be flat for Age1 hatchery fish
# these are normally the first two strata of each spline segment
# n.b.flat.W.YoY - number of b coefficients that have a flat prior - YoY wild fish
# n.b.flat.H.YoY - number of b coefficients that have a flat prior - YoY hatchery fish
# n.b.flat.W.1 - number of b coefficients that have a flat prior - Age1 wild fish
# n.b.flat.H.1 - number of b coefficients that have a flat prior - Age1 hatchery fish
# b.notflat.W.YoY - vector of strata indices where difference in coefficients is modelled - YoY wild fish
# b.notflat.H.YoY - vector of strata indices where difference in coefficients is modelled - YoY hatchery fish
# b.notflat.W.1 - vector of strata indices where difference in coefficients is modelled - Age1 wild fish
# b.notflat.H.1 - vector of strata indices where difference in coefficients is modelled - Age1 hatchery fish
# n.b.notflat.W.YoY - number of b coefficients that do not have a flat prior - YoY wild fish
# n.b.notflat.H.YoY - number of b coefficients that do not have a flat prior - YoY hatchery fish
# n.b.notflat.W.1 - number of b coefficients that do not have a flat prior - Age1 wild fish
# n.b.notflat.H.1 - number of b coefficients that do not have a flat prior - Age1 hatchery fish
# tauU.alpha, tauU.beta - parameters for prior on tauU
# taueU.alpha, taueU.beta - parameters for prior on taueU
# prior.beta.logitP.mean, prior.beta.logitP.sd - parameters for prior of coefficient of covariates for logitP
# tauP.alpha, tauP.beta - parameter for prior on tauP (residual variance of logit(P)'s after adjusting for
# covariates)
# clip.frac.H.YoY - what fraction of YoY hatchery fish are clipped (KNOWN in advance)
# clip.frac.H.1 - what fraction of Age1 hatchery fish are clipped (KNOWN in advance from last year's releases)
#
# Parameters of the model are:
# p[i]
# logitP[i] = logit(p[i]) = logitP.cov*beta.logitP
# The beta coefficients have a prior that is N(mean= prior.beta.logitP.mean, sd= prior.beta.logitP.sd)
# U.W.YoY[i] - number of YoY unmarked wild fish passing stratum i in the population
# U.H.YoY[i] - number of YoY unmarked hatchery fish passing stratum i in the population
# etaU.W.YoY[i] = log(U.W.YoY[i])
# etaU.H.YoY[i] = log(U.H.YoY[i])
# which comes from spline with parameters bU.W.YoY[1... Knots+q] or bU.H.YoY[1... knots+q]
# + error term eU.W.YoY[i] or eU.H.YoY[i]
# U.W.1[i] - number of Age1 unmarked wild fish passing stratum i in the population
# U.H.1[i] - number of Age1 unmarked hatchery fish passing stratum i in the population
# etaU.W.1[i] = log(U.W.1[i])
# etaU.H.1[i] = log(U.H.1[i])
# which comes from spline with parameters bU.W.1[1... Knots+q] or bU.H.1[1... knots+q]
# + error term eU.W.1[i] or eU.H.1[i]
##### Fit the spline for YoY wildfish - this covers the entire experiment ######
for(i in 1:Nstrata){
logUne.W.YoY[i] <- inprod(SplineDesign.W.YoY[i,1:n.bU.W.YoY],bU.W.YoY[1:n.bU.W.YoY]) # spline design matrix * spline coeff
etaU.W.YoY[i] ~ dnorm(logUne.W.YoY[i], taueU)T(,20) # add random error
eU.W.YoY [i] <- etaU.W.YoY[i] - logUne.W.YoY[i]
}
##### Fit the spline for YoY hatchery fish - these fish only enter AFTER hatch.after.YoY ######
for(i in (hatch.after.YoY+1):Nstrata){
logUne.H.YoY[i] <- inprod(SplineDesign.H.YoY[i,1:n.bU.H.YoY],bU.H.YoY[1:n.bU.H.YoY]) # spline design matrix * spline coeff
etaU.H.YoY[i] ~ dnorm(logUne.H.YoY[i], taueU)T(,20) # add random error
eU.H.YoY [i] <- etaU.H.YoY[i] - logUne.H.YoY[i]
}
##### Fit the spline for Age1 wildfish - this covers the entire experiment ######
for(i in 1:Nstrata){
logUne.W.1[i] <- inprod(SplineDesign.W.1[i,1:n.bU.W.1],bU.W.1[1:n.bU.W.1]) # spline design matrix * spline coeff
etaU.W.1[i] ~ dnorm(logUne.W.1[i], taueU)T(,20) # add random error
eU.W.1 [i] <- etaU.W.1[i] - logUne.W.1[i]
}
##### Fit the spline for Age1 hatchery fish - this covers the entire experiment because they have residualized from last year
for(i in 1:Nstrata){
logUne.H.1[i] <- inprod(SplineDesign.H.1[i,1:n.bU.H.1],bU.H.1[1:n.bU.H.1]) # spline design matrix * spline coeff
etaU.H.1[i] ~ dnorm(logUne.H.1[i], taueU)T(,20) # add random error
eU.H.1 [i] <- etaU.H.1[i] - logUne.H.1[i]
}
##### Model the capture probabilities #####
for(i in 1:Nstrata){
mu.logitP[i] <- inprod(logitP.cov[i,1:NlogitP.cov], beta.logitP[1:NlogitP.cov])
logitP[i] ~ dnorm(mu.logitP[i],tauP)
}
##### Hyperpriors #####
## Run size - wild and hatchery fish - flat priors
for(i in 1:n.b.flat.W.YoY){
bU.W.YoY[b.flat.W.YoY[i]] ~ dnorm(0, 1E-6)
}
for(i in 1:n.b.flat.H.YoY){
bU.H.YoY[b.flat.H.YoY[i]] ~ dnorm(0, 1E-6)
}
for(i in 1:n.b.flat.W.1){
bU.W.1[b.flat.W.1[i]] ~ dnorm(0, 1E-6)
}
for(i in 1:n.b.flat.H.1){
bU.H.1[b.flat.H.1[i]] ~ dnorm(0, 1E-6)
}
## Run size - priors on the difference for YoY wild and hatchery fish
for(i in 1:n.b.notflat.W.YoY){
xiU.W.YoY[b.notflat.W.YoY[i]] <- 2*bU.W.YoY[b.notflat.W.YoY[i]-1] - bU.W.YoY[b.notflat.W.YoY[i]-2]
bU.W.YoY [b.notflat.W.YoY[i]] ~ dnorm(xiU.W.YoY[b.notflat.W.YoY[i]],tauU)
}
for(i in 1:n.b.notflat.H.YoY){
xiU.H.YoY[b.notflat.H.YoY[i]] <- 2*bU.H.YoY[b.notflat.H.YoY[i]-1] - bU.H.YoY[b.notflat.H.YoY[i]-2]
bU.H.YoY [b.notflat.H.YoY[i]] ~ dnorm(xiU.H.YoY[b.notflat.H.YoY[i]],tauU)
}
## Run size - priors on the difference for AGE1 wild and hatchery fish
for(i in 1:n.b.notflat.W.1){
xiU.W.1[b.notflat.W.1[i]] <- 2*bU.W.1[b.notflat.W.1[i]-1] - bU.W.1[b.notflat.W.1[i]-2]
bU.W.1 [b.notflat.W.1[i]] ~ dnorm(xiU.W.1[b.notflat.W.1[i]],tauU)
}
for(i in 1:n.b.notflat.H.1){
xiU.H.1[b.notflat.H.1[i]] <- 2*bU.H.1[b.notflat.H.1[i]-1] - bU.H.1[b.notflat.H.1[i]-2]
bU.H.1 [b.notflat.H.1[i]] ~ dnorm(xiU.H.1[b.notflat.H.1[i]],tauU)
}
tauU ~ dgamma(tauU.alpha,tauU.beta) # Notice reduction from .0005 (in thesis) to .05
sigmaU <- 1/sqrt(tauU)
taueU ~ dgamma(taueU.alpha,taueU.beta) # dgamma(100,.05) # Notice reduction from .0005 (in thesis) to .05
sigmaeU <- 1/sqrt(taueU)
## Capture probabilities covariates
for(i in 1:NlogitP.cov){
beta.logitP[i] ~ dnorm(prior.beta.logitP.mean[i], 1/prior.beta.logitP.sd[i]^2) # rest of beta terms are normal 0 and a large variance
}
beta.logitP[NlogitP.cov+1] ~ dnorm(0, .01) # dummy so that covariates of length 1 function properly
tauP ~ dgamma(tauP.alpha,tauP.beta)
sigmaP <- 1/sqrt(tauP)
##### Likelihood contributions #####
## Number of marked fish recovered ##
for(i in 1:Nstrata){
logit(p[i]) <- logitP[i] # convert from logit scale
m2[i] ~ dbin(p[i],n1[i]) # recovery of marked fish
}
## captures of YoY wild (unclipped fish) - these are the only fish available up to (and including) hatch.after.YoY
for(i in 1:hatch.after.YoY){
U.W.YoY[i] <- round(exp(etaU.W.YoY[i])) # convert from log scale
u2.N.YoY[i] ~ dbin(p[i],U.W.YoY[i])
}
## captures of YoY hatchery (clipped fish) - these can only occur AFTER hatch.after.YoY
for(i in (hatch.after.YoY+1):Nstrata){
U.W.YoY[i] <- round(exp(etaU.W.YoY[i])) # convert from log scale
U.H.YoY[i] <- round(exp(etaU.H.YoY[i])) # convert from log scale
U.clip.YoY[i] ~ dbin(clip.frac.H.YoY, U.H.YoY [i])
p.temp.YoY[i] <- p[i]*clip.frac.H.YoY
u2.A.YoY[i] ~ dbin(p.temp.YoY[i], U.H.YoY[i]) # must be hatchery and clipped
}
## captures of YoY wild+hatchery unclipped fish - these can only occur AFTER hatch.after.YoY
for(i in (hatch.after.YoY+1):Nstrata){
U.noclip.YoY[i] <- U.W.YoY[i] + U.H.YoY[i] - U.clip.YoY[i]
u2.N.YoY[i] ~ dbin(p[i], U.noclip.YoY[i])
}
## captures of Age1 wild+hatchery (clipped fish)
for(i in 1:Nstrata){
U.W.1[i] <- round(exp(etaU.W.1[i])) # convert from log scale
U.H.1[i] <- round(exp(etaU.H.1[i])) # convert from log scale
U.clip.1[i] ~ dbin(clip.frac.H.1, U.H.1[i])
#u2.A[i] ~ dbin(p[i], U.clip.YoY[i])
p.temp.1[i] <- p[i]*clip.frac.H.1
u2.A.1[i] ~ dbin(p.temp.1[i], U.H.1[i]) # must be hatchery and clipped
}
## captures of Age1 wild+hatchery unclipped fish
for(i in 1:Nstrata){
U.noclip.1[i] <- U.W.1[i] + U.H.1[i] - U.clip.1[i]
u2.N.1[i] ~ dbin(p[i], U.noclip.1[i])
}
##### Derived Parameters #####
Utot.W.YoY <- sum( U.W.YoY[1:Nstrata]) # Total number of YoY unmarked fish - wild
Utot.H.YoY <- sum( U.H.YoY[(hatch.after.YoY+1):Nstrata])# Total number of YoY unmarked fish - hatchery
Utot.W.1 <- sum( U.W.1[1:Nstrata]) # Total number of Age1 unmarked fish - wild
Utot.H.1 <- sum( U.H.1[1:Nstrata]) # Total number of Age1 unmarked fish - hatchery
Utot.YoY <- Utot.W.YoY + Utot.H.YoY # Grand total number of YoY fish
Utot.1 <- Utot.W.1 + Utot.H.1 # Grand total number of Age1 fish
Utot <- Utot.YoY + Utot.1
# Because JAGS does not properly monitor partially defined vectors (see Section 2.5 of the JAGS user manual)
# we need to add a dummy distribution for the parameters of interest prior to the hatchery fish arriving.
# This is not needed in OpenBUGS, which returns the subset actually monitored, but we add this to be consistent
# between the two programs
for(i in 1:hatch.after.YoY){
U.H.YoY[i] ~ dnorm(0,1) # These are completely arbitrary and never get updated
etaU.H.YoY[i] ~ dnorm(0,1)
logUne.H.YoY[i] ~ dnorm(0,1)
eU.H.YoY[i] ~ dnorm(0,1)
}
} # end of model
", fill=TRUE)
sink() # End of saving the Bugs program
datalist <- list("Nstrata", "n1", "m2",
"u2.A.YoY", "u2.N.YoY", "u2.A.1", "u2.N.1",
"hatch.after.YoY", "clip.frac.H.YoY", "clip.frac.H.1",
"logitP.cov", "NlogitP.cov",
"SplineDesign.W.YoY",
"b.flat.W.YoY", "n.b.flat.W.YoY", "b.notflat.W.YoY", "n.b.notflat.W.YoY", "n.bU.W.YoY",
"SplineDesign.H.YoY",
"b.flat.H.YoY", "n.b.flat.H.YoY", "b.notflat.H.YoY", "n.b.notflat.H.YoY", "n.bU.H.YoY",
"SplineDesign.W.1",
"b.flat.W.1", "n.b.flat.W.1", "b.notflat.W.1", "n.b.notflat.W.1", "n.bU.W.1",
"SplineDesign.H.1",
"b.flat.H.1", "n.b.flat.H.1", "b.notflat.H.1", "n.b.notflat.H.1", "n.bU.H.1",
"tauU.alpha", "tauU.beta", "taueU.alpha", "taueU.beta",
"prior.beta.logitP.mean", "prior.beta.logitP.sd",
"tauP.alpha", "tauP.beta")
parameters <- c("logitP", "beta.logitP", "tauP", "sigmaP",
"bU.W.YoY", "bU.H.YoY", "tauU", "sigmaU",
"eU.W.YoY", "eU.H.YoY", "taueU", "sigmaeU",
"Utot.W.YoY", "Utot.H.YoY", "Utot.YoY", "logUne.W.YoY", "logUne.H.YoY",
"etaU.W.YoY", "etaU.H.YoY", "U.W.YoY", "U.H.YoY",
"bU.W.1", "bU.H.1",
"eU.W.1", "eU.H.1",
"Utot.W.1", "Utot.H.1", "Utot.1", "logUne.W.1", "logUne.H.1",
"etaU.W.1", "etaU.H.1", "U.W.1", "U.H.1",
"Utot" )
if( any(is.na(m2))) {parameters <- c(parameters,"m2")} # monitor in case of bad data where missing values are present
if( any(is.na(u2.A.YoY))) {parameters <- c(parameters,"u2.A.YoY")}
if( any(is.na(u2.N.YoY))) {parameters <- c(parameters,"u2.N.YoY")}
if( any(is.na(u2.A.1 ))) {parameters <- c(parameters,"u2.A.1")}
if( any(is.na(u2.N.1 ))) {parameters <- c(parameters,"u2.N.1")}
# Now to create the initial values, and the data prior to call to the MCMC sampler
Nstrata <- length(n1)
# Estimate number of YoY wild and hatchery fish based on clip rate
u2.H.YoY <- u2.A.YoY/clip.frac.H.YoY # only a portion of the YoY hatchery fish are clipped
u2.W.YoY <- pmax(u2.N.YoY - u2.H.YoY*(1-clip.frac.H.YoY),0) # subtract the guesstimated number of hatchery fish
u2.H.YoY[is.na(u2.H.YoY)] <- 1 # in case of missing values
u2.W.YoY[is.na(u2.W.YoY)] <- 1 # in case of missing values
# Estimate number of Age1 wild and hatchery fish based on clip rate
u2.H.1 <- u2.A.1/clip.frac.H.1 # only a portion of the AGE1 hatchery fish are clipped
u2.W.1 <- pmax(u2.N.1 - u2.H.1*(1-clip.frac.H.1),0) # subtract the guesstimated number of hatchery fish
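# Worked example of the split above (hypothetical numbers): with clip.frac.H.1=0.25, seeing
# u2.A.1=10 clipped Age1 fish suggests roughly 10/0.25 = 40 Age1 hatchery fish in the catch,
# of which 40*(1-0.25) = 30 are unclipped; those 30 are subtracted from u2.N.1 to guess the
# wild Age1 catch. The same logic is used for the YoY split above.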
u2.H.1[is.na(u2.H.1)] <- 1 # in case of missing values
u2.W.1[is.na(u2.W.1)] <- 1 # in case of missing values
avg.P <- sum(m2,na.rm=TRUE)/sum(n1, na.rm=TRUE)
Uguess.W.YoY <- pmax((u2.W.YoY+1)*(n1+2)/(m2+1), u2.W.YoY/avg.P, 1, na.rm=TRUE) # try and keep Uguess larger than observed values
Uguess.H.YoY <- pmax((u2.H.YoY+1)*(n1+2)/(m2+1), u2.H.YoY/avg.P, 1, na.rm=TRUE)
Uguess.H.YoY[1:hatch.after.YoY] <- 0 # no YoY hatchery fish prior to release from hatchery
Uguess.W.1 <- pmax((u2.W.1+1)*(n1+2)/(m2+1), u2.W.1/avg.P, 1, na.rm=TRUE) # try and keep Uguess larger than observed values
Uguess.H.1 <- pmax((u2.H.1+1)*(n1+2)/(m2+1), u2.H.1/avg.P, 1, na.rm=TRUE)
# create the B-spline design matrix for YoY wild and hatchery fish
# The design matrix for hatchery fish will still have rows corresponding to entries PRIOR to
# the hatchery release but these are never used in the winbugs fitting routines
# There is a separate (single) spline for hatchery and wild fish with NO breakpoints
# The first two coefficients have a flat prior and the rest of the coefficients are modelled using
# differences between the successive coefficients
# YoY Wild fish. This covers the entire experiment.
SplineDegree <- 3 # Degree of spline between occasions
knots <- seq(4,Nstrata,4)/(Nstrata+1) # a knot roughly every 4th stratum
SplineDesign.W.YoY <- bs((1:Nstrata)/(Nstrata+1), knots=knots, degree=SplineDegree, intercept=TRUE, Boundary.knots=c(0,1))
b.flat.W.YoY <- c(1,2)
b.notflat.W.YoY <- 3:(ncol(SplineDesign.W.YoY))
n.b.flat.W.YoY <- length(b.flat.W.YoY)
n.b.notflat.W.YoY <- length(b.notflat.W.YoY)
n.bU.W.YoY <- n.b.flat.W.YoY + n.b.notflat.W.YoY
init.bU.W.YoY <- stats::lm(log(Uguess.W.YoY+1) ~ SplineDesign.W.YoY-1)$coefficients # initial values for spline coefficients
# Age1 Wild fish. This covers the entire experiment.
SplineDegree <- 3 # Degree of spline between occasions
knots <- seq(4,Nstrata,4)/(Nstrata+1) # a knot roughly every 4th stratum
SplineDesign.W.1 <- bs((1:Nstrata)/(Nstrata+1), knots=knots, degree=SplineDegree, intercept=TRUE, Boundary.knots=c(0,1))
b.flat.W.1 <- c(1,2)
b.notflat.W.1 <- 3:(ncol(SplineDesign.W.1))
n.b.flat.W.1 <- length(b.flat.W.1)
n.b.notflat.W.1 <- length(b.notflat.W.1)
n.bU.W.1 <- n.b.flat.W.1 + n.b.notflat.W.1
init.bU.W.1 <- stats::lm(log(Uguess.W.1+1) ~ SplineDesign.W.1-1)$coefficients # initial values for spline coefficients
# YoY hatchery fish. Notice they can only enter AFTER hatch.after.YoY. The spline design matrix still has rows
# of zero for 1:hatch.after.YoY to make it easier in Bugs
SplineDegree <- 3 # Degree of spline between occasions
knots <- (seq((hatch.after.YoY+4),Nstrata-1,4)-hatch.after.YoY)/(Nstrata-hatch.after.YoY+1) # a knot roughly every 4th stratum
SplineDesign.H.YoY <- bs((1:(Nstrata-hatch.after.YoY))/(Nstrata-hatch.after.YoY+1), knots=knots, degree=SplineDegree, intercept=TRUE, Boundary.knots=c(0,1))
b.flat.H.YoY <- c(1,2)
b.notflat.H.YoY <- 3:(ncol(SplineDesign.H.YoY))
n.b.flat.H.YoY <- length(b.flat.H.YoY)
n.b.notflat.H.YoY <- length(b.notflat.H.YoY)
n.bU.H.YoY <- n.b.flat.H.YoY + n.b.notflat.H.YoY
init.bU.H.YoY <- stats::lm(log(Uguess.H.YoY[(hatch.after.YoY+1):Nstrata]+1) ~ SplineDesign.H.YoY-1)$coefficients # initial values for spline coefficients
# patch up the initial rows of the spline design matrix
SplineDesign.H.YoY <- rbind(matrix(0,nrow=hatch.after.YoY, ncol=ncol(SplineDesign.H.YoY)), SplineDesign.H.YoY)
# Age1 hatchery fish. These are present from last year and so we must model the entire experiment
SplineDegree <- 3 # Degree of spline between occasions
knots <- seq(4,Nstrata,4)/(Nstrata+1) # a knot roughly every 4th stratum
SplineDesign.H.1 <- bs((1:Nstrata)/(Nstrata+1), knots=knots, degree=SplineDegree, intercept=TRUE, Boundary.knots=c(0,1))
b.flat.H.1 <- c(1,2)
b.notflat.H.1 <- 3:(ncol(SplineDesign.H.1))
n.b.flat.H.1 <- length(b.flat.H.1)
n.b.notflat.H.1 <- length(b.notflat.H.1)
n.bU.H.1 <- n.b.flat.H.1 + n.b.notflat.H.1
init.bU.H.1 <- stats::lm(log(Uguess.H.1+1) ~ SplineDesign.H.1-1)$coefficients # initial values for spline coefficients
# create an initial plot of the fit to the number of YoY and Age1 unmarked fish
plot.data <- rbind(data.frame(time=time, group="H.1", pch="H",
logUguess = log(Uguess.H.1+1),
spline=SplineDesign.H.1 %*% init.bU.H.1, stringsAsFactors=FALSE),
data.frame(time=time, group="H.YoY", pch="h",
logUguess = log(Uguess.H.YoY+1),
spline=SplineDesign.H.YoY %*% init.bU.H.YoY, stringsAsFactors=FALSE),
data.frame(time=time, group="W.1", pch="W",
logUguess = log(Uguess.W.1+1),
spline=SplineDesign.W.1 %*% init.bU.W.1, stringsAsFactors=FALSE),
data.frame(time=time, group="W.YoY", pch="w",
logUguess = log(Uguess.W.YoY+1),
spline=SplineDesign.W.YoY %*% init.bU.W.YoY, stringsAsFactors=FALSE))
plot.data$logUguess[ plot.data$group=="H.YoY" & time <= (hatch.after.YoY+min(plot.data$time))] <- NA
plot.data$spline [ plot.data$group=="H.YoY" & time <= (hatch.after.YoY+min(plot.data$time))] <- NA
init.plot <- ggplot(data=plot.data, aes_(x=~time, color=~group, shape=~group))+
ggtitle(title, subtitle="Initial spline fit to estimated log U[i] for W and H and age 1 and YoY")+
geom_point(aes_(y=~logUguess), position=position_dodge(width=0.2))+
geom_line(aes_(y=~spline), position=position_dodge(width=0.2))+
xlab("Stratum")+ylab("log(U[i])")+
theme(legend.position=c(0,0), legend.justification=c(0,0))+
scale_x_continuous(breaks=seq(min(plot.data$time, na.rm=TRUE),max(plot.data$time, na.rm=TRUE),2))
if(save.output.to.files)ggsave(init.plot, filename=paste(prefix,"-initialU.pdf",sep=""), height=4, width=6, units="in")
#results$plots$plot.init <- init.plot # do this after running the MCMC chain (see end of function)
#browser()
# get the logitP=logit(P) covariate matrix ready
logitP.cov <- as.matrix(logitP.cov)
NlogitP.cov <- ncol(as.matrix(logitP.cov))
# initial values for the parameters of the model
init.vals <- function(){
# browser()
# Initial values for the probability of capture
init.logitP <- pmax(-10,pmin(10,logit((m2+1)/(n1+2)))) # initial capture rates based on observed recaptures
init.logitP[is.na(init.logitP)] <- -2 # those cases where initial probability is unknown
init.beta.logitP <- as.vector(stats::lm( init.logitP ~ logitP.cov-1)$coefficients)
init.beta.logitP[is.na(init.beta.logitP)] <- 0 # replace NA coefficients (e.g. from a rank-deficient lm fit) with 0
init.beta.logitP <- c(init.beta.logitP, 0) # add one extra element so that single beta is still written as a
# vector in the init files etc.
init.tauP <- 1/stats::var(init.logitP, na.rm=TRUE) # 1/variance of logit(p)'s (ignoring the covariates for now)
# initial values for the YoY spline coefficients
init.bU.W.YoY <- stats::lm(log(Uguess.W.YoY+1) ~ SplineDesign.W.YoY-1)$coefficients
init.bU.H.YoY <- stats::lm(log(Uguess.H.YoY[(hatch.after.YoY+1):Nstrata]+1) ~ SplineDesign.H.YoY[(hatch.after.YoY+1):Nstrata,]-1)$coefficients
# initial values for the Age1 spline coefficients
init.bU.W.1 <- stats::lm(log(Uguess.W.1+1) ~ SplineDesign.W.1-1)$coefficients
init.bU.H.1 <- stats::lm(log(Uguess.H.1+1) ~ SplineDesign.H.1-1)$coefficients
init.eU.W.YoY <- as.vector(log(Uguess.W.YoY+1)-SplineDesign.W.YoY%*%init.bU.W.YoY) # error terms set as difference between observed and predicted
init.etaU.W.YoY <- log(Uguess.W.YoY+1)
init.eU.W.1 <- as.vector(log(Uguess.W.1 +1)-SplineDesign.W.1 %*%init.bU.W.1 ) # error terms set as difference between observed and predicted
init.etaU.W.1 <- log(Uguess.W.1 +1)
init.eU.H.YoY <- as.vector(log(Uguess.H.YoY+1)-SplineDesign.H.YoY%*%init.bU.H.YoY) # error terms set as difference between observed and predicted
init.etaU.H.YoY <- log(Uguess.H.YoY+1)
# init.etaU.H.YoY[1:hatch.after.YoY] <- NA # these are never used.
init.eU.H.1 <- as.vector(log(Uguess.H.1 +1)-SplineDesign.H.1 %*%init.bU.H.1 ) # error terms set as difference between observed and predicted
init.etaU.H.1 <- log(Uguess.H.1 +1)
# variance of spline difference (use only the YoY wild fish to initialize)
sigmaU <- stats::sd( init.bU.W.YoY[b.notflat.W.YoY]-2*init.bU.W.YoY[b.notflat.W.YoY-1]+init.bU.W.YoY[b.notflat.W.YoY-2], na.rm=TRUE)
init.tauU <- 1/sigmaU^2
# variance of error in the U' over and above the spline fit (use only the YoY wild fish to initialize)
sigmaeU <- stats::sd(init.eU.W.YoY, na.rm=TRUE)
init.taueU <- 1/sigmaeU^2
# initialize the u2.A.YoY and u2.N.YoY where missing
init.u2.A.YoY <- u2.A.YoY
init.u2.A.YoY[ is.na(u2.A.YoY)] <- 100
init.u2.A.YoY[!is.na(u2.A.YoY)] <- NA
init.u2.A.1 <- u2.A.1
init.u2.A.1 [ is.na(u2.A.1 )] <- 100
init.u2.A.1 [!is.na(u2.A.1 )] <- NA
init.u2.N.YoY <- u2.N.YoY
init.u2.N.YoY[ is.na(u2.N.YoY)] <- 100
init.u2.N.YoY[!is.na(u2.N.YoY)] <- NA
init.u2.N.1 <- u2.N.1
init.u2.N.1 [ is.na(u2.N.1 )] <- 100
init.u2.N.1 [!is.na(u2.N.1 )] <- NA
list(logitP=init.logitP, beta.logitP=init.beta.logitP, tauP=init.tauP,
bU.W.YoY=init.bU.W.YoY, bU.H.YoY=init.bU.H.YoY, tauU=init.tauU, taueU=init.taueU,
etaU.W.YoY=init.etaU.W.YoY, etaU.H.YoY=init.etaU.H.YoY,
bU.W.1 =init.bU.W.1 , bU.H.1 =init.bU.H.1 ,
etaU.W.1=init.etaU.W.1, etaU.H.1=init.etaU.H.1)
}
# make a list of initial values
init.vals.list <- lapply(1:n.chains, function(x){init.vals()})
#browser()
## Generate data list
data.list <- list()
for(i in 1:length(datalist)){
data.list[[length(data.list)+1]] <-get(datalist[[i]])
}
names(data.list) <- as.list(datalist)
# Call the MCMC sampler
results <- run.MCMC(modelFile=model.file,
dataFile=data.file,
dataList=data.list,
initFiles=init.files,
initVals=init.vals.list,
parameters=parameters,
nChains=n.chains,
nIter=n.iter,
nBurnin=n.burnin,
nSims=n.sims,
overRelax=FALSE,
initialSeed=InitialSeed,
working.directory=working.directory,
debug=debug)
results$plots$plot.init <- init.plot # save initial plot to results object
return(results)
}
|
/scratch/gouwar.j/cran-all/cranData/BTSPAS/R/TimeStratPetersenDiagErrorWHChinook2.R
|
# 2021-10-23 CJS Added trunc.logitP to fix plotting problems with extreme logitP
# 2020-12-15 CJS Removed all references to sampfrac in the code
# 2020-11-07 CJS Allowed user to specify prior for beta coefficient for logitP
# 2018-12-19 CJS deprecation of sampling fraction
# 2018-12-06 CJS saved output to a textconnection that is saved
# 2018-12-02 CJS converted trace plots to ggplot
# 2018-12-01 CJS converted posterior plot to ggplot
# 2018-11-30 CJS converted acf plot to ggplot
# 2018-11-29 CJS fixed problem where print got cut off in large problems
# 2018-11-28 CJS removed reference of OpenBugs
# 2015-06-10 CJS converted gof plots to ggplot(). Bug fix.
# 2014-09-01 CJS conversion to JAGS
# 2012-08-30 CJS fixed problem with missing values in any() and all()
# 2011-06-13 CJS added p-values to results
# 2010-11-25 CJS pretty printing of final estimates of population sizes
# 2010-09-06 CJS forced input vectors to be vectors
# 2010-08-06 CJS added creation of traceplots
# 2010-08-03 CJS added version/date to final object
# 2010-03-29 CJS Inital version of code
#' @rdname TimeStratPetersenDiagErrorWHChinook_fit
#' @export TimeStratPetersenDiagErrorWHChinook2_fit
#' @importFrom stats runif var sd
TimeStratPetersenDiagErrorWHChinook2_fit<-
function( title="TSPDE-WHChinook2", prefix="TSPDE-WHChinook2-",
time, n1, m2,
u2.A.YoY, u2.N.YoY, u2.A.1, u2.N.1,
clip.frac.H.YoY, clip.frac.H.1, sampfrac=rep(1,length(u2.A.YoY)),
hatch.after.YoY=NULL,
bad.m2=c(), bad.u2.A.YoY=c(), bad.u2.N.YoY=c(), bad.u2.A.1=c(), bad.u2.N.1=c(),
logitP.cov=as.matrix(rep(1,length(n1))),
n.chains=3, n.iter=200000, n.burnin=100000, n.sims=2000,
tauU.alpha=1, tauU.beta=.05, taueU.alpha=1, taueU.beta=.05,
prior.beta.logitP.mean = c(logit(sum(m2,na.rm=TRUE)/sum(n1,na.rm=TRUE)),rep(0, ncol(as.matrix(logitP.cov))-1)),
prior.beta.logitP.sd = c(stats::sd(logit((m2+.5)/(n1+1)),na.rm=TRUE), rep(10, ncol(as.matrix(logitP.cov))-1)),
tauP.alpha=.001, tauP.beta=.001,
run.prob=seq(0,1,.1), # what percentiles of run timing are wanted
debug=FALSE, debug2=FALSE,
InitialSeed=ceiling(stats::runif(1,min=0,1000000)),
save.output.to.files=TRUE,
trunc.logitP=15) {
# Fit a Time Stratified Petersen model with diagonal entries and with smoothing on U allowing for random error,
# covariates for the the capture probabilities, and separating the YoY and Age1 wild vs hatchery fish
# The "diagonal entries" implies that no marked fish are recaptured outside the (time) stratum of release
#
version <- '2021-11-02'
options(width=200)
# Input parameters are
# title - title for the analysis
# prefix - prefix used for files created with the analysis results
# this should be in standard Windows format, e.g. JC-2002-ST-TSPDE
# to which is appended various suffixes for plots etc
# time - vector of stratum numbers. For example, 9:38 would indicate that the
# Trinity River system sampled weeks 9 to 38. If some values are omitted
# e.g. time=10 not present, this indicates sampling did not take place this
# week. The data are expanded and interpolation for the missing week takes place
# n1, m2 - the input data consisting of fish marked and released and then recaptured.
# The n1 and m2 are used to calibrate the trap
# u2.A.YoY - number of YoY unmarked fish with adipose fin clips
# u2.N.YoY - number of YoY unmarked fish with NO adipose fin clips
# All YoY wild fish have NO adipose fin clips; however, hatchery fish are a mixture
# of fish with adipose fin clips (a known percentage are marked) and unmarked fish.
# So u2.A.YoY MUST be hatchery fish.
# u2.N.YoY is a mixture of wild and hatchery fish.
# u2.A.1 - number of Age1 unmarked fish with adipose fin clips
# u2.N.1 - number of Age1 unmarked fish with NO adipose fin clips
# All Age1 wild fish have NO adipose fin clips; however, hatchery fish are a mixture
# of fish with adipose fin clips (a known percentage are marked) and unmarked fish.
# So u2.A.1 MUST be hatchery fish.
# u2.N.1 is a mixture of wild and hatchery fish.
# clip.frac.H.YoY - what fraction of the YoY hatchery fish are clipped?
# clip.frac.H.1 - what fraction of the Age1 hatchery fish are clipped (from last year's releases)?
# sampfrac - Deprecated **** DO NOT USE ANYMORE **** sampling fraction to adjust for how many days of the week was the trap operating
# hatch.after.YoY - julian week AFTER which YoY hatchery fish are released
# bad.m2 - list of julian numbers where the value of m2 is suspect.
# For example, the capture rate could be extremely low.
# These are set to NA prior to the call to JAGS
# bad.u2.A.YoY - list of julian weeks where the value of u2.A.YoY is suspect.
# These are set to NA prior to the call to JAGS
# bad.u2.N.YoY - list of julian weeks where the value of u2.N.YoY is suspect.
# These are set to NA prior to the call to JAGS
# bad.u2.A.1 - list of julian weeks where the value of u2.A.1 is suspect.
# These are set to NA prior to the call to JAGS
# bad.u2.N.1 - list of julian weeks where the value of u2.N.1 is suspect.
# These are set to NA prior to the call to JAGS
# logitP.cov - matrix of covariates for logit(P). If the strata times are "missing" some values, an intercept is assumed
# for the first element of the covariate matrix and 0 for the rest of the covariates.
# CAUTION - this MAY not be what you want to do. It is likely best to enter ALL strata
# if you have any covariates. The default, if not specified, is a constant (the mean logit)
# tauU.alpha, tauU.beta - parameters for the prior on variance in spline coefficients
# taueU.alpha, taueU.beta - parameters for the prior on variance in log(U) around fitted spline
# prior.beta.logitP.mean, prior.beta.logitP.sd - parameters for the prior on mean logit(P)'s [The intercept term]
# The other covariates are assigned priors of a mean of 0 and a sd of 10
# tauP.alpha, tauP.beta - parameters for the prior on 1/var of residual error in logit(P)'s
# run.prob - percentiles of run timing wanted
# debug - if TRUE, then this is a test run with very small MCMC chains run to test out the data
# and JAGS will run and stop waiting for you to exit and complete
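# Illustrative sketch only (argument values invented): a typical call to this exported
# function might look roughly like the following, where n1, m2, u2.A.YoY, u2.N.YoY, u2.A.1
# and u2.N.1 are user-supplied vectors with one entry per stratum in time.
# fit <- TimeStratPetersenDiagErrorWHChinook2_fit(
#    title="Example river", prefix="example-",
#    time=9:20,
#    n1=n1, m2=m2,
#    u2.A.YoY=u2.A.YoY, u2.N.YoY=u2.N.YoY,
#    u2.A.1  =u2.A.1,   u2.N.1  =u2.N.1,
#    clip.frac.H.YoY=0.25, clip.frac.H.1=0.25,
#    hatch.after.YoY=15,
#    n.chains=3, n.iter=20000, n.burnin=10000, n.sims=2000,   # shorter than the defaults, for illustration
#    save.output.to.files=FALSE)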
# force the input vectors to be vectors
time <- as.vector(time)
n1 <- as.vector(n1)
m2 <- as.vector(m2)
u2.A.YoY <- as.vector(u2.A.YoY)
u2.N.YoY <- as.vector(u2.N.YoY)
u2.A.1 <- as.vector(u2.A.1)
u2.N.1 <- as.vector(u2.N.1)
sampfrac <- as.vector(sampfrac)
# Do some basic error checking
# 1. Check that length of n1, m2, u2, sampfrac, time all match
if(stats::var(c(length(n1),length(m2),length(u2.A.YoY),length(u2.N.YoY),length(u2.A.1),length(u2.N.1),
length(sampfrac),length(time)))>0){
cat("***** ERROR ***** Lengths of n1, m2, u2.A.YoY, u2.N.YoY, u2.A.1, u2.N.1, sampfrac, time must all be equal. They are:",
length(n1)," ",length(m2)," ",length(u2.A.YoY)," ",length(u2.N.YoY)," ",length(u2.A.1)," ",length(u2.N.1),
length(sampfrac)," ",length(time),"\n")
return()}
if(!is.numeric(n1)){
cat("***** ERROR ***** n1 must be numeric. You have:",
paste(n1,collapse=", "),"\n")
return()}
if(any(is.na(n1))){
cat("***** ERROR ***** All values of n1 must not be missing. You have: ",
paste(n1,collapse=", "),"\n")
return()}
if(any(n1 < 0, na.rm=TRUE)){
cat("***** ERROR ***** All values of n1 must be non-negative. You have: ",
paste(n1,collapse=", "),"\n")
return()}
if(length(logitP.cov) %% length(n1) != 0){
cat("***** ERROR ***** Dimension of covariate vector doesn't match length of n1 etc They are:",
length(n1)," ",length(logitP.cov)," ",dim(logitP.cov),"\n")
return()}
# 2. Check that m2<= n1
if(any(m2>n1,na.rm=TRUE)){
cat("***** ERROR ***** m2 must be <= n1. The arguments are \n n1:",
paste(n1,collapse=","),"\n m2:",
paste(m2,collapse=","),"\n")
return()}
# 3. Elements of bad.m2, bad.u2.A.YoY, bad.u2.A.1, bad.u2.N.YoY, bad.u2.N.1, and hatch.after.YoY must belong to time
if(!all(bad.m2 %in% time,na.rm=TRUE)){
cat("***** ERROR ***** bad.m2 must be elements of strata identifiers. You entered \n bad.m2:",
paste(bad.m2,collapse=","),"\n Strata identifiers are \n time:",
paste(time, collapse=","), "\n")
return()}
if(!all(bad.u2.A.YoY %in% time,na.rm=TRUE)){
cat("***** ERROR ***** bad.u2.A.YoY must be elements of strata identifiers. You entered \n bad.u2.A.YoY:",
paste(bad.u2.A.YoY,collapse=","),"\n Strata identifiers are \n time:",
paste(time ,collapse=","), "\n")
return()}
if(!all(bad.u2.A.1 %in% time, na.rm=TRUE)){
cat("***** ERROR ***** bad.u2.A.1 must be elements of strata identifiers. You entered \n bad.u2.A.1:",
paste(bad.u2.A.1,collapse=","),"\n Strata identifiers are \n time:",
paste(time ,collapse=","), "\n")
return()}
if(!all(bad.u2.N.YoY %in% time, na.rm=TRUE)){
cat("***** ERROR ***** bad.u2.N.YoY must be elements of strata identifiers. You entered \n bad.u2.N.YoY:",
paste(bad.u2.N.YoY,collapse=","),"\n Strata identifiers are \n time:",
paste(time, collapse=","), "\n")
return()}
if(!all(bad.u2.N.1 %in% time, na.rm=TRUE)){
cat("***** ERROR ***** bad.u2.N.1 must be elements of strata identifiers. You entered \n bad.u2.N.1:",
paste(bad.u2.N.1,collapse=","),"\n Strata identifiers are \n time:",
paste(time, collapse=","), "\n")
return()}
if(!all(hatch.after.YoY %in% time, na.rm=TRUE)){
cat("***** ERROR ***** hatch.after.YoY must be elements of strata identifiers. You entered \n hatch.after.YoY:",
paste(hatch.after.YoY,collapse=","),"\n Strata identifiers are \n time:",
paste(time, collapse=","), "\n")
return()}
# 4. check that strata numbers are contiguous between smallest and largest value of the strata numbers
if( any(seq(min(time),max(time),1) != time,na.rm=TRUE)){
cat("***** ERROR ***** Strata numbers must be contiguous. \n You entered :", paste(time,collapse=","), "\n")
return()
}
# Check that that the prior.beta.logitP.mean and prior.beta.logitP.sd length=number of columns of covariates
logitP.cov <- as.matrix(logitP.cov)
if(!is.vector(prior.beta.logitP.mean) | !is.vector(prior.beta.logitP.sd)){
stop("prior.beta.logitP.mean and prior.beta.logitP.sd must be vectors")
}
if(!is.numeric(prior.beta.logitP.mean) | !is.numeric(prior.beta.logitP.sd)){
stop("prior.beta.logitP.mean and prior.beta.logitP.sd must be numeric")
}
if(length(prior.beta.logitP.mean) != ncol(logitP.cov) | length(prior.beta.logitP.sd) != ncol(logitP.cov)){
stop("prior.beta.logitP.mean and prior.beta.logitP.sd must be same length as number columns in covariate matrix")
}
# Deprecation of sampling fraction.
if(any(sampfrac != 1)){
cat("***** ERROR ***** Sampling fraction is depricated for any values other than 1. DO NOT USE ANYMORE. ")
return()
}
results.filename <- paste(prefix,"-results.txt",sep="")
stdout <- vector('character')
report <- textConnection('stdout', 'wr', local = TRUE)
sink(report)
cat(paste("Time Stratified Petersen with Diagonal recaptures, error in smoothed U, separating YoY and Age 1 wild and hatchery fish - ", date()))
cat("\nVersion:", version)
cat("\n\n", title, "Results \n\n")
cat("*** Raw data *** \n")
temp<- cbind(time, n1, m2, u2.A.YoY, u2.N.YoY, u2.A.1, u2.N.1, logitP.cov)
colnames(temp)<- c('time', 'n1','m2','u2.A.YoY', 'u2.N.YoY',"u2.A.1", "u2.N.1",
paste("logitPcov[", 1:ncol(as.matrix(logitP.cov)),"]",sep="") )
print(temp)
cat("\n\n")
cat("YoY Hatchery fish are released AFTER strata: ", hatch.after.YoY,"\n\n")
cat("YoY Hatchery fish are clipped at a rate of :", clip.frac.H.YoY,"\n\n")
cat("Age1 Hatchery fish are clipped at a rate of :", clip.frac.H.1 ,"\n\n")
cat("The following strata had m2 set to missing: ",
if(length(bad.m2)>0){bad.m2} else {" NONE"}, "\n")
cat("The following strata had u2.A.YoY set to missing: ",
if(length(bad.u2.A.YoY)>0){bad.u2.A.YoY} else {" NONE"}, "\n")
cat("The following strata had u2.N.YoY set to missing: ",
if(length(bad.u2.N.YoY)>0){bad.u2.N.YoY} else {" NONE"}, "\n")
cat("The following strata had u2.A.1 set to missing: ",
if(length(bad.u2.A.1)>0){bad.u2.A.1} else {" NONE"}, "\n")
cat("The following strata had u2.N.1 set to missing: ",
if(length(bad.u2.N.1)>0){bad.u2.N.1} else {" NONE"}, "\n")
# Pooled Petersen estimator over ALL of the data including when no releases take place, bad m2, bad.u2.A or bad.u2.N values.
cat("\n\n*** Pooled Petersen Estimate based on pooling over ALL strata***\n\n")
cat("Total n1=", sum(n1, na.rm=TRUE),"; m2=",sum(m2, na.rm=TRUE),"; u2=",
sum(u2.A.YoY, na.rm=TRUE)+sum(u2.N.YoY, na.rm=TRUE)+
sum(u2.A.1 , na.rm=TRUE)+sum(u2.N.1, na.rm=TRUE),"\n\n")
pp <- SimplePetersen(sum(n1, na.rm=TRUE), sum(m2, na.rm=TRUE),
sum(u2.A.YoY, na.rm=TRUE)+sum(u2.N.YoY, na.rm=TRUE)+
sum(u2.A.1 , na.rm=TRUE)+sum(u2.N.1 , na.rm=TRUE))
cat("Est U(total) ", format(round(pp$U.est),big.mark=",")," (SE ", format(round(pp$U.se), big.mark=","), ")n")
cat("Est N(total) ", format(round(pp$N.est),big.mark=",")," (SE ", format(round(pp$N.se), big.mark=","), ")\n\n\n")
# estimate for YoY clipped fish (hatchery) and expand by the clip fraction
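# (Hypothetical illustration: if the clipped-only Petersen estimate were 1,000 YoY fish and
# clip.frac.H.YoY were 0.25, the expanded hatchery YoY total would be 1,000/0.25 = 4,000.)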
cat("Total n1=", sum(n1, na.rm=TRUE),
"; m2=", sum(m2, na.rm=TRUE),
"; u2.A.YoY=", sum(u2.A.YoY, na.rm=TRUE),"\n")
cat("Clip fraction :", clip.frac.H.YoY, "\n\n")
pp <- SimplePetersen(
sum(n1, na.rm=TRUE),
sum(m2, na.rm=TRUE),
sum(u2.A.YoY, na.rm=TRUE))
cat("Est U.H.YoY(total) ", format(round(pp$U.est)/clip.frac.H.YoY,big.mark=","),
" (SE ", format(round(pp$U.se) /clip.frac.H.YoY,big.mark=","), ")\n")
cat("Est N.H.YoY(total) ", format(round(pp$N.est)/clip.frac.H.YoY,big.mark=","),
" (SE ", format(round(pp$N.se) /clip.frac.H.YoY,big.mark=","), ")\n\n\n")
# estimate for Age1 clipped fish (hatchery) and expand by the clip fraction
cat("Total n1=", sum(n1, na.rm=TRUE),
"; m2=", sum(m2, na.rm=TRUE),
"; u2.A.1=", sum(u2.A.1, na.rm=TRUE),"\n")
cat("Clip fraction :", clip.frac.H.1, "\n\n")
pp <- SimplePetersen(
sum(n1, na.rm=TRUE),
sum(m2, na.rm=TRUE),
sum(u2.A.1, na.rm=TRUE))
cat("Est U.H.1(total) ", format(round(pp$U.est)/clip.frac.H.1,big.mark=","),
" (SE ", format(round(pp$U.se) /clip.frac.H.1,big.mark=","), ")\n")
cat("Est N.H.1(total) ", format(round(pp$N.est)/clip.frac.H.1,big.mark=","),
" (SE ", format(round(pp$N.se) /clip.frac.H.1,big.mark=","), ")\n\n\n")
# estimate for YoY wild fish found by subtraction
cat("Total n1=", sum(n1, na.rm=TRUE),
"; m2=", sum(m2, na.rm=TRUE),
"; u2.W.YoY=", sum((u2.N.YoY+u2.A.YoY-u2.A.YoY/clip.frac.H.YoY), na.rm=TRUE),
"[Formed by interpolation based on clip rate]\n")
cat("Clip fraction :", clip.frac.H.YoY, "\n\n")
pp <- SimplePetersen(
sum(n1, na.rm=TRUE),
sum(m2, na.rm=TRUE),
sum((u2.N.YoY+u2.A.YoY-u2.A.YoY/clip.frac.H.YoY), na.rm=TRUE))
cat("Est U.W.YoY(total) ", format(round(pp$U.est),big.mark=","),
" (SE ", format(round(pp$U.se) ,big.mark=","), ") APPROXIMATE\n")
cat("Est N.W.YoY(total) ", format(round(pp$N.est),big.mark=","),
" (SE ", format(round(pp$N.se) ,big.mark=","), ") APPROXIMATE\n\n\n")
# estimate for Age1 wild fish found by subtraction
cat("Total n1=", sum(n1, na.rm=TRUE),
"; m2=", sum(m2, na.rm=TRUE),
"; u2.W.1=", sum((u2.N.1+u2.A.1-u2.A.1/clip.frac.H.1), na.rm=TRUE),
"[Formed by interpolation based on clip rate]\n")
cat("Clip fraction :", clip.frac.H.1, "\n\n")
pp <- SimplePetersen(
sum(n1, na.rm=TRUE),
sum(m2, na.rm=TRUE),
sum((u2.N.1+u2.A.1-u2.A.1/clip.frac.H.1), na.rm=TRUE))
cat("Est U.W.1(total) ", format(round(pp$U.est),big.mark=","),
" (SE ", format(round(pp$U.se) ,big.mark=","), ") APPROXIMATE\n")
cat("Est N.W.1(total) ", format(round(pp$N.est),big.mark=","),
" (SE ", format(round(pp$N.se) ,big.mark=","), ") APPROXIMATE\n\n\n")
# Obtain the Pooled Petersen estimator without excluding bad.m2, bad.u2.A.YoY, or bad.u2.N.YoY,
# bad.u2.A.1, or bad.u2.N.1 values but after removing 0 or NA values
select <- (n1>0) & (!is.na(n1)) & (!is.na(m2)) & (!is.na(u2.A.YoY)) & (!is.na(u2.N.YoY)) &
(!is.na(u2.A.1)) & (!is.na(u2.N.1))
cat("\n\n*** Pooled Petersen Estimate AFTER excluding bad m2, u2.A.YoY, u2.A.1, u2.N.YoY, or u2.N.1 values ***\n\n")
cat("The following strata are excluded because n1=0 or NA values in m2, u2.A.YoY, u2.N.YoY, u2.A.1, u2.N.1 :", time[!select],"\n\n")
temp.n1 <- n1 [select]
temp.m2 <- m2 [select]
temp.u2.A.YoY <- u2.A.YoY[select]
temp.u2.N.YoY <- u2.N.YoY[select]
temp.u2.A.1 <- u2.A.1 [select]
temp.u2.N.1 <- u2.N.1 [select]
cat("Total n1=", sum(temp.n1),"; m2=",sum(temp.m2),"; u2.YoY=",
sum(temp.u2.A.YoY+temp.u2.N.YoY+
temp.u2.A.1 +temp.u2.N.1, na.rm=TRUE),"\n\n")
pp <- SimplePetersen(sum(temp.n1), sum(temp.m2),
sum(temp.u2.A.YoY+temp.u2.N.YoY+
temp.u2.A.1 +temp.u2.N.1 , na.rm=TRUE))
cat("Est U(total) ", format(round(pp$U.est),big.mark=",")," (SE ", format(round(pp$U.se), big.mark=","), ")\n")
cat("Est N(total) ", format(round(pp$N.est),big.mark=",")," (SE ", format(round(pp$N.se), big.mark=","), ")\n\n\n")
# estimate for YoY clipped fish (hatchery) and expand by the clip fraction
cat("Total n1=", sum(temp.n1, na.rm=TRUE),
"; m2=", sum(temp.m2, na.rm=TRUE),
"; u2.A.YoY=", sum(temp.u2.A.YoY, na.rm=TRUE),"\n")
cat("Clip fraction :", clip.frac.H.YoY, "\n\n")
pp <- SimplePetersen(
sum(temp.n1, na.rm=TRUE),
sum(temp.m2, na.rm=TRUE),
sum(temp.u2.A.YoY, na.rm=TRUE))
cat("Est U.H.YoY(total) ", format(round(pp$U.est)/clip.frac.H.YoY,big.mark=","),
" (SE ", format(round(pp$U.se) /clip.frac.H.YoY,big.mark=","), ")\n")
cat("Est N.H.YoY(total) ", format(round(pp$N.est)/clip.frac.H.YoY,big.mark=","),
" (SE ", format(round(pp$N.se) /clip.frac.H.YoY,big.mark=","), ")\n\n\n")
# estimate for YoY wild fish
cat("Total n1=", sum(temp.n1, na.rm=TRUE),
"; m2=", sum(temp.m2, na.rm=TRUE),
"; u2.W.YoY=", sum((temp.u2.N.YoY+temp.u2.A.YoY-temp.u2.A.YoY/clip.frac.H.YoY), na.rm=TRUE),
"[Formed by interpolation based on clip rate]\n")
cat("Clip fraction YoY :", clip.frac.H.YoY, "\n\n")
pp <- SimplePetersen(
sum(temp.n1, na.rm=TRUE),
sum(temp.m2, na.rm=TRUE),
sum((temp.u2.N.YoY+temp.u2.A.YoY-temp.u2.A.YoY/clip.frac.H.YoY), na.rm=TRUE))
cat("Est U.W.YoY(total) ", format(round(pp$U.est),big.mark=","),
" (SE ", format(round(pp$U.se) ,big.mark=","), ") APPROXIMATE \n")
cat("Est N.W.YoY(total) ", format(round(pp$N.est),big.mark=","),
" (SE ", format(round(pp$N.se) ,big.mark=","), ") APPROXIMATE \n\n\n")
# estimate for Age1 clipped fish (hatchery) and expand by the clip fraction
cat("Total n1=", sum(temp.n1, na.rm=TRUE),
"; m2=", sum(temp.m2, na.rm=TRUE),
"; u2.A.1=", sum(temp.u2.A.1, na.rm=TRUE),"\n")
cat("Clip fraction :", clip.frac.H.1, "\n\n")
pp <- SimplePetersen(
sum(temp.n1, na.rm=TRUE),
sum(temp.m2, na.rm=TRUE),
sum(temp.u2.A.1, na.rm=TRUE))
cat("Est U.H.1(total) ", format(round(pp$U.est)/clip.frac.H.1,big.mark=","),
" (SE ", format(round(pp$U.se) /clip.frac.H.1,big.mark=","), ")\n\n\n")
cat("Est N.H.1(total) ", format(round(pp$N.est)/clip.frac.H.1,big.mark=","),
" (SE ", format(round(pp$N.se) /clip.frac.H.1,big.mark=","), ")\n\n\n")
# estimate for Age1 wild fish
cat("Total n1=", sum(temp.n1, na.rm=TRUE),
"; m2=", sum(temp.m2, na.rm=TRUE),
"; u2.W.1=", sum((temp.u2.N.1+temp.u2.A.1-temp.u2.A.1/clip.frac.H.1), na.rm=TRUE),
"[Formed by interpolation based on clip rate]\n")
cat("Clip fraction 1 :", clip.frac.H.1, "\n\n")
pp <- SimplePetersen(
sum(temp.n1, na.rm=TRUE),
sum(temp.m2, na.rm=TRUE),
sum((temp.u2.N.1+temp.u2.A.1-temp.u2.A.1/clip.frac.H.1), na.rm=TRUE))
cat("Est U.W.1(total) ", format(round(pp$U.est),big.mark=","),
" (SE ", format(round(pp$U.se) ,big.mark=","), ") APPROXIMATE \n")
cat("Est N.W.1(total) ", format(round(pp$N.est),big.mark=","),
" (SE ", format(round(pp$N.se) ,big.mark=","), ") APPROXIMATE \n\n\n")
# Set the bad values to missing
temp.n1 <- n1
temp.m2 <- m2
temp.u2.A.YoY <- u2.A.YoY
temp.u2.N.YoY <- u2.N.YoY
temp.u2.A.1 <- u2.A.1
temp.u2.N.1 <- u2.N.1
temp.m2 [bad.m2 -min(time)+1] <- NA
temp.u2.A.YoY[bad.u2.A.YoY-min(time)+1] <- NA
temp.u2.N.YoY[bad.u2.N.YoY-min(time)+1] <- NA
temp.u2.A.1 [bad.u2.A.1 -min(time)+1] <- NA
temp.u2.N.1 [bad.u2.N.1 -min(time)+1] <- NA
# Obtain Stratified-Petersen estimator for each stratum after the removal of bad values
cat("*** Stratified Petersen Estimator for each stratum AFTER removing bad m2 values after adjusting for sampling fration ***\n\n")
temp.u2 <- (temp.u2.A.YoY + temp.u2.N.YoY)
sp <- SimplePetersen(temp.n1, temp.m2, temp.u2)
temp <- cbind(time, temp.n1, temp.m2, temp.u2, round(sp$U.est), round(sp$U.se))
colnames(temp) <- c('time', 'n1','m2','(u2.A.YoY+u2.N.YoY)*adj', 'U.YoY[i]', 'SE(U[i])')
print(temp)
cat("\n")
cat("Est U.YoY(total) ", format(round(sum(sp$U.est, na.rm=TRUE)),big.mark=","),
" (SE ", format(round(sqrt(sum(sp$U.se^2, na.rm=TRUE))), big.mark=","), ")\n\n\n")
cat("*** Stratified Petersen Estimator for each stratum YoY Hatchery YoY AFTER removing bad m2 values after adjusting for sampling fration ***\n\n")
temp.u2 <- u2.A.YoY
sp <- SimplePetersen(temp.n1, temp.m2, temp.u2)
temp <- cbind(time, temp.n1, temp.m2, temp.u2, round(sp$U.est), round(sp$U.se))
colnames(temp) <- c('time', 'n1','m2','u2.A.YoY*adj', 'U.H.YoY[i]', 'SE(U[i])')
print(temp)
cat("** Estimates not adjusted for clip fraction above \n")
cat("Est U.H(total) ", format(round(sum(sp$U.est, na.rm=TRUE)/clip.frac.H.YoY),big.mark=","),
" (SE ", format(round(sqrt(sum(sp$U.se^2, na.rm=TRUE))/clip.frac.H.YoY), big.mark=","), ")\n\n\n")
cat("*** Stratified Petersen Estimator for each stratum YoY Wild YoY after removing bad m2 values after adjusting for sampling fration ***\n\n")
temp.u2 <- pmax(0,(u2.N.YoY+u2.A.YoY-u2.A.YoY/clip.frac.H.YoY))
sp <- SimplePetersen(temp.n1, temp.m2, temp.u2)
temp <- cbind(time, temp.n1, temp.m2, temp.u2, round(sp$U.est), round(sp$U.se))
colnames(temp) <- c('time', 'n1','m2','u2.W.YoY-est', 'U.W.YoY[i]', 'SE(U[i])')
print(temp)
cat("Est U.W.YoY(total) ", format(round(sum(sp$U.est, na.rm=TRUE)),big.mark=","),
" (SE ", format(round(sqrt(sum(sp$U.se^2, na.rm=TRUE))), big.mark=","), ") APPROXIMATE\n\n\n")
cat("*** Stratified Petersen Estimator for each stratum Age1 Hatchery AFTER removing bad m2 values after adjusting for sampling fration ***\n\n")
temp.u2 <- u2.A.1
sp <- SimplePetersen(temp.n1, temp.m2, temp.u2)
temp <- cbind(time, temp.n1, temp.m2, temp.u2, round(sp$U.est), round(sp$U.se))
colnames(temp) <- c('time', 'n1','m2','u2.A.1*adj', 'U.H.1[i]', 'SE(U[i])')
print(temp)
cat("** Estimates not adjusted for clip fraction above \n")
cat("Est U.H(total) ", format(round(sum(sp$U.est, na.rm=TRUE)/clip.frac.H.YoY),big.mark=","),
" (SE ", format(round(sqrt(sum(sp$U.se^2, na.rm=TRUE))/clip.frac.H.1), big.mark=","), ")\n\n\n")
cat("*** Stratified Petersen Estimator for each stratum Age1 Wild after removing bad m2 values after adjusting for sampling fration ***\n\n")
temp.u2 <- pmax(0,(u2.N.1+u2.A.1-u2.A.1/clip.frac.H.1))
sp <- SimplePetersen(temp.n1, temp.m2, temp.u2)
temp <- cbind(time, temp.n1, temp.m2, temp.u2, round(sp$U.est), round(sp$U.se))
colnames(temp) <- c('time', 'n1','m2','u2.W.1-est', 'U.W.1[i]', 'SE(U[i])')
print(temp)
cat("Est U.W.1(total) ", format(round(sum(sp$U.est, na.rm=TRUE)),big.mark=","),
" (SE ", format(round(sqrt(sum(sp$U.se^2, na.rm=TRUE))), big.mark=","), ") APPROXIMATE\n\n\n")
# Test if pooling can be done
cat("*** Test if pooled Petersen is allowable. [Check if marked fractions are equal] ***\n\n")
select <- (n1>0) & (!is.na(n1)) & (!is.na(temp.m2))
temp.n1 <- n1[select]
temp.m2 <- m2[select]
test <- TestIfPool( temp.n1, temp.m2)
cat("(Large Sample) Chi-square test statistic ", test$chi$statistic," has p-value", test$chi$p.value,"\n\n")
temp <- cbind(time[select],test$chi$observed, round(test$chi$expected,1), round(test$chi$residuals^2,1))
colnames(temp) <- c('time','n1-m2','m2','E[n1-m2]','E[m2]','X2[n1-m2]','X2[m2]')
print(temp)
cat("\n Be cautious of using this test in cases of small expected values. \n\n")
# Fix up any data problems and prepare for the call.
# Notice that for strata entries that are missing any covariate values, only an intercept is added
# Expand the entries in case of missing time entries
new.n1 <- rep(0, max(time)-min(time)+1)
new.m2 <- rep(0, max(time)-min(time)+1)
new.u2.A.YoY <- rep(0, max(time)-min(time)+1)
new.u2.N.YoY <- rep(0, max(time)-min(time)+1)
new.u2.A.1 <- rep(0, max(time)-min(time)+1)
new.u2.N.1 <- rep(0, max(time)-min(time)+1)
new.logitP.cov <- matrix(NA, nrow=max(time)-min(time)+1, ncol=ncol(as.matrix(logitP.cov)))
new.time <- min(time):max(time)
new.n1 [time-min(time)+1] <- n1
new.m2 [time-min(time)+1] <- m2
new.m2 [bad.m2-min(time)+1] <- NA # wipe out strata where m2 is known to be bad
new.u2.A.YoY[time-min(time)+1] <- u2.A.YoY
new.u2.A.YoY[bad.u2.A.YoY-min(time)+1] <- NA # wipe out strata where u2.A is known to be bad
new.u2.N.YoY[time-min(time)+1] <- u2.N.YoY
new.u2.N.YoY[bad.u2.N.YoY-min(time)+1] <- NA # wipe out strata where u2.N is known to be bad
new.u2.A.1 [time-min(time)+1] <- u2.A.1
new.u2.A.1 [bad.u2.A.1 -min(time)+1] <- NA # wipe out strata where u2.A is known to be bad
new.u2.N.1 [time-min(time)+1] <- u2.N.1
new.u2.N.1 [bad.u2.N.1 -min(time)+1] <- NA # wipe out strata where u2.N is known to be bad
new.logitP.cov[time-min(time)+1,]<- as.matrix(logitP.cov)
new.logitP.cov[ is.na(new.logitP.cov[,1]), 1] <- 1 # insert a 1 into first columns where not specified
new.logitP.cov[ is.na(new.logitP.cov)] <- 0 # other covariates are forced to zero not in column 1
# Check for and fix problems with the data
# If n1=m2=0, then set n1 to 1, and set m2<-NA
new.m2[new.n1==0] <- NA
new.n1[new.n1==0] <- 1
# Adjust data when a stratum has less than 100% sampling fraction to "estimate" the number
# of unmarked fish that were captured. It is not necessary to adjust the n1 and m2 values
# as these are used ONLY to estimate the capture efficiency.
# In reality, there should be a slight adjustment
# to the precision to account for this change, but this is not done.
# Similarly, if the sampling fraction is more than 1, the adjustment forces the total unmarked catch back to a single week.
new.u2.A.YoY <- round(new.u2.A.YoY)
new.u2.N.YoY <- round(new.u2.N.YoY)
new.u2.A.1 <- round(new.u2.A.1 )
new.u2.N.1 <- round(new.u2.N.1 )
# Print out the revised data
hatch.indicator <- rep(' ', max(time)-min(time)+1)
hatch.indicator[hatch.after.YoY-min(time)+1]<- '***'
cat("\n\n*** Revised data *** \n")
temp<- data.frame(time=new.time, n1=new.n1, m2=new.m2,
u2.A.YoY=new.u2.A.YoY, u2.N.YoY=new.u2.N.YoY, u2.A.1=new.u2.A.1, u2.N.1=new.u2.N.1,
new.logitP.cov=new.logitP.cov,
hatch.indicator=hatch.indicator)
print(temp)
cat("\n\n")
# Print out information on the prior distributions used
cat("\n\n*** Information on priors *** \n")
cat(" Parameters for prior on tauU (variance in spline coefficients: ", tauU.alpha, tauU.beta,
" which corresponds to a mean/std dev of 1/var of:",
round(tauU.alpha/tauU.beta,2),round(sqrt(tauU.alpha/tauU.beta^2),2),"\n")
cat(" Parameters for prior on taueU (variance of log(U) about spline: ",taueU.alpha, taueU.beta,
" which corresponds to a mean/std dev of 1/var of:",
round(taueU.alpha/taueU.beta,2),round(sqrt(taueU.alpha/taueU.beta^2),2),"\n")
cat(" Parameters for prior on beta.logitP[1] (intercept) (mean, sd): \n", cbind(round(prior.beta.logitP.mean,3), round(prior.beta.logitP.sd,5)),"\n")
cat(" Parameters for prior on tauP (residual variance of logit(P) after adjusting for covariates: ",tauP.alpha, tauP.beta,
" which corresponds to a mean/std dev of 1/var of:",
round(tauP.alpha/tauP.beta,2),round(sqrt(tauP.alpha/tauP.beta^2),2),"\n")
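# The printed summaries above convert the gamma(alpha, beta) priors on the precisions
# (1/variance) to a mean of alpha/beta and a standard deviation of sqrt(alpha/beta^2).
# Quick numerical check (illustration only, never executed):
if (FALSE) {
   alpha.ex <- 1; beta.ex <- 0.05                            # e.g. the default tauU prior
   c(mean = alpha.ex/beta.ex, sd = sqrt(alpha.ex/beta.ex^2)) # mean 20, sd 20 on the precision scale
}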
cat("\n\nInitial seed for this run is: ",InitialSeed, "\n")
sink()
if (debug2) {
cat("\nprior to formal call to TimeStratPetersenDiagErrorWHChinook\n")
browser()
}
if (debug)
{results <- TimeStratPetersenDiagErrorWHChinook2(title=title, prefix=prefix,
time=new.time, n1=new.n1, m2=new.m2,
u2.A.YoY=new.u2.A.YoY, u2.N.YoY=new.u2.N.YoY, u2.A.1=new.u2.A.1, u2.N.1=new.u2.N.1,
hatch.after.YoY=hatch.after.YoY-min(time)+1,
clip.frac.H.YoY=clip.frac.H.YoY, clip.frac.H.1=clip.frac.H.1,
logitP.cov=new.logitP.cov,
n.chains=3, n.iter=10000, n.burnin=5000, n.sims=500, # set to low values for debugging only
prior.beta.logitP.mean=prior.beta.logitP.mean,
prior.beta.logitP.sd =prior.beta.logitP.sd,
tauU.alpha=tauU.alpha, tauU.beta=tauU.beta, taueU.alpha=taueU.alpha, taueU.beta=taueU.beta,
debug=debug, InitialSeed=InitialSeed,
save.output.to.files=save.output.to.files)
} else #notice R syntax requires { before the else
{results <- TimeStratPetersenDiagErrorWHChinook2(title=title, prefix=prefix,
time=new.time, n1=new.n1, m2=new.m2,
u2.A.YoY=new.u2.A.YoY, u2.N.YoY=new.u2.N.YoY, u2.A.1=new.u2.A.1, u2.N.1=new.u2.N.1,
hatch.after.YoY=hatch.after.YoY-min(time)+1,
clip.frac.H.YoY=clip.frac.H.YoY, clip.frac.H.1=clip.frac.H.1,
logitP.cov=new.logitP.cov,
n.chains=n.chains, n.iter=n.iter, n.burnin=n.burnin, n.sims=n.sims,
prior.beta.logitP.mean=prior.beta.logitP.mean,
prior.beta.logitP.sd =prior.beta.logitP.sd,
tauU.alpha=tauU.alpha, tauU.beta=tauU.beta, taueU.alpha=taueU.alpha, taueU.beta=taueU.beta,
InitialSeed=InitialSeed,
save.output.to.files=save.output.to.files)
}
# Now to create the various summary tables of the results
# A plot of the initial and fitted values for YoY, Age 1, and Hatchery chinook
# In the diagonal case, time, n1, m2, u2 are the same length
Nstrata <- length(n1)
plot.df <- data.frame(time =new.time)
# adjust the u2 for the clipping fractions
plot.df$n1 <- new.n1
plot.df$m2 <- new.m2
plot.df$u2.H.YoY <- new.u2.A.YoY/clip.frac.H.YoY # only a portion of the hatchery fish are clipped
plot.df$u2.N.YoY <- new.u2.N.YoY
plot.df$u2.H.1 <- new.u2.A.1 /clip.frac.H.1 # only a portion of the hatchery fish are clipped
plot.df$u2.N.1 <- new.u2.N.1
# estimate how many wild fish are present given that only a fraction of hatchery fish are marked
plot.df$u2.W.1 <- pmax(plot.df$u2.N.1 - plot.df$u2.H.1 *(1-clip.frac.H.1) ,0) # subtract the estimated number of unclipped hatchery fish
plot.df$u2.W.YoY <- pmax(plot.df$u2.N.YoY - plot.df$u2.H.YoY*(1-clip.frac.H.YoY),0) # subtract the estimated number of unclipped hatchery fish
plot.df$u2.H.YoY[is.na(plot.df$u2.H.YoY)] <- 1 # in case of missing values
plot.df$u2.W.YoY[is.na(plot.df$u2.W.YoY)] <- 1 # in case of missing values
plot.df$u2.H.1 [is.na(plot.df$u2.H.1) ] <- 1 # in case of missing values
plot.df$u2.W.1 [is.na(plot.df$u2.W.1)] <- 1 # in case of missing values
get.est <- function(est.name, plot.df, hatch.after.YoY, results){
# get the initial estimates, and extract from the results data structure and put into a data frame
est.df <- data.frame(group=est.name, time=plot.df$time)
avgP <- sum(plot.df$m2,na.rm=TRUE)/sum(plot.df$n1, na.rM=TRUE)
#browser()
# initial guess
est.df$logUguess <- log(1+pmax( (plot.df[, paste("u2.",est.name,sep="")]+1)*(plot.df$n1+2)/(plot.df$m2+1),
plot.df[, paste("u2.",est.name,sep="")]/avgP, na.rm=TRUE))
# extract estimates from results
results.row.names <- rownames(results$summary)
est.row.index <- grep(paste("etaU.",est.name, sep=""), results.row.names)
etaU <- results$summary[est.row.index,]
est.df$logU =etaU[,"mean"]
est.df$logUlcl =etaU[,"2.5%"]
est.df$logUucl =etaU[,"97.5%"]
# extract the spline
logUne.row.index <- grep(paste("logUne.",est.name,sep=""), results.row.names)
est.df$spline <- results$summary[logUne.row.index,"mean"]
if(est.name=="H.YoY"){
est.df$logUguess[1:(hatch.after.YoY-min(plot.df$time)+1)]<- NA
est.df$logU [1:(hatch.after.YoY-min(plot.df$time)+1)]<- NA
est.df$logUlcl [1:(hatch.after.YoY-min(plot.df$time)+1)]<- NA
est.df$logUucl [1:(hatch.after.YoY-min(plot.df$time)+1)]<- NA
est.df$spline [1:(hatch.after.YoY-min(plot.df$time)+1)]<- NA
}
est.df
}
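# Inside get.est() the initial guess for log(U) is the larger of two simple expansions of the
# unmarked count: (u2+1)*(n1+2)/(m2+1), a bias-adjusted Petersen-type expansion, and u2/avgP,
# the count expanded by the average capture probability. Illustrative check
# (hypothetical numbers, never executed):
if (FALSE) {
   n1.ex <- 500; m2.ex <- 40; u2.ex <- 300; avgP.ex <- m2.ex/n1.ex
   log(1 + max((u2.ex+1)*(n1.ex+2)/(m2.ex+1), u2.ex/avgP.ex))  # approx. log of the stratum abundance
}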
plot.data <-rbind( get.est("H.YoY",plot.df, hatch.after.YoY, results),
get.est("H.1" ,plot.df, hatch.after.YoY, results),
get.est("W.YoY",plot.df, hatch.after.YoY, results),
get.est("W.1" ,plot.df, hatch.after.YoY, results))
# add limits to the plot to avoid non-monotone secondary axis problems with extreme values
plot.data$logUguess <- pmax(-10 , pmin(20, plot.data$logUguess))
plot.data$logU <- pmax(-10 , pmin(20, plot.data$logU ))
plot.data$logUlcl <- pmax(-10 , pmin(20, plot.data$logUlcl ))
plot.data$logUucl <- pmax(-10 , pmin(20, plot.data$logUucl ))
plot.data$spline<- pmax(-10 , pmin(20, plot.data$spline))
fit.plot <- ggplot(data=plot.data, aes_(x=~time, color=~group))+
ggtitle(title, subtitle="Fitted spline curve with 95% credible intervals")+
geom_point(aes_(y=~logUguess), shape=16, position=position_dodge(width=.2))+ # guesses for population
geom_point(aes_(y=~logU), shape=19, position=position_dodge(width=.2))+
geom_line (aes_(y=~logU), position=position_dodge(width=.2))+
geom_errorbar(aes_(ymin=~logUlcl, ymax=~logUucl), width=.1, position=position_dodge(width=.2))+
geom_line(aes_(y=~spline),linetype="dashed", position=position_dodge(width=.2)) +
xlab("Time Index\nFitted/Smoothed/Raw values plotted for W(black) and H(blue)")+ylab("log(U[i]) + 95% credible interval")+
theme(legend.justification = c(0, 0), legend.position = c(0, 0))+
scale_color_discrete(name="Group")+
scale_x_continuous(breaks=seq(min(plot.data$time, na.rm=TRUE),max(plot.data$time, na.rm=TRUE),2))+
scale_y_continuous(sec.axis = sec_axis(~ exp(.), name="U + 95% credible interval",
breaks=c(1,10,20,50,
100,200,500,
1000,2000,5000,
10000,20000, 50000,
100000,200000, 500000,
1000000,2000000,5000000,10000000),
labels = scales::comma))
if(save.output.to.files)ggsave(plot=fit.plot, filename=paste(prefix,"-fit.pdf",sep=""), height=6, width=10, units="in")
results$plots$fit.plot <- fit.plot
# plot logit P vs time
logitP.plot <- plot_logitP(title=title, time=new.time, n1=new.n1, m2=new.m2,
u2=u2.A.YoY+u2.N.YoY+u2.A.1+u2.N.1, logitP.cov=new.logitP.cov, results=results,
trunc.logitP=trunc.logitP)
if(save.output.to.files)ggsave(plot=logitP.plot, filename=paste(prefix,"-logitP.pdf",sep=""), height=6, width=10, units="in")
results$plots$logitP.plot <- logitP.plot
# Look at autocorrelation function for Utot.W.YoY, Utot.H.YoY, Utot.W.1, Utot.H.1
mcmc.sample1<- data.frame(parm="Utot.W.YoY", sample=results$sims.matrix[,"Utot.W.YoY"], stringsAsFactors=FALSE)
mcmc.sample2<- data.frame(parm="Utot.H.YoY", sample=results$sims.matrix[,"Utot.H.YoY"], stringsAsFactors=FALSE)
mcmc.sample3<- data.frame(parm="Utot.W.1", sample=results$sims.matrix[,"Utot.W.1"], stringsAsFactors=FALSE)
mcmc.sample4<- data.frame(parm="Utot.H.1", sample=results$sims.matrix[,"Utot.H.1"], stringsAsFactors=FALSE)
mcmc.sample <- rbind(mcmc.sample1, mcmc.sample2, mcmc.sample3, mcmc.sample4)
acf.Utot.plot <- plot_acf(mcmc.sample)
if(save.output.to.files)ggsave(plot=acf.Utot.plot, filename=paste(prefix,"-Utot-acf.pdf",sep=""), height=4, width=6, units="in")
results$plots$acf.Utot.plot <- acf.Utot.plot
# Look at the shape of the posterior distribution
mcmc.sample1<- data.frame(parm="Utot.W.YoY", sample=results$sims.matrix[,"Utot.W.YoY"], stringsAsFactors=FALSE)
mcmc.sample2<- data.frame(parm="Utot.H.YoY", sample=results$sims.matrix[,"Utot.H.YoY"], stringsAsFactors=FALSE)
mcmc.sample3<- data.frame(parm="Utot.W.1", sample=results$sims.matrix[,"Utot.W.1"], stringsAsFactors=FALSE)
mcmc.sample4<- data.frame(parm="Utot.H.1", sample=results$sims.matrix[,"Utot.H.1"], stringsAsFactors=FALSE)
mcmc.sample <- rbind(mcmc.sample1, mcmc.sample2, mcmc.sample3, mcmc.sample4)
post.Utot.plot <- plot_posterior(mcmc.sample, ncol=2)
post.Utot.plot
if(save.output.to.files)ggsave(plot=post.Utot.plot, filename=paste(prefix,"-Utot-posterior.pdf",sep=""),
height=ifelse(length(unique(mcmc.sample$parm))<=2,4,6), width=6, units="in")
results$plots$post.Utot.plot <- post.Utot.plot
# make the Bayesian predictive distribution (Bayesian p-value plots)
#browser()
discrep <-PredictivePosterior.TSPDE.WHCH2 (time, new.n1, new.m2, # get the discrepancy measures
new.u2.A.YoY, new.u2.N.YoY, new.u2.A.1, new.u2.N.1,
clip.frac.H.YoY, clip.frac.H.1,
expit(results$sims.list$logitP),
round(results$sims.list$U.W.YoY),
round(pmax(results$sims.list$U.H.YoY,0)),
round(results$sims.list$U.W.1),
round(results$sims.list$U.H.1),
hatch.after.YoY) #don't forget that hatchery fish is 0 until hatch.after
#browser()
gof <- PredictivePosteriorPlot.TSPDE.WHCH2 (discrep)
if(save.output.to.files){
pdf(file=paste(prefix,"-GOF.pdf",sep=""))
plyr::l_ply(gof, function(x){plot(x)})
dev.off()
}
results$plots$gof.plot <- gof
# create traceplots of logU, U, and logitP (along with R value) to look for non-convergence
# the plot_trace will return a list of plots (one for each page as needed)
varnames <- names(results$sims.array[1,1,]) # extract the names of the variables
# Trace plots of logitP
trace.plot <- plot_trace(title=title, results=results, parms_to_plot=varnames[grep("^logitP", varnames)])
if(save.output.to.files){
pdf(file=paste(prefix,"-trace-logitP.pdf",sep=""))
plyr::l_ply(trace.plot, function(x){plot(x)})
dev.off()
}
results$plots$trace.logitP.plot <- trace.plot
# now for the traceplots of logU (etaU), Utot, and Ntot
trace.plot <- plot_trace(title=title, results=results, parms_to_plot=varnames[c(grep("Utot",varnames), grep("Ntot",varnames), grep("^etaU", varnames))])
if(save.output.to.files){
pdf(file=paste(prefix,"-trace-logU.pdf",sep=""))
plyr::l_ply(trace.plot, function(x){plot(x)})
dev.off()
}
results$plots$trace.logU.plot <- trace.plot
sink(report, append=TRUE)
# What was the initial seed
cat("\n\n*** Initial Seed for this run ***: ", results$Seed.initial,"\n")
# Global summary of results
cat("\n\n*** Summary of MCMC results *** \n\n")
save.max.print <- getOption("max.print")
options(max.print=.Machine$integer.max)
print(results, digits.summary=3)#, max=.Machine$integer.max)
options(max.print=save.max.print)
# Give an alternate computation of DIC based on the variance of the deviance
# Refer to http://www.mrc-bsu.cam.ac.uk/bugs/winbugs/DIC-slides.pdf for derivation and why
# this alternate method may be superior to that automatically computed by WinBugs/OpenBugs
cat("\n\n*** Alternate DIC computation based on p_D = var(deviance)/2 \n")
results.row.names <- rownames(results$summary)
deviance.row.index<- grep("deviance", results.row.names)
deviance <- results$summary[deviance.row.index,]
p.D <- deviance["sd"]^2/2
dic <- deviance["mean"]+p.D
cat(" D-bar: ", deviance["mean"],"; var(dev): ", deviance["sd"]^2,
"; p.D: ", p.D, "; DIC: ", dic)
# Summary of population sizes. Add pretty printing for the final results
cat("\n\n\n\n*** Summary of Unmarked Population Size ***\n")
cat("Wild YoY \n")
temp<- results$summary[ grep("Utot.W.YoY", rownames(results$summary)),]
old.Rhat <- temp["Rhat"]
temp<- formatC(temp, big.mark=",", format="d")
temp["Rhat"] <- formatC(old.Rhat,digits=2,format="f",flag="#")
print(temp, quote=FALSE)
cat("\n\nWild Age 1 \n")
temp<-results$summary[ grep("Utot.W.1" , rownames(results$summary)),]
old.Rhat <- temp["Rhat"]
temp<- formatC(temp, big.mark=",", format="d")
temp["Rhat"] <- formatC(old.Rhat,digits=2,format="f",flag="#")
print(temp, quote=FALSE)
cat("\n\nHatchery YoY\n")
temp<- results$summary[ grep("Utot.H.YoY", rownames(results$summary)),]
old.Rhat <- temp["Rhat"]
temp<- formatC(temp, big.mark=",", format="d")
temp["Rhat"] <- formatC(old.Rhat,digits=2,format="f",flag="#")
print(temp, quote=FALSE)
cat("\n\nHatchery Age 1\n")
temp<-results$summary[ grep("Utot.H.1" , rownames(results$summary)),]
old.Rhat <- temp["Rhat"]
temp<- formatC(temp, big.mark=",", format="d")
temp["Rhat"] <- formatC(old.Rhat,digits=2,format="f",flag="#")
print(temp, quote=FALSE)
cat("\n\nGrand Total\n")
temp<- results$summary[ rownames(results$summary) == "Utot",]
old.Rhat <- temp["Rhat"]
temp<- formatC(temp, big.mark=",", format="d")
temp["Rhat"] <- formatC(old.Rhat,digits=2,format="f",flag="#")
print(temp, quote=FALSE)
#browser()
time.H <- time>hatch.after.YoY
cat("\n\n\n\n*** Summary of Quantiles of Run Timing.Wild *** \n")
cat( " This is based on the sample weeks provided and the U.W.YoY[i] values \n")
q <- RunTime(time=time, U=results$sims.list$U.W.YoY, prob=run.prob)
temp <- rbind(apply(q,2,mean), apply(q,2,sd))
rownames(temp) <- c("Mean", "Sd")
print(round(temp,2))
cat( "\n This is based on the sample weeks provided and the U.W.1[i] values \n")
q <- RunTime(time=time, U=results$sims.list$U.W.1, prob=run.prob)
temp <- rbind(apply(q,2,mean), apply(q,2,sd))
rownames(temp) <- c("Mean", "Sd")
print(round(temp,2))
cat("\n\n*** Summary of Quantiles of Run Timing.Hatchery *** \n")
cat( " This is based on the sample weeks provided and the U.H.YoY[i] values \n")
q <- RunTime(time=time[time.H], U=results$sims.list$U.H.YoY[,time.H], prob=run.prob)
temp <- rbind(apply(q,2,mean), apply(q,2,sd))
rownames(temp) <- c("Mean", "Sd")
print(round(temp,2))
cat( "\n This is based on the sample weeks provided and the U.H.1[i] values \n")
q <- RunTime(time=time, U=results$sims.list$U.H.1, prob=run.prob)
temp <- rbind(apply(q,2,mean), apply(q,2,sd))
rownames(temp) <- c("Mean", "Sd")
print(round(temp,2))
# Add the runtiming to the output object
results$runTime <- temp
cat("\n\n")
cat(paste("*** end of fit *** ", date()))
sink()
# save the report to a file?
if(save.output.to.files)writeLines(stdout, results.filename)
results$report <- stdout
# add some of the raw data to the bugs object for simplicity in referencing it later
results$data <- list( time=time, n1=n1, m2=m2,
u2.A.YoY=u2.A.YoY, u2.N.YoY=u2.N.YoY, u2.A.1=u2.A.1, u2.N.1=u2.N.1,
clip.frac.H.YoY=clip.frac.H.YoY, clip.frac.H.1=clip.frac.H.1,
hatch.after.YoY=hatch.after.YoY,
bad.m2=bad.m2,
bad.u2.A.YoY=bad.u2.A.YoY, bad.u2.N.YoY=bad.u2.N.YoY,
bad.u2.A.1=bad.u2.A.1, bad.u2.N.1=bad.u2.N.1,
logitP.cov=logitP.cov,
version=version, date_run=date(), title=title)
return(results)
} # end of function
|
/scratch/gouwar.j/cran-all/cranData/BTSPAS/R/TimeStratPetersenDiagErrorWHChinook2_fit.R
|
# 2021-10-23 CJS Added trunc.logitP parameter to avoid plotting problems
# 2020-12-15 CJS Removed all uses of sampfrac in the code
# 2020-11-07 CJS Allowed user to specify prior for beta coefficient for logitP
# 2018-12-19 CJS deprecated use of sampling fraction
# 2018-12-06 CJS saved report to a text connections
# 2018-12-05 CJS converted final spline plot to ggplot
# 2018-12-02 CJS converted trace plots to ggplot
# 2018-12-01 CJS changed posterior plot to ggplot
# 2018-11-30 CJS changed acf to ggplot
# 2018-11-28 CJS fixed issued with printing of results getting cut off
# 2018-11-27 CJS Removed reference to OpenBugs
# 2015-06-10 CJS Fixed error in Bayesian p-value plots. Converted them to ggplot
# 2014-09-01 CJS converted to JAGS
# 2012-08-30 CJS fixed problem in any() and all() in error checking with NAs
# 2011-06-13 CJS add p-values to results
# 2010-11-25 CJS pretty printing of final population estimates
# 2010-09-06 CJS forced input vectors to be vectors
# 2010-08-06 CJS added creation of trace plots to output
# 2010-08-03 CJS added version/date to final data structure
# 2010-03-12 CJS added n.chains etc to calling arguments of the _fit function
# 2009-12-08 CJS added some basic error checking on arguments
# 2009-12-05 CJS added title to argument list
# 2009-12-01 CJS added openbugs/winbugs to argument list; some basic error checking
#' Wrapper (*_fit) to fit the Time Stratified Petersen Estimator
#' with Diagonal Entries and separating Wild from Hatchery Chinook function.
#'
#' Takes the number of marked fish released, the number of recaptures, and the
#' number of unmarked fish and uses Bayesian methods to fit a spline
#' through the population numbers and a hierarchical model for the trap
#' efficiencies over time. The output is written to files and an MCMC object
#' is also created with samples from the posterior.
#'
#' Normally use the *_fit to pass the data to the fitting function.
#'
#'
#' @aliases TimeStratPetersenDiagErrorWHChinook_fit TimeStratPetersenDiagErrorWHChinook2_fit
#'
#' @template title
#' @template prefix
#' @template time
#' @template n1
#' @param m2 A numeric vector of the number of marked fish from n1 that are
#' recaptured in each time stratum. All recaptures take place within the
#' stratum of release. Use the \code{\link{TimeStratPetersenNonDiagError_fit}}
#' function for cases where recaptures take place outside the stratum of
#' release.
#' @param u2.A A numeric vector of the number of unmarked fish with adipose
#' clips captured in each stratum.
#' @param u2.N A numeric vector of the number of unmarked fish with NO-adipose
#' clips captured in each stratum.
#' @param u2.A.YoY,u2.N.YoY Number of YoY unmarked fish with/without adipose fin clips
#' All YoY wild fish have NO adipose fin clips; however, hatchery fish are a mixture
#' of fish with adipose fin clips (a known percentage are marked) and unmarked fish.
#' So u2.A.YoY MUST be hatchery fish.
#' u2.N.YoY is a mixture of wild and hatchery fish.
#' @param u2.A.1,u2.N.1 Number of Age1 unmarked fish with/without adipose fin clips
#' All Age1 wild fish have NO adipose fin clips; however, hatchery fish are a mixture
#' of fish with adipose fin clips (a known percentage are marked) and unmarked fish.
#' So u2.A.1 MUST be hatchery fish.
#' u2.N.1 is a mixture of wild and hatchery fish.
#' @param clip.frac.H.YoY,clip.frac.H.1 Fraction of the YoY / Age1 (from last year's releases) hatchery fish that are clipped (between 0 and 1)
#' @param clip.frac.H A numeric value for the fraction of the hatchery fish
#' that have the adipose fin clipped (between 0 and 1).
#' @template sampfrac
#' @param hatch.after A numeric vector with elements belonging to \code{time}.
#' At which point do hatchery fish arrive? They arrive in the immediate stratum
#' AFTER these entries.
#' @param hatch.after.YoY A numeric vector with elements belonging to
#' \code{time}. At which point do YoY hatchery fish arrive? They arrive in the
#' immediate stratum AFTER these entries.
#' @template bad.n1
#' @template bad.m2
#' @param bad.u2.N A numeric vector with elements belonging to \code{time}. In
#' some cases, something goes wrong in the stratum, and the number of unmarked
#' fish with NO adipose fin clip should be ignored.
#' @param bad.u2.A.YoY,bad.u2.N.YoY List of julian weeks where the value of u2.A.YoY/u2.N.YoY is suspect.
#' These are set to NA prior to the fit.
#' @param bad.u2.A A numeric vector with elements belonging to \code{time}. In
#' some cases, something goes wrong in the stratum, and the number of unmarked
#' fish with an adipose fin clip should be ignored.
#' @param bad.u2.A.1,bad.u2.N.1 List of julian weeks where the value of u2.A.1/u2.N.1 is suspect.
#' These are set to NA prior to the fit.
#' @template logitP.cov
#' @template mcmc-parms
#' @template tauU.alpha.beta
#' @template taueU.alpha.beta
#' @template prior.beta.logitP.mean.sd
#' @template tauP.alpha.beta
#' @template run.prob
#' @template debug
#' @template InitialSeed
#' @template save.output.to.files
#' @template trunc.logitP
#' @return An MCMC object with samples from the posterior distribution. A
#' series of graphs and text file are also created in the working directory.
#' @template author
#' @template references
#' @keywords ~models ~smooth
#' @examples
#'
#' ##---- See the vignettes for examples on how to run this analysis.
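#'
#' ## A minimal, hypothetical sketch of a call (illustrative placeholder values,
#' ## NOT real data); see the vignettes for complete, worked analyses.
#' \dontrun{
#' fit <- TimeStratPetersenDiagErrorWHChinook_fit(
#'          title="Example", prefix="example",
#'          time=9:14,
#'          n1  =c(100, 120,  90, 110, 130, 100),
#'          m2  =c( 10,  15,   8,  12,  14,  11),
#'          u2.A=c( 20,  25,  18,  30,  35,  28),
#'          u2.N=c(200, 220, 180, 240, 260, 230),
#'          clip.frac.H=0.25,
#'          hatch.after=11,
#'          n.chains=3, n.iter=20000, n.burnin=10000, n.sims=500)
#' }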
#'
#' @export TimeStratPetersenDiagErrorWHChinook_fit
#' @importFrom stats runif var sd
TimeStratPetersenDiagErrorWHChinook_fit<-
function( title="TSPDE-WHChinook", prefix="TSPDE-WHChinook-",
time, n1, m2, u2.A, u2.N, clip.frac.H, sampfrac=rep(1,length(u2.A)),
hatch.after=NULL,
bad.n1=c(), bad.m2=c(), bad.u2.A=c(), bad.u2.N=c(),
logitP.cov=as.matrix(rep(1,length(n1))),
n.chains=3, n.iter=200000, n.burnin=100000, n.sims=2000,
tauU.alpha=1, tauU.beta=.05, taueU.alpha=1, taueU.beta=.05,
prior.beta.logitP.mean = c(logit(sum(m2,na.rm=TRUE)/sum(n1,na.rm=TRUE)),rep(0, ncol(as.matrix(logitP.cov))-1)),
prior.beta.logitP.sd = c(stats::sd(logit((m2+.5)/(n1+1)),na.rm=TRUE), rep(10, ncol(as.matrix(logitP.cov))-1)),
tauP.alpha=.001, tauP.beta=.001,
run.prob=seq(0,1,.1), # what percentiles of run timing are wanted
debug=FALSE, debug2=FALSE,
InitialSeed=ceiling(stats::runif(1,min=0, max=1000000)),
save.output.to.files=TRUE,
trunc.logitP=15) {
# Fit a Time Stratified Petersen model with diagonal entries and with smoothing on U allowing for random error,
# covariates for the the capture probabilities, and separating the wild vs hatchery fish
# The "diagonal entries" implies that no marked fish are recaptured outside the (time) stratum of release
#
version <- '2021-11-02'
options(width=200)
# Input parameters are
# prefix - prefix used for files created with the analysis results
# this should be in standard Window's format, eg. JC-2002-ST-TSPDE
# to which is appended various suffixes for plots etc
# time - vector of stratum numbers. For example, 9:38 would indicate that the
# Trinity River system sampled weeks 9 to 38. If some values are omitted
# e.g. time=10 not present, this indicates sampling did not take place this
# week. The data are expanded and interpolation for the missing week takes place
# n1, m2 - the input data consisting of fish marked and released and then recaptured.
# The n1 and m2 are used to calibrate the trap
# u2.A - number of unmarked fish with adipose fin clips
# u2.N - number of unmarked fish with NO adipose fin clips
# All wild fish have NO adipose fin clips; however, hatchery fish are a mixture
# of fish with adipose fin clips (a known percentage are marked) and unmarked fish.
# So u2.A MUST be hatchery fish.
# u2.N is a mixture of wild and hatchery fish.
# clip.frac.H - what fraction of the hatchery fish are clipped?
# sampfrac - Deprecated. DO NOT USE ANYMORE.
# hatch.after - julian week AFTER which hatchery fish are released
# bad.m2 - list of julian numbers where the value of m2 is suspect.
# For example, the capture rate could be extremely low.
# These are set to NA prior to the call to JAGS
# bad.u2.A - list of julian weeks where the value of u2.A is suspect.
# These are set to NA prior to the call to JAGS
# bad.u2.N - list of julian weeks where the value of u2.N is suspect.
# These are set to NA prior to the call to JAGS
# logitP.cov - matrix of covariates for logit(P). If the strata times are "missing" some values, an intercept is assumed
# for the first element of the covariance matrix and 0 for the rest of the covariates.
# CAUTION - this MAY not be what you want to do. It is likely best to enter ALL strata
# if you have any covariates. The default, if not specified, is a constant (the mean logit)
# tauU.alpha, tauU.beta - parameters for the prior on variance in spline coefficients
# taueU.alpha, taueU.beta - parameters for the prior on variance in log(U) around fitted spline
# prior.beta.logitP.mean, prior.beta.logitP.sd - parameters for the prior on mean logit(P)'s [The intercept term]
# The other covariates are assigned priors of a mean of 0 and a sd of 30
# tauP.alpha, tauP.beta - parameters for the prior on 1/var of residual error in logit(P)'s
# run.prob - percentiles of run timing wanted
# debug - if TRUE, then this is a test run with very small MCMC chains run to test out the data
# and JAGS will run and stop waiting for you to exit and complete
# force the input vectors to be vectors
time <- as.vector(time)
n1 <- as.vector(n1)
m2 <- as.vector(m2)
u2.A <- as.vector(u2.A)
sampfrac <- as.vector(sampfrac)
# Do some basic error checking
# 1. Check that length of n1, m2, u2, sampfrac, time all match
if(stats::var(c(length(n1),length(m2),length(u2.A),length(u2.N),length(sampfrac),length(time)))>0){
cat("***** ERROR ***** Lengths of n1, m2, u2.A, u2.N, sampfrac, time must all be equal. They are:",
length(n1)," ",length(m2)," ",length(u2.A)," ",length(u2.N)," ",length(sampfrac)," ",length(time),"\n")
return()}
if(!is.numeric(n1)){
cat("***** ERROR ***** n1 must be numeric. You have:",
paste(n1,collapse=", "),"\n")
return()}
if(any(is.na(n1))){
cat("***** ERROR ***** All values of n1 must not be missing. You have: ",
paste(n1,collapse=", "),"\n")
return()}
if(any(n1 < 0, na.rm=TRUE)){
cat("***** ERROR ***** All values of n1 must be non-negative. You have: ",
paste(n1,collapse=", "),"\n")
return()}
if(length(logitP.cov) %% length(n1) != 0){
cat("***** ERROR ***** Dimension of covariate vector doesn't match length of n1 etc They are:",
length(n1)," ",length(logitP.cov)," ",paste(dim(logitP.cov),collapse=","),"\n")
return()}
# 2. Check that m2<= n1
if(any(m2>n1, na.rm=TRUE)){
cat("***** ERROR ***** m2 must be <= n1. The arguments are \n n1:",
paste(n1,collapse=","),"\n m2:",
paste(m2,collapse=","),"\n")
return()}
# 3. Elements of bad.m2, bad.u2.A, and bad.u2.N, and hatch.after must belong to time
if(!all(bad.m2 %in% time, na.rm=TRUE)){
cat("***** ERROR ***** bad.m2 must be elements of strata identifiers. You entered \n bad.m2:",
paste(bad.m2,collapse=","),"\n Strata identifiers are \n time:",
paste(time ,collapse=","), "\n")
return()}
if(!all(bad.u2.A %in% time, na.rm=TRUE)){
cat("***** ERROR ***** bad.u2.A must be elements of strata identifiers. You entered \n bad.u2.A:",
paste(bad.u2.A,collapse=","),"\n Strata identifiers are \n time:",
paste(time, collapse=","), "\n")
return()}
if(!all(bad.u2.N %in% time, na.rm=TRUE)){
cat("***** ERROR ***** bad.u2.N must be elements of strata identifiers. You entered \n bad.u2.N:",
paste(bad.u2.N, collapse=","),"\n Strata identifiers are \n time:",
paste(time, collapse=","), "\n")
return()}
if(!all(hatch.after %in% time, na.rm=TRUE)){
cat("***** ERROR ***** hatch.after must be elements of strata identifiers. You entered \n hatch.after:",
paste(hatch.after,collapse=","),"\n Strata identifiers are \n time:",
paste(time, collapse=","), "\n")
return()}
# Check that that the prior.beta.logitP.mean and prior.beta.logitP.sd length=number of columns of covariates
logitP.cov <- as.matrix(logitP.cov)
if(!is.vector(prior.beta.logitP.mean) | !is.vector(prior.beta.logitP.sd)){
stop("prior.beta.logitP.mean and prior.beta.logitP.sd must be vectors")
}
if(!is.numeric(prior.beta.logitP.mean) | !is.numeric(prior.beta.logitP.sd)){
stop("prior.beta.logitP.mean and prior.beta.logitP.sd must be numeric")
}
if(length(prior.beta.logitP.mean) != ncol(logitP.cov) | length(prior.beta.logitP.sd) != ncol(logitP.cov)){
stop("prior.beta.logitP.mean and prior.beta.logitP.sd must be same length as number columns in covariate matrix")
}
# Deprecation of sampling fraction.
if(any(sampfrac != 1)){
cat("***** ERROR ***** Sampling fraction is deprecated for any values other than 1. DO NOT USE ANYMORE. ")
return()
}
results.filename <- paste(prefix,"-results.txt",sep="")
stdout <- vector('character')
report <- textConnection('stdout', 'wr', local = TRUE)
sink(report)
cat(paste("Time Stratified Petersen with Diagonal recaptures, error in smoothed U, separating wild and hatchery fish - ", date()))
cat("\nVersion:", version)
cat("\n\n", title, "Results \n\n")
cat("*** Raw data *** \n")
temp<- cbind(time, n1, m2, u2.A, u2.N, logitP.cov)
colnames(temp)<- c('time', 'n1','m2','u2.A', 'u2.N', paste("logitPcov[", 1:ncol(as.matrix(logitP.cov)),"]",sep="") )
print(temp)
cat("\n\n")
cat("Hatchery fish are released AFTER strata: ", hatch.after,"\n\n")
cat("Hatchery fish are clipped at a rate of :", clip.frac.H,"\n\n")
cat("The following strata had m2 set to missing: ",
if(length(bad.m2)>0){bad.m2} else {" NONE"}, "\n")
cat("The following strata had u2.A set to missing: ",
if(length(bad.u2.A)>0){bad.u2.A} else {" NONE"}, "\n")
cat("The following strata had u2.N set to missing: ",
if(length(bad.u2.N)>0){bad.u2.N} else {" NONE"}, "\n")
# Pooled Petersen estimator over ALL of the data including when no releases take place, bad m2, bad.u2.A or bad.u2.N values.
cat("\n\n*** Pooled Petersen Estimate based on pooling over ALL strata adjusting for sampling fraction***\n\n")
cat("Total n1=", sum(n1, na.rm=TRUE),"; m2=",sum(m2, na.rm=TRUE),"; u2=",
sum(u2.A, na.rm=TRUE)+sum(u2.N, na.rm=TRUE),"\n\n")
pp <- SimplePetersen(sum(n1, na.rm=TRUE), sum(m2, na.rm=TRUE), sum(u2.A, na.rm=TRUE)+sum(u2.N, na.rm=TRUE))
cat("Est U(total) ", format(round(pp$U.est),big.mark=",")," (SE ", format(round(pp$U.se), big.mark=","), ")\n")
cat("Est N(total) ", format(round(pp$N.est),big.mark=",")," (SE ", format(round(pp$N.se), big.mark=","), ")\n\n\n")
# estimate for clipped fish (hatchery) and expand by the clip fraction
cat("Total n1=", sum(n1, na.rm=TRUE),
"; m2=", sum(m2, na.rm=TRUE),
"; u2.A=", sum(u2.A, na.rm=TRUE),"\n")
cat("Clip fraction :", clip.frac.H, "\n\n")
pp <- SimplePetersen(
sum(n1, na.rm=TRUE),
sum(m2, na.rm=TRUE),
sum(u2.A, na.rm=TRUE))
cat("Est U.H(total) ", format(round(pp$U.est)/clip.frac.H,big.mark=","),
" (SE ", format(round(pp$U.se) /clip.frac.H,big.mark=","), ")\n")
cat("Est N.H(total) ", format(round(pp$N.est)/clip.frac.H,big.mark=","),
" (SE ", format(round(pp$N.se) /clip.frac.H,big.mark=","), ")\n\n\n")
# estimate for wild YoY fish found by subtraction
cat("Total n1=", sum(n1, na.rm=TRUE),
"; m2=", sum(m2, na.rm=TRUE),
"; u2.W=", sum((u2.N+u2.A-u2.A/clip.frac.H), na.rm=TRUE),
"[Formed by interpolation based on clip rate]\n")
cat("Clip fraction :", clip.frac.H, "\n\n")
pp <- SimplePetersen(
sum(n1, na.rm=TRUE),
sum(m2, na.rm=TRUE),
sum((u2.N+u2.A-u2.A/clip.frac.H), na.rm=TRUE))
cat("Est U.W(total) ", format(round(pp$U.est),big.mark=","),
" (SE ", format(round(pp$U.se) ,big.mark=","), ") APPROXIMATE\n")
cat("Est N.W(total) ", format(round(pp$N.est),big.mark=","),
" (SE ", format(round(pp$N.se) ,big.mark=","), ") APPROXIMATE\n\n\n")
# Obtain the Pooled Petersen estimator without excluding bad.m2, bad.u2.A, or bad.u2.N values but after removing 0 or NA values
select <- (n1>0) & (!is.na(n1)) & (!is.na(m2)) & (!is.na(u2.A)) & (!is.na(u2.N))
cat("\n\n*** Pooled Petersen Estimate prior to excluding bad m2, u2.A, or u2.N values ***\n\n")
cat("The following strata are excluded because n1=0 or NA values in m2, u2.A, u2.N :", time[!select],"\n\n")
temp.n1 <- n1 [select]
temp.m2 <- m2 [select]
temp.u2.A <- u2.A [select]
temp.u2.N <- u2.N [select]
cat("Total n1=", sum(temp.n1),"; m2=",sum(temp.m2),"; u2=",sum(temp.u2.A+temp.u2.N),"\n\n")
pp <- SimplePetersen(sum(temp.n1), sum(temp.m2), sum(temp.u2.A+temp.u2.N))
cat("Est U(total) ", format(round(pp$U.est),big.mark=",")," (SE ", format(round(pp$U.se), big.mark=","), ")\n")
cat("Est N(total) ", format(round(pp$N.est),big.mark=",")," (SE ", format(round(pp$N.se), big.mark=","), ")\n\n\n")
# estimate for clipped fish (hatchery) and expand by the clip fraction
cat("Total n1=", sum(temp.n1, na.rm=TRUE),
"; m2=", sum(temp.m2, na.rm=TRUE),
"; u2.A=", sum(temp.u2.A, na.rm=TRUE),"\n")
cat("Clip fraction :", clip.frac.H, "\n\n")
pp <- SimplePetersen(
sum(temp.n1, na.rm=TRUE),
sum(temp.m2, na.rm=TRUE),
sum(temp.u2.A, na.rm=TRUE))
cat("Est U.H(total) ", format(round(pp$U.est)/clip.frac.H,big.mark=","),
" (SE ", format(round(pp$U.se) /clip.frac.H,big.mark=","), ")\n")
cat("Est N.H(total) ", format(round(pp$N.est)/clip.frac.H,big.mark=","),
" (SE ", format(round(pp$N.se) /clip.frac.H,big.mark=","), ")\n\n\n")
# estimate for wild YoY fish
cat("Total n1=", sum(temp.n1, na.rm=TRUE),
"; m2=", sum(temp.m2, na.rm=TRUE),
"; u2.W=", sum((temp.u2.N+temp.u2.A-temp.u2.A/clip.frac.H), na.rm=TRUE),
"[Formed by interpolation based on clip rate]\n")
cat("Clip fraction :", clip.frac.H, "\n\n")
pp <- SimplePetersen(
sum(temp.n1, na.rm=TRUE),
sum(temp.m2, na.rm=TRUE),
sum((temp.u2.N+temp.u2.A-temp.u2.A/clip.frac.H), na.rm=TRUE))
cat("Est U.W(total) ", format(round(pp$U.est),big.mark=","),
" (SE ", format(round(pp$U.se) ,big.mark=","), ") APPROXIMATE \n")
cat("Est N.W(total) ", format(round(pp$N.est),big.mark=","),
" (SE ", format(round(pp$N.se) ,big.mark=","), ") APPROXIMATE \n\n\n")
# Obtain the Pooled Petersen estimator after fixup of bad.m2, bad.u2.A, and bad.u2.N values
temp.m2 <- m2
index.bad.m2 <- as.vector((1:length(time)) %*% outer(time,bad.m2,"=="))
temp.m2[index.bad.m2] <- NA
temp.u2.A <- u2.A
index.bad.u2.A <- as.vector((1:length(time)) %*% outer(time,bad.u2.A,"=="))
temp.u2.A[index.bad.u2.A] <- NA
temp.u2.N <- u2.A
index.bad.u2.N <- as.vector((1:length(time)) %*% outer(time,bad.u2.N,"=="))
temp.u2.N[index.bad.u2.N] <- NA
select <- (n1>0) & (!is.na(n1)) & (!is.na(temp.m2)) & (!is.na(temp.u2.A) & (!is.na(temp.u2.N)) )
cat("\n\n*** Pooled Petersen Estimate after removing bad m2, u2.A, and u2.N values adjusting for sampling fraction ***\n\n")
cat("The following strata had m2 set to missing: ",
if(length(bad.m2)>0){bad.m2} else {" NONE"}, "\n")
cat("The following strata had u2.A set to missing: ",
if(length(bad.u2.A)>0){bad.u2.A} else {" NONE"}, "\n")
cat("The following strata had u2.N set to missing: ",
if(length(bad.u2.N)>0){bad.u2.N} else {" NONE"}, "\n")
cat("The following strata are excluded because n1=0 or NA values in m2, u2.A, or u2.N:", time[!select],"\n\n")
temp.n1 <- n1 [select]
temp.m2 <- m2 [select]
temp.u2.A <- u2.A [select]
temp.u2.N <- u2.N [select]
cat("Total n1=", sum(temp.n1),"; m2=",sum(temp.m2),"; u2=",sum(temp.u2.A+temp.u2.N),"\n\n")
pp <- SimplePetersen(sum(temp.n1), sum(temp.m2), sum(temp.u2.A+temp.u2.N))
cat("Est U(total) ", format(round(pp$U.est),big.mark=",")," (SE ", format(round(pp$U.se), big.mark=","), ")\n")
cat("Est N(total) ", format(round(pp$N.est),big.mark=",")," (SE ", format(round(pp$N.se), big.mark=","), ")\n\n\n")
# estimate for clipped fish (hatchery) and expand by the clip fraction
cat("Total n1=", sum(temp.n1, na.rm=TRUE),
"; m2=", sum(temp.m2, na.rm=TRUE),
"; u2.A=", sum(temp.u2.A, na.rm=TRUE),"\n")
cat("Clip fraction :", clip.frac.H, "\n\n")
pp <- SimplePetersen(
sum(temp.n1, na.rm=TRUE),
sum(temp.m2, na.rm=TRUE),
sum(temp.u2.A, na.rm=TRUE))
cat("Est U.H(total) ", format(round(pp$U.est)/clip.frac.H,big.mark=","),
" (SE ", format(round(pp$U.se) /clip.frac.H,big.mark=","), ")\n")
cat("Est N.H(total) ", format(round(pp$N.est)/clip.frac.H,big.mark=","),
" (SE ", format(round(pp$N.se) /clip.frac.H,big.mark=","), ")\n\n\n")
# estimate for wild YoY fish
cat("Total n1=", sum(temp.n1, na.rm=TRUE),
"; m2=", sum(temp.m2, na.rm=TRUE),
"; u2.W=", sum((temp.u2.N+temp.u2.A-temp.u2.A/clip.frac.H), na.rm=TRUE),
"[Formed by interpolation based on clip rate]\n")
cat("Clip fraction :", clip.frac.H, "\n\n")
pp <- SimplePetersen(
sum(temp.n1, na.rm=TRUE),
sum(temp.m2, na.rm=TRUE),
sum((temp.u2.N+temp.u2.A-temp.u2.A/clip.frac.H), na.rm=TRUE))
cat("Est U.W(total) ", format(round(pp$U.est),big.mark=","),
" (SE ", format(round(pp$U.se) ,big.mark=","), ") APPROXIMATE\n")
cat("Est N.W(total) ", format(round(pp$N.est),big.mark=","),
" (SE ", format(round(pp$N.se) ,big.mark=","), ") APPROXIMATE\n\n\n")
# Obtain Stratified-Petersen estimator for each stratum prior to removing bad m2 values
cat("*** Stratified Petersen Estimator for each stratum PRIOR to removing bad m2 values after adjusting for sampling fration ***\n\n")
temp.n1 <- n1
temp.m2 <- m2
temp.u2 <- (u2.A + u2.N)
sp <- SimplePetersen(temp.n1, temp.m2, temp.u2)
temp <- cbind(time, temp.n1, temp.m2, temp.u2, round(sp$U.est), round(sp$U.se))
colnames(temp) <- c('time', 'n1','m2','(u2.A+u2.N)*adj', 'U[i]', 'SE(U[i])')
print(temp)
cat("\n")
cat("Est U(total) ", format(round(sum(sp$U.est, na.rm=TRUE)),big.mark=","),
" (SE ", format(round(sqrt(sum(sp$U.se^2, na.rm=TRUE))), big.mark=","), ")\n\n\n")
cat("*** Stratified Petersen Estimator for each stratum Hatchery YoY PRIOR to removing bad m2 values after adjusting for sampling fration ***\n\n")
temp.n1 <- n1
temp.m2 <- m2
temp.u2 <- u2.A
sp <- SimplePetersen(temp.n1, temp.m2, temp.u2)
temp <- cbind(time, temp.n1, temp.m2, temp.u2, round(sp$U.est), round(sp$U.se))
colnames(temp) <- c('time', 'n1','m2','u2.A*adj', 'U[i]', 'SE(U[i])')
print(temp)
cat("** Estimates not adjusted for clip fraction above \n")
cat("Est U.H(total) ", format(round(sum(sp$U.est, na.rm=TRUE)/clip.frac.H),big.mark=","),
" (SE ", format(round(sqrt(sum(sp$U.se^2, na.rm=TRUE))/clip.frac.H), big.mark=","), ")\n\n\n")
cat("*** Stratified Petersen Estimator for each stratum Wild YoY PRIOR to removing bad m2 values after adjusting for sampling fration ***\n\n")
temp.n1 <- n1
temp.m2 <- m2
temp.u2 <- pmax(0,(u2.N+u2.A-u2.A/clip.frac.H))
sp <- SimplePetersen(temp.n1, temp.m2, temp.u2)
temp <- cbind(time, temp.n1, temp.m2, temp.u2, round(sp$U.est), round(sp$U.se))
colnames(temp) <- c('time', 'n1','m2','u2.W-est', 'U[i]', 'SE(U[i])')
print(temp)
cat("Est U.W(total) ", format(round(sum(sp$U.est, na.rm=TRUE)),big.mark=","),
" (SE ", format(round(sqrt(sum(sp$U.se^2, na.rm=TRUE))), big.mark=","), ") APPROXIMATE\n\n\n")
# Obtain Stratified-Petersen estimator for each stratum after removing bad m2, u2.A, or u2.N values
cat("*** Stratified Petersen Estimator for each stratum AFTER removing bad m2, u2.A, u2.N values***\n\n")
temp.n1 <- n1
temp.m2 <- m2
temp.m2[index.bad.m2] <- NA
temp.u2.A <- u2.A
temp.u2.A[index.bad.u2.A] <- NA
temp.u2.N <- u2.N
temp.u2.N[index.bad.u2.N] <- NA
sp <- SimplePetersen(temp.n1, temp.m2, temp.u2.A+temp.u2.N)
temp <- cbind(time, temp.n1, temp.m2, (temp.u2.A+temp.u2.N), round(sp$U.est), round(sp$U.se))
colnames(temp) <- c('time', 'n1','m2','(u2.a+u2.N)*adj', 'U[i]', 'SE(U[i])')
print(temp)
cat("\n")
cat("Est U(total) ", format(round(sum(sp$U.est, na.rm=TRUE)),big.mark=","),
" (SE ", format(round(sqrt(sum(sp$U.se^2, na.rm=TRUE))), big.mark=","), ")\n\n\n")
cat("*** Stratified Petersen Estimator for each stratum YoY hatchery PRIOR to removing bad m2 values ***\n\n")
sp <- SimplePetersen(temp.n1, temp.m2, temp.u2.A)
temp <- cbind(time, temp.n1, temp.m2, round(temp.u2.A), round(sp$U.est), round(sp$U.se))
colnames(temp) <- c('time', 'n1','m2','u2.A*adj', 'U[i]', 'SE(U[i])')
print(temp)
cat("** Estimates not adjusted for clip fraction above \n")
cat("Est U.H(total) ", format(round(sum(sp$U.est, na.rm=TRUE)/clip.frac.H),big.mark=","),
" (SE ", format(round(sqrt(sum(sp$U.se^2, na.rm=TRUE))/clip.frac.H), big.mark=","), ")\n\n\n")
cat("*** Stratified Petersen Estimator for each stratum Wild YoY PRIOR to removing bad m2 values after adjusting for sampling fration ***\n\n")
temp.u2.W <- pmax(0,(temp.u2.N+temp.u2.A-temp.u2.A/clip.frac.H))
sp <- SimplePetersen(temp.n1, temp.m2, temp.u2.W)
temp <- cbind(time, temp.n1, temp.m2, round(temp.u2.W), round(sp$U.est), round(sp$U.se))
colnames(temp) <- c('time', 'n1','m2','u2.W-est', 'U[i]', 'SE(U[i])')
print(temp)
cat("Est U.W(total) ", format(round(sum(sp$U.est, na.rm=TRUE)),big.mark=","),
" (SE ", format(round(sqrt(sum(sp$U.se^2, na.rm=TRUE))), big.mark=","), ") APPROXIMATE\n\n\n")
# Test if pooling can be done
cat("*** Test if pooled Petersen is allowable. [Check if marked fractions are equal] ***\n\n")
select <- (n1>0) & (!is.na(n1)) & (!is.na(temp.m2))
temp.n1 <- n1[select]
temp.m2 <- m2[select]
test <- TestIfPool( temp.n1, temp.m2)
cat("(Large Sample) Chi-square test statistic ", test$chi$statistic," has p-value", test$chi$p.value,"\n\n")
temp <- cbind(time[select],test$chi$observed, round(test$chi$expected,1), round(test$chi$residuals^2,1))
colnames(temp) <- c('time','n1-m2','m2','E[n1-m2]','E[m2]','X2[n1-m2]','X2[m2]')
print(temp)
cat("\n Be cautious of using this test in cases of small expected values. \n\n")
# Fix up any data problems and prepare for the call.
# Notice that for strata entries that are missing any covariate values, only an intercept is added
# Expand the entries in case of missing time entries
new.n1 <- rep(0, max(time)-min(time)+1)
new.m2 <- rep(0, max(time)-min(time)+1)
new.u2.A <- rep(0, max(time)-min(time)+1)
new.u2.N <- rep(0, max(time)-min(time)+1)
new.logitP.cov <- matrix(NA, nrow=max(time)-min(time)+1, ncol=ncol(as.matrix(logitP.cov)))
new.time <- min(time):max(time)
new.n1 [time-min(time)+1] <- n1
new.m2 [time-min(time)+1] <- m2
new.m2 [bad.m2-min(time)+1] <- NA # wipe out strata where m2 is known to be bad
new.u2.A[time-min(time)+1] <- u2.A
new.u2.A[bad.u2.A-min(time)+1] <- NA # wipe out strata where u2.A is known to be bad
new.u2.N[time-min(time)+1] <- u2.N
new.u2.N[bad.u2.N-min(time)+1] <- NA # wipe out strata where u2.N is known to be bad
new.logitP.cov[time-min(time)+1,]<- as.matrix(logitP.cov)
new.logitP.cov[ is.na(new.logitP.cov[,1]), 1] <- 1 # insert a 1 into first columns where not specified
new.logitP.cov[ is.na(new.logitP.cov)] <- 0 # other covariates are forced to zero not in column 1
# Check for and fix problems with the data
# If n1=m2=0, then set n1 to 1, and set m2<-NA
new.m2[new.n1==0] <- NA
new.n1[new.n1==0] <- 1
# Adjust data when a stratum has less than 100% sampling fraction to "estimate" the number
# of unmarked fish that were captured. It is not necessary to adjust the n1 and m2 values
# as these are used ONLY to estimate the capture efficiency.
# In reality, there should be a slight adjustment
# to the precision to account for this change, but this is not done.
# Similarly, if the sampling fraction is more than 1, the adjustment forces the total unmarked catch back to a single week.
new.u2.A <- round(new.u2.A)
new.u2.N <- round(new.u2.N)
# Print out the revised data
hatch.indicator <- rep(' ', max(time)-min(time)+1)
hatch.indicator[hatch.after-min(time)+1]<- '***'
cat("\n\n*** Revised data *** \n")
temp<- data.frame(time=new.time, n1=new.n1, m2=new.m2, u2.A=new.u2.A, u2.N=new.u2.N,
new.logitP.cov=new.logitP.cov,
hatch.indicator=hatch.indicator)
print(temp)
cat("\n\n")
# Print out information on the prior distributions used
cat("\n\n*** Information on priors *** \n")
cat(" Parameters for prior on tauU (variance in spline coefficients: ", tauU.alpha, tauU.beta,
" which corresponds to a mean/std dev of 1/var of:",
round(tauU.alpha/tauU.beta,2),round(sqrt(tauU.alpha/tauU.beta^2),2),"\n")
cat(" Parameters for prior on taueU (variance of log(U) about spline: ",taueU.alpha, taueU.beta,
" which corresponds to a mean/std dev of 1/var of:",
round(taueU.alpha/taueU.beta,2),round(sqrt(taueU.alpha/taueU.beta^2),2),"\n")
cat(" Parameters for prior on beta.logitP[1] (intercept) (mean, sd): \n", cbind(round(prior.beta.logitP.mean,3), round(prior.beta.logitP.sd,5)),"\n")
cat(" Parameters for prior on tauP (residual variance of logit(P) after adjusting for covariates: ",tauP.alpha, tauP.beta,
" which corresponds to a mean/std dev of 1/var of:",
round(tauP.alpha/tauP.beta,2),round(sqrt(tauP.alpha/tauP.beta^2),2),"\n")
cat("\n\nInitial seed for this run is: ",InitialSeed, "\n")
sink()
if (debug2) {
cat("\nprior to formal call to TimeStratPetersenDiagErrorWHChinook\n")
browser()
}
if (debug)
{results <- TimeStratPetersenDiagErrorWHChinook(title=title, prefix=prefix,
time=new.time, n1=new.n1, m2=new.m2, u2.A=new.u2.A, u2.N=new.u2.N,
hatch.after=hatch.after-min(time)+1, clip.frac.H=clip.frac.H,
logitP.cov=new.logitP.cov,
n.chains=3, n.iter=10000, n.burnin=5000, n.sims=500, # set to low values for debugging only
prior.beta.logitP.mean=prior.beta.logitP.mean,
prior.beta.logitP.sd =prior.beta.logitP.sd,
tauU.alpha=tauU.alpha, tauU.beta=tauU.beta, taueU.alpha=taueU.alpha, taueU.beta=taueU.beta,
debug=debug,InitialSeed=InitialSeed,
save.output.to.files=save.output.to.files)
} else #notice R syntax requires { before the else
{results <- TimeStratPetersenDiagErrorWHChinook(title=title, prefix=prefix,
time=new.time, n1=new.n1, m2=new.m2, u2.A=new.u2.A, u2.N=new.u2.N,
hatch.after=hatch.after-min(time)+1, clip.frac.H=clip.frac.H,
logitP.cov=new.logitP.cov,
n.chains=n.chains, n.iter=n.iter, n.burnin=n.burnin, n.sims=n.sims,
prior.beta.logitP.mean=prior.beta.logitP.mean,
prior.beta.logitP.sd =prior.beta.logitP.sd,
tauU.alpha=tauU.alpha, tauU.beta=tauU.beta, taueU.alpha=taueU.alpha, taueU.beta=taueU.beta,
InitialSeed=InitialSeed,
save.output.to.files=save.output.to.files)
}
# Now to create the various summary tables of the results
if (debug2) {
cat("\nAfter formal call to TimeStratPetersenDiagErrorWHChinook\n")
browser()
}
# A plot of the observed log(U) on the log scale, and the final mean log(U)
# Create the data frame needed for ggplot.
# In the diagonal case, time, n1, m2, u2 are the same length
Nstrata <- length(n1)
plot.df <- data.frame(time =new.time)
# adjust the u2 for the clipping fractions
plot.df$n1 <- new.n1
plot.df$m2 <- new.m2
plot.df$u2.H <- new.u2.A/clip.frac.H # only a portion of the hatchery fish are clipped
plot.df$u2.N <- new.u2.N
plot.df$u2.W <- pmax(plot.df$u2.N - plot.df$u2.H*(1-clip.frac.H),0) # subtract the guestimated number of hatchery fish
plot.df$u2.H[is.na(plot.df$u2.H)] <- 1 # in case of missing values
plot.df$u2.W[is.na(plot.df$u2.W)] <- 1 # in case of missing values
avg.P <- sum(plot.df$m2,na.rm=TRUE)/sum(plot.df$n1, na.rm=TRUE)
plot.df$logUguess.W <- log(pmax((plot.df$u2.W+1)*(plot.df$n1+2)/(plot.df$m2+1), plot.df$u2.W/avg.P, na.rm=TRUE) ) # try and keep Uguess larger than observed values
plot.df$logUguess.H <- log(pmax((plot.df$u2.H+1)*(plot.df$n1+2)/(plot.df$m2+1), plot.df$u2.H/avg.P, na.rm=TRUE) )
plot.df$logUguess.H[1:(hatch.after-min(time))] <- NA # no hatchery fish prior to release from hatchery
# extract the fitted U values for W (wild) and H (hatchery)
results.row.names <- rownames(results$summary)
etaU.W.row.index <- grep("etaU.W", results.row.names)
etaU.W <- results$summary[etaU.W.row.index,]
plot.df$logU.W = etaU.W[,"mean"]
plot.df$logUlcl.W = etaU.W[,"2.5%"]
plot.df$logUucl.W = etaU.W[,"97.5%"]
etaU.H.row.index <- grep("etaU.H", results.row.names)
etaU.H <- results$summary[etaU.H.row.index,]
plot.df$logU.H = etaU.H[,"mean"]
plot.df$logUlcl.H = etaU.H[,"2.5%"]
plot.df$logUucl.H = etaU.H[,"97.5%"]
plot.df$logU.H [1:(hatch.after - min(time)+1)] <- NA # no hatchery fish until release at hatch.after
plot.df$logUlcl.H [1:(hatch.after - min(time)+1)] <- NA
plot.df$logUucl.H [1:(hatch.after - min(time)+1)] <- NA
# extract the spline values for W (wild) and H (hatchery) fish
logUne.W.row.index <- grep("logUne.W", results.row.names)
plot.df$spline.W <- results$summary[logUne.W.row.index,"mean"]
logUne.H.row.index <- grep("logUne.H", results.row.names)
plot.df$spline.H <- results$summary[logUne.H.row.index,"mean"]
plot.df$spline.H [1:(hatch.after - min(time)+1)] <- NA # no hatchery fish until release at hatch.after
# add limits to the plot to avoid non-monotone secondary axis problems with extreme values
plot.df$logUguess.W <- pmax(-10 , pmin(20, plot.df$logUguess.W))
plot.df$logUguess.H <- pmax(-10 , pmin(20, plot.df$logUguess.H))
plot.df$logU.W <- pmax(-10 , pmin(20, plot.df$logU.W ))
plot.df$logU.H <- pmax(-10 , pmin(20, plot.df$logU.H ))
plot.df$logUlcl.W <- pmax(-10 , pmin(20, plot.df$logUlcl.W ))
plot.df$logUlcl.H <- pmax(-10 , pmin(20, plot.df$logUlcl.H ))
plot.df$logUucl.W <- pmax(-10 , pmin(20, plot.df$logUucl.W ))
plot.df$logUucl.H <- pmax(-10 , pmin(20, plot.df$logUucl.H ))
plot.df$spline.W <- pmax(-10 , pmin(20, plot.df$spline.W))
plot.df$spline.H <- pmax(-10 , pmin(20, plot.df$spline.H))
fit.plot <- ggplot(data=plot.df, aes_(x=~time))+
ggtitle(title, subtitle="Fitted spline curve to raw U.W[i] U.H[i] with 95% credible intervals")+
geom_point(aes_(y=~logUguess.W), color="red", shape="w")+ # guesses for wild fish
geom_point(aes_(y=~logUguess.H), color="green", shape="h")+ # guesses for hatchery fish
xlab("Time Index\nFitted/Smoothed/Raw values plotted for W(black) and H(blue)")+
ylab("log(U[i]) + 95% credible interval")+
geom_point(aes_(y=~logU.W), color="black", shape=19)+
geom_line (aes_(y=~logU.W), color="black")+
geom_errorbar(aes_(ymin=~logUlcl.W, ymax=~logUucl.W), width=.1)+
geom_line(aes_(y=~spline.W),linetype="dashed") +
geom_point(aes_(y=~logU.H), color="blue", shape=19)+
geom_line (aes_(y=~logU.H), color="blue")+
geom_errorbar(aes_(ymin=~logUlcl.H, ymax=~logUucl.H), width=.1, color="blue")+
geom_line(aes_(y=~spline.H),linetype="dashed",color="blue")+
ylim(c(-2,NA))+
scale_x_continuous(breaks=seq(min(plot.df$time, na.rm=TRUE),max(plot.df$time, na.rm=TRUE),2))+
scale_y_continuous(sec.axis = sec_axis(~ exp(.), name="U + 95% credible interval",
breaks=c(1,10,20,50,
100,200,500,
1000,2000,5000,
10000,20000, 50000,
100000,200000, 500000,
1000000,2000000,5000000,10000000),
labels = scales::comma))
if(save.output.to.files)ggsave(plot=fit.plot, filename=paste(prefix,"-fit.pdf",sep=""), height=6, width=10, units="in")
results$plots$fit.plot <- fit.plot
# Plot the logitP over time
logitP.plot <- plot_logitP(title=title, time=new.time, n1=new.n1, m2=new.m2, u2=new.u2.A+new.u2.N,
logitP.cov=new.logitP.cov, results=results,
trunc.logitP=trunc.logitP)
if(save.output.to.files)ggsave(plot=logitP.plot, filename=paste(prefix,"-logitP.pdf",sep=""), height=6, width=10, units="in", dpi=300)
results$plots$logitP.plot <- logitP.plot
# Look at autocorrelation function for Utot.W and Utot.H
mcmc.sample1<- data.frame(parm="Utot.W", sample=results$sims.matrix[,"Utot.W"], stringsAsFactors=FALSE)
mcmc.sample2<- data.frame(parm="Utot.H", sample=results$sims.matrix[,"Utot.H"], stringsAsFactors=FALSE)
mcmc.sample <- rbind(mcmc.sample1, mcmc.sample2)
acf.Utot.plot <- plot_acf(mcmc.sample)
if(save.output.to.files)ggsave(plot=acf.Utot.plot, filename=paste(prefix,"-Utot-acf.pdf",sep=""), height=4, width=6, units="in")
results$plots$acf.Utot.plot <- acf.Utot.plot
# Look at the shape of the posterior distribution
mcmc.sample1<- data.frame(parm="Utot.W", sample=results$sims.matrix[,"Utot.W"], stringsAsFactors=FALSE)
mcmc.sample2<- data.frame(parm="Utot.H", sample=results$sims.matrix[,"Utot.H"], stringsAsFactors=FALSE)
mcmc.sample <- rbind(mcmc.sample1, mcmc.sample2)
post.Utot.plot <- plot_posterior(mcmc.sample)
if(save.output.to.files)ggsave(plot=post.Utot.plot, filename=paste(prefix,"-Utot-posterior.pdf",sep=""), height=4, width=6, units="in")
results$plots$post.Utot.plot <- post.Utot.plot
#save the Bayesian predictive distribution (Bayesian p-value plots)
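# (Posterior predictive p-values near 0 or 1 indicate lack of fit between the model and the observed data.)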
#browser()
discrep <-PredictivePosterior.TSPDE.WHCH (time, new.n1, new.m2, new.u2.A, new.u2.N, clip.frac.H,
expit(results$sims.list$logitP), round(results$sims.list$U.W),
round(pmax(results$sims.list$U.H,0)),
hatch.after)
gof <- PredictivePosteriorPlot.TSPDE.WHCH (discrep)
if(save.output.to.files)ggsave(gof[[1]],filename=paste(prefix,"-GOF.pdf",sep=""), height=8, width=8, units="in", dpi=300 )
results$plots$gof.plot <- gof
# create traceplots of logU, U, and logitP (along with R value) to look for non-convergence
# the plot_trace will return a list of plots (one for each page as needed)
varnames <- names(results$sims.array[1,1,]) # extract the names of the variables
# Trace plots of logitP
trace.plot <- plot_trace(title=title, results=results, parms_to_plot=varnames[grep("^logitP", varnames)])
if(save.output.to.files){
pdf(file=paste(prefix,"-trace-logitP.pdf",sep=""))
plyr::l_ply(trace.plot, function(x){plot(x)})
dev.off()
}
results$plots$trace.logitP.plot <- trace.plot
# now for the traceplots of logU (etaU), Utot, and Ntot
trace.plot <- plot_trace(title=title, results=results, parms_to_plot=varnames[c(grep("Utot",varnames), grep("Ntot",varnames), grep("^etaU", varnames))])
if(save.output.to.files){
pdf(file=paste(prefix,"-trace-logU.pdf",sep=""))
plyr::l_ply(trace.plot, function(x){plot(x)})
dev.off()
}
results$plots$trace.logU.plot <- trace.plot
sink(report, append=TRUE)
# What was the initial seed
cat("\n\n*** Initial Seed for this run ***: ", results$Seed.initial,"\n")
# Global summary of results
cat("\n\n*** Summary of MCMC results *** \n\n")
save.max.print <- getOption("max.print")
options(max.print=.Machine$integer.max)
print(results, digits.summary=3)#, max=.Machine$integer.max)
options(max.print=save.max.print)
# Give an alternate computation of DIC based on the variance of the deviance
# Refer to http://www.mrc-bsu.cam.ac.uk/bugs/winbugs/DIC-slides.pdf for derivation and why
# this alternate method may be superior to that automatically computed by WinBugs/OpenBugs
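# (Under approximate normality of the posterior deviance, p_D can be approximated by var(deviance)/2,
#  giving DIC = mean(deviance) + p_D; this is exactly the computation carried out below.)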
cat("\n\n*** Alternate DIC computation based on p_D = var(deviance)/2 \n")
results.row.names <- rownames(results$summary)
deviance.row.index<- grep("deviance", results.row.names)
deviance <- results$summary[deviance.row.index,]
p.D <- deviance["sd"]^2/2
dic <- deviance["mean"]+p.D
cat(" D-bar: ", deviance["mean"],"; var(dev): ", deviance["sd"]^2,
"; p.D: ", p.D, "; DIC: ", dic)
# Summary of population sizes. Add pretty printing.
cat("\n\n\n\n*** Summary of Unmarked Population Size ***\n")
cat("Wild\n")
temp<- results$summary[ grep("Utot.W", rownames(results$summary)),]
old.Rhat <- temp["Rhat"]
temp<- formatC(temp, big.mark=",", format="d")
temp["Rhat"] <- formatC(old.Rhat,digits=2,format="f",flag="#")
print(temp, quote=FALSE)
cat("\n\nHatchery\n")
temp<- results$summary[ grep("Utot.H", rownames(results$summary)),]
old.Rhat <- temp["Rhat"]
temp<- formatC(temp, big.mark=",", format="d")
temp["Rhat"] <- formatC(old.Rhat,digits=2,format="f",flag="#")
print(temp, quote=FALSE)
cat("\n\nTotal\n")
temp<- results$summary[ rownames(results$summary) == "Utot",]
old.Rhat <- temp["Rhat"]
temp<- formatC(temp, big.mark=",", format="d")
temp["Rhat"] <- formatC(old.Rhat,digits=2,format="f",flag="#")
print(temp, quote=FALSE)
#browser()
cat("\n\n\n\n*** Summary of Quantiles of Run Timing.Wild *** \n")
cat( " This is based on the sample weeks provided and the U.W[i] values \n")
q <- RunTime(time=time, U=results$sims.list$U.W, prob=run.prob)
temp <- rbind(apply(q,2,mean), apply(q,2,sd))
rownames(temp) <- c("Mean", "Sd")
print(round(temp,2))
cat("\n\n*** Summary of Quantiles of Run Timing.Hatchery *** \n")
cat( " This is based on the sample weeks provided and the U.H[i] values \n")
q <- RunTime(time=time[time>hatch.after], U=results$sims.list$U.H[,time>hatch.after], prob=run.prob)
temp <- rbind(apply(q,2,mean), apply(q,2,sd))
rownames(temp) <- c("Mean", "Sd")
print(round(temp,2))
cat("\n\n")
cat(paste("*** end of fit *** ", date()))
sink()
# save the report to a files?
if(save.output.to.files)writeLines(stdout, results.filename)
results$report <- stdout
# add some of the raw data to the bugs object for simplicity in referencing it later
results$data <- list( time=time, n1=n1, m2=m2, u2.A=u2.A, u2.N=u2.N, clip.frac.H=clip.frac.H,
hatch.after=hatch.after,
bad.m2=bad.m2, bad.u2.A=bad.u2.A, bad.u2.N=bad.u2.N,
logitP.cov=logitP.cov,
version=version, date_run=date(),
title=title)
return(results)
} # end of function
|
/scratch/gouwar.j/cran-all/cranData/BTSPAS/R/TimeStratPetersenDiagErrorWHChinook_fit.R
|
## 2020-11-07 CJS Allow user to specify prior for beta parameters for covariates on logitP
# 2018-12-06 CJS created initial plot
# 2018-11-27 CJS removed openBugs
# 2014-09-01 CJS conversion to JAGS
# - no model name
# - C(,20) -> T(,20)
# - dflat() to dnorm(0, 1E-6)
# - added u2.W.YoYcopy to improve mixing based on Matt S. suggestion
# - added u2.W.1copy to improve mixing based on Matt S. suggestion
# - added u2.H.1copy to improve mixing based on Matt S. suggestion
# - fixed monitoring of *H.1 parameters that are only present in hatch.after or later
# JAGS won't monitor these variables unless entries from 1:hatch.after are defined
# 2013-09-06 CJS added initializations for n1, m2, u2.W.YoY, u2.W.1, u2.H.1
# when these are missing (typically when set to bad).
# Also removed references to WinBugs
# 2011-05-15 CJS limited etaU to 20 or less
# 2011-01-24 SB added call to run.windows.openbugs and run.windows.winbugs
# 2010-11-25 CJS output to track progress of burnin and post-burnin phases
# 2010-04-26 CJS fixed problem with init.logiP that failed when n1=m2 logit=+infinity and lm() failed
# 2009-12-05 CJS added title to argument list
# 2009-12-01 CJS added openbugs/winbugs directory to argument list
#' @keywords internal
#' @importFrom stats lm spline var sd
TimeStratPetersenDiagErrorWHSteel <-
function(title, prefix,
time, n1, m2,
u2.W.YoY, u2.W.1, u2.H.1,
hatch.after=NULL,
logitP.cov=as.matrix(rep(1,length(u2.W.YoY))),
n.chains=3, n.iter=200000, n.burnin=100000, n.sims=2000,
tauU.alpha=1, tauU.beta=.05, taueU.alpha=1, taueU.beta=.05,
prior.beta.logitP.mean = c(logit(sum(m2,na.rm=TRUE)/sum(n1,na.rm=TRUE)),rep(0, ncol(as.matrix(logitP.cov))-1)),
prior.beta.logitP.sd = c(stats::sd(logit((m2+.5)/(n1+1)),na.rm=TRUE), rep(10, ncol(as.matrix(logitP.cov))-1)),
tauP.alpha=.001, tauP.beta=.001,
debug=FALSE, debug2=FALSE,
InitialSeed,
save.output.to.files=TRUE){
set.seed(InitialSeed) # set prior to initial value computations
#
# Fit the smoothed time-Stratified Petersen estimator with Diagonal recoveries (i.e. no recoveries
# outside stratum of release), error in the smoothed U curve, and separating wild vs hatchery stocks
# for steelhead where 100% of hatchery fish are clipped
#
# This routine assumes that the strata are time (e.g. weeks).
# In each stratum n1 fish are released (with marks). These are usually
# captured fish that are marked, transported upstream, and released.
# These fish are used only to estimate the recapture rate downstream.
# Of the n1 fish released, m2 fish are recaptured in the same stratum (e.g. week) of release.
# There is a related function that allows fish to be recaptured in subsequent weeks.
#
# There are 3 distinct populations in the stream
# u2.W.YoY - young of year wild populations
# u2.W.1 - wild populations of age 1+
# u2.H.1 - hatchery population of age 1 that appears after hatch.after
# Input
# prefix - prefix for file name for initial plot of U's
# time - the stratum number
# n1 - vector of number of fish released in stratum i
# m2 - vector of number of fish recovered in stratum i (EXCLUDING recaps)
# u2.W.YoY - vector of number of wild YoY captured in stratum i
#      u2.W.1   - vector of number of wild age 1+ fish captured in stratum i
#      u2.H.1   - vector of number of hatchery age 1+ fish captured in stratum i
# hatch.after - point AFTER which the hatchery fish are released.
# logitP.cov - covariates for logit(P)=X beta.logitP
# This routine makes a call to the MCMC sampler to fit the model and then gets back the
#      coda files for the posterior distribution.
## Set working directory to current directory (we should allow users to select this)
working.directory <- getwd()
## Define paths for the model, data, and initial value files
model.file <- file.path(working.directory, "model.txt")
data.file <- file.path(working.directory,"data.txt")
init.files <- file.path(working.directory,
paste("inits", 1:n.chains,".txt", sep = ""))
# Save the Bugs progam to the model.txt file
#
sink(model.file) # NOTE: NO " allowed in model as this confuses the cat command
cat("
model {
# Time Stratified Petersen with Diagonal recapture (no spillover in subsequent weeks or marked fish)
# and allowing for error in the smoothed U curve with separation of wild and hatchery fish
# for steelhead stocks in the Trinity River
# Each of the populations is fit using a SINGLE spline curve as this should be flexible
# enough to capture the individual behaviours
# Data input:
# Nstrata - number of strata
# n1 - number of marked fish released
# m2 - number of marked fish recaptured
# u2.W.YoY - number of wild YoY fish captured
# u2.W.1 - number of wild 1+ fish captured
# u2.H.1 - number of hatchery 1+ fish captured
# logitP.cov - covariates for logitP
# NlogitP.cov - number of logitP covariates
# SplineDesign.W.YoY- wild YoY fish spline design matrix of size [Nstrata, maxelement of n.b.notflat.W.YoY]
# SplineDesign.W.1 - wild 1+ fish spline design matrix of size [Nstrata, maxelement of n.b.notflat.W.1 ]
# SplineDesign.H.1 - hatchery 1+ fish spline design matrix of size [Nstrata, maxelement of n.b.notflat.H.1]
# This is set up prior to the call.
# b.flat.W.YoY - vector of strata indices where the prior for the b's will be flat for wild YoY fish
# b.flat.W.1 - vector of strata indices where the prior for the b's will be flat for wild 1+ fish
# b.flat.H.1 - vector of strata indices where the prior for the b's will be flat for hatchery 1+ fish
# this is normally the first two weeks of each spline segment
# n.b.flat.W.YoY - number of b coefficients that have a flat prior - wild YoY fish
# n.b.flat.W.1 - number of b coefficients that have a flat prior - wild 1+ fish
# n.b.flat.H.1 - number of b coefficients that have a flat prior - hatchery fish
# b.notflat.W.YoY - vector of strata indices where difference in coefficients is modelled - wild YoY fish
# b.notflat.W.1 - vector of strata indices where difference in coefficients is modelled - wild 1+ fish
# b.notflat.H.1 - vector of strata indices where difference in coefficients is modelled - hatchery 1+ fish
# n.b.notflat.W.YoY - number of b coefficients that do not have a flat prior - wild YoY fish
# n.b.notflat.W.1 - number of b coefficients that do not have a flat prior - wild 1+ fish
# n.b.notflat.H.1 - number of b coefficients that do not have a flat prior - hatchery 1+ fish
# tauU.alpha, tauU.beta - parameters for prior on tauU
# taueU.alpha, taueU.beta - parameters for prior on taueU
# prior.beta.logitP.mean, prior.beta.logitP.sd - parameters for prior of coefficient of covariates for logitP
# tauP.alpha, tauP.beta - parameter for prior on tauP (residual variance of logit(P)'s after adjusting for
# covariates)
#
# Parameters of the model are:
# p[i]
# logitP[i] = logit(p[i]) = logitP.cov*beta.logitP
# The beta coefficients have a prior that is N(mean= prior.beta.logitP.mean, sd= prior.beta.logitP.sd)
#      U.W.YoY[i] - number of unmarked wild YoY fish passing stratum i in population
#      U.W.1 [i]  - number of unmarked wild 1+ fish passing stratum i in population
# U.H.1 [i] - number of unmarked hatchery 1+ fish passing stratum i in population
# etaU.W.YoY[i] = log(U.W.YoY[i])
# etaU.W.1 [i] = log(U.W.1 [i])
# etaU.H.1 [i] = log(U.H.1 [i])
# which comes from spline with parameters bU.*[1... Knots+q] + error term eU.*[i]
##### Fit the spline for W.YoY - this covers the entire experiment ######
for(i in 1:Nstrata){
logUne.W.YoY[i] <- inprod(SplineDesign.W.YoY[i,1:n.bU.W.YoY],bU.W.YoY[1:n.bU.W.YoY]) # spline design matrix * spline coeff
etaU.W.YoY[i] ~ dnorm(logUne.W.YoY[i], taueU)T(,20) # add random error
eU.W.YoY[i] <- etaU.W.YoY[i] - logUne.W.YoY[i]
}
##### Fit the spline for W.1 - this covers the entire experiment ######
for(i in 1:Nstrata){
logUne.W.1[i] <- inprod(SplineDesign.W.1[i,1:n.bU.W.1],bU.W.1[1:n.bU.W.1]) # spline design matrix * spline coeff
etaU.W.1[i] ~ dnorm(logUne.W.1[i], taueU)T(,20) # add random error
eU.W.1[i] <- etaU.W.1[i] - logUne.W.1[i]
}
##### Fit the spline for hatchery fish - these fish only enter AFTER hatch.after ######
for(i in (hatch.after+1):Nstrata){
logUne.H.1[i] <- inprod(SplineDesign.H.1[i,1:n.bU.H.1],bU.H.1[1:n.bU.H.1]) # spline design matrix * spline coeff
etaU.H.1[i] ~ dnorm(logUne.H.1[i], taueU)T(,20) # add random error
eU.H.1[i] <- etaU.H.1[i] - logUne.H.1[i]
}
##### Model the capture probabilities #####
for(i in 1:hatch.after){
mu.logitP[i] <- inprod(logitP.cov[i,1:NlogitP.cov], beta.logitP[1:NlogitP.cov])
## logitP[i] ~ dnorm(mu.logitP[i],tauP)
# Use the u2.W.YoYcopy to break the cycle (in OpenBugs/Jags) and improve mixing
mu.epsilon[i] <- mu.logitP[i] - log(u2.W.YoYcopy[i] + u2.W.1copy[i] + 1) +
log(exp(etaU.W.YoY[i]) + exp(etaU.W.1[i]))
epsilon[i] ~ dnorm(mu.epsilon[i],tauP)
logitP[i] <- log(u2.W.YoYcopy[i] + u2.W.1copy[i] + 1) -
log(exp(etaU.W.YoY[i]) + exp(etaU.W.1[i])) + epsilon[i]
}
for(i in (hatch.after+1):Nstrata){
mu.logitP[i] <- inprod(logitP.cov[i,1:NlogitP.cov], beta.logitP[1:NlogitP.cov])
## logitP[i] ~ dnorm(mu.logitP[i],tauP)
mu.epsilon[i] <- mu.logitP[i] - log(u2.W.YoYcopy[i] + u2.W.1copy[i] + u2.H.1copy[i] + 1) +
log(exp(etaU.W.YoY[i]) + exp(etaU.W.1[i]) + exp(etaU.H.1[i]))
epsilon[i] ~ dnorm(mu.epsilon[i],tauP)
logitP[i] <- log(u2.W.YoYcopy[i] + u2.W.1copy[i] + u2.H.1copy[i] + 1) -
log(exp(etaU.W.YoY[i]) + exp(etaU.W.1[i]) + exp(etaU.H.1[i])) + epsilon[i]
}
##### Hyperpriors #####
## Run size - wild and hatchery fish - flat priors
for(i in 1:n.b.flat.W.YoY){
bU.W.YoY[b.flat.W.YoY[i]] ~ dnorm(0, 1E-6)
}
for(i in 1:n.b.flat.W.1){
bU.W.1[b.flat.W.1[i]] ~ dnorm(0, 1E-6)
}
for(i in 1:n.b.flat.H.1){
bU.H.1[b.flat.H.1[i]] ~ dnorm(0, 1E-6)
}
## Run size - priors on the difference for wild and hatchery fish
for(i in 1:n.b.notflat.W.YoY){
xiU.W.YoY[b.notflat.W.YoY[i]] <- 2*bU.W.YoY[b.notflat.W.YoY[i]-1] - bU.W.YoY[b.notflat.W.YoY[i]-2]
bU.W.YoY [b.notflat.W.YoY[i]] ~ dnorm(xiU.W.YoY[b.notflat.W.YoY[i]],tauU)
}
for(i in 1:n.b.notflat.W.1){
xiU.W.1[b.notflat.W.1[i]] <- 2*bU.W.1[b.notflat.W.1[i]-1] - bU.W.1[b.notflat.W.1[i]-2]
bU.W.1 [b.notflat.W.1[i]] ~ dnorm(xiU.W.1[b.notflat.W.1[i]],tauU)
}
for(i in 1:n.b.notflat.H.1){
xiU.H.1[b.notflat.H.1[i]] <- 2*bU.H.1[b.notflat.H.1[i]-1] - bU.H.1[b.notflat.H.1[i]-2]
bU.H.1 [b.notflat.H.1[i]] ~ dnorm(xiU.H.1[b.notflat.H.1[i]],tauU)
}
tauU ~ dgamma(tauU.alpha,tauU.beta) # Notice reduction from .0005 (in thesis) to .05
sigmaU <- 1/sqrt(tauU)
taueU ~ dgamma(taueU.alpha,taueU.beta) # dgamma(100,.05) # Notice reduction from .0005 (in thesis) to .05
sigmaeU <- 1/sqrt(taueU)
## Capture probabilities covariates
for(i in 1:NlogitP.cov){
beta.logitP[i] ~ dnorm(prior.beta.logitP.mean[i], 1/prior.beta.logitP.sd[i]^2)  # priors for the covariate coefficients are set via prior.beta.logitP.mean/sd
}
beta.logitP[NlogitP.cov+1] ~ dnorm(0, .01) # dummy so that covariates of length 1 function properly
tauP ~ dgamma(tauP.alpha,tauP.beta)
sigmaP <- 1/sqrt(tauP)
##### Likelihood contributions #####
## Number of marked fish recovered ##
for(i in 1:Nstrata){
logit(p[i]) <- logitP[i] # convert from logit scale
m2[i] ~ dbin(p[i],n1[i]) # recovery of marked fish
}
## captures of wild YoY and wild 1+ fish
for(i in 1:Nstrata){
U.W.YoY[i] <- round(exp(etaU.W.YoY[i])) # convert from log scale
U.W.1 [i] <- round(exp(etaU.W.1 [i])) # convert from log scale
u2.W.YoY[i] ~ dbin(p[i],U.W.YoY[i])
u2.W.1 [i] ~ dbin(p[i],U.W.1 [i])
}
## captures of hatchery fish - these can only occur AFTER hatch.after
for(i in (hatch.after+1):Nstrata){
U.H.1[i] <- round(exp(etaU.H.1[i])) # convert from log scale
u2.H.1[i] ~ dbin(p[i], U.H.1[i])
}
##### Derived Parameters #####
Utot.W.YoY <- sum( U.W.YoY[1:Nstrata]) # Total number of unmarked fish - wild YoY
Utot.W.1 <- sum( U.W.1 [1:Nstrata]) # Total number of unmarked fish - wild 1+
Utot.H.1 <- sum( U.H.1 [(hatch.after+1):Nstrata])# Total number of unmarked fish - hatchery 1+
Utot <- Utot.W.YoY + Utot.W.1 + Utot.H.1 # Grand total number of fish
# Because JAGS does not properly monitor partially defined vectors (see Section 2.5 of the JAGS user manual)
# we need to add dummy distributions for the parameters of interest prior to the hatchery fish arriving.
# This is not needed in OpenBugs, which returns the subset actually monitored, but we add it to be consistent
# between the two programs
for(i in 1:hatch.after){
U.H.1[i] ~ dnorm(0,1)  # These are completely arbitrary and never get updated
etaU.H.1[i] ~ dnorm(0,1)
logUne.H.1[i] ~ dnorm(0,1)
eU.H.1[i] ~ dnorm(0,1)
}
} # end of model
", fill=TRUE)
sink() # End of saving the Bugs program
Nstrata <- length(n1)
# Make a copy of u2.W.YoY/u2.W.1/u2.H.1 to improve mixing in the MCMC model
u2.W.YoYcopy <- stats::spline(x=1:Nstrata, y=u2.W.YoY, xout=1:Nstrata)$y
u2.W.YoYcopy <- round(u2.W.YoYcopy) # round to integers
u2.W.1copy <- stats::spline(x=1:Nstrata, y=u2.W.1, xout=1:Nstrata)$y
u2.W.1copy <- round(u2.W.1copy) # round to integers
# similarly make a copy of u2.H.1 to improve mixing in the MCMC model
# notice that hatchery fish occur at hatch.after or later
u2.H.1copy <- u2.H.1 * 0
u2.H.1copy[hatch.after:Nstrata] <- stats::spline(x=hatch.after:Nstrata, y=u2.H.1[hatch.after:Nstrata], xout=hatch.after:Nstrata)$y
u2.H.1copy <- round(u2.H.1copy) # round to integers
datalist <- list("Nstrata", "n1", "m2",
"u2.W.YoY", "u2.W.YoYcopy",
"u2.W.1", "u2.W.1copy",
"u2.H.1", "u2.H.1copy",
"hatch.after",
"logitP.cov", "NlogitP.cov",
"SplineDesign.W.YoY",
"b.flat.W.YoY", "n.b.flat.W.YoY", "b.notflat.W.YoY", "n.b.notflat.W.YoY", "n.bU.W.YoY",
"SplineDesign.W.1",
"b.flat.W.1", "n.b.flat.W.1", "b.notflat.W.1", "n.b.notflat.W.1", "n.bU.W.1",
"SplineDesign.H.1",
"b.flat.H.1", "n.b.flat.H.1", "b.notflat.H.1", "n.b.notflat.H.1", "n.bU.H.1",
"tauU.alpha", "tauU.beta", "taueU.alpha", "taueU.beta",
"prior.beta.logitP.mean", "prior.beta.logitP.sd",
"tauP.alpha", "tauP.beta")
parameters <- c("logitP", "beta.logitP", "tauP", "sigmaP",
"bU.W.YoY", "bU.W.1", "bU.H.1", "tauU", "sigmaU",
"eU.W.YoY", "eU.W.1", "eU.H.1", "taueU", "sigmaeU",
"Utot.W.YoY", "Utot.W.1", "Utot.H.1", "Utot", "logUne.W.YoY", "logUne.W.1", "logUne.H.1",
"etaU.W.YoY", "etaU.W.1", "etaU.H.1", "U.W.YoY", "U.W.1", "U.H.1")
if( any(is.na(m2))) {parameters <- c(parameters,"m2")}   # monitor in case of bad data where missing values are present
if( any(is.na(u2.W.YoY))) {parameters <- c(parameters,"u2.W.YoY")}
if( any(is.na(u2.W.1))) {parameters <- c(parameters,"u2.W.1")}
if( any(is.na(u2.H.1))) {parameters <- c(parameters,"u2.H.1")}
# Now to create the initial values, and the data prior to call to the MCMC sampler
Nstrata <- length(n1)
# Estimate number of wild and hatchery fish based on clip rate
u2.W.YoY[is.na(u2.W.YoY)] <- 1 # in case of missing values
u2.W.1 [is.na(u2.W.1)] <- 1 # in case of missing values
u2.H.1 [is.na(u2.H.1)] <- 1 # in case of missing values
avg.P <- sum(m2,na.rm=TRUE)/sum(n1, na.rm=TRUE)
Uguess.W.YoY <- pmax((u2.W.YoY+1)*(n1+2)/(m2+1), u2.W.YoY/avg.P, 1,na.rm=TRUE) # try and keep Uguess larger than observed values
Uguess.W.1 <- pmax((u2.W.1 +1)*(n1+2)/(m2+1), u2.W.1 /avg.P, 1, na.rm=TRUE) # try and keep Uguess larger than observed values
Uguess.H.1 <- pmax((u2.H.1 +1)*(n1+2)/(m2+1), u2.H.1 /avg.P, 1, na.rm=TRUE) # try and keep Uguess larger than observed values
Uguess.H.1[1:hatch.after] <- 0 # no hatchery fish prior to release from hatchery
# create the B-spline design matrix for wild and hatchery fish
# The design matrix for hatchery fish will still have rows corresponding to entries PRIOR to
# the hatchery release, but these are never used in the fitting routines
# There is a separate (single) spline for hatchery and wild fish with NO breakpoints
# The first two coefficients have a flat prior and the rest of the coefficients are modelled using
# differences between the successive coefficients
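# (i.e. a second-order random-walk prior, bU[i] ~ Normal(2*bU[i-1] - bU[i-2], precision tauU), as coded in the model above)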
# Wild YoY fish. This covers the entire experiment.
SplineDegree <- 3 # Degree of spline between occasions
knots <- seq(4,Nstrata,4)/(Nstrata+1) # a knot roughly every 4th stratum
SplineDesign.W.YoY <- bs((1:Nstrata)/(Nstrata+1), knots=knots, degree=SplineDegree, intercept=TRUE, Boundary.knots=c(0,1))
b.flat.W.YoY <- c(1,2)
b.notflat.W.YoY <- 3:(ncol(SplineDesign.W.YoY))
n.b.flat.W.YoY <- length(b.flat.W.YoY)
n.b.notflat.W.YoY <- length(b.notflat.W.YoY)
n.bU.W.YoY <- n.b.flat.W.YoY + n.b.notflat.W.YoY
init.bU.W.YoY <- stats::lm(log(Uguess.W.YoY+1) ~ SplineDesign.W.YoY-1)$coefficients # initial values for spline coefficients
# Wild 1+ fish. This covers the entire experiment.
SplineDegree <- 3 # Degree of spline between occasions
knots <- seq(4,Nstrata,4)/(Nstrata+1) # a knot roughly every 4th stratum
SplineDesign.W.1 <- bs((1:Nstrata)/(Nstrata+1), knots=knots, degree=SplineDegree, intercept=TRUE, Boundary.knots=c(0,1))
b.flat.W.1 <- c(1,2)
b.notflat.W.1 <- 3:(ncol(SplineDesign.W.1))
n.b.flat.W.1 <- length(b.flat.W.1)
n.b.notflat.W.1 <- length(b.notflat.W.1)
n.bU.W.1 <- n.b.flat.W.1 + n.b.notflat.W.1
init.bU.W.1 <- stats::lm(log(Uguess.W.1+1) ~ SplineDesign.W.1-1)$coefficients # initial values for spline coefficients
# hatchery fish. Notice they can only enter AFTER hatch.after. The spline design matrix still has rows
# of zero for 1:hatch.after to make it easier in Bugs
SplineDegree <- 3 # Degree of spline between occasions
knots <- (seq((hatch.after+4),Nstrata-1,4)-hatch.after)/(Nstrata-hatch.after+1) # a knot roughly every 4th stratum
SplineDesign.H.1 <- bs((1:(Nstrata-hatch.after))/(Nstrata-hatch.after+1), knots=knots, degree=SplineDegree, intercept=TRUE, Boundary.knots=c(0,1))
b.flat.H.1 <- c(1,2)
b.notflat.H.1 <- 3:(ncol(SplineDesign.H.1))
n.b.flat.H.1 <- length(b.flat.H.1)
n.b.notflat.H.1 <- length(b.notflat.H.1)
n.bU.H.1 <- n.b.flat.H.1 + n.b.notflat.H.1
init.bU.H.1 <- stats::lm(log(Uguess.H.1[(hatch.after+1):Nstrata]+1) ~ SplineDesign.H.1-1)$coefficients # initial values for spline coefficients
# patch up the initial rows of the spline design matrix
SplineDesign.H.1 <- rbind(matrix(0,nrow=hatch.after, ncol=ncol(SplineDesign.H.1)), SplineDesign.H.1)
#browser()
# Initial plot
# create an initial plot of the fit to the number of YoY and Age1 unmarked fish
plot.data <- rbind(data.frame(time=time, group="H.1", pch="H",
logUguess = log(Uguess.H.1+1),
spline=SplineDesign.H.1 %*% init.bU.H.1, stringsAsFactors=FALSE),
data.frame(time=time, group="W.1", pch="W",
logUguess = log(Uguess.W.1+1),
spline=SplineDesign.W.1 %*% init.bU.W.1, stringsAsFactors=FALSE),
data.frame(time=time, group="W.YoY", pch="w",
logUguess = log(Uguess.W.YoY+1),
spline=SplineDesign.W.YoY %*% init.bU.W.YoY, stringsAsFactors=FALSE))
init.plot <- ggplot(data=plot.data, aes_(x=~time, color=~group, shape=~group))+
ggtitle(title, subtitle="Initial spline fit to estimated log U[i] for W and H")+
geom_point(aes_(y=~logUguess), position=position_dodge(width=0.2))+
geom_line(aes_(y=~spline), position=position_dodge(width=0.2))+
xlab("Stratum")+ylab("log(U[i])")+
theme(legend.position=c(0,0), legend.justification=c(0,0))+
scale_x_continuous(breaks=seq(min(plot.data$time,na.rm=TRUE),max(plot.data$time,na.rm=TRUE),2))
if(save.output.to.files)ggsave(init.plot, filename=paste(prefix,"-initialU.pdf",sep=""), height=4, width=6, units="in")
#results$plots$plot.init <- init.plot # do this after running the MCMC chain (see end of function)
# get the logitP=logit(P) covariate matrix ready
logitP.cov <- as.matrix(logitP.cov)
NlogitP.cov <- ncol(as.matrix(logitP.cov))
# initial values for the parameters of the model
## init.vals <- function(){
## # Initial values for the probability of capture
## init.logitP <- pmax(-10,pmin(10,logit((m2+1)/(n1+2)))) # initial capture rates based on observed recaptures
## init.logitP[is.na(init.logitP)] <- -2 # those cases where initial probability is unknown
## init.beta.logitP <- as.vector(stats::lm( init.logitP ~ logitP.cov-1)$coefficients)
## init.beta.logitP[init.beta.logitP=NA] <- 0
## init.beta.logitP <- c(init.beta.logitP, 0) # add one extra element so that single beta is still written as a
## # vector in the init files etc.
## init.tauP <- 1/stats::var(init.logitP, na.rm=TRUE) # 1/variance of logit(p)'s (ignoring the covariates for now)
## # inital values for the spline coefficients
## init.bU.W.YoY <- stats::lm(log(Uguess.W.YoY+1) ~ SplineDesign.W.YoY-1)$coefficients # initial values for spline coefficients
## init.bU.W.1 <- stats::lm(log(Uguess.W.1 +1) ~ SplineDesign.W.1 -1)$coefficients # initial values for spline coefficients
## init.bU.H.1 <- stats::lm(log(Uguess.H.1[(hatch.after+1):Nstrata]+1) ~ SplineDesign.H.1[(hatch.after+1):Nstrata,]-1)$coefficients # initial values for spline coefficients
## init.eU.W.YoY <- as.vector(log(Uguess.W.YoY+1)-SplineDesign.W.YoY%*%init.bU.W.YoY) # error terms set as differ between obs and pred
## init.eU.W.1 <- as.vector(log(Uguess.W.1 +1)-SplineDesign.W.1 %*%init.bU.W.1) # error terms set as differ between obs and pred
## init.eU.H.1 <- as.vector(log(Uguess.H.1 +1)-SplineDesign.H.1 %*%init.bU.H.1) # error terms set as differ between obs and pred
## init.etaU.W.YoY <- log(Uguess.W.YoY+1)
## init.etaU.W.1 <- log(Uguess.W.1 +1)
## init.etaU.H.1 <- log(Uguess.H.1 +1)
## init.etaU.H.1[1:hatch.after] <- NA # these are never used.
## # variance of spline difference (use only the wild fish to initialize)
## sigmaU <- stats::sd( init.bU.W.YoY[b.notflat.W.YoY]-2*init.bU.W.YoY[b.notflat.W.YoY-1]+init.bU.W.YoY[b.notflat.W.YoY-2], na.rm=TRUE)
## init.tauU <- 1/sigmaU^2
## # variance of error in the U' over and above the spline fit (use only the wild fish to initialize)
## sigmaeU <- stats::sd(init.eU.W.YoY, na.rm=TRUE)
## init.taueU <- 1/sigmaeU^2
## # initialize the u2.* where missing
## init.u2.W.YoY <- u2.W.YoY
## init.u2.W.YoY[ is.na(u2.W.YoY)] <- 100
## init.u2.W.YoY[!is.na(u2.W.YoY)] <- NA
## init.u2.W.1 <- u2.W.1
## init.u2.W.1 [ is.na(u2.W.1)] <- 100
## init.u2.W.1 [!is.na(u2.W.1)] <- NA
## init.u2.H.1 <- u2.H.1
## init.u2.H.1 [ is.na(u2.H.1)] <- 100
## init.u2.H.1 [!is.na(u2.H.1)] <- NA
## list(logitP=init.logitP, beta.logitP=init.beta.logitP, tauP=init.tauP,
## bU.W.YoY=init.bU.W.YoY,
## bU.W.1 =init.bU.W.1,
## bU.H.1 =init.bU.H.1,
## tauU=init.tauU, taueU=init.taueU,
## etaU.W.YoY=init.etaU.W.YoY, etaU.W.1=init.etaU.W.1, etaU.H.1=init.etaU.H.1)
## }
## #browser()
## initial values for the parameters of the model
init.vals <- genInitVals(model="TSPDE-WHsteel",
n1=n1,
m2=m2,
u2=list(W.YoY=u2.W.YoY,W.1=u2.W.1,H.1=u2.H.1),
logitP.cov=logitP.cov,
SplineDesign=list(W.YoY=SplineDesign.W.YoY,W.1=SplineDesign.W.1,H.1=SplineDesign.H.1),
hatch.after=hatch.after,
n.chains=n.chains)
## Generate data list
data.list <- list()
for(i in 1:length(datalist)){
data.list[[length(data.list)+1]] <-get(datalist[[i]])
}
names(data.list) <- as.list(datalist)
# Call the MCMC sampler
results <- run.MCMC(modelFile=model.file,
dataFile=data.file,
dataList=data.list,
initFiles=init.files,
initVals=init.vals,
parameters=parameters,
nChains=n.chains,
nIter=n.iter,
nBurnin=n.burnin,
nSims=n.sims,
overRelax=FALSE,
initialSeed=InitialSeed,
working.directory=working.directory,
debug=debug)
results$plots$plot.init <- init.plot # do this after running the MCMC chain (see end of function)
return(results)
}
|
/scratch/gouwar.j/cran-all/cranData/BTSPAS/R/TimeStratPetersenDiagErrorWHSteel.R
|
# 2021-10-24 CJS Added trunc.logitP to fix plotting problems with extreme values of logitP
# 2020-12-15 CJS Remove sampfrac from code
# 2020-11-07 CJS Allowed user to specify prior for beta coefficient for logitP
# 2018-12-19 CJS deprecated use of sampling fraction
# 2018-12-06 CJS converted report to textConnection
# 2018-12-06 CJS converted initial plot to ggplots
# 2018-12-02 CJS converted trace plots to ggplots
# 2018-12-01 CJS converted posterior plots to ggplot2
# 2018-11-30 CJS converted acf to ggplot2
# 2018-11-29 CJS fixed issue with printing of large results got cutoff
# 2018-11-28 CJS remove reference to OpenBugs
# 2015-06-10 CJS convert gof plot to ggplot. Bug fix
# 2014-09-01 CJS conversion to jags
# 2012-08-30 CJS fixed problem with NAs in any() and all() in error checking
# 2011-06-13 CJS added p-values to results
# 2010-11-25 CJS pretty printing of final population estimates
# 2010-09-06 CJS forced input vectors to be vectors
# 2010-08-06 CJS added trace plots of logitP and logU
# 2010-08-03 CJS added version/date to final results
# 2010-03-12 CJS added n.sims etc to calling arguments so users can set
# 2009-12-01 CJS added openbugs/winbugs directory to argument list; added some basic error checking of arguments
#' Wrapper (*_fit) and function to call the Time Stratified Petersen Estimator
#' with Diagonal Entries and separating Wild from Hatchery Steelhead function.
#'
#' Takes the number of marked fish released, the number of recaptures, and the
#' number of unmarked fish and uses Bayesian methods to fit a fit a spline
#' through the population numbers and a hierarchical model for the trap
#' efficiencies over time. The output is written to files and an MCMC object
#' is also created with samples from the posterior.
#'
#' Normally, data is passed to the wrapper which then calls the fitting
#' function.
#'
#'
#' @aliases TimeStratPetersenDiagErrorWHSteel_fit
#' @template title
#' @template prefix
#' @template time
#' @template n1
#' @param m2 A numeric vector of the number of marked fish from n1 that are
#' recaptured in each time stratum. All recaptures take place within the
#' stratum of release. Use the \code{\link{TimeStratPetersenNonDiagError_fit}}
#' function for cases where recaptures take place outside the stratum of
#' release.
#' @param u2.W.YoY A numeric vector of the number of unmarked wild Young-of-Year
#' fish captured in each stratum.
#' @param u2.W.1 A numeric vector of the number of unmarked wild age 1+ fish
#' captured in each stratum.
#' @param u2.H.1 A numeric vector of the number of unmarked hatchery age 1+ fish
#' (i.e. adipose fin clipped) captured in each stratum.
#' @template sampfrac
#' @param hatch.after A numeric vector with elements belonging to \code{time}.
#' At which point do hatchery fish arrive? They arrive in the immediate stratum
#' AFTER these entries.
#' @template bad.n1
#' @template bad.m2
#' @param bad.u2.W.YoY A numeric vector with elements belonging to \code{time}.
#' In some cases, something goes wrong in the stratum, and the number of wild
#' unmarked Young-of-Year fish should be ignored.
#' @param bad.u2.W.1 A numeric vector with elements belonging to \code{time}.
#' In some cases, something goes wrong in the stratum, and the number of wild
#' unmarked age 1+ fish should be ignored.
#' @param bad.u2.H.1 A numeric vector with elements belonging to \code{time}.
#' In some cases, something goes wrong in the stratum, and the number of
#' hatchery unmarked (but adipose fin clipped) age 1+ fish should be ignored.
#' @template logitP.cov
#' @template mcmc-parms
#' @template tauU.alpha.beta
#' @template taueU.alpha.beta
#' @template prior.beta.logitP.mean.sd
#' @template tauP.alpha.beta
#' @template run.prob
#' @template debug
#' @template InitialSeed
#' @template save.output.to.files
#' @template trunc.logitP
#'
#' @return An MCMC object with samples from the posterior distribution. A
#' series of graphs and text file are also created in the working directory.
#' @template author
#' @template references
#' @keywords ~models ~smooth
#' @examples
#'
#' ##---- See the vignettes for example on how to use this package.
#'
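#' \dontrun{
#' # A minimal sketch of a call, using made-up numbers purely for illustration;
#' # real analyses typically have many more strata and much longer MCMC chains.
#' demo.time     <- 1:10
#' demo.n1       <- c( 50,  60,  40,  55,  45,  35,  30,  25,  20,  15)
#' demo.m2       <- c( 10,  15,   9,  12,  10,   8,   6,   5,   4,   3)
#' demo.u2.W.YoY <- c(100, 120,  90, 150, 130, 110,  80,  60,  40,  20)
#' demo.u2.W.1   <- c( 40,  50,  45,  60,  55,  35,  25,  20,  10,   5)
#' demo.u2.H.1   <- c(  0,   0,   0,   0,  80, 120, 150, 100,  50,  20)
#' fit <- TimeStratPetersenDiagErrorWHSteel_fit(
#'           title="Demo", prefix="demo-",
#'           time=demo.time, n1=demo.n1, m2=demo.m2,
#'           u2.W.YoY=demo.u2.W.YoY, u2.W.1=demo.u2.W.1, u2.H.1=demo.u2.H.1,
#'           hatch.after=4,
#'           n.chains=3, n.iter=10000, n.burnin=5000, n.sims=500,
#'           save.output.to.files=FALSE)
#' }
#'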
#' @export TimeStratPetersenDiagErrorWHSteel_fit
#' @importFrom stats runif var sd
TimeStratPetersenDiagErrorWHSteel_fit <-
function( title="TSPDE-WHSteel", prefix="TSPDE-WHSteel-",
time, n1, m2, u2.W.YoY, u2.W.1, u2.H.1, sampfrac=rep(1,length(u2.W.YoY)),
hatch.after=NULL,
bad.n1=c(), bad.m2=c(), bad.u2.W.YoY=c(), bad.u2.W.1=c(), bad.u2.H.1=c(),
logitP.cov=as.matrix(rep(1,length(n1))),
n.chains=3, n.iter=200000, n.burnin=100000, n.sims=2000,
tauU.alpha=1, tauU.beta=.05, taueU.alpha=1, taueU.beta=.05,
prior.beta.logitP.mean = c(logit(sum(m2,na.rm=TRUE)/sum(n1,na.rm=TRUE)),rep(0, ncol(as.matrix(logitP.cov))-1)),
prior.beta.logitP.sd = c(stats::sd(logit((m2+.5)/(n1+1)),na.rm=TRUE), rep(10, ncol(as.matrix(logitP.cov))-1)),
tauP.alpha=.001, tauP.beta=.001,
run.prob=seq(0,1,.1), # what percentiles of run timing are wanted
debug=FALSE, debug2=FALSE,
InitialSeed=ceiling(stats::runif(1,min=0, 1000000)),
save.output.to.files=TRUE,
trunc.logitP=15) {
# Fit a Time Stratified Petersen model with diagonal entries and with smoothing on U allowing for random error,
# covariates for the capture probabilities, and separating the wild vs hatchery fish for STEELHEAD releases
# The steelhead are nice because 100% of hatchery fish are adipose fin clipped and no wild fish are adipose fin clipped
# The "diagonal entries" implies that no marked fish are recaptured outside the (time) stratum of release
#
version <- '2021-11-02'
options(width=200)
# Input parameters are
# prefix - prefix used for files created with the analysis results
# this should be in standard Window's format, eg. JC-2002-ST-TSPDE
# to which is appended various suffixes for plots etc
# time - vector of stratum numbers. For example, 9:38 would indicate that the
# Trinity River system sampled weeks 9 to 38. If some values are omitted
# e.g. time=10 not present, this indicates sampling did not take place this
# week. The data are expanded and interpolation for the missing week takes place
# n1, m2 - the input data consisting of fish marked and released and then recaptured.
# The n1 and m2 are used to calibrate the trap
# u2.W.YoY - number of wild YoY fish (no clips)
# u2.W.1 - number of wild age 1+ fish (no clips)
# u2.H.1 - number of hatchery age 1+ fish (ad fin clipped). 100% of hatchery production is fin clipped
# sampfrac - DEPRECATED. DO NOT USE ANYMORE.
# hatch.after - julian week AFTER which hatchery fish are released
# bad.m2 - list of julian numbers where the value of m2 is suspect.
# For example, the capture rate could be extremely low.
# These are set to NA prior to the call to JAGS
# bad.u2.W.YoY
# bad.u2.W.1
# bad.u2.H.1 - list of julian weeks where the values of u2.* are suspect.
# These are set to NA prior to the call to JAGS
# logitP.cov - matrix of covariates for logit(P). If the strata times are "missing" some values, an intercept is assumed
# for the first element of the covariance matrix and 0 for the rest of the covariates.
# CAUTION - this MAY not be what you want to do. It is likely best to enter ALL strata
# if you have any covariates. The default, if not specified, is a constant (the mean logit)
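#              e.g. (illustrative only) logitP.cov=cbind(1, log.flow) models logit(P[i]) = beta[1] + beta[2]*log.flow[i]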
# tauU.alpha, tauU.beta - parameters for the prior on variance in spline coefficients
# taueU.alpha, taueU.beta - parameters for the prior on variance in log(U) around fitted spline
# prior.beta.logitP.mean, prior.beta.logitP.sd - parameters for the prior on mean logit(P)'s [The intercept term]
# The other covariates are assigned priors of a mean of 0 and a sd of 30
# tauP.alpha, tauP.beta - parameters for the prior on 1/var of residual error in logit(P)'s
# run.prob - percentiles of run timing wanted
# debug - if TRUE, then this is a test run with very small MCMC chains run to test out the data
#             and JAGS will run and stop, waiting for you to exit and complete
# force input vectors to be vectors
time <- as.vector(time)
n1 <- as.vector(n1)
m2 <- as.vector(m2)
u2.W.YoY <- as.vector(u2.W.YoY)
u2.W.1 <- as.vector(u2.W.1)
u2.H.1 <- as.vector(u2.H.1)
sampfrac <- as.vector(sampfrac)
# Do some basic error checking
# 1. Check that length of n1, m2, u2, sampfrac, time all match
if(stats::var(c(length(n1),length(m2),length(u2.W.YoY),length(u2.W.1),length(u2.H.1), length(sampfrac),length(time)))>0){
cat("***** ERROR ***** Lengths of n1, m2, u2.W.YoY, u2.W.1, u2.H.1, sampfrac, time must all be equal. They are:",
length(n1)," ",length(m2)," ",length(u2.W.YoY)," ",length(u2.W.1)," ",length(u2.H.1)," ", length(sampfrac)," ",length(time),"\n")
return()}
if(!is.numeric(n1)){
cat("***** ERROR ***** n1 must be numeric. You have:",
paste(n1,collapse=", "),"\n")
return()}
if(any(is.na(n1))){
cat("***** ERROR ***** All values of n1 must not be missing. You have: ",
paste(n1,collapse=", "),"\n")
return()}
if(any(n1 < 0, na.rm=TRUE)){
cat("***** ERROR ***** All values of n1 must be non-negative. You have: ",
paste(n1,collapse=", "),"\n")
return()}
if(length(logitP.cov) %% length(n1) != 0){
cat("***** ERROR ***** Dimension of covariate vector doesn't match length of n1 etc They are:",
length(n1)," ",length(logitP.cov)," ",dim(logitP.cov),"\n")
return()}
# 2. Check that m2<= n1
if(any(m2>n1, na.rm=TRUE)){
cat("***** ERROR ***** m2 must be <= n1. The arguments are \n n1:",
paste(n1,collapse=","),"\n m2:",
paste(m2,collapse=","),"\n")
return()}
# 3. Elements of bad.m2, bad.u2.W.YoY, bad.u2.W.1, bad.u2.H.1, and hatch.after must belong to time
if(!all(bad.m2 %in% time, na.rm=TRUE)){
cat("***** ERROR ***** bad.m2 must be elements of strata identifiers. You entered \n bad.m2:",
paste(bad.m2,collapse=","),"\n Strata identifiers are \n time:",
paste(time, collapse=","), "\n")
return()}
if(!all(bad.u2.W.YoY %in% time, na.rm=TRUE)){
cat("***** ERROR ***** bad.u2.W.YoY must be elements of strata identifiers. You entered \n bad.u2.W.YoY:",
paste(bad.u2.W.YoY,collapse=","),"\n Strata identifiers are \n time:",
paste(time, collapse=","), "\n")
return()}
if(!all(bad.u2.W.1 %in% time, na.rm=TRUE)){
cat("***** ERROR ***** bad.u2.W.1 must be elements of strata identifiers. You entered \n bad.u2.W.1:",
paste(bad.u2.W.1,collapse=","),"\n Strata identifiers are \n time:",
paste(time, collapse=","), "\n")
return()}
if(!all(bad.u2.H.1 %in% time, na.rm=TRUE)){
cat("***** ERROR ***** bad.u2.H.1 must be elements of strata identifiers. You entered \n bad.u2.H.1:",
paste(bad.u2.H.1,collapse=","),"\n Strata identifiers are \n time:",
paste(time ,collapse=","), "\n")
return()}
if(!all(hatch.after %in% time, na.rm=TRUE)){
cat("***** ERROR ***** hatch.after must be elements of strata identifiers. You entered \n hatch.after:",
paste(hatch.after,collapse=","),"\n Strata identifiers are \n time:",
paste(time, collapse=","), "\n")
return()}
# Check that that the prior.beta.logitP.mean and prior.beta.logitP.sd length=number of columns of covariates
logitP.cov <- as.matrix(logitP.cov)
if(!is.vector(prior.beta.logitP.mean) | !is.vector(prior.beta.logitP.sd)){
stop("prior.beta.logitP.mean and prior.beta.logitP.sd must be vectors")
}
if(!is.numeric(prior.beta.logitP.mean) | !is.numeric(prior.beta.logitP.sd)){
stop("prior.beta.logitP.mean and prior.beta.logitP.sd must be numeric")
}
if(length(prior.beta.logitP.mean) != ncol(logitP.cov) | length(prior.beta.logitP.sd) != ncol(logitP.cov)){
stop("prior.beta.logitP.mean and prior.beta.logitP.sd must be same length as number columns in covariate matrix")
}
# Deprecation of sampling fraction.
if(any(sampfrac != 1)){
cat("***** ERROR ***** Sampling fraction is deprecated for any values other than 1. DO NOT USE ANYMORE. ")
return()
}
results.filename <- paste(prefix,"-results.txt",sep="")
stdout <- vector('character')
report <- textConnection('stdout', 'wr', local = TRUE)
sink(report)
cat(paste("Time Stratified Petersen with Diagonal recaptures, error in smoothed U, separating wild and hatchery fish, STEELHEAD ONLY - ", date()))
cat("\nVersion: ", version)
cat("\n\n", title, "Results \n\n")
cat("*** Raw data *** \n")
temp<- cbind(time, n1, m2, u2.W.YoY, u2.W.1, u2.H.1, logitP.cov)
colnames(temp)<- c('time', 'n1','m2','u2.W.YoY', 'u2.W.1', 'u2.H.1', paste("logitPcov[", 1:ncol(as.matrix(logitP.cov)),"]",sep="") )
print(temp)
cat("\n\n")
cat("Hatchery fish are released AFTER strata: ", hatch.after,"\n\n")
cat("The following strata had m2 set to missing: ",
if(length(bad.m2)>0){bad.m2} else {" NONE"}, "\n")
cat("The following strata had u2.W.YoY set to missing: ",
if(length(bad.u2.W.YoY)>0){bad.u2.W.YoY} else {" NONE"}, "\n")
cat("The following strata had u2.W.1 set to missing: ",
if(length(bad.u2.W.1)>0){bad.u2.W.1} else {" NONE"}, "\n")
cat("The following strata had u2.H.1 set to missing: ",
if(length(bad.u2.H.1)>0){bad.u2.H.1} else {" NONE"}, "\n")
# Pooled Petersen estimator over ALL of the data including when no releases take place, bad m2, bad.u2.* values.
cat("\n\n*** Pooled Petersen Estimates based on pooling over ALL strata ***\n\n")
cat("W.YoY Total n1=", sum(n1, na.rm=TRUE),"; m2=",sum(m2, na.rm=TRUE),"; u2=",sum(u2.W.YoY, na.rm=TRUE),"\n")
pp <- SimplePetersen(sum(n1, na.rm=TRUE), sum(m2, na.rm=TRUE), sum(u2.W.YoY, na.rm=TRUE))
cat("W.YoY Est U(total) ", format(round(pp$U.est),big.mark=",")," (SE ", format(round(pp$U.se), big.mark=","), ")\n")
cat("W.YoY Est N(total) ", format(round(pp$N.est),big.mark=",")," (SE ", format(round(pp$N.se), big.mark=","), ")\n\n\n")
cat("W.1 Total n1=", sum(n1, na.rm=TRUE),"; m2=",sum(m2, na.rm=TRUE),"; u2=", sum(u2.W.1, na.rm=TRUE),"\n")
pp <- SimplePetersen(sum(n1, na.rm=TRUE), sum(m2, na.rm=TRUE), sum(u2.W.1, na.rm=TRUE))
cat("W.1 Est U(total) ", format(round(pp$U.est),big.mark=",")," (SE ", format(round(pp$U.se), big.mark=","), ")\n")
cat("W.1 Est N(total) ", format(round(pp$N.est),big.mark=",")," (SE ", format(round(pp$N.se), big.mark=","), ")\n\n\n")
cat("H.1 Total n1=", sum(n1, na.rm=TRUE),"; m2=",sum(m2, na.rm=TRUE),"; u2=",sum(u2.H.1, na.rm=TRUE),"\n")
pp <- SimplePetersen(sum(n1, na.rm=TRUE), sum(m2, na.rm=TRUE), sum(u2.H.1, na.rm=TRUE))
cat("H.1 Est U(total) ", format(round(pp$U.est),big.mark=",")," (SE ", format(round(pp$U.se), big.mark=","), ")\n")
cat("H.1 Est N(total) ", format(round(pp$N.est),big.mark=",")," (SE ", format(round(pp$N.se), big.mark=","), ")\n\n\n")
# Obtain the Pooled Petersen estimator for each group excluding bad.m2, bad.u2.* values
select <- (n1>0) & (!is.na(n1)) & (!is.na(m2)) & (!is.na(u2.W.YoY)) & (!is.na(u2.W.1)) & (!is.na(u2.H.1))
cat("\n\n*** Pooled Petersen Estimate excluding bad m2, u2.* values adjusting for sampling fractions ***\n\n")
cat("The following strata are excluded because n1=0 or NA values in m2, u2.W.YoY, u2.W.1, u2.H.1 :", time[!select],"\n\n")
temp.n1 <- n1 [select]
temp.m2 <- m2 [select]
temp.u2.W.YoY <- u2.W.YoY[select]
temp.u2.W.1 <- u2.W.1 [select]
temp.u2.H.1 <- u2.H.1 [select]
cat("W.YoY Total n1=", sum(temp.n1, na.rm=TRUE),"; m2=",sum(temp.m2, na.rm=TRUE),"; u2=",sum(temp.u2.W.YoY, na.rm=TRUE),"\n")
pp <- SimplePetersen(sum(temp.n1, na.rm=TRUE), sum(temp.m2, na.rm=TRUE), sum(temp.u2.W.YoY, na.rm=TRUE))
cat("W.YoY Est U(total) ", format(round(pp$U.est),big.mark=",")," (SE ", format(round(pp$U.se), big.mark=","), ")\n")
cat("W.YoY Est N(total) ", format(round(pp$N.est),big.mark=",")," (SE ", format(round(pp$N.se), big.mark=","), ")\n\n\n")
cat("W.1 Total n1=", sum(temp.n1, na.rm=TRUE),"; m2=",sum(temp.m2, na.rm=TRUE),"; u2=", sum(temp.u2.W.1, na.rm=TRUE),"\n")
pp <- SimplePetersen(sum(temp.n1, na.rm=TRUE), sum(temp.m2, na.rm=TRUE), sum(temp.u2.W.1, na.rm=TRUE))
cat("W.1 Est U(total) ", format(round(pp$U.est),big.mark=",")," (SE ", format(round(pp$U.se), big.mark=","), ")\n")
cat("W.1 Est U(total) ", format(round(pp$N.est),big.mark=",")," (SE ", format(round(pp$N.se), big.mark=","), ")\n\n\n")
cat("H.1 Total n1=", sum(temp.n1, na.rm=TRUE),"; m2=",sum(temp.m2, na.rm=TRUE),"; u2=",sum(temp.u2.H.1, na.rm=TRUE),"\n")
pp <- SimplePetersen(sum(temp.n1, na.rm=TRUE), sum(temp.m2, na.rm=TRUE), sum(temp.u2.H.1, na.rm=TRUE))
cat("H.1 Est U(total) ", format(round(pp$U.est),big.mark=",")," (SE ", format(round(pp$U.se), big.mark=","), ")\n")
cat("H.1 Est U(total) ", format(round(pp$N.est),big.mark=",")," (SE ", format(round(pp$N.se), big.mark=","), ")\n\n\n")
# Obtain the Pooled Petersen estimator after fixup of bad.m2, bad.u2.* values after adjusting for sampling fractions
temp.m2 <- m2
index.bad.m2 <- as.vector((1:length(time)) %*% outer(time,bad.m2,"=="))
temp.m2[index.bad.m2] <- NA
temp.u2.W.YoY <- u2.W.YoY
index.bad.u2.W.YoY <- as.vector((1:length(time)) %*% outer(time,bad.u2.W.YoY,"=="))
temp.u2.W.YoY[index.bad.u2.W.YoY] <- NA
temp.u2.W.1 <- u2.W.1
index.bad.u2.W.1 <- as.vector((1:length(time)) %*% outer(time,bad.u2.W.1,"=="))
temp.u2.W.1[index.bad.u2.W.1] <- NA
temp.u2.H.1 <- u2.H.1
index.bad.u2.H.1 <- as.vector((1:length(time)) %*% outer(time,bad.u2.H.1,"=="))
temp.u2.H.1[index.bad.u2.H.1] <- NA
select <- (n1>0) & (!is.na(n1)) & (!is.na(temp.m2)) & (!is.na(temp.u2.W.YoY) & (!is.na(temp.u2.W.1)) & (!is.na(temp.u2.H.1)) )
cat("\n\n*** Pooled Petersen Estimate after removing bad m2, u2.* values adjusting for sampling fraction ***\n\n")
cat("The following strata had m2 set to missing: ",
if(length(bad.m2)>0){bad.m2} else {" NONE"}, "\n")
cat("The following strata had u2.W.YoY set to missing: ",
if(length(bad.u2.W.YoY)>0){bad.u2.W.YoY} else {" NONE"}, "\n")
cat("The following strata had u2.W.1 set to missing: ",
if(length(bad.u2.W.1)>0){bad.u2.W.1} else {" NONE"}, "\n")
cat("The following strata had u2.H.1 set to missing: ",
if(length(bad.u2.H.1)>0){bad.u2.H.1} else {" NONE"}, "\n")
cat("\nThe following strata are excluded because n1=0 or NA values in m2, u2.*:", time[!select],"\n\n")
temp.n1 <- n1 [select]
temp.m2 <- m2 [select]
temp.u2.W.YoY <- u2.W.YoY[select]
temp.u2.W.1 <- u2.W.1 [select]
temp.u2.H.1 <- u2.H.1 [select]
cat("W.YoY Total n1=", sum(temp.n1),"; m2=",sum(temp.m2),"; u2=",sum(temp.u2.W.YoY),"\n")
pp <- SimplePetersen(sum(temp.n1), sum(temp.m2), sum(temp.u2.W.YoY))
cat("W.YoY Est U(total) ", format(round(pp$U.est),big.mark=",")," (SE ", format(round(pp$U.se), big.mark=","), ")\n")
cat("W.YoY Est N(total) ", format(round(pp$N.est),big.mark=",")," (SE ", format(round(pp$N.se), big.mark=","), ")\n\n\n")
cat("W.1 Total n1=", sum(temp.n1),"; m2=",sum(temp.m2),"; u2=",sum(temp.u2.W.1),"\n")
pp <- SimplePetersen(sum(temp.n1), sum(temp.m2), sum(temp.u2.W.1))
cat("W.1 Est U(total) ", format(round(pp$U.est),big.mark=",")," (SE ", format(round(pp$U.se), big.mark=","), ")\n")
cat("W.1 Est N(total) ", format(round(pp$N.est),big.mark=",")," (SE ", format(round(pp$N.se), big.mark=","), ")\n\n\n")
cat("H.1 Total n1=", sum(temp.n1),"; m2=",sum(temp.m2),"; u2=",sum(temp.u2.H.1),"\n")
pp <- SimplePetersen(sum(temp.n1), sum(temp.m2), sum(temp.u2.H.1))
cat("H.1 Est U(total) ", format(round(pp$U.est),big.mark=",")," (SE ", format(round(pp$U.se), big.mark=","), ")\n")
cat("H.1 Est N(total) ", format(round(pp$N.est),big.mark=",")," (SE ", format(round(pp$N.se), big.mark=","), ")\n\n\n")
# Obtain Petersen estimator for each stratum prior to removing bad m2 values
cat("*** Stratified Petersen Estimator for each stratum PRIOR to removing bad m2 values adjusting for sampling fractions ***\n\n")
cat("W.YoY raw data\n")
temp.n1 <- n1
temp.m2 <- m2
temp.u2 <- u2.W.YoY
sp <- SimplePetersen(temp.n1, temp.m2, temp.u2)
temp <- cbind(time, temp.n1, temp.m2, temp.u2, round(sp$U.est), round(sp$U.se))
colnames(temp) <- c('time', 'n1','m2','u2.W.YoY', 'U[i]', 'SE(U[i])')
print(temp)
cat("\n")
cat("W.YoY Est U(total) ", format(round(sum(sp$U.est, na.rm=TRUE)),big.mark=","),
" (SE ", format(round(sqrt(sum(sp$U.se^2, na.rm=TRUE))), big.mark=","), ")\n\n\n")
cat("W.1 raw data\n")
temp.n1 <- n1
temp.m2 <- m2
temp.u2 <- u2.W.1
sp <- SimplePetersen(temp.n1, temp.m2, temp.u2)
temp <- cbind(time, temp.n1, temp.m2, temp.u2, round(sp$U.est), round(sp$U.se))
colnames(temp) <- c('time', 'n1','m2','u2.W.1', 'U[i]', 'SE(U[i])')
print(temp)
cat("\n")
cat("W.1 Est U(total) ", format(round(sum(sp$U.est, na.rm=TRUE)),big.mark=","),
" (SE ", format(round(sqrt(sum(sp$U.se^2, na.rm=TRUE))), big.mark=","), ")\n\n\n")
cat("H.1 raw data\n")
temp.n1 <- n1
temp.m2 <- m2
temp.u2 <- u2.H.1
sp <- SimplePetersen(temp.n1, temp.m2, temp.u2)
temp <- cbind(time, temp.n1, temp.m2, temp.u2, round(sp$U.est), round(sp$U.se))
colnames(temp) <- c('time', 'n1','m2','u2.H.1', 'U[i]', 'SE(U[i])')
print(temp)
cat("\n")
cat("H.1 Est U(total) ", format(round(sum(sp$U.est, na.rm=TRUE)),big.mark=","),
" (SE ", format(round(sqrt(sum(sp$U.se^2, na.rm=TRUE))), big.mark=","), ")\n\n\n")
# Obtain Petersen estimator for each stratum after removing bad m2, u2.* values
cat("*** Stratified Petersen Estimator for each stratum AFTER removing bad m2, u2.* values ***\n\n")
temp.n1 <- n1
temp.m2 <- m2
temp.m2[index.bad.m2] <- NA
temp.u2.W.YoY <- u2.W.YoY
temp.u2.W.YoY[index.bad.u2.W.YoY] <- NA
temp.u2.W.1 <- u2.W.1
temp.u2.W.1[index.bad.u2.W.1] <- NA
temp.u2.H.1 <- u2.H.1
temp.u2.H.1[index.bad.u2.H.1] <- NA
cat("\nW.YoY raw data\n")
sp <- SimplePetersen(temp.n1, temp.m2, temp.u2.W.YoY)
temp <- cbind(time, temp.n1, temp.m2, temp.u2.W.YoY, round(sp$U.est), round(sp$U.se))
colnames(temp) <- c('time', 'n1','m2','u2.W.YoY(adj)', 'U[i]', 'SE(U[i])')
print(temp)
cat("\n")
cat("W.YoY Est U(total) ", format(round(sum(sp$U.est, na.rm=TRUE)),big.mark=","),
" (SE ", format(round(sqrt(sum(sp$U.se^2, na.rm=TRUE))), big.mark=","), ")\n\n\n")
cat("\nW.1 raw data\n")
sp <- SimplePetersen(temp.n1, temp.m2, temp.u2.W.1)
temp <- cbind(time, temp.n1, temp.m2, temp.u2.W.1, round(sp$U.est), round(sp$U.se))
colnames(temp) <- c('time', 'n1','m2','u2.W.1(adj)', 'U[i]', 'SE(U[i])')
print(temp)
cat("\n")
cat("W.1 Est U(total) ", format(round(sum(sp$U.est, na.rm=TRUE)),big.mark=","),
" (SE ", format(round(sqrt(sum(sp$U.se^2, na.rm=TRUE))), big.mark=","), ")\n\n\n")
cat("\nH.1 raw data\n")
sp <- SimplePetersen(temp.n1, temp.m2, temp.u2.H.1)
temp <- cbind(time, temp.n1, temp.m2, temp.u2.H.1, round(sp$U.est), round(sp$U.se))
colnames(temp) <- c('time', 'n1','m2','u2.H.1(adj)', 'U[i]', 'SE(U[i])')
print(temp)
cat("\n")
cat("H.1 Est U(total) ", format(round(sum(sp$U.est, na.rm=TRUE)),big.mark=","),
" (SE ", format(round(sqrt(sum(sp$U.se^2, na.rm=TRUE))), big.mark=","), ")\n\n\n")
# Test if pooling can be done
cat("*** Test if pooled Petersen is allowable. [Check if marked fractions are equal] ***\n\n")
select <- (n1>0) & (!is.na(n1)) & (!is.na(temp.m2))
temp.n1 <- n1[select]
temp.m2 <- m2[select]
test <- TestIfPool( temp.n1, temp.m2)
cat("(Large Sample) Chi-square test statistic ", test$chi$statistic," has p-value", test$chi$p.value,"\n\n")
temp <- cbind(time[select],test$chi$observed, round(test$chi$expected,1), round(test$chi$residuals^2,1))
colnames(temp) <- c('time','n1-m2','m2','E[n1-m2]','E[m2]','X2[n1-m2]','X2[m2]')
print(temp)
cat("\n Be cautious of using this test in cases of small expected values. \n\n")
# Fix up any data problems and prepare for the call.
# Notice that for strata entries that are missing any covariate values, only an intercept is added
# Expand the entries in case of missing time entries
new.n1 <- rep(0, max(time)-min(time)+1)
new.m2 <- rep(0, max(time)-min(time)+1)
new.u2.W.YoY <- rep(0, max(time)-min(time)+1)
new.u2.W.1 <- rep(0, max(time)-min(time)+1)
new.u2.H.1 <- rep(0, max(time)-min(time)+1)
new.logitP.cov <- matrix(NA, nrow=max(time)-min(time)+1, ncol=ncol(as.matrix(logitP.cov)))
new.time <- min(time):max(time)
new.n1 [time-min(time)+1] <- n1
new.m2 [time-min(time)+1] <- m2
new.m2 [bad.m2-min(time)+1] <- NA # wipe out strata where m2 is known to be bad
new.u2.W.YoY[time-min(time)+1] <- u2.W.YoY
new.u2.W.YoY[bad.u2.W.YoY-min(time)+1] <- NA # wipe out strata where u2.W.YoY is known to be bad
new.u2.W.1 [time-min(time)+1] <- u2.W.1
new.u2.W.1 [bad.u2.W.1-min(time)+1] <- NA # wipe out strata where u2.W.1 is known to be bad
new.u2.H.1 [time-min(time)+1] <- u2.H.1
new.u2.H.1 [bad.u2.H.1-min(time)+1] <- NA   # wipe out strata where u2.H.1 is known to be bad
new.logitP.cov[time-min(time)+1,]<- as.matrix(logitP.cov)
new.logitP.cov[ is.na(new.logitP.cov[,1]), 1] <- 1 # insert a 1 into first columns where not specified
new.logitP.cov[ is.na(new.logitP.cov)] <- 0 # other covariates are forced to zero not in column 1
# Check for and fix problems with the data
# If n1=m2=0, then set n1 to 1, and set m2<-NA
new.m2[new.n1==0] <- NA
new.n1[new.n1==0] <- 1
# Print out the revised data
hatch.indicator <- rep(' ', max(time)-min(time)+1)
hatch.indicator[hatch.after-min(time)+1]<- '***'
cat("\n\n*** Revised data *** \n")
temp<- data.frame(time=new.time, n1=new.n1, m2=new.m2, u2.W.YoY=new.u2.W.YoY, u2.W.1=new.u2.W.1, u2.H.1=new.u2.H.1,
new.logitP.cov=new.logitP.cov,
hatch.indicator=hatch.indicator)
print(temp)
cat("\n\n")
# Print out information on the prior distributions used
cat("\n\n*** Information on priors *** \n")
cat(" Parameters for prior on tauU (variance in spline coefficients: ", tauU.alpha, tauU.beta,
" which corresponds to a mean/std dev of 1/var of:",
round(tauU.alpha/tauU.beta,2),round(sqrt(tauU.alpha/tauU.beta^2),2),"\n")
cat(" Parameters for prior on taueU (variance of log(U) about spline: ",taueU.alpha, taueU.beta,
" which corresponds to a mean/std dev of 1/var of:",
round(taueU.alpha/taueU.beta,2),round(sqrt(taueU.alpha/taueU.beta^2),2),"\n")
cat(" Parameters for prior on beta.logitP[1] (intercept) (mean, sd): \n", cbind(round(prior.beta.logitP.mean,3), round(prior.beta.logitP.sd,5)),"\n")
cat(" Parameters for prior on tauP (residual variance of logit(P) after adjusting for covariates: ",tauP.alpha, tauP.beta,
" which corresponds to a mean/std dev of 1/var of:",
round(tauP.alpha/tauP.beta,2),round(sqrt(tauP.alpha/tauP.beta^2),2),"\n")
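# Worked example (illustrative arithmetic only): a Gamma(alpha, beta) prior on a precision has
# mean alpha/beta and standard deviation sqrt(alpha)/beta, which is what the messages above report.
# For instance, with hypothetical values alpha = 1 and beta = 0.05:
#   alpha <- 1; beta <- 0.05
#   c(mean = alpha/beta, sd = sqrt(alpha/beta^2))   # both equal 20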
cat("\n\nInitial seed for this run is: ",InitialSeed, "\n")
sink()
if (debug2) {
cat("\nprior to formal call to TimeStratPetersenDiagErrorWH\n")
browser()
}
if (debug)
{results <- TimeStratPetersenDiagErrorWHSteel(title=title, prefix=prefix,
time=new.time, n1=new.n1, m2=new.m2, u2.W.YoY=new.u2.W.YoY, u2.W.1=new.u2.W.1, u2.H.1=new.u2.H.1,
hatch.after=hatch.after-min(time)+1,
logitP.cov=new.logitP.cov,
n.chains=3, n.iter=10000, n.burnin=5000, n.sims=500, # set to low value for debugging only
prior.beta.logitP.mean=prior.beta.logitP.mean,
prior.beta.logitP.sd =prior.beta.logitP.sd,
tauU.alpha=tauU.alpha, tauU.beta=tauU.beta, taueU.alpha=taueU.alpha, taueU.beta=taueU.beta,
debug=debug, debug2=debug2, InitialSeed=InitialSeed,
save.output.to.files=save.output.to.files)
} else #notice R syntax requires { before the else
{results <- TimeStratPetersenDiagErrorWHSteel(title=title, prefix=prefix,
time=new.time, n1=new.n1, m2=new.m2, u2.W.YoY=new.u2.W.YoY, u2.W.1=new.u2.W.1, u2.H.1=new.u2.H.1,
hatch.after=hatch.after-min(time)+1,
logitP.cov=new.logitP.cov,
n.chains=n.chains, n.iter=n.iter, n.burnin=n.burnin, n.sims=n.sims,
prior.beta.logitP.mean=prior.beta.logitP.mean,
prior.beta.logitP.sd =prior.beta.logitP.sd,
tauU.alpha=tauU.alpha, tauU.beta=tauU.beta, taueU.alpha=taueU.alpha, taueU.beta=taueU.beta,
debug=debug, debug2=debug2, InitialSeed=InitialSeed,
save.output.to.files=save.output.to.files)
}
# Now to create the various summary tables of the results
#browser()
# A plot of the observed log(U) on the log scale, and the final mean log(U)
# in the diagonal case, all of n1, m2, u2 have the same length
Nstrata <- length(n1)
plot.df <- data.frame(time =new.time)
# adjust the u2 for the clipping fractions
plot.df$n1 <- new.n1
plot.df$m2 <- new.m2
plot.df$u2.H.1 <- new.u2.H.1
plot.df$u2.W.1 <- new.u2.W.1
plot.df$u2.W.YoY <- new.u2.W.YoY
plot.df$u2.W.YoY[is.na(plot.df$u2.W.YoY)] <- 1 # in case of missing values
plot.df$u2.H.1 [is.na(plot.df$u2.H.1) ] <- 1 # in case of missing values
plot.df$u2.W.1 [is.na(plot.df$u2.W.1) ] <- 1 # in case of missing values
get.est <- function(est.name, plot.df, hatch.after, results){
# get the initial estimates, and extract from the results data structure and put into a data frame
est.df <- data.frame(group=est.name, time=plot.df$time)
avgP <- sum(plot.df$m2,na.rm=TRUE)/sum(plot.df$n1, na.rm=TRUE)
#browser()
# initial guess
est.df$logUguess <- log(1+pmax( (plot.df[, paste("u2.",est.name,sep="")]+1)*(plot.df$n1+2)/(plot.df$m2+1),
plot.df[, paste("u2.",est.name,sep="")]/avgP, na.rm=TRUE))
# extract estimates from results
results.row.names <- rownames(results$summary)
est.row.index <- grep(paste("etaU.",est.name, sep=""), results.row.names)
etaU <- results$summary[est.row.index,]
est.df$logU =etaU[,"mean"]
est.df$logUlcl =etaU[,"2.5%"]
est.df$logUucl =etaU[,"97.5%"]
# extract the spline
logUne.row.index <- grep(paste("logUne.",est.name,sep=""), results.row.names)
est.df$spline <- results$summary[logUne.row.index,"mean"]
if(est.name=="H.1"){
est.df$logUguess[1:(hatch.after-min(plot.df$time)+1)]<- NA
est.df$logU [1:(hatch.after-min(plot.df$time)+1)]<- NA
est.df$logUlcl [1:(hatch.after-min(plot.df$time)+1)]<- NA
est.df$logUucl [1:(hatch.after-min(plot.df$time)+1)]<- NA
est.df$spline [1:(hatch.after-min(plot.df$time)+1)]<- NA
}
est.df
}
plot.data <-rbind( get.est("H.1" ,plot.df, hatch.after, results),
get.est("W.YoY",plot.df, hatch.after, results),
get.est("W.1" ,plot.df, hatch.after, results))
# add limits to the plot to avoid non-monotone secondary axis problems with extreme values
plot.data$logUguess <- pmax(-10 , pmin(20, plot.data$logUguess))
plot.data$logU <- pmax(-10 , pmin(20, plot.data$logU ))
plot.data$logUlcl <- pmax(-10 , pmin(20, plot.data$logUlcl ))
plot.data$logUucl <- pmax(-10 , pmin(20, plot.data$logUucl ))
plot.data$spline <- pmax(-10 , pmin(20, plot.data$spline))
fit.plot <- ggplot(data=plot.data, aes_(x=~time, color=~group))+
ggtitle(title, subtitle="Fitted spline curve with 95% credible intervals")+
geom_point(aes_(y=~logUguess), shape=16, position=position_dodge(width=.2))+ # guesses for population
geom_point(aes_(y=~logU), shape=19, position=position_dodge(width=.2))+
geom_line (aes_(y=~logU), position=position_dodge(width=.2))+
geom_errorbar(aes_(ymin=~logUlcl, ymax=~logUucl), width=.1, position=position_dodge(width=.2))+
geom_line(aes_(y=~spline),linetype="dashed", position=position_dodge(width=.2)) +
xlab("Time Index\nFitted/Smoothed/Raw values plotted for W(black) and H(blue)")+
ylab("log(U[i]) + 95% credible interval")+
theme(legend.justification = c(0, 0), legend.position = c(0, 0))+
scale_color_discrete(name="Group")+
scale_x_continuous(breaks=seq(min(plot.data$time, na.rm=TRUE),max(plot.data$time, na.rm=TRUE),2))+
scale_y_continuous(sec.axis = sec_axis(~ exp(.), name="U + 95% credible interval",
breaks=c(1,10,20,50,
100,200,500,
1000,2000,5000,
10000,20000, 50000,
100000,200000, 500000,
1000000,2000000,5000000,10000000),
labels = scales::comma))
if(save.output.to.files)ggsave(plot=fit.plot, filename=paste(prefix,"-fit.pdf",sep=""), height=6, width=10, units="in")
results$plots$fit.plot <- fit.plot
# plot the estimated logits over time
logitP.plot <- plot_logitP(title=title, time=new.time, n1=new.n1, m2=new.m2,
u2=new.u2.W.YoY+new.u2.W.1+new.u2.H.1, logitP.cov=new.logitP.cov, results=results,
trunc.logitP=trunc.logitP)
if(save.output.to.files)ggsave(plot=logitP.plot, filename=paste(prefix,"-logitP.pdf",sep=""), height=6, width=10, units="in")
results$plots$logitP.plot <- logitP.plot
# Look at autocorrelation function for Utot.W.YoY and Utot.W.1, U.tot.H.1
mcmc.sample1<- data.frame(parm="Utot.W.YoY", sample=results$sims.matrix[,"Utot.W.YoY"], stringsAsFactors=FALSE)
mcmc.sample2<- data.frame(parm="Utot.W.1", sample=results$sims.matrix[,"Utot.W.1"], stringsAsFactors=FALSE)
mcmc.sample3<- data.frame(parm="Utot.H.1", sample=results$sims.matrix[,"Utot.H.1"], stringsAsFactors=FALSE)
mcmc.sample <- rbind(mcmc.sample1, mcmc.sample2, mcmc.sample3)
acf.Utot.plot <- plot_acf(mcmc.sample)
if(save.output.to.files)ggsave(plot=acf.Utot.plot, filename=paste(prefix,"-Utot-acf.pdf",sep=""), height=4, width=6, units="in")
results$plots$acf.Utot.plot <- acf.Utot.plot
# Look at posterior plot for Utot.W.YoY and Utot.W.1, U.tot.H.1
mcmc.sample1<- data.frame(parm="Utot.W.YoY", sample=results$sims.matrix[,"Utot.W.YoY"], stringsAsFactors=FALSE)
mcmc.sample2<- data.frame(parm="Utot.W.1", sample=results$sims.matrix[,"Utot.W.1"], stringsAsFactors=FALSE)
mcmc.sample3<- data.frame(parm="Utot.H.1", sample=results$sims.matrix[,"Utot.H.1"], stringsAsFactors=FALSE)
mcmc.sample <- rbind(mcmc.sample1, mcmc.sample2, mcmc.sample3)
post.Utot.plot <- plot_posterior(mcmc.sample)
if(save.output.to.files)ggsave(plot=post.Utot.plot, filename=paste(prefix,"-Utot-posterior.pdf",sep=""), height=4, width=6, units="in")
results$plots$post.Utot.plot <- post.Utot.plot
#save the Bayesian predictive distribution (Bayesian p-value plots)
#browser()
discrep <-PredictivePosterior.TSPDE.WHSteel (time, new.n1, new.m2, new.u2.W.YoY, new.u2.W.1, new.u2.H.1,
expit(results$sims.list$logitP),
round(results$sims.list$U.W.YoY),
round(results$sims.list$U.W.1),
round(pmax(results$sims.list$U.H.1,0)), hatch.after) #don't forget that hatchery fish is 0 until hatch.after
gof <- PredictivePosteriorPlot.TSPDE.WHSteel(discrep)
if(save.output.to.files)ggsave(gof[[1]],filename=paste(prefix,"-GOF.pdf",sep=""), height=8, width=8, units="in", dpi=300 )
results$plots$gof.plot <- gof
# create traceplots of logU, U, and logitP (along with R value) to look for non-convergence
# the plot_trace will return a list of plots (one for each page as needed)
varnames <- names(results$sims.array[1,1,]) # extract the names of the variables
# Trace plots of logitP
trace.plot <- plot_trace(title=title, results=results, parms_to_plot=varnames[grep("^logitP", varnames)])
if(save.output.to.files){
pdf(file=paste(prefix,"-trace-logitP.pdf",sep=""))
plyr::l_ply(trace.plot, function(x){plot(x)})
dev.off()
}
results$plots$trace.logitP.plot <- trace.plot
# now for the traceplots of logU (etaU), Utot, and Ntot
trace.plot <- plot_trace(title=title, results=results, parms_to_plot=varnames[c(grep("Utot",varnames), grep("Ntot",varnames), grep("^etaU", varnames))])
if(save.output.to.files){
pdf(file=paste(prefix,"-trace-logU.pdf",sep=""))
plyr::l_ply(trace.plot, function(x){plot(x)})
dev.off()
}
results$plots$trace.logU.plot <- trace.plot
sink(report, append=TRUE)
# What was the initial seed
cat("\n\n*** Initial State for this run ***: ", results$Seed.initial,"\n")
cat("*** See help(modelSetRN) for details. ***\n")
# Global summary of results
cat("\n\n*** Summary of MCMC results *** \n\n")
save.max.print <- getOption("max.print")
options(max.print=.Machine$integer.max)
print(results, digits.summary=3)#, max=.Machine$integer.max)
options(max.print=save.max.print)
# Give an alternate computation of DIC based on the variance of the deviance
# Refer to http://www.mrc-bsu.cam.ac.uk/bugs/winbugs/DIC-slides.pdf for derivation and why
# this alternate method may be superior to that automatically computed by WinBugs/OpenBugs
cat("\n\n*** Alternate DIC computation based on p_D = var(deviance)/2 \n")
results.row.names <- rownames(results$summary)
deviance.row.index<- grep("deviance", results.row.names)
deviance <- results$summary[deviance.row.index,]
p.D <- deviance["sd"]^2/2
dic <- deviance["mean"]+p.D
cat(" D-bar: ", deviance["mean"],"; var(dev): ", deviance["sd"]^2,
"; p.D: ", p.D, "; DIC: ", dic)
# Summary of population sizes. Add pretty printing
cat("\n\n\n\n*** Summary of Unmarked Population Size ***\n")
cat("W.YoY \n")
temp <- results$summary[ grep("Utot.W.YoY", rownames(results$summary)),]
old.Rhat <- temp["Rhat"]
temp<- formatC(temp, big.mark=",", format="d")
temp["Rhat"] <- formatC(old.Rhat,digits=2,format="f",flag="#")
print(temp, quote=FALSE)
cat("\n\nW.1 \n")
temp<- results$summary[ grep("Utot.W.1", rownames(results$summary)),]
old.Rhat <- temp["Rhat"]
temp<- formatC(temp, big.mark=",", format="d")
temp["Rhat"] <- formatC(old.Rhat,digits=2,format="f",flag="#")
print(temp, quote=FALSE)
cat("\n\nH.1\n")
temp<- results$summary[ grep("Utot.H.1", rownames(results$summary)),]
old.Rhat <- temp["Rhat"]
temp<- formatC(temp, big.mark=",", format="d")
temp["Rhat"] <- formatC(old.Rhat,digits=2,format="f",flag="#")
print(temp, quote=FALSE)
#browser()
time.H <- time>hatch.after
cat("\n\n\n\n*** Summary of Quantiles of Run Timing.Wild.YoY *** \n")
cat( " This is based on the sample weeks provided and the U.W.YoY[i] values \n")
q <- RunTime(time=time, U=results$sims.list$U.W.YoY, prob=run.prob)
temp <- rbind(apply(q,2,mean), apply(q,2,sd))
rownames(temp) <- c("Mean", "Sd")
print(round(temp,2))
cat("\n\n*** Summary of Quantiles of Run Timing.Wild.1 *** \n")
cat( " This is based on the sample weeks provided and the U.W.1[i] values \n")
q <- RunTime(time=time, U=results$sims.list$U.W.1, prob=run.prob)
temp <- rbind(apply(q,2,mean), apply(q,2,sd))
rownames(temp) <- c("Mean", "Sd")
print(round(temp,2))
cat("\n\n*** Summary of Quantiles of Run Timing.Hatchery.1 *** \n")
cat( " This is based on the sample weeks provided and the U.H.1[i] values \n")
q <- RunTime(time=time[time.H], U=results$sims.list$U.H.1[,time.H], prob=run.prob)
temp <- rbind(apply(q,2,mean), apply(q,2,sd))
rownames(temp) <- c("Mean", "Sd")
print(round(temp,2))
# Add the runtiming to the output object
results$runTime <- temp
cat("\n\n")
cat(paste("*** end of fit *** ", date()))
sink()
# save the report to a file?
if(save.output.to.files)writeLines(stdout, results.filename)
results$report <- stdout
# add some of the raw data to the bugs object for simplicity in referencing it later
results$data <- list( time=time, n1=n1, m2=m2, u2.W.YoY=u2.W.YoY, u2.W.1=u2.W.1, u2.H.1=u2.H.1,
hatch.after=hatch.after,
bad.m2=bad.m2, bad.u2.W.YoY=bad.u2.W.YoY, bad.u2.W.1=bad.u2.W.1, bad.u2.H.1=bad.u2.H.1,
logitP.cov=logitP.cov,
version=version, date_run=date(),title=title)
return(results)
} # end of function
|
/scratch/gouwar.j/cran-all/cranData/BTSPAS/R/TimeStratPetersenDiagErrorWHSteel_fit.R
|
# 2021-10-24 CJS added trunc.logitP to avoid problems with plotting
# 2020-12-15 CJS removed sampfrac from code but left argument with warning message
# 2020-11-07 CJS Allowed user to specify prior for beta coefficient for logitP
# 2018-12-19 CJS Deprecated use of sampling.fraction
# 2018-12-15 CJS Added ability to fix some logitP values
# 2018-12-02 CJS Convert trace plots to ggplot
# 2018-12-01 CJS Convert posterior plots to ggplot
# 2018-11-30 CJS Convert acf plot to ggplot
# 2018-11-28 CJS Fixed problem where printing results got cutoff
# 2018-11-25 CJS Removed all WinBugs/OpenBugs stuff
# 2015-06-10 CJS Change gof plot to ggplot()
# 2014-09-01 CJS Converted to JAGS engine from OpenBugs
# 2013-09-04 CJS Initialized n1, m2, u2 that are NA to sensible values.
# Removed references to WinBugs
# 2012-08-30 CJS fixed problem in error checking in any() function that includes missing values
# 2011-06-13 CJS inserted the bayesian p-values in the results
# 2010-11-29 CJS added code for bad.n1 to the call.
# 2010-11-25 CJS added code for bad.u2 to the call. Simplified the two pooled and simple Petersen estimates
# 2010-11-25 CJS pretty printing for estimates of Utot, Ntot
# 2010-09-06 CJS forced time, n1, m2, u2, sampfrac to be vectors
# 2010-08-04 CJS added traceplots of logitP, logU, Utot, and Ntot to help diagnose non-mixing
# 2010-08-04 CJS added version/date to results data structure
# 2009-12-05 CJS added title to argument list
# 2009-12-01 CJS Added some basic error checking; added OPENBUGS/WINBUGS to argument list
#' Wrapper (*_fit) to call the Time Stratified Petersen Estimator
#' with Diagonal Entries function.
#'
#' Takes the number of marked fish released, the number of recaptures, and the
#' number of unmarked fish and uses Bayesian methods to fit a spline
#' through the population numbers and a hierarchical model for the trap
#' efficiencies over time. The output is written to files and an MCMC object
#' is also created with samples from the posterior.
#'
#' Normally, the wrapper (*_fit) function is called which then calls the
#' fitting routine.
#'
#' Use the \code{\link{TimeStratPetersenNonDiagError_fit}} function for cases
#' where recaptures take place outside the stratum of release.
#'
#'
#' @aliases TimeStratPetersenDiagError_fit
#' @template title
#' @template prefix
#' @template time
#' @template n1
#' @param m2 A numeric vector of the number of marked fish from n1 that are
#' recaptured in each time stratum. All recaptures take place within the
#' stratum of release.
#' @template u2.D
#' @template sampfrac
#' @template jump.after
#' @template bad.n1
#' @template bad.m2
#' @template bad.u2
#' @template logitP.cov
#' @template param.logitP.fixed
#' @template mcmc-parms
#' @template tauU.alpha.beta
#' @template taueU.alpha.beta
#' @template prior.beta.logitP.mean.sd
#' @template tauP.alpha.beta
#' @template run.prob
#' @template debug
#' @template InitialSeed
#' @template save.output.to.files
#' @template trunc.logitP
#'
#' @return An MCMC object with samples from the posterior distribution. A
#' series of graphs and text file are also created in the working directory.
#' @template author
#' @template references
#' @keywords ~models ~smooth
#' @importFrom stats runif var sd
#' @export TimeStratPetersenDiagError_fit
#'
#'
TimeStratPetersenDiagError_fit <-
function( title="TSDPE", prefix="TSPDE-",
time, n1, m2, u2, sampfrac=rep(1,length(u2)),
jump.after=NULL,
bad.n1=c(), bad.m2=c(), bad.u2=c(),
logitP.cov=as.matrix(rep(1,length(n1))),
logitP.fixed=NULL, logitP.fixed.values=NULL,
n.chains=3, n.iter=200000, n.burnin=100000, n.sims=2000,
tauU.alpha=1, tauU.beta=.05, taueU.alpha=1, taueU.beta=.05,
prior.beta.logitP.mean = c(logit(sum(m2,na.rm=TRUE)/sum(n1,na.rm=TRUE)),rep(0, ncol(as.matrix(logitP.cov))-1)),
prior.beta.logitP.sd = c(stats::sd(logit((m2+.5)/(n1+1)),na.rm=TRUE), rep(10, ncol(as.matrix(logitP.cov))-1)),
tauP.alpha=.001, tauP.beta=.001,
run.prob=seq(0,1,.1), # what percentiles of run timing are wanted
debug=FALSE, debug2=FALSE,
InitialSeed=ceiling(stats::runif(1,min=0, max=1000000)),
save.output.to.files=TRUE,
trunc.logitP=15) {
# Fit a Time Stratified Petersen model with diagonal entries and with smoothing on U allowing for random error
# The "diagonal entries" implies that no marked fish are recaptured outside the (time) stratum of release
#
version <- '2021-11-02'
options(width=200)
# Input parameters are
# prefix - prefix used for files created with the analysis results
# this should be in standard Windows format, eg. JC-2002-ST-TSPDE
# to which is appended various suffixes for plots etc
# time - vector of stratum numbers. For example, 9:38 would indicate that the
# Trinity River system sampled weeks 9 to 38. If some values are omitted
# e.g. time=10 not present, this indicates sampling did not take place this
# week. The data are expanded and interpolation for the missing week takes place
# n1, m2, u2 - the input data consisting of fish marked and released, recapture, and unmarked captured
# sampfrac - sampling fraction - deprecated because it never worked properly
# jump.after - in some cases, a single spline is still not flexible enough to cope with rapid
# changes in the run curve. For example, in the Trinity River project, a larger
# hatchery release occurs around stratum 14. This is a vector indicating the
# strata AFTER which the spline curve is allowed to jump.
# bad.n1 - list of stratum numbers where the value of n1 is suspect.
# Note that if the value of n1 is suspect, the value of m2 is also likely suspect.
# These are replaced by the value of (1,0). We need to specify a value of 1 for bad.n1 values
# because OpenBugs gets upset with n1=0 or n1=NA.
# bad.m2 - list of stratum numbers where the value of m2 is suspect.
# For example, the capture rate could be extremely low.
# These are set to NA prior to the call to OpenBugs
# bad.u2 - list of stratum numbers where the value of u2 is suspect.
# For example, the trap may not be operating completely for some strata, or there was miscounting.
# These are set to NA prior to the call to OpenBugs
# logitP.cov - matrix of covariates for logit(P). If the strata times are "missing" some values, an intercept is assumed
# for the first element of the covariance matrix and 0 for the rest of the covariates.
# CAUTION - this MAY not be what you want to do. It is likely best to enter ALL strata
# if you have any covariates. The default, if not specified, is a constant (the mean logit)
# logitP.fixed, logitP.fixed.values - if you are fixing any of the logit P and at what values.
# tauU.alpha, tauU.beta - parameters for the prior on variance in spline coefficients
# taueU.alpha, taueU.beta - parameters for the prior on variance in log(U) around fitted spline
# prior.beta.logitP.mean, prior.beta.logitP.sd - parameters for the prior on mean logit(P)'s [The intercept term]
# The other covariates are assigned priors of a mean of 0 and a sd of 30
# tauP.alpha, tauP.beta - parameters for the prior on 1/var of residual error in logit(P)'s
# run.prob - percentiles of run timing wanted
# debug - if TRUE, then this is a test run with very small MCMC chains run to test out the data
# and OpenBUGS will run and stop waiting for you to exit and complete
# force input parameters to be vectors
time <- as.vector(time)
n1 <- as.vector(n1)
m2 <- as.vector(m2)
u2 <- as.vector(u2)
sampfrac <- as.vector(sampfrac)
# Do some basic error checking
# 1. Check that length of n1, m2, u2, sampfrac, time all match
if(stats::var(c(length(n1),length(m2),length(u2),length(sampfrac),length(time)))>0){
cat("***** ERROR ***** Lengths of n1, m2, u2, sampfrac, time must all be equal. They are:",
length(n1),length(m2),length(u2),length(sampfrac),length(time),"\n")
return()}
if(!is.numeric(n1)){
cat("***** ERROR ***** n1 must be numeric. You have:",
paste(n1,collapse=", "),"\n")
return()}
if(any(is.na(n1))){
cat("***** ERROR ***** All values of n1 must not be missing. You have: ",
paste(n1,collapse=", "),"\n")
return()}
if(any(n1 < 0, na.rm=TRUE)){
cat("***** ERROR ***** All values of n1 must be non-negative. You have: ",
paste(n1,collapse=", "),"\n")
return()}
if(length(logitP.cov) %% length(n1) != 0){
cat("***** ERROR ***** Dimension of covariate vector doesn't match length of n1 etc They are:",
length(n1),length(logitP.cov),dim(logitP.cov),"\n")
return()}
# 2. Check that m2<= n1
if(any(m2>n1, na.rm=TRUE)){
cat("***** ERROR ***** m2 must be <= n1. The arguments are \n n1:",n1,"\n m2:",m2,"\n")
return()}
# 3. Elements of bad.n1, bad.m2, bad.u2, and jump.after must belong to time
if(!all(bad.n1 %in% time, na.rm=TRUE)){
cat("***** ERROR ***** bad.n1 must be elements of strata identifiers. You entered \n bad.n1:",bad.n1,"\n Strata identifiers are \n time:",time, "\n")
return()}
if(!all(bad.m2 %in% time, na.rm=TRUE)){
cat("***** ERROR ***** bad.m2 must be elements of strata identifiers. You entered \n bad.m2:",bad.m2,"\n Strata identifiers are \n time:",time, "\n")
return()}
if(!all(bad.u2 %in% time, na.rm=TRUE)){
cat("***** ERROR ***** bad.u2 must be elements of strata identifiers. You entered \n bad.u2:",bad.u2,"\n Strata identifiers are \n time:",time, "\n")
return()}
if(!all(jump.after %in% time, na.rm=TRUE)){
cat("***** ERROR ***** jump.after must be elements of strata identifiers. You entered \n jump.after:",jump.after,"\n Strata identifiers are \n time:",time, "\n")
return()}
# 4. check that index of logitP.fixed belong to time
if(!all(logitP.fixed %in% time,na.rm=TRUE)){
cat("***** ERROR ***** logitP.fixed must be elements of strata identifiers. You entered \n logitP.fixed:",logitP.fixed,"\n Strata identifiers are \n time:",time, "\n")
return()}
if(length(logitP.fixed)!=length(logitP.fixed.values)){
cat("***** ERROR ***** Lengths of logitP.fixed and logitP.fixed.values must all be equal. They are:",
length(logitP.fixed),length(logitP.fixed.values),"\n")
return()}
# Check that that the prior.beta.logitP.mean and prior.beta.logitP.sd length=number of columns of covariates
logitP.cov <- as.matrix(logitP.cov)
if(!is.vector(prior.beta.logitP.mean) | !is.vector(prior.beta.logitP.sd)){
stop("prior.beta.logitP.mean and prior.beta.logitP.sd must be vectors")
}
if(!is.numeric(prior.beta.logitP.mean) | !is.numeric(prior.beta.logitP.sd)){
stop("prior.beta.logitP.mean and prior.beta.logitP.sd must be numeric")
}
if(length(prior.beta.logitP.mean) != ncol(logitP.cov) | length(prior.beta.logitP.sd) != ncol(logitP.cov)){
stop("prior.beta.logitP.mean and prior.beta.logitP.sd must be same length as number columns in covariate matrix")
}
# Deprecation of sampling fraction.
if(any(sampfrac != 1)){
cat("***** ERROR ***** Sampling fraction is deprecated for any values other than 1. DO NOT USE ANYMORE. ")
return()
}
results.filename <- paste(prefix,"-results.txt",sep="")
# Create the report
stdout <- vector('character')
report <- textConnection('stdout', 'wr', local = TRUE)
sink(report)
#sink(results.filename)
cat(paste("Time Stratified Petersen with Diagonal recaptures and error in smoothed U - ", date()))
cat("\nVersion:", version, "\n\n")
cat("\n\n", title, "Results \n\n")
cat("*** Raw data *** \n")
temp<- cbind(time, n1, m2, u2, logitP.cov)
colnames(temp)<- c('time', 'n1','m2','u2', paste0("logitPcov[", 1:ncol(as.matrix(logitP.cov)),"]") )
print(temp)
cat("\n\n")
cat("Jump point are after strata: ", jump.after)
if(length(jump.after)==0) cat("none - A single spline is fit")
cat("\nFixed logitP indices are:", logitP.fixed)
if(length(logitP.fixed)==0) cat("none - NO fixed values")
cat("\nFixed logitP values are:", logitP.fixed.values)
if(length(logitP.fixed)==0) cat("none - NO fixed values")
cat("\n\nValues of bad.n1 are : ", bad.n1, ". The value of n1 will be set to 1 and m2 to NA for these strata: ")
if(length(bad.n1)==0) cat("none.")
cat( "\nValues of bad.m2 are : ", bad.m2, ". The value of m2 will be set to NA for these strata: ")
if(length(bad.m2)==0) cat("none.")
cat( "\nValues of bad.u2 are : ", bad.u2, ". The value of u2 will be set to NA for these strata: ")
if(length(bad.u2)==0) cat("none.")
# Pooled Petersen estimator over ALL of the data including when no releases take place, bad.n1, bad.m2, bad.u2 and missing values.
cat("\n\n*** Pooled Petersen Estimate based on pooling over ALL strata")
cat("\nValues of u2 are adjusting for sampling fraction \n\n")
cat("Total n1=", sum(n1, na.rm=TRUE),"; m2=",sum(m2, na.rm=TRUE),"; u2=",sum(u2, na.rm=TRUE),"\n\n")
pp <- SimplePetersen(sum(n1, na.rm=TRUE), sum(m2, na.rm=TRUE), sum(u2, na.rm=TRUE))
cat("Est U(total) ", format(round(pp$U.est),big.mark=",")," (SE ", format(round(pp$U.se), big.mark=","), ")\n")
cat("Est N(total) ", format(round(pp$N.est),big.mark=",")," (SE ", format(round(pp$N.se), big.mark=","), ")\n\n\n")
# Obtain the Pooled Petersen estimator after EXCLUDING strata with missing data or strata that are flagged as having bad.n1, bad.m2, and bad.u2 values
temp.n1 <- n1
temp.n1[match(bad.n1,time)] <- NA
temp.m2 <- m2
temp.m2[match(bad.m2,time)] <- NA
temp.u2 <- u2
temp.u2[match(bad.u2,time)] <- NA
select <- (n1>0) & (!is.na(temp.n1)) & (!is.na(temp.m2)) & (!is.na(temp.u2))
cat("\n\n*** Pooled Petersen Estimate after EXCLUDING strata with missing value or flagged as bad.n1, bad.m2 or bad.m2. ")
cat("\nValues of u2 are adjusted for sampling fraction\n\n")
cat("The following strata are excluded because n1=0, NA values in m2 or u2, or flagged by bad.n1, bad.m2 or bad.u2:", time[!select],"\n\n")
temp.n1 <- n1[select]
temp.m2 <- m2[select]
temp.u2 <- u2[select]
cat("Total n1=", sum(temp.n1, na.rm=TRUE),"; m2=",sum(temp.m2, na.rm=TRUE),"; u2=",sum(temp.u2, na.rm=TRUE),"\n\n")
pp <- SimplePetersen(sum(temp.n1, na.rm=TRUE), sum(temp.m2, na.rm=TRUE), sum(temp.u2, na.rm=TRUE))
cat("Est U(total) ", format(round(pp$U.est),big.mark=",")," (SE ", format(round(pp$U.se), big.mark=","), ")\n")
cat("Est N(total) ", format(round(pp$N.est),big.mark=",")," (SE ", format(round(pp$N.se), big.mark=","), ")\n\n\n")
# Obtain Petersen estimator for each stratum prior to excluding any strata flagged as bad values
cat( "*** Stratified Petersen Estimator for each stratum PRIOR to removing strata with bad.n1, bad.m2, or bad.u2 values.")
cat("\n Values of u2 are adjusted for sampling fraction ***\n\n")
temp.n1 <- n1
temp.m2 <- m2
temp.u2 <- u2
sp <- SimplePetersen(temp.n1, temp.m2, temp.u2)
temp <- cbind(time, temp.n1, temp.m2, temp.u2, round(sp$U.est), round(sp$U.se))
colnames(temp) <- c('time', 'n1','m2','u2', 'U[i]', 'SE(U[i])')
print(temp)
cat("\n")
cat("Est U(total) ", format(round(sum(sp$U.est, na.rm=TRUE)),big.mark=","),
" (SE ", format(round(sqrt(sum(sp$U.se^2, na.rm=TRUE))), big.mark=","), ")\n\n\n")
# Obtain Petersen estimator for each stratum after excluding strata where n1=0, or flagged by bad.n1, bad.m2, or bad.u2
cat( "*** Stratified Petersen Estimator for each stratum EXCLUDING strata with n1=0, NA values, or flagged by bad.n1, bad.m2, or bad.u2 values ***")
cat("\n Values of u2 are adjusted for sampling fraction ***\n\n")
temp.n1 <- n1
temp.n1[match(bad.n1,time)] <- NA # if any value is bad, exclude this entire stratum
temp.n1[match(bad.m2,time)] <- NA
temp.n1[match(bad.u2,time)] <- NA
temp.n1[temp.n1==0] <- NA # if n1 is zero, then there is no estimate of capture probability for this stratum
temp.m2 <- m2
temp.m2[match(bad.n1,time)] <- NA # if any value is bad, exclude this entire stratum
temp.m2[match(bad.m2,time)] <- NA
temp.m2[match(bad.u2,time)] <- NA
temp.u2 <- u2
temp.u2[match(bad.n1,time)] <- NA # if any value is bad, exclude this entire stratum
temp.u2[match(bad.m2,time)] <- NA
temp.u2[match(bad.u2,time)] <- NA
sp <- SimplePetersen(temp.n1, temp.m2, temp.u2)
temp <- cbind(time, temp.n1, temp.m2, temp.u2, round(sp$U.est), round(sp$U.se))
colnames(temp) <- c('time', 'n1','m2','u2', 'U[i]', 'SE(U[i])')
print(temp)
cat("\n")
cat("Est U(total) ", format(round(sum(sp$U.est, na.rm=TRUE)),big.mark=","),
" (SE ", format(round(sqrt(sum(sp$U.se^2, na.rm=TRUE))), big.mark=","), ")\n\n\n")
# Test if pooling can be done
cat("*** Test if pooled Petersen is allowable on strata without problems in n1 or m2. [Check if marked fractions are equal] ***\n\n")
select <- (temp.n1>0) & (!is.na(temp.n1)) & (!is.na(temp.m2))
temp.n1 <- n1[select]
temp.m2 <- m2[select]
test <- TestIfPool( temp.n1, temp.m2)
cat("(Large Sample) Chi-square test statistic ", test$chi$statistic," has p-value", test$chi$p.value,"\n\n")
temp <- cbind(time[select],test$chi$observed, round(test$chi$expected,1), round(test$chi$residuals^2,1))
colnames(temp) <- c('time','n1-m2','m2','E[n1-m2]','E[m2]','X2[n1-m2]','X2[m2]')
print(temp)
cat("\n Be cautious of using this test in cases of small expected values. \n\n")
# Fix up any data problems and prepare for the call.
# Notice that for strata entries that are missing any covariate values, only an intercept is added
# Expand the entries in case of missing time entries
new.n1 <- rep(0, max(time)-min(time)+1)
new.m2 <- rep(0, max(time)-min(time)+1)
new.u2 <- rep(0, max(time)-min(time)+1)
new.logitP.cov <- matrix(NA, nrow=max(time)-min(time)+1, ncol=ncol(as.matrix(logitP.cov)))
new.time <- min(time):max(time)
new.n1[time-min(new.time)+1] <- n1
new.m2[time-min(new.time)+1] <- m2
new.m2[match(bad.m2,new.time)] <- NA # wipe out where m2 is flagged as bad
new.u2[time-min(new.time)+1] <- u2
new.u2[match(bad.u2,new.time)] <- NA # wipe out where u2 is flagged as bad
new.logitP.cov[time-min(new.time)+1,]<- as.matrix(logitP.cov)
new.logitP.cov[ is.na(new.logitP.cov[,1]), 1] <- 1 # insert a 1 into first columns where not specified
new.logitP.cov[ is.na(new.logitP.cov)] <- 0 # other covariates are forced to zero not in column 1
# Check for and fix problems with the data
# If n1=m2=0, then set n1 to 1, and set m2<-NA
new.m2[new.n1==0] <- NA
new.n1[new.n1==0] <- 1
new.n1[match(bad.n1,new.time)] <- 1
new.m2[match(bad.n1,new.time)] <- NA
# Print out the revised data
jump.indicator <- rep(' ', max(time)-min(time)+1)
jump.indicator[jump.after-min(time)+1]<- '***'
cat("\n\n*** Revised data *** \n")
temp<- data.frame(time=new.time, n1=new.n1, m2=new.m2, u2=new.u2,
new.logitP.cov=new.logitP.cov,
jump.indicator=jump.indicator)
print(temp)
cat("\n\n")
# assign the logitP fixed values etc.
new.logitP.fixed <- rep(NA, length(new.u2))
new.logitP.fixed[match(logitP.fixed, time)] <- logitP.fixed.values
# Print out information on the prior distributions used
cat("\n\n*** Information on priors *** \n")
cat(" Parameters for prior on tauU (variance in spline coefficients: ", tauU.alpha, tauU.beta,
" which corresponds to a mean/std dev of 1/var of:",
round(tauU.alpha/tauU.beta,2),round(sqrt(tauU.alpha/tauU.beta^2),2),"\n")
cat(" Parameters for prior on taueU (variance of log(U) about spline: ",taueU.alpha, taueU.beta,
" which corresponds to a mean/std dev of 1/var of:",
round(taueU.alpha/taueU.beta,2),round(sqrt(taueU.alpha/taueU.beta^2),2),"\n")
cat(" Parameters for prior on beta.logitP[1] (intercept) (mean, sd): \n", cbind(round(prior.beta.logitP.mean,3), round(prior.beta.logitP.sd,5)),"\n")
cat(" Parameters for prior on tauP (residual variance of logit(P) after adjusting for covariates: ",tauP.alpha, tauP.beta,
" which corresponds to a mean/std dev of 1/var of:",
round(tauP.alpha/tauP.beta,2),round(sqrt(tauP.alpha/tauP.beta^2),2),"\n")
cat("\n\n*** Initial seed for this run is: ",InitialSeed, "\n")
sink()
if (debug2) {
cat("\nprior to formal call to TimeStratPetersenDiagError\n")
browser()
}
if (debug)
{results <- TimeStratPetersenDiagError(title=title, prefix=prefix,
time=new.time, n1=new.n1, m2=new.m2, u2=new.u2,
jump.after=jump.after-min(time)+1,
logitP.cov=new.logitP.cov, logitP.fixed=new.logitP.fixed,
n.chains=3, n.iter=10000, n.burnin=5000, n.sims=500, # set to low values for debugging purposes only
prior.beta.logitP.mean=prior.beta.logitP.mean,
prior.beta.logitP.sd =prior.beta.logitP.sd,
tauU.alpha=tauU.alpha, tauU.beta=tauU.beta, taueU.alpha=taueU.alpha, taueU.beta=taueU.beta,
debug=debug, debug2=debug2, InitialSeed=InitialSeed, save.output.to.files=save.output.to.files)
} else #notice R syntax requires { before the else
{results <- TimeStratPetersenDiagError(title=title, prefix=prefix,
time=new.time, n1=new.n1, m2=new.m2, u2=new.u2,
jump.after=jump.after-min(time)+1,
logitP.cov=new.logitP.cov, logitP.fixed=new.logitP.fixed,
n.chains=n.chains, n.iter=n.iter, n.burnin=n.burnin, n.sims=n.sims,
prior.beta.logitP.mean=prior.beta.logitP.mean,
prior.beta.logitP.sd =prior.beta.logitP.sd,
tauU.alpha=tauU.alpha, tauU.beta=tauU.beta, taueU.alpha=taueU.alpha, taueU.beta=taueU.beta,
debug=debug, debug2=debug2, InitialSeed=InitialSeed, save.output.to.files=save.output.to.files)
}
# Now to create the various summary tables of the results
# A plot of the observed log(U) on the log scale, and the final mean log(U)
# Create the data frame needed for ggplot.
# In the diagonal case, time, n1, m2, u2 are the same length
plot.df <- data.frame(time =new.time)
plot.df$logUi <- log((new.u2+1)*(new.n1+2)/(new.m2+1)) # initial guess for logU
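# Illustrative note: this starting value is a Chapman-style bias-adjusted Petersen expansion of the
# unmarked catch. With hypothetical values n1 = 100, m2 = 20, u2 = 300 the guess would be
#   (300 + 1) * (100 + 2) / (20 + 1)   # about 1462 unmarked fish, so logUi is about 7.3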
# extract the fitted U values
results.row.names <- rownames(results$summary)
etaU.row.index <- grep("etaU", results.row.names)
etaU<- results$summary[etaU.row.index,]
plot.df$logU = etaU[,"mean"]
plot.df$logUlcl = etaU[,"2.5%"]
plot.df$logUucl = etaU[,"97.5%"]
# extract the spline values
logUne.row.index <- grep("logUne", results.row.names)
logUne<- results$summary[logUne.row.index,"mean"]
plot.df$spline <- results$summary[logUne.row.index,"mean"]
#browser()
# add limits to the plot to avoid non-monotone secondary axis problems with extreme values
plot.df$logUi <- pmax(-10 , pmin(20, plot.df$logUi))
plot.df$logU <- pmax(-10 , pmin(20, plot.df$logU ))
plot.df$logUlcl <- pmax(-10 , pmin(20, plot.df$logUlcl ))
plot.df$logUucl <- pmax(-10 , pmin(20, plot.df$logUucl ))
plot.df$spline <- pmax(-10 , pmin(20, plot.df$spline))
fit.plot <- ggplot(data=plot.df, aes_(x=~time))+
ggtitle(title, subtitle="Fitted spline curve with 95% credible intervals for estimated log(U[i])")+
geom_point(aes_(y=~logUi), color="red", shape=1)+ # open circle
xlab("Time Index\nOpen/closed circles - initial and final estimates")+ylab("log(U[i]) + 95% credible interval")+
geom_point(aes_(y=~logU), color="black", shape=19)+
geom_line (aes_(y=~logU), color="black")+
geom_errorbar(aes_(ymin=~logUlcl, ymax=~logUucl), width=.1)+
geom_line(aes_(y=~spline),linetype="dashed")+
scale_x_continuous(breaks=seq(min(plot.df$time, na.rm=TRUE),max(plot.df$time, na.rm=TRUE),2))+
scale_y_continuous(sec.axis = sec_axis(~ exp(.), name="U + 95% credible interval",
breaks=c(1,10,20,50,
100,200,500,
1000,2000,5000,
10000,20000, 50000,
100000,200000, 500000,
1000000,2000000,5000000,10000000),
labels = scales::comma))
if(save.output.to.files)ggsave(plot=fit.plot, filename=paste(prefix,"-fit.pdf",sep=""), height=6, width=10, units="in")
results$plots$fit.plot <- fit.plot
# plot logit(P) over time
logitP.plot <- plot_logitP(title=title, time=new.time, n1=new.n1, m2=new.m2, u2=new.u2,
logitP.cov=new.logitP.cov, results=results,
trunc.logitP=trunc.logitP)
if(save.output.to.files)ggsave(plot=logitP.plot, filename=paste(prefix,"-logitP.pdf",sep=""), height=6, width=10, units="in")
results$plots$logitP.plot <- logitP.plot
# Look at autocorrelation function for Utot
mcmc.sample <- data.frame(parm="Utot", sample=results$sims.matrix[,"Utot"], stringsAsFactors=FALSE)
acf.Utot.plot <- plot_acf(mcmc.sample)
if(save.output.to.files)ggsave(plot=acf.Utot.plot, filename=paste(prefix,"-Utot-acf.pdf",sep=""), height=4, width=6, units="in")
results$plots$acf.Utot.plot <- acf.Utot.plot
# Look at the shape of the posterior distribution
mcmc.sample1 <- data.frame(parm="Utot", sample=results$sims.matrix[,"Utot"], stringsAsFactors=FALSE)
mcmc.sample2 <- data.frame(parm="Ntot", sample=results$sims.matrix[,"Ntot"], stringsAsFactors=FALSE)
mcmc.sample <- rbind(mcmc.sample1, mcmc.sample2)
post.UNtot.plot <- plot_posterior(mcmc.sample)
post.UNtot.plot
if(save.output.to.files)ggsave(plot=post.UNtot.plot, filename=paste(prefix,"-UNtot-posterior.pdf",sep=""),
height=ifelse(length(unique(mcmc.sample$parm))==1,4,6), width=6, units="in")
results$plots$post.UNtot.plot <- post.UNtot.plot
#save the Bayesian predictive distribution (Bayesian p-value plots)
#browser()
discrep <-PredictivePosterior.TSPDE (new.n1, new.m2, new.u2,
new.logitP.fixed,
expit(results$sims.list$logitP),
round(results$sims.list$U))
gof <- PredictivePosteriorPlot.TSPDE (discrep) # get the bayesian p-values
if(save.output.to.files)ggsave(gof[[1]],filename=paste(prefix,"-GOF.pdf",sep=""), height=8, width=8, units="in", dpi=300 )
results$plots$gof.plot <- gof
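# Illustrative note (not the package computation): a Bayesian p-value compares a discrepancy measure
# evaluated on replicated data sets with the same measure on the observed data, over posterior draws,
# roughly p = mean(D(y.rep, theta) >= D(y.obs, theta)). Values near 0 or 1 flag lack of fit. A generic
# sketch with hypothetical vectors of simulated and observed discrepancies would be:
#   p.value <- mean(discrep.sim >= discrep.obs)   # discrep.sim / discrep.obs are assumed names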
# create traceplots of logU, U, and logitP (along with R value) to look for non-convergence
# the plot_trace will return a list of plots (one for each page as needed)
varnames <- names(results$sims.array[1,1,]) # extract the names of the variables
# Trace plots of logitP
trace.plot <- plot_trace(title=title, results=results, parms_to_plot=varnames[grep("^logitP", varnames)])
if(save.output.to.files){
pdf(file=paste(prefix,"-trace-logitP.pdf",sep=""))
plyr::l_ply(trace.plot, function(x){plot(x)})
dev.off()
}
results$plots$trace.logitP.plot <- trace.plot
# now for the traceplots of logU (etaU), Utot, and Ntot
trace.plot <- plot_trace(title=title, results=results, parms_to_plot=varnames[c(grep("Utot",varnames), grep("Ntot",varnames), grep("^etaU", varnames))])
if(save.output.to.files){
pdf(file=paste(prefix,"-trace-logU.pdf",sep=""))
plyr::l_ply(trace.plot, function(x){plot(x)})
dev.off()
}
results$plots$trace.logU.plot <- trace.plot
#sink(results.filename, append=TRUE)
sink(report, append=TRUE)
# Global summary of results
cat("\n\n*** Summary of MCMC results *** \n\n")
save.max.print <- getOption("max.print")
options(max.print=.Machine$integer.max)
print(results, digits.summary=3)#, max=.Machine$integer.max)
options(max.print=save.max.print)
# Give an alternate computation of DIC based on the variance of the deviance
# Refer to http://www.mrc-bsu.cam.ac.uk/bugs/winbugs/DIC-slides.pdf for derivation and why
# this alternate method may be superior to that automatically computed by OpenBugs
cat("\n\n*** Alternate DIC computation based on p_D = var(deviance)/2 \n")
results.row.names <- rownames(results$summary)
deviance.row.index<- grep("deviance", results.row.names)
deviance <- results$summary[deviance.row.index,]
p.D <- deviance["sd"]^2/2
dic <- deviance["mean"]+p.D
cat(" D-bar: ", deviance["mean"],"; var(dev): ", deviance["sd"]^2,
"; p.D: ", p.D, "; DIC: ", dic)
# Summary of population sizes. Extra code rounds results to integers except for Rhat
cat("\n\n\n\n*** Summary of Unmarked Population Size ***\n")
temp<- results$summary[ grep("Utot", rownames(results$summary)),]
old.Rhat <- temp["Rhat"]
temp<- formatC(temp, big.mark=",", format="d")
temp["Rhat"] <- formatC(old.Rhat,digits=2,format="f",flag="#")
print(temp, quote=FALSE)
cat("\n\n*** Summary of Total Population Size *** \n")
temp<- results$summary[ grep("Ntot", rownames(results$summary)),]
old.Rhat <- temp["Rhat"]
temp<- formatC(temp, big.mark=",", format="d")
temp["Rhat"] <- formatC(old.Rhat,digits=2,format="f",flag="#")
print(temp, quote=FALSE)
cat("\n\n\n\n*** Summary of Quantiles of Run Timing *** \n")
cat( " This is based on the sample weeks provided and the U[i] values \n")
q <- RunTime(time=time, U=results$sims.list$U, prob=run.prob)
temp <- rbind(apply(q,2,mean), apply(q,2,sd))
rownames(temp) <- c("Mean", "Sd")
print(round(temp,2))
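# Illustrative sketch (assumed to mirror the intent of RunTime): run-timing quantiles are the times
# at which a given fraction of the total run has passed, read off the cumulative run curve. For one
# hypothetical vector of weekly run sizes U over strata `time`:
#   stats::approx(x = cumsum(U)/sum(U), y = time, xout = run.prob)$y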
# Add the runtiming to the output object
results$runTime <- temp
cat("\n\n")
cat(paste("*** end of fit *** ", date()))
sink()
# save the report to a file?
if(save.output.to.files)writeLines(stdout, results.filename)
results$report <- stdout
# add some of the raw data to the bugs object for simplicity in referencing it later
results$data <- list( time=time, n1=n1, m2=m2, u2=u2,
jump.after=jump.after,
bad.n1=bad.n1, bad.m2=bad.m2, bad.u2=bad.u2,
logitP.cov=logitP.cov, version=version, date_run=date(),
title=title)
return(results)
} # end of function
|
/scratch/gouwar.j/cran-all/cranData/BTSPAS/R/TimeStratPetersenDiagError_fit.R
|
## 2020-11-07 CJS Allow user to specify prior for beta parameters for covariates on logitP
# 2018-12-06 CJS saved initial plot to ggplots format
# 2018-11-26 CJS Removed all references to OpenBugs
# 2014-09-01 CJS conversion to JAGS
# - no model name
# - C(,20) -> T(,20)
# - dflat() to dnorm(0, 1E-6)
# - created u2copy to improve mixing
# - added .000001 to Pmarked for JAGS????? Need to check this out to see what the problem is
# 2011-05-15 CJS limited etaU to 20 or less
# 2011-01-24 SB added call to run.windows.openbugs and run.windows.winbugs
# 2010-11-20 CJS added code to display progress of sampling during burnin and posterior to the user
# 2010-11-19 SB add code to make initial U a minimum of 1 to prevent crashing
# 2010-04-26 CJS fixed problem with init.logitP when n1=m2=k and you get +infinity which craps out lm()
# 2010-03-03 CJS allowed some logitP[j] to be fixed at arbitrary values (on the logit scale)
# added definition of storage.class(logitP) to deal with no fixed values where the program bombed
# 2009-12-07 CJS changed etaP to logitP
# 2009-12-05 CJS added title to argument list
# 2009-12-01 CJS added openbugs/winbugs directory to argument list
#' @keywords internal
#' @importFrom stats lm spline sd
TimeStratPetersenNonDiagError <- function(title,
prefix,
time,
n1,
m2,
u2,
jump.after=NULL,
logitP.cov=as.matrix(rep(1,length(u2))),
logitP.fixed=rep(NA,length(u2)),
n.chains=3,
n.iter=200000,
n.burnin=100000,
n.sims=2000,
tauU.alpha=1,
tauU.beta=.05,
taueU.alpha=1,
taueU.beta=.05,
prior.beta.logitP.mean = c(logit(sum(m2,na.rm=TRUE)/sum(n1,na.rm=TRUE)),rep(0, ncol(as.matrix(logitP.cov))-1)),
prior.beta.logitP.sd = c(2, rep(10, ncol(as.matrix(logitP.cov))-1)),
tauP.alpha=.001,
tauP.beta=.001,
debug=FALSE,
debug2=FALSE,
InitialSeed,
save.output.to.files=TRUE){
set.seed(InitialSeed) # set prior to initial value computations
#
# Fit the smoothed time-Stratified Petersen estimator with NON-Diagonal recoveries
# This model allows recoveries outside the stratum of release and error in the smoothed U curve
#
# This routine assumes that the strata are time (e.g. weeks).
# In each stratum n1 fish are released (with marks). These are usually
# captured fish that are marked, transported upstream, and released.
# These fish are used only to estimate the recapture rate downstream.
# Of the n1 fish released, some are recaptured in this stratum of release (column 1 of m2) or in
# subsequent strata (subsequent columns of m2). No fish are assumed to be available for capture
# outside the range of strata considered in the matrix of m2
# At the same time, u2 other (unmarked) fish are newly captured in stratum i.
# These EXCLUDE recaptures of marked fish. These are the fish that are "expanded"
# to estimate the population size of fish in stratum i.
#
# Input
# prefix - prefix for file name for initial plot of U's
# time- the stratum number
# n1 - vector of number of fish released in stratum i
# m2 - matrix of number of fish recovered who were released in stratum i and recovered in stratum j
# u2 - vector of number of unmarked fish captured in stratum i
# jump.after - points after which the spline is allowed to jump. Specify as a list of integers in the
# range of 1:Nstrata. If jump.after[i]=k, then the spline is split between strata k and k+1
# logitP.cov - covariates for logit(P)=X beta.logitP.cov
# - specify anything you want for fixed logitP's as the covariate values are simply ignored.
# - recommend that you specify 1 for the intercept and 0's for everything else
# logitP.fixed- values for logitP that are fixed in advance. Use NA if corresponding value is not fixed,
# otherwise specify the logitP value.
# This routine makes a call to the MCMC sampler to fit the model and then gets back the
# coda files for the posterior distribution.
## Set working directory to current directory (we should allow users to select this)
working.directory <- getwd()
## Define paths for the model, data, and initial value files
model.file <- file.path(working.directory, "model.txt")
data.file <- file.path(working.directory,"data.txt")
init.files <- file.path(working.directory,
paste("inits", 1:n.chains,".txt", sep = ""))
# Save the Bugs program to the model.txt file
#
sink(model.file) # NOTE: NO " allowed in model as this confuses the cat command
cat("
model {
# Time Stratified Petersen with NON Diagonal recapture and allowing for error in the smoothed U curve.
# Data input:
# Nstrata.rel - number of strata where fish are releases
# Nstrata.cap - number of (future strata) where fish are recaptured.
# n1 - number of marked fish released
# m2 - number of marked fish recaptured
# This is a matrix of size Nstrata.rel x (Nstrata.cap+1)
# with entries m2[i,j] = number of fish released in i and recaptured in j
# Entries in the last column are the number of fish NEVER recaptured from those
# released
# u2 - number of unmarked fish captured (To be expanded to population).
# logitP - the recapture rates. Use NA if these are modelled, otherwise specify the logit(fixed value, e.g. -10 for 0).
# Nfree.logitP - number of free logitP parameters
# free.logitP.index - vector of length(Nfree.logitP) for the free logitP parameters
# logitP.cov - covariates for logitP
# NlogitP.cov - number of logitP covariates
# SplineDesign- spline design matrix of size [Nstrata, maxelement of n.b.notflat]
# This is set up prior to the call.
# b.flat - vector of strata indices where the prior for the b's will be flat.
# this is normally the first two of each spline segment
# n.b.flat - number of b coefficients that have a flat prior
# b.notflat- vector of strata indices where difference in coefficients is modelled
# n.b.notflat- number of b coefficients that do not have a flat prior
# tauU.alpha, tauU.beta - parameters for prior on tauU
# taueU.alpha, taueU.beta - parameters for prior on taueU
# prior.beta.logitP.mean, prior.beta.logitP.sd - parameters for prior of coefficient of covariates for logitP
# tauP.alpha, tauP.beta - parameter for prior on tauP (residual variance of logit(P)'s after adjusting for
# covariates)
# xiMu, tauMu - mean and precision (1/variance) for prior on mean(log travel-times)
# xiSd, tauSd - mean and precision (1/variance) for prior on stats::sd(log travel times) - ON THE LOG SCALE
#
# Parameters of the model are:
# p[i]
# logitP[i] = logit(p[i]) = logitP.cov*beta.logitP
# The beta coefficients have a prior that is N(mean= prior.beta.logitP.mean, sd= prior.beta.logitP.sd)
# U[i]
# etaU[i] = log(U[i])
# which comes from spline with parameters bU[1... Knots+q]
# + error term eU[i]
#
# muLogTT[i] = mean log(travel time) assuming a log-normal distribution for travel time
# sdLogTT[i] = sd log(travel time) assuming a log-normal distribution for travel time
# Note that the etasdLogTT=log(sdLogTT) is modelled to keep the sd positive
#
##### Fit the spline for the U's and specify hierarchical model for the logit(P)'s ######
for(i in 1:Nstrata.cap){
logUne[i] <- inprod(SplineDesign[i,1:n.bU],bU[1:n.bU]) # spline design matrix * spline coeff
etaU[i] ~ dnorm(logUne[i], taueU)T(,20) # add random error
eU[i] <- etaU[i] - logUne[i]
}
for(i in 1:Nfree.logitP){ # model the free capture rates using covariates
mu.logitP[free.logitP.index[i]] <- inprod(logitP.cov[free.logitP.index[i],1:NlogitP.cov], beta.logitP[1:NlogitP.cov])
## logitP[free.logitP.index[i]] ~ dnorm(mu.logitP[free.logitP.index[i]],tauP)
mu.epsilon[free.logitP.index[i]] <- mu.logitP[free.logitP.index[i]] - log(u2copy[free.logitP.index[i]] + 1) + etaU[free.logitP.index[i]]
epsilon[free.logitP.index[i]] ~ dnorm(mu.epsilon[free.logitP.index[i]],tauP)
logitP[free.logitP.index[i]] <- log(u2copy[free.logitP.index[i]] + 1) - etaU[free.logitP.index[i]] + epsilon[free.logitP.index[i]]
}
for(i in 1:Nfixed.logitP){ # logit P parameters are fixed so we need to force epsilon to be defined.
epsilon[fixed.logitP.index[i]] <- 0
}
##### Hyperpriors #####
## Mean and sd of log travel-times
for(i in 1:Nstrata.rel){
muLogTT[i] ~ dnorm(xiMu,tauMu)
etasdLogTT[i] ~ dnorm(xiSd,tauSd)
}
## Run size - flat priors
for(i in 1:n.b.flat){
bU[b.flat[i]] ~ dnorm(0, 1E-6)
}
## Run size - priors on the difference
for(i in 1:n.b.notflat){
xiU[b.notflat[i]] <- 2*bU[b.notflat[i]-1] - bU[b.notflat[i]-2]
bU [b.notflat[i]] ~ dnorm(xiU[b.notflat[i]],tauU)
}
tauU ~ dgamma(tauU.alpha,tauU.beta) # Notice reduction from .0005 (in thesis) to .05
sigmaU <- 1/sqrt(tauU)
taueU ~ dgamma(taueU.alpha,taueU.beta) # dgamma(100,.05) # Notice reduction from .0005 (in thesis) to .05
sigmaeU <- 1/sqrt(taueU)
## Capture probabilities covariates
for(i in 1:NlogitP.cov){
beta.logitP[i] ~ dnorm(prior.beta.logitP.mean[i], 1/prior.beta.logitP.sd[i]^2) # rest of beta terms are normal 0 and a large variance
}
beta.logitP[NlogitP.cov+1] ~ dnorm(0, .01) # dummy so that covariates of length 1 function properly
tauP ~ dgamma(tauP.alpha,tauP.beta)
sigmaP <- 1/sqrt(tauP)
## prior on the Mean and log(sd) of log travel times
xiMu ~ dnorm(0,.0625)
tauMu ~ dgamma(1,.01)
sigmaMu <- 1/sqrt(tauMu)
xiSd ~ dnorm(0,.0625)
tauSd ~ dgamma(1,.1)
sigmaSd <- 1/sqrt(tauSd)
##### Compute derived parameters #####
## Get the sd of the log(travel times)
for(i in 1:Nstrata.rel){
log(sdLogTT[i]) <- etasdLogTT[i]
}
baseMu <- xiMu # mean and sd of log(travel time) distribution
baseSd <- exp(xiSd) # for the base distribution
## Transition probabilities
for(i in 1:Nstrata.rel){
# Probability of transition in 0 days (T<1 days)
Theta[i,i] <- phi((log(1)-muLogTT[i])/sdLogTT[i])
for(j in (i+1):Nstrata.cap){
# Probability of transition in j days (j-1<T<j)
Theta[i,j] <- phi((log(j-i+1)-muLogTT[i])/sdLogTT[i])- phi((log(j-i)-muLogTT[i])/sdLogTT[i])
}
Theta[i,Nstrata.cap+1] <- 1-sum(Theta[i,i:Nstrata.cap]) # fish never seen again
}
##### Likelihood contributions #####
## marked fish ##
for(i in 1:Nstrata.rel){
# Compute cell probabilities
for(j in i:Nstrata.cap){
Pmarked[i,j] <- Theta[i,j] * p[j] + .0000001 # potential problem in Jags?
}
Pmarked[i,Nstrata.cap+1] <- 1- sum(Pmarked[i,i:Nstrata.cap])
# Likelihood contribution
m2[i,i:(Nstrata.cap+1)] ~ dmulti(Pmarked[i,i:(Nstrata.cap+1)],n1[i])
}
## Capture probabilities and run size
for(j in 1:Nstrata.cap){
logit(p[j]) <- max(-10,min(10,logitP[j])) # convert from logit scale; use limits to avoid over/underflow
U[j] <- round(exp(etaU[j])) # convert from log scale
u2[j] ~ dbin(p[j],U[j]) # capture of newly unmarked fish
}
##### Derived Parameters #####
Utot <- sum( U[1:Nstrata.cap]) # Total number of unmarked fish
Ntot <- sum(n1[1:Nstrata.rel]) + Utot # Total population size including those fish marked and released
} # end of model
", fill=TRUE)
sink() # End of saving the Bugs program
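# Illustrative sketch (not part of the model file above): the Theta[i,j] cells discretize a log-normal
# travel-time distribution, i.e. the probability of a delay of d = j - i whole strata is
# P(d < T <= d + 1). With hypothetical values for the mean and sd of log travel time, one row of
# Theta could be reproduced in R as:
#   mu <- 1.0; sdl <- 0.5; d <- 0:4                                   # hypothetical parameters
#   p <- diff(c(0, stats::plnorm(d + 1, meanlog = mu, sdlog = sdl)))  # P(d < T <= d + 1)
#   c(p, 1 - sum(p))                                                  # last cell: never recaptured in window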
# Now to create the initial values, and the data prior to call to MCMC sampler
Nstrata.rel <- length(n1)
Nstrata.cap <- ncol(m2)-1 # remember last column of m2 has the number of fish NOT recovered
Uguess <- pmax(c((u2[1:Nstrata.rel]+1)*(n1+2)/
(apply(m2[,1:Nstrata.cap],1,sum)+1),
rep(1,Nstrata.cap-Nstrata.rel)),
(u2+1)/expit(prior.beta.logitP.mean[1])) # try and keep Uguess larger than observed values
Uguess[which(is.na(Uguess))] <- mean(Uguess,na.rm=TRUE)
# create the B-spline design matrix
# Each set of strata separated at the jump.after[i] points forms a separate spline with a separate basis
# We need to keep track of the breaks as the first two spline coefficients will have a flat
# prior and the others are then related to the previous values.
ext.jump <- c(0, jump.after, Nstrata.cap) # add the first and last breakpoints to the jump sets
SplineDesign <- matrix(0, nrow=0, ncol=0)
SplineDegree <- 3 # Degree of spline between occasions
b.flat <- NULL # index of spline coefficients with a flat prior distribution -first two of each segment
b.notflat <- NULL # index of spline coefficients where difference is modelled
all.knots <- NULL
for (i in 1:(length(ext.jump)-1)){
nstrata.in.set <- ext.jump[i+1]-ext.jump[i]
if(nstrata.in.set > 7)
{ knots <- seq(5,nstrata.in.set-1,4)/(nstrata.in.set+1) # a knot roughly every 4th stratum
} else{
knots <- .5 # a single knot at the midpoint for short segments
}
all.knots <- c(all.knots, knots)
# compute the design matrix for this set of strata
z <- bs((1:nstrata.in.set)/(nstrata.in.set+1), knots=knots, degree=SplineDegree,
intercept=TRUE, Boundary.knots=c(0,1))
# first two elements of b coefficients have a flat prior
b.flat <- c(b.flat, ncol(SplineDesign)+(1:2))
b.notflat <- c(b.notflat, ncol(SplineDesign)+3:(ncol(z)))
# add to the full design matrix which is block diagonal
SplineDesign <- cbind(SplineDesign, matrix(0, nrow=nrow(SplineDesign), ncol=ncol(z)))
SplineDesign <- rbind(SplineDesign,
cbind( matrix(0,nrow=nrow(z),ncol=ncol(SplineDesign)-ncol(z)), z) )
} # end of for loop
n.b.flat <- length(b.flat)
n.b.notflat <- length(b.notflat)
n.bU <- n.b.flat + n.b.notflat
# get the logitP covariate matrix ready
logitP.cov <- as.matrix(logitP.cov)
NlogitP.cov <- ncol(as.matrix(logitP.cov))
# get the logitP's ready to allow for fixed values
logitP <- as.numeric(logitP.fixed)
storage.mode(logitP) <- "double" # if there are no fixed logits, the default class will be logical which bombs
free.logitP.index <- (1:Nstrata.cap)[ is.na(logitP.fixed)] # free values are those where NA is specifed
Nfree.logitP <- length(free.logitP.index)
fixed.logitP.index <- (1:Nstrata.cap)[!is.na(logitP.fixed)]
fixed.logitP.value <- logitP.fixed[ fixed.logitP.index]
Nfixed.logitP <- length(fixed.logitP.index)
# create copy of u2 for use in improving mixing
u2copy <- exp(stats::spline(x = 1:length(u2), y = log(u2+1), xout = 1:length(u2))$y)-1 # on log scale to avoid negative values
u2copy <- pmax(0,round(u2copy)) # round to integers
datalist <- list("Nstrata.rel", "Nstrata.cap", "n1", "m2", "u2", "u2copy",
"logitP", "Nfree.logitP", "free.logitP.index", # those indices that are fixed and free to vary
"Nfixed.logitP","fixed.logitP.index","fixed.logitP.value", # indices that are fixed and cannot vary
"logitP.cov", "NlogitP.cov",
"SplineDesign",
"b.flat", "n.b.flat", "b.notflat", "n.b.notflat", "n.bU",
"tauU.alpha", "tauU.beta", "taueU.alpha", "taueU.beta",
"prior.beta.logitP.mean", "prior.beta.logitP.sd",
"tauP.alpha", "tauP.beta")
## Generate best guess initial values
## These values are only used to draw an initial fit plot and are not
## used as initial values in MCMC.
Uguess <- pmax(c((u2[1:Nstrata.rel]+1)*(n1+2)/
(apply(m2[,1:Nstrata.cap],1,sum)+1),
rep(1,Nstrata.cap-Nstrata.rel)),
(u2+1)/expit(prior.beta.logitP.mean[1]), na.rm=TRUE) # try and keep Uguess larger than observed values
Uguess[which(is.na(Uguess))] <- mean(Uguess,na.rm=TRUE)
init.bU <- stats::lm(log(Uguess) ~ SplineDesign-1)$coefficients # initial values for spline coefficients
if(debug2) {
cat("compute init.bU \n")
browser() # Stop here to examine the spline design matrix function
}
logitPguess <- c(logit(pmax(0.05,pmin(.95,(apply(m2[,1:Nstrata.cap],1,sum,na.rm=TRUE)+1)/(n1+1))))
,rep(prior.beta.logitP.mean[1],Nstrata.cap-Nstrata.rel))
#browser()
init.beta.logitP <- as.vector(stats::lm( logitPguess ~ logitP.cov-1)$coefficients)
if(debug2) {
cat(" obtained initial values of beta.logitP\n")
browser()
}
# create an initial plot of the fit
plot.data <- data.frame(time=time,
logUguess=log(Uguess),
spline=SplineDesign %*% init.bU, stringsAsFactors=FALSE)
init.plot <- ggplot(data=plot.data, aes_(x=~time, y=~logUguess))+
ggtitle(title, subtitle="Initial spline fit to estimated log U[i]")+
geom_point()+
geom_line(aes_(y=~spline))+
xlab("Stratum")+ylab("log(U[i])")+
scale_x_continuous(breaks=seq(min(plot.data$time, na.rm=TRUE),max(plot.data$time, na.rm=TRUE),2))
if(save.output.to.files)ggsave(init.plot, filename=paste(prefix,"-initialU.pdf",sep=""), height=4, width=6, units="in")
#results$plots$plot.init <- init.plot # do this after running the MCMC chain (see end of function)
parameters <- c("logitP", "beta.logitP", "tauP", "sigmaP",
"bU", "tauU", "sigmaU",
"eU", "taueU", "sigmaeU",
"Ntot", "Utot", "logUne", "etaU", "U",
"baseMu","baseSd", # mean and sd of base log(travel time)
"Theta", # the movement probabilities
"muLogTT", "sdLogTT") # mean and sd of log(travel times)
if( any(is.na(m2))) {parameters <- c(parameters,"m2")}   # monitor the imputed values if any m2 are missing (e.g. bad data set to NA)
if( any(is.na(u2))) {parameters <- c(parameters,"u2")}
## Generate initial values
init.vals <- genInitVals(model="TSPNDE",
n1=n1,
m2=m2,
u2=u2,
logitP.cov=logitP.cov,
logitP.fixed=logitP.fixed,
SplineDesign=SplineDesign,
n.chains=n.chains)
## Generate data list
data.list <- list()
for(i in 1:length(datalist)){
data.list[[length(data.list)+1]] <-get(datalist[[i]])
}
names(data.list) <- as.list(datalist)
# Set up for the call to the MCMC sampler
results <- run.MCMC(modelFile=model.file,
dataFile=data.file,
dataList=data.list,
initFiles=init.files,
initVals=init.vals,
parameters=parameters,
nChains=n.chains,
nIter=n.iter,
nBurnin=n.burnin,
nSims=n.sims,
overRelax=FALSE,
initialSeed=InitialSeed,
working.directory=working.directory,
debug=debug)
results$plots$plot.init <- init.plot # save initial plot as well
return(results)
}
|
/scratch/gouwar.j/cran-all/cranData/BTSPAS/R/TimeStratPetersenNonDiagError.R
|
## 2020-11-07 CJS Allow user to specify prior for beta parameters for covariates on logitP
# 2018-12-06 CJS converted initial plot to ggplot2
# 2018-11-25 CJS Remove all references to OpenBugs
# 2014-09-01 CJS conversion to JAGS
# - no model name
# - C(,20) -> T(,20)
# - dflat() to dnorm(0, 1E-6)
# - added u2copy to improve mixing based on Matt S. suggestion
# 2011-05-15 CJS limited etaU to 20 to prevent overflow in binomial computations
# 2011-03-09 CJS added prior to muTT (mean.muTT and sd.muTT) with defaults same a previously
# 2011-02-17 CJS limited initial Pguess to between .01 and .99 to avoid taking logit of 0 or 1
# 2011-02-17 CJS fixed initial values for theta; added as.matrix to deal with case of Delta.max=1
# 2011-01-24 SB added call to run.windows.openbugs and run.windows.winbugs
# 2010-11-25 CJS output on progress of burnin and post-burnin phases
# 2010-04-25 CJS fixed problems of init.logitP=+infinity if n1=m2=k which crapped out the lm() call
# 2010-03-11 Added fixed values of logitP[j] to be provided by user.
# 2010-03-02 SJB Created file.
#' @keywords internal
#' @importFrom stats lm spline sd
TimeStratPetersenNonDiagErrorNP <- function(title,
prefix,
time,
n1,
m2,
u2,
jump.after=NULL,
logitP.cov=as.matrix(rep(1,length(u2))),
logitP.fixed=rep(NA,length(u2)),
n.chains=3,
n.iter=200000,
n.burnin=100000,
n.sims=2000,
tauU.alpha=1,
tauU.beta=.05,
taueU.alpha=1,
taueU.beta=.05,
Delta.max,
mean.muTT=rep(0,Delta.max),
sd.muTT=rep(sqrt(.666),Delta.max),
tauTT.alpha=.1,
tauTT.beta=.1,
prior.beta.logitP.mean = c(logit(sum(m2,na.rm=TRUE)/sum(n1,na.rm=TRUE)),rep(0, ncol(as.matrix(logitP.cov))-1)),
prior.beta.logitP.sd = c(2, rep(10, ncol(as.matrix(logitP.cov))-1)),
tauP.alpha=.001,
tauP.beta=.001,
debug=FALSE,
debug2=FALSE,
InitialSeed,
save.output.to.files=TRUE){
# browser()
set.seed(InitialSeed) # set prior to initial value computations
#
# Fit the smoothed time-Stratified Petersen estimator with NON-Diagonal recoveries.
# This model allows recoveries outside the stratum of release and error in the smoothed U curve.
# The travel time model is based on the continuation ratio and makes no parametric assumptions.
#
# This routine assumes that the strata are time (e.g. weeks).
# In each stratum n1 fish are released (with marks). These are usually
# captured fish that are marked, transported upstream, and released.
# These fish are used only to estimate the recapture rate downstream.
# Of the n1 fish released, some are recaptured in this stratum of release (column 1 of m2) or in
# subsequent strata (subsequent columns of m2). No fish are assumed to be available for capture
# outside the range of strata considered in the matrix of m2.
# At the same time, u2 other (unmarked) fish are newly captured in stratum i.
# These EXCLUDE recaptures of marked fish. These are the fish that are "expanded"
# to estimate the population size of fish in stratum i.
#
# Input
# prefix - prefix for file name for initial plot of U's
# time- the stratum number
# n1 - vector of number of fish released in stratum i
# m2 - matrix of number of fish recovered who were released in stratum i and recovered in stratum j
# u2 - vector of number of unmarked fish captured in stratum i
# jump.after - points after which the spline is allowed to jump. Specify as a list of integers in the
# range of 1:Nstrata. If jump.after[i]=k, then the spline is split between strata k and k+1
# logitP.cov - covariates for logit(P)=X beta.logitP.cov
# - specify anything you want for fixed logitP's as the covariate values are simply ignored.
# - recommend that you specify 1 for the intercept and 0's for everything else
# logitP.fixed - values for logitP that are fixed in advance. Use NA if corresponding value is not fixed,
# otherwise specify the logitP value.
# This routine makes a call to the MCMC sampler to fit the model and then gets back the
# coda files for the posterior distribution.
## Set working directory to current directory (we should allow users to select this)
working.directory <- getwd()
## Define paths for the model, data, and initial value files
model.file <- file.path(working.directory, "model.txt")
data.file <- file.path(working.directory,"data.txt")
init.files <- file.path(working.directory,
paste("inits", 1:n.chains,".txt", sep = ""))
# Save the Bugs program to the model.txt file
#
sink(model.file) # NOTE: NO " allowed in model as this confuses the cat command
cat("
model {
# Time Stratified Petersen with NON Diagonal recapture and allowing for error in the smoothed U curve.
# Non-parametric estimation of travel times for marked individuals.
#
# Data input:
#    Nstrata.rel - number of strata where fish are released
# Nstrata.cap - number of (future strata) where fish are recaptured.
# n1 - number of marked fish released
# m2 - number of marked fish recaptured
# This is a matrix of size Nstrata.rel x (Nstrata.cap+1)
# with entries m2[i,j] = number of fish released in i and recaptured in j
# Entries in the last column are the number of fish NEVER recaptured from those
# released
# u2 - number of unmarked fish captured (To be expanded to population).
# logitP - the recapture rates. Use NA if these are modelled, otherwise specify the logit(fixed value, e.g. -10 for 0).
# Nfree.logitP - number of free logitP parameters
# free.logitP.index - vector of length(Nfree.logitP) for the free logitP parameters
# logitP.cov - covariates for logitP
# NlogitP.cov - number of logitP covariates
#    SplineDesign- spline design matrix of size [Nstrata, max element of n.b.notflat]
# This is set up prior to the call.
# b.flat - vector of strata indices where the prior for the b's will be flat.
# this is normally the first two of each spline segment
# n.b.flat - number of b coefficients that have a flat prior
# b.notflat- vector of strata indices where difference in coefficients is modelled
# n.b.notflat- number of b coefficients that do not have a flat prior
# tauU.alpha, tauU.beta - parameters for prior on tauU
# taueU.alpha, taueU.beta - parameters for prior on taueU
# prior.beta.logitP.mean, prior.beta.logitP.sd - parameters for prior of coefficient of covariates for logitP
# tauP.alpha, tauP.beta - parameter for prior on tauP (residual variance of logit(P)'s after adjusting for
# covariates)
#
# Parameters of the model are:
# p[i]
# logitP[i] = logit(p[i]) = logitP.cov*beta.logitP
# The beta coefficients have a prior that is N(mean= prior.beta.logitP.mean, sd= prior.beta.logitP.sd)
# U[i]
# etaU[i] = log(U[i])
# which comes from spline with parameters bU[1... Knots+q]
# + error term eU[i]
#
# muTT[j] = mean(logit(delta[i,i+j-1])), j=1,...,Delta.max
# sdTT = stats::sd(logit(delta[i,i+j-1])), j=1,....,Delta.max
# delta[i,i+j-1]=Theta[i,i+j-1]/(1-Theta[i,i]-...-Theta[i,i+j-2])
#
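#    Worked illustration of the continuation-ratio parameterization (assumed toy numbers, not part of the model):
#    with Delta.max = 2 and delta[i,1] = 0.5, delta[i,2] = 0.4 the travel-time cell probabilities are
#        Theta[i,1] = 0.5
#        Theta[i,2] = 0.4 * (1 - 0.5)    = 0.2
#        Theta[i,3] = 1 - (0.5 + 0.2)    = 0.3   (the remaining mass, i.e. passing Delta.max strata after release)
#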
##### Fit the spline for the U's and specify hierarchical model for the logit(P)'s ######
for(i in 1:(Nstrata.cap)){
## Model for U's
logUne[i] <- inprod(SplineDesign[i,1:n.bU],bU[1:n.bU]) # spline design matrix * spline coeff
etaU[i] ~ dnorm(logUne[i], taueU)T(,20) # add random error but keep from getting too large
eU[i] <- etaU[i] - logUne[i]
}
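   # Note on the parameterization below (explanatory comment only, no change to the model):
   # because u2[i] is Binomial(U[i], p[i]), a crude data-based value of logit(p[i]) is
   # log(u2[i]+1) - log(U[i]), approximated here by log(u2copy[i]+1) - etaU[i].  logitP is therefore
   # written as that quantity plus a deviation epsilon[i]; sampling epsilon rather than logitP directly
   # reduces the posterior correlation between the capture probabilities and the U[i] and improves mixing.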
for(i in 1:Nfree.logitP){ # model the free capture rates using covariates
mu.logitP[free.logitP.index[i]] <- inprod(logitP.cov[free.logitP.index[i],1:NlogitP.cov], beta.logitP[1:NlogitP.cov])
## logitP[free.logitP.index[i]] ~ dnorm(mu.logitP[free.logitP.index[i]],tauP)
mu.epsilon[free.logitP.index[i]] <- mu.logitP[free.logitP.index[i]] - log(u2copy[free.logitP.index[i]] + 1) + etaU[free.logitP.index[i]]
epsilon[free.logitP.index[i]] ~ dnorm(mu.epsilon[free.logitP.index[i]],tauP)
logitP[free.logitP.index[i]] <- log(u2copy[free.logitP.index[i]] + 1) - etaU[free.logitP.index[i]] + epsilon[free.logitP.index[i]]
}
# define the last epsilon (including the extra needed for m2)
for(i in Extra.strata.cap){
epsilon[ Nstrata.cap + i] <- 0 # forces definition of epsilon1 ...epsilon[Nstrata.cap -> Extra.strata.cap]
}
##### Priors and hyperpriors #####
## Transition probabilities -- continuation ratio model
for(i in 1:Nstrata.rel){
## delta[i,j] is the probability that a marked fish released on day i passes the second trap
## on day i+j-1 given that it does not pass on days i,...,i+j-2. r[i,j]=logit(delta[i,j])
## is assumed to have a normal distribution with mean muTT[j] and precision tauTT.
r[i,1] ~ dnorm(muTT[1],tauTT)
logit(Theta[i,1] ) <- r[i,1]
for(j in 2:Delta.max){
r[i,j] ~ dnorm(muTT[j],tauTT)
logit(delta[i,j]) <- r[i,j]
Theta[i,j] <- delta[i,j] * (1 - sum(Theta[i,1:(j-1)]))
}
Theta[i,Delta.max+1] <- 1- sum(Theta[i,1:Delta.max])
}
# derived parameters on actual movement probabilities
logit(movep[1]) <- muTT[1]
for(j in 2:Delta.max){
movep[j] <- ilogit(muTT[j]) *(1- sum(movep[1:(j-1)]))
}
movep[Delta.max+1] <- 1- sum(movep[1:Delta.max])
# prior on the movement rates. These are specified using the make.muTT.prior function
for(j in 1:Delta.max){
muTT[j] ~ dnorm(mean.muTT[j],tau.muTT[j])
}
tauTT~ dgamma(tauTT.alpha,tauTT.beta)
sdTT <- 1/sqrt(tauTT)
## Run size - flat priors
for(i in 1:n.b.flat){
bU[b.flat[i]] ~ dnorm(0, 1E-6)
}
## Run size - priors on the difference
for(i in 1:n.b.notflat){
xiU[b.notflat[i]] <- 2*bU[b.notflat[i]-1] - bU[b.notflat[i]-2]
bU [b.notflat[i]] ~ dnorm(xiU[b.notflat[i]],tauU)
}
tauU ~ dgamma(tauU.alpha,tauU.beta) # Notice reduction from .0005 (in thesis) to .05
sigmaU <- 1/sqrt(tauU)
taueU ~ dgamma(taueU.alpha,taueU.beta) # dgamma(100,.05) # Notice reduction from .0005 (in thesis) to .05
sigmaeU <- 1/sqrt(taueU)
## Capture probabilities covariates
for(i in 1:NlogitP.cov){
   beta.logitP[i] ~ dnorm(prior.beta.logitP.mean[i], 1/prior.beta.logitP.sd[i]^2)   # normal prior on each logitP regression coefficient (precision = 1/sd^2)
}
beta.logitP[NlogitP.cov+1] ~ dnorm(0, .01) # dummy so that covariates of length 1 function properly
tauP ~ dgamma(tauP.alpha,tauP.beta)
sigmaP <- 1/sqrt(tauP)
##### Likelihood contributions #####
## marked fish ##
for(i in 1:Nstrata.rel){
# Compute cell probabilities
for(j in 1:(Delta.max+1)){
Pmarked[i,j] <- Theta[i,j] * p[i+j-1]
}
Pmarked[i,Delta.max+2] <- 1- sum(Pmarked[i,1:(Delta.max+1)])
# Likelihood contribution
m2[i,1:(Delta.max+2)] ~ dmulti(Pmarked[i,],n1[i])
}
## Capture probabilities and run size
for(j in 1:(Nstrata.cap + Extra.strata.cap)){
logit(p[j]) <- logitP[j] # convert from logit scale
}
for(j in 1:Nstrata.cap){
U[j] <- round(exp(etaU[j])) # convert from log scale
u2[j] ~ dbin(p[j],U[j]) # capture of newly unmarked fish
}
##### Derived Parameters #####
Utot <- sum( U[1:Nstrata.cap]) # Total number of unmarked fish
Ntot <- sum(n1[1:Nstrata.rel]) + Utot # Total population size including those fish marked and released
} # end of model
", fill=TRUE)
sink() # End of saving the Bugs program
# Now to create the initial values, and the data prior to call to MCMC sampler
Nstrata.rel <- length(n1)
Nstrata.cap <- length(u2)
## Count extra columns that will have to be added to account for Delta.max
Extra.strata.cap <- max(0,Nstrata.rel + ncol(m2) - Nstrata.cap -1)
# create the B-spline design matrix
# Each set of strata separated at the jump.after[i] points forms a separate spline with a separate basis
# We need to keep track of the breaks as the first two spline coefficients will have a flat
# prior and the others are then related to the previous values.
ext.jump <- c(0, jump.after, Nstrata.cap) # add the first and last breakpoints to the jump sets
SplineDesign <- matrix(0, nrow=0, ncol=0)
SplineDegree <- 3 # Degree of spline between occasions
b.flat <- NULL # index of spline coefficients with a flat prior distribution -first two of each segment
b.notflat <- NULL # index of spline coefficients where difference is modelled
all.knots <- NULL
for (i in 1:(length(ext.jump)-1)){
nstrata.in.set <- ext.jump[i+1]-ext.jump[i]
if(nstrata.in.set > 7)
{ knots <- seq(5,nstrata.in.set-1,4)/(nstrata.in.set+1) # a knot roughly every 4th stratum
} else{
    knots <- .5                   # short segment - place a single knot at the midpoint
}
all.knots <- c(all.knots, knots)
# compute the design matrix for this set of strata
z <- bs((1:nstrata.in.set)/(nstrata.in.set+1), knots=knots, degree=SplineDegree,
intercept=TRUE, Boundary.knots=c(0,1))
  # first two elements of b coefficients have a flat prior
b.flat <- c(b.flat, ncol(SplineDesign)+(1:2))
b.notflat <- c(b.notflat, ncol(SplineDesign)+3:(ncol(z)))
# add to the full design matrix which is block diagonal
SplineDesign <- cbind(SplineDesign, matrix(0, nrow=nrow(SplineDesign), ncol=ncol(z)))
SplineDesign <- rbind(SplineDesign,
cbind( matrix(0,nrow=nrow(z),ncol=ncol(SplineDesign)-ncol(z)), z) )
} # end of for loop
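# Illustration of the block construction (assumed toy numbers): a segment with nstrata.in.set = 12
# gets interior knots at seq(5,11,4)/13 = c(5,9)/13, and bs(..., degree=3, intercept=TRUE) then
# returns ncol(z) = 2 + 3 + 1 = 6 columns for that block; its first two coefficients receive the
# flat prior (b.flat) and columns 3 to 6 are modelled through second differences (b.notflat).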
n.b.flat <- length(b.flat)
n.b.notflat <- length(b.notflat)
n.bU <- n.b.flat + n.b.notflat
# get the logitP covariate matrix ready
logitP.cov <- as.matrix(logitP.cov)
NlogitP.cov <- ncol(as.matrix(logitP.cov))
# get the logitP's ready to allow for fixed values
logitP <- c(as.numeric(logitP.fixed),rep(-10,Extra.strata.cap))
storage.mode(logitP) <- "double" # force the storage class to be correct if there are no fixed values
free.logitP.index <- (1:Nstrata.cap)[ is.na(logitP.fixed)]  # free values are those where NA is specified
Nfree.logitP <- length(free.logitP.index)
tau.muTT <- 1/sd.muTT**2 # convert from sd to precision = 1/variance
# make a copy of u2 to improve mixing
u2copy <- exp(stats::spline(x = 1:length(u2), y = log(u2+1), xout = 1:length(u2))$y)-1 # on log scale to avoid negative values
u2copy <- pmax(0,round(u2copy)) # round to integers
datalist <- list("Nstrata.rel", "Nstrata.cap","Extra.strata.cap",
"Delta.max","n1", "m2", "u2", "u2copy",
"logitP", "Nfree.logitP", "free.logitP.index",
"logitP.cov", "NlogitP.cov",
"SplineDesign",
"b.flat", "n.b.flat", "b.notflat", "n.b.notflat", "n.bU",
"mean.muTT", "tau.muTT", # priors on muTT
"tauTT.alpha","tauTT.beta",
"tauU.alpha", "tauU.beta", "taueU.alpha", "taueU.beta",
"prior.beta.logitP.mean", "prior.beta.logitP.sd",
"tauP.alpha", "tauP.beta")
## Generate the initial values for the parameters of the model
## 1) U and spline coefficients
Uguess <- pmax((u2+1)/expit(prior.beta.logitP.mean[1]),1) # try and keep Uguess larger than observed values
Uguess[which(is.na(Uguess))] <- mean(Uguess,na.rm=TRUE)
init.bU <- stats::lm(log(Uguess) ~ SplineDesign-1)$coefficients # initial values for spline coefficients
if(debug2) {
cat("compute init.bU \n")
browser() # Stop here to examine the spline design matrix function
}
## 2) Capture probabilities
logitPguess <- c(logit(pmin(.99,pmax(.01,(apply(m2[,1:(Delta.max+1)],1,sum)+1)/(n1+1)))),
rep(prior.beta.logitP.mean[1],Nstrata.cap-Nstrata.rel))
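  # Illustration (assumed toy numbers): a release stratum with n1 = 100 and 20 recaptures in total gives
  # a crude capture probability of (20+1)/(100+1), about 0.21, so logitPguess is roughly logit(0.21) = -1.3;
  # capture strata with no corresponding release use the prior mean of the logitP intercept instead.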
#browser()
init.beta.logitP <- as.vector(stats::lm( logitPguess ~ logitP.cov-1)$coefficients)
if(debug2) {
cat(" obtained initial values of beta.logitP\n")
browser()
}
# create an initial plot of the fit
plot.data <- data.frame(time=time,
logUguess=log(Uguess[1:Nstrata.cap]),
spline=SplineDesign %*% init.bU, stringsAsFactors=FALSE)
init.plot <- ggplot(data=plot.data, aes_(x=~time, y=~logUguess))+
ggtitle(title, subtitle="Initial spline fit to estimated log U[i]")+
geom_point()+
geom_line(aes_(y=~spline))+
xlab("Stratum")+ylab("log(U[i])")+
scale_x_continuous(breaks=seq(min(plot.data$time, na.rm=TRUE),max(plot.data$time,na.rm=TRUE),2))
if(save.output.to.files)ggsave(init.plot, filename=paste(prefix,"-initialU.pdf",sep=""), height=4, width=6, units="in")
#results$plots$plot.init <- init.plot # do this after running the MCMC chain (see end of function)
parameters <- c("logitP", "beta.logitP", "tauP", "sigmaP",
"bU", "tauU", "sigmaU",
"eU", "taueU", "sigmaeU",
"Ntot", "Utot", "logUne", "etaU", "U",
"muTT","sdTT","Theta","movep")
if( any(is.na(m2))) {parameters <- c(parameters,"m2")}   # monitor the imputed values if any m2 are missing (e.g. bad data set to NA)
if( any(is.na(u2))) {parameters <- c(parameters,"u2")}
init.vals <- genInitVals("TSPNDENP",
n1,m2,u2,
Delta.max=Delta.max,
logitP.cov=logitP.cov,
logitP.fixed=logitP.fixed,
SplineDesign=SplineDesign,
n.chains=n.chains)
#browser()
## Generate data list
data.list <- list()
for(i in 1:length(datalist)){
data.list[[length(data.list)+1]] <-get(datalist[[i]])
}
names(data.list) <- as.list(datalist)
# Set up for the call to the MCMC sampler
results <- run.MCMC(modelFile=model.file,
dataFile=data.file,
dataList=data.list,
initFiles=init.files,
initVals=init.vals,
parameters=parameters,
nChains=n.chains,
nIter=n.iter,
nBurnin=n.burnin,
nSims=n.sims,
overRelax=FALSE,
initialSeed=InitialSeed,
working.directory=working.directory,
debug=debug)
results$plots$plot.init <- init.plot # save initial plot to results object
return(results)
}
|
/scratch/gouwar.j/cran-all/cranData/BTSPAS/R/TimeStratPetersenNonDiagErrorNP.R
|
## 2020-11-07 CJS Allow user to specify prior for beta parameters for covariates on logitP
# 2018-12-23 CJS added movep to BUGS code
# 2018-11-28 CJS removed referece to OpenBugs
# 2014-09-01 CJS conversion to JAGS
# - no model name
# - C(,20) -> T(,20)
# - dflat() to dnorm(0, 1E-6)
# - added u2copy to improve mixing based on Matt S. suggestion
# 2011-05-15 CJS Limited etaU to max of 20
# 2011-02-28 CJS First version
#' @keywords internal
#' @importFrom stats lm spline var sd
TimeStratPetersenNonDiagErrorNPMarkAvail <- function(title,
prefix,
time,
n1,
m2,
u2,
jump.after=NULL,
logitP.cov=as.matrix(rep(1,length(u2))),
logitP.fixed=rep(NA,length(u2)),
ma.p.alpha,
ma.p.beta,
n.chains=3,
n.iter=200000,
n.burnin=100000,
n.sims=2000,
tauU.alpha=1,
tauU.beta=.05,
taueU.alpha=1,
taueU.beta=.05,
Delta.max,
tauTT.alpha=.1,
tauTT.beta=.1,
prior.beta.logitP.mean = c(logit(sum(m2,na.rm=TRUE)/sum(n1,na.rm=TRUE)),rep(0, ncol(as.matrix(logitP.cov))-1)),
prior.beta.logitP.sd = c(2, rep(10, ncol(as.matrix(logitP.cov))-1)),
tauP.alpha=.001,
tauP.beta=.001,
debug=FALSE,
debug2=FALSE,
InitialSeed,
save.output.to.files=TRUE){
set.seed(InitialSeed) # set prior to initial value computations
#
# Fit the smoothed time-Stratified Petersen estimator with NON-Diagonal recoveries and less than 100%
# marked fish available for recapture.
# This model allows recoveries outside the stratum of release and error in the smoothed U curve.
# The travel time model is based on the continuation ratio and makes no parametric assumptions.
# It allows for only a fraction of marked fish to be available based on prior information
# on the marked availability rate (ma.p).
#
# This routine assumes that the strata are time (e.g. weeks).
# In each stratum n1 fish are released (with marks). These are usually
# captured fish that are marked, transported upstream, and released.
# These fish are used only to estimate the recapture rate downstream.
# Not all of the marked fish are available for subsequent recapture. Only a fraction ma.p
# are available. This lack of availability could be because of fall back, handling mortality, etc.
# Of the n1 fish released and available, some are recaptured in this stratum of release (column 1 of m2) or in
# subsequent strata (subsequent columns of m2). No fish are assumed to be available for capture
# outside the range of strata considered in the matrix of m2.
# At the same time, u2 other (unmarked) fish are newly captured in stratum i.
# These EXCLUDE recaptures of marked fish. These are the fish that are "expanded"
# to estimate the population size of fish in stratum i.
#
# Input
# prefix - prefix for file name for initial plot of U's
# time- the stratum number
# n1 - vector of number of fish released in stratum i
# m2 - matrix of number of fish recovered who were released in stratum i and recovered in stratum j
# u2 - vector of number of unmarked fish captured in stratum i
# jump.after - points after which the spline is allowed to jump. Specify as a list of integers in the
# range of 1:Nstrata. If jump.after[i]=k, then the spline is split between strata k and k+1
# logitP.cov - covariates for logit(P)=X beta.logitP.cov
# - specify anything you want for fixed logitP's as the covariate values are simply ignored.
# - recommend that you specify 1 for the intercept and 0's for everything else
# logitP.fixed - values for logitP that are fixed in advance. Use NA if corresponding value is not fixed,
# otherwise specify the logitP value.
# ma.p.alpha, ma.p.beta - information on mark available. Assumed to be prior beta(ma.p.alpha, ma.p.beta)
# This routine makes a call to the MCMC sampler to fit the model and then gets back the
# coda files for the posterior distribution.
## Set working directory to current directory (we should allow users to select this)
working.directory <- getwd()
## Define paths for the model, data, and initial value files
model.file <- file.path(working.directory, "model.txt")
data.file <- file.path(working.directory,"data.txt")
init.files <- file.path(working.directory,
paste("inits", 1:n.chains,".txt", sep = ""))
# Save the Bugs program to the model.txt file
#
sink(model.file) # NOTE: NO " allowed in model as this confuses the cat command
cat("
model {
# Time Stratified Petersen with NON Diagonal recapture and allowing for error in the smoothed U curve.
# Non-parametric estimation of travel times for marked individuals.
#
# Data input:
#    Nstrata.rel - number of strata where fish are released
# Nstrata.cap - number of (future strata) where fish are recaptured.
# n1 - number of marked fish released
# m2 - number of marked fish recaptured
# This is a matrix of size Nstrata.rel x (Nstrata.cap+1)
# with entries m2[i,j] = number of fish released in i and recaptured in j
# Entries in the last column are the number of fish NEVER recaptured from those
# released
# u2 - number of unmarked fish captured (To be expanded to population).
# logitP - the recapture rates. Use NA if these are modelled, otherwise specify the logit(fixed value, e.g. -10 for 0).
# Nfree.logitP - number of free logitP parameters
# free.logitP.index - vector of length(Nfree.logitP) for the free logitP parameters
# logitP.cov - covariates for logitP
# NlogitP.cov - number of logitP covariates
# ma.p.alpha, ma.p.beta - prior beta(alpha,beta) on mark availability
#    SplineDesign- spline design matrix of size [Nstrata, max element of n.b.notflat]
# This is set up prior to the call.
# b.flat - vector of strata indices where the prior for the b's will be flat.
# this is normally the first two of each spline segment
# n.b.flat - number of b coefficients that have a flat prior
# b.notflat- vector of strata indices where difference in coefficients is modelled
# n.b.notflat- number of b coefficients that do not have a flat prior
# tauU.alpha, tauU.beta - parameters for prior on tauU
# taueU.alpha, taueU.beta - parameters for prior on taueU
# prior.beta.logitP.mean, prior.beta.logitP.sd - parameters for prior of coefficient of covariates for logitP
# tauP.alpha, tauP.beta - parameter for prior on tauP (residual variance of logit(P)'s after adjusting for
# covariates)
# xiMu, tauMu - mean and precision (1/variance) for prior on mean(log travel-times)
#    xiSd, tauSd - mean and precision (1/variance) for prior on sd(log travel times)
#
# Parameters of the model are:
# p[i]
# logitP[i] = logit(p[i]) = logitP.cov*beta.logitP
# The beta coefficients have a prior that is N(mean= prior.beta.logitP.mean, sd= prior.beta.logitP.sd)
# U[i]
# etaU[i] = log(U[i])
# which comes from spline with parameters bU[1... Knots+q]
# + error term eU[i]
#
# muTT[j] = mean(logit(delta[i,i+j-1])), j=1,...,Delta.max
# sdTT = stats::sd(logit(delta[i,i+j-1])), j=1,....,Delta.max
# delta[i,i+j-1]=Theta[i,i+j-1]/(1-Theta[i,i]-...-Theta[i,i+j-2])
#
##### Fit the spline for the U's and specify hierarchical model for the logit(P)'s ######
for(i in 1:(Nstrata.cap)){
## Model for U's
logUne[i] <- inprod(SplineDesign[i,1:n.bU],bU[1:n.bU]) # spline design matrix * spline coeff
etaU[i] ~ dnorm(logUne[i], taueU)T(,20) # add random error
eU[i] <- etaU[i] - logUne[i]
}
for(i in 1:Nfree.logitP){ # model the free capture rates using covariates
mu.logitP[free.logitP.index[i]] <- inprod(logitP.cov[free.logitP.index[i],1:NlogitP.cov], beta.logitP[1:NlogitP.cov])
logitP[free.logitP.index[i]] ~ dnorm(mu.logitP[free.logitP.index[i]],tauP)
}
##### Priors and hyperpriors #####
##### Prior information on mark availability #####
## There is no other information in the actual study to update the ma.p other than the prior
## information.
ma.p ~ dbeta(ma.p.alpha, ma.p.beta)
## Transition probabilities -- continuation ratio model
for(i in 1:Nstrata.rel){
## delta[i,j] is the probability that a marked fish released on day i passes the second trap
## on day i+j-1 given that it does not pass on days i,...,i+j-2. r[i,j]=logit(delta[i,j])
## is assumed to have a normal distribution with mean muTT[j] and precision tauTT.
r[i,1] ~ dnorm(muTT[1],tauTT)
logit(Theta[i,1] ) <- r[i,1]
for(j in 2:Delta.max){
r[i,j] ~ dnorm(muTT[j],tauTT)
logit(delta[i,j]) <- r[i,j]
Theta[i,j] <- delta[i,j] * (1 - sum(Theta[i,1:(j-1)]))
}
Theta[i,Delta.max+1] <- 1- sum(Theta[i,1:Delta.max])
}
for(j in 1:Delta.max){
muTT[j] ~ dnorm(0,.666)
}
tauTT~ dgamma(tauTT.alpha,tauTT.beta)
sdTT <- 1/sqrt(tauTT)
## Run size - flat priors
for(i in 1:n.b.flat){
bU[b.flat[i]] ~ dnorm(0, 1E-6)
}
## Run size - priors on the difference
for(i in 1:n.b.notflat){
xiU[b.notflat[i]] <- 2*bU[b.notflat[i]-1] - bU[b.notflat[i]-2]
bU [b.notflat[i]] ~ dnorm(xiU[b.notflat[i]],tauU)
}
tauU ~ dgamma(tauU.alpha,tauU.beta) # Notice reduction from .0005 (in thesis) to .05
sigmaU <- 1/sqrt(tauU)
taueU ~ dgamma(taueU.alpha,taueU.beta) # dgamma(100,.05) # Notice reduction from .0005 (in thesis) to .05
sigmaeU <- 1/sqrt(taueU)
## Capture probabilities covariates
for(i in 1:NlogitP.cov){
   beta.logitP[i] ~ dnorm(prior.beta.logitP.mean[i], 1/prior.beta.logitP.sd[i]^2)   # normal prior on each logitP regression coefficient (precision = 1/sd^2)
}
beta.logitP[NlogitP.cov+1] ~ dnorm(0, .01) # dummy so that covariates of length 1 function properly
tauP ~ dgamma(tauP.alpha,tauP.beta)
sigmaP <- 1/sqrt(tauP)
# derived parameters on actual movement probabilities
logit(movep[1]) <- muTT[1]
for(j in 2:Delta.max){
movep[j] <- ilogit(muTT[j]) *(1- sum(movep[1:(j-1)]))
}
movep[Delta.max+1] <- 1- sum(movep[1:Delta.max])
##### Likelihood contributions #####
## marked fish ##
for(i in 1:Nstrata.rel){
# Compute cell probabilities
for(j in 1:(Delta.max+1)){
Pmarked[i,j] <- Theta[i,j] * p[i+j-1] * ma.p # adjust for availability
}
Pmarked[i,Delta.max+2] <- 1- sum(Pmarked[i,1:(Delta.max+1)])
# Likelihood contribution
m2[i,1:(Delta.max+2)] ~ dmulti(Pmarked[i,],n1[i])
}
## Capture probabilities and run size
for(j in 1:(Nstrata.cap + Extra.strata.cap)){
logit(p[j]) <- logitP[j] # convert from logit scale
}
for(j in 1:Nstrata.cap){
U[j] <- round(exp(etaU[j])) # convert from log scale
u2[j] ~ dbin(p[j],U[j]) # capture of newly unmarked fish
}
##### Derived Parameters #####
Utot <- sum( U[1:Nstrata.cap]) # Total number of unmarked fish
n1.avail ~ dbin( ma.p, sum(n1[1:Nstrata.rel]))
Ntot <- n1.avail + Utot # Total population size including those fish marked and released but excluding fallback
} # end of model
", fill=TRUE)
sink() # End of saving the Bugs program
# Now to create the initial values, and the data prior to call to the MCMC sampler
Nstrata.rel <- length(n1)
Nstrata.cap <- length(u2)
## Count extra columns that will have to be added to account for Delta.max
Extra.strata.cap <- max(0,Nstrata.rel + ncol(m2) - Nstrata.cap -1)
# create the B-spline design matrix
# Each set of strata separated at the jump.after[i] points forms a separate spline with a separate basis
# We need to keep track of the breaks as the first two spline coefficients will have a flat
# prior and the others are then related to the previous values.
ext.jump <- c(0, jump.after, Nstrata.cap) # add the first and last breakpoints to the jump sets
SplineDesign <- matrix(0, nrow=0, ncol=0)
SplineDegree <- 3 # Degree of spline between occasions
b.flat <- NULL # index of spline coefficients with a flat prior distribution -first two of each segment
b.notflat <- NULL # index of spline coefficients where difference is modelled
all.knots <- NULL
for (i in 1:(length(ext.jump)-1)){
nstrata.in.set <- ext.jump[i+1]-ext.jump[i]
if(nstrata.in.set > 7)
{ knots <- seq(5,nstrata.in.set-1,4)/(nstrata.in.set+1) # a knot roughly every 4th stratum
} else{
    knots <- .5                   # short segment - place a single knot at the midpoint
}
all.knots <- c(all.knots, knots)
# compute the design matrix for this set of strata
z <- bs((1:nstrata.in.set)/(nstrata.in.set+1), knots=knots, degree=SplineDegree,
intercept=TRUE, Boundary.knots=c(0,1))
  # first two elements of b coefficients have a flat prior
b.flat <- c(b.flat, ncol(SplineDesign)+(1:2))
b.notflat <- c(b.notflat, ncol(SplineDesign)+3:(ncol(z)))
# add to the full design matrix which is block diagonal
SplineDesign <- cbind(SplineDesign, matrix(0, nrow=nrow(SplineDesign), ncol=ncol(z)))
SplineDesign <- rbind(SplineDesign,
cbind( matrix(0,nrow=nrow(z),ncol=ncol(SplineDesign)-ncol(z)), z) )
} # end of for loop
n.b.flat <- length(b.flat)
n.b.notflat <- length(b.notflat)
n.bU <- n.b.flat + n.b.notflat
# get the logitP covariate matrix ready
logitP.cov <- as.matrix(logitP.cov)
NlogitP.cov <- ncol(as.matrix(logitP.cov))
# get the logitP's ready to allow for fixed values
logitP <- c(as.numeric(logitP.fixed),rep(-10,Extra.strata.cap))
storage.mode(logitP) <- "double" # force the storage class to be correct if there are no fixed values
free.logitP.index <- (1:Nstrata.cap)[ is.na(logitP.fixed)]  # free values are those where NA is specified
Nfree.logitP <- length(free.logitP.index)
# make a copy of u2 to improve mixing (not yet implemented)
#u2copy <- stats::spline(x=1:length(u2), y=u2, xout=1:length(u2))$y
#u2copy <- exp(stats::spline(x = 1:length(u2), y = log(u2+1), xout = 1:length(u2))$y)-1 # on log scale to avoid negative values
#u2copy <- pmax(0,round(u2copy)) # round to integers
datalist <- list("Nstrata.rel", "Nstrata.cap","Extra.strata.cap",
"Delta.max","n1", "m2", "u2", # "u2copy", # u2copy not yet implemented
"logitP", "Nfree.logitP", "free.logitP.index",
"logitP.cov", "NlogitP.cov",
"ma.p.alpha","ma.p.beta",
"SplineDesign",
"b.flat", "n.b.flat", "b.notflat", "n.b.notflat", "n.bU",
"tauTT.alpha","tauTT.beta",
"tauU.alpha", "tauU.beta", "taueU.alpha", "taueU.beta",
"prior.beta.logitP.mean", "prior.beta.logitP.sd",
"tauP.alpha", "tauP.beta")
## Generate the initial values for the parameters of the model
## 1) U and spline coefficients
Uguess <- pmax((u2+1)/expit(prior.beta.logitP.mean[1]),1) # try and keep Uguess larger than observed values
Uguess[which(is.na(Uguess))] <- mean(Uguess,na.rm=TRUE)
init.bU <- stats::lm(log(Uguess) ~ SplineDesign-1)$coefficients # initial values for spline coefficients
if(debug2) {
cat("compute init.bU \n")
browser() # Stop here to examine the spline design matrix function
}
## 2) Capture probabilities
logitPguess <- c(logit(pmin(.99,pmax(.01,(apply(m2[,1:(Delta.max+1)],1,sum)+1)/(n1+1)))),
rep(prior.beta.logitP.mean[1],Nstrata.cap-Nstrata.rel))
#browser()
init.beta.logitP <- as.vector(stats::lm( logitPguess ~ logitP.cov-1)$coefficients)
if(debug2) {
cat(" obtained initial values of beta.logitP\n")
browser()
}
## 3) marked availability
ma.p.guess <- ma.p.alpha/(ma.p.alpha+ma.p.beta)
# create an initial plot of the fit
plot.data <- data.frame(time=time,
logUguess=log(Uguess[1:Nstrata.cap]),
spline=SplineDesign %*% init.bU, stringsAsFactors=FALSE)
init.plot <- ggplot(data=plot.data, aes_(x=~time, y=~logUguess))+
ggtitle(title, subtitle="Initial spline fit to estimated log U[i]")+
geom_point()+
geom_line(aes_(y=~spline))+
xlab("Stratum")+ylab("log(U[i])")+
scale_x_continuous(breaks=seq(min(plot.data$time, na.rm=TRUE),max(plot.data$time, na.rm=TRUE),2))
if(save.output.to.files)ggsave(init.plot, filename=paste(prefix,"-initialU.pdf",sep=""), height=4, width=6, units="in")
#results$plots$plot.init <- init.plot # do this after running the MCMC chain (see end of function)
parameters <- c("logitP", "beta.logitP", "tauP", "sigmaP",
"bU", "tauU", "sigmaU",
"eU", "taueU", "sigmaeU",
"Ntot", "Utot", "logUne", "etaU", "U",
"muTT","sdTT","Theta","ma.p", "movep")
if( any(is.na(m2))) {parameters <- c(parameters,"m2")}   # monitor the imputed values if any m2 are missing (e.g. bad data set to NA)
if( any(is.na(u2))) {parameters <- c(parameters,"u2")}
init.vals <- function(){
init.logitP <- c(logit((apply(m2[,1:(Delta.max+1)],1,sum)+1)/(n1+1)),
rep(prior.beta.logitP.mean[1],Nstrata.cap-Nstrata.rel)) # initial capture rates based on observed recaptures
init.logitP <- pmin(10,pmax(-10,init.logitP))
init.logitP[is.na(init.logitP)] <- -2 # those cases where initial probability is unknown
init.logitP[!is.na(logitP.fixed)] <- NA # no need to initialize the fixed values
init.beta.logitP <- as.vector(stats::lm( init.logitP ~ logitP.cov-1)$coefficients)
init.beta.logitP[is.na(init.beta.logitP)] <- 0
init.beta.logitP <- c(init.beta.logitP, 0) # add one extra element so that single beta is still written as a
# vector in the init files etc.
init.logitP <- c(init.logitP,rep(NA,Extra.strata.cap)) # Add values for extra capture probabilities
init.tauP <- 1/stats::var(init.logitP, na.rm=TRUE) # 1/variance of logit(p)'s (ignoring the covariates for now)
init.bU <- stats::lm(log(Uguess) ~ SplineDesign-1)$coefficients # initial values for spline coefficients
  init.eU   <- as.vector(log(Uguess)-SplineDesign%*%init.bU)  # error terms set as the difference between observed and predicted
init.etaU <- log(Uguess)
# variance of spline difference
sigmaU <- stats::sd( init.bU[b.notflat]-2*init.bU[b.notflat-1]+init.bU[b.notflat-2], na.rm=TRUE)
init.tauU <- 1/sigmaU^2
# variance of error in the U' over and above the spline fit
sigmaeU <- stats::sd(init.eU, na.rm=TRUE)
init.taueU <- 1/sigmaeU^2
# initialize the u2 where missing
init.u2 <- u2
init.u2[ is.na(u2)] <- 100
init.u2[!is.na(u2)] <- NA
## Transition probabilities
init.Theta <- t(sapply(1:Nstrata.rel,function(i){
if(all(is.na(m2[i,])) || sum(m2[i,])==0)
return(rep(NA,Delta.max+1))
else{
thetatmp <- pmax(.01,pmin(m2[i,-(Delta.max+2)]/sum(m2[i,-(Delta.max+2)],na.rm=TRUE),.99,na.rm=TRUE)) # CJS 2011-02-16
return(thetatmp/sum(thetatmp))
}
}))
# cat('Initial values')
# browser()
init.delta <- as.matrix(apply(init.Theta[,-(Delta.max+1),drop=FALSE],1, # CJS 2019-04-24 dealing with delta.max=1
function(theta){ # CJS fixed -(Delta.max+1)
if(length(theta) == 1){theta}
else {theta/(1-c(0,cumsum(theta[-Delta.max])))}
}))
if(nrow(init.delta)==Delta.max){init.delta <- t(init.delta)}
## mean and standard deviation of transition probabilties
init.muTT <- apply(logit(init.delta),2,mean,na.rm=TRUE)
init.sdTT <- stats::sd(as.vector(t(logit(init.delta)))-init.muTT,na.rm=TRUE)
## ma.p
init.ma.p <- ma.p.alpha/(ma.p.alpha+ma.p.beta)
#browser()
list(logitP=init.logitP, beta.logitP=init.beta.logitP, tauP=init.tauP,
bU=init.bU, tauU=init.tauU, taueU=init.taueU, etaU=init.etaU,
muTT=init.muTT, tauTT=1/init.sdTT^2,r=logit(init.delta), ma.p=init.ma.p)
}
#browser()
## Generate data list
data.list <- list()
for(i in 1:length(datalist)){
data.list[[length(data.list)+1]] <-get(datalist[[i]])
}
names(data.list) <- as.list(datalist)
## Generate the initial values and put into a list
# make a list of initial values
init.vals.list <- lapply(1:n.chains, function(x){init.vals()})
# Call the MCMC sampler
results <- run.MCMC(modelFile=model.file,
dataFile=data.file,
dataList=data.list,
initFiles=init.files,
initVals=init.vals.list,
parameters=parameters,
nChains=n.chains,
nIter=n.iter,
nBurnin=n.burnin,
nSims=n.sims,
overRelax=FALSE,
initialSeed=InitialSeed,
working.directory=working.directory,
debug=debug)
results$plots$plot.init <- init.plot # save initial plot to results object
return(results)
}
|
/scratch/gouwar.j/cran-all/cranData/BTSPAS/R/TimeStratPetersenNonDiagErrorNPMarkAvail.R
|
## 2021-10-23 CJS Added trunc.logitP to deal with extreme values of logitP during plotting
## 2020-12-15 CJS removed sampfrac from body of code
## 2020-12-15 CJS Fixed problem when specifying u2==NA
## 2020-11-07 CJS Allowed user to specify prior for beta coefficient for logitP
## 2018-12-22 CJS add code to estimate mean movement vector (movep)
## 2018-12-19 CJS sampling fraction deprecated
## 2018-12-14 CJS bayesian p-value plots added
## 2018-12-06 CJS convert report to textConnections
## 2018-12-02 CJS convert trace plots to ggplot
## 2018-12-01 CJS converted acf, posterior plots to ggplot
## 2018-11-30 CJS Fixed problem of epsilon not being right length
## 2018-11-29 CJS Fixed problem of printing large results
## 2018-11-28 CJS remove reference to OpenBugs
## 2014-09-01 CJS conversion to jags
## 2012-08-30 CJS fixed problem in any() and all() in error checking with NAs
## 2011-02-21 CJS changed u2 to new.u2 in code for expanded.m2
## 2011-02-19 CJS First development
#' Wrapper (*_fit) to call the function to fit a Time Stratified Petersen Estimator
#' with NON Diagonal Entries with a non-parametric travel time and fall back
#'
#' Takes the number of marked fish released, the number of recaptures, and the
#' number of unmarked fish and uses Bayesian methods to fit a fit a spline
#' through the population numbers and a hierarchical model for the trap
#' efficiencies over time. The output is written to files and an MCMC object
#' is also created with samples from the posterior.
#'
#' Normally the user makes a call to the *_fit function which then calls the
#' fitting function.
#'
#' Use the \code{\link{TimeStratPetersenDiagError_fit}} function for cases
#' where recaptures take place ONLY in the stratum of release, i.e. the
#' diagonal case.
#'
#' The non-diagonal case fits a log-normal distribution for the travel time.
#' The *NP functions fit a non-parametric distribution for the travel times.
#' The *MarkAvail functions extend the *NP functions to allow for reductions in
#' mark availability because of fall back, immediate tagging mortality, etc.
#'
#' @template title
#' @template prefix
#' @template time
#' @template n1
#' @param m2 A numeric matrix of the number of fish released in stratum [i] and
#' recovered in [j-1] strata later. For example m2[3,5] is the number of
#' marked fish released in stratum 3 and recovered 4 strata later in stratum 7.
#' The first column is the number of marked fish recovered in the stratum of
#' release, i.e. 0 strata later. Use the
#' \code{\link{TimeStratPetersenDiagError_fit}} function for cases where
#' recaptures take place ONLY in the stratum of release, i.e. the diagonal
#' case.
#' @template u2.ND
#' @template sampfrac
#' @template jump.after
#' @template bad.n1
#' @template bad.m2
#' @template bad.u2
#' @template logitP.cov
#' @template param.logitP.fixed
#' @param marked_available_n Information, usually from prior studies, on the
#' fraction of marks that will be available. The *_n and *_x are used to create
#' a "binomial" distribution for information on the marked availability. For
#' example, if *_n=66 and *_x=40, then you estimate that about 40/66=61\% of marks
#' are available and 39\% have dropped out or fallen back.
#' @param marked_available_x See marked_available_n
#' @template mcmc-parms
#' @template tauU.alpha.beta
#' @template taueU.alpha.beta
#' @template Delta.max
#' @template tauTT.alpha.beta
#' @template prior.beta.logitP.mean.sd
#' @template tauP.alpha.beta
#' @template run.prob
#' @template debug
#' @template InitialSeed
#' @template save.output.to.files
#' @template trunc.logitP
#'
#' @return An MCMC object with samples from the posterior distribution. A
#' series of graphs and text file are also created in the working directory.
#' @template author
#' @template references
#' @keywords ~models ~smooth
#' @examples
#'
#' ##---- See the vignettes for examples of how to use this package
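#'
#' # A minimal hedged sketch of a call (hypothetical toy data; all demo.* objects and numbers
#' # below are assumptions, not from a real study). See the vignettes for real analyses.
#' \dontrun{
#' # 4 release strata, 6 capture strata, recoveries up to 2 strata after release,
#' # and prior information that about 40 of 66 radio-tagged fish remained available.
#' demo.n1 <- c(100, 120, 90, 110)
#' demo.m2 <- matrix(c(10, 5, 1,
#'                     12, 6, 2,
#'                      9, 4, 1,
#'                     11, 5, 2), ncol=3, byrow=TRUE)
#' demo.u2 <- c(200, 220, 180, 210, 150, 100)
#' fit <- TimeStratPetersenNonDiagErrorNPMarkAvail_fit(
#'           title="Toy example", prefix="toy",
#'           time=1:6, n1=demo.n1, m2=demo.m2, u2=demo.u2,
#'           marked_available_n=66, marked_available_x=40,
#'           n.chains=3, n.iter=5000, n.burnin=2000, n.sims=500)
#' }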
#'
#' @export TimeStratPetersenNonDiagError_fit
#' @importFrom stats runif var sd
#' @export TimeStratPetersenNonDiagErrorNPMarkAvail_fit
TimeStratPetersenNonDiagErrorNPMarkAvail_fit<- function( title="TSPNDENP-avail", prefix="TSPNDENP-avail-",
time, n1, m2, u2, sampfrac=rep(1,length(u2)), jump.after=NULL,
bad.n1=c(), bad.m2=c(), bad.u2=c(),
logitP.cov=rep(1,length(u2)),
logitP.fixed=NULL, logitP.fixed.values=NULL,
marked_available_n, marked_available_x,
n.chains=3, n.iter=200000, n.burnin=100000, n.sims=2000,
tauU.alpha=1, tauU.beta=.05, taueU.alpha=1, taueU.beta=.05,
prior.beta.logitP.mean = c(logit(sum(m2,na.rm=TRUE)/sum(n1,na.rm=TRUE)),rep(0, ncol(as.matrix(logitP.cov))-1)),
prior.beta.logitP.sd = c(2, rep(10, ncol(as.matrix(logitP.cov))-1)),
tauP.alpha=.001, tauP.beta=.001,
Delta.max=NULL,tauTT.alpha=.1,tauTT.beta=.1,
run.prob=seq(0,1,.1), # what percentiles of run timing are wanted
debug=FALSE, debug2=FALSE,
InitialSeed=ceiling(stats::runif(1,min=0, max=1000000)),
save.output.to.files=TRUE,
trunc.logitP=15) {
## Fit a Time Stratified Petersen model with NON-diagonal entries and with smoothing on U allowing for random error
## and fall back after tagging. This is based on the Skeena River study, where only 40/66 (60%) acoustically tagged fish
## were observed above the canyon spot and hence 40% of tagged fish never migrated forward of their tagging release spot.
## This reduces the number of tagged fish available for recapture and so, if not accounted for, leads to
## positive biases in the estimates of abundance.
## This is the classical stratified Petersen model where the recoveries can take place for this and multiple
## strata later. Transitions of marked fish are modelled non-parametrically.
##
version <- '2021-11-02'
options(width=200)
## Input parameters are
## title - title for the analysis (character string)
## prefix - prefix used for files created with the analysis results
## this should be in standard Window's format, eg. JC-2002-ST-TSPNDE
## to which is appended various suffixes for plots etc (character string)
## time - vector of stratum numbers. For example, 9:38 would indicate that the
## Trinity River system sampled weeks 9 to 38.
## These values are never used in the analysis and only serve as labels for the weeks and for plotting purposes.
## They should be contiguous equally spaced values and be the same length as u2.
## n1, m2, u2 - the input data consisting of fish marked and released, recapture, and unmarked captured
## Note that m2 is a MATRIX. The first column are those fish recaptured in the stratum of release
## and the subsequent columns are those recoveries in later strata.
## This is expanded to the full matrix [i,j] for released released in stratum i and recovered in stratum j
## The vector u2 should be long enough to account for any fish that are recaptured later on
## from releases late in the season. The bottom right diagonal of m2 may be all zeros - that is ok
## Notice that length(u2) can be longer than length(n1)+nrow(m2).
## sampfrac - Deprecated. DO NOT USE.
## jump.after - in some cases, a single spline is still not flexible enough to cope with rapid
## changes in the run curve. For example, in the Trinity River project, a larger
## hatchery release occurs around stratum 14. This is a vector indicating the
## strata AFTER which the spline curve is allowed to jump.
## null or vector of arbitrary length.
## bad.n1 - list of stratum numbers where the value of n1 is suspect.
## bad.m2 - list of stratum numbers where the value of m2 is suspect.
## For example, the capture rate could be extremely low.
## These are set to NA prior to the call to JAGS
## bad.u2 - list of stratum numbers where the value of u2 is suspect.
## logitP.cov - matrix of covariates for logit(P). If the strata times are "missing" some values, an intercept is assumed
## for the first element of the covariance matrix and 0 for the rest of the covariates.
## CAUTION - this MAY not be what you want to do. It is likely best to enter ALL strata
## if you have any covariates. The default, if not specified, is a constant (the mean logit)
## marked_available_n, marked_available_x Information on the movement forward rate. Treat this a binomial data
## which will be applied uniformly over all released. For example, use *_n=60 and *_x=40 to represent
## data from the telemetry study that had 40/60 tagged fish move forward. You can vary the precision of the
##             estimate of the marked_availability_fraction by changing the _n and _x values to create the illusion
## of better and worse information on the availability value.
##
## tauU.alpha, tauU.beta - parameters for the prior on variance in spline coefficients
## taueU.alpha, taueU.beta - parameters for the prior on variance in log(U) around fitted spline
# prior.beta.logitP.mean, prior.beta.logitP.sd - parameters for the prior on mean logit(P)'s [The intercept term]
#                                     The remaining (non-intercept) covariates default to a prior mean of 0 and a sd of 10
## tauP.alpha, tauP.beta - parameters for the prior on 1/var of residual error in logit(P)'s
## Delta.max - maximum transition time for marked fish
## tauTT.alpha, tauTT.beta - parameters of the prior on 1/var of logit continuation ratio for travel times
## run.prob - percentiles of run timing wanted
## debug - if TRUE, then this is a test run with very small MCMC chains run to test out the data
# force input vectors to be vectors. Note that m2 is NOT a vector
time <- as.vector(time)
n1 <- as.vector(n1)
u2 <- as.vector(u2)
sampfrac <- as.vector(sampfrac)
## Do some basic error checking
## 1. Check that length of n1, m2, u2, sampfrac, time are consistent with each other.
## In the non-diagonal case, they don't have to match
if(length(n1)!=nrow(m2))
stop("***** ERROR ***** Length of n1 and number of rows of m2 must be equal. They are:",
         length(n1)," ",nrow(m2),"\n")
if(!is.numeric(n1)){
cat("***** ERROR ***** n1 must be numeric. You have:",
paste(n1,collapse=", "),"\n")
return()}
if(any(is.na(n1))){
cat("***** ERROR ***** All values of n1 must not be missing. You have: ",
paste(n1,collapse=", "),"\n")
return()}
if(any(n1 < 0, na.rm=TRUE)){
cat("***** ERROR ***** All values of n1 must be non-negative. You have: ",
paste(n1,collapse=", "),"\n")
return()}
if(stats::var(c(length(u2),length(sampfrac),length(time)))>0)
stop("***** ERROR ***** Lengths of u2, sampfrac, time must all be equal. They are:",
length(u2),' ',length(sampfrac),' ',length(time),"\n")
if(length(logitP.cov) %% length(u2) != 0)
stop("***** ERROR ***** Dimension of covariate vector doesn't match length of u2. They are:",
length(u2),' ',length(logitP.cov),' ',dim(logitP.cov),"\n")
## 2. Check that rowsum of m2<= n1
if(any(apply(m2,1,sum, na.rm=TRUE)>n1))
stop("***** ERROR ***** m2[i,+] must be <= n1[i]. The arguments are \n n1:",paste(n1,collapse=","),
"\n m2:",paste(m2,collapse=","),"\n")
## 3. Elements of bad.m2 and jump.after must belong to time
if(!all(bad.n1 %in% time, na.rm=TRUE))
stop("***** ERROR ***** bad.n1 must be elements of strata identifiers. You entered \n bad.n1:",
paste(bad.n1,collapse=","),"\n Strata identifiers are \n time:",
paste(time,collapse=","),"\n")
if(!all(bad.m2 %in% time, na.rm=TRUE))
stop("***** ERROR ***** bad.m2 must be elements of strata identifiers. You entered \n bad.m2:",
paste(bad.m2,collapse=","),"\n Strata identifiers are \n time:",
paste(time,collapse=","),"\n")
if(!all(bad.u2 %in% time, na.rm=TRUE))
stop("***** ERROR ***** bad.u2 must be elements of strata identifiers. You entered \n bad.u2:",
paste(bad.u2,collapse=","),"\n Strata identifiers are \n time:",
paste(time,collapse=","), "\n")
if(!all(jump.after %in% time, na.rm=TRUE))
stop("***** ERROR ***** jump.after must be elements of strata identifiers. You entered \n jump.after:",
paste(jump.after,collapse=","),"\n Strata identifiers are \n time:",
paste(time,collapse=","), "\n")
# 4. check that index of logitP.fixed belong to time
if(!all(logitP.fixed %in% time, na.rm=TRUE)){
cat("***** ERROR ***** logitP.fixed must be elements of strata identifiers. You entered \n logitP.fixed:",
paste(logitP.fixed,collapse=","),"\n Strata identifiers are \n time:",
paste(time,collapse=","), "\n")
return()}
if(length(logitP.fixed)!=length(logitP.fixed.values)){
cat("***** ERROR ***** Lengths of logitP.fixed and logitP.fixed.values must all be equal. They are:",
length(logitP.fixed),length(logitP.fixed.values),"\n")
return()}
# 5. Check that some basic information on marked availability is given
if( is.na(marked_available_n) | is.na(marked_available_x) | marked_available_x > marked_available_n){
cat("***** ERROR ***** Bad marked_availability values. You entered:",marked_available_n," ",marked_available_x,"\n")
return()}
# 6. check that the length of u2 is consistent with n1 and m2
if(length(u2) < length(n1) | length(u2) > (length(n1)+ ncol(m2)-1)){
   cat("***** ERROR ***** Length(u2) must be between length(n1) and length(n1)+ncol(m2)-1 \n")
return()
}
# Check that that the prior.beta.logitP.mean and prior.beta.logitP.sd length=number of columns of covariates
logitP.cov <- as.matrix(logitP.cov)
if(!is.vector(prior.beta.logitP.mean) | !is.vector(prior.beta.logitP.sd)){
stop("prior.beta.logitP.mean and prior.beta.logitP.sd must be vectors")
}
if(!is.numeric(prior.beta.logitP.mean) | !is.numeric(prior.beta.logitP.sd)){
stop("prior.beta.logitP.mean and prior.beta.logitP.sd must be numeric")
}
if(length(prior.beta.logitP.mean) != ncol(logitP.cov) | length(prior.beta.logitP.sd) != ncol(logitP.cov)){
stop("prior.beta.logitP.mean and prior.beta.logitP.sd must be same length as number columns in covariate matrix")
}
# Deprecation of sampling fraction.
if(any(sampfrac != 1)){
cat("***** ERROR ***** Sampling fraction is deprecated for any values other than 1. DO NOT USE ANYMORE. ")
return()
}
## Define maximum travel time if not supplied by user
if(is.null(Delta.max)) Delta.max <- ncol(m2)-1
## Define output filename
results.filename <- paste(prefix,"-results.txt",sep="")
## Open sink to output file
stdout <- vector('character')
report <- textConnection('stdout', 'wr', local = TRUE)
sink(report)
cat(paste("Time Stratified Petersen with Non-Diagonal recaptures, error in smoothed U, non-parametric modelling of travel times, and incorporating mark availability- ", date()))
cat("\nVersion: ", version)
cat("\n\n", title, "Results \n\n")
## m2(i,+) < n1(i)
cat("*** Raw data *** (padded to match length of u2) \n")
jump.indicator <- rep(' ', length(u2))
jump.indicator[time %in% jump.after]<- '***'
ex.n1 <- c(n1, rep(NA, length(u2)-length(n1)))
ex.m2 <- rbind(m2,matrix(NA, nrow=length(u2)-length(n1), ncol=ncol(m2)))
temp<- data.frame(time=time, n1=ex.n1, m2=ex.m2, u2=u2, logitP.cov=logitP.cov, jump=jump.indicator)
print(temp)
cat("\n\n")
cat("*** Marked Availability prior information *** \n")
cat(" Set marked available n=", marked_available_n," with x=",marked_available_x,"\n\n\n")
## Print information about jump points
cat("Jump points are after strata: ", jump.after)
if(length(jump.after)==0) cat("none - A single spline is fit")
## Print information about delta max
cat("\nMaximum travel time (Delta.max): ",Delta.max)
cat("\nFixed logitP indices are: ", logitP.fixed)
if(length(logitP.fixed)==0) cat("none - NO fixed values")
cat("\nFixed logitP values are: ", logitP.fixed.values)
if(length(logitP.fixed)==0) cat("none - NO fixed values")
## Obtain the Pooled Petersen estimator prior to fixup of bad.n1, bad.m2, and bad.u2 values
cat("\n\n*** Pooled Petersen Estimate prior to fixing bad n1, m2, or u2 values CHECK - CHECK - CHECK - CHECK ***\n\n")
cat(" *** NOT ADJUSTED FOR MARK AVAILABILITY/Dropout/Fallback ***\n")
temp.n1 <- n1
temp.m2 <- m2
temp.u2 <- u2
cat("Total n1=", sum(temp.n1,na.rm=TRUE),"; m2=",sum(temp.m2,na.rm=TRUE),"; u2=",sum(temp.u2,na.rm=TRUE),"\n\n")
pp <- SimplePetersen(sum(temp.n1,na.rm=TRUE), sum(temp.m2,na.rm=TRUE), sum(temp.u2,na.rm=TRUE))
cat("Est U(total) not adjusted for fallback ", format(round(pp$U.est),big.mark=","),
" (SE ", format(round(pp$U.se), big.mark=","), ")\n")
cat("Est N(total) not adjusted for fallback ", format(round(pp$N.est),big.mark=","),
" (SE ", format(round(pp$N.se), big.mark=","), ")\n\n\n")
# adjustment for dropout
dr <- 1-marked_available_x/marked_available_n # dropout probability
se_dr <- sqrt(dr*(1-dr)/marked_available_n)
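# The adjusted SEs below use the exact variance of a product of two independent estimates
# (here N-hat and (1-dr)-hat): var(XY) = var(X)*var(Y) + var(X)*E(Y)^2 + var(Y)*E(X)^2.
# Worked illustration (assumed prior information of 40 available out of 66 marks):
# dr = 1 - 40/66 = 0.394 and se_dr = sqrt(0.394*0.606/66) = 0.060, approximately.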
cat("\n\nAdjusting for fallback/dropout \n")
cat("Estimated dropout is", dr, "with se of ", se_dr, "\n")
# adjust the petersen estimator for drop out including the uncertainty in the dropout probability
pp.adj <- pp
pp.adj$N.est <- pp.adj$N.est * (1-dr)
pp.adj$N.se <- sqrt(pp$N.se^2 * se_dr^2+
pp$N.se^2 * (1-dr)^2 +
pp$N.est^2 * se_dr^2)
pp.adj$U.est <- pp.adj$U.est * (1-dr)
pp.adj$U.se <- sqrt(pp$U.se^2 * se_dr^2+
pp$U.se^2 * (1-dr)^2 +
pp$U.est^2 * se_dr^2)
cat("Est U(total) adjusting for dropout is ", format(round(pp.adj$U.est),big.mark=","),
" (SE ", format(round(pp.adj$U.se), big.mark=","), ")\n")
cat("Est N(total) adjusting for dropout is ", format(round(pp.adj$N.est),big.mark=","),
" (SE ", format(round(pp.adj$N.se), big.mark=","), ")\n\n\n")
## Test if pooling can be done
cat("*** Test if pooled Petersen is allowable. [Check if fraction captured equal] ***\n\n")
select <- (n1>0) & (!is.na(n1)) & (!is.na(apply(m2,1,sum)))
temp.n1 <- n1[select]
temp.m2 <- m2[select,]
test <- TestIfPool( temp.n1, apply(temp.m2,1,sum))
cat("(Large Sample) Chi-square test statistic ", test$chi$statistic," has p-value", test$chi$p.value,"\n\n")
temp <- cbind(time[1:length(n1)][select],test$chi$observed, round(test$chi$expected,1), round(test$chi$residuals^2,1))
colnames(temp) <- c('time','n1-m2*','m2*','E[n1-m2]','E[m2]','X2[n1-m2]','X2[m2]')
print(temp)
cat("\n Be cautious of using this test in cases of small expected values. \n\n")
## Adjust the data for the explicity bad values or other problems
new.time <- time
new.n1 <- n1
new.m2 <- m2
new.u2 <- u2
new.logitP.cov <- logitP.cov
## Set the bad n1/m2 values to 0 and the bad u2 values to missing
new.n1[time[1:length(n1)] %in% c(bad.n1, bad.m2)] <- 0
new.m2[time[1:length(n1)] %in% c(bad.m2, bad.n1)] <- 0
new.u2[time %in% bad.u2] <- NA
## Print out the revised data
cat("\n\n*** Revised data *** \n")
jump.indicator <- rep(' ', length(u2))
jump.indicator[time %in% jump.after]<- '***'
ex.n1 <- c(new.n1, rep(NA, length(new.u2)-length(new.n1)))
ex.m2 <- rbind(new.m2,matrix(NA, nrow=length(new.u2)-length(new.n1), ncol=ncol(new.m2)))
temp<- data.frame(time=new.time, n1=ex.n1, m2=ex.m2, u2=new.u2, logitP.cov=new.logitP.cov,
jump.after=jump.indicator)
print(temp)
cat("\n\n")
cat("*** Marked Availability prior information *** \n")
cat(" Set marked available n=", marked_available_n," with x=",marked_available_x,"\n\n\n")
## The NP analysis does not need the expanded m2 array, but this is
## needed later on. So, we'd better compute it here. The last column
## of this matrix will be the number of individuals from each
## stratum that are not recaptured.
##
expanded.m2 <- matrix(0, nrow=length(new.n1), ncol=length(new.u2)+1)
for(i in 1:length(new.n1)){
expanded.m2[i,1:length(new.u2)] <- c(rep(0,i-1),new.m2[i,],rep(0,length(new.u2)))[1:length(new.u2)]
expanded.m2[i,length(new.u2)+1] <- new.n1[i] - sum(new.m2[i,])
}
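# Illustration (assumed toy numbers): if new.m2[2,] = c(5,3,1) (recoveries 0, 1 and 2 strata after
# release stratum 2), length(new.u2) = 6 and new.n1[2] = 20, the expanded row becomes
# expanded.m2[2,] = c(0, 5, 3, 1, 0, 0, 11), the final 11 = 20 - 9 being the never-recaptured fish.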
cat("*** Expanded m2 array with column sum and u2 ***\n\n")
save.max.print <- getOption("max.print")
options(max.print=.Machine$integer.max)
temp <- rbind(expanded.m2, apply(expanded.m2,2,sum, na.rm=TRUE))
rownames(temp)[nrow(temp)] <- 'Column totals'
temp <- rbind(temp, c(u2, rep(NA, ncol(expanded.m2)-length(u2)) ))
rownames(temp)[nrow(temp)] <- "Untagged (u2)"
temp <- rbind(temp, c(new.u2, rep(NA, ncol(expanded.m2)-length(new.u2)) ))
rownames(temp)[nrow(temp)] <- "Untagged - after fixups"
new.logitP.fixed <- rep(NA, length(new.u2))
new.logitP.fixed[match(logitP.fixed, time)] <- logitP.fixed.values
temp <- rbind(temp, c(new.logitP.fixed, rep(NA, ncol(expanded.m2)-length(new.u2)) ))
rownames(temp)[nrow(temp)] <- "Logit P fixed"
rownames(temp)[1:length(n1)] <- 1:length(n1)
print(temp)
options(max.print=save.max.print)
sink()
# some further checking on u2. Make sure that every column where there are recoveries has a u2
# browser()
if( (length(u2)+1) <= (ncol(temp)-1)) {
if(any( temp["Column totals", (length(u2)+1):(ncol(temp)-1)] >0)){
cat("***** ERROR ***** Non-zero recoveries and u2 not available at end of experiment??? \n Check above matrix\n")
return()
}
}
sink(report, append=TRUE)
# assign the logitP fixed values etc.
new.logitP.fixed <- rep(NA, length(new.u2))
new.logitP.fixed[match(logitP.fixed, time)] <- logitP.fixed.values
## We do need to add the column of not recaptured counts to the m2
## array.
new.m2 <- cbind(new.m2,new.n1-apply(new.m2,1,sum))
## We construct a prior probability on the P(marks available) based on the information provided
## by assuming a beta prior that would give the binomial results
ma.p.alpha <- marked_available_x
ma.p.beta <- marked_available_n - marked_available_x
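## Illustrative example (hypothetical numbers): marked_available_n = 100 with
## marked_available_x = 90 gives a Beta(90, 10) prior on the availability
## probability, i.e. a prior mean of 0.90 and a prior sd of about 0.03.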
## Print out information on the prior distributions used
cat("\n\n*** Information on priors *** \n")
## 0) ma.p = p(marked_availability for subsequent recapture)
cat(" P(marked fish available for subsequent recapture) has beta(",ma.p.alpha,ma.p.beta,") which corresponds \n",
" to a mean of ", round(ma.p.alpha/(ma.p.alpha+ma.p.beta),2),' and sd of ',
round(sqrt(ma.p.alpha*ma.p.beta/(ma.p.alpha+ma.p.beta+1)/(ma.p.alpha+ma.p.beta)**2),3),"\n")
## 1) tauU = (variance of spline coefficients)^-1
cat(" Parameters for prior on tauU (variance in spline coefficients): ", tauU.alpha, tauU.beta,
" which corresponds to a mean/std dev of 1/var of:",
round(tauU.alpha/tauU.beta,2),round(sqrt(tauU.alpha/tauU.beta^2),2),"\n")
## 2) taueU = (variance of errors)^-1
cat(" Parameters for prior on taueU (variance of log(U) about spline): ",taueU.alpha, taueU.beta,
" which corresponds to a mean/std dev of 1/var of:",
round(taueU.alpha/taueU.beta,2),round(sqrt(taueU.alpha/taueU.beta^2),2),"\n")
## 3) prior.beta.logitP = priors for coefficients of covariates for logitP
cat(" Parameters for prior on beta.logitP[1] (intercept) (mean, sd): \n", cbind(round(prior.beta.logitP.mean,3), round(prior.beta.logitP.sd,5)),"\n")
## 4) tauP = (variance of capture probabilites conditional on covariates)^-1
cat(" Parameters for prior on tauP (residual variance of logit(P) after adjusting for covariates): ",tauP.alpha, tauP.beta,
" which corresponds to a mean/std dev of 1/var of:",
round(tauP.alpha/tauP.beta,2),round(sqrt(tauP.alpha/tauP.beta^2),2),"\n")
## 5) tauTT = (variance of continuation ratios for theta)^-1
cat(" Parameters for prior on tauTT (variance of continuation rations for travel times): ",tauTT.alpha, tauTT.beta,
" which corresponds to a mean/std dev of 1/var of:",
round(tauTT.alpha/tauTT.beta,2),round(sqrt(tauTT.alpha/tauTT.beta^2),2),"\n")
cat("\n\nInitial seed for this run is: ",InitialSeed, "\n")
sink()
if (debug2) {
cat("\nprior to formal call to TimeStratPetersenNonDiagError\n")
browser()
}
if (debug)
{results <- TimeStratPetersenNonDiagErrorNPMarkAvail(
title=title, prefix=prefix,
time=new.time, n1=new.n1, m2=new.m2, u2=new.u2,
jump.after=(1:length(u2))[time %in% jump.after],
logitP.cov=new.logitP.cov, logitP.fixed=new.logitP.fixed,
ma.p.alpha, ma.p.beta,
n.chains=3, n.iter=10000, n.burnin=5000, n.sims=500, # set to small values for debugging only
prior.beta.logitP.mean=prior.beta.logitP.mean,
prior.beta.logitP.sd =prior.beta.logitP.sd,
tauU.alpha=tauU.alpha, tauU.beta=tauU.beta,
taueU.alpha=taueU.alpha, taueU.beta=taueU.beta,
Delta.max=Delta.max,tauTT.alpha=tauTT.alpha,tauTT.beta=tauTT.beta,
debug=debug, debug2=debug2, InitialSeed=InitialSeed,
save.output.to.files=save.output.to.files)
} else #notice R syntax requires { before the else
{results <- TimeStratPetersenNonDiagErrorNPMarkAvail(
title=title, prefix=prefix,
time=new.time, n1=new.n1, m2=new.m2, u2=new.u2,
jump.after=(1:length(u2))[time %in% jump.after],
logitP.cov=new.logitP.cov, logitP.fixed=new.logitP.fixed,
ma.p.alpha, ma.p.beta,
n.chains=n.chains, n.iter=n.iter, n.burnin=n.burnin, n.sims=n.sims,
prior.beta.logitP.mean=prior.beta.logitP.mean,
prior.beta.logitP.sd =prior.beta.logitP.sd,
tauU.alpha=tauU.alpha, tauU.beta=tauU.beta,
taueU.alpha=taueU.alpha, taueU.beta=taueU.beta,
Delta.max=Delta.max,tauTT.alpha=tauTT.alpha,tauTT.beta=tauTT.beta,
debug=debug, debug2=debug2, InitialSeed=InitialSeed,
save.output.to.files=save.output.to.files)
}
results$PP$using.all.data <-pp
results$PP$using.all.data.fallback <- pp.adj
results$dr <- data.frame(est=dr, se=se_dr)
## Now to create the various summary tables of the results
Nstrata.rel <- length(n1)
Nstrata.cap <- ncol(expanded.m2) -1 ## don't forget that last column of m2 is number of fish never seen
# A plot of the observed log(U) on the log scale, and the final mean log(U)
plot.df <- data.frame(time =new.time)
plot.df$logUi <-log( c((new.u2[1:Nstrata.rel]+1)*(new.n1+2)/(apply(expanded.m2[,1:Nstrata.cap],1,sum)+1), rep(NA, length(u2)-Nstrata.rel)))
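# (descriptive note) logUi is a rough, Chapman-style bias-adjusted stratum estimate,
#    logUi[i] = log( (u2[i]+1) * (n1[i]+2) / (m2[i,+]+1) ),
# used only as the "observed" (open-circle) points on the fit plot, not in the model itself.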
# extract the fitted U values
results.row.names <- rownames(results$summary)
etaU.row.index <- grep("etaU", results.row.names)
etaU<- results$summary[etaU.row.index,]
plot.df$logU = etaU[,"mean"]
plot.df$logUlcl = etaU[,"2.5%"]
plot.df$logUucl = etaU[,"97.5%"]
# extract the spline values
logUne.row.index <- grep("logUne", results.row.names)
logUne<- results$summary[logUne.row.index,"mean"]
plot.df$spline <- results$summary[logUne.row.index,"mean"]
# add limits to the plot to avoid non-monotone secondary axis problems with extreme values
plot.df$logUi <- pmax(-10 , pmin(20, plot.df$logUi))
plot.df$logU <- pmax(-10 , pmin(20, plot.df$logU ))
plot.df$logUlcl <- pmax(-10 , pmin(20, plot.df$logUlcl ))
plot.df$logUucl <- pmax(-10 , pmin(20, plot.df$logUucl ))
plot.df$spline <- pmax(-10 , pmin(20, plot.df$spline))
fit.plot <- ggplot(data=plot.df, aes_(x=~time))+
ggtitle(title, subtitle="Fitted spline curve with 95% credible intervals for estimated log(U[i])")+
geom_point(aes_(y=~logUi), color="red", shape=1)+ # open circle
xlab("Time Index\nOpen/closed circles - initial and final estimates")+
ylab("log(U[i]) + 95% credible interval")+
geom_point(aes_(y=~logU), color="black", shape=19)+
geom_line (aes_(y=~logU), color="black")+
geom_errorbar(aes_(ymin=~logUlcl, ymax=~logUucl), width=.1)+
geom_line(aes_(y=~spline),linetype="dashed")+
scale_x_continuous(breaks=seq(min(plot.df$time,na.rm=TRUE),max(plot.df$time, na.rm=TRUE),2))+
scale_y_continuous(sec.axis = sec_axis(~ exp(.), name="U + 95% credible interval",
breaks=c(1,10,20,50,
100,200,500,
1000,2000,5000,
10000,20000, 50000,
100000,200000, 500000,
1000000,2000000,5000000,10000000),
labels = scales::comma))
if(save.output.to.files)ggsave(plot=fit.plot, filename=paste(prefix,"-fit.pdf",sep=""), height=6, width=10, units="in")
results$plots$fit.plot <- fit.plot
## plot the logitP over time
logitP.plot <- plot_logitP(title=title, time=new.time, n1=new.n1, m2=expanded.m2, u2=new.u2,
logitP.cov=new.logitP.cov, results=results,
trunc.logitP=trunc.logitP)
if(save.output.to.files)ggsave(plot=logitP.plot, filename=paste(prefix,"-logitP.pdf",sep=""), height=6, width=10, units="in")
results$plots$logitP.plot <- logitP.plot
## Look at autocorrelation function for Utot
mcmc.sample <- data.frame(parm="Utot", sample=results$sims.matrix[,"Utot"], stringsAsFactors=FALSE)
acf.Utot.plot <- plot_acf(mcmc.sample)
if(save.output.to.files)ggsave(plot=acf.Utot.plot, filename=paste(prefix,"-Utot-acf.pdf",sep=""), height=4, width=6, units="in")
results$plots$acf.Utot.plot <- acf.Utot.plot
## Look at the shape of the posterior distribution
mcmc.sample1 <- data.frame(parm="Utot", sample=results$sims.matrix[,"Utot"], stringsAsFactors=FALSE)
mcmc.sample2 <- data.frame(parm="Ntot", sample=results$sims.matrix[,"Ntot"], stringsAsFactors=FALSE)
mcmc.sample <- rbind(mcmc.sample1, mcmc.sample2)
post.UNtot.plot <- plot_posterior(mcmc.sample)
post.UNtot.plot
if(save.output.to.files)ggsave(plot=post.UNtot.plot, filename=paste(prefix,"-UNtot-posterior.pdf",sep=""),
height=ifelse(length(unique(mcmc.sample$parm))<=2,4,6), width=6, units="in")
results$plots$post.UNtot.plot <- post.UNtot.plot
## Bayesian P-values
discrep <-PredictivePosterior.TSPNDENPMarkAvail(new.n1, expanded.m2, new.u2,
new.logitP.fixed,
expit(results$sims.list$logitP),
round(results$sims.list$U),
results$sims.list$Theta,
results$sims.list$ma.p,
Delta.max)
gof <- PredictivePosteriorPlot.TSPNDE (discrep)
if(save.output.to.files)ggsave(gof[[1]],filename=paste(prefix,"-GOF.pdf",sep=""), height=8, width=8, units="in", dpi=300 )
results$plots$gof <- gof
# create traceplots of logU, U, and logitP (along with R value) to look for non-convergence
# the plot_trace will return a list of plots (one for each page as needed)
varnames <- names(results$sims.array[1,1,]) # extract the names of the variables
# Trace plots of logitP
trace.plot <- plot_trace(title=title, results=results, parms_to_plot=varnames[grep("^logitP", varnames)])
if(save.output.to.files){
pdf(file=paste(prefix,"-trace-logitP.pdf",sep=""))
plyr::l_ply(trace.plot, function(x){plot(x)})
dev.off()
}
results$plots$trace.logitP.plot <- trace.plot
# now for the traceplots of logU (etaU), Utot, and Ntot
trace.plot <- plot_trace(title=title, results=results, parms_to_plot=varnames[c(grep("Utot",varnames), grep("Ntot",varnames), grep("^etaU", varnames))])
if(save.output.to.files){
pdf(file=paste(prefix,"-trace-logU.pdf",sep=""))
plyr::l_ply(trace.plot, function(x){plot(x)})
dev.off()
}
results$plots$trace.logU.plot <- trace.plot
sink(report, append=TRUE)
## Global summary of results
cat("\n\n*** Summary of MCMC results *** \n\n")
save.max.print <- getOption("max.print")
options(max.print=.Machine$integer.max)
print(results, digits.summary=3)#, max=.Machine$integer.max)
options(max.print=save.max.print)
cat("\n\n*** Alternate DIC computation based on p_D = var(deviance)/2 \n")
results.row.names <- rownames(results$summary)
deviance.row.index<- grep("deviance", results.row.names)
deviance <- results$summary[deviance.row.index,]
p.D <- deviance["sd"]^2/2
dic <- deviance["mean"]+p.D
cat(" D-bar: ", deviance["mean"],"; var(dev): ", deviance["sd"]^2,
"; p.D: ", p.D, "; DIC: ", dic)
## Summary of population sizes
cat("\n\n\n\n*** Summary of Unmarked Population Size ***\n")
temp<- results$summary[ grep("Utot", rownames(results$summary)),]
old.Rhat <- temp["Rhat"]
temp<- formatC(temp, big.mark=",", format="d")
temp["Rhat"] <- formatC(old.Rhat,digits=2,format="f",flag="#")
print(temp, quote=FALSE)
cat("\n\n*** Summary of Total Population Size *** \n")
temp<- results$summary[ grep("Ntot", rownames(results$summary)),]
old.Rhat <- temp["Rhat"]
temp<- formatC(temp, big.mark=",", format="d")
temp["Rhat"] <- formatC(old.Rhat,digits=2,format="f",flag="#")
print(temp, quote=FALSE)
cat("\n\n\n\n*** Summary of Quantiles of Run Timing *** \n")
cat( " This is based on the sample weeks provided and the U[i] values \n")
q <- RunTime(time=time, U=results$sims.list$U, prob=run.prob)
temp <- rbind(apply(q,2,mean), apply(q,2,sd))
rownames(temp) <- c("Mean", "Sd")
print(round(temp,2))
# Add the runtiming to the output object
results$runTime <- temp
cat("\n\n")
cat(paste("*** end of fit *** ", date()))
sink()
# save the report to a file?
if(save.output.to.files)writeLines(stdout, results.filename)
results$report <- stdout
## add some of the raw data to the bugs object for simplicity in referencing it later
results$data <- list( time=time, n1=n1, m2=m2, u2=u2,
jump.after=jump.after,
bad.n1=bad.n1, bad.m2=bad.m2, bad.u2=bad.u2,
logitP.cov=logitP.cov,
version=version, date_run=date(),title=title)
return(results)
} ## end of function
|
/scratch/gouwar.j/cran-all/cranData/BTSPAS/R/TimeStratPetersenNonDiagErrorNPMarkAvail_fit.R
|
## 2021-10-23 CJS Added trunc.logitP to avoid plotting problems with extreme values of logitP
## 2020-12-15 CJS Removed sampfrac from code
## 2020-12-15 CJS Fixed problems when u2 is set to missing
## 2020-11-07 CJS Allowed user to specify prior for beta coefficient for logitP
## 2018-12-18 CJS deprecation of sampling fraction
## 2018-12-06 CJS converted report to textConnection
## 2018-12-03 CJS converted fit plot to ggplot
## 2018-12-02 CJS converted traceplots to ggplot
## 2018-12-01 CJS created posterior plots using ggplot
## 2018-11-30 CJS created acf plot using ggplot
## 2018-11-30 CJS Fixed problem of epsilon not being right length
## 2018-11-28 CJS Fixed problem of printing results getting cutoff if too large
## 2018-11-27 CJS Added explicit library references
## 2018-11-25 CJS Removed all references to OpenBugs
## 2015-06-10 CJS gof converted to ggplot()
## 2014-09-01 CJS conversion to jags
## 2012-08-30 CJS fixed errors with any() and all() with NAs in error checking
## 2012-02-15 CJS fixed plotting limits on log(U) plot
## 2011-06-13 CJS added bayesian p-values to results
## 2011-03-15 CJS fixed code to do error checking
## 2011-03-09 CJS added priors for movement parameters (muTT)
## 2010-11-21 CJS fixed some code in expanded.m2 where used u2 rather than new.u2
## 2010-11-25 CJS pretty printing of final population estimates
## 2010-09-06 CJS forced input vectors to be vectors
## 2010-08-06 CJS create trace plots of logitP and logU
## 2010-08-04 CJS added version/date to final result
## 2010-03-21 Added notification of Delta.max value.
## 2010-03-12 CJS added n.chains etc to argument list; added optional call for debugging purposes
## 2010-03-03 SJB Created File
#' Wrapper (*_fit) to fit the Time Stratified Petersen Estimator
#' with NON Diagonal Entries function and a non-parametric travel time estimator.
#'
#' Takes the number of marked fish released, the number of recaptures, and the
#' number of unmarked fish and uses Bayesian methods to fit a spline
#' through the population numbers and a hierarchical model for the trap
#' efficiencies over time. The output is written to files and an MCMC object
#' is also created with samples from the posterior.
#'
#' Normally the user makes a call to the *_fit function which then calls the
#' fitting function.
#'
#' Use the \code{\link{TimeStratPetersenDiagError_fit}} function for cases
#' where recaptures take place ONLY in the stratum of release, i.e. the
#' diagonal case.
#'
#' The *NP functions fit a non-parametric distribution for the travel times.
#'
#' @template title
#' @template prefix
#' @template time
#' @template n1
#' @param m2 A numeric matrix of the number of fish released in stratum [i] and
#' recovered in [j-1] strata later. For example m2[3,5] is the number of
#' marked fish released in stratum 3 and recovered 4 strata later in stratum 7.
#' The first column is the number of marked fish recovered in the stratum of
#' release, i.e. 0 strata later. Use the
#' \code{\link{TimeStratPetersenDiagError_fit}} function for cases where
#' recaptures take place ONLY in the stratum of release, i.e. the diagonal
#' case.
#' @template u2.ND
#' @template sampfrac
#' @template jump.after
#' @template bad.n1
#' @template bad.m2
#' @template bad.u2
#' @template logitP.cov
#' @template param.logitP.fixed
#' @template mcmc-parms
#' @template tauU.alpha.beta
#' @template taueU.alpha.beta
#' @template Delta.max
#' @template tauTT.alpha.beta
#' @template prior.beta.logitP.mean.sd
#' @param prior.muTT - prior for movement rates.
#' These are like a Dirichlet type prior
#' where x are values representing belief in the travel times.
#' For example, x=c(1,4,3,2) represents a system where the
#' maximum travel time is 3 strata after release with
#' 1/10=.1 of the animals moving in the stratum of release
#' 4/10=.4 of the animals taking 1 stratum to move etc
#' So if x=c(10,40,30,20), this represents the same movement pattern
#' but a strong degree of belief
#' @template tauP.alpha.beta
#' @template run.prob
#' @template debug
#' @template InitialSeed
#' @template save.output.to.files
#' @template trunc.logitP
#'
#' @return An MCMC object with samples from the posterior distribution. A
#' series of graphs and text file are also created in the working directory.
#' @template author
#' @template references
#' @keywords ~models ~smooth
#' @examples
#'
#' ##---- See the vignette for examples of how to use this package
#' ##
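#' ## A minimal sketch of a call with entirely made-up numbers (hypothetical data,
#' ## chosen only to satisfy the input checks); see the vignette for real analyses.
#' \dontrun{
#' n1 <- c(100, 120, 90)                    # fish marked and released in 3 strata
#' m2 <- matrix(c(10, 5, 2,
#'                12, 6, 3,
#'                 9, 4, 1),
#'              nrow=3, byrow=TRUE)         # recoveries 0, 1, and 2 strata later
#' u2 <- c(300, 350, 400, 250, 200)         # unmarked fish in 5 recovery strata
#' fit <- TimeStratPetersenNonDiagErrorNP_fit(
#'          title="Hypothetical example", prefix="hypo-NP",
#'          time=1:5, n1=n1, m2=m2, u2=u2,
#'          n.chains=3, n.iter=10000, n.burnin=5000, n.sims=500,
#'          save.output.to.files=FALSE)
#' }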
#'
#' @export TimeStratPetersenNonDiagErrorNP_fit
#' @importFrom stats runif var sd
TimeStratPetersenNonDiagErrorNP_fit<- function( title="TSPNDENP", prefix="TSPNDENP-",
time, n1, m2, u2, sampfrac=rep(1,length(u2)), jump.after=NULL,
bad.n1=c(), bad.m2=c(), bad.u2=c(),
logitP.cov=rep(1,length(u2)),
logitP.fixed=NULL, logitP.fixed.values=NULL,
n.chains=3, n.iter=200000, n.burnin=100000, n.sims=2000,
tauU.alpha=1, tauU.beta=.05, taueU.alpha=1, taueU.beta=.05,
prior.beta.logitP.mean = c(logit(sum(m2,na.rm=TRUE)/sum(n1,na.rm=TRUE)),rep(0, ncol(as.matrix(logitP.cov))-1)),
prior.beta.logitP.sd = c(2, rep(10, ncol(as.matrix(logitP.cov))-1)),
tauP.alpha=.001, tauP.beta=.001,
Delta.max=NULL,
prior.muTT=NULL, # prior on movement rates
tauTT.alpha=.1,tauTT.beta=.1,
run.prob=seq(0,1,.1), # what percentiles of run timing are wanted
debug=FALSE, debug2=FALSE,
InitialSeed=ceiling(stats::runif(1,min=0,1000000)),
save.output.to.files=TRUE,
trunc.logitP=15) {
## Fit a Time Stratified Petersen model with NON-diagonal entries and with smoothing on U allowing for random error
## This is the classical stratified Petersen model where the recoveries can take place for this and multiple
## strata later. Transitions of marked fish are modelled non-parametrically.
##
version <- '2021-11-02'
options(width=200)
## Input parameters are
## title - title for the analysis (character string)
## prefix - prefix used for files created with the analysis results
## this should be in standard Window's format, eg. JC-2002-ST-TSPNDE
## to which is appended various suffixes for plots etc (character string)
## time - vector of stratum numbers. For example, 9:38 would indicate that the
## Trinity River system sampled weeks 9 to 38.
## These values are never used in the analysis and only serve as labels for the weeks and for plotting purposes.
## They should be contiguous equally spaced values and be the same length as u2.
## n1, m2, u2 - the input data consisting of fish marked and released, recapture, and unmarked captured
## Note that m2 is a MATRIX. The first column are those fish recaptured in the stratum of release
## and the subsequent columns are those recoveries in later strata.
##              This is expanded to the full matrix [i,j] for fish released in stratum i and recovered in stratum j
## The vector u2 should be long enough to account for any fish that are recaptured later on
## from releases late in the season. The bottom right diagonal of m2 may be all zeros - that is ok
##              Notice that length(u2) can be longer than length(n1), up to length(n1)+ncol(m2)-1.
## sampfrac - Deprecated. Do not use anymore.
## jump.after - in some cases, a single spline is still not flexible enough to cope with rapid
## changes in the run curve. For example, in the Trinity River project, a larger
## hatchery release occurs around stratum 14. This is a vector indicating the
## strata AFTER which the spline curve is allowed to jump.
## null or vector of arbitrary length.
## bad.n1 - list of stratum numbers where the value of n1 is suspect.
## bad.m2 - list of stratum numbers where the value of m2 is suspect.
## For example, the capture rate could be extremely low.
##              These (n1 and the corresponding m2 rows) are set to 0 prior to the call to JAGS
##    bad.u2  - list of stratum numbers where the value of u2 is suspect.
##              These are set to NA prior to the call to JAGS
## logitP.cov - matrix of covariates for logit(P). If the strata times are "missing" some values, an intercept is assumed
## for the first element of the covariance matrix and 0 for the rest of the covariates.
## CAUTION - this MAY not be what you want to do. It is likely best to enter ALL strata
## if you have any covariates. The default, if not specified, is a constant (the mean logit)
## prior.muTT - prior for movement rates.
##               These are like a Dirichlet type prior
## where x are values representing belief in the travel times.
## For example, x=c(1,4,3,2) represents a system where the
## maximum travel time is 3 strata after release with
## 1/10=.1 of the animals moving in the stratum of release
## 4/10=.4 of the animals taking 1 stratum to move
## etc
##              So if x=c(10,40,30,20), this represents the same movement pattern
## but a strong degree of belief
## tauU.alpha, tauU.beta - parameters for the prior on variance in spline coefficients
## taueU.alpha, taueU.beta - parameters for the prior on variance in log(U) around fitted spline
# prior.beta.logitP.mean, prior.beta.logitP.sd - parameters for the prior on mean logit(P)'s [The intercept term]
# The other covariates are assigned priors of a mean of 0 and a sd of 30
## tauP.alpha, tauP.beta - parameters for the prior on 1/var of residual error in logit(P)'s
## Delta.max - maximum transition time for marked fish
## tauTT.alpha, tauTT.beta - parameters of the prior on 1/var of logit continuation ratio for travel times
## run.prob - percentiles of run timing wanted
## debug - if TRUE, then this is a test run with very small MCMC chains run to test out the data
##            and JAGS will run and stop waiting for you to exit and complete
# force input vectors to be vectors. Note that m2 is NOT a vector
time <- as.vector(time)
n1 <- as.vector(n1)
u2 <- as.vector(u2)
sampfrac <- as.vector(sampfrac)
## Do some basic error checking
## 1. Check that length of n1, m2, u2, sampfrac, time are consistent with each other.
## In the non-diagonal case, they don't have to match
if(length(n1)!=nrow(m2)){
cat("***** ERROR ***** Length of n1 and number of rows of m2 must be equal. They are:",
length(n1)," ",nrow(u2),"\n")
return()}
if(!is.numeric(n1)){
cat("***** ERROR ***** n1 must be numeric. You have:",
paste(n1,collapse=", "),"\n")
return()}
if(any(is.na(n1))){
cat("***** ERROR ***** All values of n1 must not be missing. You have: ",
paste(n1,collapse=", "),"\n")
return()}
if(any(n1 < 0, na.rm=TRUE)){
cat("***** ERROR ***** All values of n1 must be non-negative. You have: ",
paste(n1,collapse=", "),"\n")
return()}
if(stats::var(c(length(u2),length(sampfrac),length(time)))>0){
cat("***** ERROR ***** Lengths of u2, sampfrac, time must all be equal. They are:",
length(u2)," ",length(sampfrac)," ",length(time),"\n")
return()}
if(length(logitP.cov) %% length(u2) != 0){
cat("***** ERROR ***** Dimension of covariate vector doesn't match length of u2. They are:",
length(u2)," ",length(logitP.cov)," ",dim(logitP.cov),"\n")
return()}
## 2. Check that rowsum of m2<= n1
if(any(apply(m2,1,sum, na.rm=TRUE)>n1)){
cat("***** ERROR ***** Row sum of m2 (m2[i,+], number of marks returned, must be <= n1[i]=number released.\n",
"The number released are \n n1:",paste(n1,collapse=","),
"\n The total number of recoveries is m2[2,+]:",paste(apply(m2,1,sum,na.rm=TRUE),collapse=","),"\n")
return()}
## 3. Elements of bad.m2 and jump.after must belong to time
if(!all(bad.n1 %in% time,na.rm=TRUE)){
cat("***** ERROR ***** bad.n1 must be elements of strata identifiers. You entered \n bad.n1:",
paste(bad.n1,collapse=","),
"\n Strata identifiers are \n time:",paste(time,collapse=","), "\n")
return()}
if(!all(bad.m2 %in% time,na.rm=TRUE)){
cat("***** ERROR ***** bad.m2 must be elements of strata identifiers. You entered \n bad.m2:",
paste(bad.m2,collapse=","),
"\n Strata identifiers are \n time:",paste(time,collapse=","), "\n")
return()}
if(!all(bad.u2 %in% time,na.rm=TRUE)){
cat("***** ERROR ***** bad.u2 must be elements of strata identifiers. You entered \n bad.u2:",paste(bad.u2,collapse=","),
"\n Strata identifiers are \n time:",paste(time,collapse=","), "\n")
return()}
if(!all(jump.after %in% time,na.rm=TRUE)){
cat("***** ERROR ***** jump.after must be elements of strata identifiers. You entered \n jump.after:",paste(jump.after,collapse=","),
"\n Strata identifiers are \n time:",paste(time,collapse=","), "\n")
return()}
# 5. check that the values of logitP.fixed belong to time
if(!all(logitP.fixed %in% time,na.rm=TRUE)){
cat("***** ERROR ***** logitP.fixed must be elements of strata identifiers. You entered \n logitP.fixed:",paste(logitP.fixed,collapse=","),
"\n Strata identifiers are \n time:",paste(time,collapse=","), "\n")
return()}
if(length(logitP.fixed)!=length(logitP.fixed.values)){
cat("***** ERROR ***** Lengths of logitP.fixed and logitP.fixed.values must all be equal. They are:",
length(logitP.fixed)," ",length(logitP.fixed.values),"\n")
return()}
# 6. check that prior for muTT is correct length
if(!is.null(prior.muTT) & length(prior.muTT) != ncol(m2)){
cat("***** ERROR ***** Prior muTT must have same length as columns of m2. You entered \n prior.muTT:",prior.muTT,"\n")
return()}
# 7. check that the length of u2 is consistent with n1 and m2
if(length(u2) < length(n1) | length(u2) > (length(n1)+ ncol(m2)-1)){
cat("***** ERROR ***** Length(u2) must be between length(n1) and length(n1)+ncol(m2)-1 \n")
return()
}
# Check that that the prior.beta.logitP.mean and prior.beta.logitP.sd length=number of columns of covariates
logitP.cov <- as.matrix(logitP.cov)
if(!is.vector(prior.beta.logitP.mean) | !is.vector(prior.beta.logitP.sd)){
stop("prior.beta.logitP.mean and prior.beta.logitP.sd must be vectors")
}
if(!is.numeric(prior.beta.logitP.mean) | !is.numeric(prior.beta.logitP.sd)){
stop("prior.beta.logitP.mean and prior.beta.logitP.sd must be numeric")
}
if(length(prior.beta.logitP.mean) != ncol(logitP.cov) | length(prior.beta.logitP.sd) != ncol(logitP.cov)){
stop("prior.beta.logitP.mean and prior.beta.logitP.sd must be same length as number columns in covariate matrix")
}
# Deprecation of sampling fraction.
if(any(sampfrac != 1)){
cat("***** ERROR ***** Sampling fraction is deprecated for any values other than 1. DO NOT USE ANYMORE. ")
return()
}
## Define maximum travel time if not supplied by user
if(is.null(Delta.max))
Delta.max <- ncol(m2)-1
## Define priors on travel time (muTT) if not specified by user
if(is.null(prior.muTT)){
mean.muTT <- rep(0,Delta.max)
sd.muTT <- rep(1/sqrt(.666),Delta.max)
} else { # convert the input values to actual priors on the movement rates
temp <- make.muTT.prior(prior.muTT)
mean.muTT <- temp$mean.muTT
sd.muTT <- temp$sd.muTT
}
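## Illustrative note: the default prior places mean 0 and sd 1/sqrt(.666) (about 1.22)
## on each element of muTT, i.e. a fairly diffuse prior on the travel-time distribution.
## A user-supplied prior.muTT such as c(10,40,30,20) is converted by make.muTT.prior()
## to the corresponding mean.muTT/sd.muTT; visualize.muTT.prior() can be used to
## inspect the implied prior on the movement proportions.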
## Define output filename
results.filename <- paste(prefix,"-results.txt",sep="")
## Open sink to output file
stdout <- vector('character')
report <- textConnection('stdout', 'wr', local = TRUE)
sink(report)
cat(paste("Time Stratified Petersen with Non-Diagonal recaptures, error in smoothed U, and non-parametric modelling of travel times - ", date()))
cat("\nVersion: ", version)
cat("\n\n", title, "Results \n\n")
cat("*** Raw data *** (padded to match length of u2) \n")
jump.indicator <- rep(' ', length(u2))
jump.indicator[time %in% jump.after]<- '***'
ex.n1 <- c(n1, rep(NA, length(u2)-length(n1)))
ex.m2 <- rbind(m2,matrix(NA, nrow=length(u2)-length(n1), ncol=ncol(m2)))
temp<- data.frame(time=time, n1=ex.n1, m2=ex.m2, u2=u2, logitP.cov=logitP.cov, jump=jump.indicator)
print(temp)
cat("\n\n")
## Print information about jump points
cat("Jump point are after strata: ", jump.after)
if(length(jump.after)==0) cat("none - A single spline is fit")
## Print information about delta max
cat("\nMaximum travel time (Delta.max): ",Delta.max)
cat("\nFixed logitP indices are: ", logitP.fixed)
if(length(logitP.fixed)==0) cat("none - NO fixed values")
cat("\nFixed logitP values are: ", logitP.fixed.values)
if(length(logitP.fixed)==0) cat("none - NO fixed values")
## Obtain the Pooled Petersen estimator prior to fixup of bad.n1, bad.m2, and bad.u2 values
cat("\n\n*** Pooled Petersen Estimate prior to fixing bad n1, m2, or u2 values\n\n")
temp.n1 <- n1
temp.m2 <- m2
temp.u2 <- u2
cat("Total n1=", sum(temp.n1,na.rm=TRUE),"; m2=",sum(temp.m2,na.rm=TRUE),"; u2=",sum(temp.u2,na.rm=TRUE),"\n\n")
pp <- SimplePetersen(sum(temp.n1,na.rm=TRUE), sum(temp.m2,na.rm=TRUE), sum(temp.u2,na.rm=TRUE))
cat("Est U(total) ", format(round(pp$U.est),big.mark=",")," (SE ", format(round(pp$U.se), big.mark=","), ")\n")
cat("Est N(total) ", format(round(pp$N.est),big.mark=",")," (SE ", format(round(pp$N.se), big.mark=","), ")\n\n\n")
# Obtain the Pooled Petersen estimator after removal of entries with bad.n1, m2, or u2 values
select.rel <- !(time[1:length(n1)] %in% bad.n1 | time[1:length(n1)] %in% bad.m2 )
select.rec <- ! time %in% bad.u2
cat("\n\n*** Pooled Petersen Estimate after removing release and recovery strata flagged as bad ***\n\n")
cat("The following release strata were excluded: ",
if(length(time[!select.rel])>0){time[!select.rel]} else {" NONE"}, "\n")
cat("The following recovery strata were excluded: ",
if(length(time[!select.rec])>0){time[!select.rec]} else {" NONE"}, "\n")
temp.n1 <- n1[select.rel]
temp.m2 <- m2[select.rel,]
temp.u2 <- u2[select.rec]
cat("Total n1=", sum(temp.n1,na.rm=TRUE),"; m2=",sum(temp.m2,na.rm=TRUE),"; u2=",sum(temp.u2, na.rm=TRUE),"\n\n")
pp <- SimplePetersen(sum(temp.n1,na.rm=TRUE), sum(temp.m2,na.rm=TRUE), sum(temp.u2,na.rm=TRUE))
cat("Est U(total) ", format(round(pp$U.est),big.mark=",")," (SE ", format(round(pp$U.se), big.mark=","), ")\n")
cat("Est N(total) ", format(round(pp$N.est),big.mark=",")," (SE ", format(round(pp$N.se), big.mark=","), ")\n\n\n")
# Test if pooling can be done
# We only do the release strata that are not flagged as bad and have no missing values
cat("*** Test if pooled Petersen is allowable. [Check if equal recovery from each stratum not flagged and without missing recoveries] ***\n\n")
#browser()
select <- select.rel & (!is.na(apply(m2,1,sum)))
temp.n1 <- n1[select]
temp.m2 <- m2[select,]
test <- TestIfPool( temp.n1, apply(temp.m2,1,sum, na.rm=TRUE))
cat("(Large Sample) Chi-square test statistic ", test$chi$statistic," has p-value", test$chi$p.value,"\n\n")
temp <- cbind(time[1:length(n1)][select],test$chi$observed, round(test$chi$expected,1), round(test$chi$residuals^2,1))
colnames(temp) <- c('time','n1-m2','m2','E[n1-m2]','E[m2]','X2[n1-m2]','X2[m2]')
print(temp)
cat("\n Be cautious of using this test in cases of small expected values. \n\n")
## Adjust the data for the explicitly bad values or other problems
new.time <- time
new.n1 <- n1
new.m2 <- m2
new.u2 <- u2
new.logitP.cov <- logitP.cov
# Set the bad n1 values to 0 for the number of fish released and corresponding values of m2 also to 0 recovered subsequently.
# Set any bad m2 values to 0 for the number of releases and subsequent recoveries as well.
# But we don't set bad u2 values to 0 as this would imply no catch. We set these to missing
new.n1[time[1:length(n1)] %in% c(bad.n1,bad.m2) ] <- 0
new.m2[time[1:length(n1)] %in% c(bad.m2,bad.n1),] <- 0
new.u2[time %in% bad.u2] <- NA
## Print out the revised data
cat("\n\n*** Revised data *** \n")
jump.indicator <- rep(' ', length(u2))
jump.indicator[time %in% jump.after]<- '***'
ex.n1 <- c(new.n1, rep(NA, length(new.u2)-length(new.n1)))
ex.m2 <- rbind(new.m2,matrix(NA, nrow=length(new.u2)-length(new.n1), ncol=ncol(new.m2)))
temp<- data.frame(time=new.time, n1=ex.n1, m2=ex.m2, u2=new.u2, logitP.cov=new.logitP.cov,
jump.after=jump.indicator)
print(temp)
cat("\n\n")
## The NP analysis does not need the expanded m2 array, but this is
## needed later on. So, we'd better compute it here. The last column
## of this matrix will be the number of individuals from each
## stratum that are not recaptured.
##
#browser()
expanded.m2 <- matrix(0, nrow=length(new.n1), ncol=length(new.n1)+ncol(m2)+1)
for(i in 1:length(new.n1)){
expanded.m2[i,1:(ncol(expanded.m2)-1)] <- c(rep(0,i-1),new.m2[i,],rep(0,ncol(expanded.m2)))[1:(ncol(expanded.m2)-1)]
expanded.m2[i,ncol(expanded.m2)] <- new.n1[i] - sum(new.m2[i,])
}
cat("*** Expanded m2 array with column sum and u2 ***\n\n")
save.max.print <- getOption("max.print")
options(max.print=.Machine$integer.max)
temp <- rbind(expanded.m2, apply(expanded.m2,2,sum, na.rm=TRUE))
rownames(temp)[nrow(temp)] <- 'Column totals'
temp <- rbind(temp, c(u2, rep(NA, ncol(expanded.m2)-length(u2)) ))
rownames(temp)[nrow(temp)] <- "Untagged (u2)"
temp <- rbind(temp, c(new.u2, rep(NA, ncol(expanded.m2)-length(new.u2)) ))
rownames(temp)[nrow(temp)] <- "Untagged - after fixups"
new.logitP.fixed <- rep(NA, length(new.u2))
new.logitP.fixed[match(logitP.fixed, time)] <- logitP.fixed.values
temp <- rbind(temp, c(new.logitP.fixed, rep(NA, ncol(expanded.m2)-length(new.u2)) ))
rownames(temp)[nrow(temp)] <- "Logit P fixed"
rownames(temp)[1:length(n1)] <- 1:length(n1)
print(temp)
options(max.print=save.max.print)
sink()
# some further checking on u2. Make sure that every column where there are recoveries has a u2
if( (length(u2)+1) <= (ncol(temp)-1)) {
if(any( temp["Column totals", (length(u2)+1):(ncol(temp)-1)] >0)){
cat("***** ERROR ***** Non-zero recoveries and u2 not available at end of experiment??? \n Check above matrix\n")
return()
}
}
sink(report, append=TRUE)
# assign the logitP fixed values etc (replicates what was done above, but convenient to put here)
new.logitP.fixed <- rep(NA, length(new.u2))
new.logitP.fixed[match(logitP.fixed, time)] <- logitP.fixed.values
## We do need to add the column of not recaptured counts to the m2
## array.
new.m2 <- cbind(new.m2,new.n1-apply(new.m2,1,sum))
## Print out information on the prior distributions used
cat("\n\n*** Information on priors *** \n")
## 1) tauU = (variance of spline coefficients)^-1
cat(" Parameters for prior on tauU (variance in spline coefficients): ", tauU.alpha, tauU.beta,
" which corresponds to a mean/std dev of 1/var of:",
round(tauU.alpha/tauU.beta,2),round(sqrt(tauU.alpha/tauU.beta^2),2),"\n")
## 2) taueU = (variance of errors)^-1
cat(" Parameters for prior on taueU (variance of log(U) about spline): ",taueU.alpha, taueU.beta,
" which corresponds to a mean/std dev of 1/var of:",
round(taueU.alpha/taueU.beta,2),round(sqrt(taueU.alpha/taueU.beta^2),2),"\n")
## 3) beta.logitP coefficients for covariates for capture probabilities
cat(" Parameters for prior on beta.logitP[1] (intercept) (mean, sd): \n", cbind(round(prior.beta.logitP.mean,3), round(prior.beta.logitP.sd,5)),"\n")
## 4) tauP = (variance of capture probabilites conditional on covariates)^-1
cat(" Parameters for prior on tauP (residual variance of logit(P) after adjusting for covariates): ",tauP.alpha, tauP.beta,
" which corresponds to a mean/std dev of 1/var of:",
round(tauP.alpha/tauP.beta,2),round(sqrt(tauP.alpha/tauP.beta^2),2),"\n")
## 5) tauTT = (variance of continuation ratios for theta)^-1
cat(" Parameters for prior on tauTT (variance of continuation ratios for travel times): ",tauTT.alpha, tauTT.beta,
" which corresponds to a mean/std dev of 1/var of:",
round(tauTT.alpha/tauTT.beta,2),round(sqrt(tauTT.alpha/tauTT.beta^2),2),"\n")
## 6) priors on movement rates
cat(" Parameters for prior on muTT (movement rates).\n",
" Input values", prior.muTT,"\n",
" which corresponds to \n",
" mean.muTT: ", mean.muTT,"\n",
" sd.muTT: ", sd.muTT,"\n",
" Use the visualize.muTT.prior function for more details on prior on movements\n\n")
cat("\n\nInitial seed for this run is: ",InitialSeed, "\n")
sink()
if (debug2) {
cat("\nprior to formal call to TimeStratPetersenNonDiagError\n")
browser()
}
if (debug)
{results <- TimeStratPetersenNonDiagErrorNP(
title=title, prefix=prefix,
time=new.time, n1=new.n1, m2=new.m2, u2=new.u2,
jump.after=(1:length(u2))[time %in% jump.after],
logitP.cov=new.logitP.cov, logitP.fixed=new.logitP.fixed,
n.chains=3, n.iter=10000, n.burnin=5000, n.sims=500, # set to small values for debugging only
prior.beta.logitP.mean=prior.beta.logitP.mean,
prior.beta.logitP.sd =prior.beta.logitP.sd,
tauU.alpha=tauU.alpha, tauU.beta=tauU.beta,
taueU.alpha=taueU.alpha, taueU.beta=taueU.beta,
Delta.max=Delta.max,
mean.muTT=mean.muTT, sd.muTT=sd.muTT,
tauTT.alpha=tauTT.alpha,tauTT.beta=tauTT.beta,
debug=debug, debug2=debug2,
InitialSeed=InitialSeed, save.output.to.files=save.output.to.files)
} else #notice R syntax requires { before the else
{results <- TimeStratPetersenNonDiagErrorNP(
title=title, prefix=prefix,
time=new.time, n1=new.n1, m2=new.m2, u2=new.u2,
jump.after=(1:length(u2))[time %in% jump.after],
logitP.cov=new.logitP.cov, logitP.fixed=new.logitP.fixed,
n.chains=n.chains, n.iter=n.iter, n.burnin=n.burnin, n.sims=n.sims,
prior.beta.logitP.mean=prior.beta.logitP.mean,
prior.beta.logitP.sd =prior.beta.logitP.sd,
tauU.alpha=tauU.alpha, tauU.beta=tauU.beta,
taueU.alpha=taueU.alpha, taueU.beta=taueU.beta,
Delta.max=Delta.max,
mean.muTT=mean.muTT, sd.muTT=sd.muTT,
tauTT.alpha=tauTT.alpha,tauTT.beta=tauTT.beta,
debug=debug, debug2=debug2,
InitialSeed=InitialSeed, save.output.to.files=save.output.to.files)
}
## Now to create the various summary tables of the results
## Add the pooled Petersen estimators to the result
results$PP$using.all.data <-pp
Nstrata.rel <- length(n1)
Nstrata.cap <- ncol(expanded.m2) -1 ## don't forget that last column of m2 is number of fish never seen
# A plot of the observed log(U) on the log scale, and the final mean log(U)
plot.df <- data.frame(time =new.time)
plot.df$logUi <-log( c((new.u2[1:Nstrata.rel]+1)*(new.n1+2)/(apply(expanded.m2[,1:Nstrata.cap],1,sum)+1), rep(NA, length(u2)-Nstrata.rel)))
# extract the fitted U values
results.row.names <- rownames(results$summary)
etaU.row.index <- grep("etaU", results.row.names)
etaU<- results$summary[etaU.row.index,]
plot.df$logU    <- etaU[,"mean"]
plot.df$logUlcl <- etaU[,"2.5%"]
plot.df$logUucl <- etaU[,"97.5%"]
# extract the spline values
logUne.row.index <- grep("logUne", results.row.names)
logUne<- results$summary[logUne.row.index,"mean"]
plot.df$spline <- results$summary[logUne.row.index,"mean"]
# add limits to the plot to avoid non-monotone secondary axis problems with extreme values
plot.df$logUi <- pmax(-10 , pmin(20, plot.df$logUi))
plot.df$logU <- pmax(-10 , pmin(20, plot.df$logU ))
plot.df$logUlcl <- pmax(-10 , pmin(20, plot.df$logUlcl ))
plot.df$logUucl <- pmax(-10 , pmin(20, plot.df$logUucl ))
plot.df$spline <- pmax(-10 , pmin(20, plot.df$spline))
fit.plot <- ggplot(data=plot.df, aes_(x=~time))+
ggtitle(title, subtitle="Fitted spline curve with 95% credible intervals for estimated log(U[i])")+
geom_point(aes_(y=~logUi), color="red", shape=1)+ # open circle
xlab("Time Index\nOpen/closed circles - initial and final estimates")+
ylab("log(U[i]) + 95% credible interval")+
geom_point(aes_(y=~logU), color="black", shape=19)+
geom_line (aes_(y=~logU), color="black")+
geom_errorbar(aes_(ymin=~logUlcl, ymax=~logUucl), width=.1)+
geom_line(aes_(y=~spline),linetype="dashed")+
scale_x_continuous(breaks=seq(min(plot.df$time, na.rm=TRUE),max(plot.df$time, na.rm=TRUE),2))+
scale_y_continuous(sec.axis = sec_axis(~ exp(.), name="U + 95% credible interval",
breaks=c(1,10,20,50,
100,200,500,
1000,2000,5000,
10000,20000, 50000,
100000,200000, 500000,
1000000,2000000,5000000,10000000),
labels = scales::comma))
if(save.output.to.files)ggsave(plot=fit.plot, filename=paste(prefix,"-fit.pdf",sep=""), height=6, width=10, units="in")
results$plots$fit.plot <- fit.plot
## plot the logitP over time
logitP.plot <- plot_logitP(title=title, time=new.time, n1=new.n1, m2=expanded.m2, u2=new.u2,
logitP.cov=new.logitP.cov, results=results,
trunc.logitP=trunc.logitP)
if(save.output.to.files)ggsave(plot=logitP.plot, filename=paste(prefix,"-logitP.pdf",sep=""), height=6, width=10, units="in")
results$plots$logitP.plot <- logitP.plot
## Look at autocorrelation function for Utot
mcmc.sample <- data.frame(parm="Utot", sample=results$sims.matrix[,"Utot"], stringsAsFactors=FALSE)
acf.Utot.plot <- plot_acf(mcmc.sample)
if(save.output.to.files)ggsave(plot=acf.Utot.plot, filename=paste(prefix,"-Utot-acf.pdf",sep=""), height=4, width=6, units="in")
results$plots$acf.Utot.plot <- acf.Utot.plot
## Look at the shape of the posterior distribution
mcmc.sample1 <- data.frame(parm="Utot", sample=results$sims.matrix[,"Utot"], stringsAsFactors=FALSE)
mcmc.sample2 <- data.frame(parm="Ntot", sample=results$sims.matrix[,"Ntot"], stringsAsFactors=FALSE)
mcmc.sample <- rbind(mcmc.sample1, mcmc.sample2)
post.UNtot.plot <- plot_posterior(mcmc.sample)
post.UNtot.plot
if(save.output.to.files)ggsave(plot=post.UNtot.plot, filename=paste(prefix,"-UNtot-posterior.pdf",sep=""),
height=ifelse(length(unique(mcmc.sample$parm))<=2,4,6), width=6, units="in")
results$plots$post.UNtot.plot <- post.UNtot.plot
#browser()
## Bayesian P-values
#browser()
discrep <-PredictivePosterior.TSPNDENP(new.n1, expanded.m2, new.u2,
new.logitP.fixed,
expit(results$sims.list$logitP),
round(results$sims.list$U),
results$sims.list$Theta,
Delta.max)
#browser()
gof <- PredictivePosteriorPlot.TSPNDE (discrep)
if(save.output.to.files)ggsave(gof[[1]],filename=paste(prefix,"-GOF.pdf",sep=""), height=8, width=8, units="in", dpi=300 )
results$plots$gof.plot <- gof
# create traceplots of logU, U, and logitP (along with R value) to look for non-convergence
# the plot_trace will return a list of plots (one for each page as needed)
varnames <- names(results$sims.array[1,1,]) # extract the names of the variables
#browser()
# Trace plots of logitP
trace.plot <- plot_trace(title=title, results=results, parms_to_plot=varnames[grep("^logitP", varnames)])
if(save.output.to.files){
pdf(file=paste(prefix,"-trace-logitP.pdf",sep=""))
plyr::l_ply(trace.plot, function(x){plot(x)})
dev.off()
}
results$plots$trace.logitP.plot <- trace.plot
# now for the traceplots of logU (etaU), Utot, and Ntot
trace.plot <- plot_trace(title=title, results=results, parms_to_plot=varnames[c(grep("Utot",varnames), grep("Ntot",varnames), grep("^etaU", varnames))])
if(save.output.to.files){
pdf(file=paste(prefix,"-trace-logU.pdf",sep=""))
plyr::l_ply(trace.plot, function(x){plot(x)})
dev.off()
}
results$plots$trace.logU.plot <- trace.plot
sink(report, append=TRUE)
## Global summary of results
cat("\n\n*** Summary of MCMC results *** \n\n")
save.max.print <- getOption("max.print")
options(max.print=.Machine$integer.max)
print(results, digits.summary=3)#, max=.Machine$integer.max)
options(max.print=save.max.print)
cat("\n\n*** Alternate DIC computation based on p_D = var(deviance)/2 \n")
results.row.names <- rownames(results$summary)
deviance.row.index<- grep("deviance", results.row.names)
deviance <- results$summary[deviance.row.index,]
p.D <- deviance["sd"]^2/2
dic <- deviance["mean"]+p.D
cat(" D-bar: ", deviance["mean"],"; var(dev): ", deviance["sd"]^2,
"; p.D: ", p.D, "; DIC: ", dic)
## Summary of population sizes
cat("\n\n\n\n*** Summary of Unmarked Population Size ***\n")
temp<- results$summary[ grep("Utot", rownames(results$summary)),]
old.Rhat <- temp["Rhat"]
temp<- formatC(temp, big.mark=",", format="d")
temp["Rhat"] <- formatC(old.Rhat,digits=2,format="f",flag="#")
print(temp, quote=FALSE)
cat("\n\n*** Summary of Total Population Size *** \n")
temp<- results$summary[ grep("Ntot", rownames(results$summary)),]
old.Rhat <- temp["Rhat"]
temp<- formatC(temp, big.mark=",", format="d")
temp["Rhat"] <- formatC(old.Rhat,digits=2,format="f",flag="#")
print(temp, quote=FALSE)
cat("\n\n\n\n*** Summary of Quantiles of Run Timing *** \n")
cat( " This is based on the sample weeks provided and the U[i] values \n")
q <- RunTime(time=time, U=results$sims.list$U, prob=run.prob)
temp <- rbind(apply(q,2,mean), apply(q,2,sd))
rownames(temp) <- c("Mean", "Sd")
print(round(temp,2))
# Add the runtiming to the output object
results$runTime <- temp
cat("\n\n")
cat(paste("*** end of fit *** ", date()))
sink()
# save the report to a file?
if(save.output.to.files)writeLines(stdout, results.filename)
results$report <- stdout
## add some of the raw data to the bugs object for simplicity in referencing it later
results$data <- list( time=time, n1=n1, m2=m2, u2=u2,
jump.after=jump.after,
bad.n1=bad.n1, bad.m2=bad.m2, bad.u2=bad.u2,
logitP.cov=logitP.cov,
version=version, date_run=date(),title=title)
results$gof <- gof
return(results)
} ## end of function
|
/scratch/gouwar.j/cran-all/cranData/BTSPAS/R/TimeStratPetersenNonDiagErrorNP_fit.R
|
# 2021-10-23 CJS Added trunc.logitP to deal with extreme values of logitP during plotting
# 2020-12-15 CJS Removed sampfrac from code
# 2020-12-14 CJS All bad.n1 or bad.m2 are set to 0. No NAs allowed for n1 or m2
# Fixed problem with some u2=NA in the GOF computations and plots
# 2020-11-07 CJS Allowed user to specify prior for beta coefficient for logitP
# 2018-12-19 CJS deprecation of use of sampling fraction
# 2018-12-15 CJS converted the muTT and sdTT plots to ggplot objects
# 2018-12-15 CJS tested fixing logitP to certain values especially at the end of the sampling chain
# 2018-12-06 CJS converted report to textConnection() object
# 2018-12-03 CJS converted fit plot to ggplot2
# 2018-12-02 CJS converted trace plots to ggplot2
# 2018-12-01 CJS Converted acf, posterior plots to ggplot2
# 2018-11-28 CJS Fixed problem of large printing being cutoff in results
# 2018-11-25 CJS Removed all references to OpenBugs
# 2014-09-01 CJS conversion to jags
# 2012-08-30 CJS fixed NAs problem in any() and all() in error checking
# 2011-06-13 CJS added p-values to results
# 2010-02-21 CJS changed u2 to new.u2 in expanded.m2 section
# 2010-11-25 CJS pretty printing of final population estimates
# 2010-09-06 CJS forced input vectors to be vectors
# 2010-08-06 CJS produced traceplot
# 2010-08-04 CJS added version/date to final result
# 2010-03-12 CJS added n.chains etc to calling arguments
# 2010-03-03 CJS allowed the user to fix some logitPs to account for times when no sampling (or other events
#            with known probability of capture) take place
# 2009-12-08 CJS added some basic error checking on arguments
# 2009-12-07 CJS added bad.n1, bad.u2 arguments and fixups
# 2009-12-01 CJS added open/winbugs directory to argument list
#' Wrapper (*_fit) to fit the Time Stratified Petersen Estimator
#' with NON Diagonal Entries function.
#'
#' Takes the number of marked fish released, the number of recaptures, and the
#' number of unmarked fish and uses Bayesian methods to fit a spline
#' through the population numbers and a hierarchical model for the trap
#' efficiencies over time. The output is written to files and an MCMC object
#' is also created with samples from the posterior.
#'
#' Normally the user makes a call to the *_fit function which then calls the
#' fitting function.
#'
#' Use the \code{\link{TimeStratPetersenDiagError_fit}} function for cases
#' where recaptures take place ONLY in the stratum of release, i.e. the
#' diagonal case.
#'
#' The non-diagonal case fits a log-normal distribution for the travel time.
#' The *NP functions fit a non-parametric distribution for the travel times.
#' The *MarkAvail functions extend the *NP functions to allow for reductions in
#' mark availability because of fall back, immediate tagging mortality, etc.
#'
#'
#' @template title
#' @template prefix
#' @template time
#' @template n1
#' @param m2 A numeric matrix of the number of fish released in stratum [i] and
#' recovered in [j-1] strata later. For example m2[3,5] is the number of
#' marked fish released in stratum 3 and recovered 4 strata later in stratum 7.
#' The first column is the number of marked fish recovered in the stratum of
#' release, i.e. 0 strata later. Use the
#' \code{\link{TimeStratPetersenDiagError_fit}} function for cases where
#' recaptures take place ONLY in the stratum of release, i.e. the diagonal
#' case.
#' @template u2.ND
#' @template sampfrac
#' @template jump.after
#' @template bad.n1
#' @template bad.m2
#' @template bad.u2
#' @template logitP.cov
#' @template param.logitP.fixed
#' @template mcmc-parms
#' @template tauU.alpha.beta
#' @template taueU.alpha.beta
#' @template prior.beta.logitP.mean.sd
#' @template tauP.alpha.beta
#' @template run.prob
#' @template debug
#' @template InitialSeed
#' @template save.output.to.files
#' @template trunc.logitP
#' @return An MCMC object with samples from the posterior distribution. A
#' series of graphs and text file are also created in the working directory.
#' @template author
#' @template references
#' @keywords ~models ~smooth
#' @examples
#'
#' ##---- See the vignettes for examples of how to use this package
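#' ## A minimal sketch of a call with entirely made-up numbers (hypothetical data,
#' ## chosen only to satisfy the input checks); see the vignettes for real analyses.
#' \dontrun{
#' n1 <- c(80, 95, 70)                      # fish marked and released in 3 strata
#' m2 <- matrix(c( 8, 4, 1,
#'                10, 5, 2,
#'                 7, 3, 1),
#'              nrow=3, byrow=TRUE)         # recoveries 0, 1, and 2 strata later
#' u2 <- c(250, 300, 320, 200, 150)         # unmarked fish in 5 recovery strata
#' fit <- TimeStratPetersenNonDiagError_fit(
#'          title="Hypothetical example", prefix="hypo",
#'          time=1:5, n1=n1, m2=m2, u2=u2,
#'          n.chains=3, n.iter=10000, n.burnin=5000, n.sims=500,
#'          save.output.to.files=FALSE)
#' }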
#'
#' @export TimeStratPetersenNonDiagError_fit
#' @importFrom stats runif var sd
TimeStratPetersenNonDiagError_fit <-
function( title="TSPNDE",
prefix="TSPNDE-", time, n1, m2, u2,
sampfrac=rep(1,length(u2)), jump.after=NULL,
bad.n1=c(), bad.m2=c(), bad.u2=c(),
logitP.cov=as.matrix(rep(1,length(u2))),
logitP.fixed=NULL, logitP.fixed.values=NULL,
n.chains=3, n.iter=200000, n.burnin=100000, n.sims=2000,
tauU.alpha=1,tauU.beta=.05,
taueU.alpha=1, taueU.beta=.05,
prior.beta.logitP.mean = c(logit(sum(m2,na.rm=TRUE)/sum(n1,na.rm=TRUE)),rep(0, ncol(as.matrix(logitP.cov))-1)),
prior.beta.logitP.sd = c(2, rep(10, ncol(as.matrix(logitP.cov))-1)),
tauP.alpha=.001,
tauP.beta=.001,
run.prob=seq(0,1,.1), # what percentiles of run timing are wanted
debug=FALSE, debug2=FALSE,
InitialSeed=ceiling(stats::runif(1,min=0,1000000)),
save.output.to.files=TRUE,
trunc.logitP=15) {
# Fit a Time Stratified Petersen model with NON-diagonal entries and with smoothing on U allowing for random error
# This is the classical stratified Petersen model where the recoveries can take place for this and multiple
# strata later
#
version <- '2021-11-02'
options(width=200)
# Input parameters are
# title - title for the analysis (character string)
# prefix - prefix used for files created with the analysis results
# this should be in standard Window's format, eg. JC-2002-ST-TSPNDE
# to which is appended various suffixes for plots etc (character string)
# time - vector of stratum numbers. For example, 9:38 would indicate that the
# Trinity River system sampled weeks 9 to 38.
# These values are never used in the analysis and only serve as labels for the weeks and for plotting purposes.
# They should be contiguous equally spaced values and be the same length as u2.
# n1, m2, u2 - the input data consisting of fish marked and released, recapture, and unmarked captured
# Note that m2 is a MATRIX. The first column are those fish recaptured in the stratum of release
# and the subsequent columns are those recoveries in later strata.
#              This is expanded to the full matrix [i,j] for fish released in stratum i and recovered in stratum j
# The vector u2 should be long enough to account for any fish that are recaptured later on
# from releases late in the season. The bottom right diagonal of m2 may be all zeros - that is ok
#              Notice that length(u2) can be longer than length(n1).
# sampfrac - Deprecated. DO NOT USE ANYMORE.
# jump.after - in some cases, a single spline is still not flexible enough to cope with rapid
# changes in the run curve. For example, in the Trinity River project, a larger
# hatchery release occurs around stratum 14. This is a vector indicating the
# strata AFTER which the spline curve is allowed to jump.
# null or vector of arbitrary length.
# bad.n1 - vector of stratum numbers where the value of n1 is suspect.
# bad.m2 - vector of stratum numbers where the value of m2 is suspect.
# For example, the capture rate could be extremely low.
#               These (n1 and the corresponding m2 rows) are set to 0 prior to the call to JAGS
#    bad.u2  - vector of stratum numbers where the value of u2 is suspect.
#               These are set to NA prior to the call to JAGS
# logitP.cov - matrix of covariates for logit(P). If the strata times are "missing" some values, an intercept is assumed
# for the first element of the covariance matrix and 0 for the rest of the covariates.
# CAUTION - this MAY not be what you want to do. It is likely best to enter ALL strata
# if you have any covariates. The default, if not specified, is a constant (the mean logit)
# logitP.fixed - vector of time values where the logitP will be specified in advance. Typically this occurs
# when no sampling takes place and logitP <- logit(0) = -10
# logitP.fixed.values - vector of fixed values (on the logit scale). Use -10 for p[i] <- 0, and 10 for p[i] <- 1
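#               For example, a fixed value of -10 corresponds to p[i] = expit(-10),
#               roughly 0.000045, i.e. effectively no capture in that stratum.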
# tauU.alpha, tauU.beta - parameters for the prior on variance in spline coefficients
# taueU.alpha, taueU.beta - parameters for the prior on variance in log(U) around fitted spline
# prior.beta.logitP.mean, prior.beta.logitP.sd - parameters for the prior on mean logit(P)'s [The intercept term]
# The other covariates are assigned priors of a mean of 0 and a sd of 30
# tauP.alpha, tauP.beta - parameters for the prior on 1/var of residual error in logit(P)'s
# run.prob - percentiles of run timing wanted
# debug - if TRUE, then this is a test run with very small MCMC chains run to test out the data
#            and JAGS will run and stop waiting for you to exit and complete
# force input vectors to be vectors as needed. Note that m2 is NOT a vector!
time <- as.vector(time)
n1 <- as.vector(n1)
u2 <- as.vector(u2)
sampfrac <- as.vector(sampfrac)
# Do some basic error checking
# 1. Check that length of n1, m2, u2, sampfrac, time are consistent with each other.
# In the non-diagonal case, they don't have to match
if(length(n1)!=nrow(m2)){
cat("***** ERROR ***** Length of n1 and number of rows of m2 must be equal. They are:",
length(n1)," ",nrow(u2),"\n")
return()}
if(!is.numeric(n1)){
cat("***** ERROR ***** n1 must be numeric. You have:",
paste(n1,collapse=", "),"\n")
return()}
if(any(is.na(n1))){
cat("***** ERROR ***** All values of n1 must not be missing. You have: ",
paste(n1,collapse=", "),"\n")
return()}
if(any(n1 < 0, na.rm=TRUE)){
cat("***** ERROR ***** All values of n1 must be non-negative. You have: ",
paste(n1,collapse=", "),"\n")
return()}
if(stats::var(c(length(u2),length(sampfrac),length(time)))>0){
cat("***** ERROR ***** Lengths of u2, sampfrac, time must all be equal. They are:",
length(u2)," ",length(sampfrac)," ",length(time),"\n")
return()}
if(length(logitP.cov) %% length(u2) != 0){
cat("***** ERROR ***** Dimension of covariate vector doesn't match length of u2. They are:",
length(u2)," ",length(logitP.cov)," ",dim(logitP.cov),"\n")
return()}
# 2. Check that rowsum of m2<= n1
if(any(apply(m2,1,sum, na.rm=TRUE)>n1)){
cat("***** ERROR ***** m2[i,+] must be <= n1[i]. The arguments are \n n1:",
paste(n1,collapse=","),"\n m2:",
paste(m2,collapse=","),"\n")
return()}
# 3. Elements of bad.m2 and jump.after must belong to time
if(!all(bad.n1 %in% time,na.rm=TRUE)){
cat("***** ERROR ***** bad.n1 must be elements of strata identifiers. You entered \n bad.n1:",
paste(bad.n1,collapse=","),"\n Strata identifiers are \n time:",
paste(time, collapse=","), "\n")
return()}
if(!all(bad.m2 %in% time,na.rm=TRUE)){
cat("***** ERROR ***** bad.m2 must be elements of strata identifiers. You entered \n bad.m2:",
paste(bad.m2,collapse=","),"\n Strata identifiers are \n time:",
paste(time, collapse=","), "\n")
return()}
if(!all(bad.u2 %in% time,na.rm=TRUE)){
cat("***** ERROR ***** bad.u2 must be elements of strata identifiers. You entered \n bad.u2:",
paste(bad.u2,collapse=","),"\n Strata identifiers are \n time:",
paste(time ,collapse=","), "\n")
return()}
if(!all(jump.after %in% time,na.rm=TRUE)){
cat("***** ERROR ***** jump.after must be elements of strata identifiers. You entered \n jump.after:",
paste(jump.after,collapse=","),"\n Strata identifiers are \n time:",
paste(time, collapse=","), "\n")
return()}
# 4. check that index of logitP.fixed belong to time
if(!all(logitP.fixed %in% time,na.rm=TRUE)){
cat("***** ERROR ***** logitP.fixed must be elements of strata identifiers. You entered \n logitP.fixed:",
paste(logitP.fixed,collapse=","),"\n Strata identifiers are \n time:",
paste(time ,collapse=","), "\n")
return()}
if(length(logitP.fixed)!=length(logitP.fixed.values)){
cat("***** ERROR ***** Lengths of logitP.fixed and logitP.fixed.values must all be equal. They are:",
length(logitP.fixed)," ",length(logitP.fixed.values),"\n")
return()}
# Check that that the prior.beta.logitP.mean and prior.beta.logitP.sd length=number of columns of covariates
logitP.cov <- as.matrix(logitP.cov)
if(!is.vector(prior.beta.logitP.mean) | !is.vector(prior.beta.logitP.sd)){
stop("prior.beta.logitP.mean and prior.beta.logitP.sd must be vectors")
}
if(!is.numeric(prior.beta.logitP.mean) | !is.numeric(prior.beta.logitP.sd)){
stop("prior.beta.logitP.mean and prior.beta.logitP.sd must be numeric")
}
if(length(prior.beta.logitP.mean) != ncol(logitP.cov) | length(prior.beta.logitP.sd) != ncol(logitP.cov)){
stop("prior.beta.logitP.mean and prior.beta.logitP.sd must be same length as number columns in covariate matrix")
}
# Deprecation of sampling fraction.
if(any(sampfrac != 1)){
cat("***** ERROR ***** Sampling fraction is deprecated for any values other than 1. DO NOT USE ANYMORE. ")
return()
}
results.filename <- paste(prefix,"-results.txt",sep="")
stdout <- vector('character')
report <- textConnection('stdout', 'wr', local = TRUE)
sink(report)
cat(paste("Time Stratified Petersen with Non-Diagonal recaptures, error in smoothed U, and log-normal distribution for travel time - ", date()))
cat("\nVersion: ", version)
cat("\n\n", title, "Results \n\n")
cat("*** Raw data *** (padded to match length of u2) \n")
jump.indicator <- rep(' ', length(u2))
jump.indicator[time %in% jump.after]<- '***'
ex.n1 <- c(n1, rep(NA, length(u2)-length(n1)))
ex.m2 <- rbind(m2,matrix(NA, nrow=length(u2)-length(n1), ncol=ncol(m2)))
temp<- data.frame(time=time, n1=ex.n1, m2=ex.m2, u2=u2, logitP.cov=logitP.cov, jump=jump.indicator)
print(temp)
cat("\n\n")
cat("Jump point are after strata: ", jump.after)
if(length(jump.after)==0) cat("none - A single spline is fit")
cat("\nFixed logitP indices are: ", logitP.fixed)
if(length(logitP.fixed)==0) cat("none - NO fixed values")
cat("\nFixed logitP values are: ", logitP.fixed.values)
if(length(logitP.fixed)==0) cat("none - NO fixed values")
# Obtain the Pooled Petersen estimator prior to fixup of bad.n1, bad.m2, and bad.u2 values
cat("\n\n*** Pooled Petersen Estimate prior to fixing bad n1, m2, or u2 values ***\n\n")
temp.n1 <- n1
temp.m2 <- m2
temp.u2 <- u2
cat("Total n1=", sum(temp.n1,na.rm=TRUE),"; m2=",sum(temp.m2,na.rm=TRUE),"; u2=",sum(temp.u2,na.rm=TRUE),"\n\n")
pp <- SimplePetersen(sum(temp.n1,na.rm=TRUE), sum(temp.m2,na.rm=TRUE), sum(temp.u2,na.rm=TRUE))
cat("Est U(total) ", format(round(pp$U.est),big.mark=",")," (SE ", format(round(pp$U.se), big.mark=","), ")\n")
cat("Est N(total) ", format(round(pp$N.est),big.mark=",")," (SE ", format(round(pp$N.se), big.mark=","), ")\n\n\n")
# Obtain the Pooled Petersen estimator after removal of entries with bad.n1, m2, or u2 values
select.rel <- !(time[1:length(n1)] %in% bad.n1 | time[1:length(n1)] %in% bad.m2 )
select.rec <- ! time %in% bad.u2
cat("\n\n*** Pooled Petersen Estimate after removing release and recovery strata flagged as bad ***\n\n")
cat("The following release strata were excluded:",
if(length(time[!select.rel])>0){time[!select.rel]} else {" NONE"}, "\n")
cat("The following recovery strata were excluded:",
if(length(time[!select.rec])>0){time[!select.rec]} else {" NONE"}, "\n")
temp.n1 <- n1[select.rel]
temp.m2 <- m2[select.rel, ,drop=FALSE]
temp.u2 <- u2[select.rec]
cat("Total n1=", sum(temp.n1,na.rm=TRUE),"; m2=",sum(temp.m2,na.rm=TRUE),"; u2=",sum(temp.u2, na.rm=TRUE),"\n\n")
pp <- SimplePetersen(sum(temp.n1,na.rm=TRUE), sum(temp.m2,na.rm=TRUE), sum(temp.u2,na.rm=TRUE))
cat("Est U(total) ", format(round(pp$U.est),big.mark=",")," (SE ", format(round(pp$U.se), big.mark=","), ")\n")
cat("Est N(total) ", format(round(pp$N.est),big.mark=",")," (SE ", format(round(pp$N.se), big.mark=","), ")\n\n\n")
# Test if pooling can be done
# We only do the release strata that are not flagged as bad and have no missing values
cat("*** Test if pooled Petersen is allowable. [Check if equal recovery from each stratum not flagged and without missing recoveries] ***\n\n")
#browser()
select <- select.rel & (!is.na(apply(m2,1,sum)))
temp.n1 <- n1[select]
temp.m2 <- m2[select, ,drop=FALSE]
test <- TestIfPool( temp.n1, apply(temp.m2,1,sum, na.rm=TRUE))
cat("(Large Sample) Chi-square test statistic ", test$chi$statistic," has p-value", test$chi$p.value,"\n\n")
temp <- cbind(time[1:length(n1)][select],test$chi$observed, round(test$chi$expected,1), round(test$chi$residuals^2,1))
colnames(temp) <- c('time','n1-m2','m2','E[n1-m2]','E[m2]','X2[n1-m2]','X2[m2]')
print(temp)
cat("\n Be cautious of using this test in cases of small expected values. \n\n")
# Adjust the data for the explicitly bad values or other problems
new.time <- time
new.n1 <- n1
new.m2 <- m2
new.u2 <- u2
new.logitP.cov <- logitP.cov
# Set bad n1 values to 0 (no fish released) and the corresponding rows of m2 to 0 (no subsequent recoveries).
# Similarly, set the releases and subsequent recoveries to 0 for strata flagged in bad.m2.
# But we don't set bad u2 values to 0 as this would imply no catch; we set these to missing.
new.n1[time[1:length(n1)] %in% c(bad.n1,bad.m2) ] <- 0
new.m2[time[1:length(n1)] %in% c(bad.m2,bad.n1),] <- 0
new.u2[time %in% bad.u2] <- NA
# Print out the revised data
cat("\n\n*** Revised data after setting flagged strata to 0 (releases) or NA (recoveries) *** \n")
jump.indicator <- rep(' ', length(u2))
jump.indicator[time %in% jump.after]<- '***'
ex.n1 <- c(new.n1, rep(NA, length(new.u2)-length(new.n1)))
ex.m2 <- rbind(new.m2,matrix(NA, nrow=length(new.u2)-length(new.n1), ncol=ncol(new.m2)))
temp<- data.frame(time=new.time, n1=ex.n1, m2=ex.m2, u2=new.u2, logitP.cov=new.logitP.cov,
jump.after=jump.indicator)
print(temp)
cat("\n\n")
# Need to expand the m2 matrix to the full Nstrata.rel x Nstrata.cap+1 matrix with
# the [i,j] entry for the number of fish released in stratum i and recaptured in stratum j
# The last column of the expanded m2 matrix is the number of fish released but never recaptured later.
expanded.m2 <- matrix(0, nrow=length(new.n1), ncol=length(new.u2)+1)
for(i in 1:length(new.n1)){
expanded.m2[i,1:length(new.u2)] <- c(rep(0,i-1),new.m2[i,],rep(0,length(new.u2)))[1:length(new.u2)]
expanded.m2[i,length(new.u2)+1] <- new.n1[i] - sum(new.m2[i,])
}
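# Illustrative sketch (hypothetical numbers, for reading the code only): with 2 release strata,
# 3 recovery strata, new.n1 = c(10, 20) and
#    new.m2 = rbind(c(3, 1),          # stratum-1 releases recovered with delays 0 and 1
#                   c(4, 2))          # stratum-2 releases recovered with delays 0 and 1
# the loop above gives
#    expanded.m2 = rbind(c(3, 1, 0,  6),   # 10 - (3+1) = 6 never recaptured
#                        c(0, 4, 2, 14))   # 20 - (4+2) = 14 never recaptured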
cat("*** Expanded m2 array ***\n\n")
save.max.print <- getOption("max.print")
options(max.print=.Machine$integer.max)
print(expanded.m2)
options(max.print=save.max.print)
# assign the logitP fixed values etc.
new.logitP.fixed <- rep(NA, length(new.u2))
new.logitP.fixed[match(logitP.fixed, time)] <- logitP.fixed.values
# Print out information on the prior distributions used
cat("\n\n*** Information on priors *** \n")
cat(" Parameters for prior on tauU (variance in spline coefficients: ", tauU.alpha, tauU.beta,
" which corresponds to a mean/std dev of 1/var of:",
round(tauU.alpha/tauU.beta,2),round(sqrt(tauU.alpha/tauU.beta^2),2),"\n")
cat(" Parameters for prior on taueU (variance of log(U) about spline: ",taueU.alpha, taueU.beta,
" which corresponds to a mean/std dev of 1/var of:",
round(taueU.alpha/taueU.beta,2),round(sqrt(taueU.alpha/taueU.beta^2),2),"\n")
cat(" Parameters for prior on beta.logitP[1] (intercept) (mean, sd): \n", cbind(round(prior.beta.logitP.mean,3), round(prior.beta.logitP.sd,5)),"\n")
cat(" Parameters for prior on tauP (residual variance of logit(P) after adjusting for covariates: ",tauP.alpha, tauP.beta,
" which corresponds to a mean/std dev of 1/var of:",
round(tauP.alpha/tauP.beta,2),round(sqrt(tauP.alpha/tauP.beta^2),2),"\n")
cat("\n\nInitial seed for this run is: ",InitialSeed, "\n")
sink()
if (debug2) {
cat("\nprior to formal call to TimeStratPetersenNonDiagError\n")
browser()
}
if (debug)
{results <- TimeStratPetersenNonDiagError(
title=title, prefix=prefix,
time=new.time, n1=new.n1, m2=expanded.m2, u2=new.u2,
jump.after=(1:length(u2))[time %in% jump.after],
logitP.cov=new.logitP.cov, logitP.fixed=new.logitP.fixed,
n.chains=3, n.iter=10000, n.burnin=5000, n.sims=500, # set to small values for debugging only
prior.beta.logitP.mean=prior.beta.logitP.mean,
prior.beta.logitP.sd =prior.beta.logitP.sd,
tauU.alpha=tauU.alpha, tauU.beta=tauU.beta, taueU.alpha=taueU.alpha, taueU.beta=taueU.beta,
debug=debug, debug2=debug2, InitialSeed=InitialSeed, save.output.to.files=save.output.to.files)
}
else #notice R syntax requires { before the else
{results <- TimeStratPetersenNonDiagError(
title=title, prefix=prefix,
time=new.time, n1=new.n1, m2=expanded.m2, u2=new.u2,
jump.after=(1:length(u2))[time %in% jump.after],
logitP.cov=new.logitP.cov, logitP.fixed=new.logitP.fixed,
n.chains=n.chains, n.iter=n.iter, n.burnin=n.burnin, n.sims=n.sims,
prior.beta.logitP.mean=prior.beta.logitP.mean,
prior.beta.logitP.sd =prior.beta.logitP.sd,
tauU.alpha=tauU.alpha, tauU.beta=tauU.beta, taueU.alpha=taueU.alpha, taueU.beta=taueU.beta,
debug=debug, debug2=debug2, InitialSeed=InitialSeed, save.output.to.files=save.output.to.files)
}
# Now to create the various summary tables of the results
Nstrata.rel <- length(n1)
Nstrata.cap <- ncol(expanded.m2) -1 # don't forget that last column of m2 is number of fish never seen
# A plot of the observed log(U) on the log scale, and the final mean log(U)
plot.df <- data.frame(time =new.time)
plot.df$logUi <-log( c((new.u2[1:Nstrata.rel]+1)*(new.n1+2)/(apply(expanded.m2[,1:Nstrata.cap],1,sum)+1), rep(NA, length(u2)-Nstrata.rel)))
# extract the fitted U values
results.row.names <- rownames(results$summary)
etaU.row.index <- grep("etaU", results.row.names)
etaU<- results$summary[etaU.row.index,]
plot.df$logU =etaU[,"mean"]
plot.df$logUlcl =etaU[,"2.5%"]
plot.df$logUucl =etaU[,"97.5%"]
# extract the spline values
logUne.row.index <- grep("logUne", results.row.names)
logUne<- results$summary[logUne.row.index,"mean"]
plot.df$spline <- results$summary[logUne.row.index,"mean"]
# add limits to the plot to avoid non-monotone secondary axis problems with extreme values
plot.df$logUi <- pmax(-10 , pmin(20, plot.df$logUi))
plot.df$logU <- pmax(-10 , pmin(20, plot.df$logU ))
plot.df$logUlcl <- pmax(-10 , pmin(20, plot.df$logUlcl ))
plot.df$logUucl <- pmax(-10 , pmin(20, plot.df$logUucl ))
plot.df$spline <- pmax(-10 , pmin(20, plot.df$spline))
fit.plot <- ggplot(data=plot.df, aes_(x=~time))+
ggtitle(title, subtitle="Fitted spline curve with 95% credible intervals for estimated log(U[i])")+
geom_point(aes_(y=~logUi), color="red", shape=1)+ # open circle
xlab("Time Index\nOpen/closed circles - initial and final estimates")+
ylab("log(U[i]) + 95% credible interval")+
geom_point(aes_(y=~logU), color="black", shape=19)+
geom_line (aes_(y=~logU), color="black")+
geom_errorbar(aes_(ymin=~logUlcl, ymax=~logUucl), width=.1)+
geom_line(aes_(y=~spline),linetype="dashed")+
scale_x_continuous(breaks=seq(min(plot.df$time, na.rm=TRUE),max(plot.df$time, na.rm=TRUE),2))+
scale_y_continuous(sec.axis = sec_axis(~ exp(.), name="U + 95% credible interval",
breaks=c(1,10,20,50,
100,200,500,
1000,2000,5000,
10000,20000, 50000,
100000,200000, 500000,
1000000,2000000,5000000,10000000),
labels = scales::comma))
if(save.output.to.files)ggsave(plot=fit.plot, filename=paste(prefix,"-fit.pdf",sep=""), height=6, width=10, units="in")
results$plots$fit.plot <- fit.plot
# plot of the logit(p) over time
logitP.plot <- plot_logitP(title=title, time=new.time, n1=new.n1, m2=expanded.m2, u2=new.u2,
logitP.cov=new.logitP.cov, results=results,
trunc.logitP=trunc.logitP)
if(save.output.to.files)ggsave(plot=logitP.plot, filename=paste(prefix,"-logitP.pdf",sep=""), height=6, width=10, units="in")
results$plots$logitP.plot <- logitP.plot
# Look at autocorrelation function for Ntot
mcmc.sample <- data.frame(parm="Utot", sample=results$sims.matrix[,"Utot"], stringsAsFactors=FALSE)
acf.Utot.plot <- plot_acf(mcmc.sample)
if(save.output.to.files)ggsave(plot=acf.Utot.plot, filename=paste(prefix,"-Utot-acf.pdf",sep=""), height=4, width=6, units="in")
results$plots$acf.Utot.plot <- acf.Utot.plot
# plot the posterior plots
mcmc.sample1 <- data.frame(parm="Utot", sample=results$sims.matrix[,"Utot"], stringsAsFactors=FALSE)
mcmc.sample2 <- data.frame(parm="Ntot", sample=results$sims.matrix[,"Ntot"], stringsAsFactors=FALSE)
mcmc.sample <- rbind(mcmc.sample1, mcmc.sample2)
post.UNtot.plot <- plot_posterior(mcmc.sample)
post.UNtot.plot
if(save.output.to.files)ggsave(plot=post.UNtot.plot, filename=paste(prefix,"-UNtot-posterior.pdf",sep=""),
height=ifelse(length(unique(mcmc.sample$parm))<=2,4,6), width=6, units="in")
results$plots$post.UNtot.plot <- post.UNtot.plot
# plot the mean and sd log(travel times) (the muLogTT and the sdLogTT) vs release stratum number
#browser()
results.row.names <- rownames(results$summary)
muLogTT.row.index <- grep("muLogTT", results.row.names)
muLogTT<- data.frame(results$summary[muLogTT.row.index,])
muLogTT$stratum <- new.time[1:Nstrata.rel]
muLogTT$source <- "Mean log(travel time)"
results.row.names <- rownames(results$summary)
sdLogTT.row.index <- grep("sdLogTT", results.row.names)
sdLogTT<- data.frame(results$summary[sdLogTT.row.index,])
sdLogTT$stratum <- new.time[1:Nstrata.rel]
sdLogTT$source <- "SD log(travel time)"
plotdata <- rbind( muLogTT, sdLogTT)
musdLogTT.plot <- ggplot(data=plotdata, aes_(x=~stratum, y=~mean))+
ggtitle(title)+
geom_point()+
geom_line()+
geom_errorbar(aes_(ymin=~X2.5., ymax=~X97.5.), width=.1)+
xlab("Stratum")+ylab("mean/sd log(travel time)")+
facet_wrap(~source, ncol=1, scales="free_y")
musdLogTT.plot
if(save.output.to.files)ggsave(plot=musdLogTT.plot, filename=paste(prefix,"-musdLogTT.pdf",sep=""),
height=6, width=6, units="in")
results$plots$musdLogTT.plot <- musdLogTT.plot
## save the Bayesian predictive distribution (Bayesian p-value plots)
discrep <-PredictivePosterior.TSPNDE (new.n1, expanded.m2, new.u2,
new.logitP.fixed,
expit(results$sims.list$logitP),
round(results$sims.list$U),
results$sims.list$muLogTT,
results$sims.list$sdLogTT)
#browser()
gof <- PredictivePosteriorPlot.TSPNDE (discrep)
if(save.output.to.files)ggsave(gof[[1]],filename=paste(prefix,"-GOF.pdf",sep=""), height=12, width=8, units="in", dpi=300 )
results$plots$gof.plot <- gof
# create traceplots of logU, U, and logitP (along with R value) to look for non-convergence
# the plot_trace will return a list of plots (one for each page as needed)
varnames <- names(results$sims.array[1,1,]) # extract the names of the variables
#browser()
# Trace plots of logitP
trace.plot <- plot_trace(title=title, results=results, parms_to_plot=varnames[grep("^logitP", varnames)])
if(save.output.to.files){
pdf(file=paste(prefix,"-trace-logitP.pdf",sep=""))
plyr::l_ply(trace.plot, function(x){plot(x)})
dev.off()
}
results$plots$trace.logitP.plot <- trace.plot
# now for the traceplots of logU (etaU), Utot, and Ntot
trace.plot <- plot_trace(title=title, results=results, parms_to_plot=varnames[c(grep("Utot",varnames), grep("Ntot",varnames), grep("^etaU", varnames))])
if(save.output.to.files){
pdf(file=paste(prefix,"-trace-logU.pdf",sep=""))
plyr::l_ply(trace.plot, function(x){plot(x)})
dev.off()
}
results$plots$trace.logU.plot <- trace.plot
sink(report, append=TRUE)
# Global summary of results
cat("\n\n*** Summary of MCMC results *** \n\n")
save.max.print <- getOption("max.print")
options(max.print=.Machine$integer.max)
print(results, digits.summary=3)#, max=.Machine$integer.max)
options(max.print=save.max.print)
cat("\n\n*** Alternate DIC computation based on p_D = var(deviance)/2 \n")
results.row.names <- rownames(results$summary)
deviance.row.index<- grep("deviance", results.row.names)
deviance <- results$summary[deviance.row.index,]
p.D <- deviance["sd"]^2/2
dic <- deviance["mean"]+p.D
cat(" D-bar: ", deviance["mean"],"; var(dev): ", deviance["sd"]^2,
"; p.D: ", p.D, "; DIC: ", dic)
# Summary of population sizes. Add pretty printing of results.
cat("\n\n\n\n*** Summary of Unmarked Population Size ***\n")
temp<- results$summary[ grep("Utot", rownames(results$summary)),]
old.Rhat <- temp["Rhat"]
temp<- formatC(temp, big.mark=",", format="d")
temp["Rhat"] <- formatC(old.Rhat,digits=2,format="f",flag="#")
print(temp, quote=FALSE)
cat("\n\n*** Summary of Total Population Size *** \n")
temp<- results$summary[ grep("Ntot", rownames(results$summary)),]
old.Rhat <- temp["Rhat"]
temp<- formatC(temp, big.mark=",", format="d")
temp["Rhat"] <- formatC(old.Rhat,digits=2,format="f",flag="#")
print(temp, quote=FALSE)
cat("\n\n\n\n*** Summary of Quantiles of Run Timing *** \n")
cat( " This is based on the sample weeks provided and the U[i] values \n")
q <- RunTime(time=time, U=results$sims.list$U, prob=run.prob)
temp <- rbind(apply(q,2,mean), apply(q,2,sd))
rownames(temp) <- c("Mean", "Sd")
print(round(temp,2))
# Add the runtiming to the output object
results$runTime <- temp
cat("\n\n")
cat(paste("*** end of fit *** ", date()))
sink()
# save the report to a file?
if(save.output.to.files)writeLines(stdout, results.filename)
results$report <- stdout
# add some of the raw data to the bugs object for simplicity in referencing it later
results$data <- list( time=time, n1=n1, m2=m2, u2=u2,
jump.after=jump.after,
bad.n1=bad.n1, bad.m2=bad.m2, bad.u2=bad.u2,
logitP.cov=logitP.cov,
version=version, date_run=date(),title=title)
return(results)
} # end of function
|
/scratch/gouwar.j/cran-all/cranData/BTSPAS/R/TimeStratPetersenNonDiagError_fit.R
|
#' Computes and plots posterior distribution of time to get target run size.
#' For example, the time to reach a cumulative run of 10,000 fish.
#'
#' Takes a sim.list object from the MCMC runs, computes the posterior
#' distribution of the time to the target run size, and plots the posterior
#' distribution.
#'
#' @param U Elements of sim.list from MCMC object for U - the estimated run size
#' in each stratum
#' @template time
#' @param targetU The targeted cumulative run size. E.g. 10,000
#' @param file_prefix Character string giving prefix for plot. A plot will be
#' produced of the posterior in the filename
#' paste(file_prefix,"-target.pdf",sep="").
#' @param ci_prob What size of credible interval should be computed?
#' @return A list with a sample of the posterior (index), quantiles
#' (quantiles), mean (mean), median (median), standard deviation (sd), and
#' target value (targetU)
#' @template author
#' @keywords ~models ~plots
#' @examples
#'
#' \dontrun{
#' # Compute the posterior of time to reach 10,000 fish. Results contains the MCMC object
#' #
#' results$TimeToTargetRunSize <- TimeToTargetRunSize(
#' U=results$sims.list$U,
#' time=results$data$time,
#' targetU=10000,
#' file_prefix = 'Time10000')
#'
#' } # end of dontrun
#'
#' @export TimeToTargetRunSize
#' @importFrom stats approx density median quantile sd
TimeToTargetRunSize <- function(U, time, targetU, file_prefix, ci_prob=.95){
#
# Take the results from the MCMC runs and use it to estimate the
# time until the target value of the cumulative U is found. For example,
# what is the time when 20,000 fish have passed through the system.
#
# Arguments:
# U = sim.list from the MCMC run. Usually results$sims.list$U
# time = time values for strata. Usually results$data$time
# targetU = target value
# file_prefix = file prefix for plot
# ci_prob = what level of credible interval do you want?
#
# Returns posterior sample of TimeToTargetRunSize along with summary statistics and quantiles
# Plots the posterior and adds some basic summary statistics on the plot
#
# This will plot the posterior distribution, put the summary values
# of the target distribution on the plot, and return the usual summary
# statistics (mean, median, and credible intervals, and sample from
# the posterior).
index <- rep(0,nrow(U)) #array to save times to reach the target value
for(i in 1:nrow(U)){
cumU <- cumsum(U[i,])
interp <- stats::approx( cumU, 1:ncol(U), xout=targetU, rule=2)  # find when the target value is reached
index[i] <- interp$y
}
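# Worked sketch (hypothetical posterior draw): if U[i,] = c(100, 300, 600) and targetU = 500,
# then cumU = c(100, 400, 1000) and stats::approx() interpolates between stratum 2 (cumU=400)
# and stratum 3 (cumU=1000), returning index[i] = 2 + 100/600 = 2.17 (before the shift below).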
index <- index + min(time) - 1 # convert to strata units
mean.index <- mean(index)
med.index <- stats::median(index)
sd.index <- stats::sd(index)
probs <- c(seq(0,1,.05),round((1-ci_prob)/2,3),round(1-(1-ci_prob)/2,3))
quant.index <- stats::quantile(index, probs, names=TRUE) # get the quantiles
# Generate the density plot and give the relevant statistics as well
pdf(file=paste(file_prefix,"-target.pdf",sep=""))
temp<- stats::density(index)
plot(temp,
main=paste("Posterior distribution of time needed to reach U=",targetU),
xlab="Stratum")
text(min(index),max(temp$y),
label=paste("Mean : ", round(mean.index,1),
"; SD: ", round(sd.index,1),
"; ",round(100*ci_prob,0),"% CI: ",
round(stats::quantile(index,prob=(1-ci_prob)/2),1),
round(stats::quantile(index,prob=1-(1-ci_prob)/2),1)),pos=4)
dev.off()
return( list(targetU=targetU, mean=mean.index, median=med.index, sd=sd.index, quantiles=quant.index, index=index))
}
|
/scratch/gouwar.j/cran-all/cranData/BTSPAS/R/TimeToTargetRunSize.r
|
## 2019-04-24 CJS if sdmuTT=0 in genInitsTTnp gives infinite value for taumuTT. So i set this to a minimum of .01
## if Delta.max=1, then deal with droping row dimensions in np routines.
## 2014-09-01 CJS bug in init.muLogTT which gives log(0) if a release has all recoveries only in the initial stratum
#                 of release. In those cases, I set the mean to those values that are not infinite
## 2013-12-18 CJS Any init.epsilon that correspond to logitP.fixed (typically to -10 or 0 on the p scale) must be set to NA
## 2013-09-04 CJS if any init.epsilon are NA, then set it to the mean of the non-missing values.
## If any of n1, m2, u2 are missing set to average (but m2 <= n1). This tries to keep
## JAGS from wandering off too far and generating nonsense values.
## 2012-02-01 CJS added na.rm=TRUE in computation of pScale to avoid passing NA
## 2011-05-15 CJS limited the etaU=log(U) to a maximum of 15 which corresponds to around 400,000,000 fish.
## 2011-05-09 CJS subtle bug with initial values of epsilon where if fixed values for logitP at the end of the
## experiment, then the initial values for epsilon must be truncated
## Function to generate initial values for each chain of the MCMC algorithm.
##
## Every model requires different initial values, though much of the
## code can be reused.
#' @keywords internal
#' @importFrom stats lm median pnorm rnorm sd var
#' @inheritParams TimeStratPetersenNonDiagErrorNP_fit
genInitsTTln <-
function(n1,m2,u2){
## Generate initial parameters for log-normal travel time model
Nstrata.rel <- length(n1)
Nstrata.cap <- length(u2)
m2dot1 <- apply(m2[,1:Nstrata.cap],1,sum, na.rm=TRUE)
init.muLogTT <- rep(NA,Nstrata.rel)
tmp1 <- (m2dot1>0)
init.muLogTT[tmp1] <- log(pmax(1, (m2[tmp1,1:Nstrata.cap] %*% 1:Nstrata.cap)/(m2dot1[tmp1]) - (1:Nstrata.rel)[tmp1])) # 2014-09-01 added the pmax(1, xxx) to avoid taking log of zero
init.muLogTT[!tmp1] <- mean(init.muLogTT[tmp1])
init.xiMu <- mean(init.muLogTT)
init.tauMu <- 1/max(0.2,stats::var(init.muLogTT)) # avoid variances that are zero
init.etasdLogTT <- log(rep(.5,Nstrata.rel)) # note that log (sd(log travel time)) is being modelled
init.xiSd <- mean(init.etasdLogTT)
init.tauSd <- 1
return(list(muLogTT=init.muLogTT,
xiMu=init.xiMu,
tauMu=init.tauMu,
xiSd=init.xiSd,
tauSd=init.tauSd,
etasdLogTT=init.etasdLogTT))
}
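## Minimal usage sketch (hypothetical data; assumes m2 is the expanded
## Nstrata.rel x (Nstrata.cap+1) matrix whose last column counts fish never recaptured):
##   n1 <- c(10, 20)
##   m2 <- rbind(c(3, 1, 0,  6),
##               c(0, 4, 2, 14))
##   u2 <- c(5, 12, 7)
##   genInitsTTln(n1, m2, u2)   # returns list(muLogTT, xiMu, tauMu, xiSd, tauSd, etasdLogTT)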
genInitsTTnp <- function(n1,m2,u2,Delta.max){
## Generate initial parameters for non-parametric travel time model
Nstrata.rel <- length(n1)
Nstrata.cap <- length(u2)
## Compute empirical theta matrix
init.Theta <- t(sapply(1:Nstrata.rel,function(i){
if(all(is.na(m2[i,])) || sum(m2[i,])==0)
return(rep(NA,Delta.max+1))
else{
thetatmp <- pmax(.01,
pmin(m2[i,-(Delta.max+2)]/sum(m2[i,-(Delta.max+2)],na.rm=TRUE),
.99,na.rm=TRUE)) # CJS 2011-02-16
return(thetatmp/sum(thetatmp))
}
}))
## Compute initial r
#init.delta <- t(apply(as.matrix(init.Theta[,-(Delta.max+1),drop=FALSE]),1, # CJS 2011-02-16 as.matrix added
# function(theta){ # CJS fixed -(Delta.max+1)
# if(length(theta) == 1){theta}
# else {theta/(1-c(0,cumsum(theta[-Delta.max])))}
# }))
init.delta <- as.matrix(apply(init.Theta[,-(Delta.max+1),drop=FALSE],1, # CJS 2019-04-24 dealing with delta.max=1
function(theta){ # CJS fixed -(Delta.max+1)
if(length(theta) == 1){theta}
else {theta/(1-c(0,cumsum(theta[-Delta.max])))}
}))
if(nrow(init.delta)==Delta.max){init.delta <- t(init.delta)}
init.r <- log(init.delta)
## mean and standard deviation of transition probabilties
init.muTT <- apply(logit(init.delta),2,mean,na.rm=TRUE)
init.sdTT <- max(.01,stats::sd(as.vector(t(logit(init.delta)))-init.muTT,na.rm=TRUE))
#browser()
return(list(muTT =init.muTT,
tauTT =1/init.sdTT^2,
r =init.r,
Theta =init.Theta))
}
genInitValsChain <- function(
model,
n1, # Individuals marked per strata at first location
m2, # Individuals recovered at second location
u2, # (List of) unmarked individuals captured per strata with a single spline
Delta.max=NULL, # Max travel time for NP model
logitP.cov, # Covariate matrix for capture probabilities
logitP.fixed=NULL, # Which logitP are fixed (typically to zero)?
SplineDesign, # (List of) design matrix(ces) for splines
hatch.after=NULL, # Date of release for hatchery fish in model with two splines
pScale=1){
#cat("\n** genInitValsChain \n")
#browser()
## Generate initial values for a single chain
Nstrata.rel <- length(n1)
Nstrata.cap <- length(u2)
inits <- list() # Create empty list of initial values
## 1) Travel time parameters (for non-diagonal models only)
if(model %in% "TSPNDE"){
inits <- append(inits,genInitsTTln(n1,m2,u2))
}
if(model %in% "TSPNDENP"){
inits <- append(inits,genInitsTTnp(n1,m2,u2,Delta.max))
}
## 2) Capture probabilities
## 2.1) Compute initial logit capture probabilities
if(model %in% c("TSPDE","TSPDE-WHchinook","TSPDE-WHsteel")){
init.P <- (m2+1)/(n1+2) * pScale
}
if(model %in% c("TSPNDE")){
## Compute expected number of marked fish in each cell
Theta <- t(sapply(1:Nstrata.rel,function(i){
tmp <- stats::pnorm(log(i:Nstrata.cap),inits$muLogTT[i],exp(inits$etasdLogTT[i]))
c(rep(0,(i-1)),tmp - c(0,tmp[-(Nstrata.cap - (i-1))]))
}))
M <- Theta * n1
m2dot2 <- apply(m2[,1:Nstrata.cap],2,sum, na.rm=TRUE)
init.P <- (m2dot2 + 1)/(apply(M,2,sum,na.rm=TRUE) + m2dot2 + 1) * pScale
}
if(model %in% c("TSPNDENP")){
## Compute expected number of marked fish in each cell
N2 <- lapply(1:Nstrata.rel,function(i) inits$Theta[i,]*n1[i])
## Compute expected number of marked fish in each capture strata
n2 <- sapply(1:Nstrata.cap,function(i){
n2tmp <- 0
for(j in max(i-Delta.max,1):min(i,Nstrata.rel))
n2tmp <- N2[[j]][i-j+1] + n2tmp
n2tmp
})
m2dot2 <- sapply(1:Nstrata.cap,function(i){
m2tmp <- 0
for(j in max(i-Delta.max,1):min(i,Nstrata.rel))
m2tmp <- m2[j,i-j+1] + m2tmp
m2tmp
})
init.P <- (m2dot2 + 1)/(n2 + m2dot2 + 1) * pScale
}
init.P <- pmax(.00001,pmin(init.P,.99999)) # constrain p to the interval (.00001, .99999)
init.P[is.na(init.P)] <- mean(init.P,na.rm=TRUE) # remove missing values
init.P[!is.na(logitP.fixed)] <- NA # remove fixed values from initial vector
init.logitP <- logit(init.P) # Compute the logit
## 2.2) Compute associated coefficients for design matrix
#browser()
init.beta.logitP <- as.vector(stats::lm(init.logitP ~ logitP.cov - 1)$coeff)
## 2.3) Set variance for hierarchical model of capture probabilities
if(length(init.beta.logitP)==1)
init.tauP <- 1/stats::var(init.logitP - logitP.cov*init.beta.logitP,na.rm=TRUE)
else
init.tauP <- 1/stats::var(init.logitP - logitP.cov %*% init.beta.logitP,na.rm=TRUE)
init.beta.logitP <- c(init.beta.logitP, 0) # add one extra element so that single beta is still written as a vector
init.beta.logitP[is.na(init.beta.logitP)] <- 0
inits <- append(inits,list(beta.logitP=init.beta.logitP,tauP=as.numeric(init.tauP)))
## 3) Numbers of unmarked individuals per strata (where u2 observed)
## Option 1: Models with one spline
if(model %in% c("TSPDE","TSPNDE","TSPNDENP")){
init.U <- ceiling((u2+1)/init.P)
}
## Option 2: Chinook model with separate splines for wild and hatchery fish
if(model %in% c("TSPDE-WHchinook")){
init.U.W <- ceiling((u2$W+1)/init.P)
init.U.H <- ceiling((u2$H+1)/init.P)
init.U.H[1:hatch.after] <- 0 # no hatchery fish prior to release from hatchery
}
## Option 3: Steelhead model with separate splines for wild, wild YOY, and hatchery fish
if(model %in% c("TSPDE-WHsteel")){
init.U.W.YoY <- ceiling((u2$W.YoY+1)/init.P)
init.U.W.1 <- ceiling((u2$W.1+1)/init.P)
init.U.H.1 <- ceiling((u2$H.1+1)/init.P)
init.U.H.1[1:hatch.after] <- 0 # no hatchery fish prior to release from hatchery
}
## 4) Spline coefficients
## Option 1: Models with one spline
if(model %in% c("TSPDE","TSPNDE","TSPNDENP")){
## 4.1) Fit Spline to strata with u2 observed
tmp1 <- !is.na(init.U)
init.bU <- stats::lm(log(init.U[tmp1]) ~ SplineDesign[tmp1,]-1)$coeff
init.bU[is.na(init.bU)] <- mean(init.bU,na.rm=TRUE) # Fix any coefficients that can't be computed
## 4.2) Compute variance of second differences between coefficients
tmp2 <- 3:length(init.bU)
sigmaU <- stats::sd(init.bU[tmp2]-2*init.bU[tmp2-1]+init.bU[tmp2-2])
init.tauU <- 1/sigmaU^2
inits <- append(inits,list(bU=init.bU,tauU=init.tauU))
}
## Option 2: Chinook model with separate splines for wild and hatchery fish
if(model %in% c("TSPDE-WHchinook")){
## 4.1.a) Fit spline to wild fish
tmp1.W <- !is.na(init.U.W)
init.bU.W <- stats::lm(log(init.U.W[tmp1.W]) ~ SplineDesign$W[tmp1.W,]-1)$coeff
init.bU.W[is.na(init.bU.W)] <- mean(init.bU.W,na.rm=TRUE) # Fix any coefficients that can't be computed
## 4.1.b) Fit spline to hatchery fish
tmp1.H <- c(rep(FALSE,hatch.after),!is.na(init.U.H[-(1:hatch.after)]))
init.bU.H <- stats::lm(log(init.U.H[tmp1.H]) ~ SplineDesign$H[tmp1.H,]-1)$coeff
init.bU.H[is.na(init.bU.H)] <- mean(init.bU.H,na.rm=TRUE) # Fix any coefficients that can't be computed
## 4.2) Variance of second differences between coefficients (use only wild fish to initialize)
tmp2 <- 3:length(init.bU.W)
sigmaU <- stats::sd(init.bU.W[tmp2]-2*init.bU.W[tmp2-1]+init.bU.W[tmp2-2])
init.tauU <- 1/sigmaU^2
inits <- append(inits,list(bU.W=init.bU.W,bU.H=init.bU.H,tauU=init.tauU))
}
## Option 3: Steelhead model with separate splines for wild and hatchery fish
if(model %in% c("TSPDE-WHsteel")){
## 4.1.a) Fit spline to wild YoY fish
tmp1.W.YoY <- !is.na(init.U.W.YoY)
init.bU.W.YoY <- stats::lm(log(init.U.W.YoY[tmp1.W.YoY]) ~ SplineDesign$W.YoY[tmp1.W.YoY,]-1)$coeff
init.bU.W.YoY[is.na(init.bU.W.YoY)] <- mean(init.bU.W.YoY,na.rm=TRUE) # Fix any coefficients that can't be computed
## 4.1.b) Fit spline to wild 1 fish
tmp1.W.1 <- !is.na(init.U.W.1)
init.bU.W.1 <- stats::lm(log(init.U.W.1[tmp1.W.1]) ~ SplineDesign$W.1[tmp1.W.1,]-1)$coeff
init.bU.W.1[is.na(init.bU.W.1)] <- mean(init.bU.W.1,na.rm=TRUE) # Fix any coefficients that can't be computed
## 4.1.c) Fit spline to hatchery fish
tmp1.H.1 <- c(rep(FALSE,hatch.after),!is.na(init.U.H.1[-(1:hatch.after)]))
init.bU.H.1 <- stats::lm(log(init.U.H.1[tmp1.H.1]) ~ SplineDesign$H.1[tmp1.H.1,]-1)$coeff
init.bU.H.1[is.na(init.bU.H.1)] <- mean(init.bU.H.1,na.rm=TRUE) # Fix any coefficients that can't be computed
## 4.2) Variance of second differences between coefficients (use only wild YoY fish to initialize)
tmp2 <- 3:length(init.bU.W.YoY)
sigmaU <- stats::sd(init.bU.W.YoY[tmp2]-2*init.bU.W.YoY[tmp2-1]+init.bU.W.YoY[tmp2-2])
init.tauU <- 1/sigmaU^2
inits <- append(inits,list(bU.W.YoY=init.bU.W.YoY,bU.W.1=init.bU.W.1,
bU.H.1=init.bU.H.1,tauU=init.tauU))
}
## 5) Variance about spline
## Option 1: Models with one spline
if(model %in% c("TSPDE","TSPNDE","TSPNDENP")){
sigmaeU <- stats::sd(log(init.U+1) - SplineDesign %*% init.bU,na.rm=TRUE)
init.taueU <- 1/sigmaeU^2
}
## Option 2: Chinook models with two splines -- use only wild fish to initialize
if(model %in% c("TSPDE-WHchinook")){
sigmaeU <- stats::sd(log(init.U.W+1) - SplineDesign$W %*% init.bU.W,na.rm=TRUE)
init.taueU <- 1/sigmaeU^2
}
## Option 3: Steelhead models with three splines -- use only wild fish to initialize
if(model %in% c("TSPDE-WHsteel")){
sigmaeU <- stats::sd(log(init.U.W.YoY+1) - SplineDesign$W.YoY %*% init.bU.W.YoY,na.rm=TRUE)
init.taueU <- 1/sigmaeU^2
}
inits <- append(inits,list(taueU=init.taueU))
## 6) Initialize missing U values by fitting spline and generating errors
## Option 1: Models with only 1 spline
if(model %in% c("TSPDE","TSPNDE","TSPNDENP")){
if(sum(!tmp1)>0)
init.U[!tmp1] <- ceiling(exp(as.vector(SplineDesign[!tmp1,] %*% init.bU)
+ stats::rnorm(sum(!tmp1),0,sigmaeU))) + 1
init.etaU <- pmin(log(init.U),20) # limit the initial values to reasonable values
inits <- append(inits,list(etaU=init.etaU))
}
## Option 2: Chinook models with two splines
if(model %in% c("TSPDE-WHchinook")){
## Wild fish
if(sum(!tmp1.W)>0)
init.U.W[!tmp1.W] <- ceiling(exp(as.vector(SplineDesign$W[!tmp1.W,] %*% init.bU.W)
+ stats::rnorm(sum(!tmp1.W),0,sigmaeU))) + 1
init.etaU.W <- pmin(log(init.U.W), 15) # limit the initial values to reasonable values
## Hatchery fish
tmp2.H <- tmp1.H[-(1:hatch.after)]
if(sum(!tmp2.H)>0)
init.U.H[!tmp2.H] <- ceiling(exp(as.vector(SplineDesign$H[!tmp2.H,] %*% init.bU.H)
+ stats::rnorm(sum(!tmp2.H),0,sigmaeU))) + 1
init.etaU.H <- c(rep(NA,hatch.after),pmin(20,log(init.U.H[tmp1.H])))
inits <- append(inits,list(etaU.W=init.etaU.W,etaU.H=init.etaU.H))
}
## Option 3: Steelhead models with three splines
if(model %in% c("TSPDE-WHsteel")){
## Wild YoY fish
if(sum(!tmp1.W.YoY)>0)
init.U.W.YoY[!tmp1.W.YoY] <- ceiling(exp(as.vector(SplineDesign$W.YoY[!tmp1.W.YoY,] %*% init.bU.W.YoY)
+ stats::rnorm(sum(!tmp1.W.YoY),0,sigmaeU))) + 1
init.etaU.W.YoY <- pmin(20,log(init.U.W.YoY))
## Wild 1 fish
if(sum(!tmp1.W.1)>0)
init.U.W.1[!tmp1.W.1] <- ceiling(exp(as.vector(SplineDesign$W.1[!tmp1.W.1,] %*% init.bU.W.1)
+ stats::rnorm(sum(!tmp1.W.1),0,sigmaeU))) + 1
init.etaU.W.1 <- pmin(20,log(init.U.W.1))
## Hatchery 1 fish
tmp2.H.1 <- tmp1.H.1[-(1:hatch.after)]
if(sum(!tmp2.H.1)>0)
init.U.H.1[!tmp2.H.1] <- ceiling(exp(as.vector(SplineDesign$H.1[!tmp2.H.1,] %*% init.bU.H.1)
+ stats::rnorm(sum(!tmp2.H.1),0,sigmaeU))) + 1
init.etaU.H.1 <- c(rep(NA,hatch.after),pmin(20,log(init.U.H.1[tmp1.H.1])))
inits <- append(inits,list(etaU.W.YoY=init.etaU.W.YoY,
etaU.W.1=init.etaU.W.1,etaU.H.1=init.etaU.H.1))
}
## 7) Transform initial values for logitP to initial values for epsilon
# If some of the logitP are fixed, you need to set the corresponding value of epsilon to NA
# This is done at the end of these possible model choices.
## Option 1: Models with only one spline
#cat("GenInitVals \n")
#browser()
if(model %in% c("TSPNDE")){
#cat("GenInitVals - setting epsilon: ", model, "\n")
#browser()
init.epsilon <- init.logitP - log(u2 + 1) + inits$etaU
# subtle problem. If the logitP.fixed include elements at the end of the experiment
# then init.epsion needs to be truncated at the end, otherwise JAGS gets upset because these terms never define
# see the TSPND NP routine for a fix in the jags code that should fix this.
#if(length(logitP.fixed)>0){
# for(i in length(logitP.fixed):1){
# if( is.na(logitP.fixed[i])){break}
# init.epsilon <- init.epsilon[-length(init.epsilon)] # drop last term
# }
#}
}
if(model %in% c("TSPDE")){
#cat("GenInitVals - setting epsilon: ", model, "\n")
#browser()
init.epsilon <- init.logitP - log(u2 + 1) + inits$etaU
}
#browser()
if(model %in% c("TSPNDENP")){
#cat("GenInitVals - setting epsilon: ", model, "\n")
#browser()
init.epsilon <- init.logitP - log(u2 + 1) + inits$etaU
# need to add epsilon for extra because of delta-max
init.epsilon <- c(init.epsilon, rep(NA, length(n1)+ncol(m2)-length(u2)-1 )) # extra logits at end for delta max
}
## Option 2: Chinook models with two splines -- use only wild fish to initialize
if(model %in% c("TSPDE-WHchinook")){
init.epsilon <- init.logitP - log(u2$W + 1) + inits$etaU.W
}
## Option 3: Steelhead models with three splines -- use only wild fish to initialize
if(model %in% c("TSPDE-WHsteel")){
init.epsilon <- init.logitP - log(u2$W.YoY + 1) + inits$etaU.W.YoY
}
## Change any missing epsilon values to the mean of the epsilon unless these were from
## logitP values that were fixed. In those cases, the epsilon must remain as missing
init.epsilon[is.na(init.epsilon)] <- mean(init.epsilon, na.rm=TRUE) ## CJS 2013-09-04
init.epsilon[!is.na(logitP.fixed)] <- NA ## CJS 2013-12-17 (if logitP is fixed, don't initialize epsilon)
if(model %in% c("TSPNDENP")){
if( length(n1)+ncol(m2)-length(u2)-1 >0 ){ # extra epsilons added to end which need to be set to na
init.epsilon[ (length(u2)+1):(length(n1)+ncol(m2)-1)]<- NA
}
}
inits <- append(inits,list(epsilon=c(init.epsilon)))
## Remove working objects from the initial values
if(model=="TSPNDENP"){
inits$Theta <- NULL
}
##9. Generate initial values for missing n1, m2, or u2 as the average of the other values (rounded to integers)
if(model %in% c("TSPDE","TSPDE-WHchinook","TSPNDE","TSPNDENP","TSPDE-WHsteel")){
init.n1 <- rep(NA, length(n1))
init.n1[is.na(n1)] <- round(mean(n1, na.rm=TRUE))
if(any(is.na(n1))){inits <- append(inits, list(n1=init.n1))}
}
if(model %in% c("TSPDE","TSPDE-WHchinook","TSPDE-WHsteel")){
init.m2 <- rep(NA, length(m2))
init.m2[is.na(m2)] <- round(pmin(n1[is.na(m2)],mean(m2, na.rm=TRUE)))
if(any(is.na(m2))){inits <- append(inits, list(m2=init.m2))}
}
if(model %in% c("TSPNDE","TSPNDENP")){
# not sure how to initialize bad m2 values for the non-diagonal case
# because m2 is a full matrix with elements arranges diagonally
}
if(model %in% c("TSPDE", "TSPNDE","TSPNDENP")){
init.u2 <- rep(NA, length(u2))
init.u2[is.na(u2)] <- round(mean(u2, na.rm=TRUE))
if(any(is.na(u2))){inits <- append(inits, list(u2=init.u2))}
}
if(model %in% c("TSPDE-WHchinook")){ # u2 is a list with components W and H, A and N
init.u2.A <- rep(NA, length(u2$A))
init.u2.A[is.na(u2$A)] <- pmin(init.U.H[is.na(u2$A)], round(stats::median(u2$A, na.rm=TRUE)))
if(any(is.na(u2$A))){inits <- append(inits, list(u2.A=init.u2.A))}
init.u2.N <- rep(NA, length(u2$N))
init.u2.N[is.na(u2$N)] <- pmin(init.U.W[is.na(u2$N)],round(stats::median(u2$N, na.rm=TRUE))) # This is too strict as some hatchery have no clips
if(any(is.na(u2$N))){inits <- append(inits, list(u2.N=init.u2.N))}
}
if(model %in% c("TSPDE-WHsteel")){ # u2 is a list with components W.YoY, W.1, H.1
init.u2.W.1 <- rep(NA, length(u2$W.1))
init.u2.W.1[is.na(u2$W.1)] <- pmin(init.U.W.1[is.na(u2$W.1)], round(stats::median(u2$W.1, na.rm=TRUE)))
if(any(is.na(u2$W.1))){inits <- append(inits, list(u2.W.1=init.u2.W.1))}
init.u2.W.YoY <- rep(NA, length(u2$W.YoY))
init.u2.W.YoY[is.na(u2$W.YoY)] <- pmin(init.U.W.YoY[is.na(u2$W.YoY)], round(stats::median(u2$W.YoY, na.rm=TRUE)))
if(any(is.na(u2$W.YoY))){inits <- append(inits, list(u2.W.YoY=init.u2.W.YoY))}
init.u2.H.1 <- rep(NA, length(u2$H.1))
init.u2.H.1[is.na(u2$H.1)] <- pmin(init.U.H.1[is.na(u2$H.1)], round(stats::median(u2$H.1, na.rm=TRUE)))
if(any(is.na(u2$H.1))){inits <- append(inits, list(u2.H.1=init.u2.H.1))}
}
return(inits)
}
genInitVals <-function(
model,
n1, # Individuals marked per strata at first location
m2, # Individuals recovered at second location
u2=NULL, # (List of) unmarked individuals captured
Delta.max=NULL, # Max travel time for NP model
logitP.cov, # Covariate matrix for capture probabilities
logitP.fixed=NULL, # Which values of logitP are fixed (typically at zero)?
SplineDesign, # (List of) design matrices for splines for models
hatch.after=NULL, # Date of release for hatchery fish in model with two splines
n.chains=3,
pStep=5){ # Relative change in p between chains
## Determine maximum scaling factor in order to avoid tauP=Inf
pScaleMax <- 1/(1.1*sum(m2,na.rm=TRUE)/sum(n1,na.rm=TRUE))
## Generate initial values for n.chains
inits <- lapply(1:n.chains,function(i){
## Compute scaling factor for ith chain
#cat("\n*** gen init values ***\n")
#browser()
pScale <- min(pStep ^(-(n.chains-1)/2 + (i-1)),pScaleMax,1,na.rm=TRUE)
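# e.g. with the defaults n.chains=3 and pStep=5, chains start from capture probabilities
# scaled by 5^(-1)=0.2, 5^0=1 and 5^1=5, with the larger values capped at min(pScaleMax, 1)
# so that the scaled initial p's cannot exceed 1.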
## Generate initial values
genInitValsChain(model,
n1,m2,u2,Delta.max,
logitP.cov,logitP.fixed,
SplineDesign,hatch.after,
pScale)
})
return(inits)
}
|
/scratch/gouwar.j/cran-all/cranData/BTSPAS/R/initial_values.R
|
#' Logit and anti-logit function.
#'
#' Compute the logit or anti-logit.
#'
#'
#' @aliases logit expit
#' @param p probability between 0 and 1.
#' @param theta logit between -infinity and +infinity
#' @return Computed logit or anti-logit
#' @author C.J.Schwarz \email{cschwarz@@stat.sfu.ca}
#' @keywords ~misc
#' @examples
#'
#' ##---- compute the logit and its inverse
#' logitp <- logit(.3)
#' p <- expit(-.84)
#'
#' @export logit expit
#'
logit <- function(p){
# logit of p
log(p/(1-p))
}
#' @rdname logit
expit <- function(theta){
# anti logit function
1/(1+exp(-theta))
}
|
/scratch/gouwar.j/cran-all/cranData/BTSPAS/R/logit.R
|
#' Create an acf plot of a parameter
#'
#' @param mcmc.sample Data frame with 2 columns: parm and sample. A separate ACF plot is generated for each parameter using facet_wrap.
#' @param ncol Number of columns in the plot.
#'
#' @return acf plot(s) as an ggplot2 object
#' @keywords internal
#' @import plyr ggplot2
#' @importFrom stats acf
#'
plot_acf <- function(mcmc.sample, ncol=2){
acf.parm <- plyr::ddply(mcmc.sample, "parm", function(x){
acf.list <- stats::acf(x$sample, plot=FALSE)
data.frame(lag=acf.list$lag, acf=acf.list$acf, stringsAsFactors=FALSE)
})
acfplot <- ggplot(data=acf.parm, aes_(x =~lag, y =~acf)) +
ggtitle("Autocorrelation")+
geom_hline(aes(yintercept = 0)) +
geom_segment(aes_(xend =~lag, yend = 0))+
facet_wrap(~parm, ncol=ncol)
acfplot
}
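## Minimal usage sketch (hypothetical draws, not run): the input is a long data frame with
## one row per posterior draw and columns 'parm' and 'sample', e.g.
##   mcmc.sample <- data.frame(parm   = rep(c("Utot", "Ntot"), each = 1000),
##                             sample = stats::rnorm(2000),
##                             stringsAsFactors = FALSE)
##   plot_acf(mcmc.sample)   # one ACF panel per parameter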
|
/scratch/gouwar.j/cran-all/cranData/BTSPAS/R/plot_acf.R
|
# 2021-10-24 CJS Truncate the logitP to avoid problems with plotting
# 2014-09-01 CJS First edition of this function
# Take the input values and create a ggplot object for the logitP's with the credible intervals plotted
# Input are the usual data values along with the MCMC results
#' @keywords internal
#' @import ggplot2 plyr
plot_logitP <- function(title, time, n1, m2, u2, logitP.cov, results, trunc.logitP=15){
# Plot the observed and fitted logit(p) values along with posterior limits
# n1, m2, u2 are the raw data
# logitP.cov is the covariate matrix for modelling the logit(P)'s
# results is the summary table from JAGS
#
Nstrata.rel <- length(n1)
Nstrata.cap <- length(u2)
# which rows of the result summary contain the logitP[xx] ?
results.row.names <- rownames(results$summary)
logitP.row.index <- grep("^logitP", results.row.names)
logitP.res<- as.data.frame(results$summary[logitP.row.index,]) # summary statistics
# We need to extract the time index from the row.names
logitP.res$time.index <- aaply(rownames(logitP.res), 1, function(x){
# extract the time index from logitP[xx]
temp <- unlist(strsplit(x, "[", fixed=TRUE))[2]
temp <- unlist(strsplit(temp, "]", fixed=TRUE))[1]
temp <- as.numeric(temp)
temp
})
# Only retain entries in the range of 1... length(u2)
logitP.res <- logitP.res[logitP.res$time.index <= Nstrata.cap,]
logitP.res$time <- time[logitP.res$time.index]
# Set up the bottom axis title
xtitle <- paste("Time\nHorizontal line is estimated beta.logitP[1]",
"\nInner fence is c.i. on beta.logitP[1]",
"\nOuter fence is 95% range on logit(p)")
if(ncol(as.matrix(logitP.cov))>1){
xtitle<-paste(xtitle,"\nDashed line is second covariate")}
# Extract the upper and lower ci
logitP.res$lcl <- logitP.res[, "2.5%"]
logitP.res$ucl <- logitP.res[,"97.5%"]
# apply limits to the points for plotting purposes
logitP.res$lcl <- pmax(-trunc.logitP, pmin(trunc.logitP, logitP.res$lcl))
logitP.res$ucl <- pmax(-trunc.logitP, pmin(trunc.logitP, logitP.res$ucl))
logitP.res$mean <- pmax(-trunc.logitP, pmin(trunc.logitP, logitP.res$mean))
#browser()
myplot <- ggplot(data=logitP.res, aes(x=time, y=mean))+
ggtitle( paste(title,"\nPlot of logit(p[i]) with 95% credible intervals"))+
xlab(xtitle)+ylab("logit(p) + 95% credible interval")+
geom_point(size=3)+
geom_line()+
geom_errorbar(aes(ymin=lcl, ymax=ucl), width=.1)+
scale_x_continuous(breaks=min(logitP.res$time):max(logitP.res$time))+
scale_y_continuous(sec.axis = sec_axis(~ 1/(1+exp(-.)), name="p + 95% credible interval"))
# If this is a non-diagonal case, also plot the raw logits
if(!is.matrix(m2)){
raw_logitP <- pmax(-trunc.logitP, pmin(trunc.logitP,logit((m2+1)/(n1+2))))
myplot <- myplot + annotate("point", x=time, y=raw_logitP, shape=1)
} # based on raw data
# plot the posterior mean of the logitP if there is only one column for a covariate
if(ncol(as.matrix(logitP.cov))==1){ # if only 1 column for covariate vector, usually an intercept
# plot the posterior mean of the beta.logitP[1] term which is usually
# the intercept in most models with covariates along with 95% credible interval
intercept.row.index <- grep("beta.logitP[1]", results.row.names, fixed=TRUE)
intercept <- results$summary[intercept.row.index,]
mean<- pmax(-trunc.logitP, pmin(trunc.logitP, intercept["mean"] ))
lcl <- pmax(-trunc.logitP, pmin(trunc.logitP, intercept["2.5%"] ))
ucl <- pmax(-trunc.logitP, pmin(trunc.logitP, intercept["97.5%"]))
myplot <- myplot +
geom_hline(yintercept=mean)+
geom_hline(yintercept=lcl, linetype=2)+
geom_hline(yintercept=ucl, linetype=2)
# plot the posterior "95% range" for the logit(P)'s based on N(xip, sigmaP^2)
sigmaP.row.index <- grep("sigmaP", results.row.names)
sigmaP <- results$summary[sigmaP.row.index,]
lcl <- pmax(-trunc.logitP, pmin(trunc.logitP,intercept["mean"]-2*sigmaP["mean"]))
ucl <- pmax(-trunc.logitP, pmin(trunc.logitP,intercept["mean"]+2*sigmaP["mean"]))
myplot <- myplot +
geom_hline(yintercept=lcl, linetype=3)+
geom_hline(yintercept=ucl, linetype=3)
}
# plot residuals of the logit(P)'s against the various covariates
# to be done in my next life
return(myplot)
}
|
/scratch/gouwar.j/cran-all/cranData/BTSPAS/R/plot_logit.R
|
#' Create posterior plot of a parameter with credible interval limits shown as vertical lines
#'
#' @param mcmc.sample Data frame with 2 columns: parm and sample. A separate posterior plot is generated for each parameter.
#' @param alpha Used to determine credible interval.
#' @param ncol Number of columns in the plot (default=1).
#' @return Posterior plot(s) as an ggplot2 object
#' @keywords internal
#' @import plyr ggplot2 scales
#' @importFrom stats quantile sd
#'
#'
# R CMD check gets upset with ggplot because it thinks there are no visible bindings for quant etc.
# see https://stackoverflow.com/questions/9439256/how-can-i-handle-r-cmd-check-no-visible-binding-for-global-variable-notes-when
# for how to get around this
plot_posterior <- function(mcmc.sample, alpha=0.05, ncol=1){
qparm <- plyr::ddply(mcmc.sample, "parm", function(x){
quants<- stats::quantile(x$sample, probs=c(alpha/2, 1-alpha/2))
data.frame(quants=quants, stringsAsFactors=FALSE)
})
post_stat <- plyr::ddply(mcmc.sample, "parm", plyr::summarize,
post.mean=signif( mean(sample),5),
post.sd =signif(stats::sd (sample),5))
postplot <- ggplot(data=mcmc.sample, aes_(x = ~sample, y =~ ..density..)) +
ggtitle(paste("Posterior plots with ", formatC(100*(1-alpha), format="f", digits=0),"% credible intervals",sep=""))+
geom_vline(data=qparm, aes_(xintercept =~ quants)) +
geom_density()+
facet_wrap(~parm, ncol=ncol,scales="free")+xlab("Value of parameter")+
geom_text(data=post_stat, aes_(x=Inf, y=Inf, label=~paste("Posterior mean: ",post.mean,"\nPosterior sd : ",post.sd,sep=""),
vjust=1, hjust=1))+
scale_x_continuous(labels=scales::comma)
postplot
}
|
/scratch/gouwar.j/cran-all/cranData/BTSPAS/R/plot_posterior.R
|
# 2020-08-07 CJS ggforce::facet_wrap_paginate() has a bug where it fails if the last page has only a few plots
# we added some dummy parameters to the last page (zzz_dummy1, etc)
#' Creates trace plots of specified parameters showing the multiple chains and
#' the value of Rhat
#'
#' Takes the MCMC object returned from a split and produces trace_plots for the
#' listed parameters. It shows a separate line on the plot for each chain and
#' also shows the value of Rhat
#'
#'
#' @template title
#' @param results The MCMC object containing the results from the call to JAGS
#' @param parms_to_plot A character vector of names of parameters to plot.
#' These must match exactly to the parameter names used in the simulation.
#' @param ncol,nrow How many plots to put on a page (number of rows and columns)
#' @return List of ggplot2 objects using facet_wrap_paginate (...., page=...) with each element of the list
#' corresponding to one page of the plot.
#' @template author
#' @import ggforce reshape2 ggplot2 plyr
#' @keywords internal
#' @examples
#' \dontrun{
#' # Create trace plots of the logitP parameters
#' #
#' # Trace plots of logitP
#' varnames <- names(results$sims.array[1,1,]) # extract the names of the variables
#' trace.plot <- plot_trace(title=title,
#' results=results,
#' parms_to_plot=varnames[grep("^logitP", varnames)])
#' if(save.output.to.files){
#' pdf(file=paste(prefix,"-trace-logitP.pdf",sep=""))
#' plyr::l_ply(trace.plot, function(x){plot(x)})
#' dev.off()
#'}
#' } # end of dontrun
#'
plot_trace <- function(title=" ", results=NULL, parms_to_plot=NULL, nrow=2, ncol=2){
#
# Takes the MCMC object from the fit (could be TSPDE etc), a list of parameters and produces
# the traceplots.
#
# title - title of the plot
# results - the MCMC object containing the necessary information
# parms_to_plot - character vector containing the names of the parms to plot
# e.g. c("logitP[1]", "logitP[2]")
# - this should be an exact match#
varnames <- colnames(results$sims.matrix)
index <- match(parms_to_plot, varnames) # find where these parms exist in the array
trace.df <- reshape2::melt(results$sims.array[,,varnames[index]],
varnames=c("Simulation","Chain","Parameter"),
value.name="Value")
npages <- ceiling(length(index)/ncol/nrow)
# we need to add "extra" panels to deal with facet_wrap_paginate() error when
# last page has only a single plot
#browser()
if(npages*nrow*ncol > length(unique(trace.df$Parameter))){
trace.df <- plyr::rbind.fill(trace.df,
data.frame(Parameter=paste0("zzz_dummy",1:(npages*nrow*ncol - length(unique(trace.df$Parameter)))),
Simulation=1,
Value=0,
Chain=1, stringsAsFactors=TRUE # be sure that ordering is kept of parameter names
)
)
}
allplots <-plyr::llply(1:npages, function(page){
ggplot(data=trace.df, aes_(x=~Simulation, y=~Value, color=~as.factor(Chain)))+
ggtitle(title)+
geom_line()+
scale_color_discrete(name="Chain")+
ggforce::facet_wrap_paginate(~Parameter, ncol=ncol, nrow=nrow, page=page, scales="free_y")
})
allplots
} # end of function
|
/scratch/gouwar.j/cran-all/cranData/BTSPAS/R/plot_trace.R
|
# 2011-03-10 CJS Utility functions for specifying prior for muTT
#' @import utils
#' @importFrom stats rnorm
#****************************************************************************
make.muTT.prior <- function(x){
#
# estimate the muTT prior based on a dirchelete type prior
# x are values representing belief in the travel times.
# For example, x=c(1,4,3,2) represents a system where the
# maximum travel time is 3 strata after release with
# 1/10=.1 of the animals moving in the stratum of release
# 4/10=.4 of the animals taking 1 stratum to move
# etc
#
# So if x=c(10,40,30,20), this represent the same movement pattern
# but a strong degree of belief
#
# We convert these into muTT, which are the means (on the logit scale) of the
# conditional movement probabilities
n <- sum(x)
p <- x/n
ndelta <-(n-cumsum(c(0,x[-length(x)])))
delta<- x/ndelta
delta<- delta[-length(delta)] # drop the last term
sd.delta <- sqrt(delta*(1-delta)/ndelta[-length(ndelta)])
mean.muTT <- logit(delta)
sd.muTT <- sd.delta/delta/(1-delta)
list(mean.muTT=mean.muTT, sd.muTT=sd.muTT)
}
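# Worked sketch: make.muTT.prior(c(1,4,3,2)) gives conditional movement probabilities
# delta = c(1/10, 4/9, 3/5) (move in the stratum of release, 1 stratum later, 2 strata later),
# so mean.muTT = logit(delta) is approximately c(-2.20, -0.22, 0.41) and
# sd.muTT is approximately c(1.05, 0.67, 0.91) on the logit scale.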
#****************************************************************************
visualize.muTT.prior<- function( muTT.prior, npoints=1000, title=NULL){
#
# Visualize the range of movement rates based on the muTT.prior
# We will generate normal distribution based on mean and sd and then
# back transform to actual movement rates which are then plotted
#
# muTT.prior is a list with 2 element. mean.muTT, and sd.muTT
# browser()
p<- matrix(NA, nrow=npoints, ncol=length(muTT.prior$mean.muTT)+1)
for(i in 1:npoints){
# generate the movement probability
muTT <- stats::rnorm(length(muTT.prior$mean.muTT),
mean=muTT.prior$mean.muTT,
sd =muTT.prior$sd.muTT)
# convert these back to probabilities
delta <- expit(muTT)
Theta <- delta[1]
for(j in 2:length(delta)){
Theta <- c(Theta, delta[j]*(1-sum(Theta)))
}
Theta <- c(Theta,1-sum(Theta))
p[i,] <- Theta
}
#browser()
colnames(p) <- seq(0,ncol(p)-1,1)
temp3 <- stack(as.data.frame(p))
#temp3[1:5,]
boxplot( values ~ ind, data=temp3,
main=if(is.null(title)){'Visualization of prior on movement'} else {title},
ylab='P(movement)',
xlab='Time to move')
} # end of function
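# Minimal usage sketch (hypothetical prior belief): visualize the prior implied by the
# worked example above, e.g.
#   visualize.muTT.prior(make.muTT.prior(c(1,4,3,2)), title="Prior on travel time")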
|
/scratch/gouwar.j/cran-all/cranData/BTSPAS/R/prior.muTT.functions.R
|
# 2018-11-27 CJS - removed call to OpenBugs. Left function here in case we want to change samplers in the future.
# 2013-12-30 CJS - function to switch between the three samplers as needed.
# This way we wont't have to modify much code (hopefully)
#' @keywords internal
run.MCMC <-
function(modelFile,
dataFile,
dataList,
initFiles,
initVals,
parameters,
nChains,
nIter,
nBurnin,
nSims,
overRelax=FALSE,
initialSeed,
working.directory,
debug=FALSE){
results <- run.jags(modelFile=modelFile,
dataFile=dataFile,
dataList=dataList,
initFiles=initFiles,
initVals=initVals,
parameters=parameters,
nChains=nChains,
nIter=nIter,
nBurnin=nBurnin,
nSims=nSims,
overRelax=overRelax,
initialSeed=initialSeed,
working.directory=working.directory,
debug=debug)
return(results)
} # end of function
|
/scratch/gouwar.j/cran-all/cranData/BTSPAS/R/run_MCMC.r
|
## 2018-12-21 CJS converted to simple call to jags using R2jags package. This fixed problem that arrays not stored properly in output
## 2014-09-01 CJS added code to dump out mcmc.list to coda files to match functionality of OpenBugs
## Set the seed here.
## 2013-12-31 CJS added code to dump out data and initial values to files as in run.openbugs
## 2013-12-30 CJS changed program argument in as.bugs.array to JAGS
## 2013-09-22 sjb Created file. Copied from run_openbugs.R
#' @import R2jags utils
#' @importFrom coda as.mcmc.list
#' @importFrom stats runif
#' @keywords internal
#'
run.jags <-
function(modelFile,
dataFile,
dataList,
initFiles,
initVals,
parameters,
nChains,
nIter,
nBurnin,
nSims,
overRelax,
initialSeed,
working.directory,
debug=FALSE){
cat("\n\n*** Start of call to JAGS \n")
cat("Working directory: ",working.directory,"\n")
nIterPostBurnin <- nIter - nBurnin # Number of iterations to run after burnin
nThin <- round(nIterPostBurnin/nSims) # Thinning to obtain desired number of samples
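# e.g. with nIter=10000, nBurnin=5000 and nSims=500 there are 5000 post-burnin iterations
# thinned by nThin=10, so roughly 500 samples are retained per chain.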
#cat("In run_jags.R\n")
#browser()
## Set seed. We need to set a separate seed for each chain.
## We start by setting the seed in R, and then generating nChains values between 1 and 1,000,000.
cat("Initial seed for JAGS set to:", initialSeed, "\n")
set.seed(initialSeed)
initVals <- llply(initVals, function(x){
# add to this list
x$.RNG.seed <- round(stats::runif(1, min=1, max=1000000))
x$.RNG.name <- "base::Wichmann-Hill"
cat("Random number seed for chain ", x$.RNG.seed, "\n")
x
})
# Dump out the data list (useful for debugging)
#file.remove(dataFile)
with(dataList, dump(names(dataList), file = dataFile))
#browser()
# Dump out the initial values (useful for debugging)
for (i in 1:nChains) {
#file.remove(initFiles[i])
initial.values <- initVals[[i]]
with(initial.values, dump(names(initial.values), file = initFiles[i]))
}
#browser()
parametersToSave <- unique(c(parameters))
results1 <- R2jags::jags(
data =dataList, # list of data variables
inits =initVals, # list/function for initial values
parameters =parametersToSave,# list of parameters to monitor
model.file =modelFile, # where the model is saved by the cat above
n.chains =nChains,
n.iter =nIter, # total iterations INCLUDING burn in
n.burnin =nBurnin, # number of burning iterations
n.thin =nThin, # how much to thin
DIC=TRUE, # is DIC to be computed?
jags.seed = initialSeed,
working.dir=working.directory # store results in current working directory
)
## save the sample of the posteriors as a coda file (for debugging)
## taken from http://stackoverflow.com/questions/12078152/how-can-i-convert-an-mcmc-list-to-a-bugs-object
s2 <- as.array(coda::as.mcmc.list(results1$BUGSoutput))
lapply(seq_len(dim(s2)[3]), function(i) {
write.table(cbind(rep(seq_len(nrow(s2[,,i])), ncol(s2)), c(s2[,,i])),
paste0(working.directory, '/CODAchain', i, '.txt'),
row.names=FALSE, col.names=FALSE)
})
cat(paste(colnames(s2), 1+(seq_len(ncol(s2))-1) * nrow(s2), nrow(s2)*seq_len(ncol(s2))),
sep='\n',
file=file.path(working.directory, 'codaIndex.txt'))
#browser()
results <- results1$BUGSoutput
results$model <- results1$model
results$parameters.to.save <- results1$parameters.to.save
## Return results
cat("\n\n*** Finished JAGS ***\n\n")
return(results)
}
|
/scratch/gouwar.j/cran-all/cranData/BTSPAS/R/run_jags.R
|
# 2015-06-11 CJS Updated for fixed to Bayesian p-value plots
# 2013-12-30 CJS Updated for JAGS
# 2013-01-25 CJS Changed messages from .onLoad to .onAttach in accordance with R policies
# 2012-01-10 CJS Change name of .First.lib to .onLoad with changes for R2.14+
# 2011-06-01 SB remove usage of winbugs; change initial seed; speed up mixing
# 2009-12-06 CJS test if the standard winbugs/openbugs directory exists and warn the user - now removed.
# 2009-12-01 CJS added openbugs/winbugs to arguments of all functions. No need for global variables
#
#' Message to display when package is loaded
#'
#' @keywords internal
#'
.onAttach <- function(libname,pkgname){
packageStartupMessage("***** BTSPAS: Bayesian Time Stratified Petersen Analysis System - Version 2021.11.2 (2021.11.2) ***** \n\n",
" Help available with help(package='BTSPAS') \n",
' Several vignettes are available. See browseVignettes(package="BTSPAS") \n\n')
}
|
/scratch/gouwar.j/cran-all/cranData/BTSPAS/R/zzz.R
|
---
title: "Diagonal Case"
author: "Carl James Schwarz"
date: "`r Sys.Date()`"
output:
html_vignette:
toc: true # table of content true
toc_depth: 3 # upto three depths of headings (specified by #, ## and ###)
number_sections: true ## if you want number sections at each table header
#vignette: >
# %\VignetteIndexEntry{01 - Diagonal Case}
# %\VignetteEncoding{UTF-8}
# %\VignetteEngine{knitr::rmarkdown_notangle}
editor_options:
chunk_output_type: inline
---
```{r setup, include = FALSE,message=FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
library(binom)
library(BTSPAS)
library(ggplot2)
library(gridExtra)
library(kableExtra)
library(knitr)
max.width=70
```
# Location of vignette source and code.
Because of the length of time needed to run the vignettes, only
static vignettes have been included with this package.
The original of the vignettes and the code can be obtained from
the GitHub site at
https://github.com/cschwarz-stat-sfu-ca/BTSPAS
# Introduction
The two-sample capture-recapture experiment is one of the simplest possible studies
with a long history. The standard Lincoln-Petersen
estimator used to estimate abundance is
$$ \widehat{N} = \frac{n_1 n_2}{m_2}$$
where $n_1$ is the number of animals captured, marked and released at the first capture event;
$n_2$ is the number of animals captured at the second capture event;
and $m_2$ is the number of animals from $n_1$ that were recaptured at the second event
(i.e. the number of recaptured marked animals).
A key assumption of the Lincoln-Petersen estimator is that there is no correlation in
capture-probabilities at the two events. The most common way in which this can occur is
if the probability of capture is equal for all animals at either the first or second
event.
If capture probabilities are heterogeneous, then estimates can be biased. One way to
account for heterogeneous capture-probabilities is through stratification. For example,
if males and females have different catchabilities, then separate Lincoln-Petersen estimators
can be computed for each sex, and the estimated abundance for the entire population
is found by summing the two estimated abundances (one for each sex).
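As a toy numerical illustration (hypothetical numbers, not taken from any data set in this vignette), the pooled and sex-stratified Lincoln-Petersen estimates can differ noticeably when catchability varies by sex:
```{r toy-stratified}
# hypothetical counts for a two-stratum (male/female) experiment
toy.n1 <- c(M = 200, F = 100)   # marked and released at the first event
toy.n2 <- c(M = 100, F = 300)   # captured at the second event
toy.m2 <- c(M =  50, F =  30)   # marked fish among the second-event captures
sum(toy.n1) * sum(toy.n2) / sum(toy.m2)   # pooled Petersen ignores the strata
sum(toy.n1 * toy.n2 / toy.m2)             # stratified Petersen sums the per-stratum estimates
```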
Stratification can be based on animal attributes (e.g. sex), geographic location (e.g. parts
of a study area), or temporal (e.g. when captured). The $BTSPAS$ package deals with
temporal stratification.
# Experimental Protocol
Consider an experiment to estimate the number of outgoing smolts on a small river. The
run of smolts extends over several weeks. As smolts migrate, they are captured and marked
with individually numbered tags and released at the first capture location using, for example, a
fishwheel. The migration continues, and a second fishwheel takes a second sample several
kilometers downstream. At the second fishwheel, the captures consist of a mixture of marked
(from the first fishwheel) and unmarked fish.
The efficiency of the fishwheels varies over time in response to stream flow, run size passing
the wheel and other uncontrollable events. So it is unlikely that the capture probabilities are
equal over time at either location, i.e. are heterogeneous over time.
We suppose that we can temporally stratify the data into, for example, weeks, where the
capture-probabilities are (mostly) homogeneous at each wheel in each week. Furthermore, suppose that
fish captured and marked in each week tend to migrate together so that they are
captured in a single subsequent stratum. For example,
suppose that in each julian week $j$, $n1[j]$ fish are marked and released above the rotary screw trap.
Of these, $m2[j]$ are recaptured. All recaptures take place in the week of release,
i.e. the matrix of releases and recoveries is diagonal.
The $n1[j]$ and $m2[j]$ establish the capture efficiency of the second trap in julian week $j$.
At the same time, $u2[j]$ unmarked fish are captured at the screw trap.
This implies that the data can be structured
as a **diagonal** array similar to:
```{}
Recovery Stratum
tagged rs1 rs2 rs3 ... rsk
Marking ms1 n1[1] m2[1] 0 0 ... 0
Stratum ms2 n1[2] 0 m2[2] 0 ... 0
ms3 n1[3] 0 0 m2[3] ... 0
...
msk n1[k] 0 0 0 ... m2[k]
Newly
Untagged u2[1] u2[2] u2[3] ... u2[k]
captured
```
Here the tagging and recapture events have been stratified into $k$ temporal strata.
Marked fish from one stratum tend to move at similar rates and so are recaptured
together with unmarked fish.
Recaptures of marked fish take place along the "diagonal."
# Example of basic BTSPAS fit.
## Reading in the data
Because the matrix is diagonal, and because the $u2$ vector is the same length as
the $n1$ and $m2$ vectors, the data can be entered as several columns.
Here is an example of some raw data:
```{r}
demo.data.csv <- textConnection(
'jweek, n1, m2, u2
9 ,0, 0, 4135
10,1465, 51, 10452
11,1106,121, 2199
12, 229, 25, 655
13, 20, 0, 308
14, 177, 17, 719
15, 702, 74, 973
16, 633, 94, 972
17,1370, 62, 2386
18, 283, 10, 469
19, 647, 32, 897
20, 276, 11, 426
21, 277, 13, 407
22, 333, 15, 526
23,3981,242, 39969
24,3988, 55, 17580
25,2889,115, 7928
26,3119,198, 6918
27,2478, 80, 3578
28,1292, 71, 1713
29,2326,153, 4212
30,2528,156, 5037
31,2338,275, 3315
32,1012,101, 1300
33, 729, 66, 989
34, 333, 44, 444
35, 269, 33, 339
36, 77, 7, 107
37, 62, 9, 79
38, 26, 3, 41
39, 20, 1, 23
40,4757,188, 35118
41,2876, 8, 34534
42,3989, 81, 14960
43,1755, 27, 3643
44,1527, 30, 1811
45, 485, 14, 679
46, 115, 4, 154')
demo.data <- read.csv(demo.data.csv, header=TRUE, as.is=TRUE, strip.white=TRUE)
print(demo.data)
```
The first stratum was defined as julian week `r demo.data$jweek[1]`.
At this time, `r demo.data$n1[1]` fish were captured, tagged, and released, but
`r demo.data$u2[1]` unmarked fish were recovered in the first recovery stratum.
In the next week, `r demo.data$n1[2]` fish were captured, tagged, and released, with
`r demo.data$m2[2]` fish recaptured, and
`r demo.data$u2[2]` unmarked fish recovered.
```{r pp,message=FALSE,echo=FALSE}
Nhat <- BTSPAS::SimplePetersen( sum(demo.data$n1), sum(demo.data$m2), sum(demo.data$u2))
```
## Preliminary screening of the data
A pooled-Petersen estimator would add all of the marked, recaptured and unmarked fish
to give an estimate of `r formatC( round(Nhat$N.est), digits=0, width=20, format="f",big.mark=",")` which seems unbelievable!
What went wrong? Let us first examine a plot of the estimated capture efficiency
at the second trap for each (recovery) julian week.
```{r p.plot,echo=FALSE, fig.width=6, fig.height=4, include=TRUE, fig.align="center", fig.cap=c("Empirical capture probabilities"), warning=FALSE}
# plot the estimated probability of recapture for each stratum along
# with a confidence limit. Use use the binom package to find the ci when the
# number of recaptures is zero
demo.data$phat <- demo.data$m2/demo.data$n1
demo.data$phat.lcl <- binom::binom.confint(demo.data$m2, demo.data$n1, method="exact")$lower
demo.data$phat.ucl <- pmin(0.3, binom::binom.confint(demo.data$m2, demo.data$n1, method='exact')$upper)
ggplot(data=demo.data, aes(x=jweek, y=phat))+
    ggtitle("Estimated recapture probability by julian week")+
geom_point()+
geom_line()+
geom_errorbar(aes(ymin=phat.lcl, ymax=phat.ucl), width=.1)+
ylab("Estimated recapture probability (95% ci)\nMean recapture probability in red")+
xlab('Julian week')+
ylim(0, .3)+
geom_hline(aes(yintercept=sum(demo.data$m2)/sum(demo.data$n1)), color="red")
```
There are several unusual features:
* No fish were tagged and released in the first stratum. So no information is available
to estimate the capture efficiency at the second trap in the first week.
* In at least two of the recovery strata, there were no recaptures and so the
estimated recapture probability will be zero. But the data shows that some unmarked
fish were captured in these strata, so the actual efficiency must have been non-zero.
* There appears to be heterogeneity in the capture probabilities.
* In some julian weeks, the number of marked fish released and recaptured is very small, which
leads to estimates with poor precision.
Similarly, let us look at the pattern of unmarked fish captured at the second trap:
```{r unmarked, echo=FALSE, fig.width=6, fig.height=4, include=TRUE, fig.align="center", fig.cap=c("Observed number of unmarked recaptures"), warning=FALSE}
ggplot(data=demo.data, aes(x=jweek, y=log(u2)))+
ggtitle("Number of unmarked fish captured by julian week")+
geom_point()+
geom_line()+
xlab("Julian week")+
ylab("log(Number of unmarked fish captured (u2))")
```
There are two julian weeks where the number of unmarked fish captured suddenly jumps by
several orders of magnitude (remember the above plot is on the log() scale). These
jumps correspond to releases of hatchery fish into the system.
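Such jumps can also be flagged programmatically; here is a minimal sketch (the threshold of 2 on the log scale is arbitrary):
```{r}
# julian weeks where log(u2) increases by more than 2 over the previous week
demo.data$jweek[ c(FALSE, diff(log(demo.data$u2)) > 2) ]
```
The weeks immediately before these (22 and 39) are the values used later in the *jump.after* argument.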
Finally, let us look at the individual estimates found by
computing a Petersen estimator for the total number of unmarked fish in each stratum:
```{r Uplot, echo=FALSE,echo=FALSE, include=TRUE, fig.align="center", fig.cap=c("Estimated log(total unmarked) by julian week"), fig.height=4, fig.width=6}
demo.data$Uest <- BTSPAS::SimplePetersen( demo.data$n1, demo.data$m2, demo.data$u2)$U.est
ggplot(data=demo.data, aes(x=jweek, y=log(Uest)))+
ggtitle("Estimated total unmarked fish captured by julian week")+
geom_point()+
geom_line()+
xlab("Julian week")+
ylab("log(Estimated total unmarked fish)")
```
* The sudden jumps in abundance due to the hatchery releases are apparent, but the
estimate in julian week 41 is far too large(!), at almost 13 million fish from the simple stratified Petersen.
* There is a fairly regular pattern
in abundance with a slow increase until the first hatchery release, followed by
a steady decline, followed by a jump for the second hatchery release,
followed by a steady decline.
## Fitting the basic BTSPAS diagonal model
The $BTSPAS$ package attempts to strike a balance between the completely pooled Petersen estimator and the completely stratified Petersen estimator.
In the former, capture probabilities are assumed to be equal for all fish in all strata,
while in the latter, capture probabilities are allowed to vary among strata in an unstructured way.
Furthermore, fish populations often have a general structure to the run, rather than arbitrarily jumping around
from stratum to stratum.
Bonner and Schwarz (2011) developed a suite of models that add structure. In the basic model,
* A spline is used to smooth the total number of unmarked fish presenting themselves at the second trap
over the strata
* A hierarchical model for the capture-probabilities is assumed where individual stratum capture
probabilities are assumed to vary around a common mean.
The model also allows the user to use covariates to explain some of the variation in the
capture probabilities. Bonner and Schwarz (2011) also developed models where fish
are recaptured in more than one stratum.
The $BTSPAS$ package also has additional features and options:
* if $u2$ is missing for any stratum, the program will use the spline to interpolate the
number of unmarked fish in the population for the
missing stratum.
* if $n1$ **and** $m2$ are 0, then these strata provide no information towards recapture probabilities.
This is useful when no releases take place in a stratum (e.g. trap did not run) and so you need
'dummy' values as placeholders.
Of course if $n1>0$ and $m2=0$, this provides information that the capture probability may be small.
If $n1=0$ and $m2>0$, this is an error (recoveries from no releases).
* the program allows you to specify break points in the underlying spline to account
for external events. We saw in the above example that hatchery fish were released
in julian weeks 23 and 40, resulting in a sudden jump
in abundance. The $jump.after$ parameter gives the julian weeks just BEFORE the sudden jump,
i.e. the spline is allowed to jump AFTER the julian weeks in jump.after.
* sometimes bad things happen. The vector $bad.m2$ indicates in which julian weeks something went wrong. In the above example, the
number of recoveries in julian week 41 is far below expectations and leads to an impossible
Petersen estimate for julian week 41. Similarly, the vector $bad.u2$
indicates in which julian weeks the number of unmarked fish is suspect.
In both cases, the suspect values of $m2$ and $u2$ are set to missing.
Alternatively, the user can set the $m2$ and $u2$ values to missing in the data input directly.
I arbitrarily chose the third julian week to demonstrate this feature.
I find it easiest if bad recaptures (a value of $m2$) result in zeroing out both
$n1$ and $m2$ for that stratum.
The $BTSPAS$ function also allows you to specify
* The prefix is used to identify the output files for this run.
* The title is used to title the output.
* Various parameters to control the Bayesian MCMC phase of model fitting. Please contact us for help in setting these
if problems arise.
We already read in the data above. Here we set the rest of the parameters. Don't forget to set the working directory as appropriate.
```{r setparms}
library("BTSPAS")
# After which weeks is the spline allowed to jump?
demo.jump.after <- c(22,39) # julian weeks after which jump occurs
# Which julian weeks have "bad" recapture values. These will be set to missing and estimated.
demo.bad.m2 <- c(41) # list julian weeks with bad m2 values. This is used in the Trinity Example
demo.bad.u2 <- c(11)   # list julian weeks with bad u2 values. [This was arbitrary to demonstrate the feature.]
demo.bad.n1 <- c(38) # list julian weeks with bad n1 values. [This was arbitrary to demonstrate the feature.]
# The prefix for the output files:
demo.prefix <- "demo-JC-2003-CH-TSPDE"
# Title for the analysis
demo.title <- "Junction City 2003 Chinook "
cat("*** Starting ",demo.title, "\n\n")
# Make the call to fit the model and generate the output files
demo.fit <- TimeStratPetersenDiagError_fit(
title=demo.title,
prefix=demo.prefix,
time=demo.data$jweek,
n1=demo.data$n1,
m2=demo.data$m2,
u2=demo.data$u2,
jump.after=demo.jump.after,
bad.n1=demo.bad.n1,
bad.m2=demo.bad.m2,
bad.u2=demo.bad.u2,
InitialSeed=890110,
debug=TRUE, # this generates only 10,000 iterations of the MCMC chain for checking.
save.output.to.files=FALSE)
```
The final parameter (*save.output.to.files*) can be set to automatically save plots and reports in files with the appropriate prefix in the working directory.
```{r deletefiles,echo=FALSE,results="hide" }
# delete extra files that were created
file.remove("data.txt" )
file.remove("CODAindex.txt" )
file.remove("CODAchain1.txt" )
file.remove("CODAchain2.txt" )
file.remove("CODAchain3.txt" )
file.remove("inits1.txt" )
file.remove("inits2.txt" )
file.remove("inits3.txt" )
file.remove("model.txt" )
```
The output object contains all of the results and can be saved for later interrogations. This is
useful if the run takes considerable time (e.g. overnight) and you want to save the results
for later processing. Notice that I didn't save the results below as part of this vignette.
```{r saveres}
# save the results in a data dump that can be read in later using the load() command.
#save(list=c("demo.fit"), file="demo-fit-saved.Rdata") # save the results from this run
```
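If the results had been saved, they could be restored in a later R session; a minimal sketch (not run here) is:
```{r reload, eval=FALSE}
# restore the demo.fit object saved by the (commented out) save() call above
load("demo-fit-saved.Rdata")
```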
## The output from the basic fit
The final object has many components
```{r components,results="hide"}
names(demo.fit)
```
```{r echo=FALSE}
save.options <- options()
options(width=max.width)
names(demo.fit)
options(save.options)
```
The *plots* sub-object contains many plots:
```{r results="hide"}
names(demo.fit$plots)
```
```{r echo=FALSE}
save.options <- options()
options(width=max.width)
names(demo.fit$plots)
options(save.options)
```
In particular, it contains plots of the initial spline fit (*init.plot*),
the final fitted spline (*fit.plot*),
the estimated capture probabilities (on the logit scale) (*logitP.plot*),
plots of the distribution of the posterior sample
for the total unmarked and marked fish (*post.UNtot.plot*)
and model diagnostic plots (goodness of fit (*gof.plot*), trace (*trace...plot*), and autocorrelation plots (*act.Utot.plot*)).
These plots are all created using the $ggplot2$ package, so the user can modify the plots (e.g. change titles etc.).
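For example, a different title could be added to the fitted-spline plot simply by adding another *ggplot2* layer (a minimal sketch, not evaluated here; any valid *ggplot2* layer could be added in the same way):
```{r retitle, eval=FALSE}
demo.fit$plots$fit.plot + ggplot2::ggtitle("Junction City 2003 Chinook - fitted spline, new title")
```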
The $BTSPAS$ program also creates a report, which includes information about the data used in the fitting,
the pooled- and stratified-Petersen estimates, a test for pooling, and summaries of the posterior. Only the first few lines
are shown below:
```{r }
head(demo.fit$report)
```
Here is the fitted spline curve to the number of unmarked fish available in each recovery sample
```{r message=FALSE,warning=FALSE,fig.width=6, fig.height=4, include=TRUE, fig.align="center", fig.cap=c("Fitted spline curve")}
demo.fit$plots$fit.plot
```
The jump in the spline when hatchery fish are released is evident. The actual number
of unmarked fish is allowed to vary around the spline as shown below.
The distribution of the posterior sample for the total number unmarked and total abundance is available:
```{r fig.width=6, fig.height=4, include=TRUE, fig.align="center", fig.cap=c("Distribution of posterior samples")}
demo.fit$plots$post.UNtot.plot
```
A plot of the $logit(P)$ is
```{r warnings=FALSE,message=FALSE,warning=FALSE, fig.width=6, fig.height=4, include=TRUE, fig.align="center", fig.cap=c("Estimates of logit(p)")}
demo.fit$plots$logitP.plot
```
In cases where there is no information, $BTSPAS$ has interpolated based on the distribution of catchability
in the other strata and so the credible interval is very wide (e.g. julian weeks 9, 13, and 41).
A summary of the posterior for each parameter is also available. In particular, here are the
summary statistics on the posterior sample for the total number unmarked and total abundance:
```{r}
demo.fit$summary[ row.names(demo.fit$summary) %in% c("Ntot","Utot"),]
```
This also includes the Rubin-Brooks-Gelman statistic ($Rhat$) on mixing of the chains and the effective sample size
of the posterior (after
accounting for autocorrelation).
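A quick convergence check across all monitored parameters can be made directly from the summary matrix, assuming it carries the $Rhat$ column noted above; values much above 1 deserve a closer look:
```{r}
# any parameters whose Rhat exceeds a common rule-of-thumb cutoff of 1.05
rhat <- demo.fit$summary[ , "Rhat"]
rhat[ rhat > 1.05 ]
```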
The estimated total abundance from $BTSPAS$ is
`r formatC(round(demo.fit$summary[ "Ntot","mean"]), big.mark=",", digits=0, format="f")` (SD
`r formatC(round(demo.fit$summary[ "Ntot","sd" ]), big.mark=",", digits=0, format="f")` ) fish.
Samples from the posterior are also included in the *sims.matrix*, *sims.array* and *sims.list* elements
of the results object.
It is always important to do model assessment before accepting the results from the model fit.
Please contact me for details on how to interpret
the goodness of fit, trace, and autocorrelation plots.
# Fixing p's
In some cases, the second trap is not running and so there are no recaptures of tagged fish and
no captures of untagged fish.
We need to set the p's in these strata to 0 rather than letting *BTSPAS* impute a value.
## Reading in the data
Here is an example of some raw data that is read in:
```{r}
demo2.data.csv <- textConnection(
'jweek, n1, m2, u2
9 ,0, 0, 4135
10,1465, 51, 10452
11,1106,121, 2199
12, 229, 25, 655
13, 20, 0, 308
14, 177, 17, 719
15, 702, 74, 973
16, 633, 94, 972
17,1370, 62, 2386
18, 283, 10, 469
19, 647, 32, 897
20, 276, 11, 426
21, 277, 13, 407
22, 333, 15, 526
23,3981,242, 39969
24,3988, 55, 17580
25,2889,115, 7928
26,3119,198, 6918
27,2478, 80, 3578
28,1292, 71, 1713
29,2326,153, 4212
30,2528,156, 5037
31,2338,275, 3315
32,1012,101, 1300
33, 729, 66, 989
34, 333, 44, 444
35, 269, 33, 339
36, 77, 7, 107
37, 62, 0, 0
38, 26, 0, 0
39, 20, 0, 0
40,4757,188, 35118
41,2876, 8, 34534
42,3989, 81, 14960
43,1755, 27, 3643
44,1527, 30, 1811
45, 485, 14, 679
46, 115, 0, 0')
demo2.data <- read.csv(demo2.data.csv, header=TRUE, as.is=TRUE, strip.white=TRUE)
print(demo2.data)
```
```{r echo=FALSE}
weeks.with.zero <- demo2.data$jweek[ demo2.data$m2==0 & demo2.data$u2==0]
weeks.with.zero.text <- paste(paste( weeks.with.zero[-length(weeks.with.zero)],', ', sep="", collapse=""), "and ", weeks.with.zero[length(weeks.with.zero)], sep="", collapse="" )
```
Notice that there are no recaptures of marked fish and no captures of unmarked fish in julian weeks `r weeks.with.zero.text` when the trap was not operating.
This differs from the case where a small number of tagged fish were released
with no recaptures, but unmarked fish were still captured, such as in julian week 13.
## Fitting the BTSPAS diagonal model and fixing p.
Two additional arguments to *BTSPAS* allow you to specify the julian weeks in which
the capture probability is fixed to a known (typically zero) value.
```{r }
demo2.logitP.fixed <- c(37,38,39, 46)
demo2.logitP.fixed.values <- rep(-10, length(demo2.logitP.fixed))
```
The strata where the value of *p* is to be fixed are specified along with the values (on the logit scale) at which the capture probabilities are fixed.
Technically, the $logit(0.0)$ is negative infinity, but $-10$ is ``close enough''.
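A quick numerical check of what $-10$ corresponds to on the probability scale confirms this:
```{r}
plogis(-10)   # capture probability implied by logit(p) = -10
```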
The rest of the call is basically the same -- don't forget to specify the additional arguments in the call
```{r demo2.run}
library("BTSPAS")
# After which weeks is the spline allowed to jump?
demo2.jump.after <- c(22,39) # julian weeks after which jump occurs
# Which julian weeks have "bad" recapture values. These will be set to missing and estimated.
demo2.bad.m2 <- c(41) # list julian weeks with bad m2 values. This is used in the Trinity Example
demo2.bad.u2 <- c(11)   # list julian weeks with bad u2 values. [This was arbitrary to demonstrate the feature.]
demo2.bad.n1 <- c(38) # list julian weeks with bad n1 values. [This was arbitrary to demonstrate the feature.]
# The prefix for the output files:
demo2.prefix <- "demo2-JC-2003-CH-TSPDE"
# Title for the analysis
demo2.title <- "Junction City 2003 Chinook with p fixed "
cat("*** Starting ",demo2.title, "\n\n")
# Make the call to fit the model and generate the output files
demo2.fit <- TimeStratPetersenDiagError_fit(
title=demo2.title,
prefix=demo2.prefix,
time=demo2.data$jweek,
n1=demo2.data$n1,
m2=demo2.data$m2,
u2=demo2.data$u2,
jump.after=demo2.jump.after,
                  logitP.fixed=demo2.logitP.fixed,       # ***** NEW *****
logitP.fixed.values=demo2.logitP.fixed.values, # ***** NEW *****
bad.n1=demo2.bad.n1,
bad.m2=demo2.bad.m2,
bad.u2=demo2.bad.u2,
InitialSeed=890110,
debug=TRUE, # this generates only 10,000 iterations of the MCMC chain for checking.
save.output.to.files=FALSE)
```
```{r deletefiles2,echo=FALSE,results="hide" }
# delete extra files that were created
file.remove("data.txt" )
file.remove("CODAindex.txt" )
file.remove("CODAchain1.txt" )
file.remove("CODAchain2.txt" )
file.remove("CODAchain3.txt" )
file.remove("inits1.txt" )
file.remove("inits2.txt" )
file.remove("inits3.txt" )
file.remove("model.txt" )
```
## The output from the fit
Here is the fitted spline curve to the number of unmarked fish available in each recovery sample.
Note how the spline interpolates across julian weeks `r weeks.with.zero.text`, when no unmarked fish were captured,
but the uncertainty is much larger.
```{r message=FALSE,warning=FALSE,fig.width=6, fig.height=4, include=TRUE, fig.align="center", fig.cap=c("Fitted spline curve with fixed p's")}
demo2.fit$plots$fit.plot
```
The jump in the spline when hatchery fish are released is evident.
The distribution of the posterior sample for the total number unmarked and total abundance is available as before:
```{r fig.width=6, fig.height=4, include=TRUE, fig.align="center", fig.cap=c("Distribution of posterior samples")}
demo2.fit$plots$post.UNtot.plot
```
A plot of the $logit(P)$ is
```{r warnings=FALSE,message=FALSE,warning=FALSE, fig.width=6, fig.height=4, include=TRUE, fig.align="center", fig.cap=c("Estimates of logit(p)")}
demo2.fit$plots$logitP.plot
```
Notice how the fixed values at $-10$ (on the logit scale) occur.
A summary of the posterior for each parameter is also available. In particular, here are the
summary statistics on the posterior sample for the total number unmarked and total abundance:
```{r}
demo2.fit$summary[ row.names(demo2.fit$summary) %in% c("Ntot","Utot"),]
```
The estimated total abundance from $BTSPAS$ is
`r formatC(round(demo2.fit$summary[ "Ntot","mean"]), big.mark=",", digits=0, format="f")` (SD
`r formatC(round(demo2.fit$summary[ "Ntot","sd" ]), big.mark=",", digits=0, format="f")` ) fish.
# Using covariates to model the p's
*BTSPAS* also allows you to model the p's with additional covariates, such as temperature, stream flow, etc.
It is not possible to use covariates to model the total number of unmarked fish.
## Reading in the data
Here is an example of some raw data that includes the covariate $log(flow)$:
```{r}
demo3.data.csv <- textConnection(
'jweek, n1, m2, u2, logflow
9, 0, 0, 4135, 6.617212
10, 1465, 51, 10452, 6.51217
11, 1106, 121, 2199, 7.193686
12, 229, 25, 655, 6.960754
13, 20, 0, 308, 7.008376
14, 177, 17, 719, 6.761573
15, 702, 74, 973, 6.905753
16, 633, 94, 972, 7.062314
17, 1370, 62, 2386, 7.600188
18, 283, 10, 469, 8.246509
19, 647, 32, 897, 8.110298
20, 276, 11, 426, 8.035001
21, 277, 13, 407, 7.859965
22, 333, 15, 526, 7.774255
23, 3981, 242, 39969, 7.709116
24, 3988, 55, 17580, 7.653766
25, 2889, 115, 7928, 7.622105
26, 3119, 198, 6918, 7.593734
27, 2478, 80, 3578, 7.585063
28, 1292, 71, 1713, 7.291072
29, 2326, 153, 4212, 6.55556
30, 2528, 156, 5037, 6.227665
31, 2338, 275, 3315, 6.278789
32, 1012, 101, 1300, 6.273685
33, 729, 66, 989, 6.241111
34, 333, 44, 444, 6.687999
35, 269, 33, 339, 7.222566
36, 77, 7, 107, 7.097194
37, 62, 9, 79, 6.949993
38, 26, 3, 41, 6.168714
39, 20, 1, 23, 6.113682
40, 4757, 188, 35118, 6.126557
41, 2876, 8, 34534, 6.167217
42, 3989, 81, 14960, 5.862413
43, 1755, 27, 3643, 5.696614
44, 1527, 30, 1811, 5.763847
45, 485, 14, 679, 5.987528
46, 115, 4, 154, 5.912344')
demo3.data <- read.csv(demo3.data.csv, header=TRUE, as.is=TRUE, strip.white=TRUE)
print(demo3.data)
```
A preliminary plot of the empirical logit (excluding those weeks when the trap was not running) shows an approximate quadratic fit to $log(flow)$,
but the uncertainty in each week is enormous!
```{r echo=FALSE, fig.height=4, fig.width=6, fig.caption="Logit(p) vs log(flow)", message=FALSE, warning=FALSE}
demo3.data$elogitphat <- log( (demo3.data$m2+.5)/(demo3.data$n1+1) / (1 - (demo3.data$m2+.5)/(demo3.data$n1+1) ))
plotdata <- demo3.data[ demo3.data$u2 >0, ]
ggplot(data=plotdata, aes(x=logflow, y=elogitphat))+
ggtitle("Empirical logit vs. log(flow) ")+
geom_point()+
geom_smooth(method="lm", se=FALSE, formula = y ~ x + I(x^2))
```
## Fitting the BTSPAS diagonal model and fixing p and covariates for p.
We need to create a matrix with the covariate values. We will need three columns - one for the intercept, one for the value of log(flow), and one for its square.
In practice, it is often advisable to standardize covariates to prevent numerical difficulties, but in this case, the values are small enough that standardization
is not really needed.
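If standardization were wanted, one possible sketch (illustrative only; the unstandardized values are used in the fit below) is:
```{r}
# centre and scale log(flow) before building the design matrix
demo3.logflow.std <- as.vector(scale(demo3.data$logflow))
head(cbind(1, demo3.logflow.std, demo3.logflow.std^2))
```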
```{r }
demo3.logitP.cov <- cbind(1, demo3.data$logflow, demo3.data$logflow^2)
head(demo3.logitP.cov)
```
The rest of the call is basically the same -- don't forget to specify the additional arguments in the call
```{r demo3.run}
library("BTSPAS")
# After which weeks is the spline allowed to jump?
demo3.jump.after <- c(22,39) # julian weeks after which jump occurs
# Which julian weeks have "bad" recapture values. These will be set to missing and estimated.
demo3.bad.m2 <- c(41) # list julian weeks with bad m2 values. This is used in the Trinity Example
demo3.bad.u2 <- c(11)   # list julian weeks with bad u2 values. [This was arbitrary to demonstrate the feature.]
demo3.bad.n1 <- c(38) # list julian weeks with bad n1 values. [This was arbitrary to demonstrate the feature.]
# The prefix for the output files:
demo3.prefix <- "demo3-JC-2003-CH-TSPDE"
# Title for the analysis
demo3.title <- "Junction City 2003 Chinook with covariates for p "
cat("*** Starting ",demo3.title, "\n\n")
# Make the call to fit the model and generate the output files
demo3.fit <- TimeStratPetersenDiagError_fit(
title=demo3.title,
prefix=demo3.prefix,
time=demo3.data$jweek,
n1=demo3.data$n1,
m2=demo3.data$m2,
u2=demo3.data$u2,
jump.after=demo3.jump.after,
logitP.cov = demo3.logitP.cov, # ***** NEW *****
bad.n1=demo3.bad.n1,
bad.m2=demo3.bad.m2,
bad.u2=demo3.bad.u2,
InitialSeed=890110,
debug=TRUE, # this generates only 10,000 iterations of the MCMC chain for checking.
save.output.to.files=FALSE)
```
```{r deletefiles3,echo=FALSE,results="hide" }
# delete extra files that were created
file.remove("data.txt" )
file.remove("CODAindex.txt" )
file.remove("CODAchain1.txt" )
file.remove("CODAchain2.txt" )
file.remove("CODAchain3.txt" )
file.remove("inits1.txt" )
file.remove("inits2.txt" )
file.remove("inits3.txt" )
file.remove("model.txt" )
```
## The output from the fit
Here is the fitted spline curve to the number of unmarked fish available in each recovery sample.
```{r message=FALSE,warning=FALSE,fig.width=6, fig.height=4, include=TRUE, fig.align="center", fig.cap=c("Fitted spline curve with covariate for p")}
demo3.fit$plots$fit.plot
```
The jump in the spline when hatchery fish are released is evident.
The distribution of the posterior sample for the total number unmarked and total abundance is available as before:
```{r fig.width=6, fig.height=4, include=TRUE, fig.align="center", fig.cap=c("Distribution of posterior samples")}
demo3.fit$plots$post.UNtot.plot
```
A plot of the $logit(P)$ is
```{r warnings=FALSE,message=FALSE,warning=FALSE, fig.width=6, fig.height=4, include=TRUE, fig.align="center", fig.cap=c("Estimates of logit(p)")}
demo3.fit$plots$logitP.plot
```
Here is a plot of the estimated $logit(p)$'s vs. the log(flow) and its fitted curve:
```{r echo=FALSE, fig.width=6, fig.height=4, fig.caption="Fitted relationship between logit(p) and log(flow)"}
demo3.row.names <- rownames(demo3.fit$summary)
demo3.coeff.row.index <- grep("beta.logitP[", demo3.row.names, fixed=TRUE)
demo3.coeff.row.index <- demo3.coeff.row.index[1:3] # the 3rd index is a dummy value needed when there is a single beta
demo3.coeff <- demo3.fit$summary[demo3.coeff.row.index,"mean"]
demo3.coeff.sd <- demo3.fit$summary[demo3.coeff.row.index, "sd"]
demo3.pred.logitP <- demo3.logitP.cov %*% demo3.coeff
demo3.logitP.row.index <- grep("^logitP", demo3.row.names)
demo3.logitP <- demo3.fit$summary[demo3.logitP.row.index, "mean"] # extract the logit(P) values
plotdata <- data.frame(jweek =demo3.data$jweek,
logflow =demo3.data$logflow,
pred =demo3.pred.logitP,
actual =demo3.logitP,
actual.lcl=demo3.fit$summary[demo3.logitP.row.index, "2.5%"],
actual.ucl=demo3.fit$summary[demo3.logitP.row.index, "97.5%"])
ggplot(data=plotdata, aes(x=logflow, y=pred))+
ggtitle("Relationship of logit(p) and flow")+
geom_line()+
geom_errorbar(aes(ymin=actual.lcl, ymax=actual.ucl), color="blue", width=.01)+
geom_point(aes(y=actual),color="blue")+
ylab("logit(p) and 95% ci")+xlab("log(flow)")
```
There is virtually no evidence of a relationship with flow because of the very large uncertainties
in each of the estimated $logit(p)$.
The estimated coefficients of the quadratic relationship between logit(p) and *log(flow)* are:
```{r }
round(demo3.fit$summary[demo3.coeff.row.index,],3)
```
A summary of the posterior for each parameter is also available. In particular, here are the
summary statistics on the posterior sample for the total number unmarked and total abundance:
```{r}
demo3.fit$summary[ row.names(demo3.fit$summary) %in% c("Ntot","Utot"),]
```
The estimated total abundance from $BTSPAS$ is
`r formatC(round(demo3.fit$summary[ "Ntot","mean"]), big.mark=",", digits=0, format="f")` (SD
`r formatC(round(demo3.fit$summary[ "Ntot","sd" ]), big.mark=",", digits=0, format="f")` ) fish.
# Using covariates to model the p's - prior information about beta coefficients
*BTSPAS* also allows you to model the p's with additional covariates, such as temperature, stream flow, etc.
It is not possible to use covariates to model the total number of unmarked fish.
In some cases, you may have additional information about the effect of the covariates that you
would like to incorporate into the analysis. For example, a rotary screw trap
may have run for many years and plots of the relationship between the
*logit(catchability)* vs. *log(flow)* generally shows a relationship that you
may not be able to capture in a single year because of lack of contrast (i.e.
the flow within a year does not vary enough) or because of smallish sample sizes.
*BTSPAS* allows you to specify prior information on the coefficients of the
relationship between *logit(catchability)* and covariates.
We will show an example where four models are fit to the same data
- no relationship between *logit(catchability)* and *log(flow)*;
- linear relationship between *logit(catchability)* and *log(flow)* with the default priors;
- linear relationship between *logit(catchability)* and *log(flow)* with weak priors;
- linear relationship between *logit(catchability)* and *log(flow)* with strong priors (and
the prior information conflicts with the fit).
## Reading in the data
Here is an example of some (fictitious) raw data that includes the covariate $log(flow)$:
```{r}
demo5.data.csv <- textConnection(
'time, n1, m2, u2, logFlow
1 , 0 , 0 , 21 , 5.415
2 , 56 , 8 , 2266 , 5.358
3 , 1009 , 59 , 11314 , 6.737
4 , 1284 , 25 , 5035 , 7.993
5 , 1504 , 13 , 396 , 8.693
6 , 0 , 0 , 45 , 8.861
7 , 0 , 0 , 26 , 8.587
8 , 1560 , 17 , 12 , 8.347
9 , 1643 , 14 , 43 , 8.260
10 , 0 , 0 , 63 , 8.606
11 , 1487 , 7 , 24 , 8.671
12 , 0 , 0 , 5 , 8.737
13 , 0 , 0 , 4 , 7.862')
demo5.data <- read.csv(demo5.data.csv, header=TRUE, as.is=TRUE, strip.white=TRUE)
print(demo5.data)
```
A preliminary plot of the empirical logit (excluding those weeks when the trap was not running) shows an approximate linear fit to $log(flow)$,
but the uncertainty in each week is enormous!
```{r echo=FALSE, fig.height=4, fig.width=6, fig.caption="Logit(p) vs log(flow)", message=FALSE, warning=FALSE}
demo5.data$elogitphat <- log( (demo5.data$m2+.5)/(demo5.data$n1+1) / (1 - (demo5.data$m2+.5)/(demo5.data$n1+1) ))
plotdata <- demo5.data[ demo5.data$u2 >0, ]
ggplot(data=plotdata, aes(x=logFlow, y=elogitphat))+
ggtitle("Empirical logit vs. log(flow) ")+
geom_point()+
geom_smooth(method="lm", se=FALSE, formula = y ~ x)
```
## Fitting the BTSPAS diagonal model with no covariates
We start with fitting BTSPAS with no covariates
```{r }
fit1 <- BTSPAS::TimeStratPetersenDiagError_fit(
title="no covariates"
,prefix="fit1"
,time = demo5.data$time
,n1 = demo5.data$n1
,m2 = demo5.data$m2
,u2 = demo5.data$u2
,InitialSeed=234323
,save.output.to.files=FALSE
)
```
Estimated values of the overall mean *logitP* and the residual variation around the
fitted line are:
```{r }
select <- grepl("beta.logitP", row.names(fit1$summary))
fit1$summary[select,][1]
select <- grepl("sigmaP", row.names(fit1$summary)) # sd(logitP) around mean logitP or regression curve if covariates present
fit1$summary[select,]
```
The plot of the estimated *logit(catchability)* vs. *log(flow)* and
the overall mean is:
```{r fig.width=6, fig.height=4, fig.align="center", fig.cap="Fit1: logitP vs log(flow)"}
plotdata1 <- data.frame( logFlow = demo5.data$logFlow,
logitP = fit1$mean$logitP,
time = demo5.data$time,
n1 = demo5.data$n1)
plotdata1$fit <- 'fit1 - no covariates'
ggplot(data=plotdata1, aes(x=logFlow, y=logitP))+
ggtitle("no covariates")+
geom_point(aes(size=n1))+
#geom_smooth(method="lm", se=FALSE)+
geom_text(aes(label=time), vjust=-.5)+
geom_hline(yintercept=fit1$mean$beta.logitP[1], color="red")
```
There appears to be a relationship with *log(flow)* that has not been captured with this model.
## Fitting the BTSPAS diagonal model with using log(flow) and default priors
We need to create a matrix with the covariate values. We will need two columns - one for the intercept, and one for the value of log(flow).
In practice, it is often advisable to standardize covariates to prevent numerical difficulties,
but in this case, the values are small enough that standardization
is not really needed.
```{r }
fit2 <- BTSPAS::TimeStratPetersenDiagError_fit(
title="log(Flow)- default prior"
,prefix="fit2"
,time = demo5.data$time
,n1 = demo5.data$n1
,m2 = demo5.data$m2
,u2 = demo5.data$u2
,logitP.cov=cbind( 1, demo5.data$logFlow)
,InitialSeed=3542343
,save.output.to.files=FALSE
)
```
Estimated values of the beta coefficients (the intercept and slope) and the residual variation around the
fitted line are:
```{r }
select <- grepl("beta.logitP", row.names(fit2$summary))
fit2$summary[select,][1:2]
select <- grepl("sigmaP", row.names(fit2$summary)) # sd(logitP) around mean logitP or regression curve if covariates present
fit2$summary[select,]
```
The plot of the estimated *logit(catchability)* vs. *log(flow)* and
the overall mean is:
```{r fig.width=6, fig.height=4, fig.align="center", fig.cap="Fit2: logitP vs log(flow)"}
plotdata2 <- data.frame(logFlow = demo5.data$logFlow,
logitP = fit2$mean$logitP,
time = demo5.data$time,
n1 = demo5.data$n1)
plotdata2$fit <- 'fit2 - log flow default priors'
ggplot(data=plotdata2, aes(x=logFlow, y=logitP))+
ggtitle("log(flow) - default priors")+
geom_point(aes(size=n1))+
#geom_smooth(method="lm", se=FALSE)+
geom_text(aes(label=time), vjust=-.5)+
#geom_hline(yintercept=fit1$mean$beta.logitP[1], color="red")+
geom_abline(intercept=fit2$mean$beta.logitP[1],
slope =fit2$mean$beta.logitP[2], color="green")
```
We now have a relationship with *log(flow)*, but as you will see later, the
evidence is not very strong.
## Fitting the BTSPAS diagonal model with using log(flow) and weak prior
Prior information on the beta coefficients (the intercept and slope) are given
using the *prior.beta.logitP.mean* and *prior.beta.logitP.sd* parameters in the call.
The first specifies the values of the intercept and slope and the second
specifies the uncertainty in these prior values.
Consider the fit:
```{r }
fit3 <- BTSPAS::TimeStratPetersenDiagError_fit(
title="log(Flow) - weak priors"
,prefix="fit3"
,time = demo5.data$time
,n1 = demo5.data$n1
,m2 = demo5.data$m2
,u2 = demo5.data$u2
,logitP.cov=cbind( 1, demo5.data$logFlow)
,prior.beta.logitP.mean=c( .3, -.7) # prior for intercept and slope on logit scale - mean
,prior.beta.logitP.sd =c( 2, 2) # prior for intercept and slope on logit scale - sd
,InitialSeed=3542343
,save.output.to.files=FALSE
)
```
Here the prior for the intercept is set to 0.3 and the prior for the slope is set to -.7 (the *prior.beta.logitP.mean*).
The *prior.beta.logitP.sd* gives the uncertainty (like a standard error) in these values.
In this fit, I used a large uncertainty, a standard deviation of 2 for both.
This indicates the prior places most of its mass on $.3 \pm 2 \times 2$ for the intercept and $-.7 \pm 2 \times 2$ for the slope, i.e.
about 95% of the prior probability lies within two standard deviations of the mean.
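Assuming a normal prior on the logit scale, the implied central 95% prior intervals can be computed directly:
```{r}
qnorm(c(0.025, 0.975), mean =  0.3, sd = 2)   # intercept
qnorm(c(0.025, 0.975), mean = -0.7, sd = 2)   # slope
```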
Estimated values of the beta coefficients (the intercept and slope) and the residual variation around the
fitted line are:
```{r }
select <- grepl("beta.logitP", row.names(fit3$summary))
fit3$summary[select,][1:2]
select <- grepl("sigmaP", row.names(fit3$summary)) # sd(logitP) around mean logitP or regression curve if covariates present
fit3$summary[select,]
```
The plot of the estimated *logit(catchability)* vs. *log(flow)* and
the overall mean is:
```{r fig.width=6, fig.height=4, fig.align="center", fig.cap="Fit3: logitP vs log(flow)"}
plotdata3 <- data.frame( logFlow = demo5.data$logFlow,
logitP = fit3$mean$logitP,
time = demo5.data$time,
n1 = demo5.data$n1)
plotdata3$fit <- 'fit3 - log(flow) - weak priors'
ggplot(data=plotdata3, aes(x=logFlow, y=logitP))+
ggtitle("log(flow) - weak priors")+
geom_point(aes(size=n1))+
#geom_smooth(method="lm", se=FALSE)+
geom_text(aes(label=time), vjust=-.5)+
#geom_hline(yintercept=fit1$mean$beta.logitP[1], color="red")+
#geom_abline(intercept=fit2$mean$beta.logitP[1],
# slope =fit2$mean$beta.logitP[2], color="green")+
geom_abline(intercept=fit3$mean$beta.logitP[1],
slope =fit3$mean$beta.logitP[2], color="brown", linetype="solid")
```
The weak information on the slope gives a boost to the relationship between *log(flow)*
and the *logit(catchability)*, especially for those weeks when the sample size
is very small.
## Fitting the BTSPAS diagonal model with using log(flow) and strong prior
We repeat the fit but with very strong prior information:
```{r }
fit4 <- BTSPAS::TimeStratPetersenDiagError_fit(
title="log(Flow) - strong priors"
,prefix="fit4"
,time = demo5.data$time
,n1 = demo5.data$n1
,m2 = demo5.data$m2
,u2 = demo5.data$u2
,logitP.cov=cbind( 1, demo5.data$logFlow)
,prior.beta.logitP.mean=c( .3, -.7) # prior for intercept and slope on logit scale - mean
,prior.beta.logitP.sd =c(.01, .01) # prior for intercept and slope on logit scale - sd
,InitialSeed=3542343
,save.output.to.files=FALSE
)
```
Here the prior for the intercept is set to 0.3 and the prior for the slope is set to -.7 (the *prior.beta.logitP.mean*).
The *prior.beta.logitP.sd* gives the uncertainty (like a standard error) in these values.
In this fit, I used a very small uncertainty, a standard deviation of .01 for both.
This indicates the prior places most of its mass on $.3 \pm 2 \times .01$ for the intercept and $-.7 \pm 2 \times .01$ for the slope, i.e.
about 95% of the prior probability lies within two standard deviations of the mean.
Estimated values of the beta coefficients (the intercept and slope) and the residual variation around the
fitted line are:
```{r }
select <- grepl("beta.logitP", row.names(fit4$summary))
fit4$summary[select,][1:2]
select <- grepl("sigmaP", row.names(fit4$summary)) # sd(logitP) around mean logitP or regression curve if covariates present
fit4$summary[select,]
```
The plot of the estimated *logit(catchability)* vs. *log(flow)* and
the overall mean is:
```{r fig.width=6, fig.height=4, fig.align="center", fig.cap="Fit4: logitP vs log(flow)"}
plotdata4 <- data.frame( logFlow = demo5.data$logFlow,
logitP = fit4$mean$logitP,
time = demo5.data$time,
n1 = demo5.data$n1)
plotdata4$fit <- 'fit4 - log(flow) - strong priors'
ggplot(data=plotdata4, aes(x=logFlow, y=logitP))+
ggtitle("log(flow) - strong prior")+
geom_point(aes(size=n1))+
#geom_smooth(method="lm", se=FALSE)+
geom_text(aes(label=time), vjust=-.5)+
#geom_hline(yintercept=fit1$mean$beta.logitP[1], color="red")+
#geom_abline(intercept=fit2$mean$beta.logitP[1],
# slope =fit2$mean$beta.logitP[2], color="green")+
#geom_abline(intercept=fit3$mean$beta.logitP[1],
# slope =fit3$mean$beta.logitP[2], color="brown", linetype="solid")+
geom_abline(intercept=fit4$mean$beta.logitP[1],
slope =fit4$mean$beta.logitP[2], color="brown", linetype="dashed")
```
Notice that the (strong) prior is now in conflict with the data. The model
now believes that the variation around the line must be large, which allows
it to move estimates of *logit(catchability)* below the line.
## Comparing the results of the fits
### Estimates of abundance
```{r echo=FALSE, message=FALSE, warning=FALSE }
select <- grepl("Ntot", row.names(fit1$summary))
t1 <- data.frame(as.list(fit1$summary[select,]))
t1$Source <- "No covariates"
select <- grepl("Ntot", row.names(fit2$summary))
t2 <- data.frame(as.list(fit2$summary[select,]))
t2$Source <- "Default prior"
select <- grepl("Ntot", row.names(fit3$summary))
t3 <- data.frame(as.list(fit3$summary[select,]))
t3$Source <- "Weak prior"
select <- grepl("Ntot", row.names(fit4$summary))
t4 <-data.frame(as.list(fit4$summary[select,]))
t4$Source <- "Strong prior"
temp <- rbind(t1, t2, t3, t4)
kable(temp[,c("Source","mean","sd")], row.names=FALSE,
caption="Comparing estimates of abundance among fit",
col.names=c("Source","Mean","SD"),
digits=c(0,0,0)) %>%
column_spec(column=c(1), width="3cm" ) %>%
column_spec(column=c(2,3), width="1.5cm") %>%
kable_styling("bordered",position = "center", full_width=FALSE, latex_options = "HOLD_position")
```
There appears to be little impact on the estimate of abundance, but notice that the
uncertainty declines as you add information from *fit1* to *fit3*, but when you add
a strong conflicting prior (see below), the uncertainty increases.
### Estimates of coefficients
```{r echo=FALSE, message=FALSE, warning=FALSE }
select <- grepl("beta.logitP", row.names(fit1$summary))
t1 <- data.frame(as.list(fit1$summary[select,][1,]))
t1$Source <- "No covariates"
t1$Est <- "Intercept"
select <- grepl("beta.logitP", row.names(fit2$summary))
t2 <- data.frame(fit2$summary[select,][1:2,])
t2$Source <- "Default prior"
t2$Est <- c("Intercept","Slope")
select <- grepl("beta.logitP", row.names(fit3$summary))
t3 <- data.frame(fit3$summary[select,][1:2,])
t3$Source <- "Weak prior"
t3$Est <- c("Intercept","Slope")
select <- grepl("beta.logitP", row.names(fit4$summary))
t4 <-data.frame(fit4$summary[select,][1:2,])
t4$Source <- "Strong prior"
t4$Est <- c("Intercept","Slope")
temp <- rbind(t1, t2, t3, t4)
kable(temp[,c("Source","Est","mean","sd")], row.names=FALSE,
caption="Comparing estimates of beta coefficients among fit",
col.names=c("Source","Estimate","Mean","SD"),
digits=c(0,0,3,3)) %>%
column_spec(column=c(1:2), width="3cm" ) %>%
column_spec(column=c(3,4), width="1.5cm") %>%
kable_styling("bordered",position = "center", full_width=FALSE, latex_options = "HOLD_position")
```
With the strong prior, the data plays essentially no role in determining the slope and intercept.
With a weak prior, the estimated slope has lower precision than with the even weaker (default) prior.
### Comparing the residual standard deviation around the line of fit
```{r echo=FALSE, message=FALSE, warning=FALSE }
select <- grepl("sigmaP", row.names(fit1$summary))
t1 <- data.frame(as.list(fit1$summary[select,]))
t1$Source <- "No covariates"
select <- grepl("sigmaP", row.names(fit2$summary))
t2 <- data.frame(as.list(fit2$summary[select,]))
t2$Source <- "Default prior"
select <- grepl("sigmaP", row.names(fit3$summary))
t3 <- data.frame(as.list(fit3$summary[select,]))
t3$Source <- "Weak prior"
select <- grepl("sigmaP", row.names(fit4$summary))
t4 <-data.frame(as.list(fit4$summary[select,]))
t4$Source <- "Strong prior"
temp <- rbind(t1, t2, t3, t4)
kable(temp[,c("Source","mean","sd")], row.names=FALSE,
caption="Comparing estimates of residual variation in logitP among fit",
col.names=c("Source","Mean","SD"),
digits=c(0,3,3)) %>%
column_spec(column=c(1), width="3cm" ) %>%
column_spec(column=c(2,3), width="1.5cm") %>%
kable_styling("bordered",position = "center", full_width=FALSE, latex_options = "HOLD_position")
```
The residual variation declines as more information is added via the prior for the first 3 fits, but
then increases when a strong (conflicting) prior is added (last fit).
### Comparing the fits
```{r echo=FALSE, message=FALSE, warning=FALSE,fig.width=6, fig.height=6, fig.align="center", fig.cap="All fits: logitP vs log(flow)"}
# fitted logit(p) vs covariate for each model
plotdata <- plyr::rbind.fill(plotdata1, plotdata2, plotdata3, plotdata4)
ggplot(data=plotdata, aes(x=logFlow, y=logitP))+
ggtitle("Fitted logitP vs log(flow) - each model")+
geom_point(aes(size=n1))+
#geom_smooth(method="lm", se=FALSE)+
geom_text(aes(label=time), vjust=-.5)+
geom_hline(yintercept=fit1$mean$beta.logitP[1], color="red")+
geom_abline(intercept=fit2$mean$beta.logitP[1],
slope =fit2$mean$beta.logitP[2], color="green")+
geom_abline(intercept=fit3$mean$beta.logitP[1],
slope =fit3$mean$beta.logitP[2], color="brown", linetype="solid")+
geom_abline(intercept=fit4$mean$beta.logitP[1],
slope =fit4$mean$beta.logitP[2], color="brown", linetype="dashed")+
facet_wrap(~fit, ncol=2,dir="v")
```
In the plots of *logitP* vs. *log(flow)*, the red horizontal line is from *fit1* (no covariates) and represents the mean.
The green line is from the fit of *logitP* vs *logFlow* using the default prior.
The solid brown line is from the fit of *logitP* vs *logFlow* for the weak prior and the dashed brown line is for the strong prior.
The size of the dots represents n1 the number of fish released.
So for *fit1*, there is very little data for time 1 and time 2, and so their *logitP* are basically free to vary.
For *fit2*, the default prior gives some (but not much) information about the relationship between
*logitP* and *logFlow*, so the estimates of *logitP* for periods 1 and 2 are moved "closer" to the green line.
For *fit3*, the weak prior gives more information and so the points move very close to the line.
As well, the periods with lots of data can be well fit with very little noise, and so the model
says that the among-period noise in *logitP* must also be small, and so all the points are forced to lie on the line.
For *fit4*, the very strong prior is placed on a WRONG line (the dashed brown).
Now the model is in a quandary and says that the only way the *logitP* for weeks 3, 4, etc.
with lots of data can be consistent with the dashed brown line is if the among-period variance is very large, and so
the *logitP* for weeks with very poor data are allowed to move away from the brown dashed line.
We can also compare the spline fit to the number of unmarked fish:
```{r echo=FALSE, message=FALSE, warning=FALSE,fig.width=6, fig.height=4, fig.align="center", fig.cap="Allfts: spline fit to U"}
# Look at the spline fits for each model
allplots <- gridExtra::arrangeGrob(
fit1$plots$fit.plot,
fit2$plots$fit.plot,
fit3$plots$fit.plot,
fit4$plots$fit.plot, ncol=2, as.table=FALSE)
plot(allplots)
```
The spline is only affected slightly.
We can also compare the trend of *logitP* over time:
```{r echo=FALSE, message=FALSE, warning=FALSE,fig.width=6, fig.height=4, fig.align="center", fig.cap="All fits: logitP vs time"}
# Look at the logitP over time fits for each model
allplots <- gridExtra::arrangeGrob(
fit1$plots$logitP.plot,
fit2$plots$logitP.plot,
fit3$plots$logitP.plot,
fit4$plots$logitP.plot, ncol=2, as.table=FALSE)
plot(allplots)
```
The trend over time is mostly unchanged for periods with lots of data, and for periods
with very small amounts of data, the points are allowed to vary as needed.
```{r echo=FALSE,results="hide" }
# delete extra files that were created
file.remove("data.txt" )
file.remove("CODAindex.txt" )
file.remove("CODAchain1.txt" )
file.remove("CODAchain2.txt" )
file.remove("CODAchain3.txt" )
file.remove("inits1.txt" )
file.remove("inits2.txt" )
file.remove("inits3.txt" )
file.remove("model.txt" )
```
# Adding structure to the p's with a spline.
The previous example still showed some temporal structure in the $p$'s. This additional structure
can be imposed by using a spline for the $p$'s.
## Reading in the data
Here is an example of some raw data:
```{r}
demo4.data.csv <- textConnection(
'jweek, n1, m2, u2
9, 0, 0, 4135
10, 1465, 51, 10452
11, 1106, 121, 2199
12, 229, 25, 655
13, 20, 0, 308
  14,   177,   17,     719
15, 702, 74, 973
16, 633, 94, 972
17, 1370, 62, 2386
18, 283, 10, 469
19, 647, 32, 897
20, 276, 11, 426
21, 277, 13, 407
22, 333, 15, 526
23, 3981, 242, 39969
24, 3988, 55, 17580
25, 2889, 115, 7928
26, 3119, 198, 6918
27, 2478, 80, 3578
28, 1292, 71, 1713
29, 2326, 153, 4212
30, 2528, 156, 5037
31, 2338, 275, 3315
32, 1012, 101, 1300
33, 729, 66, 989
34, 333, 44, 444
35, 269, 33, 339
36, 77, 7, 107
37, 62, 9, 79
38, 26, 3, 41
39, 20, 1, 23
40, 4757, 188, 35118
41, 2876, 8, 34534
42, 3989, 81, 14960
43, 1755, 27, 3643
44, 1527, 30, 1811
45, 485, 14, 679
46, 115, 4, 154')
demo4.data <- read.csv(demo4.data.csv, header=TRUE, as.is=TRUE, strip.white=TRUE)
print(demo4.data)
```
A preliminary plot of the empirical logit (excluding those weeks when the trap was not running) shows some temporal structure:
```{r echo=FALSE, fig.height=4, fig.width=6, fig.caption="Logit(p) vs julian week", warning=FALSE, message=FALSE}
demo4.data$elogitphat <- log( (demo4.data$m2+.5)/(demo4.data$n1+1) / (1 - (demo4.data$m2+.5)/(demo4.data$n1+1) ))
plotdata <- demo4.data[ demo4.data$u2 >0, ]
ggplot(data=plotdata, aes(x=jweek, y=elogitphat))+
ggtitle("Empirical logit vs. julian week ")+
geom_point()+
geom_smooth(se=FALSE )+
ylab("Empirical logit(p)")+xlab("Julian week")
```
## Fitting the BTSPAS diagonal model and a spline for p.
We can create a set of covariates that serve as the basis for the spline over time using the $bs()$ function from the *splines* package:
```{r }
library(splines)
demo4.logitP.cov <- bs(1:length(demo4.data$n1), df=floor(length(demo4.data$n1)/4), intercept=TRUE)
head(demo4.logitP.cov)
```
The rest of the call is basically the same -- don't forget to specify the additional arguments in the call
```{r demo4.run}
library("BTSPAS")
# After which weeks is the spline allowed to jump?
demo4.jump.after <- c(22,39) # julian weeks after which jump occurs
# Which julian weeks have "bad" recapture values. These will be set to 0 or missing prior to the run.
demo4.bad.m2 <- c(41) # list julian weeks with bad m2 values. This is used in the Trinity Example
demo4.bad.u2 <- c(11)   # list julian weeks with bad u2 values. [This was arbitrary to demonstrate the feature.]
demo4.bad.n1 <- c(38) # list julian weeks with bad n1 values. [This was arbitrary to demonstrate the feature.]
# The prefix for the output files:
demo4.prefix <- "demo4-JC-2003-CH-TSPDE"
# Title for the analysis
demo4.title <- "Junction City 2003 Chinook with spline for p "
cat("*** Starting ",demo4.title, "\n\n")
# Make the call to fit the model and generate the output files
demo4.fit <- TimeStratPetersenDiagError_fit(
title=demo4.title,
prefix=demo4.prefix,
time=demo4.data$jweek,
n1=demo4.data$n1,
m2=demo4.data$m2,
u2=demo4.data$u2,
jump.after=demo4.jump.after,
logitP.cov = demo4.logitP.cov, # ***** NEW *****
bad.n1=demo4.bad.n1,
bad.m2=demo4.bad.m2,
bad.u2=demo4.bad.u2,
InitialSeed=890110,
debug=TRUE, # this generates only 10,000 iterations of the MCMC chain for checking.
save.output.to.files=FALSE)
```
```{r deletefiles4,echo=FALSE,results="hide" }
# delete extra files that were created
file.remove("data.txt" )
file.remove("CODAindex.txt" )
file.remove("CODAchain1.txt" )
file.remove("CODAchain2.txt" )
file.remove("CODAchain3.txt" )
file.remove("inits1.txt" )
file.remove("inits2.txt" )
file.remove("inits3.txt" )
file.remove("model.txt" )
```
## The output from the fit
Here is the fitted spline curve to the number of unmarked fish available in each recovery sample.
```{r message=FALSE,warning=FALSE,fig.width=6, fig.height=4, include=TRUE, fig.align="center", fig.cap=c("Fitted spline curve with covariate for p")}
demo4.fit$plots$fit.plot
```
The jump in the spline when hatchery fish are released is evident.
The distribution of the posterior sample for the total number unmarked and total abundance is available as before:
```{r fig.width=6, fig.height=4, include=TRUE, fig.align="center", fig.cap=c("Distribution of posterior samples")}
demo4.fit$plots$post.UNtot.plot
```
A plot of the $logit(P)$ is
```{r warnings=FALSE,message=FALSE,warning=FALSE, fig.width=6, fig.height=4, include=TRUE, fig.align="center", fig.cap=c("Estimates of logit(p)")}
demo4.fit$plots$logitP.plot
```
Here is a plot of the estimated $logit(p)$'s with the fitted spline for the $p$'s:
```{r, echo=FALSE, fig.height=4, fig.width=6, fig.caption="logit(p) over time with underlying spline fit"}
demo4.row.names <- rownames(demo4.fit$summary)
demo4.coeff.row.index <- grep("beta.logitP[", demo4.row.names, fixed=TRUE)
demo4.coeff.row.index <- demo4.coeff.row.index[1:ncol(demo4.logitP.cov)]
demo4.coeff <- demo4.fit$summary[demo4.coeff.row.index,"mean"]
demo4.coeff.sd <- demo4.fit$summary[demo4.coeff.row.index, "sd"]
demo4.pred.logitP <- demo4.logitP.cov %*% demo4.coeff
demo4.logitP.row.index <- grep("^logitP", demo4.row.names)
demo4.logitP <- demo4.fit$summary[demo4.logitP.row.index, "mean"] # extract the logit(P) values
plotdata <- data.frame(jweek =demo4.data$jweek,
pred =demo4.pred.logitP,
actual =demo4.logitP,
actual.lcl=demo4.fit$summary[demo4.logitP.row.index, "2.5%"],
actual.ucl=demo4.fit$summary[demo4.logitP.row.index, "97.5%"])
ggplot(data=plotdata, aes(x=jweek, y=pred))+
ggtitle("Relationship of logit(p) and time with fitted spline")+
geom_line()+
geom_errorbar(aes(ymin=actual.lcl, ymax=actual.ucl), color="blue", width=.01)+
geom_point(aes(y=actual),color="blue")+
ylab("logit(p) and 95% ci")+xlab("Julian week")
```
The underlying spline smooths the p's somewhat, especially when the credible intervals
are very wide (e.g. around julian weeks 37-40).
A summary of the posterior for each parameter is also available. In particular, here are the
summary statistics on the posterior sample for the total number unmarked and total abundance:
```{r}
demo4.fit$summary[ row.names(demo4.fit$summary) %in% c("Ntot","Utot"),]
```
The estimated total abundance from $BTSPAS$ is
`r formatC(round(demo4.fit$summary[ "Ntot","mean"]), big.mark=",", digits=0, format="f")` (SD
`r formatC(round(demo4.fit$summary[ "Ntot","sd" ]), big.mark=",", digits=0, format="f")` ) fish.
# References
Bonner, S. J., & Schwarz, C. J. (2011).
Smoothing population size estimates for time-stratified mark-recapture experiments using Bayesian P-splines.
Biometrics, 67, 1498–1507.
https://doi.org/10.1111/j.1541-0420.2011.01599.x
Schwarz, C. J., & Dempson, J. B. (1994).
Mark-recapture estimation of a salmon smolt population.
Biometrics, 50, 98–108.
|
/scratch/gouwar.j/cran-all/cranData/BTSPAS/vignettes/a-Diagonal-model.Rmd
|
---
title: "Diagonal Case - Multiple Stocks/Ages"
author: "Carl James Schwarz"
date: "`r Sys.Date()`"
output:
html_vignette:
toc: true # table of content true
toc_depth: 3 # upto three depths of headings (specified by #, ## and ###)
number_sections: true ## if you want number sections at each table header
#vignette: >
# %\VignetteIndexEntry{02 - Diagonal Case - Multiple Stocks/Ages}
# %\VignetteEncoding{UTF-8}
# %\VignetteEngine{knitr::rmarkdown_notangle}
editor_options:
chunk_output_type: inline
---
```{r setup, include = FALSE,message=FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
library(binom)
library(BTSPAS)
library(ggplot2)
max.width=70
```
# Location of vignette source and code.
Because of the length of time needed to run the vignettes, only
static vignettes have been included with this package.
The original of the vignettes and the code can be obtained from
the GitHub site at
https://github.com/cschwarz-stat-sfu-ca/BTSPAS
# Introduction
In some cases, the population of fish consists of a mixture of ages (young-of-year and juvenile) and
stocks (wild and hatchery). *BTSPAS* has a number of routines to handle three common
occurrences as explained below.
In all cases, only diagonal recoveries are allowed.
## Fixing values of $p$ or using covariates.
Refer to the vignette on the *Diagonal Case* for information about fixing values of $p$ or modelling
$p$ using covariates such as stream flow or smoothing $p$ using a temporal spline.
# Wild and Hatchery Chinook
In each stratum $j$, $n1[j]$ fish are marked and released above a rotary screw trap.
Of these, $m2[j]$ are recaptured in the stratum of release, i.e. the matrix of
releases and recoveries is diagonal.
The $n1[j]$ and $m2[j]$ establish the capture efficiency of the trap.
At the same time, $u2[j]$ unmarked fish are captured at the screw trap.
These fish are a mixture of wild and hatchery raised Chinook Salmon.
A portion (*clip.rate*) of the hatchery raised fish are adipose fin clipped and can be recognized as hatchery raised.
The unclipped fish are a mixture of wild and hatchery fish which must be separated (a rough sketch of the apportionment logic follows the list below).
Hence the $u2[j]$ are separated into:
* $u2.A[j]$ representing the number of adipose clipped fish known to be hatchery fish, and
* $u2.N[j]$ representing the number of unclipped fish which are a mixture of hatchery and wild fish.
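The clip fraction is what makes this separation possible. A minimal sketch of the underlying logic (hypothetical counts, not the demo data read in below): every clipped hatchery fish observed implies, on average, $(1-clip.frac.H)/clip.frac.H$ unclipped hatchery fish, and the remainder of $u2.N$ is attributed to wild fish. *BTSPAS* does this apportionment inside the Bayesian model; the sketch only shows the moment-type reasoning.
```{r}
# Illustration only -- hypothetical counts, not the demo data below
clip.frac.H <- 0.25   # fraction of hatchery fish that carry an adipose clip
u2.A <- 100           # clipped fish captured in a stratum (known hatchery)
u2.N <- 700           # unclipped fish captured in the same stratum (mixture)
exp.H.unclipped <- u2.A * (1 - clip.frac.H) / clip.frac.H  # expected unclipped hatchery fish
exp.W <- u2.N - exp.H.unclipped                            # remainder attributed to wild fish
c(hatchery.unclipped = exp.H.unclipped, wild = exp.W)
```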
## Reading in the data
Here is an example of some raw data that is read in:
```{r}
demo.data.csv <- textConnection(
" jweek, n1, m2, u2.A, u2.N
9, 0, 0, 0, 4135
10, 1465, 51, 0, 10452
11, 1106, 121, 0, 2199
12, 229, 25, 0, 655
13, 20, 0, 0, 308
14, 177, 17, 0, 719
15, 702, 74, 0, 973
16, 633, 94, 0, 972
17, 1370, 62, 0, 2386
18, 283, 10, 0, 469
19, 647, 32, 0, 897
20, 276, 11, 0, 426
21, 277, 13, 0, 407
22, 333, 15, 0, 526
23, 3981, 242, 9427, 30542
24, 3988, 55, 4243, 13337
25, 2889, 115, 1646, 6282
26, 3119, 198, 1366, 5552
27, 2478, 80, 619, 2959
28, 1292, 71, 258, 1455
29, 2326, 153, 637, 3575
30, 2528, 156, 753, 4284
31, 2338, 275, 412, 2903
32, 1012, 101, 173, 1127
33, 729, 66, 91, 898
34, 333, 44, 38, 406
35, 269, 33, 22, 317
36, 77, 7, 8, 99
37, 62, 9, 2, 77
38, 26, 3, 4, 37
39, 20, 1, 1, 22")
demo.data <- read.csv(demo.data.csv, header=TRUE, as.is=TRUE, strip.white=TRUE)
print(demo.data)
```
There are several unusual features of the data set:
* No fish were tagged and released in the first stratum. So no information is available
to estimate the capture efficiency at the second trap in the first week.
* In one of the recovery strata, there were no recaptures and so the
estimated recapture probability will be zero. But the data show that some unmarked
fish were captured in that stratum, so the actual efficiency must have been non-zero.
* There is one julian week where the number of unmarked fish captured suddenly jumps by
several orders of magnitude. This
jump corresponds to releases of hatchery fish into the system.
* Before the hatchery release, we know that all of the unmarked fish
without an adipose clip are wild fish.
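A quick way to see these features in the raw data is to compute the crude weekly trap efficiencies $m2/n1$ (a rough check only; the Bayesian model deals with the zero-release and zero-recapture weeks formally):
```{r}
# Crude weekly trap efficiency: NaN where no fish were released (week 9)
# and 0 where no recaptures occurred (week 13)
round(data.frame(jweek     = demo.data$jweek,
                 n1        = demo.data$n1,
                 m2        = demo.data$m2,
                 crude.eff = demo.data$m2 / demo.data$n1), 3)
```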
## Fitting the BTSPAS diagonal model
We already read in the data above. Here we set the rest of the parameters.
* the *hatch.after* variable indicates the stratum after which hatchery fish are released
* the *bad.m2* variable identifies the stratum where the $m2$ value is unusual.
* the *clip.frac.H* variable identifies what fraction of the hatchery fish have adipose fin clips.
Don't forget to set the working directory as appropriate:
```{r }
library("BTSPAS")
# After which weeks do the hatchery fish start to arrive. Prior to this point, all fish are wild and it is not
# necessary to separate out the wild vs hatchery
demo.hatch.after <- c(22) # julian weeks after which hatchery fish arrive.
# Which julian weeks have "bad" values. These will be set to 0 (releases or recaptures) or missing (unmarked) and estimated.
demo.bad.m2 <- c() # list of julian weeks with bad m2 values
demo.bad.u2.A <- c() # list of julian weeks with bad u2.A values
demo.bad.u2.N <- c() # list of julian weeks with bad u2.N values
# The clipping fraction
demo.clip.frac.H <- .25 # what fraction of the hatchery fish are adipose fin clipped
# The prefix for the output files:
demo.prefix <- "JC-2003-CH"
# Title for the analysis
demo.title <- "Junction City 2003 Chinook - Separation of Wild and Hatchery YoY Chinook"
cat("*** Starting ",demo.title, "\n\n")
# Make the call to fit the model and generate the output files
demo.fit <- TimeStratPetersenDiagErrorWHChinook_fit(
title =demo.title,
prefix =demo.prefix,
time =demo.data$jweek ,
n1 =demo.data$n1,
m2 =demo.data$m2,
u2.A =demo.data$u2.A,
u2.N =demo.data$u2.N ,
clip.frac.H= demo.clip.frac.H,
hatch.after=demo.hatch.after,
bad.m2 =demo.bad.m2,
bad.u2.A =demo.bad.u2.A,
bad.u2.N =demo.bad.u2.N,
debug=TRUE, # this generates only 10,000 iterations of the MCMC chain for checking.
save.output.to.files=FALSE)
```
```{r echo=FALSE,results="hide" }
# delete extra files that were created
file.remove("data.txt" )
file.remove("CODAindex.txt" )
file.remove("CODAchain1.txt" )
file.remove("CODAchain2.txt" )
file.remove("CODAchain3.txt" )
file.remove("inits1.txt" )
file.remove("inits2.txt" )
file.remove("inits3.txt" )
file.remove("model.txt" )
```
## The output from the fit
Here is the fitted spline curve to the number of unmarked fish available in each recovery sample
```{r message=FALSE,warning=FALSE,fig.width=6, fig.height=4, include=TRUE, fig.align="center", fig.cap=c("Fitted spline curve")}
demo.fit$plots$fit.plot
```
The separation of wild and hatchery fish is evident, as well as the start of the spline for the hatchery fish.
A plot of the $logit(P)$ is
```{r warnings=FALSE,message=FALSE,warning=FALSE, fig.width=6, fig.height=4, include=TRUE, fig.align="center", fig.cap=c("Estimates of logit(p)")}
demo.fit$plots$logitP.plot
```
In cases where there is no information (such as the first julian week), $BTSPAS$ has interpolated based on the distribution of catchability
in the other strata and so the credible interval is very wide.
A summary of the posterior for each parameter is also available. In particular, here are the
summary statistics on the posterior sample for the total number unmarked separated by wild and hatchery origin:
```{r}
demo.fit$summary[ row.names(demo.fit$summary) %in% c("Ntot","Utot","Utot.H","Utot.W"),]
```
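A simple derived quantity, such as the proportion of the unmarked run that is of wild origin, can be approximated from the posterior means using the same row names as above (a ratio of means only; for a full posterior of the ratio, the saved posterior samples would be used instead):
```{r}
# Approximate proportion of the unmarked run that is of wild origin (ratio of posterior means)
round(demo.fit$summary["Utot.W", "mean"] / demo.fit$summary["Utot", "mean"], 3)
```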
# Wild and Hatchery Chinook with YoY and Age 1 fish.
In this example, *BTSPAS* allows for
separating wild from hatchery Chinook salmon when Age-1 Chinook Salmon are present (residualized) from last year.
In each stratum $j$, $n1[j]$ fish are marked and released above a rotary screw trap.
Of these, $m2[j]$ are recaptured in the stratum of release, i.e. the matrix of
releases and recoveries is diagonal.
The $n1[j]$ and $m2[j]$ establish the capture efficiency of the trap.
At the same time, $u2[j]$ unmarked fish are captured at the screw trap.
These fish are a mixture of YoY and Age-1 wild and hatchery raised Chinook Salmon.
A portion (*clip.rate.H.YoY*, *clip.rate.H.1*) of the YoY and Age1 hatchery raised fish
are adipose fin clipped and can be recognized as hatchery raised.
The unclipped fish are a mixture of wild and hatchery fish which must be separated.
Hence the $u2[j]$ are separated into
* $u2.A.YoY[j]$ representing the number of YoY adipose clipped fish known to be hatchery fish, and
* $u2.N.YoY[j]$ representing the number of YoY unclipped fish which are a mixture of hatchery and wild fish,
* $u2.A.1  [j]$ representing the number of Age1 adipose clipped fish known to be hatchery fish, and
* $u2.N.1  [j]$ representing the number of Age1 unclipped fish which are a mixture of hatchery and wild fish.
## Reading in the data
Here is an example of some raw data that is read in:
```{r}
demo2.data.csv <- textConnection(
" jweek, n1, m2, u2.A.YoY, u2.N.YoY, u2.A.1, u2.N.1
2, 0, 0, 0, 15, 0, 10
3, 0, 0, 0, 94, 1, 90
4, 833, 52, 0, 385, 2, 112
5, 852, 67, 0, 1162, 12, 140
6, 1495, 77, 0, 592, 10, 103
7, 1356, 182, 0, 1151, 4, 94
8, 1889, 145, 0, 2258, 7, 121
9, 2934, 89, 0, 1123, 2, 80
10, 1546, 53, 0, 2277, 5, 57
11, 4001, 232, 0, 2492, 4, 27
12, 2955, 158, 0, 1579, 14, 88
13, 529, 14, 0, 1046, 5, 45
14, 1172, 49, 0, 766, 3, 13
15, 3204, 232, 0, 2702, 1, 9
16, 1328, 57, 0, 10408, 2, 18
17, 3540, 114, 0, 12145, 3, 15
18, 4791, 45, 0, 186, 0, 1
19, 4808, 11, 0, 407, 0, 2
20, 5952, 44, 0, 862, 0, 0
21, 3852, 55, 0, 465, 0, 0
22, 2621, 17, 0, 724, 0, 27
23, 2131, 37, 854, 4860, 0, 0
24, 5002, 152, 794, 3539, 0, 1
25, 3706, 120, 904, 4597, 0, 0
26, 1225, 44, 708, 3819, 0, 0
27, 723, 45, 762, 3300, 0, 0
28, 2895, 167, 1356, 5460, 0, 0
29, 1395, 117, 614, 2918, 0, 0
30, 479, 77, 420, 2252, 0, 0
31, 964, 74, 289, 1240, 0, 0
32, 2803, 288, 87, 428, 0, 0
33, 952, 51, 114, 464, 0, 0
34, 880, 126, 53, 515, 0, 0
35, 0, 0, 21, 93, 0, 0")
demo2.data <- read.csv(demo2.data.csv, header=TRUE, as.is=TRUE, strip.white=TRUE)
print(demo2.data)
```
There are several unusual features of the data set:
* No fish were tagged and released in the first two strata nor in the last stratum. So no information is available
to estimate the capture efficiency at the second trap in these strata.
* There is one julian week where the number of unmarked fish captured suddenly jumps by
several orders of magnitude. This
jump corresponds to releases of hatchery fish into the system.
* Before the hatchery release, we know that all of the unmarked fish
without an adipose clip are wild fish.
## Fitting the BTSPAS diagonal model
We already read in the data above. Here we set the rest of the parameters.
* the *hatch.after* variable indicates the stratum after which hatchery fish are released
* the *bad.m2* variable identifies the stratum where the $m2$ value is unusual.
* the *clip.frac.H.YoY* and *clip.frac.H.1* variables identify what fraction of the YoY and Age 1 hatchery fish have adipose fin clips.
Don't forget to set the working directory as appropriate:
```{r }
library("BTSPAS")
# After which weeks do the YoY hatchery fish start to arrive.
# Prior to this point, all YoY fish are wild and it is not
# necessary to separate out the YoY wild vs hatchery
demo2.hatch.after.YoY <- c(22) # julian weeks after which YoY hatchery fish arrive.
# Which julian weeks have "bad" values. These will be set to 0 (releases or recaptures) or missing (unmarked) and estimated.
demo2.bad.m2 <- c() # list of julian weeks with bad m2 values
demo2.bad.u2.A.YoY <- c() # list of julian weeks with bad u2.A.YoY values
demo2.bad.u2.N.YoY <- c() # list of julian weeks with bad u2.N.YoY values
demo2.bad.u2.A.1   <- c()     # list of julian weeks with bad u2.A.1 values
demo2.bad.u2.N.1   <- c()     # list of julian weeks with bad u2.N.1 values
# The clipping fraction for the current YoY and last year's YoY (which are now Age 1 fish)
demo2.clip.frac.H.YoY <- .25 # what fraction of the YoY hatchery fish are adipose fin clipped
demo2.clip.frac.H.1 <- .25 # what fraction of the Age1 hatchery fish are adipose fin clipped
# The prefix for the output files:
demo2.prefix <- "NF-2009-CH-WH-YoY-Age1"
# Title for the analysis
demo2.title <- "North Fork 2009 Chinook - Separation of YoY and Age 1 Wild and Hatchery Chinook"
cat("*** Starting ",demo2.title, "\n\n")
# Make the call to fit the model and generate the output files
demo2.fit <- TimeStratPetersenDiagErrorWHChinook2_fit(
title = demo2.title,
prefix = demo2.prefix,
time = demo2.data$jweek,
n1 = demo2.data$n1,
m2 = demo2.data$m2,
u2.A.YoY = demo2.data$u2.A.YoY,
u2.N.YoY = demo2.data$u2.N.YoY,
u2.A.1 = demo2.data$u2.A.1,
u2.N.1 = demo2.data$u2.N.1,
clip.frac.H.YoY= demo2.clip.frac.H.YoY,
clip.frac.H.1 = demo2.clip.frac.H.1,
hatch.after.YoY= demo2.hatch.after.YoY,
bad.m2 = demo2.bad.m2,
bad.u2.A.YoY = demo2.bad.u2.A.YoY,
bad.u2.N.YoY = demo2.bad.u2.N.YoY,
bad.u2.A.1 = demo2.bad.u2.A.1 ,
bad.u2.N.1 = demo2.bad.u2.N.1,
debug=TRUE, # this generates only 10,000 iterations of the MCMC chain for checking.
save.output.to.files=FALSE)
```
```{r echo=FALSE,results="hide" }
# delete extra files that were created
file.remove("data.txt" )
file.remove("CODAindex.txt" )
file.remove("CODAchain1.txt" )
file.remove("CODAchain2.txt" )
file.remove("CODAchain3.txt" )
file.remove("inits1.txt" )
file.remove("inits2.txt" )
file.remove("inits3.txt" )
file.remove("model.txt" )
```
## The output from the fit
Here is the fitted spline curve to the number of unmarked fish available of both stocks and ages.
```{r message=FALSE,warning=FALSE,fig.width=6, fig.height=4, include=TRUE, fig.align="center", fig.cap=c("Fitted spline curve")}
demo2.fit$plots$fit.plot
```
The separation of wild and hatchery fish is evident, as well as the start of the spline for the hatchery fish.
A plot of the $logit(P)$ is
```{r warnings=FALSE,message=FALSE,warning=FALSE, fig.width=6, fig.height=4, include=TRUE, fig.align="center", fig.cap=c("Estimates of logit(p)")}
demo2.fit$plots$logitP.plot
```
In cases where there is no information (such as the first julian week), $BTSPAS$ has interpolated based on the distribution of catchability
in the other strata and so the credible interval is very wide.
A summary of the posterior for each parameter is also available. In particular, here are the
summary statistics on the posterior sample for the total number unmarked separated by wild and hatchery origin and the different ages:
```{r}
round(demo2.fit$summary[ grepl("Utot", row.names(demo2.fit$summary)),],1)
```
# Wild and Hatchery Steelhead with YoY and Age 1 fish.
In this analysis we fit a diagonal time-stratified Petersen estimator
separating wild from hatchery Steelhead salmon.
This differs from the Wild vs Hatchery Chinook salmon in previous sections in that all hatchery raised steelhead are marked,
so there is complete separation by age and origin (wild/hatchery).
There are 3 populations of interest: Wild.YoY, Hatchery.Age1+, and Wild.Age1+.
This analysis is based on the analysis of California Junction City 2003 Steelhead data and is the example used
in the Trinity River Project.
In each stratum $j$, $n1[j]$ fish are marked and released above a rotary screw trap.
Of these, $m2[j]$ are recaptured in the stratum of release, i.e. the matrix of
releases and recoveries is diagonal.
The $n1[j]$ and $m2[j]$ establish the capture efficiency of the trap.
At the same time, $u2[j]$ unmarked fish are captured at the screw trap.
These fish are a mixture of wild and hatchery raised steelhead salmon.
The $u2[j]$ are separated into
* $u2.W.YoY[j]$ representing wild, YoY steelhead,
* $u2.W.1 [j]$ representing wild, age 1+ steelhead, and
* $u2.H.1 [j]$ representing hatchery, age 1+ steelhead.
## Reading in the data
Here is an example of some raw data that is read in:
```{r}
demo3.data.csv <- textConnection(
" jweek, n1, m2, u2.W.YoY, u2.W.1, u2.H.1
9, 0, 0, 0, 58, 0
10, 0, 0, 0, 357, 2
11, 0, 0, 0, 720, 0
12, 999, 5, 0, 850, 4643
13, 1707, 13, 11, 585, 5758
14, 1947, 39, 0, 532, 4220
15, 2109, 7, 0, 873, 2328
16, 972, 1, 0, 303, 1474
17, 687, 0, 1, 291, 875
18, 0, 0, 33, 12, 39
19, 0, 0, 31, 101, 15
20, 0, 0, 11, 47, 13
21, 0, 0, 78, 49, 26
22, 0, 0, 46, 44, 22
23, 0, 0, 35, 50, 59
24, 0, 0, 30, 38, 15
25, 0, 0, 309, 58, 8
26, 3, 0, 278, 36, 4
27, 0, 0, 207, 13, 2
28, 0, 0, 196, 5, 0
29, 0 , 0, 613, 12, 0
30, 0, 0, 764, 15, 0
31, 0, 0, 556, 11, 0
32, 0, 0, 250, 12, 0
33, 0, 0, 106, 13, 0
34, 0, 0, 413, 12, 0
35, 0, 0, 995, 28, 1
36, 0, 0, 357, 10 , 0
37, 0, 0, 181, 8, 27
38, 0, 0, 53, 3, 2
39, 0, 0, 29, 2, 0
40, 0, 0, 3, 0, 0
41, 0, 0, 5, 0, 0
42, 0, 0, 14, 4, 0
43, 0, 0, 8, 10, 0
44, 0, 0, 19, 7, 0
45, 0, 0, 46, 4, 0
46, 0, 0, 229, 7, 0")
demo3.data <- read.csv(demo3.data.csv, header=TRUE, as.is=TRUE, strip.white=TRUE)
print(demo3.data)
```
There are several unusual features of the data set:
* Marking and releasing of steelhead took place in only a few weeks! The hierarchical model
will extrapolate outside these weeks to estimate the capture rate. This is likely a very dangerous thing
to do.
* There is one julian week where the number of unmarked fish captured suddenly jumps by
several orders of magnitude. This
jump corresponds to releases of hatchery fish into the system.
* Before the hatchery release, we know that all of the unmarked fish
without an adipose clip are wild fish.
## Fitting the BTSPAS diagonal model
We already read in the data above. Here we set the rest of the parameters.
* the *hatch.after* variable indicates the stratum after which hatchery fish are released
* the *bad.m2* variable identifies the stratum where the $m2$ value is unusual.
* no clipping fractions are needed for this model because all hatchery raised steelhead are externally marked and can be identified directly.
Don't forget to set the working directory as appropriate:
```{r }
library("BTSPAS")
# After which weeks do the hatchery fish start to arrive. Prior to this point, all fish are wild and it is not
# necessary to separate out the wild vs hatchery
demo3.hatch.after <- c(11) # julian weeks after which hatchery fish arrive.
# Which julian weeks have "bad" values. These will be set to 0 (releases and recaptures) or missing (unmarked captures) and estimated.
demo3.bad.m2 <- c() # list of julian weeks with bad m2 values
demo3.bad.u2.W.YoY <- c() # list of julian weeks with bad u2.W.YoY values
demo3.bad.u2.W.1 <- c() # list of julian weeks with bad u2.W.1 values
demo3.bad.u2.H.1 <- c() # list of julian weeks with bad u2.H.1 values
# The prefix for the output files:
demo3.prefix <- "demo-JC-2003-ST-TSPDE-WH"
# Title for the analysis
demo3.title <- "Junction City 2003 Steelhead - Separation of Wild and Hatchery YoY and Age 1+ Steelhead"
cat("*** Starting ",demo3.title, "\n\n")
# Make the call to fit the model and generate the output files
demo3.fit <- TimeStratPetersenDiagErrorWHSteel_fit(
title = demo3.title,
prefix = demo3.prefix,
time = demo3.data$jweek,
n1 = demo3.data$n1,
m2 = demo3.data$m2,
u2.W.YoY = demo3.data$u2.W.YoY,
u2.W.1 = demo3.data$u2.W.1,
u2.H.1 = demo3.data$u2.H.1,
hatch.after=demo3.hatch.after,
bad.m2 = demo3.bad.m2,
bad.u2.W.YoY= demo3.bad.u2.W.YoY,
bad.u2.W.1 = demo3.bad.u2.W.1,
bad.u2.H.1 = demo3.bad.u2.H.1,
debug=TRUE, # this generates only 10,000 iterations of the MCMC chain for checking.
save.output.to.files=FALSE)
```
The final parameter (*save.output.to.files*) can be set to automatically save plots and reports to files with the appropriate prefix in the working directory.
```{r echo=FALSE,results="hide" }
# delete extra files that were created
file.remove("data.txt" )
file.remove("CODAindex.txt" )
file.remove("CODAchain1.txt" )
file.remove("CODAchain2.txt" )
file.remove("CODAchain3.txt" )
file.remove("inits1.txt" )
file.remove("inits2.txt" )
file.remove("inits3.txt" )
file.remove("model.txt" )
```
## The output from the fit
Here is the fitted spline curve to the number of unmarked fish available of both stocks and ages.
```{r message=FALSE,warning=FALSE,fig.width=6, fig.height=4, include=TRUE, fig.align="center", fig.cap=c("Fitted spline curve")}
demo3.fit$plots$fit.plot
```
The separation of wild and hatchery fish is evident, as well as the start of the spline for the hatchery fish.
A plot of the $logit(P)$ is
```{r warnings=FALSE,message=FALSE,warning=FALSE, fig.width=6, fig.height=4, include=TRUE, fig.align="center", fig.cap=c("Estimates of logit(p)")}
demo3.fit$plots$logitP.plot
```
In cases where there is no information (such as the first julian week), $BTSPAS$ has interpolated based on the distribution of catchability
in the other strata and so the credible interval is very wide.
A summary of the posterior for each parameter is also available. In particular, here are the
summary statistics on the posterior sample for the total number unmarked separated by wild and hatchery origin and the different ages:
```{r}
demo3.fit$summary[ grepl("Utot", row.names(demo3.fit$summary)),]
```
# References
Bonner, S. J., & Schwarz, C. J. (2011).
Smoothing population size estimates for Time-Stratified Mark–Recapture experiments Using Bayesian P-Splines.
Biometrics, 67, 1498–1507.
https://doi.org/10.1111/j.1541-0420.2011.01599.x
Schwarz, C. J., & Dempson, J. B. (1994).
Mark-recapture estimation of a salmon smolt population.
Biometrics, 50, 98–108.
---
title: "Non-Diagonal Case"
author: "Carl James Schwarz"
date: "`r Sys.Date()`"
output:
html_vignette:
toc: true # table of content true
toc_depth: 3 # up to three depths of headings (specified by #, ## and ###)
number_sections: true ## if you want number sections at each table header
#vignette: >
# %\VignetteIndexEntry{03 - Non Diagonal Case}
# %\VignetteEncoding{UTF-8}
# %\VignetteEngine{knitr::rmarkdown}
editor_options:
chunk_output_type: inline
---
```{r setup, include = FALSE,message=FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
library(binom)
library(BTSPAS)
library(ggplot2)
max.width=70
```
# Location of vignette source and code.
Because of the length of time needed to run the vignettes, only
static vignettes have been included with this package.
The original of the vignettes and the code can be obtained from
the GitHub site at
https://github.com/cschwarz-stat-sfu-ca/BTSPAS
# Introduction
This case represents a generalization of the diagonal case considered in a separate
vignette. Now, rather than assuming that all recaptures from a release take place in
a single recovery stratum, recoveries could take place over multiple recovery strata.
Again, consider an experiment to estimate the number of outgoing smolts on a small river. The
run of smolts extends over several weeks. As smolts migrate, they are captured and marked
with individually numbered tags and released at the first capture location using, for example, a
fishwheel. The migration continues, and a second fishwheel takes a second sample several
kilometers down stream. At the second fishwheel, the captures consist of a mixture of marked
(from the first fishwheel) and unmarked fish.
The efficiency of the fishwheels varies over time in response to stream flow, run size passing
the wheel and other uncontrollable events. So it is unlikely that the capture probabilities are
equal over time at either location, i.e. are heterogeneous over time.
We suppose that we can temporally stratify the data into, for example, weeks, where the
capture-probabilities are (mostly) homogeneous at each wheel in each week.
But now, we allow
tagged animals to be captured in several recovery strata.
For example,
suppose that in each julian week $j$, $n1[j]$ fish are marked and released above the rotary screw trap.
Of these, $m2[j,j]$ are recaptured in julian week $j$;
$m2[j,j+1]$ are recaptured in julian week $j+1$;
$m2[j,j+2]$ are recaptured in julian week $j+2$ and so on.
At the same time, $u2[j]$ unmarked fish are captured at the screw trap.
This implies that the data can be structured
as a **non-diagonal** array similar to:
```{}
Recovery Stratum
tagged rs1 rs2 rs3 ...rs4 rsk rs(k+1)
Marking ms1 n1[1] m2[1,1] m2[1,2] m2[1,3] m2[1,4] 0 ... 0 0
Stratum ms2 n1[2] 0 m2[2,2] m2[2,3] m2[2,4] .... 0 ... 0 0
ms3 n1[3] 0 0 m2[3,3] m2[3,4] ... 0 ... 0 0
...
msk n1[k] 0 0 0 ... 0 0 m2[k,k] m2[k,k+1]
Newly
Untagged u2[1] u2[2] u2[3] ... u2[k] u2[k,k+1]
captured
```
Here the tagging and recapture events have been stratified in to $k$ temporal strata.
Marked fish from one stratum tend to spread out and are recaptured over multiple strata.
Several additional recovery strata are needed at the end of the experiment to fully
capture the final release stratum.
Because the lower diagonal of the recovery matrix is zero, the data can be entered
in a shorthand fashion by showing the recoveries in the same stratum as release,
the next stratum, etc, up to a maximum number of recovery strata per release.
## Fixing values of $p$ or using covariates.
Refer to the vignette on the *Diagonal Case* for information about fixing values of $p$ or modelling
$p$ using covariates such as stream flow or smoothing $p$ using a temporal spline.
# Example of basic non-diagonal BTSPAS fit.
## Reading in the data
Here is an example of some raw data that is read in:
```{r}
demo.data.csv <- textConnection(
"Date , n1 , X0 , X1 , X2 , X3 , X4
1987-04-26 , 8 , 0 , 0 , 0 , 0 , 2
1987-04-27 , 5 , 0 , 0 , 0 , 0 , 0
1987-04-28 , 6 , 0 , 0 , 0 , 0 , 0
1987-04-29 , 17 , 0 , 0 , 2 , 1 , 1
1987-04-30 , 66 , 0 , 1 , 0 , 2 , 3
1987-05-01 , 193 , 0 , 1 , 7 , 7 , 2
1987-05-02 , 90 , 0 , 2 , 0 , 0 , 0
1987-05-03 , 260 , 0 , 0 , 14 , 6 , 1
1987-05-04 , 368 , 0 , 9 , 46 , 4 , 2
1987-05-05 , 506 , 0 , 38 , 33 , 11 , 0
1987-05-06 , 317 , 1 , 27 , 26 , 3 , 1
1987-05-07 , 43 , 0 , 4 , 3 , 0 , 2
1987-05-08 , 259 , 1 , 42 , 5 , 2 , 0
1987-05-09 , 259 , 1 , 32 , 27 , 1 , 0
1987-05-10 , 249 , 1 , 85 , 3 , 1 , 0
1987-05-11 , 250 , 3 , 21 , 19 , 2 , 0
1987-05-12 , 298 , 42 , 16 , 11 , 9 , 1
1987-05-13 , 250 , 1 , 7 , 25 , 6 , 4
1987-05-14 , 193 , 0 , 9 , 18 , 8 , 0
1987-05-15 , 207 , 0 , 17 , 21 , 2 , 0
1987-05-16 , 175 , 0 , 18 , 10 , 1 , 0
1987-05-17 , 141 , 0 , 12 , 14 , 7 , 1
1987-05-18 , 155 , 0 , 1 , 19 , 13 , 6
1987-05-19 , 123 , 0 , 5 , 22 , 5 , 0
1987-05-20 , 128 , 0 , 6 , 17 , 2 , 1
1987-05-21 , 72 , 0 , 11 , 9 , 2 , 0
1987-05-22 , 57 , 0 , 6 , 8 , 0 , 1
1987-05-23 , 49 , 0 , 4 , 2 , 1 , 0
1987-05-24 , 57, 14 , 2 , 1 , 0 , 0
1987-05-25 , 18 , 0 , 3 , 0 , 0 , 0
1987-05-26 , 20 , 0 , 3 , 4 , 0 , 0
1987-05-27 , 16 , 0 , 3 , 0 , 0 , 0
1987-05-28 , 15 , 0 , 0 , 2 , 0 , 0
1987-05-29 , 10 , 0 , 1 , 0 , 1 , 0
1987-05-30 , 13 , 0 , 0 , 2 , 0 , 0
1987-05-31 , 8 , 0 , 3 , 1 , 0 , 0
1987-06-01 , 2 , 0 , 1 , 0 , 0 , 0
1987-06-02 , 23 , 0 , 6 , 0 , 0 , 0
1987-06-03 , 20 , 0 , 2 , 0 , 0 , 0
1987-06-04 , 10 , 0 , 4 , 1 , 0 , 0
1987-06-05 , 10 , 3 , 1 , 0 , 0 , 0
1987-06-06 , 5 , 0 , 2 , 0 , 0 , 1
1987-06-07 , 2 , 0 , 0 , 0 , 0 , 0
1987-06-08 , 2 , 0 , 1 , 0 , 0 , 0 ")
demo.data <- read.csv(demo.data.csv, header=TRUE, as.is=TRUE, strip.white=TRUE)
print(demo.data)
demo.data$Date <- as.Date(demo.data$Date, "%Y-%m-%d")
demo.data$jday <- as.numeric(format(demo.data$Date, "%j"))
```
Here the strata are days (rather than weeks).
There are `r nrow(demo.data)` release strata, and tagged fish are recovered in the stratum of release plus another
`r ncol(demo.data)-4` strata.
In the first release stratum, a total of `r demo.data[1,"n1"]` fish were tagged and released.
No recoveries occurred until 4 days later.
Because the recoveries take place in more strata than releases, the $u2$ vector is read in separately.
Note that it must be sufficiently long
to account for the number of releases plus potential movement, for a length of
`r nrow(demo.data)+ ncol(demo.data)-4`:
```{r }
demo.data.u2 <- c(0 , 2 , 1 , 2 , 39 , 226 , 75 , 129 , 120 , 380 ,
921, 1005 , 1181 ,1087 , 1108 ,1685 ,671 ,1766 , 636 , 483 ,
170, 269 , 212 , 260 , 154 , 145 , 99 , 58 , 74 , 40 ,
50, 59 , 40 , 9 , 14 , 13 , 22 , 24 , 33 , 19 ,
12, 7 , 4 , 0 , 0 , 59 , 0 , 0 )
```
We also separate out the recoveries $m2$ into a matrix
```{r }
demo.data.m2 <- as.matrix(demo.data[,c("X0","X1","X2","X3","X4")])
```
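Before fitting, it is worth confirming that the pieces line up: the $u2$ vector must cover every recovery stratum, i.e. the release strata plus the extra strata of movement implied by the recovery matrix. A simple sanity check:
```{r}
# u2 must span the release strata plus the extra movement columns (X1 to X4)
length(demo.data.u2) == nrow(demo.data.m2) + ncol(demo.data.m2) - 1
```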
```{r pp,message=FALSE,echo=FALSE}
Nhat <-BTSPAS::SimplePetersen( sum(demo.data$n1), sum(demo.data.m2), sum(demo.data.u2))
```
## Preliminary screening of the data
A pooled-Petersen estimator would add all of the marked, recaptured and unmarked fish
to give an estimate of `r formatC( round(Nhat$N.est), digits=0, width=20, format="f",big.mark=",")`.
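The pooled estimate quoted above can be approximated by hand with the simple Petersen formula applied to the pooled totals, $N = n1 \times (m2 + u2) / m2$; this is only a sketch, and *BTSPAS::SimplePetersen* may apply a small bias correction, so the result can differ slightly:
```{r}
# Pooled (simple) Petersen by hand on the pooled totals
n1.tot <- sum(demo.data$n1)
m2.tot <- sum(demo.data.m2)
u2.tot <- sum(demo.data.u2)
round(n1.tot * (m2.tot + u2.tot) / m2.tot)
```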
Let us look at the data in more detail:
* In many of the release strata, only a small number of fish were tagged and released.
* Most of the recoveries occur soon after release, but there is a longer right tail.
* It is difficult to estimate capture efficiency directly because of the smearing of
releases over multiple recovery strata.
Let us look at the pattern of unmarked fish captured at the second trap:
```{r umarked, echo=FALSE, fig.width=6, fig.height=4, include=TRUE, fig.align="center", fig.cap=c("Observed number of unmarked recaptures"), warning=FALSE}
plotdata <- data.frame(jday=min(demo.data$jday):(min(demo.data$jday)+length(demo.data.u2)-1),
u2 = demo.data.u2)
ggplot(data=plotdata, aes(x=jday, y=log(u2)))+
ggtitle("Number of unmarked fish captured by julian day")+
geom_point()+
geom_line()+
xlab("Julian day")+
ylab("log(Number of unmarked fish captured (u2))")
```
There appears to be a gradual rise and fall in the number of unmarked fish with no sudden jumps.
Something funny appears to have happened around julian day 160 -- I suspect that some pooling of data has occurred here.
*BTSPAS* provides two fitting routines:
* Non-diagonal, where the travel time of fish between release and arrival at the second trap follows a log-normal distribution, as explained in
Dempson and Schwarz (2009) and extended by Bonner and Schwarz (2011).
* Non-diagonal, where a non-parametric movement distribution is assumed, as explained in
Bonner and Schwarz (2011).
# Fitting the BTSPAS non-diagonal model with a log-normal movement distribution.
Bonner and Schwarz (2011) developed a model with the following features.
* Log-normal distribution for the times between release and availability at the second
trap.
* A spline is used to smooth the total number of unmarked fish presenting themselves at the second trap
over the strata
* A hierarchical model for the capture-probabilities is assumed where individual stratum capture
probabilities are assumed to vary around a common mean.
The model also allows the user to use covariates to explain some of the variation in the
capture probabilities in much the same way as the diagonal case.
The $BTSPAS$ package also has additional features and options:
* if $u2$ is missing for any stratum, the program will use the spline to interpolate the number of unmarked
fish in the population for the
missing stratum.
* if $n1$ and the entire corresponding row of $m2$ are 0, the program will use the hierarchical model to interpolate the capture
probabilities for the missing strata because there is no information about recapture probabilities when $n1=0$.
* the program allows you specify break points in the underlying spline to account
for external events.
* sometimes bad things happen. The vector $bad.m2$ indicates in which julian weeks something went wrong.
In the above example, the
number of recoveries in julian week 41 is far below expectations and leads to an impossible
Petersen estimate for julian week 41.
Similarly, the vector $bad.u2$ indicates in which julian weeks the number of unmarked fish is suspect.
In both cases, the suspect values of $n1$ and $m2$ are set to 0 and the suspect values of $u2$ are set to missing.
Alternatively, the user can set the suspect $n1$ and $m2$ values to 0,
and the suspect $u2$ values to missing in the data input directly.
I arbitrarily chose the third julian week to demonstrate this feature.
The $BTSPAS$ function also allows you specify
* The prefix is used to identify the output files for this run.
* The title is used to title the output.
* Various parameters to control the Bayesian MCMC phase of model fitting. Please contact us for help in setting these
if problems arise.
We already read in the data above. Here we set the rest of the parameters. Don't forget to set the working directory as appropriate
```{r }
library("BTSPAS")
# After which weeks is the spline allowed to jump?
demo.jump.after <- c() # julian weeks after which jump occurs
# Which julian weeks have "bad" recapture values. These will be set to 0 or missing prior to the model fitting.
demo.bad.m2 <- c(117) # list julian weeks with bad m2 values.
demo.bad.u2 <- c() # list julian weeks with bad u2 values.
demo.bad.n1 <- c(117) # list julian weeks with bad n1 values.
# The prefix for the output files:
demo.prefix <- "demo-1987-Conne River-TSP NDE"
# Title for the analysis
demo.title <- "Conne River 1987 Atlantic Salmon Smolts - Log-normal"
cat("*** Starting ",demo.title, "\n\n")
# Make the call to fit the model and generate the output files
demo.fit <- TimeStratPetersenNonDiagError_fit(
title= demo.title,
prefix= demo.prefix,
time= min(demo.data$jday):(min(demo.data$jday)+length(demo.data.u2)-1),
n1= demo.data$n1,
m2= demo.data.m2,
u2= demo.data.u2,
jump.after= demo.jump.after,
bad.n1= demo.bad.n1,
bad.m2= demo.bad.m2,
bad.u2= demo.bad.u2,
debug=TRUE, # save time by reducing number of MCMC iterations
save.output.to.files=FALSE)
```
The final parameter (*save.output.to.files*) can be set to automatically save plots and reports to files with the appropriate prefix in the working directory.
```{r deletefiles,echo=FALSE,results="hide" }
# delete extra files that were created
file.remove("data.txt" )
file.remove("CODAindex.txt" )
file.remove("CODAchain1.txt" )
file.remove("CODAchain2.txt" )
file.remove("CODAchain3.txt" )
file.remove("inits1.txt" )
file.remove("inits2.txt" )
file.remove("inits3.txt" )
file.remove("model.txt" )
```
## The output from the fit
The final object has many components
```{r components, results="hide"}
names(demo.fit)
```
```{r echo=FALSE}
save.options <- options()
options(width=max.width)
names(demo.fit)
options(save.options)
```
The *plots* sub-object contains many plots:
```{r results='hide'}
names(demo.fit$plots)
```
```{r echo=FALSE}
save.options <- options()
options(width=max.width)
names(demo.fit$plots)
options(save.options)
```
In particular, it contains plots of the initial spline fit (*init.plot*),
the final fitted spline (*fit.plot*),
the estimated capture probabilities (on the logit scale) (*logitP.plot*),
plots of the distribution of the posterior sample
for the total unmarked and marked fish (*post.UNtot.plot*)
and model diagnostic plots (goodness of fit (*gof.plot*), trace (*trace...plot*), and autocorrelation plots (*act.Utot.plot*)).
These plots are all created using the $ggplot2$ package, so the user can modify the plots (e.g. change titles, etc.).
The $BTSPAS$ program also creates a report, which includes information about the data used in the fitting,
the pooled- and stratified-Petersen estimates, a test for pooling, and summaries of the posterior. Only the first few lines
are shown below:
```{r }
head(demo.fit$report)
```
Here is the fitted spline curve to the number of unmarked fish available in each recovery sample
```{r message=FALSE,warning=FALSE,fig.width=6, fig.height=4, include=TRUE, fig.align="center", fig.cap=c("Fitted spline curve")}
demo.fit$plots$fit.plot
```
The distribution of the posterior sample for the total number unmarked and total abundance is available:
```{r fig.width=6, fig.height=4, include=TRUE, fig.align="center", fig.cap=c("Distribution of posterior samples")}
demo.fit$plots$post.UNtot.plot
```
A plot of the $logit(P)$ is
```{r warnings=FALSE,message=FALSE,warning=FALSE, fig.width=6, fig.height=4, include=TRUE, fig.align="center", fig.cap=c("Estimates of logit(p)")}
demo.fit$plots$logitP.plot
```
In cases where there is little information, $BTSPAS$ has shared information based on the distribution of catchability
in the other strata.
A summary of the posterior for each parameter is also available. In particular, here are the
summary statistics on the posterior sample for the total number unmarked and total abundance:
```{r}
demo.fit$summary[ row.names(demo.fit$summary) %in% c("Ntot","Utot"),]
```
This also includes the Rubin-Brooks-Gelman statistic ($Rhat$) on mixing of the chains and the effective sample size
of the posterior (after
accounting for autocorrelation).
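If the convergence diagnostics are wanted on their own, they can be pulled from the same summary table. A minimal sketch, assuming the usual *Rhat* and *n.eff* column names:
```{r eval=FALSE}
# Sketch only: assumes the summary matrix carries "Rhat" and "n.eff" columns
demo.fit$summary[ c("Ntot","Utot"), c("Rhat","n.eff")]
```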
The estimated total abundance from $BTSPAS$ is
`r formatC(round(demo.fit$summary[ "Ntot","mean"]), big.mark=",", digits=0, format="f")` (SD
`r formatC(round(demo.fit$summary[ "Ntot","sd" ]), big.mark=",", digits=0, format="f")` ) fish.
The model has a "base" log-normal distribution for the travel times between the release and recovery
strata. The summary of the posterior distribution of its parameters for the log(travel time) are:
```{r}
round(demo.fit$summary[ row.names(demo.fit$summary) %in% c("baseMu","baseSd"),],3)
```
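Because travel times are modelled on the log scale, the base parameters can be translated back to days. For a log-normal distribution the median travel time is $exp(baseMu)$; a rough back-transformation of the posterior means is sketched below (an approximation only, since it plugs in posterior means rather than propagating the full posterior):
```{r}
# Implied median travel time (days) and a rough central 80% range,
# back-transformed from the posterior means of baseMu and baseSd
baseMu <- demo.fit$summary["baseMu", "mean"]
baseSd <- demo.fit$summary["baseSd", "mean"]
round(c(median.days = exp(baseMu),
        lower.10    = exp(baseMu - 1.28 * baseSd),
        upper.90    = exp(baseMu + 1.28 * baseSd)), 2)
```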
Each release stratum is allowed to have a travel time distribution that differs from this base travel time distribution,
by allowing the individual release stratum parameters to be sampled from a distribution around the above values.
Posterior samples represent the mean and standard deviation of the log(travel time) between
the release and recovery strata. Here are the results for the first 5 strata:
```{r}
round(demo.fit$summary[ grepl("muLogTT", row.names(demo.fit$summary)),][1:5,],3)
round(demo.fit$summary[ grepl("sdLogTT", row.names(demo.fit$summary)),][1:5,],3)
```
It is also possible to see the probability of moving from release stratum $i$ to recovery stratum $j$ by looking
at the $Theta[i,j]$ values. Here are the transition probabilities for the first release stratum:
```{r}
round(demo.fit$summary[ grepl("Theta[1,", row.names(demo.fit$summary),fixed=TRUE),][1:10,],3)
```
The probabilities should sum to 1 for each release group.
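A quick check on the reported values is to sum the posterior means over the recovery strata for one release group; the total should be close to 1 (a check on the means only, not on every posterior draw):
```{r}
# Posterior means of Theta[1,] should sum to approximately 1
sum(demo.fit$summary[ grepl("Theta[1,", row.names(demo.fit$summary), fixed=TRUE), "mean"])
```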
Samples from the posterior are also included in the *sims.matrix*, *sims.array* and *sims.list* elements
of the results object.
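These samples can be used to answer probability questions directly. A minimal sketch, assuming the usual structure in which the *sims.list* element contains a numeric vector *Ntot* of posterior draws of total abundance; the threshold *N0* is purely hypothetical:
```{r eval=FALSE}
# Sketch only: assumes demo.fit$sims.list$Ntot is a numeric vector of posterior draws
N0 <- 100000                        # hypothetical management threshold
mean(demo.fit$sims.list$Ntot > N0)  # posterior Pr(Ntot > N0)
```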
It is always important to do model assessment before accepting the results from the model fit.
Please contact me for details on how to interpret
the goodness of fit, trace, and autocorrelation plots.
# Fitting the BTSPAS non-diagonal model with a non-parametric movement distribution.
Bonner and Schwarz (2011) developed a model with the following features.
* Non-parametric distribution for the times between release and availability at the second
trap.
* A spline is used to smooth the total number of unmarked fish presenting themselves at the second trap
over the strata
* A hierarchical model for the capture-probabilities is assumed where individual stratum capture
probabilities are assumed to vary around a common mean.
The model also allows the user to use covariates to explain some of the variation in the
capture probabilities in much the same way as the diagonal case.
The $BTSPAS$ package also has additional features and options:
* if $u2$ is missing for any stratum, the program will use the spline to interpolate the number of unmarked
fish in the population for the
missing stratum.
* if $n1$ and/or the entire corresponding row of $m2$ are 0, the program will use the hierarchical model to interpolate the capture
probabilities for the missing strata. This is often useful when releases did not take place in a stratum (e.g. trap not running)
or something went wrong in that stratum of release.
* the program allows you specify break points in the underlying spline to account
for external events.
* sometimes bad things happen. The vector $bad.m2$ indicates in which julian weeks something went wrong. In the above example, the
number of recoveries in julian week 41 is far below expectations and leads to an impossible
Petersen estimate for julian week 41. Similarly, the vector $bad.u2$ indicates in which julian weeks the number of unmarked fish is suspect.
In both cases, the suspect values of $n1$ and $m2$ are set to 0 and the suspect values in $u2$ are set to missing.
Alternatively, the user can set the suspect $n1$ and $m2$ values to zero and the suspect $u2$ values to missing in the data input directly.
I arbitrarily chose the third julian week to demonstrate this feature.
The $BTSPAS$ function also allows you specify
* The prefix is used to identify the output files for this run.
* The title is used to title the output.
* Various parameters to control the Bayesian MCMC phase of model fitting. Please contact us for help in setting these
if problems arise.
We already read in the data above. Here we set the rest of the parameters. Don't forget to set the working directory as appropriate
```{r }
library("BTSPAS")
# After which weeks is the spline allowed to jump?
demo2.jump.after <- c() # julian weeks after which jump occurs
# Which julian weeks have "bad" recapture values. These will be set to 0 or missing prior to the model fit.
demo2.bad.m2 <- c() # list julian weeks with bad m2 values. This is used in the Trinity Example
demo2.bad.u2       <- c()          # list julian weeks with bad u2 values. [This was arbitrary to demonstrate the feature.]
demo2.bad.n1 <- c() # list julian weeks with bad n1 values. [This was arbitrary to demonstrate the feature.]
# The prefix for the output files:
demo2.prefix <- "demo2-1987-Conne River-TSP NDE NP"
# Title for the analysis
demo2.title <- "Conne River 1987 Atlantic Salmon Smolts - Non-parametric"
cat("*** Starting ",demo2.title, "\n\n")
# Make the call to fit the model and generate the output files
demo2.fit <- TimeStratPetersenNonDiagErrorNP_fit( # notice change in function name
title= demo2.title,
prefix= demo2.prefix,
time= min(demo.data$jday):(min(demo.data$jday)+length(demo.data.u2)-1),
n1= demo.data$n1,
m2= demo.data.m2,
u2= demo.data.u2,
jump.after= demo2.jump.after,
bad.n1= demo2.bad.n1,
bad.m2= demo2.bad.m2,
bad.u2= demo2.bad.u2,
debug=TRUE, # save time by reducing number of MCMC iterations
save.output.to.files=FALSE)
```
The final parameter (*save.output.to.files*) can be set to automatically save plots and reports to files with the appropriate prefix in the working directory.
```{r deletefiles2,echo=FALSE,results="hide" }
# delete extra files that were created
file.remove("data.txt" )
file.remove("CODAindex.txt" )
file.remove("CODAchain1.txt" )
file.remove("CODAchain2.txt" )
file.remove("CODAchain3.txt" )
file.remove("inits1.txt" )
file.remove("inits2.txt" )
file.remove("inits3.txt" )
file.remove("model.txt" )
```
## The output from the fit
Here is the fitted spline curve to the number of unmarked fish available in each recovery sample
```{r message=FALSE,warning=FALSE,fig.width=6, fig.height=4, include=TRUE, fig.align="center", fig.cap=c("Fitted spline curve")}
demo2.fit$plots$fit.plot
```
The distribution of the posterior sample for the total number unmarked and total abundance is available:
```{r fig.width=6, fig.height=4, include=TRUE, fig.align="center", fig.cap=c("Distribution of posterior samples")}
demo2.fit$plots$post.UNtot.plot
```
A plot of the $logit(P)$ is
```{r warnings=FALSE,message=FALSE,warning=FALSE, fig.width=6, fig.height=4, include=TRUE, fig.align="center", fig.cap=c("Estimates of logit(p)")}
demo2.fit$plots$logitP.plot
```
In cases where there is little information, $BTSPAS$ has shared information based on the distribution of catchability
in the other strata.
A summary of the posterior for each parameter is also available. In particular, here are the
summary statistics on the posterior sample for the total number unmarked and total abundance:
```{r}
demo2.fit$summary[ row.names(demo2.fit$summary) %in% c("Ntot","Utot"),]
```
This also includes the Rubin-Brooks-Gelman statistic ($Rhat$) on mixing of the chains and the effective sample size
of the posterior (after
accounting for autocorrelation).
The estimated total abundance from $BTSPAS$ is
`r formatC(round(demo2.fit$summary[ "Ntot","mean"]), big.mark=",", digits=0, format="f")` (SD
`r formatC(round(demo2.fit$summary[ "Ntot","sd" ]), big.mark=",", digits=0, format="f")` ) fish.
The estimated distribution function is allowed to vary by release stratum around a common "mean" distribution.
```{r }
probs <- demo2.fit$summary[grepl("movep", row.names(demo2.fit$summary)), ]
round(probs,3)
```
So we expect that about `r round(probs[1,"mean"]*100,0)`% of fish will migrate to the second trap on the day of release;
about `r round(probs[2,"mean"]*100,0)`% of fish will arrive on the day after release, and so on.
The movement for each release stratum varies around this base distribution.
It is also possible to see the probability of moving from release stratum $i$ to recovery stratum $j$ by looking
at the $Theta[i,j]$ values. Here are the transition probabilities for the first release stratum:
```{r}
round(demo2.fit$summary[ grepl("Theta[1,", row.names(demo2.fit$summary),fixed=TRUE),],3)
```
The probabilities should also sum to 1 for each release group.
It is always important to do model assessment before accepting the results from the model fit.
Please contact me for details on how to interpret
the goodness of fit, trace, and autocorrelation plots.
# Prior information on the movement probabilities.
It is possible to impose prior information on the movement probabilities in both cases. This would be useful
in cases with VERY sparse data!
In the non-parametric case, specify a vector that gives the relative weights of belief about movement.
These are similar to a Dirichlet-type prior where the values represent belief in the distribution of travel times.
For example, $prior.muTT=c(1,4,3,2)$ represents a system where the maximum travel time is 3 strata after release, with $1/10=.1$
of the animals moving in the stratum of release, $4/10=.4$ of the animals taking 1 stratum to move, etc.
So if $prior.muTT=c(10,40,30,20)$, this represents the same movement pattern but a stronger degree of belief because all of the numbers
are larger. An intuitive explanation is that $sum(prior.muTT)$ represents the number of animals observed to
make this travel time distribution.
Here we will fit a fairly strong prior on the movement probabilities:
```{r }
demo3.prior.muTT=c(10,50,30,5,5)
```
where the probability of movement in the stratum of release and subsequent strata is
```{r}
round(demo3.prior.muTT/sum(demo3.prior.muTT),2)
```
We already read in the data above. Here we set the rest of the parameters. Don't forget to set the working directory as appropriate
```{r }
library("BTSPAS")
# After which weeks is the spline allowed to jump?
demo3.jump.after <- c() # julian weeks after which jump occurs
# Which julian weeks have "bad" recapture values. These will be set to 0 or missing as needed.
demo3.bad.m2 <- c() # list julian weeks with bad m2 values. This is used in the Trinity Example
demo3.bad.u2 <- c() # list julian weeks with bad u2 values. [This was arbitrary to demonstrate the feature.]
demo3.bad.n1 <- c() # list julian weeks with bad n1 values. [This was arbitrary to demonstrate the feature.]
# The prefix for the output files:
demo3.prefix <- "demo3-1987-Conne River-TSP NDE NP- prior"
# Title for the analysis
demo3.title <- "Conne River 1987 Atlantic Salmon Smolts - Non-parametric - Strong Prior"
cat("*** Starting ",demo3.title, "\n\n")
# Make the call to fit the model and generate the output files
demo3.fit <- TimeStratPetersenNonDiagErrorNP_fit( # notice change in function name
title= demo3.title,
prefix= demo3.prefix,
time= min(demo.data$jday):(min(demo.data$jday)+length(demo.data.u2)-1),
n1= demo.data$n1,
m2= demo.data.m2,
u2= demo.data.u2,
                  prior.muTT= demo3.prior.muTT, # prior on movements
jump.after= demo3.jump.after,
bad.n1= demo3.bad.n1,
bad.m2= demo3.bad.m2,
bad.u2= demo3.bad.u2,
debug=TRUE, # save time by reducing number of MCMC iterations
save.output.to.files=FALSE)
```
The final parameter (*save.output.to.files*) can be set to automatically save plots and reports to files with the appropriate prefix in the working directory.
```{r deletefiles3,echo=FALSE,results="hide" }
# delete extra files that were created
file.remove("data.txt" )
file.remove("CODAindex.txt" )
file.remove("CODAchain1.txt" )
file.remove("CODAchain2.txt" )
file.remove("CODAchain3.txt" )
file.remove("inits1.txt" )
file.remove("inits2.txt" )
file.remove("inits3.txt" )
file.remove("model.txt" )
```
## The output from the fit
Here is the fitted spline curve to the number of unmarked fish available in each recovery sample
```{r message=FALSE,warning=FALSE,fig.width=6, fig.height=4, include=TRUE, fig.align="center", fig.cap=c("Fitted spline curve")}
demo3.fit$plots$fit.plot
```
The distribution of the posterior sample for the total number unmarked and total abundance is available:
```{r fig.width=6, fig.height=4, include=TRUE, fig.align="center", fig.cap=c("Distribution of posterior samples")}
demo3.fit$plots$post.UNtot.plot
```
A plot of the $logit(P)$ is
```{r warnings=FALSE,message=FALSE,warning=FALSE, fig.width=6, fig.height=4, include=TRUE, fig.align="center", fig.cap=c("Estimates of logit(p)")}
demo3.fit$plots$logitP.plot
```
In cases where there is little information, $BTSPAS$ has shared information based on the distribution of catchability
in the other strata.
A summary of the posterior for each parameter is also available. In particular, here are the
summary statistics on the posterior sample for the total number unmarked and total abundance:
```{r}
demo3.fit$summary[ row.names(demo3.fit$summary) %in% c("Ntot","Utot"),]
```
This also includes the Rubin-Brooks-Gelman statistic ($Rhat$) on mixing of the chains and the effective sample size
of the posterior (after
accounting for autocorrelation).
The estimated total abundance from $BTSPAS$ is
`r formatC(round(demo3.fit$summary[ "Ntot","mean"]), big.mark=",", digits=0, format="f")` (SD
`r formatC(round(demo3.fit$summary[ "Ntot","sd" ]), big.mark=",", digits=0, format="f")` ) fish.
The estimated distribution function is allowed to vary by release stratum around a common "mean" distribution.
```{r }
probs <- demo3.fit$summary[grepl("movep", row.names(demo3.fit$summary)), ]
round(probs,3)
```
So we expect that about `r round(probs[1,"mean"]*100,0)`% of fish will migrate to the second trap on the day of release;
about `r round(probs[2,"mean"]*100,0)`% of fish will arrive on the day after release, and so on.
The movement for each release stratum varies around this base distribution.
It is also possible to see the probability of moving from release stratum $i$ to recovery stratum $j$ by looking
at the $Theta[i,j]$ values. Here are the transition probabilities for the first release stratum:
```{r}
round(demo3.fit$summary[ grepl("Theta[1,", row.names(demo3.fit$summary),fixed=TRUE),],3)
```
The probabilities should also sum to 1 for each release group.
It is always important to do model assessment before accepting the results from the model fit.
Please contact me for details on how to interpret
the goodness of fit, trace, and autocorrelation plots.
# References
Bonner, S. J., & Schwarz, C. J. (2011).
Smoothing population size estimates for Time-Stratified Mark–Recapture experiments Using Bayesian P-Splines.
Biometrics, 67, 1498–1507.
https://doi.org/10.1111/j.1541-0420.2011.01599.x
Schwarz, C. J., & Dempson, J. B. (1994).
Mark-recapture estimation of a salmon smolt population.
Biometrics, 50, 98–108.
---
title: "Non-Diagonal Fall-Back Model"
author: "Carl James Schwarz"
date: "`r Sys.Date()`"
output:
html_vignette:
toc: true # table of content true
toc_depth: 3 # up to three depths of headings (specified by #, ## and ###)
number_sections: true ## if you want number sections at each table header
#vignette: >
# %\VignetteIndexEntry{04 - Non Diagonal Fall-Back Model}
# %\VignetteEncoding{UTF-8}
# %\VignetteEngine{knitr::rmarkdown}
editor_options:
chunk_output_type: inline
---
```{r setup, include = FALSE,message=FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
library(binom)
library(BTSPAS)
library(ggplot2)
max.width=70
```
# Location of vignette source and code.
Because of the length of time needed to run the vignettes, only
static vignettes have been included with this package.
The original of the vignettes and the code can be obtained from
the GitHub site at
https://github.com/cschwarz-stat-sfu-ca/BTSPAS
# Introduction
## Experimental set-up
This case represents a generalization of the non-diagonal case considered in a separate
vignette. Now we allow some fish (marked and unmarked) to approach the second trap, but fall back and
never pass the trap. Schwarz and Bonner (2011) considered this model to estimate the number of
steelhead that passed upstream of Moricetown Canyon.
The experimental setup is the same as the non-diagonal case.
Consider an experiment to estimate the number of outgoing smolts on a small river. The
run of smolts extends over several weeks. As smolts migrate, they are captured and marked
with individually numbered tags and released at the first capture location using, for example, a
fishwheel. The migration continues, and a second fishwheel takes a second sample several
kilometers down stream. At the second fishwheel, the captures consist of a mixture of marked
(from the first fishwheel) and unmarked fish.
The efficiency of the fishwheels varies over time in response to stream flow, run size passing
the wheel and other uncontrollable events. So it is unlikely that the capture probabilities are
equal over time at either location, i.e. are heterogeneous over time.
We suppose that we can temporally stratify the data into, for example, weeks, where the
capture-probabilities are (mostly) homogeneous at each wheel in each week.
But now, we allow
tagged animals to be captured in several recovery strata.
For example,
suppose that in each julian week $j$, $n1[j]$ fish are marked and released above the rotary screw trap.
Of these, $m2[j,j]$ are recaptured in julian week $j$;
$m2[j,j+1]$ are recaptured in julian week $j+1$;
$m2[j,j+2]$ are recaptured in julian week $j+2$ and so on.
At the same time, $u2[j]$ unmarked fish are captured at the screw trap.
This implies that the data can be structured
as a **non-diagonal** array similar to:
```{}
Recovery Stratum
tagged rs1 rs2 rs3 ...rs4 rsk rs(k+1)
Marking ms1 n1[1] m2[1,1] m2[1,2] m2[1,3] m2[1,4] 0 ... 0 0
Stratum ms2 n1[2] 0 m2[2,2] m2[2,3] m2[2,4] .... 0 ... 0 0
ms3 n1[3] 0 0 m2[3,3] m2[3,4] ... 0 ... 0 0
...
msk n1[k] 0 0 0 ... 0 0 m2[k,k] m2[k,k+1]
Newly
Untagged u2[1] u2[2] u2[3] ... u2[k] u2[k,k+1]
captured
```
Here the tagging and recapture events have been stratified in to $k$ temporal strata.
Marked fish from one stratum tend to spread out and are recaptured over multiple strata.
Several additional recovery strata are needed at the end of the experiment to fully
capture the final release stratum.
Because the lower diagonal of the recovery matrix is zero, the data can be entered
in a shorthand fashion by showing the recoveries in the same stratum as release,
the next stratum, etc, up to a maximum number of recovery strata per release.
## Fall-back information
This information is obtained by also marking radio-tagged fish whose ultimate fate
(i.e. did they pass the second trap nor not) can be determined.
We measure:
* $marked\_available\_n$ representing the number of radio-tagged fish.
* $marked\_available\_x$ representing the number of radio tagged fish that **PASSED** the
second trap.The $n$ and $x$ are modelled using a
binomial distribution for information on the fraction of tagged fish that DO NOT fall back, i.e.
are available at the second trap. For
example, if $n=66$ and $x=40$, then you estimate that about $40/66=61$% of tagged and untagged fish
pass the second trap and that $39$% of fish fall back never to pass the second trap.
Notice we don't really care about unmarked fish that fall back as we only estimate the number of
unmarked fish that pass the second trap, which by definition excludes those fish that never
make it to the second trap.
the fish that fall back will lead to underestimates of the trap-efficiency
and over-estimates of unmarked fish that pass the second trap.
This model could also be used for mortality between the marking and recovery trap.
## Fixing values of $p$ or using covariates.
Refer to the vignette on the *Diagonal Case* for information about fixing values of $p$ or modelling
$p$ using covariates such as stream flow or smoothing $p$ using a temporal spline.
# Example of non-diagonal model with fall-back.
## Reading in the data
Here is an example of some raw data that is read in:
```{r}
demo.data.csv <- textConnection("
jweek,n1, X0,X1 ,X2 ,X3,X4,X5,X6,X7
29 , 1 , 0 , 0 , 0 ,0 ,0 ,0 ,0 ,0
30 , 35 , 0 , 5 , 7 ,2 ,0 ,0 ,0 ,0
31 ,186 , 1 ,35 ,11 ,4 ,0 ,0 ,0 ,0
32 ,292 , 9 ,33 ,16 ,6 ,0 ,0 ,0 ,0
33 ,460 , 6 ,41 ,16 ,9 ,3 ,0 ,2 ,1
34 ,397 , 4 ,44 , 7 ,5 ,1 ,1 ,0 ,1
35 ,492 , 7 ,31 ,12 ,1 ,4 ,1 ,1 ,0
36 ,151 , 3 , 6 , 2 ,1 ,1 ,0 ,0 ,0
37 ,130 , 3 , 2 , 2 ,0 ,0 ,1 ,0 ,0
38 ,557 , 8 ,27 ,11 ,2 ,5 ,0 ,0 ,0
39 , 46 , 0 , 7 , 0 ,0 ,0 ,0 ,0 ,0
40 ,143 , 14 , 6 , 3 ,0 ,0 ,0 ,0 ,0
41 , 26 , 2 , 1 , 0 ,0 ,0 ,0 ,0 ,0")
# Read data
demo.data <- read.csv(demo.data.csv, header=TRUE, as.is=TRUE, strip.white=TRUE)
print(demo.data)
```
There are `r nrow(demo.data)` release strata.
In the first release stratum, a total of `r demo.data[1,"n1"]` fish were tagged and released.
No recoveries occurred.
Because the recoveries take place in more strata than releases, the $u2$ vector is read in separately. Note that it must be sufficiently long
to account for the number of releases plus potential movement:
```{r }
demo.data.u2 <- c( 2, 65, 325, 873, 976, 761, 869, 473, 332, 197,
177, 282, 82, 100)
```
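As a quick check, the $u2$ vector here covers one more recovery stratum than there are release strata:

```{r}
length(demo.data.u2)   # number of recovery strata with unmarked counts
nrow(demo.data)        # number of release strata
```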
We also separate out the recoveries $m2$ into a matrix
```{r }
demo.data.m2 <- as.matrix(demo.data[,c("X0","X1","X2","X3","X4","X5","X6","X7")])
```
A separate radio-telemetry study found that of 66 fish released, 40 passed the second trap:
```{r }
demo.mark.available.n <- 66
demo.mark.available.x <- 40
```
## Fitting the BTSPAS non-diagonal model with fall-back and a non-parametric movement distribution.
Schwarz and Bonner (2011) extended Bonner and Schwarz (2011) with a model having the following features:

* A non-parametric distribution for the times between release and availability at the second
trap.
* A spline is used to smooth the total number of unmarked fish presenting themselves at the second trap
over the strata.
* A hierarchical model for the capture probabilities is assumed where individual stratum capture
probabilities are assumed to vary around a common mean.
* A binomial distribution is assumed for the number of marked fish that do not fall back and pass the second trap
to estimate the trap efficiency.
The model also allows the user to use covariates to explain some of the variation in the
capture probabilities in much the same way as the diagonal case.
The $BTSPAS$ package also has additional features and options:
* if $u2$ is missing for any stratum, the program will use the spline to interpolate the
number of unmarked fish in the population for the missing stratum.
* if $n1$ and the entire corresponding row of $m2$ are 0, the program will use the hierarchical model to interpolate the capture
probabilities for the missing strata because no information is available from 0 fish released.
* the program allows you to specify break points in the underlying spline to account
for external events.
* sometimes bad things happen. The vector $bad.m2$ indicates in which julian weeks something went wrong. In the above example, the
number of recoveries in julian week 41 is far below expectations and leads to an impossible
Petersen estimate for julian week 41. Similarly, the vector $bad.u2$ indicates in which julian weeks the number of unmarked fish is suspect.
In both cases, the suspect values of $n1$ and $m2$ are set to 0, and the suspect values of $u2$ are set to missing.
Alternatively, the user can set the $n1$ and $m2$ values to 0, and set the suspect $u2$ values to missing in the data input directly.
No weeks are actually flagged as suspect in the fit below; the sketch that follows shows how a suspect week could be specified.
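For illustration only (these values are not used in the fit below), flagging julian week 41 as suspect
would be specified as:

```{r eval=FALSE}
# Hypothetical illustration only -- not used in the fit below.
# Weeks flagged via bad.m2 have their recoveries set to 0;
# weeks flagged via bad.u2 have their u2 set to missing.
demo.bad.m2 <- c(41)
demo.bad.u2 <- c(41)
```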
The $BTSPAS$ function also allows you to specify:

* The prefix that is used to identify the output files for this run.
* The title that is used to title the output.
* Various parameters to control the Bayesian MCMC phase of model fitting. Please contact us for help in setting these
if problems arise.

We already read in the data above. Here we set the rest of the parameters. Don't forget to set the working directory as appropriate.
```{r }
library("BTSPAS")
demo.prefix <- "FB-"
demo.title <- "Fall-back demo"
demo.jump.after <- NULL
## Identify spurious values in n1, m2, and u2 that should be set to 0 or missing as needed.
demo.bad.n1 <- c() # list sample times of bad n1 values
demo.bad.m2 <- c() # list sample times of bad m2 values
demo.bad.u2 <- c() # list sample times of bad u2 values
## Fix capture probabilities for strata when traps not operated
demo.logitP.fixed <- NULL
demo.logitP.fixed.values <- rep(-10,length(demo.logitP.fixed))
demo.fit <- TimeStratPetersenNonDiagErrorNPMarkAvail_fit(
title= demo.title,
prefix= demo.prefix,
time= demo.data$jweek[1]:(demo.data$jweek[1]+length(demo.data.u2)-1),
n1= demo.data$n1,
m2= demo.data.m2,
u2= demo.data.u2,
jump.after= demo.jump.after,
bad.n1= demo.bad.n1,
bad.m2= demo.bad.m2,
bad.u2= demo.bad.u2,
logitP.fixed=demo.logitP.fixed,
logitP.fixed.values=demo.logitP.fixed.values,
marked_available_n=demo.mark.available.n,
marked_available_x=demo.mark.available.x, # 40/66 fish did NOT fall back
debug=TRUE,
save.output.to.files=FALSE)
```
```{r deletefiles2,echo=FALSE,results="hide" }
# delete extra files that were created
file.remove("data.txt" )
file.remove("CODAindex.txt" )
file.remove("CODAchain1.txt" )
file.remove("CODAchain2.txt" )
file.remove("CODAchain3.txt" )
file.remove("inits1.txt" )
file.remove("inits2.txt" )
file.remove("inits3.txt" )
file.remove("model.txt" )
```
## The output from the fit
Here is the fitted spline curve for the number of unmarked fish available in each recovery stratum at the second trap:
```{r message=FALSE,warning=FALSE,fig.width=6, fig.height=4, include=TRUE, fig.align="center", fig.cap=c("Fitted spline curve")}
demo.fit$plots$fit.plot
```
The distribution of the posterior sample for the total number of unmarked fish and the total abundance that pass the second trap is available.
Note this includes the sum of the unmarked fish shown in the previous plot, plus a binomial distribution for the number
of marked fish released that pass the second trap.
```{r fig.width=6, fig.height=4, include=TRUE, fig.align="center", fig.cap=c("Distribution of posterior samples")}
demo.fit$plots$post.UNtot.plot
```
A plot of the $logit(P)$ is
```{r warnings=FALSE,message=FALSE,warning=FALSE, fig.width=6, fig.height=4, include=TRUE, fig.align="center", fig.cap=c("Estimates of logit(p)")}
demo.fit$plots$logitP.plot
```
In cases where there is little information, $BTSPAS$ shares information based on the distribution of catchability
in the other strata.
A summary of the posterior for each parameter is also available. In particular, here are the
summary statistics on the posterior sample for the total number of unmarked fish and the total abundance
**THAT PASS THE SECOND TRAP**:
```{r}
demo.fit$summary[ row.names(demo.fit$summary) %in% c("Ntot","Utot"),]
```
This also includes the Brooks-Gelman-Rubin statistic ($Rhat$) on mixing of the chains and the effective sample size
of the posterior (after
accounting for autocorrelation).
The estimated total abundance is
`r formatC(round(demo.fit$summary[ "Ntot","mean"]), big.mark=",", digits=0, format="f")` (SD
`r formatC(round(demo.fit$summary[ "Ntot","sd" ]), big.mark=",", digits=0, format="f")` ) fish.
The estimated movement distribution is allowed to vary by release stratum around a common "mean" distribution.
```{r }
probs <- demo.fit$summary[grepl("movep", row.names(demo.fit$summary)), ]
round(probs,3)
```
So we expect that about `r round(probs[1,"mean"]*100,0)`% of fish will migrate to the second trap in the stratum of release;
about `r round(probs[2,"mean"]*100,0)`% of fish will migrate to the second trap in the following stratum, etc.
The movement for each release stratum varies around this base distribution.
It is also possible to see the probability of moving from release stratum $i$ to recovery stratum $j$ by looking
at the $Theta[i,j]$ values. Here are the transition probabilities for the first release stratum:
```{r}
round(demo.fit$summary[ grepl("Theta[1,", row.names(demo.fit$summary),fixed=TRUE),],3)
```
The probabilities should also sum to 1 for each release group.
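For example, a quick check for the first release stratum (using the posterior means; the total should be
close to 1, up to rounding and MCMC error):

```{r}
sum(demo.fit$summary[ grepl("Theta[1,", row.names(demo.fit$summary), fixed=TRUE), "mean"])
```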
As with the other non-parametric non-diagonal model, you can specify a prior distribution for the
movement probabilities.
A summary of the posterior distribution for the proportion of fish that DO NOT fall back is:
```{r}
round(demo.fit$summary[ grepl("ma.p", row.names(demo.fit$summary),fixed=TRUE),],3)
```
It is always important to do model assessment before accepting the results from the model fit.
Please contact me for details on how to interpret
the goodness of fit, trace, and autocorrelation plots.
# References
Bonner, S. J., & Schwarz, C. J. (2011).
Smoothing population size estimates for time-stratified mark-recapture experiments using Bayesian P-splines.
Biometrics, 67, 1498–1507.
https://doi.org/10.1111/j.1541-0420.2011.01599.x
Schwarz, C. J. and Bonner, S. B. (2011).
A spline-based capture-mark-recapture model applied to estimating the
number of steelhead within the Bulkley River passing the Moricetown Canyon
in 2001-2010. Prepared for the B.C. Ministry of Environment.
<!---http://www.stat.sfu.ca/~cschwarz/Consulting/Moricetown/Report-2011-06-01.pdf --->
Schwarz, C. J., & Dempson, J. B. (1994).
Mark-recapture estimation of a salmon smolt population.
Biometrics, 50, 98–108.
|
/scratch/gouwar.j/cran-all/cranData/BTSPAS/vignettes/d-Non-diagonal-with-fall-back-model.Rmd
|
---
title: "Bias caused by incomplete sampling"
author: "Carl James Schwarz"
date: "`r Sys.Date()`"
output:
html_vignette:
toc: true # table of content true
toc_depth: 3 # upto three depths of headings (specified by #, ## and ###)
number_sections: true ## if you want number sections at each table header
#vignette: >
# %\VignetteIndexEntry{05 - Bias caused by incomplete sampling}
# %\VignetteEncoding{UTF-8}
# %\VignetteEngine{knitr::rmarkdown_notangle}
editor_options:
chunk_output_type: inline
---
```{r setup, include=FALSE}
set.seed(856765)
max.width=200
library(ggplot2)
library(plyr)
library(stats)
# Generate population curve
logit <- function(p){log(p/(1-p))}
expit <- function(theta){1/(1+exp(-theta))}
```
# Location of vignette source and code.
Because of the length of time needed to run the vignettes, only
static vignettes have been included with this package.
The original versions of the vignettes and the code can be obtained from
the GitHub site at
https://github.com/cschwarz-stat-sfu-ca/BTSPAS
# Introduction
This document will illustrate the potential biases caused by incomplete sampling in the recovery strata. For example,
suppose that stratification is at a weekly level. Fish are tagged and released continuously during the week.
Recoveries occur from a commercial fishery that operates for only half of each week (the first half).
This may cause bias in estimates of abundance because, for example, fish tagged at the end of a week
may arrive at the commercial fishery in the second half of the recovery week and not be subject to capture.
This causes heterogeneity in recovery probabilities that is not accounted
for in the mark-recapture analysis.
A simulated population will be created and then analyzed in several ways to
illustrate the potential extent of bias, and how to properly stratify the data to account for this problem.
This scenario was originally envisioned to be handled with the *sampfrac* argument of the *BTSPAS* routines.
However, the actual implementation is incorrect in *BTSPAS* and is deprecated. This vignette shows the
proper way to deal with this problem.
## Experimental setup
This simulated population is modelled around a capture-recapture experiment on the Taku River, which flows
between the US and Canada.
Returning salmon arrive and are captured at a fish wheel during several weeks. Those fish
captured at the fish wheel are tagged and released (daily).
They migrate upstream to a commercial fishery. The commercial fishery does not operate
on all days of the week - in particular, the fishery tends to operate during the first
part of the week until the quota for catch is reached. Then the fishery stops
until the next week.
## Generation of population
```{r genpop, include=FALSE}
# simulate the population
N <- 150000 # total run size
# Arrival time (in days) at the wheel: normal with the mean and sd set below
mean.at.wheel <- 42
sd.at.wheel <- 15
pop <- data.frame(time.at.wheel=pmin(170,pmax(1,stats::rnorm(N, mean=mean.at.wheel, sd=sd.at.wheel)))) # date of arrival
pop$date <- trunc(pop$time.at.wheel) # integer day of arrival at the wheel
pop.dist <- ggplot(data=pop, aes(x=time.at.wheel))+
ggtitle("Distribution of arrival time at tagging wheel")+
geom_histogram(breaks=0:200, alpha=0.2)+
xlab("Arrive date at tagging wheel")
```
A population of `r formatC(round(N,0), digits=0, big.mark=',', format="f")`
fish will be simulated arriving at the fish wheels according to a normal distribution with
a mean of `r mean.at.wheel` and a standard deviation of `r sd.at.wheel`. This gives a distribution of arrival times at the fish wheel of
```{r fig.width=6,fig.height=4,include=TRUE, fig.align="center", fig.cap=c("Distribution of arrival time at wheel"), echo=FALSE}
pop.dist
```
The spikes at the start and end are where the arrival time has been truncated and fish forced to arrive in the first and last days
of the run (for convenience).
If the fish wheels had a constant probability of capture, then the pooled Petersen would be unbiased regardless of what happens in
the commercial fishery. Consequently, we simulate a probability of capture at the wheel that varies around 0.05.
The distribution of capture probabilities at the wheel is:
```{r fig.width=6,fig.height=4, include=TRUE, fig.align="center", fig.cap=c("Distribution of capture probabilities at wheel"), echo=FALSE, warning=FALSE, message=FALSE}
# capture prob at wheel varies on the logit scale around logit(0.06) (sd 0.5), capped so that at most about 1000 fish per week are tagged
capture.prob <- plyr::ddply(pop, "date", plyr::summarize,
tot.fish.wheel = length(date))
capture.prob$logit.tag <- stats::rnorm(nrow(capture.prob), mean=pmin(logit(.06), logit(1000/7/capture.prob$tot.fish.wheel), na.rm=TRUE), sd=0.5)
#capture.prob$logit.tag <- stats::rnorm(nrow(capture.prob), mean=logit(.06), sd=0.5)
capture.dist <- ggplot(data=capture.prob, aes(x=expit(logit.tag)))+
ggtitle("Distribution of capture probabilities at tagging wheel")+
geom_histogram()+
xlab("Capture probability at the fish wheel")
capture.dist
```
This is used to sample from the simulated run as it passes the wheel and the distribution of the number tagged is:
```{r fig.width=6, fig.height=4,include=TRUE, fig.align="center", fig.cap=c("Number tagged and released at wheel"), echo=FALSE}
# is this fish sampled?
pop <- merge(pop, capture.prob, all.x=TRUE)
pop$tagged <- as.logical(rbinom(nrow(pop), 1, expit(pop$logit.tag)))
ggplot(data=pop[pop$tagged,], aes(x=date))+
ggtitle("Number tagged and released by date")+
geom_bar(alpha=0.2)+
xlab("Date")
```
A total of `r sum(pop$tagged)` fish are tagged and released.
```{r travel.time, include=FALSE}
# travel time is log-normal with meanlog = log(7 days) and sdlog = 0.3
travel.time.mu <- 7
travel.time.sigma <- .3
pop$travel.time <- rlnorm(nrow(pop), meanlog=log(travel.time.mu), sdlog=travel.time.sigma)
travel.dist <- ggplot(data=pop, aes(x=travel.time))+
ggtitle("Distributon of travel times between wheel and fishery")+
geom_histogram(alpha=0.2)+
xlab("Travel time between wheel and fishery (days)")
```
Travel time from the wheel to the commercial fishery is simulated using a log-normal distribution
with a mean (on the log scale) of log(`r travel.time.mu`) days and a standard deviation on the log-scale
of `r travel.time.sigma`. This gives a distribution of travel times of:
```{r fig.width=6, fig.height=4,include=TRUE, fig.align="center", fig.cap=c("Distribution of travel times"), echo=FALSE, warning=FALSE, message=FALSE}
travel.dist
```
The travel time was added to the time of arrival at the fish wheels giving a distribution of time of arrival in fishery of
```{r, fig.width=6, fig.height=4, include=TRUE, fig.align="center", fig.cap=c("Arrive date at fishery"), echo=FALSE, warning=FALSE, message=FALSE}
# arrival at fishery
pop$time.at.fishery <- pop$time.at.wheel + pop$travel.time
pop$date.at.fishery <- trunc(pop$time.at.fishery)
fishery.dist <- ggplot(data=pop, aes(x=date.at.fishery))+
ggtitle("Distribution of arrival time at commercial fishery")+
geom_histogram(alpha=0.2)+
xlab("Date")
fishery.dist
```
```{r fishery1,include=FALSE}
# performance of fishery
fishery <- plyr::ddply(pop, "date.at.fishery", plyr::summarize,
tot.fish.fishery = length(date))
# fishery runs for 3 days then off then on then off
fishery$active <- as.logical( trunc((fishery$date.at.fishery-1)/3) %% 2)
# fishery stops at certain part of the run
run.cutoff <- 0.99
date.cutoff <- quantile(pop$date.at.fishery, prob=run.cutoff)
fishery$active[ fishery$date.at.fishery > date.cutoff] <- FALSE
# figure out if captured in fishery
pop <- merge(pop, fishery, all.x=TRUE)
# fishery capture probability: logit-normal around logit(0.10) with sd 0.2 on the logit scale
# First, a case where the probability of capture is independent of run size
fishery.p <- .10
pop$logit.recover <- stats::rnorm( nrow(pop), mean=logit(.10), sd=.2)
pop$logit.recover[ !pop$active] <- -10 # probability of zero when fishery not acting
# add a dependency on run size similar to what happens at the fish wheels
fishery.p <- .10
pop$logit.recover <- stats::rnorm( nrow(pop), mean=pmin(logit(fishery.p), logit(1000/7/pop$tot.fish.fishery), na.rm=TRUE), sd=.2)
pop$logit.recover[ !pop$active] <- -10 # probability of zero when fishery not acting
fishery.prob <- ggplot(data=pop, aes(x=expit(logit.recover)))+
ggtitle("Distribution of catchability at fishery")+
geom_histogram(alpha=0.2)+
xlab("Probability of capture in fishery")
correlaton.plot <- ggplot(data=pop, aes(x=expit(logit.tag), y=expit(logit.recover)))+
ggtitle("Correlation between tagging and recapture probability")+
geom_point()+
xlab("Probability of capture at tagging wheel")+ylab("Probability of capture in fishery")
correlation.tag.recover <- cor(expit(pop$logit.tag), expit(pop$logit.recover))
relative.bias.petersen <- -correlation.tag.recover*sqrt( var(expit(pop$logit.tag))*var(expit(pop$logit.recover)))/
mean(expit(pop$logit.tag)* expit(pop$logit.recover))
pop$recover <- as.logical( rbinom(nrow(pop), 1, expit(pop$logit.recover)))
fishery.catch <- ggplot(data=pop[pop$recover,], aes(x=date.at.fishery))+
ggtitle("Commercial catch by date")+
geom_bar(width=1, alpha=0.2)+
geom_vline(xintercept=date.cutoff, color="red")+
xlim(0,max(pop$date.at.fishery))+xlab("Date")
```
The distribution of catchability in the commercial fishery is
```{r fig.width=6, fig.height=4, include=TRUE, fig.align="center", fig.cap=c("Distribution of cature probabilities in fishery"), echo=FALSE, warning=FALSE, message=FALSE}
fishery.prob
```
The commercial fishery is assumed to run on a 3 day on/3 day off schedule throughout the season and
terminates when about `r 100*run.cutoff`% of the run has passed the fishery (day `r round(date.cutoff)`).
If the catchability in the commercial fishery were equal for all fish, then the pooled Petersen would also be unbiased.
This is clearly not the case because some fish have a probability of 0 of being captured when the fishery is not operating.
If the probability of capture in the commercial fishery is uncorrelated with the probability of capture by the tagging wheel,
the pooled-Petersen is also unbiased. A plot of the probability of capture at the tagging wheels and in the commercial fishery is:
```{r fig.width=6, fig.height=4,include=TRUE, fig.align="center", fig.cap=c("Correlation between tagging and recovery probabilities"), echo=FALSE, warning=FALSE, message=FALSE}
correlaton.plot
```
In this case the correlation between the tagging and recovery probability is
`r round(correlation.tag.recover,2)`.
Schwarz and Taylor (1998) give a formula for the relative bias of the pooled Petersen if you know the correlation and variation
in the probability of capture at the two events. In this case the relative bias of the Pooled Petersen is `r round(100*relative.bias.petersen)`%.
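For reference, this is how that approximation was evaluated for this simulation (it mirrors the
computation in the hidden set-up code and uses the simulated capture probabilities directly):

```{r}
# Approximate relative bias of the pooled Petersen:
# RB = -cor(p1, p2) * sqrt(var(p1) * var(p2)) / mean(p1 * p2)
p1 <- expit(pop$logit.tag)      # probability of capture at the tagging wheel
p2 <- expit(pop$logit.recover)  # probability of capture in the commercial fishery
round(-cor(p1, p2) * sqrt(var(p1) * var(p2)) / mean(p1 * p2), 3)
```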
A non-zero correlation could arise if both the fish wheel and commercial fishery can be
saturated, e.g. regardless of the number of fish arriving at the fishwheel, only a maximum number can be captured and tagged, and
regardless of how many fish are available in the fishery, only a maximum can be caught. In this case, the probability of tagging and the probability of recapture are both reduced
when there are many fish available, which could induce some correlation.
A summary of the catch
by the fishery is:
```{r fig.width=6, fig.height=4, include=TRUE, fig.align="center", fig.cap=c("Fish captured in fishery by date"), echo=FALSE}
fishery.catch
```
Notice the "holes" in the data when the commercial fishery is not operating.
A summary of the number of fish tagged and recaptured is:
```{r summary,include=TRUE,echo=FALSE}
xtabs(~tagged+recover, data=pop)
```
The data were broken into 3 day strata to match the commercial fishery operations and gives rise to the following matrix of releases and recoveries:
```{r taku.matrix, include=FALSE}
# break into 3 day strata for tagging and recovery
pop$tag.stratum <- pmax(1, 1+trunc((pop$date -1)/3))
pop$fishery.stratum <- pmax(1, 1+trunc((pop$date.at.fishery-1)/3))
range(pop$tag.stratum)
range(pop$fishery.stratum)
#xtabs(recover~tag.stratum + fishery.stratum, data=pop)
taku.n1 <- unlist(plyr::daply(pop, "tag.stratum", plyr::summarize, sum(tagged)))
taku.n1
taku.m2 <- as.matrix(xtabs(recover*tagged~ tag.stratum+fishery.stratum, data=pop))
taku.m2
taku.u2 <- unlist(plyr::daply(pop, "fishery.stratum", plyr::summarize, sum((1-tagged) & recover)))
taku.u2
# truncate any rows with no releases at the end
nweeks.tagging <- 1+length(taku.n1) - which.min(cumprod(rev(taku.n1==0)))
takuz <- rbind(cbind( taku.n1[1:nweeks.tagging], taku.m2[1:nweeks.tagging,]), c(NA, taku.u2))
rownames(takuz) <- c(paste("S",1:(nrow(takuz)-1),sep=""),"untagged")
colnames(takuz) <- c("tagged",paste("S", 1:(ncol(takuz)-1),sep=""))
```
```{r taku.matrix2, echo=FALSE}
takuz
```
Notice that some columns are all zero because the commercial fishery was not operating in those strata.
# Analysis of dataset stratified to the 3-day strata.
We are now back to familiar territory.
## Pooled Petersen estimator
```{r ppz,echo=FALSE,warning=FALSE,message=FALSE}
library(BTSPAS)
ppz.complete <- BTSPAS::SimplePetersen( sum(takuz[,"tagged"], na.rm=TRUE), sum(takuz[-nrow(takuz), -1]), sum(takuz["untagged",],na.rm=TRUE))
```
The pooled Petersen estimator of abundance is
`r formatC(round(ppz.complete$N.est,0), digits=0, big.mark=',', format="f")`
(SE
`r formatC(round(ppz.complete$N.se,0), digits=0, big.mark=',', format="f")`
). Notice the negative bias in the estimate as predicted.
## *BTSPAS* on the full dataset
We prepare the data in the usual way with the following results:
```{r statweekz,echo=FALSE}
# what is the strata identification number (statistical week from start of year)?
takuz.sweek <- 1:(ncol(takuz)-1)
cat("Stratum\n")
takuz.sweek
```
```{r n1u2z,echo=FALSE}
# First column is released. Last row is untagged recovered
takuz.n1 <- as.vector(takuz[1:(nrow(takuz)-1), 1,drop=TRUE])
cat('n1 - number released\n')
takuz.n1
# untagged fish recaptured - last row - truncate at last non-zero entry (assuming consistent with the m array)
takuz.u2 <- takuz[ "untagged", -1]
takuz.u2 <- takuz.u2[ -length(takuz.u2)]
cat('u2 - number of untagged fish in the commercial fishery \n')
takuz.u2
takuz.sweek <- takuz.sweek[1:length(takuz.u2)]
```
```{r m2z,echo=FALSE}
temp <- as.matrix(takuz[1:(nrow(takuz)-1), 2:ncol(takuz)])
temp2 <- plyr::laply(1:nrow(temp), function(i, temp){
#browser()
x <- temp[i,]
x <- c(x[i:length(x)],rep(0, i-1))
x
}, temp=temp)
# truncate after the last recovery column to limit the size of the movement
# distribution to the weeks needed
nweeks <- 1+ncol(temp) - which.min(cumprod(rev((apply(temp2,2,sum)==0))))
takuz.m2 <- temp2[, 1:nweeks]
colnames(takuz.m2) <- paste("X",0:(ncol(takuz.m2)-1),sep="")
rownames(takuz.m2) <- rownames(takuz)[-nrow(takuz)]
cat('n1 (releases) and m2 - recoveries from each release group \n')
cbind(n1=takuz.n1,m2=takuz.m2)
```
```{r miscz, include=FALSE}
takuz.prefix <- 'ex.3.day.strata'
takuz.title <- "Example 3-day stratification - TSPND NP"
# are there any jumps in the abundance?
takuz.jump.after <- NULL # list sample times after which jump in number occurs
# are there any bad values that need to be set to 0 or missing prior to the model fit?
takuz.bad.n1 <- c() # list sample times of bad n1 values
takuz.bad.m2 <- c() # list sample times of bad m2 values
takuz.bad.u2 <- c() # list sample times of bad u2 values
```
*BTSPAS* allows you to fix the probability of capture to zero for specified recovery strata.
In this case, it corresponds to cases where the number of untagged fish is also zero.
You need to specify the statistical week number and the value of $p$ on the $logit$ scale.
Because *BTSPAS* operates on the $logit$ scale and $logit(0)$ is $-\infty$, *BTSPAS* uses a
value of -10 (on the logit scale) to represent strata with no effort:
```{r zeroset}
# are there any days where the capture probability is fixed in advance?, i.e. because no commercial fishery
takuz.logitP.fixed <- seq(2, length(takuz.u2), 2)
takuz.logitP.fixed
takuz.logitP.fixed.values <- rep(-10, length(takuz.logitP.fixed))
takuz.logitP.fixed.values
```
We will fit the non-diagonal model with a non-parametric movement distribution.
The total number of iterations, the burnin period and the number of posterior samples to retain are
specified. Here, smallish values have been used so that the run time is not excessive, but values on the order 10x larger are typically used.
```{r fit.zeroe, warning=FALSE, message=FALSE}
library(BTSPAS)
ex.3day.fit <- TimeStratPetersenNonDiagErrorNP_fit(
title= takuz.title,
prefix= takuz.prefix,
time= takuz.sweek,
n1= takuz.n1,
m2= takuz.m2,
u2= takuz.u2,
jump.after= takuz.jump.after,
bad.n1= takuz.bad.n1,
bad.m2= takuz.bad.m2,
bad.u2= takuz.bad.u2,
logitP.fixed=takuz.logitP.fixed,
logitP.fixed.values=takuz.logitP.fixed.values,
n.iter=10000, n.burnin=1000, n.sims=300,
debug=FALSE,
save.output.to.files=FALSE
)
```
```{r clean.upz, include=FALSE, results='hide'}
# delete extra files that were created
file.remove("data.txt" )
file.remove("CODAindex.txt" )
file.remove("CODAchain1.txt" )
file.remove("CODAchain2.txt" )
file.remove("CODAchain3.txt" )
file.remove("inits1.txt" )
file.remove("inits2.txt" )
file.remove("inits3.txt" )
file.remove("model.txt" )
```
## Exploring the output
* The revised fitted run curve of the unmarked individuals (the recovery sample would be added to this curve)
```{r message=FALSE,warning=FALSE,fig.width=6, fig.height=4, include=TRUE, fig.align="center", fig.cap=c("Fitted run curve with zeroes"), echo=FALSE}
ex.3day.fit$plots$fit.plot
```
Notice that *BTSPAS* interpolated through the weeks where no commercial fishery ran. Estimates of the run size are very
uncertain when there are few fish released and recovered near the end of the experiment.
* The revised posterior distribution for the total run size
```{r fig.width=6, fig.height=4, include=TRUE, fig.align="center", fig.cap=c("Posterior of total population size"), echo=FALSE}
ex.3day.fit$plots$post.UNtot.plot
```
* The revised estimated recovery probabilities on the logit scale (with 95% credible intervals)
```{r fig.width=6, fig.height=4, include=TRUE, fig.align="center", fig.cap=c("Estimates of logit(p)"), echo=FALSE}
ex.3day.fit$plots$logitP.plot
```
There is variability among the recovery probabilities in the recovery strata. Notice how the strata where recovery probabilities
were fixed to zero are shown.
The estimated total run size (with 95% credible interval)
```{r fit1dirz.Ntot,echo=FALSE}
round(ex.3day.fit$summary[ grepl("Ntot", rownames(ex.3day.fit$summary)),],0)
```
which can be compared to the real total population of `r formatC(round(N,0), digits=0, big.mark=',', format="f")` and the Pooled Petersen estimate of
`r formatC(round(ppz.complete$N.est,0), digits=0, big.mark=',', format="f")`
(SE
`r formatC(round(ppz.complete$N.se,0), digits=0, big.mark=',', format="f")`
). The bias in the pooled-Petersen seems to have been resolved.
The individual estimates of the number of unmarked in each recovery stratum are:
```{r fit1dirz.U,echo=FALSE}
round(ex.3day.fit$summary[ grepl("^U\\[", rownames(ex.3day.fit$summary)),],0)
```
# Analysis at the 6-day stratum level.
Many of the analyses stratify to the statistical week, so only part of the week is fished by the commercial fishery. As noted previously,
if the fish wheels sample a constant proportion of the run, then it doesn't matter how the recovery sample is obtained -- the Pooled Petersen
estimator will still be unbiased.
We simulate the coarser stratification by taking the previous simulated population and pooling adjacent 3-day strata.
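The pooling simply maps each date to a 6-day stratum index. A small sketch of the assignment rule
(the same rule is used in the hidden pooling code that follows):

```{r}
dates <- 1:13
data.frame(date = dates, stratum.6day = pmax(1, 1 + trunc((dates - 1)/6)))
```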
The pooled data is:
```{r takup.matrix, include=FALSE}
# break into 6 day strata for tagging and recovery
pop$tag.stratum2 <- pmax(1, 1+trunc((pop$date -1)/6))
pop$fishery.stratum2 <- pmax(1, 1+trunc((pop$date.at.fishery-1)/6))
range(pop$tag.stratum2)
range(pop$fishery.stratum2)
xtabs(~tag.stratum +tag.stratum2 , data=pop)
xtabs(~fishery.stratum+fishery.stratum2, data=pop)
takup.n1 <- unlist(plyr::daply(pop, "tag.stratum2", plyr::summarize, sum(tagged)))
takup.n1
takup.m2 <- as.matrix(xtabs(recover*tagged~ tag.stratum2+fishery.stratum2, data=pop))
takup.m2
takup.u2 <- unlist(plyr::daply(pop, "fishery.stratum2", plyr::summarize, sum((1-tagged) & recover)))
takup.u2
# truncate any rows with no releases at the end
nweeksp.tagging <- 1+length(takup.n1) - which.min(cumprod(rev(takup.n1==0)))
takuzp <- rbind(cbind( takup.n1[1:nweeksp.tagging], takup.m2[1:nweeksp.tagging,]), c(NA, takup.u2))
rownames(takuzp) <- c(paste("S",1:(nrow(takuzp)-1),sep=""),"untagged")
colnames(takuzp) <- c("tagged",paste("S", 1:(ncol(takuzp)-1),sep=""))
```
```{r takup.matrix2, include=TRUE, echo=FALSE}
takuzp
```
Notice that none of the recovery strata are now zero (except at the end of the study).
## Pooled Petersen estimator
```{r ppzp,echo=FALSE,warning=FALSE,message=FALSE}
library(BTSPAS)
ppzp.complete <- BTSPAS::SimplePetersen( sum(takuzp[,"tagged"], na.rm=TRUE), sum(takuzp[-nrow(takuzp), -1]), sum(takuzp["untagged",],na.rm=TRUE))
```
The pooled Petersen estimator of abundance is the same as before because pooling does not change the numbers tagged, recaptured, or caught.
`r formatC(round(ppzp.complete$N.est,0), digits=0, big.mark=',', format="f")`
(SE
`r formatC(round(ppzp.complete$N.se,0), digits=0, big.mark=',', format="f")`
).
## *BTSPAS* on the pooled dataset
We prepare the data in the usual way with the following results:
```{r statweekzp,echo=FALSE}
# what is the strata identification number (statistical week from start of year)?
takuzp.sweek <- 1:(ncol(takuzp)-1)
cat("Stratum\n")
takuzp.sweek
```
```{r n1u2zp,echo=FALSE}
# First column is released. Last row is untagged recovered
takuzp.n1 <- as.vector(takuzp[1:(nrow(takuzp)-1), 1,drop=TRUE])
cat('n1 - number released\n')
takuzp.n1
# untagged fish recaptured - last row - truncate at last non-zero entry (assuming consistent with the m array)
takuzp.u2 <- takuzp[ "untagged", -1]
cat('u2 - number of untagged fish in the commercial fishery \n')
takuzp.u2
takuzp.u2 <- as.vector(unlist(takuzp.u2))
takuzp.u2 <- takuzp.u2[1:18]
takuzp.sweek <- takuzp.sweek[1:length(takuzp.u2)]
```
```{r m2zp,echo=FALSE}
temp <- as.matrix(takuzp[1:(nrow(takuzp)-1), 2:ncol(takuzp)])
temp2 <- plyr::laply(1:nrow(temp), function(i, temp){
#browser()
x <- temp[i,]
x <- c(x[i:length(x)],rep(0, i-1))
x
}, temp=temp)
# truncate after the last recovery column to limit the size of the movement
# distribution to the weeks needed
nweeks <- 1+ncol(temp) - which.min(cumprod(rev((apply(temp2,2,sum)==0))))
takuzp.m2 <- temp2[, 1:nweeks]
colnames(takuzp.m2) <- paste("X",0:(ncol(takuzp.m2)-1), sep="")
rownames(takuzp.m2) <- rownames(takuzp)[-nrow(takuzp)]
cat('n1 and m2 - recoveries from each release group \n')
cbind(n1=takuzp.n1, takuzp.m2)
```
```{r misczp, include=FALSE}
takuzp.prefix <- 'ex.6.day.strata'
takuzp.title <- "Example 6-day stratification - TSPND NP"
# are there any jumps in the abundance?
takuzp.jump.after <- NULL # list sample times after which jump in number occurs
# are there any bad values that need to be adjusted?
takuzp.bad.n1 <- c() # list sample times of bad n1 values
takuzp.bad.m2 <- c() # list sample times of bad m2 values
takuzp.bad.u2 <- c() # list sample times of bad u2 values
```
There were no (pooled) strata where there was no commercial fishery, so we don't restrict the $logit(p)$ to any value.
```{r zerosetp}
# are there any days where the capture probability is fixed in advance?, i.e. because no commercial fishery
takuzp.logitP.fixed <- NULL
takuzp.logitP.fixed.values <- NULL
```
We will fit the non-parametric model. The total number of iterations, the burnin period and the number of posterior samples to retain are
specified. Here, smallish values have been used so that the run time is not excessive, but values on the order 10x larger are typically used.
```{r fit.zeroep}
library(BTSPAS)
ex.6day.fit <- TimeStratPetersenNonDiagErrorNP_fit(
title= takuzp.title,
prefix= takuzp.prefix,
time= takuzp.sweek,
n1= takuzp.n1,
m2= takuzp.m2,
u2= takuzp.u2,
jump.after= takuzp.jump.after,
bad.n1= takuzp.bad.n1,
bad.m2= takuzp.bad.m2,
bad.u2= takuzp.bad.u2,
logitP.fixed=takuzp.logitP.fixed,
logitP.fixed.values=takuzp.logitP.fixed.values,
n.iter=10000, n.burnin=1000, n.sims=300,
debug=FALSE,
save.output.to.files=FALSE
)
```
```{r clean.upzp, include=FALSE, results='hide'}
# delete extra files that were created
file.remove("data.txt" )
file.remove("CODAindex.txt" )
file.remove("CODAchain1.txt" )
file.remove("CODAchain2.txt" )
file.remove("CODAchain3.txt" )
file.remove("inits1.txt" )
file.remove("inits2.txt" )
file.remove("inits3.txt" )
file.remove("model.txt" )
```
## Exploring the output
* The revised fitted run curve of the unmarked individuals (the recovery sample would be added to this curve)
```{r fig.width=6, fig.height=4, include=TRUE, fig.align="center", fig.cap=c("Fitted run curve with zeroes"), echo=FALSE}
ex.6day.fit$plots$fit.plot
```
* The revised posterior distribution for the total run size
```{r fig.width=6, fig.height=4, include=TRUE, fig.align="center", fig.cap=c("Posterior of total population size"), echo=FALSE}
ex.6day.fit$plots$post.UNtot.plot
```
* The revised estimated recovery probabilities on the logit scale (with 95% credible intervals)
```{r fig.width=6, fig.height=4, include=TRUE, fig.align="center", fig.cap=c("Estimates of logit(p)"), echo=FALSE}
ex.6day.fit$plots$logitP.plot
```
There is variability among the recovery probabilities in the recovery strata. Unlike the 3-day analysis, no recovery
probabilities needed to be fixed to zero here because every pooled stratum had some commercial fishing.
The estimated total run size (with 95% credible interval)
```{r fit1dirzp.Ntot,echo=FALSE}
round(ex.6day.fit$summary[ grepl("Ntot", rownames(ex.6day.fit$summary)),],0)
```
which can be compared to the real total population of `r formatC(round(N,0), digits=0, big.mark=',', format="f")` and the Pooled Petersen estimate of
`r formatC(round(ppz.complete$N.est,0), digits=0, big.mark=',', format="f")`
(SE
`r formatC(round(ppz.complete$N.se,0), digits=0, big.mark=',', format="f")`
). The estimate from the data pooled to 6-day strata appears to be biased, but not as badly as the pooled-Petersen estimator.
The revised individual estimates of the number of unmarked in each recovery stratum are:
```{r fit1dirzp.U,echo=FALSE}
round(ex.6day.fit$summary[ grepl("^U\\[", rownames(ex.6day.fit$summary)),],0)
```
# References
Bonner, S. J., & Schwarz, C. J. (2011).
Smoothing population size estimates for time-stratified mark-recapture experiments using Bayesian P-splines.
Biometrics, 67, 1498–1507.
https://doi.org/10.1111/j.1541-0420.2011.01599.x
Darroch, J. N. (1961).
The two-sample capture-recapture census when tagging and sampling are stratified. Biometrika, 48, 241–260.
https://doi.org/10.1093/biomet/48.3-4.241
Plante, N., Rivest, L.-P., & Tremblay, G. (1998).
Stratified capture-recapture estimation of the size of a closed population. Biometrics, 54, 47-60.
https://doi.org/10.2307/2533994
Schwarz, C. J., & Taylor, C. G. (1998). The use of the stratified-Petersen estimator in fisheries management with an illustration of estimating the number of pink salmon (Oncorhynchus gorbuscha)
that return to spawn in the Fraser River.
Canadian Journal of Fisheries and Aquatic Sciences, 55, 281–296.
https://doi.org/10.1139/f97-238
|
/scratch/gouwar.j/cran-all/cranData/BTSPAS/vignettes/e-Bias-from-incomplete-sampling.Rmd
|
---
title: "Interpolating run early and late"
author: "Carl James Schwarz"
date: "`r Sys.Date()`"
output:
html_vignette:
toc: true # table of content true
toc_depth: 3 # upto three depths of headings (specified by #, ## and ###)
number_sections: true ## if you want number sections at each table header
#vignette: >
# %\VignetteIndexEntry{06 - Interpolating run early and late}
# %\VignetteEncoding{UTF-8}
# %\VignetteEngine{knitr::rmarkdown_notangle}
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
library(BTSPAS)
library(ggplot2)
library(plyr)
rec.matrix.csv <- textConnection(
"Tagging,SW22,SW23,SW24,SW25,SW26,SW27,SW28,SW29,SW30,SW31,SW32,SW33,SW34,SW35,SW36,SW37,SW38,SW39,SW40,SW41,Recovered,Applied,PropRecovered
SW22,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,10,0.100
SW23,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,7,100,0.070
SW24,0,0,0,51,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,56,525,0.107
SW25,0,0,0,10,45,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,55,403,0.136
SW26,0,0,0,0,169,64,9,0,0,0,0,0,0,0,0,0,0,0,0,0,242,849,0.285
SW27,0,0,0,0,0,139,41,5,0,0,0,0,0,0,0,0,0,0,0,0,185,742,0.249
SW28,0,0,0,0,0,0,155,31,3,1,0,0,0,0,0,0,0,0,0,0,190,675,0.281
SW29,0,0,0,0,0,0,0,266,32,5,0,0,0,0,0,0,0,0,0,0,303,916,0.331
SW30,0,0,0,0,0,0,0,0,33,49,3,0,0,0,0,0,0,0,0,0,85,371,0.229
SW31,0,0,0,0,0,0,0,0,0,33,36,0,0,0,1,0,0,0,0,0,70,296,0.236
SW32,0,0,0,0,0,0,0,0,0,0,39,8,1,0,0,0,0,0,0,0,48,234,0.205
SW33,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,39,0.026
SW34,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,97,0.000
SW35,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,0,0,0,0,0,3,61,0.049
SW36,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,2,26,0.077
SW37,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0.000
CatchComm,0,0,0,1869,5394,5131,5668,6733,1780,1828,2493,157,0,0,0,0,0,0,0,0,31053,NA,NA")
rec.matrix <- read.csv(rec.matrix.csv, header=TRUE, as.is=TRUE, strip.white=TRUE)
rec.matrix$Recovered <- NULL
rec.matrix$PropRecovered <- NULL
rec.matrix$SW38 <- NULL
rec.matrix$SW39 <- NULL
rec.matrix$SW40 <- NULL
rec.matrix$SW41 <- NULL
rec.matrix$SW34 <- 0
rec.matrix$SW35 <- 0
rec.matrix$SW36 <- 0
```
# Introduction
In some studies, harvest (recovery strata) start after the run has started and
terminate prior to the run ending. For example, consider the following
recovery matrix where releases and recoveries have been stratified on
a weekly basis:
```{r echo=FALSE}
rec.matrix
```
The bottom row is the total recoveries (tagged and untagged) from the commercial harvest.
In this case, the commercial harvest did not start until
statistical week SW25 and ended in SW33 but
the run started earlier and ended later than the commercial harvest.
```{r echo=FALSE}
# convert the recovery matrix to BTSPAS input
# get the stat weeks of releases from the first column
stat.week.rel <- rec.matrix$Tagging[1:(nrow(rec.matrix)-1)]
# get the stat week of recoveries from the first row
stat.week.rec <- names(rec.matrix)[ grepl("SW",names(rec.matrix))]
# get the number of releases
n1.df <- data.frame(rel.index=1:(nrow(rec.matrix)-1),
n1 = rec.matrix$Applied[1:(nrow(rec.matrix)-1)])
# get the full recovery matrix
m2.full <- rec.matrix[1:(nrow(rec.matrix)-1), names(rec.matrix)[grepl("SW",names(rec.matrix))]]
# Now to compute the reduced recovery matrix by shifting rows to the left
# get the total number of recoveries
n2 <- rec.matrix[ nrow(rec.matrix), -c(1,ncol(rec.matrix))]
u2 <- as.numeric(n2-apply(m2.full,2,sum ))
```
## Fit with the current data.
We now fit the BTSPAS model using the current data
```{r echo=FALSE, message=FALSE, warning=FALSE}
library("BTSPAS")
red.m2 <- plyr::aaply(as.matrix(cbind(1:nrow(n1.df),m2.full)),
1, function(x){
#browser()
rot.vec <- c((x[1]+1):length(x), 2:(x[1]+1))
x <- x[rot.vec]
#print(x)
x[-length(x)]
})
# remove columns at the right that are all zero
all.zero <- apply(red.m2==0, 2, all)
remove.right <- rev(cumprod(rev(all.zero)))
red.m2 <- red.m2[, !remove.right]
# Make the call to fit the model and generate the output files
red.fit <- TimeStratPetersenNonDiagErrorNP_fit( # notice change in function name
title= "Original data",
prefix= "red",
time= 1:nrow(n1.df),
n1= n1.df$n1,
m2= red.m2,
u2= u2,
jump.after= NULL,
bad.n1= NULL,
bad.m2= NULL,
bad.u2= NULL,
debug=FALSE, # save time by reducing number of MCMC iterations
save.output.to.files=FALSE)
file.remove("data.txt" )
file.remove("CODAindex.txt" )
file.remove("CODAchain1.txt" )
file.remove("CODAchain2.txt" )
file.remove("CODAchain3.txt" )
file.remove("inits1.txt" )
file.remove("inits2.txt" )
file.remove("inits3.txt" )
file.remove("model.txt" )
# need to delete the *.txt files
```
On the surface, the fit looks fine:
```{r echo=FALSE,warning=FALSE, message=FALSE}
red.fit$plots$fit.plot
```
but the spline remains very large in the first 3 weeks leading to unrealistic estimates
of the run in the first 3 weeks and an unrealistic estimate of the total run:
```{r echo=FALSE}
select <- grepl("Ntot", row.names(red.fit$summary)) |
grepl("Utot", row.names(red.fit$summary)) |
grepl("^U\\[", row.names(red.fit$summary))
round(red.fit$summary[select,c("mean","sd","2.5%","97.5%")],0)
```
The problem is that without a commercial catch in the first 3 and last 3 weeks, there is no information
about the probability of capture for those weeks and BTSPAS simply interpolates the
spline from the middle of the data to the first 3 and last 3 weeks. The interpolation for the
last 3 weeks isn't too bad -- the spline is already on a downwards trend and so this is continued.
However, the interpolation back for the first 3 weeks is not very realistic.
## Forcing the run curve to zero.
It is possible to "force" BTSPAS to interpolate the first 3 and last 3 weeks down to zero
by adding "fake" data. In particular, we pretend that in each of the first 3 and last 3 weeks
a commercial catch of 1 fish occurred and that it was tagged. You also need to ensure that
enough fish were tagged and released to accommodate the fake data. A sketch of the adjustment
for the first week is shown below.
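The adjustment amounts to bumping selected cells of the recovery matrix up to 1. Here is a sketch
for the first week only (the hidden code that builds `rec.matrix2`, a copy of the original recovery
matrix, makes similar adjustments for the other early and late weeks):

```{r eval=FALSE}
rec.matrix2 <- rec.matrix
rec.matrix2[1, "SW22"] <- 1                  # 1 fake tagged recovery in the release week SW22
rec.matrix2[nrow(rec.matrix2), "SW22"] <- 1  # fake commercial catch of 1 fish in SW22
```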
The revised recovery matrix is:
```{r echo=FALSE, message=FALSE, warning=FALSE}
rec.matrix2 <- rec.matrix
rec.matrix2[1,2] <- 1
rec.matrix2[2,3] <- 1
rec.matrix2[3,4] <- 1
rec.matrix2[nrow(rec.matrix2),2:4] <- 1
rec.matrix2[13,14] <- 1
rec.matrix2[14,15] <- 1
rec.matrix2[15,16] <- 1
rec.matrix2[16,17] <- 1
rec.matrix2[17,14:17] <- 1
rec.matrix2
```
Notice how "fake" recoveries were added to the diagonal entries for the first and final weeks
of the data, along with a "fake" harvest of 1 fish in those weeks.
Because the fake data values are very small, it has little impact on the total run size,
but a recovery of 1 tagged fish in a commercial harvest of 1 fish is not consistent
with a very large run size and so this forces the run curve down at these points as
seen in the revised fit:
```{r echo=FALSE}
# convert the recovery matrix to BTSPAS input
# get the stat weeks of releases from the first column
stat.week.rel <- rec.matrix2$Tagging[1:(nrow(rec.matrix2)-1)]
# get the stat week of recoveries from the first row
stat.week.rec <- names(rec.matrix2)[ grepl("SW",names(rec.matrix2))]
# get the number of releases
n1.df <- data.frame(rel.index=1:(nrow(rec.matrix2)-1),
n1 = rec.matrix2$Applied[1:(nrow(rec.matrix2)-1)])
# get the full recovery matrix
m2.full <- rec.matrix2[1:(nrow(rec.matrix2)-1), names(rec.matrix2)[grepl("SW",names(rec.matrix2))]]
# Now to compute the reduced recovery matrix by shifting rows to the left
# get the total number of recoveries
n2 <- rec.matrix2[ nrow(rec.matrix2), -c(1,ncol(rec.matrix2))]
u2 <- as.numeric(n2-apply(m2.full,2,sum ))
red.m2 <- plyr::aaply(as.matrix(cbind(1:nrow(n1.df),m2.full)),
1, function(x){
#browser()
rot.vec <- c((x[1]+1):length(x), 2:(x[1]+1))
x <- x[rot.vec]
#print(x)
x[-length(x)]
})
# remove columns at the right that are all zero
all.zero <- apply(red.m2==0, 2, all)
remove.right <- rev(cumprod(rev(all.zero)))
red.m2 <- red.m2[, !remove.right]
# Make the call to fit the model and generate the output files
red.fit2 <- TimeStratPetersenNonDiagErrorNP_fit( # notice change in function name
title= "Adding fake data at start and end",
prefix= "red",
time= 1:nrow(n1.df),
n1= n1.df$n1,
m2= red.m2,
u2= u2,
jump.after= NULL,
bad.n1= NULL,
bad.m2= NULL,
bad.u2= NULL,
debug=FALSE, # save time by reducing number of MCMC iterations
save.output.to.files=FALSE)
file.remove("data.txt" )
file.remove("CODAindex.txt" )
file.remove("CODAchain1.txt" )
file.remove("CODAchain2.txt" )
file.remove("CODAchain3.txt" )
file.remove("inits1.txt" )
file.remove("inits2.txt" )
file.remove("inits3.txt" )
file.remove("model.txt" )
# need to delete the *.txt files
```
Notice that in the revised fit, the run curve is forced to 0 at the start
and end of the study:
```{r echo=FALSE,message=FALSE,warnings=FALSE}
red.fit2$plots$fit.plot+coord_cartesian(ylim=c(-20,15))
```
The estimates of the total run size and the weekly estimates of the run size are also more sensible:
```{r echo=FALSE}
select <- grepl("Ntot", row.names(red.fit2$summary)) |
grepl("Utot", row.names(red.fit2$summary)) |
grepl("^U\\[", row.names(red.fit2$summary))
round(red.fit2$summary[select,c("mean","sd","2.5%","97.5%")],0)
```
|
/scratch/gouwar.j/cran-all/cranData/BTSPAS/vignettes/f-Interpolating-run-earlier-and-later.Rmd
|
##---------------------------------------------------------------------
##
## Simulating a BARC model
##
##---------------------------------------------------------------------
##' @title
##' Functions to simulate, extract components and fit BARC models
##'
##' @name BARC.functions
##' @order 1
##'
##' @description
##' These functions can be used to simulate, extract components
##' and fit any model of the class \code{barc}. A model with
##' class \code{barc} is a special case of a model with class \code{btsr}.
##' See \sQuote{The BTSR structure} in \code{\link{btsr.functions}} for
##' more details on the general structure. See \sQuote{Details}.
##'
##' @details
##'
##' Neither the beta regression nor an i.i.d. sample
##' from a beta distribution can be obtained as a special case of the
##' \eqn{\beta}ARC model since the term \eqn{h(T(U_0))} is always present.
##'
##' The model from Pumi et al. (2021) is obtained by setting
##' \code{xregar = TRUE} (so that the regressors are included in the AR
##' part of the model) and using the same link for \eqn{y_t} and \eqn{\mu_t}.
##'
##' # The map function
##'
##' The map function \eqn{T:[0,1] \to [0,1]} is a dynamical system, i.e.,
##' a function, potentially depending on a \eqn{r}-dimensional vector of
##' parameters \eqn{\theta}. Available choices are
##' \itemize{
##' \item \code{map = 1}, \eqn{\theta = k}, for \eqn{k} integer greater
##' or equal to 2.
##'
##' \deqn{T(u) = (ku)(mod 1)}
##'
##' \item \code{map = 2}, \eqn{0 \le \theta \le 1}
##'
##' \deqn{T(u) = \frac{u}{\theta}I(u < \theta) +
##' \theta\frac{(u - \theta)}{(1 - \theta)}I(u \ge \theta)}
##'
##' \item \code{map = 3} (logistic map), \eqn{ 0 \le \theta \le 4},
##'
##' \deqn{T(u) = \theta u(1-u)}
##'
##' \item \code{map = 4} (Manneville-Pomeau map), \eqn{0 < \theta < 1}
##'
##' \deqn{T(u) = (u + u^{1+\theta})(mod 1)}
##'
##' \item \code{map = 5} (Lasota-Mackey's map),
##'
##' \deqn{T(u) = \frac{u}{(1 - u)}I(u \le 0.5) + (2u - 1)I(u > 0.5)}
##' }
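##'
##' As an illustrative sketch (not part of the package API; the helper names
##' \code{T1} and \code{T4} are hypothetical), the first and fourth maps could
##' be coded directly in R as
##' \preformatted{
##' T1 <- function(u, k) k*u - floor(k*u)                               # map = 1: (ku) mod 1
##' T4 <- function(u, theta) (u + u^(1+theta)) - floor(u + u^(1+theta)) # map = 4 (Manneville-Pomeau)
##' }
##'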
##' @references
##'
##' Pumi, G.; Prass, T.S. and Souza, R.R. (2021). A dynamic model for
##' double bounded time series with chaotic driven conditional averages.
##' Scandinavian Journal of Statistics. Vol 48 (1), 68-86.
##'
##' @seealso
##' \code{\link{btsr.sim}}, \code{\link{btsr.extract}}, \code{\link{btsr.fit}}
##'
##' @md
NULL
#> NULL
##' @rdname BARC.functions
##' @order 2
##'
##' @details
##' The function \code{BARC.sim} generates a random sample from a
##' \eqn{\beta}ARC(p) model.
##'
##' @param n a strictly positive integer. The sample size of yt (after burn-in).
##' Default is 1.
##'
##' @param burn a non-negative integer. length of "burn-in" period. Default is 0.
##'
##' @param xreg optionally, a vector or matrix of external regressors.
##' For simulation purposes, the length of xreg must be \code{n+burn}.
##' Default is \code{NULL}. For extraction or fitting purposes, the length
##' of \code{xreg} must be the same as the length of the observed time series
##' \eqn{y_t}.
##'
##' @param map a non-negative integer from 1 to 5 corresponding to the map function.
##' Default is 4. See \sQuote{The map function}.
##'
##' @param coefs a list with the coefficients of the model. An empty list will result
##' in an error. The arguments that can be passed through this list are:
##' \itemize{
##' \item \code{alpha} optionally, a numeric value corresponding to the intercept.
##' If the argument is missing, it will be treated as zero. See
##' \sQuote{The BTSR structure} in \code{\link{btsr.functions}}.
##'
##' \item \code{beta} optionally, a vector of coefficients corresponding to the
##' regressors in \code{xreg}. If \code{xreg} is provided but \code{beta} is
##' missing in the \code{coefs} list, an error message is issued.
##'
##' \item \code{phi} optionally, for the simulation function this must be a vector
##' of size \eqn{p}, corresponding to the autoregressive coefficients
##' (including the ones that are zero), where \eqn{p} is the AR order. For
##' the extraction and fitting functions, this is a vector with the non-fixed
##' values in the vector of autoregressive coefficients.
##'
##' \item \code{theta} the parameter (or vector of parameters) corresponding
##' to the map function. If \code{map = 5} this value is ignored. For simulation
##' purposes, the default is \code{map = 4} and \code{theta = 0.5}.
##'
##' \item \code{nu} the dispersion parameter. If missing, an error message is issued.
##'
##' \item \code{u0} a numeric value in the interval \eqn{(0,1)}, corresponding
##' to the value of the random variable \eqn{U_0}. For simulation purposes, the
##' default is \code{u0 = pi/4}.
##'
##' }
##'
##' @param y.start optionally, an initial value for yt (to be used
##' in the recursions). Default is \code{NULL}, in which case, the recursion assumes
##' that \eqn{g_2(y_t) = 0}, for \eqn{t < 1}.
##'
##' @param xreg.start optionally, a vector of initial values for xreg
##' (to be used in the recursions). Default is \code{NULL}, in which case, the
##' recursion assumes that \eqn{X_t = 0}, for \eqn{t < 1}. If \code{xregar = FALSE}
##' this argument is ignored.
##'
##' @param xregar logical; indicates if xreg is to be included in the
##' AR part of the model. See \sQuote{The BTSR structure}. Default is \code{TRUE}.
##'
##' @param error.scale the scale for the error term. See \sQuote{The BTSR structure}
##' in \code{\link{btsr.functions}}. Default is 0.
##'
##' @param complete logical; if \code{FALSE} the function returns only the simulated
##' time series yt, otherwise, additional time series are provided.
##' Default is \code{FALSE}
##'
##' @param linkg character or a two character vector indicating which
##' links must be used in the model. See \sQuote{The BTSR structure}
##' in \code{\link{btsr.functions}} for details and \code{\link{link.btsr}}
##' for valid links. If only one value is provided, the same link is used
##' for \eqn{\mu_t} and for \eqn{y_t} in the AR part of the model.
##' Default is \code{c("linear", "linear")}
##'
##' @param linkh a character indicating which link must be associated to
##' the chaotic process. See \sQuote{The BTSR structure}
##' in \code{\link{btsr.functions}} for details and \code{\link{link.btsr}}
##' for valid links. Default is \code{"linear"}.
##'
##' @param ctt.h numeric; the constant to be associated to the link \eqn{h},
##' when \code{linkh = "linear"}. Default is 1.
##'
##' @param seed optionally, an integer which gives the value of the fixed
##' seed to be used by the random number generator. If missing, a random integer
##' is chosen uniformly from 1,000 to 10,000.
##'
##' @param rngtype optionally, an integer indicating which random number generator
##' is to be used. Default is 2. See \sQuote{Common Arguments}
##' in \code{\link{btsr.functions}}.
##'
##' @param debug logical, if \code{TRUE} the output from FORTRAN is returned (for
##' debugging purposes). Default is \code{FALSE} for all models.
##'
##' @return
##' The function \code{BARC.sim} returns the simulated time series yt by default.
##' If \code{complete = TRUE}, a list with the following components
##' is returned instead:
##' \itemize{
##' \item \code{model}: string with the text \code{"BARC"}
##'
##' \item \code{yt}: the simulated time series
##'
##' \item \code{mut}: the conditional mean
##'
##' \item \code{etat}: the linear predictor \eqn{g(\mu_t)}
##'
##' \item \code{error}: the error term \eqn{r_t}
##'
##' \item \code{xreg}: the regressors (if included in the model).
##'
##' \item \code{debug}: the output from FORTRAN (if requested).
##'
##' }
##'
##' @examples
##' m1 <- BARC.sim(linkg = "linear", linkh = "linear",
##' n = 100, seed = 2021, complete = TRUE, ctt.h = 0.6,
##' coefs = list(nu = 15, theta = 0.85, u0 = pi/4))
##'
##' plot.ts(m1$yt)
##' lines(m1$mut, col = "red")
##'
##' @export
##'
##' @md
BARC.sim <- function(n = 1, burn = 0, xreg = NULL, map = 4,
coefs = list(alpha = 0, beta = NULL, phi = NULL,
theta = 0.5, nu = 20, u0 = pi/4),
y.start = NULL, xreg.start = NULL,
xregar = TRUE, error.scale = 0, complete = FALSE,
linkg = c("linear","linear"), linkh = "linear",
ctt.h = 1, seed = NULL, rngtype = 2, debug = FALSE){
##----------------------------------
## checking required parameters:
##----------------------------------
if(is.null(coefs)) stop("coefs missing with no default")
if(!"list" %in% class(coefs)) stop("coefs must be a list")
if(is.null(coefs$u0)) stop("u0 is missing")
##--------------------------------------------
## checking the map, theta and the link "h"
##--------------------------------------------
cb <- .barc.configs(map = map, theta = coefs$theta, linkh = linkh)
##--------------------------------------------
## checking remaining configurations:
##--------------------------------------------
cf <- .sim.configs(model = "BARC", xreg = xreg,
y.start = y.start, xreg.start = xreg.start,
linkg = linkg, n = n, burn = burn,
coefs = coefs, xregar = xregar,
error.scale = error.scale, seed = seed,
rngtype = rngtype, y.default = 0)
out <- .barc.sim(u0 = coefs$u0, map = map, ctt.h = ctt.h,
configs = cf, bconfigs = cb, complete = complete,
debug = debug)
class(out) <- c(class(out), "barc")
invisible(out)
}
##------------------------------------------------------------------------------------
## internal function: makes the calculations and reports only the relevant variables
##------------------------------------------------------------------------------------
.barc.sim <- function(u0, map, ctt.h, configs, bconfigs, complete, debug){
out <- .Fortran("simbarcR",
n = configs$n,
burn = configs$burn,
nu = configs$nu,
alpha = configs$alpha,
nreg = configs$nreg,
beta = configs$beta,
p = configs$p,
phi = configs$phi,
r = bconfigs$r,
theta = bconfigs$theta,
u0 = u0,
map = as.integer(map),
link = c(configs$linkg, bconfigs$linkh),
ctt.h = ctt.h,
xreg = configs$xreg,
xregar = configs$xregar,
yt = numeric(configs$n+configs$burn),
ystart = configs$y.start,
xstart = configs$xreg.start,
mut = numeric(configs$n+configs$burn),
etat = numeric(configs$n+configs$burn),
error = numeric(configs$n+configs$burn),
escale = configs$error.scale,
Ts = numeric(configs$n+configs$burn),
ns = length(configs$seed),
seed = configs$seed,
rngtype = configs$rngtype,
rev = 1L)
if(out$rev == 1){
warning("Revision Required. Try changing the link functions\n", immediate. = TRUE)
return(invisible(out))
}
##------------------------------------
## getting the final time series
##------------------------------------
if(configs$burn == 0) u0.star <- u0
else u0.star <- out$Ts[configs$burn+1]
##-----------------------------------------------
## if complete = TRUE returns the full model.
## otherwise only yt is returned
##-----------------------------------------------
ini <- configs$burn + 1
end <- configs$burn + configs$n
if(complete){
final <- list(model = "BARC",
yt = out$yt[ini:end],
mut = out$mut[ini:end],
u0 = u0.star,
Ts = out$Ts[ini:end],
etat = out$etat[ini:end],
error = out$error[ini:end],
xreg = out$xreg[ini:end,])
if(out$nreg == 0) final$xreg <- NULL
if(debug) final$out.Fortran <- out
}
else final <- out$yt[ini:end]
invisible(final)
}
##' @rdname BARC.functions
##' @order 3
##'
##' @details
##'
##' The function \code{BARC.extract} allows the user to extract the
##' components \eqn{y_t}, \eqn{\mu_t}, \eqn{\eta_t = g(\mu_t)}, \eqn{r_t},
##' \eqn{T^t(u_0)}, the log-likelihood, and the vectors and matrices used to
##' calculate the score vector and the information matrix associated to a given
##' set of parameters.
##'
##' This function can be used by any user to create an objective function
##' that can be passed to optimization functions not available in BTSR Package.
##' At this point, there is no other use for which this function was intended.
##'
##' @param yt a numeric vector with the observed time series. If missing, an error
##' message is issued.
##'
##' @param nnew optionally, the number of out-of sample predicted values required.
##' Default is 0.
##'
##' @param xnew a vector or matrix, with \code{nnew} observations of the
##' regressors observed/predicted values corresponding to the period of
##' out-of-sample forecast. If \code{xreg = NULL}, \code{xnew} is ignored.
##'
##' @param p a non-negative integer. The order of AR polynomial.
##' If missing, the value of \code{p} is calculated from length(coefs$phi)
##' and length(fixed.values$phi). For fitting, the default is 0.
##'
##' @param r a non-negative integer. The size of the vector \code{theta}.
##' If missing, the value of \code{r} is calculated from length(coefs$theta)
##' and length(fixed.values$theta). For fitting, the default is 1.
##'
##' @param lags optionally, a list with the lags that the values in \code{coefs} correspond to.
##' The names of the entries in this list must match the ones in \code{coefs}.
##' For one dimensional coefficients, the \code{lag} is obviously always 1 and can
##' be suppressed. An empty list indicates that either the argument \code{fixed.lags}
##' is provided or all lags must be used.
##'
##' @param fixed.values optionally, a list with the values of the coefficients
##' that are fixed. By default, if a given vector (such as the vector of AR coefficients)
##' has fixed values and the corresponding entry in this list is empty, the fixed values
##' are set to zero. The names of the entries in this list must match the ones
##' in \code{coefs}.
##'
##' @param fixed.lags optionally, a list with the lags that the fixed values
##' in \code{fixed.values} correspond to. The names of the entries in this list must
##' match the ones in \code{fixed.values}. For one dimensional coefficients, the
##' \code{lag} is obviously always 1 and can be suppressed. If an empty list is provided
##' and the model has fixed lags, the argument \code{lags} is used as reference.
##'
##' @param llk logical, if \code{TRUE} the value of the log-likelihood function
##' is returned. Default is \code{TRUE}.
##'
##' @param sco logical, if \code{TRUE} the score vector is returned.
##' Default is \code{FALSE}.
##'
##' @param info logical, if \code{TRUE} the information matrix is returned.
##' Default is \code{FALSE}. For the fitting function, \code{info} is automatically
##' set to \code{TRUE} when \code{report = TRUE}.
##'
##' @return
##' The function \code{BARC.extract} returns a list with the following components.
##'
##' \itemize{
##' \item \code{model}: string with the text \code{"BARC"}.
##'
##' \item \code{coefs}: the coefficients of the model passed through the
##' \code{coefs} argument.
##'
##' \item \code{yt}: the observed time series.
##'
##' \item \code{gyt}: the transformed time series \eqn{g_2(y_t)}.
##'
##' \item \code{mut}: the conditional mean.
##'
##' \item \code{etat}: the linear predictor \eqn{g_1(\mu_t)}.
##'
##' \item \code{error}: the error term \eqn{r_t}.
##'
##' \item \code{xreg}: the regressors (if included in the model).
##'
##' \item \code{Ts}: the chaotic process \eqn{T^t(u_0)}.
##'
##' \item \code{sll}: the sum of the conditional log-likelihood (if requested).
##'
##' \item \code{score}: the score vector (if requested).
##'
##' \item \code{info.Matrix}: the information matrix (if requested).
##'
##' \item \code{Drho}, \code{T}, \code{E}, \code{h}: additional matrices and vectors
##' used to calculate the score vector and the information matrix. (if requested).
##'
##' \item \code{yt.new}: the out-of-sample forecast (if requested).
##'
##' \item \code{Ts.new}: the out-of-sample forecast for the chaotic
##' process (if requested).
##'
##' \item \code{out.Fortran}: FORTRAN output (if requested).
##' }
##'
##' @seealso
##' \code{\link{btsr.extract}}
##'
##' @examples
##' #------------------------------------------------------------
##' # Generating a sample from a BARC model
##' #------------------------------------------------------------
##'
##' m1 <- BARC.sim(linkg = "linear", linkh = "linear",
##' n = 100, seed = 2021, complete = TRUE, ctt.h = 0.6,
##' coefs = list(nu = 15, theta = 0.85, u0 = pi/4))
##'
##' #------------------------------------------------------------
##' # Extracting the conditional time series given yt and
##' # a set of parameters
##' #------------------------------------------------------------
##'
##' e1 = BARC.extract(yt = m1$yt, map = 4, ctt.h = 0.6,
##' coefs = list(nu = 15, theta = 0.85),
##' fixed.values = list(u0 = pi/4),
##' linkg = "linear", linkh = "linear", llk = TRUE,
##' sco = TRUE, info = TRUE)
##'
##' #----------------------------------------------------
##' # comparing the simulated and the extracted values
##' #----------------------------------------------------
##' cbind(head(m1$mut), head(e1$mut))
##'
##' #---------------------------------------------------------
##' # the log-likelihood, the score vector and the information matrix
##' # (the score vector and the information matrix are
##' #  obtained numerically)
##' #---------------------------------------------------------
##' e1$sll
##' e1$score
##' e1$info.Matrix
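##'
##' #---------------------------------------------------------
##' # illustrative sketch (not run; values are arbitrary):
##' # extracting with an AR(2) component where the second
##' # lag is held fixed at zero
##' #---------------------------------------------------------
##' # e2 = BARC.extract(yt = m1$yt, map = 4, ctt.h = 0.6, p = 2,
##' #                   coefs = list(nu = 15, theta = 0.85, phi = 0.2),
##' #                   lags = list(phi = 1),
##' #                   fixed.values = list(u0 = pi/4, phi = 0),
##' #                   fixed.lags = list(phi = 2),
##' #                   linkg = "linear", linkh = "linear", llk = TRUE)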
##'
##' @export
##' @md
BARC.extract <- function(yt, xreg = NULL, nnew = 0, xnew = NULL,
p, r, coefs = list(), lags = list(),
fixed.values = list(), fixed.lags = list(),
y.start = NULL, xreg.start = NULL,
xregar = TRUE, error.scale = 0, map = 4,
linkg = c("linear","linear"), linkh = "linear",
ctt.h = 1, llk = TRUE, sco = FALSE,
info = FALSE, debug = FALSE){
if(is.null(coefs) & is.null(fixed.values))
stop("Please, provide a list of coefficients")
if(!is.null(coefs)){
if(! "list" %in% class(coefs)) stop("coefs must be a list")}
if(!is.null(fixed.values)){
if(! "list" %in% class(fixed.values)) stop("fixed.values must be a list")}
else{ fixed.values <- list()}
##----------------------------------------------------
## checking if the required parameters are present
##----------------------------------------------------
if(is.null(coefs$u0)){
if(is.null(fixed.values$u0)) stop("u0 is missing with no default")}
if(is.null(coefs$theta)) theta <- fixed.values$theta
else theta <- coefs$theta
##--------------------------------------------
## checking the map, theta and the link "h"
##--------------------------------------------
cb <- .barc.configs(map = map, theta = theta, linkh = linkh)
if(missing(p)) p = length(coefs$phi) + length(fixed.values$phi)
if(missing(r)) r = cb$r
##----------------------------------------------------------------------
## theoretical score vector and information matrix are not implemented
## yet so there is no extra information to be extracted from FORTRAN
##----------------------------------------------------------------------
cf <- .extract.configs(model = "BARC", yt = yt, y.start = y.start,
y.lower = 0, y.upper = 1, openIC = c(TRUE, TRUE),
xreg = xreg, xnew = xnew, nnew = nnew,
xreg.start = xreg.start, linkg = linkg,
p = p, q = r, inf = 0, m = 0, xregar = xregar,
error.scale = error.scale, coefs = coefs,
lags = lags, fixed.values = fixed.values,
fixed.lags = fixed.lags, llk = llk, sco = sco,
info = info, extra = FALSE)
# fixing dummy argument
if(cf$npar == 0) cf$coefs <- NULL
#----------------------------------------------------------------
# merging the information about the map and the linkh with the
# other configurations
#----------------------------------------------------------------
cb <- cb[ "theta" != names(cb)]
cf[names(cb)] <- cb
cf$u0 <- .coefs.convert(parname = "u0", fvalues = fixed.values$u0, flags = NULL,
coefs = coefs$u0, lags = NULL, npar = 1)
if(cf$u0$nfix == 0){
cf$coefs <- c(cf$coefs, u0 = cf$u0$coefs)
cf$coefsname <- c(cf$coefsname, "u0")
}
cf$npar <- as.integer(length(cf$coefs))
cf$ctt.h <- ctt.h
# fixing dummy argument
if(cf$npar == 0) cf$coefs <- 0
out <- .barc.extract(yt = yt, configs = cf, debug = debug)
out$model <- "BARC"
class(out) <- c(class(out), "barc")
invisible(out)
}
##------------------------------------------------------------------------------------
## internal function: makes the calculations and reports only the relevant variables
##------------------------------------------------------------------------------------
.barc.extract <- function(yt, configs, debug){
temp <- .Fortran("barcR",
n = configs$n,
yt = yt,
gyt = numeric(configs$n),
ystart = configs$y.start,
nreg = configs$nreg,
xreg = configs$xreg,
xstart = configs$xreg.start,
mut = numeric(configs$n),
etat = numeric(configs$n),
error = numeric(configs$n),
escale = configs$error.scale,
Ts = numeric(configs$n),
nnew = configs$nnew,
xnew = configs$xnew,
ynew = numeric(max(1,configs$nnew)),
Tnew = numeric(max(1,configs$nnew)),
link = c(configs$linkg, configs$linkh),
ctt.h = configs$ctt.h,
map = configs$map,
npar = max(1L, configs$npar),
coefs = configs$coefs,
fixa = configs$alpha$nfix,
alpha = configs$alpha$fvalues,
fixb = configs$beta$nfix,
flagsb = configs$beta$flags,
beta = configs$beta$fvalues,
p = configs$p,
fixphi = configs$phi$nfix,
flagsphi = configs$phi$flags,
phi = configs$phi$fvalues,
xregar = configs$xregar,
r = configs$r,
fixtheta = configs$theta$nfix,
flagstheta = configs$theta$flags,
theta = configs$theta$fvalues,
fixnu = configs$nu$nfix,
nu = configs$nu$fvalues,
fixu0 = configs$u0$nfix,
u0 = configs$u0$fvalues,
llk = configs$llk,
sll = 0,
sco = configs$sco,
U = numeric(max(1, configs$npar*configs$sco)),
info = configs$info,
K = diag(max(1,configs$npar*configs$info)))
out <- list(model = "BARC")
vars <- c("coefs","yt", "xreg", "Ts", "gyt", "mut", "etat", "error")
out[vars] <- temp[vars]
if(configs$nreg == 0) out$xreg = NULL
if(configs$llk == 1) out$sll <- temp$sll
if(configs$sco == 1){
out$score <- temp$U
names(out$score) <- names(configs$coefs)
}
if(configs$info == 1){
out$info.Matrix <- as.matrix(temp$K)
colnames(out$info.Matrix) <- names(configs$coefs)
rownames(out$info.Matrix) <- names(configs$coefs)
}
if(configs$nnew > 0){
out$yt.new <- temp$ynew
out$Ts.new <- temp$Tnew
}
if(debug) out$out.Fortran <- temp
invisible(out)
}
##' @rdname BARC.functions
##' @order 4
##'
##' @details
##' The function \code{BARC.fit} fits a BARC model to a given univariate time
##' series. For now, available optimization algorithms are \code{"L-BFGS-B"} and
##' \code{"Nelder-Mead"}. Both methods accept bounds for the parameters. For
##' \code{"Nelder-Mead"}, bounds are set via parameter transformation.
##'
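##' As a minimal sketch (not part of the original documentation; it reuses
##' the data and starting values from the examples below), the optimization
##' method is selected through the \code{control} list:
##'
##' ```
##' f2 <- BARC.fit(yt = m1$yt, map = 4, ctt.h = 0.6,
##'                start = list(nu = 10, theta = 0.6, u0 = 0.5),
##'                lower = list(nu = 0, theta = 0, u0 = 0),
##'                upper = list(theta = 1, u0 = 1),
##'                control = list(method = "L-BFGS-B", maxit = 1000))
##' ```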
##'
##' @param start a list with the starting values for the non-fixed coefficients
##' of the model. If an empty list is provided, the function \code{\link{coefs.start}}
##' is used to obtain starting values for the parameters.
##'
##' @param ignore.start logical. If starting values are not provided, the
##' function uses the default values and \code{ignore.start} is ignored.
##' If starting values are provided and \code{ignore.start = TRUE}, they are
##' discarded and new starting values are calculated. The default is \code{FALSE}.
##'
##' @param lower optionally, list with the lower bounds for the
##' parameters. The names of the entries in these lists must match the ones
##' in \code{start}. The default is to assume that the parameters have no lower
##' bound except for \code{nu}, for which the default is 0. Only the bounds for
##' bounded parameters need to be specified.
##'
##' @param upper optionally, list with the upper bounds for the
##' parameters. The names of the entries in these lists must match the ones
##' in \code{start}. The default is to assume that the parameters have no upper
##' bound. Only the bounds for bounded parameters need to be specified.
##'
##' @param control a list with configurations to be passed to the
##' optimization subroutines. Missing arguments will receive default values. See
##' \code{\link{fit.control}}.
##'
##' @param report logical, if \code{TRUE} the summary from model estimation is
##' printed and \code{info} is automatically set to \code{TRUE}. Default is \code{TRUE}.
##'
##' @param ... further arguments passed to the internal functions.
##'
##' @return
##' The function \code{BARC.fit} returns a list with the following components.
##' Each particular model can have additional components in this list.
##'
##' \itemize{
##' \item \code{model}: string with the text \code{"BARC"}
##'
##' \item \code{convergence}: An integer code. 0 indicates successful completion.
##' The error codes depend on the algorithm used.
##'
##' \item \code{message}: A character string giving any additional information
##' returned by the optimizer, or NULL.
##'
##' \item \code{counts}: an integer giving the number of function evaluations.
##'
##' \item \code{control}: a list of control parameters.
##'
##' \item \code{start}: the starting values used by the algorithm.
##'
##' \item \code{coefficients}: The best set of parameters found.
##'
##' \item \code{n}: the sample size used for estimation.
##'
##' \item \code{series}: the observed time series
##'
##' \item \code{gyt}: the transformed time series \eqn{g_2(y_t)}
##'
##' \item \code{fitted.values}: the conditional mean, which corresponds to
##' the in-sample forecast, also denoted fitted values
##'
##' \item \code{etat}: the linear predictor \eqn{g_1(\mu_t)}
##'
##' \item \code{error.scale}: the scale for the error term.
##'
##' \item \code{error}: the error term \eqn{r_t}
##'
##' \item \code{residual}: the observed minus the fitted values. The same as
##' the \code{error} term if \code{error.scale = 0}.
##'
##' \item \code{forecast}: the out-of-sample forecast for \eqn{y_t} (if requested).
##'
##' \item \code{Ts.forecast}: the out-of-sample forecast for \eqn{T^t(u_0)}
##' (if requested).
##'
##' \item \code{xnew}: the observed/predicted values of the regressors
##' corresponding to the period of the out-of-sample forecast.
##' Only included if \code{xreg} is not \code{NULL} and \code{nnew > 0}.
##'
##' \item \code{sll}: the sum of the conditional log-likelihood (if requested)
##'
##' \item \code{info.Matrix}: the information matrix (if requested)
##'
##' \item \code{configs}: a list with the configurations adopted to fit the model.
##' This information is used by the prediction function.
##'
##' \item \code{out.Fortran}: FORTRAN output (if requested)
##'
##' \item \code{call}: a string with the description of the fitted model.
##'
##' }
##'
##' @seealso
##' \code{\link{btsr.fit}}
##'
##' @examples
##'
##' #------------------------------------------------------------
##' # Generating a sample from a BARC model
##' #------------------------------------------------------------
##'
##' m1 <- BARC.sim(linkg = "linear", linkh = "linear",
##' n = 100, seed = 2021, complete = TRUE, ctt.h = 0.6,
##' coefs = list(nu = 15, theta = 0.85, u0 = pi/4))
##'
##' #------------------------------------------------------------
##' # Fitting a BARC model. Assuming only alpha fixed.
##' #------------------------------------------------------------
##' f1 = BARC.fit(yt = m1$yt, map = 4, ctt.h = 0.6,
##' start = list(nu = 10, theta = 0.6, u0 = 0.5),
##' lower = list(nu = 0, theta = 0, u0 = 0),
##' upper = list(theta = 1, u0 = 1),
##' fixed.values = list(alpha = 0),
##' control = list(iprint = -1, method = "Nelder-Mead"))
##'
##' coefficients(f1)
##'
##' plot.ts(m1$yt)
##' lines(f1$fitted.values, col = "red")
##'
##' #------------------------------------------------------------
##' # Out-of-sample forecast
##' #------------------------------------------------------------
##' pred = predict(f1, nnew = 5)
##' pred$forecast
##' pred$Ts.forecast
##'
##' @export
##'
##' @md
BARC.fit <- function(yt, xreg = NULL, nnew = 0, xnew = NULL,
p = 0, r = 1, start = list(), lags = list(),
fixed.values = list(), ignore.start = FALSE,
fixed.lags = list(), lower = list(nu = 0, u0 = 0),
upper = list(nu = Inf, u0 = 1), map = 4,
linkg = c("linear","linear"), linkh = "linear",
ctt.h = 1, sco = FALSE, info = FALSE, xregar = TRUE,
y.start = NULL, xreg.start = NULL,
error.scale = 0, control = list(), report = TRUE,
debug = FALSE,...){
if(report) info = TRUE
##--------------------------------------------------------------
  ## checking u0, theta, the map and the corresponding link.
## Here we need to take into account that both start and
## fixed.values can be empty if start was not initialized yet.
##
## if initialization is required the value theta.barc will
## replace the starting value provided by coefs.start
##--------------------------------------------------------------
start0 <- fv <- list()
start0[names(start)] <- start
fv[names(fixed.values)] <- fixed.values
  # initialization required for u0?
  if(is.null(c(start0$u0, fv$u0))) start0$u0 = pi/4 # u0 was not provided
  # initialization required for theta?
if(is.null(start0$theta)) theta.barc <- fv$theta # theta is fixed
else theta.barc <- start0$theta # checking if theta was given
if(is.null(theta.barc)) theta.barc <- .theta.start.barc(map) # theta needs initialization
  # default bounds for nu and u0 (merged with user provided values)
lw <- list(nu = 0, u0 = 0); up <- list(nu = Inf, u0 = 1)
lw[names(lower)] <- lower; up[names(upper)] <- upper
lower <- lw; upper <- up
# fix the lower and upper values for theta (if needed)
if(is.null(fv$theta)){
lu <- .theta.lu.fix(map = map, lower = lower$theta, upper = upper$theta)
lower$theta = lu$lower
upper$theta = lu$upper
}
cb <- .barc.configs(map = map, theta = theta.barc, linkh = linkh)
## ------------------------------------------------------------------------
## updating start and fixed.values to pass to the configuration function
## ------------------------------------------------------------------------
start <- start0[names(start0) != "u0"]
fixed.values <- fv[names(fv) != "u0"]
cf <- .fit.configs(model = "BARC", yt = yt, y.start = y.start,
y.lower = 0, y.upper = 1, openIC = c(TRUE, TRUE),
xreg = xreg, xnew = xnew, nnew = nnew,
xreg.start = xreg.start, linkg = linkg,
p = p, d = FALSE, q = r, inf = 0, m = 0,
xregar = xregar, error.scale = error.scale,
start = start, ignore.start = ignore.start,
lags = lags, fixed.values = fixed.values,
fixed.lags = fixed.lags, lower = lower,
upper = upper, control = control,
sco = sco, info = info, extra = FALSE,
theta.barc = theta.barc)
  # if the configuration step signals a problem, return its output early
  if(!is.null(cf$conv)) return(invisible(cf))
cb <- cb[ "theta" != names(cb)]
cf[names(cb)] <- cb
##--------
## u0
##--------
cf$u0 <- .coefs.convert(parname = "u0", fvalues = fv$u0, lags = NULL,
flags = NULL, coefs = start0$u0, npar = 1)
if(cf$u0$nfix == 0){
cf$coefs <- c(cf$coefs, u0 = cf$u0$coefs)
cb <- .bounds.convert(npar = 1, lower = lower$u0, upper = upper$u0)
cf$lower <- c(cf$lower, u0 = cb$lower)
cf$upper <- c(cf$upper, u0 = cb$upper)
cf$nbd <- c(cf$nbd, u0 = cb$nbd)
cf$coefsname = c(cf$coefsname, "u0")
}
cf$npar <- length(cf$coefs)
cf$ctt.h <- ctt.h
out <- .barc.fit(yt = yt, configs = cf, debug = debug)
out$call <- .fit.print(model = "BARC", p = cf$p, q = NULL, d = FALSE, nreg = cf$nreg)
class(out) <- c(class(out), "barc")
if(report) print(summary(out))
invisible(out)
}
##------------------------------------------------------------------------------------
## internal function: makes the calculations and reports only the relevant variables
##------------------------------------------------------------------------------------
.barc.fit <- function(yt, configs, debug){
if(configs$control$method == "L-BFGS-B"){
temp <- .Fortran("optimlbfgsbbarcR",
npar = max(1L, configs$npar),
coefs = configs$coefs,
nbd = configs$nbd,
lower = configs$lower,
upper = configs$upper,
n = configs$n,
yt = yt,
gy = numeric(configs$n),
ystart = configs$y.start,
nreg = configs$nreg,
xreg = configs$xreg,
xstart = configs$xreg.start,
mut = numeric(configs$n),
etat = numeric(configs$n),
error = numeric(configs$n),
escale = configs$error.scale,
Ts = numeric(configs$n),
nnew = configs$nnew,
xnew = configs$xnew,
ynew = numeric(max(1,configs$nnew)),
Tnew = numeric(max(1,configs$nnew)),
link = c(configs$linkg, configs$linkh),
ctt.h = configs$ctt.h,
map = configs$map,
fixa = configs$alpha$nfix,
alpha = configs$alpha$fvalues,
fixb = configs$beta$nfix,
flagsb = configs$beta$flags,
beta = configs$beta$fvalues,
p = configs$p,
fixphi = configs$phi$nfix,
flagsphi = configs$phi$flags,
phi = configs$phi$fvalues,
xregar = configs$xregar,
r = configs$r,
fixtheta = configs$theta$nfix,
flagstheta = configs$theta$flags,
theta = configs$theta$fvalues,
fixnu = configs$nu$nfix,
nu = configs$nu$fvalues,
fixu0 = configs$u0$nfix,
u0 = configs$u0$fvalues,
sll = 0,
U = numeric(max(1, configs$npar)),
info = configs$info,
K = diag(max(1, configs$npar*configs$info)),
iprint = as.integer(configs$control$iprint),
factr = configs$control$factr,
pgtol = configs$control$pgtol,
maxit = as.integer(configs$control$maxit),
neval = 0L,
conv = 0L)
}else{
temp <- .Fortran("optimnelderbarcR",
npar = max(1L, configs$npar),
coefs = configs$coefs,
nbd = configs$nbd,
lower = configs$lower,
upper = configs$upper,
n = configs$n,
yt = yt,
gy = numeric(configs$n),
ystart = configs$y.start,
nreg = configs$nreg,
xreg = configs$xreg,
xstart = configs$xreg.start,
mut = numeric(configs$n),
etat = numeric(configs$n),
error = numeric(configs$n),
escale = configs$error.scale,
Ts = numeric(configs$n),
nnew = configs$nnew,
xnew = configs$xnew,
ynew = numeric(max(1,configs$nnew)),
Tnew = numeric(max(1,configs$nnew)),
link = c(configs$linkg, configs$linkh),
ctt.h = configs$ctt.h,
map = configs$map,
fixa = configs$alpha$nfix,
alpha = configs$alpha$fvalues,
fixb = configs$beta$nfix,
flagsb = configs$beta$flags,
beta = configs$beta$fvalues,
p = configs$p,
fixphi = configs$phi$nfix,
flagsphi = configs$phi$flags,
phi = configs$phi$fvalues,
xregar = configs$xregar,
r = configs$r,
fixtheta = configs$theta$nfix,
flagstheta = configs$theta$flags,
theta = configs$theta$fvalues,
fixnu = configs$nu$nfix,
nu = configs$nu$fvalues,
fixu0 = configs$u0$nfix,
u0 = configs$u0$fvalues,
sll = 0,
sco = configs$sco,
U = numeric(max(1,configs$npar*configs$sco)),
info = configs$info,
K = diag(max(1, configs$npar*configs$info)),
iprint = as.integer(configs$control$iprint),
stopcr = configs$control$stopcr,
maxit = as.integer(configs$control$maxit),
neval = 0L,
conv = 0L)
}
temp$llk <- 1
temp$sco <- configs$sco
# for some reason, sometimes info returns NULL
# from Fortran. We need to fix this!!
temp$info <- configs$info
out <- .fit.get.results(model = "BARC", temp, configs = configs)
if(debug) out$out.Fortran <- temp
invisible(out)
}
##------------------------------------------------------------------------------------
## internal function: makes the calculations and reports only the relevant variables
##------------------------------------------------------------------------------------
.barc.predict <- function(object, debug){
if(object$model != "BARC") stop("Wrong configurations for BARC models")
temp <- .Fortran("predictbarcR",
n = object$n,
series = object$series,
gyt = object$gyt,
nreg = object$nreg,
xreg = object$xreg,
escale = object$error.scale,
error = object$error,
Ts = object$Ts,
nnew = object$nnew,
xnew = object$xnew,
ynew = numeric(max(1,object$nnew)),
Tnew = numeric(max(1,object$nnew)),
link = c(object$linkg, object$linkh),
ctt.h = object$ctt.h,
map = object$map,
npar = max(1L, object$npar),
coefs = object$coefs,
fixa = object$alpha$nfix,
alpha = object$alpha$fvalues,
fixb = object$beta$nfix,
flagsb = object$beta$flags,
beta = object$beta$fvalues,
p = object$p,
fixphi = object$phi$nfix,
flagsphi = object$phi$flags,
phi = object$phi$fvalues,
xregar = object$xregar,
r = object$r,
fixtheta = object$theta$nfix,
flagstheta = object$theta$flags,
theta = object$theta$fvalues,
fixnu = object$nu$nfix,
nu = object$nu$fvalues,
fixu0 = object$u0$nfix,
u0 = object$u0$fvalues)
out <- list(model = object$model,
yt.new = temp$ynew,
Ts.new = temp$Tnew)
if(debug) out$out.Fortran <- temp
invisible(out)
}
##-------------------------------------------------------------------------
## Internal function: Used to check if theta is compatible with the map
## selected by the user
##-------------------------------------------------------------------------
.check.map <- function(map, theta){
##------------------------------------------
## Maps
##------------------------------------------
## 1 = (kx)(mod 1). k integer
## 2 = Rafael's map. 0 <= theta <= 1
## 3 = logistic map. 0 <= theta <= 4
## 4 = Manneville-Pomeau. 0 < theta < 1
## 5 = Lasota-Mackey's map. No theta
##-------------------------------------------
if((map != 5) & is.null(theta))
stop("theta is missing with no default")
maps <- data.frame(map = c(1:5),
r = c(1,1,1,1,0),
lower = c(1,0,0,0,NA),
upper = c(Inf,1,4,1,NA))
  r <- maps[map, "r"]
  msg <- NULL
  if(length(theta) > r & r == 0)
    print("Theta will be ignored.")
  if(r == 0) return(invisible(list(map = as.integer(map), theta = 0, r = 0L)))
  ## checking the length of theta
if(length(theta) != r)
msg <- paste(msg,"Length of theta = ", length(theta),
" but the map requires length ", r, ".", sep = "")
if(length(theta) > r & r > 0)
msg <- paste(msg, "Only the first ", r, " values will be used.", sep = "")
if(length(theta) < r)
stop("Please provide a new theta with length ", r)
## checking the range of theta
## !!! if theta is a vector this part of the code will need revision !!!!!
if(theta < maps[map, "lower"] | theta > maps[map, "upper"])
stop("Theta is out of range")
if(!is.null(msg)) print(msg)
invisible(list(map = as.integer(map), theta = theta[1:r], r = as.integer(r)))
}
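##-------------------------------------------------------------------------
## Illustration (added comment, not from the original source):
## .check.map(map = 3, theta = 3.5) returns list(map = 3L, theta = 3.5, r = 1L)
## since the logistic map takes a single parameter in [0, 4], whereas
## .check.map(map = 5, theta = NULL) returns r = 0 (Lasota-Mackey has no theta).
##-------------------------------------------------------------------------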
##-------------------------------------------------------------------------
## Internal function: Used to check the configurations for simulation
## of BARC models
##-------------------------------------------------------------------------
.barc.configs <- function(map = 4, theta = 0.5, linkh = "linear"){
out <- c()
## checking the map and the corresponding parameter
out <- .check.map(map = map, theta = theta)
## link function
out$linkh <- .link.convert(linkh)
if(is.na(out$linkh)) stop(paste("link ", linkh, " not implemented", sep = ""))
return(out)
}
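##-------------------------------------------------------------------------
## Illustration (added comment, not from the original source): for the
## Manneville-Pomeau map, .barc.configs(map = 4, theta = 0.5, linkh = "linear")
## returns the checked map information (map, theta, r) together with the
## integer code used internally for the link "h".
##-------------------------------------------------------------------------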
##-------------------------------------------------------------------------
## Internal function: Used to check if theta is compatible with the map
## selected by the user
##-------------------------------------------------------------------------
.theta.start.barc <- function(map){
##------------------------------------------
## Maps
##------------------------------------------
## 1 = (kx)(mod 1). k integer
## 2 = Rafael's map. 0 <= theta <= 1
## 3 = logistic map. 0 <= theta <= 4
## 4 = Manneville-Pomeau. 0 < theta < 1
## 5 = Lasota-Mackey's map. No theta
##-------------------------------------------
theta <- c(3, 0.5, 3.5, 0.5, 0)
theta[map]
}
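## e.g. (added comment): .theta.start.barc(4) returns 0.5, the default
## starting value of theta for the Manneville-Pomeau map.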
##-------------------------------------------------------------------------
## Internal function: Used to check if the limits for theta are correct
##-------------------------------------------------------------------------
.theta.lu.fix <- function(map, lower, upper){
maps <- data.frame(map = c(1:5),
lower = c(1,0,0,0,NA),
upper = c(Inf,1,4,1,NA))
# checking if lower and upper limits are provided
fix.lower <- fix.upper <- FALSE
if(is.null(lower)) fix.lower <- TRUE
if(is.null(upper)) fix.upper <- TRUE
# checking if lower and upper are in the correct range
if(!is.null(lower))
if(lower < maps[map, "lower"] | lower > maps[map, "upper"])
fix.lower <- TRUE
if(!is.null(upper))
if(upper < maps[map, "lower"] | upper > maps[map, "upper"])
fix.upper <- TRUE
# if needed, fix the wrong values
if(fix.lower) lower <- maps[map, "lower"]
if(fix.upper) upper <- maps[map, "upper"]
if(lower > upper)
stop("Please, check the lower and upper limits for theta")
if(lower == upper & map != 5){
msg = "lower and upper limits for theta are the same.\n "
    msg = paste0(msg, "The range for the selected map is:\n")
warning(paste0(msg, "lower = ", maps[map, "lower"],"\n",
"upper = ", maps[map, "upper"]))
}
invisible(list(lower = lower, upper = upper))
}
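##-------------------------------------------------------------------------
## Illustration (added comment, not from the original source): for the
## logistic map, .theta.lu.fix(map = 3, lower = -1, upper = 2) resets the
## lower limit to 0 (the smallest admissible value), keeps upper = 2 and
## returns list(lower = 0, upper = 2).
##-------------------------------------------------------------------------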