###########################################
# This version: adjusted on 2022-07-09 #
###########################################
# it includes all the auxiliary functions that may be called inside as well as outside the bms function
#######################
# FUNCTIONS FOR USERS #
#########################################################################
# SEE ALSO PLOTTING FUNCTIONS AT THE END
# beta.draws.bma, pmp.bma, estimates.bma, info.bma
# cf. as well the functions combine_chains & [.bma farther below
# moreover: bin2hex, hex2bin, f21simple, fullmodel.ssq, is.bma, print.bma, summary.bma, print.topmod, is.topmod
# plotting: plotModelsize, plotComp, plotConv, plotDensity, plot.bma, image.bma
#' Coefficients of the Best Models
#'
#' Returns a matrix whose columns are the expected values (or standard
#' deviations) of coefficients for the best models in a bma object.
#'
#'
#' @param bmao a 'bma' object (as e.g. resulting from \code{\link{bms}})
#' @param stdev if \code{stdev=FALSE} then \code{beta.draws.bma} returns the
#' (conditional) posterior expected values of the coefficients (i.e. 'Bayesian
#' coefficients'). If \code{stdev=TRUE} it returns their posterior standard
#' deviations.
#' @return Each column presents the coefficients for the model indicated by its
#' column name. The zero coefficients are the excluded covariates per model.
#' Note that the coefficients returned are only those of the best (100) models
#' encountered by the \code{bma} object (cf. argument \code{nmodels} of
#' \code{\link{bms}}).
#'
#' For aggregate coefficients please refer to \code{\link{coef.bma}}.
#' @note Note that the elements of \code{beta.draws.bma(bmao)} correspond to
#' \code{bmao$topmod$betas()}
#' @seealso \code{\link{bms}} for creating bms objects, \code{\link{coef.bma}}
#' for aggregate coefficients
#'
#' Check \url{http://bms.zeugner.eu} for additional help.
#' @keywords utilities
#' @examples
#'
#'
#' #sample a bma object:
#' data(datafls)
#' mm=bms(datafls,burn=500,iter=5000,nmodel=20)
#'
#' #coefficients for all best models
#' beta.draws.bma(mm)
#'
#' #standard deviations for the fourth- to eighth-best models
#' beta.draws.bma(mm[4:8],TRUE);
#'
#' @export
beta.draws.bma <- function(bmao,stdev=FALSE) {
# constructs a nice matrix of the betas of the best models stored in topmods
# bmao: bma object
if (!is.bma(bmao)) {stop("you need to provide a BMA object"); return()}
resmat=.post.beta.draws(bmao$topmod, bmao$reg.names,FALSE)
if (stdev) {
mom2=.post.beta.draws(bmao$topmod, bmao$reg.names,TRUE)
resmat=sqrt(mom2-resmat^2)
}
return(resmat)
}
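# Illustrative sketch of the stdev computation above (hypothetical 'mm' as in the
# roxygen example; wrapped in `if (FALSE)` so it is parsed but never executed):
if (FALSE) {
  data(datafls)
  mm = bms(datafls, burn=500, iter=5000, nmodel=20)
  m1 = mm$topmod$betas()   # conditional first moments, one column per best model
  m2 = mm$topmod$betas2()  # conditional second moments
  # the posterior standard deviations are sqrt(E[beta^2] - E[beta]^2):
  all.equal(sqrt(m2 - m1^2), beta.draws.bma(mm, stdev=TRUE), check.attributes=FALSE)
}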
#' Posterior Model Probabilities
#'
#' Returns the posterior model probabilities for the best models encountered by
#' a 'bma' object
#'
#' A call to bms with an MCMC sampler (e.g.
#' \code{bms(datafls,mcmc="bd",nmodel=100)}) uses a Metropolis-Hastings
#' algorithm to sample through the model space - and the frequency of how often
#' models are drawn converges to the distribution of their posterior marginal
#' likelihoods. While sampling, each 'bma' object stores the best models
#' encountered by its sampling chain with their marginal likelihood and their
#' MCMC frequencies.
#'
#' \code{pmp.bma} then allows for comparing the posterior model probabilities
#' (PMPs) for the two different methods, similar to \code{\link{plotConv}}. It
#' calculates the PMPs based on marginal likelihoods (first column) and the
#' PMPs based on MCMC frequencies (second column) for the best x models stored
#' in the bma object.
#'
#' The correlation of the two columns is an indicator of how well the MCMC
#' sampler has converged to the actual PMP distribution - it is therefore also
#' given in the output of \code{\link{summary.bma}}.
#'
#' The second column is slightly different in case the \code{\link{bms}}
#' argument \code{mcmc} was set to \code{mcmc="enumerate"}: In this case, the
#' second column is also based on marginal likelihoods. The correlation between
#' the two columns is therefore one.
#'
#' @param bmao A bma object (see argument \code{nmodel} in \code{\link{bms}}),
#' alternatively an object of class \code{\link{topmod}}
#' @param oldstyle For normal use, leave this at \code{FALSE}. It is an
#' argument for compatibility with older BMS versions - see section 'Notes'
#' @return the result is a matrix, its row names describe the model binaries\cr
#' There are two columns in the matrix: \item{PMP (Exact)}{posterior model
#' probabilities based on the posterior likelihoods of the best models in
#' \code{bmao} } \item{PMP (MCMC)}{posterior model probabilities of the best
#' models in \code{bmao} based on their MCMC frequencies, relative to all
#' models encountered by \code{bmao} - see 'Details' }
#' @note The second column thus shows the PMPs of the best models relative to
#' all models the call to \code{\link{bms}} has sampled through (therefore
#' typically the second column adds up to less than one). The first column
#' relates to the likelihoods of the best models, therefore it would add up to
#' 1. In order to estimate their marginal likelihoods with respect to the
#' other models (the ones not retained among the best models), these PMPs adding
#' up to one are multiplied with the sum of PMPs of the best models according to
#' MCMC frequencies. Therefore, the two columns have the same column sum.
#'
#' CAUTION: In package versions up to \code{BMS 0.2.5}, the first column was
#' indeed set always equal to one. This behaviour can still be mimicked by
#' setting \code{oldstyle=TRUE}.
#'
#' @seealso \code{\link{plotConv}} for plotting \code{pmp.bma},
#' \code{\link{pmpmodel}} to obtain the PMP for any individual model,
#' \code{\link{bms}} for sampling bma objects
#'
#' Check \url{http://bms.zeugner.eu} for additional help.
#' @keywords utilities
#' @examples
#'
#'
#' ## sample BMA for growth dataset, MCMC sampler
#' data(datafls)
#' mm=bms(datafls[,1:10],nmodel=20, mcmc="bd")
#'
#' ## model likelihoods and MCMC frequencies of the best 20 models
#' print(mm$topmod)
#'
#' pmp.bma(mm)
#' #first column: posterior model prob based on model likelihoods,
#' # relative to best models in 'mm'
#' #second column: posterior model prob based on MCMC frequencies,
#' # relative to all models encountered by 'mm'
#'
#' #consequently, both columns add up to the same total
#' #second column shows how much of the sampled model space is
#' # contained in the best models
#' colSums(pmp.bma(mm))
#'
#'
#' #correlation between the two shows how well the sampler converged
#' cor(pmp.bma(mm)[,1],pmp.bma(mm)[,2])
#'
#' #is the same as given in summary.bma
#' summary(mm)["Corr PMP"]
#'
#' #plot the two model probabilities
#' plotConv(mm)
#'
#' #equivalent to the following chart
#' plot(pmp.bma(mm)[,2], type="s")
#' lines(pmp.bma(mm)[,1],col=2)
#'
#'
#' #moreover, note how the first column is constructed
#' liks=exp(mm$topmod$lik())
#' liks/sum(liks)*sum(pmp.bma(mm)[,2])
#' pmp.bma(mm)[,1] #these two lines are equivalent
#'
#'
#'
#' #the example above does not converge well:
#' #too few iterations and too few best models retained
#' # the following is already better, but still not good
#' mm=bms(datafls[,1:10],burn=2000,iter=5000,nmodel=200)
#'
#'
#' # in case the sampler has been 'enumeration' instead of MCMC,
#' # then both matrix columns are of course equivalent
#' mm=bms(datafls[,1:10],nmodel=512,mcmc="enumerate")
#' cor(pmp.bma(mm)[,1],pmp.bma(mm)[,2])
#' colSums(pmp.bma(mm))
#'
#'
#' @export
pmp.bma <- function(bmao, oldstyle=FALSE) {
# constructs nice matrix with PMP analytical and PMP MCMC for best models stored in topmods
# Prob of top "nmodel" models, analytical (after deleting rest of models)
# bmao: either the topmodel object or a "bma" object
if (!(is.bma(bmao)||is.topmod(bmao))) stop("bmao needs to be a 'bma' or 'topmod' object!")
if (is.topmod(bmao)) {
topmods=bmao
was.enum=FALSE
cumsumweights=sum(topmods$ncount())
log.null.lik=0
} else {
topmods=bmao$topmod
log.null.lik=(1-bmao$info$N)/2*log(as.vector(crossprod(bmao$arguments$X.data[,1]-mean(bmao$arguments$X.data[,1]))))
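# note: (1-N)/2 * log(y'y) is the log marginal likelihood of the null (intercept-only) model, up to a constant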
cumsumweights=bmao$info$cumsumweights
was.enum=(bmao$arguments$mcmc=="enum")
}
lt1=suppressWarnings(topmods$lik() - max(topmods$lik())) # subtract the maximum for numerical stability before exponentiating
lt1=exp(lt1)/sum(exp(lt1))
if (was.enum) {
lt2=exp(topmods$lik()-log.null.lik)/cumsumweights #Prob of top "nmodel" models, (loglik based)
} else {
lt2=topmods$ncount()/cumsumweights #MCMC Prob of top "nmodel" models, numerical
}
cpoint=min(length(lt1),length(lt2))
lt1=lt1[1:cpoint]; lt2=lt2[1:cpoint]
if (!oldstyle) lt1 <- lt1*sum(lt2)
#rbind the probs to the topmodmatrix
topmodout=rbind(lt1, lt2)
rownames(topmodout)=c("PMP (Exact)","PMP (MCMC)")
colnames(topmodout)=topmods$bool()
return(t(topmodout))
}
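# Quick consistency sketch (hypothetical 'mm'; parsed but not run): by the
# rescaling `lt1 <- lt1*sum(lt2)` above, both columns share the same column sum:
if (FALSE) {
  data(datafls)
  mm = bms(datafls[,1:10], nmodel=20, mcmc="bd")
  pmps = pmp.bma(mm)
  all.equal(sum(pmps[,1]), sum(pmps[,2]))
}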
#' Posterior Model Probability for any Model
#'
#' Returns the posterior model probability for any model based on bma results
#'
#' If the model as provided in \code{model} is the null or the full model, or
#' is contained in \code{bmao}'s topmod object (cf. argument \code{nmodel} in
#' \code{\link{bms}}), \cr then the result is the same as in
#' \code{\link{pmp.bma}}.\cr If not and \code{exact=TRUE}, then \code{pmpmodel}
#' estimates the model's PMP by comparing its marginal likelihood (times model
#' prior) to the likelihoods in the \code{topmod} object and multiplying by
#' their sum of PMPs according to MCMC frequencies.
#'
#' @param bmao A bma object as created by \code{\link{bms}}.
#' @param model A model index - either variable names, or a logical with model
#' binaries, or the model hexcode (cf. \code{\link{hex2bin}}), or a numeric with
#' positions of the variables to be included.
#' @param exact If \code{TRUE}, then the resulting PMP is based on analytical
#' model likelihoods (works for any model). \cr If \code{FALSE}, then the
#' resulting PMP is derived from MCMC frequencies (works only for the null and
#' the full model, as well as for models contained in \code{bmao}'s topmod
#' object).\cr If \code{bmao} is based on enumeration (cf. argument \code{mcmc}
#' in \code{\link{bms}}), then \code{exact} does not matter.
#' @return A scalar with (an estimate of) the posterior model probability for
#' \code{model}
#' @seealso \code{\link{pmp.bma}} for similar
#' functions
#'
#' Check \url{http://bms.zeugner.eu} for additional help.
#' @keywords utilities
#' @examples
#'
#'
#' ## sample BMA for growth dataset, enumeration sampler
#' data(datafls)
#' mm=bms(datafls[,1:10],nmodel=5)
#'
#' #show the best 5 models:
#' pmp.bma(mm)
#' #first column: posterior model prob based on model likelihoods,
#' #second column: posterior model prob based on MCMC frequencies
#'
#' ### Different ways to get the same result: #########
#'
#' #PMP of 2nd-best model (hex-code representation)
#' pmpmodel(mm,"00c")
#'
#' #PMP of 2nd-best model (binary representation)
#' incls=as.logical(beta.draws.bma(mm)[,2])
#' pmpmodel(mm,incls)
#'
#' #PMP of 2nd-best model (via variable names)
#' #names of regressors in model "00c":
#' names(datafls[,2:10])[incls]
#' pmpmodel(mm,c("SubSahara", "LatAmerica"))
#'
#' #PMP of 2nd-best model (via positions)
#' pmpmodel(mm,c(6,7))
#'
#' ####PMP of another model #########
#' pmpmodel(mm,1:5)
#'
#'
#' @export
pmpmodel= function(bmao, model=numeric(0), exact=TRUE) {
#returns the PMP of any model
#if model is null, full or contained in topmod, returns the PMP as in pmp.bma
#if model is out of topmod but bmao is enumeration, calculates the marg-lik and compares
#if model is out of topmod and bmao is MCMC-sampled, then estimates PMP by comparing marg-lik and ncounts
if (!is.bma(bmao)) stop("bmao needs to be a bma object")
if (!is.vector(model)) stop("model needs to be vector denoting a single model.")
K=bmao$info$K
was.enum=(bmao$arguments$mcmc=="enum")
emptyindex=logical(K)
modelhex=""
#Conversion of user input
if (length(model)==0L) model=numeric(0)
if ((is.character(model))&&(all(model %in% bmao$reg.names))) {
mix=match(model, bmao$reg.names)
if (any(is.na(mix))) stop("Provided variable names do not conform to bma object")
emptyindex[mix]=TRUE; model=emptyindex
} else if ((length(model)==1L)&&all(is.character(model))) {
modelhex=model
model=as.logical(hex2bin(model)); if (length(model)>K) model=model[-(1:(length(model)-K))]
} else if (is.logical(model)||((length(model)==K)&&(is.numeric(model)&& max(model)<2))) {
if (length(model)>K) model=model[-(1:(length(model)-K))]
model=as.logical(model)
} else if (is.numeric(model)) {
emptyindex[model]=TRUE; model=emptyindex
} else stop("model needs to be an integer, logical or character model index representation (hexcode or variable names)")
if (any(is.na(model))) stop("Provided model index does not seem to exist.")
if (modelhex=="") modelhex=bin2hex(model)
# now model is a logical vector, modelhex its hex representation
#last user check
fixed.pos = bmao$mprior.info$fixed.pos
if (is.null(fixed.pos)) fixed.pos=numeric(0)
if (any(model[fixed.pos]!=TRUE)) stop("Such a model was excluded by bmao's argument fixed.reg")
# prior calculations
bools=bmao$topmod$bool()
liks=bmao$topmod$lik()
ncounts=bmao$topmod$ncount()
cumsumweights=bmao$info$cumsumweights
yty=as.vector(crossprod(bmao$arguments$X.data[,1,drop=TRUE]-mean(bmao$arguments$X.data[,1,drop=TRUE])))
log.null.lik= bmao$gprior.info$lprobcalc$just.loglik(ymy=yty,k=0)
#look up whether model is in topmodels
mix=match(modelhex,bools)
# first treat case when MCMC sampler and exact =FALSE
if ((!exact) && (!was.enum)) {
if (!is.na(mix)) {
return(ncounts[[mix]]/cumsumweights)
} else if (!any(model)||all(model)) {
return(bmao$info$k.vec[sum(model)+1]/cumsumweights)
} else {
stop("Model MCMC-based PMP cannot be found. Try exact=TRUE .")
}
}
# now treat the cases where exact = TRUE or was.enum=TRUE
if (!is.na(mix)) {
loglik=liks[mix] #if yes and exact return its PMP
} else if (was.enum && (!any(model)||all(model))) {
loglik = log(bmao$info$k.vec[sum(model)+1])+log.null.lik # stuff in info is saved as sum(Lik_model / Lik_null)
} else {
if (!was.enum && (length(liks)==0L)) stop("bmao needs to contain more than 0 top models to provide an estimate for your model index.")
if (sum(model)==0L) {
loglik= log.null.lik + bmao$mprior.info$pmp(ki=0, mdraw=rep(0,K), ymy=yty)
} else {
zz=zlm(bmao$arguments$X.data[,c(TRUE,model),drop=FALSE], g=bmao$gprior.info)
loglik=zz$marg.lik+bmao$mprior.info$pmp(ki=sum(model), mdraw=as.numeric(model), ymy=zz$olsres$ymy)
}
}
if (was.enum) {
return(exp(loglik-log.null.lik)/cumsumweights)
}
#estimate pmp of model compared to top models
# then multiply with a factor comparing ncount and liks so as to estimate the overall pmp of the model
pmp_withintop=exp(loglik-log.null.lik)/sum(exp(liks-log.null.lik))
return(pmp_withintop*sum(ncounts)/cumsumweights)
}
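# Consistency sketch (hypothetical 'mm'; parsed but not run): for a model that is
# stored among the top models, pmpmodel() reproduces the exact-PMP column of pmp.bma():
if (FALSE) {
  data(datafls)
  mm = bms(datafls[,1:10], nmodel=5, user.int=FALSE)
  best.hex = rownames(pmp.bma(mm))[1]
  all.equal(pmpmodel(mm, best.hex), pmp.bma(mm)[1,1], check.attributes=FALSE)
}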
#' @rdname coef.bma
#' @export
estimates.bma <- function(bmao,exact=FALSE,order.by.pip=TRUE,include.constant=FALSE,incl.possign=TRUE,std.coefs=FALSE,condi.coef=FALSE) {
# constructs a nice estimates matrix with 5 columns: 1) PIP, 2) E(beta|Y), 3) Var(beta|Y), 4) pos. coef. sign (cond. on inclusion, optional), 5) Index in X.data
# bmao: bma object;
# exact: if True, then calcs posterior stats based on analytical PMPs (likelihoods) in topmods object; if False, then does it as a weighted average (typically with MCMC frequencies)
# order.by.pip: if true then the matrix is sorted according to the PIPs (first column)
# include.constant: if true, then add the constant in the last row of resultant matrix
# incl.possign: if true, then the fourth column details how often the coefficient sign was positive (conditional on inclusion)
# std.coefs: if true, then coefficients are standardized
# condi.coef: if true, then the coefficients and standard deviations are not given as overall expected values, but as expected values conditional on inclusion
if (!is.bma(bmao)) {stop("you need to provide a BMA object"); return()}
if (exact) {
#if (bmao$arguments$beta.save==FALSE) stop("exact=TRUE needs betas from the draws: Run estimation again and save betas via setting beta.save=TRUE")
if (bmao$topmod$nbmodels==0) stop("exact=TRUE needs at least one 'top model': Run estimation again and set nmodel>0")
}
bmaest=.post.estimates(bmao$info$b1mo,bmao$info$b2mo,bmao$info$cumsumweights,bmao$info$inccount,bmao$topmod,bmao$arguments$X.data,bmao$reg.names,bmao$info$pos.sign,exact,order.by.pip,include.constant,incl.possign,std.coefs,condi.coef)
return(bmaest)
}
#' @rdname summary.bma
#' @export
info.bma <- function(bmao) {
# constructs an 'info' character matrix with 1 row and the following columns:
# bmao: bma object
# output: a list that contains information about the "Mean nr. of Regressors" (not counting the constant term),
# "Draws"=posterior draws, "Burnins"=nr. of burnins taken, "Time" denotes total time elapsed
# since calling the "bms" function, "Nr. of models visited" counts each time a model is accepted.
# Note that we do not take into account the case of revisiting models by the sampler.
# Modelspace is simply indicating the whole model space (2^K) and percentage visited is
# the nr. of models visited as a percentage of 2^K. "Corr PMP" is the correlation between
# analytic and MCMC posterior model probabilities, where a correlation of 0.99 indicates
# excellent convergence. For "nmodel=100" the best 100 models are considered for the correlation
# analysis. Finally, Nr. of Observations is given in the output as well.
foo=bmao$info
iter=foo$iter; burn=foo$burn; timed=foo$timed; models.visited=foo$models.visited; corr.pmp=foo$corr.pmp; K=foo$K; N=foo$N; msize=foo$msize; cumsumweights=foo$cumsumweights
if (is.element("mprior.info",names(bmao))) {
prior= paste(bmao$mprior.info$mp.mode, "/", bmao$mprior.info$mp.msize)
} else {
if (is.element("theta",names(bmao$arguments))&&is.element("prior.msize",names(bmao$arguments))) {
if (!is.null(bmao$arguments$theta)&!is.null(bmao$arguments$prior.msize)) prior=paste(bmao$arguments$theta, "/", bmao$arguments$prior.msize) else prior=NA
} else {
prior=paste(bmao$arguments$mprior, "/", bmao$arguments$mprior.size)
}
}
gprior.info=bmao$gprior.info
gprior.choice=gprior.info$gtype
model.space=2^K
fraction.model=models.visited/model.space*100
fraction.topmodel=sum(bmao$topmod$ncount())/iter*100 # share of draws (in percent) accounted for by the models retained in topmod
if (gprior.info$gtype=="hyper") {gprior.choice=paste(gprior.choice," (a=",2+signif(gprior.info$hyper.parameter-2,digits=4),")",sep="")}
nr.reg=msize/cumsumweights
info<-as.character(c(format(round(nr.reg,4),nsmall=4),format(iter,nsmall=0),format(burn,nsmall=0),
format(timed,nsmall=4),models.visited,format(model.space,digits=2),
format(fraction.model,digits=2),format(fraction.topmodel,digits=2),format(round(.cor.topmod(bmao$topmod),4),nsmall=4),
format(N,nsmall=4),prior, gprior.choice))
names(info)<-c("Mean no. regressors", "Draws","Burnins", "Time", "No. models visited",
"Modelspace 2^K", "% visited","% Topmodels","Corr PMP","No. Obs.", "Model Prior", "g-Prior")
if (gprior.info$return.g.stats) {
gpriorav=gprior.info$shrinkage.moments[1]
gstatsprint= paste("Av=", format(gpriorav,digits=4),sep="")
if (length(gprior.info$shrinkage.moments)>1) {
gpriorstdev=sqrt(gprior.info$shrinkage.moments[2]-gprior.info$shrinkage.moments[1]^2)
gstatsprint = paste(gstatsprint, ", Stdev=", format(gpriorstdev,digits=2),sep="")
}
info <- c(info, gstatsprint)
names(info)[13]<- "Shrinkage-Stats"
}
return(info)
}
#' Predict Method for bma Objects
#'
#' Expected value of prediction based on 'bma' object
#'
#'
#' @param object a bma object - see \code{\link{bms}}
#' @param newdata An optional data.frame, matrix or vector containing variables
#' with which to predict. If omitted, then (the expected values of) the fitted
#' values are returned.
#' @param exact If \code{FALSE} (default), then prediction is based on all
#' models (i.e. on their MCMC frequencies in case the \code{\link{bms}}
#' parameter \code{mcmc} was set to an mcmc sampler.\cr If \code{TRUE}, then
#' prediction is based on analytical likelihoods of the best models retained in
#' \code{object} - cf. \code{\link{bms}} parameter \code{nmodel}.
#' @param topmodels index of the models with whom to predict: for instance,
#' \code{topmodels=1} predicts based solely on the best model, whereas
#' \code{topmodels=1:5} predicts based on a combination of the five best
#' models.\cr Note that setting \code{topmodels} triggers \code{exact=TRUE}.
#' @param \dots further arguments passed to or from other methods.
#' @return A vector with (expected values of) fitted values.
#' @seealso \code{\link{coef.bma}} for obtaining coefficients,
#' \code{\link{bms}} for creating bma objects, \code{\link{predict.lm}} for a
#' comparable function
#'
#' Check \url{http://bms.zeugner.eu} for additional help.
#' @keywords utilities
#' @examples
#'
#' data(datafls)
#' mm=bms(datafls,user.int=FALSE)
#'
#' predict(mm) #fitted values based on MCMC frequencies
#' predict(mm, exact=TRUE) #fitted values based on best models
#'
#' predict(mm, newdata=1:41) #prediction based on MCMC frequencies
#'
#' predict(mm, newdata=datafls[1,], exact=TRUE) #prediction based on a data.frame
#'
#' # the following two are equivalent:
#' predict(mm, topmodels=1:10)
#' predict(mm[1:10], exact=TRUE)
#'
#'
#' @export
predict.bma <- function(object, newdata=NULL, exact=FALSE, topmodels=NULL, ...) {
# does basic fitting in expected values
# object: a bma object
# newdata: newdata to be supplied (just as in predict.lm)
# topmodels: The index of the bestmodels to be included (e.g. 1 for best, 1:3 for three best); setting this parameter triggers exact=TRUE
# exact: TRUE if fit should be based on exact likelihood of best models, FALSE if should be based on MCMC freqs
# output: a vector with fitted values
if (!is.bma(object)) {stop("you need to provide a BMA object"); return()}
# check the topmodels argument
if (!is.null(topmodels)) {
if (!(is.numeric(topmodels)&&is.vector(topmodels))) {
stop("topmodels must denote the models to take into account, e.g. 1:5 for the best five.")
} else if (object$topmod$nbmodels < max(topmodels)) {
stop(paste("Only",object$topmod$nbmodels,"best models are available, but you asked to take the", max(topmodels), "-best model into account."))
}
object=object[unique(topmodels)]
}
if ((!missing(topmodels))&&missing(exact)) exact=TRUE
#get the betas as required
betas=estimates.bma(object,exact=exact,order.by.pip=FALSE,include.constant=FALSE,std.coefs=FALSE,condi.coef=FALSE)[,2]
#check the newdata argument
if (is.null(newdata)) {
newX<-as.matrix(object$arguments$X.data[,-1, drop=FALSE])
} else {
newX=as.matrix(newdata)
if (!is.numeric(newX)) stop("newdata must be numeric!")
if (is.vector(newdata)) newX=matrix(newdata,1)
if (ncol(newX)!=length(betas)) {
if (ncol(newX)==length(betas)+1) {
newX=newX[,-1,drop=FALSE] # drop the first column to achieve a behavior similar to predict.lm in this case
} else {
stop("newdata must be a matrix or data.frame with", length(betas), "columns.")
}
}
orinames=colnames(object$arguments$X.data[,-1, drop=FALSE])
if (!is.null(colnames(newX))&& !is.null(orinames)) { #this is a user check whether columns had been submitted in the wrong order
if (all(orinames %in% colnames(newX) ) && !all(orinames == colnames(newX)) ) {
warning("argument newdata had to be reordered according to its column names. Consider submitting the columns of newdata in the right order.")
newX=newX[,orinames, drop=FALSE]
}
}
}
cons=.post.constant(object$arguments$X.data,betas)
return(as.vector(newX%*%betas)+cons)
}
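# What predict.bma computes, as a sketch (hypothetical 'mm'; parsed but not run):
# the BMA point forecast E(y|X) = E(const|Y) + X %*% E(beta|Y)
if (FALSE) {
  data(datafls)
  mm = bms(datafls, user.int=FALSE)
  bets = estimates.bma(mm, order.by.pip=FALSE)[,2]
  X = as.matrix(mm$arguments$X.data[,-1])
  all.equal(as.vector(X %*% bets) + .post.constant(mm$arguments$X.data, bets),
            as.vector(predict(mm)))
}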
#' OLS Statistics for the Full Model Including All Potential Covariates
#'
#' A utility function for reference: Returns a list with R2 and sum of squares
#' for the OLS model encompassing all potential covariates that are included in
#' a bma object.
#'
#'
#' @param yX.data a bma object (cf. \code{\link{bms}}) - alternatively a
#' \link{data.frame} or \link{matrix} whose first column is the dependent
#' variable
#' @return Returns a list with some basic OLS statistics \item{R2}{The
#' R-squared of the full model} \item{ymy}{The sum of squares of residuals of
#' the full model} \item{ypy}{The explained sum of squares of the full model}
#' \item{yty}{The sum of squares of the (demeaned) dependent variable}
#' \item{Fstat}{The F-statistic of the full model}
#' @note This function is just for quick comparison; for proper OLS estimation
#' consider \code{\link{lm}}
#' @seealso \code{\link{bms}} for creating bma objects, \code{\link{lm}} for
#' OLS estimation
#'
#' Check \url{http://bms.zeugner.eu} for additional help.
#' @keywords utilities
#' @examples
#'
#' data(datafls)
#' mm=bms(datafls)
#'
#' fullmodel.ssq(mm)
#'
#' #equivalent:
#' fullmodel.ssq(datafls)
#'
#'
#' @export
fullmodel.ssq <- function(yX.data) {
# yX.data: a dataframe
# returns the OLS sums of squares for yX.data, where the first column is the dependent:
# R2: r-squared; ymy: resid SS; ypy: explained SS; yty: (y-ymean)'(y-ymean)
if (is.bma(yX.data)) {yX.data <- yX.data$arguments$X.data}
y<-as.matrix(yX.data[,1])
X<-as.matrix(yX.data[,2:ncol(yX.data)])
N<-nrow(X)
K=ncol(X)
y.mean=mean(y)
y<-y-matrix(y.mean,N,1,byrow=TRUE)
X.mean=colMeans(X)
X<-X-matrix(X.mean,N,K,byrow=TRUE)
Xqr<-qr(X)
yty=as.numeric(crossprod(y))
ymy=as.numeric(crossprod(qr.resid(Xqr,y)))
ypy=as.numeric(crossprod( qr.fitted(Xqr,y)))
R2=ypy/yty
return(list(R2=R2,ymy=ymy,ypy=ypy,yty=yty,Fstat=(R2/(1-R2))*(N-K-1)/K))
}
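# Cross-check sketch against lm() (parsed but not run; assumes the dependent
# variable is the first column of datafls, named 'y'):
if (FALSE) {
  data(datafls)
  fs = fullmodel.ssq(datafls)
  all.equal(fs$R2, summary(lm(y ~ ., data=datafls))$r.squared)
}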
#' @export
print.bma <- function(x,...) {
#defines how to print a bmao object (e.g. to the console)
if (!is.bma(x)) {return(print(x))}
print(estimates.bma(x,include.constant=TRUE),...)
cat("\n")
print(info.bma(x),...)
cat("\n")
}
#' Summary Statistics for a 'bma' Object
#'
#' Returns a vector with summary statistics for a 'bma' object
#'
#' \code{info.bma} is equivalent to \code{summary.bma}, its argument
#' \code{bmao} conforms to the argument \code{object}
#'
#' @aliases summary.bma info.bma
#' @param object a list/object of class 'bma' that typically results from the
#' function \code{bms} (see \code{\link{bms}} for details)
#' @param \dots further arguments passed to or from other methods
#' @param bmao same as \code{object}
#' @return A character vector summarizing the results of a call to \code{bms}
#' \item{Mean no. of Regressors}{ the posterior mean of model size}
#' \item{Draws}{the number of iterations (ex burn-ins)} \item{Burnins}{the
#' number of burn-in iterations} \item{Time}{the time spent on iterating
#' through the model space} \item{No. of models visited}{the number of times a
#' model was accepted (including burn-ins)} \item{Modelspace 2^K}{the total
#' model space \eqn{2^K}}
#' \item{Percentage visited}{\code{No. of models visited/Modelspace*100}}
#' \item{Percentage Topmodels}{number of times the best models were drawn in
#' percent of \code{Draws}} \item{Corr. PMP}{the correlation between the MCMC
#' frequencies of the best models (the number of times they were drawn) and
#' their marginal likelihoods.} \item{No. Obs.}{Number of observations}
#' \item{Model Prior}{a character conforming to the argument \code{mprior} of
#' \code{bms}, and the expected prior model size} \item{g-prior}{a character
#' corresponding to argument \code{g} of function \code{bms}}
#' \item{Shrinkage-Stats}{Posterior expected value and standard deviation (if
#' applicable) of the shrinkage factor. Only included if argument
#' \code{g.stats} of function \code{bms} was set to TRUE}
#' @note All of the above statistics can also be directly extracted from the
#' bma object (\code{bmao}). Therefore \code{summary.bma} only returns a
#' character vector.
#' @seealso \code{\link{bms}} and \code{\link{c.bma}} for functions creating
#' bma objects, \code{print.bma} makes use of \code{summary.bma}.
#'
#' Check \url{http://bms.zeugner.eu} for additional help.
#' @keywords utilities
#' @examples
#'
#' data(datafls)
#'
#' m_fixed=bms(datafls,burn=1000,iter=3000,user.int=FALSE)
#' summary(m_fixed)
#'
#' m_ebl=bms(datafls,burn=1000,iter=3000,user.int=FALSE, g="EBL",g.stats=TRUE)
#' info.bma(m_ebl)
#'
#' @export
summary.bma <-function(object,...) {
#just an alias for info.bma
info.bma(object)
}
#' Posterior Inclusion Probabilities and Coefficients from a 'bma' Object
#'
#' Returns a matrix with aggregate covariate-specific Bayesian model Averaging:
#' posterior inclusion probabilites (PIP), post. expected values and standard
#' deviations of coefficients, as well as sign probabilites
#'
#' More on the argument \code{exact}: \cr In case the argument
#' \code{exact=TRUE}, the PIPs, coefficient statistics and conditional sign
#' probabilities are computed on the basis of the (500) best models the
#' sampling chain encountered (cf. argument \code{nmodel} in
#' \code{\link{bms}}). Here, the weights for Bayesian model averaging (BMA) are
#' the posterior marginal likelihoods of these best models. \cr In case
#' \code{exact=FALSE}, then these statistics are based on all accepted models
#' (except burn-ins): If \code{mcmc="enumerate"} then these are simply all
#' models of the traversed model space, with their marginal likelihoods
#' providing the weights for BMA.\cr If, however, the bma object \code{bmao}
#' was based on an MCMC sampler (e.g. when \code{\link{bms}} argument
#' \code{mcmc="bd"}), then BMA statistics are computed differently: In contrast
#' to above, the weights for BMA are MCMC frequencies, i.e. how often the
#' respective models were encountered by the MCMC sampler. (cf. a comparison of
#' MCMC frequencies and marginal likelihoods for the best models via the
#' function \code{\link{pmp.bma}}).
#'
#' @aliases estimates.bma coef.bma
#' @param object,bmao a 'bma' object (cf. \code{\link{bms}})
#' @param exact if \code{exact=FALSE}, then PIPs, coefficients, etc. will be
#' based on aggregate information from the sampling chain with posterior model
#' distributions based on MCMC frequencies (except in case of enumeration - cf.
#' 'Details');\cr if \code{exact=TRUE}, estimates will be based on the
#' \code{\link[=bms]{nmodel}} best models encountered by the sampling chain,
#' with the posterior model distribution based on their \emph{exact} marginal
#' likelihoods - cf. 'Details' below.
#' @param order.by.pip \code{order.by.pip=TRUE} orders the resulting matrix
#' according to posterior inclusion probabilites, \code{order.by.pip=FALSE}
#' ranks them according to the original data (order of the covariates as
#' provided in \code{X.data} to \code{\link{bms}}), default \code{TRUE}
#' @param include.constant If \code{include.constant=TRUE} then the resulting
#' matrix includes the expected value of the constant in its last row. Default
#' \code{FALSE}
#' @param incl.possign If \code{incl.possign=FALSE}, then the sign probabilities
#' column (cf. 'Values' below) is omitted from the result. Default \code{TRUE}
#' @param std.coefs If \code{std.coefs=TRUE} then the expected values and
#' standard deviations are returned in standardized form, i.e. as if the
#' original data all had mean zero and variance 1. If \code{std.coefs=FALSE}
#' (default) then both expected values and standard deviations are returned 'as
#' is'.
#' @param condi.coef If \code{condi.coef=FALSE} (default) then coefficients
#' \eqn{\beta_i} and standard deviations are unconditional posterior expected
#' values, as in standard model averaging; if \code{condi.coef=TRUE} then they
#' are given as conditional on inclusion (equivalent to \eqn{\beta_i / PIP_i}).
#' @param ... further arguments for other \code{\link{coef}} methods
#' @return A matrix with five columns (or four if \code{incl.possign=FALSE})
#' \item{Column 'PIP'}{Posterior inclusion probabilities \eqn{\sum p(\gamma|i
#' \in \gamma, Y) / \sum p(\gamma|Y) }} \item{Column 'Post Mean'}{posterior
#' expected value of coefficients, unconditional \eqn{E(\beta|Y)=\sum
#' p(\gamma|Y) E(\beta|\gamma,Y)}, where \eqn{E(\beta_i|\gamma,i \notin \gamma,
#' Y)=0} if \code{condi.coef=FALSE}, or conditional on inclusion
#' (\eqn{E(\beta|Y) / \sum p(\gamma|Y, i \in \gamma) } ) if
#' \code{condi.coef=TRUE}} \item{Column 'Post SD'}{posterior standard deviation
#' of coefficients, unconditional or conditional on inclusion, depending on
#' \code{condi.coef}} \item{Column 'Cond.Pos.Sign'}{The ratio of how often the
#' coefficients' expected values were positive conditional on inclusion. (over
#' all visited models in case \code{exact=FALSE}, over the best models in case
#' \code{exact=TRUE})} \item{Column 'Idx'}{the original order of covariates as
#' they were used for sampling (if included, the constant has index 0)}
#'
#' @seealso \code{\link{bms}} for creating bma objects, \code{\link{pmp.bma}}
#' for comparing MCMC frequencies and marginal likelihoods.
#'
#' Check \url{http://bms.zeugner.eu} for additional help.
#' @keywords models
#' @examples
#'
#' #sample, with keeping the best 200 models:
#' data(datafls)
#' mm=bms(datafls,burn=1000,iter=5000,nmodel=200)
#'
#' #standard BMA PIPs and coefficients from the MCMC sampling chain, based on
#' # ...how frequently the models were drawn
#' coef(mm)
#'
#' #standardized coefficients, ordered by index
#' coef(mm,std.coefs=TRUE,order.by.pip=FALSE)
#'
#' #coefficients conditional on inclusion:
#' coef(mm,condi.coef=TRUE)
#'
#' #same as
#' ests=coef(mm,condi.coef=FALSE)
#' ests[,2]/ests[,1]
#'
#' #PIPs, coefficients, and signs based on the best 200 models
#' estimates.bma(mm,exact=TRUE)
#'
#' #... and based on the 50 best models
#' coef(mm[1:50],exact=TRUE)
#'
#'
#' @method coef bma
#' @export
coef.bma <-function(object,exact = FALSE, order.by.pip = TRUE, include.constant = FALSE,
incl.possign = TRUE, std.coefs = FALSE, condi.coef = FALSE, ...) {
#just an alias for estimates.bma
estimates.bma(object, exact=exact, order.by.pip = order.by.pip, include.constant = include.constant,
incl.possign = incl.possign, std.coefs = std.coefs, condi.coef = condi.coef)
}
#' Tests for a 'bma' Object
#'
#' tests for objects of class "bma"
#'
#'
#' @param bmao a 'bma' object: see 'value'
#' @return Returns \code{TRUE} if bmao is of class 'bma', \code{FALSE}
#' otherwise.
#'
#' @seealso 'Output' in \code{\link{bms}} for the structure of a 'bma' object
#'
#' Check \url{http://bms.zeugner.eu} for additional help.
#' @keywords classes
#' @examples
#'
#' data(datafls)
#' mm=bms(datafls,burn=1000, iter=4000)
#' is.bma(mm)
#' @export
is.bma <-function(bmao) {
#returns true if the class of the object is either bma or a related class
if (any(is.element(class(bmao),c("bma","bma.fcast","bma.sar","oldbma","bmav0")))) return(TRUE) else return(FALSE)
}
#' @export
is.topmod <-function(tmo) {
#returns true if the class of the object is a "topmod" list
if (is.element("topmod",class(tmo))) return(TRUE) else return(FALSE)
}
#' Gaussian Hypergeometric Function F(a,b,c,z)
#'
#' Computes the value of a Gaussian hypergeometric function \eqn{ F(a,b,c,z) }
#' for \eqn{-1 \leq z \leq 1} and \eqn{a,b,c \geq 0}
#'
#' The function \code{f21hyper} complements the analysis of the 'hyper-g prior'
#' introduced by Liang et al. (2008).\cr For parameter values, cf.
#' \url{https://en.wikipedia.org/wiki/Hypergeometric_function}.
#'
#' @param a The parameter \code{a} of the Gaussian hypergeometric function,
#' must be a positive scalar here
#' @param b The parameter \code{b} of the Gaussian hypergeometric function,
#' must be a positive scalar here
#' @param c The parameter \code{c} of the Gaussian hypergeometric function,
#' must be a positive scalar here
#' @param z The parameter \code{z} of the Gaussian hypergeometric function,
#' must be between -1 and 1 here
#' @return The value of the Gaussian hypergeometric function \eqn{ F(a,b,c,z) }
#' @note This function is a simple wrapper function of sped-up code that is
#' intended for sporadic application by the user; it is neither efficient nor
#' general; for a more general version cf. the package '\code{hypergeo}'
#'
#' @seealso package \code{hypergeo} for a more proficient implementation.
#'
#' Check \url{http://bms.zeugner.eu} for additional help.
#' @references Liang F., Paulo R., Molina G., Clyde M., Berger J.(2008):
#' Mixtures of g-priors for Bayesian variable selection. J. Am. Statist. Assoc.
#' 103, p. 410-423
#'
#' \url{https://en.wikipedia.org/wiki/Hypergeometric_function}
#' @keywords utilities
#' @examples
#'
#'
#' f21hyper(30,1,20,.8) #returns about 165.8197
#'
#' f21hyper(30,10,20,0) #returns one
#'
#' f21hyper(10,15,20,-0.1) # returns about 0.4872972
#' @export
f21hyper = function(a,b,c,z) {
if ((length(a)!=1)|(length(b)!=1)|(length(c)!=1)|(length(z)!=1)) stop("All function arguments need to be scalars")
if ((a<0)|(b<0)|(c<0)) stop("Arguments a, b, and c need to be non-negative")
if ((z>1)|(z<=(-1))) stop("Argument z needs to be between -1 and 1")
nmx=max(100,3*floor(((a+b)*z-c-1)/(1-z)))
if (nmx>10000) warning("Power series probably does not converge")
serie=0:nmx
return(1+sum(cumprod((a+serie)/(c+serie)*(b+serie)/(1+serie)*z)))
}
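# The one-liner above evaluates the truncated Gaussian hypergeometric series
#   F(a,b,c,z) = sum_{n=0}^Inf [ (a)_n (b)_n / ((c)_n n!) ] z^n ,
# where (x)_n is the Pochhammer symbol: cumprod((a+j)/(c+j)*(b+j)/(1+j)*z) builds
# the ratio of consecutive series terms, so 1 + sum(...) is the series truncated at nmx.
# Naive reference sketch (parsed but not run; agreement is limited by the
# truncation at nmx, hence the loose tolerance):
if (FALSE) {
  f21naive = function(a, b, c, z, n=300) {
    term = function(k) if (k==0) 1 else prod((a+0:(k-1))*(b+0:(k-1))/((c+0:(k-1))*(1:k)))*z^k
    sum(vapply(0:n, term, numeric(1)))
  }
  all.equal(f21naive(30,1,20,.8), f21hyper(30,1,20,.8), tolerance=1e-3)
}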
########################################################################
# Auxiliary functions on the final bma object or its components #########
########################################################################
.post.constant <- function(X.data,Ebeta) {
# calculates E(constant|Y): X.data is dataframe, Ebeta a vector
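# i.e. E(const|Y) = mean(y) - crossprod(E(beta|Y), colMeans(X)): the intercept is recovered from estimation on demeaned data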
Xmeans= colMeans(X.data)
cons=Xmeans[1]-crossprod(Ebeta,Xmeans[-1])
return(as.vector(cons))
}
.post.beta.draws <- function(topmods,reg.names,moment2=FALSE) {
# constructs a nice matrix of the betas of the best models stored in topmods
# topmods: topmod-object; reg.names: character vector (like colnames(X))
# moment2: TRUE: return betas2(), FALSE: return betas()
if(moment2) beta.draws=as.matrix(topmods$betas2()) else beta.draws=as.matrix(topmods$betas())
if(sum(beta.draws)==0){
stop("The tompod object provided does not have saved betas. cf. bbeta argument in function topmod")
}
if(nrow(beta.draws)!=length(reg.names)){
rownames(beta.draws)=c(reg.names,"W-Index")
}
else{
rownames(beta.draws)=c(reg.names)
}
beta.names=topmods$bool()
if(length(which(beta.names=="0"))>0){
colnames(beta.draws)=beta.names[-c(which(beta.names=="0"))]
} else {
colnames(beta.draws)=beta.names
}
return(beta.draws)
}
.post.topmod.includes <- function(topmods,reg.names) {
# constructs nice 0-1 matrix with inclusion vectors for the best models stored in topmods
# topmods: topmodel object, reg.names: character-vector (like colnames(X))
topmod = topmods$bool_binary()
colnames(topmod)<-topmods$bool()
rownames(topmod)=reg.names
return(topmod)
}
.post.topmod.bma <- function(topmods,reg.names=numeric(0)) {
# constructs nice 0-1 matrix for regressors in model, with the last two rows
# being the analytical and MCMC PMPs
# topmods: either the topmodel object or a "bma" object
pmps = pmp.bma(topmods)
if (is.bma(topmods)) {
reg.names=topmods$reg.names; topmods=topmods$topmod
}
# constructs nice matrix combining the 0-1- includes matrix with info on PMP
rbind(.post.topmod.includes(topmods,reg.names),t(pmps))
}
#' Model Binaries and their Posterior model Probabilities
#'
#' Returns a matrix whose columns show which covariates were included in the best models in a 'bma' object. The last two columns detail posterior model probabilities.
#'
#' @param bmao an object of class 'bma' - see \code{\link{bma-class}}
#' @return Each column in the resulting matrix corresponds to one of the 'best' models in bmao: the first column for the best model, the second for the second-best model, etc.
#' The model binaries have elements 1 if the regressor given by the row name was included in the respective models, and 0 otherwise.
#' The second-last row shows the model's posterior model probability based on marginal likelihoods (i.e. its marginal likelihood over the sum of likelihoods of all best models).
#' The last row shows the model's posterior model probability based on MCMC frequencies (i.e. how often the model was accepted vs. the sum of acceptances of all models).
#' Note that the column names are hexcode representations of the model binaries (e.g. "04" for c(0,0,0,1,0,0))
#'
#'
#' @details
#' Each bma class (the result of bms) contains 'top models', the x models with the best
#' analytical likelihood that bms encountered while sampling.
#'
#' See \code{\link{pmp.bma}} for an explanation of likelihood vs. MCMC
#' frequency concepts
#'
#'
#'
#' @seealso \code{\link{topmod}} for creating topmod objects, \code{\link{bms}}
#' for their typical use, \code{\link{pmp.bma}} for comparing posterior model
#' probabilities
#'
#' Check \url{http://bms.zeugner.eu} for additional help.
#' @examples
#'
#' data(datafls)
#' #sample with a limited data set for demonstration
#' mm=bms(datafls[,1:12],nmodel=20)
#'
#' #show binaries for all
#' topmodels.bma(mm)
#'
#' #show binaries for 2nd and 3rd best model, without the model probs
#' topmodels.bma(mm[2:3])[1:11,]
#'
#' #access model binaries directly
#' mm$topmod$bool_binary()
#'
#' @export
topmodels.bma <-function(bmao) {# function alias
if (!is.bma(bmao)) {stop("you need to provide a bma object")}
return(.post.topmod.bma(bmao))
}
.post.estimates <- function(b1mo=NULL,b2mo=NULL,cumsumweights=NULL,inccount=NULL,topmods=NULL,X.data=NULL,reg.names=NULL,pos.sign=NULL,exact=FALSE,order.by.pip=TRUE,include.constant=FALSE,incl.possign=TRUE,std.coefs=FALSE,condi.coef=FALSE) {
# b1mo: (weighted) cumsum of first moment of beta
# b2mo: (weighted) cumsum of second moment of beta
# cumsumweights: sum of the weights/model probs - typically just the number of draws "iter"
# inccount: (weighted) number of inclusions per coefficient
# reg.names: character vector like colnames(X.data)
# topmods: topmodl object (best n models)
# exact: if True, then calcs posterior stats based on analytical PMPs (likelihoods) in topmods object; if False, then does it as a weighted average (typically with MCMC frequencies)
# order.by.pip: if true then the matrix is sorted according to the PIPs (first column)
# include.constant: if true, then add the constant in the last row of resultant matrix
idx=1:(length(b1mo))
if(exact){
#calculate with analytical PMPs from topmods object (best n models)
lt1=topmods$lik() - max(topmods$lik()) # subtract the maximum for numerical stability before exponentiating
exact.pmp=as.vector(exp(lt1)/sum(exp(lt1)))
pip=as.vector(topmods$bool_binary()%*%exact.pmp)
idx=1:(length(pip))
betas=topmods$betas()
betas2=topmods$betas2()
K=nrow(betas)
Eb1=tcrossprod(betas,t(exact.pmp))[,1] #multiply betas with corr. pmp's and sum up
Eb2=tcrossprod(betas2,t(exact.pmp))[,1]
Ebsd=sqrt(Eb2-Eb1^2)
possign=round(tcrossprod((betas>0),t(exact.pmp))[,1]/pip,8)
possign[is.nan(possign)]=NA
}
else{
# Posterior mean and standard deviation of each coefficient
# calculate by taking b1mo etc. as a weighted sum (typically MCMC draws), divided by the sum of the weights (typically number of runs - "iter")
pip=inccount/cumsumweights
Eb1 = b1mo/cumsumweights
Eb2 = b2mo/cumsumweights
Ebsd = sqrt(Eb2-Eb1^2)
possign=round(pos.sign/inccount,8)
possign[is.nan(possign)]=NA
}
if (include.constant) constterm=.post.constant(X.data,Eb1)
if (condi.coef) {
Eb1=Eb1/pip; Eb2=Eb2/pip; Ebsd=sqrt(Eb2-Eb1^2);
Eb1[is.nan(Eb1)]=0; Ebsd[is.nan(Ebsd)]=0;
}
if (std.coefs) {
#if standardized coefficients, then adjust them by SDs
#important that this is done after all other Eb1 and Ebsd computations
sddata=apply(as.matrix(X.data),2,stats::sd)
Eb1=Eb1/sddata[1]*sddata[-1]
Ebsd=Ebsd/sddata[1]*sddata[-1]
if (include.constant) constterm=constterm/sddata[1]
}
if (incl.possign) {
post.mean<-cbind(pip,Eb1,Ebsd,possign,idx)
rownames(post.mean)<-reg.names
colnames(post.mean)<-c("PIP","Post Mean", "Post SD","Cond.Pos.Sign","Idx")
} else {
post.mean<-cbind(pip,Eb1,Ebsd,idx)
#post.mean<-cbind(pip,Eb1,Ebsd)
rownames(post.mean)<-reg.names
colnames(post.mean)<-c("PIP","Post Mean", "Post SD","Idx")
#colnames(post.mean)<-c("PIP","Post Mean", "Post SD")
}
if (order.by.pip) {
post.mean<-post.mean[order(-post.mean[,1]),] #order the results table according to PIP
}
if (include.constant) {
constrow=matrix(c(1,constterm,NA,rep(NA,incl.possign),0),1)
rownames(constrow)="(Intercept)"
post.mean=rbind(post.mean,constrow)
}
return(post.mean)
}
#####################################################################################################
########################################################################
# functions on retrieving a FULL sys.call(0) ###########################
########################################################################
.construct.arglist <- function(funobj,envir=NULL) {
# evaluates all function arguments at envir (default: calling environment)
# (they might have changed during running the function, or may be in variables)
# construct.arglist gets rid of the variables and so on, and returns the argument list as a list
namedlist=formals(funobj)
argnames=names(namedlist)
if (!is.environment(envir)) envir=sys.frame(-1)
for (argn in 1:length(namedlist)) {
# the following test is to cater for non.existent arguments
testval=as.logical(try(exists(argnames[argn],envir=envir),silent=TRUE)); if (is.na(testval)) testval=FALSE
if (testval) {
namedlist[[argn]]=try(get(argnames[argn],envir=envir))
}
}
return(namedlist)
}
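# Usage sketch (hypothetical function 'foo'; parsed but not run):
if (FALSE) {
  foo = function(a, b=2) .construct.arglist(foo)
  foo(1)  # returns list(a=1, b=2): supplied values kept, defaults resolved
}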
#####################################################################################################
########################################################################
# functions for combining bma objects ##################################
########################################################################
.top10=function(nmaxregressors=10,nbmodels=10,bbeta=FALSE,lengthfixedvec=0,bbeta2=FALSE,...,
inivec_lik=numeric(0),inivec_bool=character(0),inivec_count=numeric(0),inivec_vbeta=numeric(0), inivec_vbeta2=numeric(0),inivec_veck=0,inivec_fixvec=numeric(0)){
#object used by bms to save the best models
#set up the variables to be augmented in the process
#use .top10(...) to initialise this object
# top-level INPUT arguments:
# nmaxregressors: maximum number of regressors possible in the models (ncol(X.data)-1) for bms()
# nbmodels: integer >=0, maximum number of models to store
# bbeta: logical whether, additionally, the models' beta coefficients should be stored as well
# lengthfixedvec: integer >=0; a vector of fixed length =lengthfixedvec will be stored for each model (e.g. posterior moments of g, etc.);
# if lengthfixedvec=0, nothing will be stored
# inivec_...: for the advanced user: vectors to initialize the top10 object in case one wants to 'add' several models right at the beginning:
# inivec_lik: vector of likelihoods (length: nb of models) corresponds to: lik()
# inivec_bool: vector of model binaries in hexcode (length: nb of models); alternatively, a numeric vector of 1s and 0s (length: nmaxregressors)
# corresponds to: bool()
# inivec_count: vector of ncounts (length: nb of models), corresponds to: ncount()
# inivec_vbeta: vector of betas (stripped of zeroes), corresponds to betas_raw()
# inivec_veck: vector of ks (length: nb of models), i.e. the number of coefs per model, corresponds to: kvec_raw()
# inivec_fixvec: vector of fixed vectors (length: nb of models times lengthfixedvec)
# OUTPUT: list with the following elements
# addmodel(mylik,vec01,vbeta=numeric(0),fixedvec=numeric(0)): Function that adds a model to be stored in the object.
# if the model is already there, only the model counter will be incremented; reference: see below
# lik(), bool(), ncount(), nbmodels, nregs, betas_raw(), kvec_raw(), bool_binary(), betas(), fixed_vector():
# these are final output functions, for a reference, refer to these directly
findex=function() {
seq_incl=seq_len(nbmodel)
if (nbmodel==nbmodels) {
seq_incl[indices]=seq_incl
} else {
truncindex=indices; truncindex[(nbmodel+1):nbmodels]=0L
seq_incl[truncindex]=seq_incl
}
return(seq_incl)
}
betamat = function(top10_betavec) { # return a matrix: rows: betas per model (including zeros); columns: model
bins=(sapply(as.list(top10_bool[findex()]),hexobject$as.binvec))
betamatx=matrix(0,nmaxregressors,nbmodel)
if (length(top10_betavec)>0) {betamatx[which(bins==1)]=top10_betavec} else betamatx=betamatx[,1]
return(betamatx)
}
# sortall = function() {
# if (!is_sorted) {
# if (nbmodel<nbmodels) {
# inclindex=seq_len(nbmodel)
# indices <<- indices[inclindex];
# top10_lik <<- top10_lik[inclindex];
# top10_count <<- top10_count[inclindex];
# top10_bool <<- top10_bool[inclindex];
# top10_fixedvec <<- top10_count[seq_len(lengthfixedvec*nbmodel)];
# }
# top10_lik[indices]<<-top10_lik;
# top10_count[indices]<<-top10_count;
# top10_count[indices]<<-top10_count;
#
# if (lengthfixedvec>0) { top10_fixvec <<- as.vector(matrix(lastm_fixvec,lengthfixedvec)[,order_index]) }
# last_visited <<- c(lastnewmodel:1,length(lastm_lik):(lastnewmodel+1))[orderindex]
# is_sorted <<- TRUE
# }
# }
hexobject<-.hexcode.binvec.convert(nmaxregressors) #initialize object for hexcode to logical vector conversion
if (nbmodels<0) {nbmodels=0}
#declare needed objects in full length now to optimize memory allocation later
indices=integer(nbmodels); top10_lik=rep(-Inf,nbmodels)
top10_bool=character(nbmodels); top10_count=integer(nbmodels)
top10_fixvec=numeric(lengthfixedvec*nbmodels)
if (bbeta) lbetas=vector("list",nbmodels)
if (bbeta2) lbetas2=vector("list",nbmodels)
seq_nbmodel=seq_len(nbmodels); ix_of_mybool=logical(nbmodels)
#is_sorted = FALSE
# read in initial data
nbmodel = length(inivec_lik)
top10_lik[seq_len(nbmodel)]=inivec_lik; top10_count[seq_len(nbmodel)]=inivec_count;
#read in initial binaries: a character vector, a list of vectors, a single vector, or a matrix whose columns are the binaries
if (is.character(inivec_bool)) {top10_bool[seq_len(nbmodel)]=inivec_bool} else {
if (is.vector(inivec_bool)&(length(inivec_bool)==nmaxregressors)) {
top10_bool[seq_len(nbmodel)]=hexobject$as.hexcode(inivec_bool)
} else if (is.list(inivec_bool)) {
top10_bool[seq_len(nbmodel)]=sapply(inivec_bool, hexobject$as.hexcode)
} else if (is.matrix(inivec_bool)) {
top10_bool[seq_len(nbmodel)]=sapply(as.list(as.data.frame(inivec_bool)), hexobject$as.hexcode)
} else stop("inivec_bool is wrong format!")
}
top10_fixvec=inivec_fixvec;
if (is.na(inivec_veck[1])) {inivec_veck=0}
#read in initial beta information
if (bbeta|bbeta2) {
veck_ix=c(0,cumsum(inivec_veck))
veckix_aux=as.list(seq_len(nbmodel)); veckix_aux=lapply(veckix_aux,function(x) { if (veck_ix[[x]]==veck_ix[[x+1]]) c(0,0) else c(veck_ix[[x]]+1,veck_ix[[x+1]]) } )
}
if (bbeta) { lbetas[seq_len(nbmodel)]=lapply(veckix_aux,function(x) inivec_vbeta[x[[1]]:x[[2]]]) } else lbetas=list(numeric(0))
if (bbeta2) { lbetas2[seq_len(nbmodel)]=lapply(veckix_aux,function(x) inivec_vbeta2[x[[1]]:x[[2]]]) } else lbetas2=list(numeric(0))
lastvec01=integer(nmaxregressors);
modidx=length(top10_lik);
indices[seq_len(nbmodel)]=order(inivec_lik,decreasing=TRUE)
min.index = which.max(indices)
if (length(min.index)>0) {
min.top10_lik=top10_lik[[min.index]]
} else {
if (nbmodels>0) min.top10_lik=-Inf else min.top10_lik=Inf
}
index.of.mybool = function(mybool) {
ix_of_mybool <<- (mybool==top10_bool)
}
check4dupl = index.of.mybool; dupl.possible=TRUE;
retlist=list(
addmodel=function(mylik,vec01,vbeta=numeric(0),vbeta2=numeric(0),fixedvec=numeric(0)) {
#mylik: scalar likelihood; vec01: numeric vector of 0s and 1s; vbeta: small vector of betas (if bbeta was set to TRUE) that does NOT contain restricted zeros; vbeta2: small vector of betas^2 (assumed to have the same length as vbeta)
#use this function to add a model:
#if its already among the best "nbmodels" models, its counter will be incremented by one
#if it is not already in the best "nbmodels" models though its likelihood justifies that, the model will be added to the list
if (mylik>=min.top10_lik|nbmodel<nbmodels) {
#look whether the model is 'better' than the least model in the list
if (identical(lastvec01,vec01)) {
#if the model is the same as the model before, just adjust the counter
top10_count[[modidx]]<<-top10_count[[modidx]]+1
} else {
#look whether the model is already in the bestof list
lastvec01<<-vec01
mybool=hexobject$as.hexcode(vec01)
check4dupl(mybool)
if (!any(ix_of_mybool)) {
#the model is not yet contained in the bestof list, but should be in there -> add model to list
if (nbmodel<nbmodels) {
nbmodel <<- nbmodel+1
modidx <<- nbmodel
} else {
modidx<<-min.index;
}
ltmylik=(top10_lik<=mylik) #adjusted on 2011-04-19, in order to cope with mylik=-Inf
indices <<- indices + ltmylik
indices[[modidx]] <<- nbmodels-sum(ltmylik)+1
top10_lik[[modidx]] <<- mylik
top10_bool[[modidx]] <<- mybool
top10_count[[modidx]] <<- 1
min.index <<- which.max(indices)
min.top10_lik <<- top10_lik[[min.index]]
if (lengthfixedvec>0) {
top10_fixvec[(modidx-1)*lengthfixedvec+seq_len(lengthfixedvec)] <<- fixedvec
}
if (bbeta) { lbetas[[modidx]] <<- vbeta }
if (bbeta2) { lbetas2[[modidx]] <<- vbeta2 }
} else {
#the model is already contained in the bestof list -> just adjust counter
modidx <<- seq_nbmodel[ix_of_mybool]
top10_count[[modidx]]<<-top10_count[[modidx]]+1
}
}
}
},
lik = function(){return(top10_lik[findex()])}, #return a vector of the best "nbmodels" likelihoods
bool = function(){return(top10_bool[findex()])}, #return a vector of the best "nbmodels" codes as hexadecimal (e.g. model c(0,1,0,0) as "4")
ncount = function(){return(top10_count[findex()])}, #return a vector of how each of the best models was chosen
#counters = function(){return(c(tcalls,inlik,added,maintained))}, # for programming checks
nbmodels = nbmodels, # return the maximum number of best models saved in this object
nregs = nmaxregressors, # return K, the maximum number of regressors overall
betas_raw = function(){return(unlist(lbetas[findex()]))}, # return the vector of beta coefs. of the best models in one line without the zeros
betas2_raw = function(){return(unlist(lbetas2[findex()]))}, # return the vector of beta^2 coefs. of the best models in one line without the zeros
kvec_raw = function(){return(sapply(lbetas,length)[findex()])}, #return a vector that details how many coefs. each of the best models has
bool_binary = function(){return(sapply(as.list(top10_bool[findex()]),hexobject$as.binvec))}, #return a matrix: each column: one of the best models; rows: logical of coefficient inclusion
betas = function() {
betamat(unlist(lbetas[findex()]))
},
betas2 = function() {
betamat(unlist(lbetas2[findex()]))
},
fixed_vector = function(){
if (lengthfixedvec<=0) {return(matrix(0,0,0))} else
{
findex_base=(findex()-1)*lengthfixedvec
findex_fixvec=numeric(0)
for (xx in 1:lengthfixedvec) findex_fixvec=rbind(findex_fixvec,findex_base+xx)
return(matrix(top10_fixvec[c(findex_fixvec)],lengthfixedvec))
}
}, # return the fixed vector that may contain additional statistics
duplicates_possible = function(possible=NULL) {
if (!is.logical(possible)) return(dupl.possible)
if (possible) {
check4dupl <<- index.of.mybool; dupl.possible <<- TRUE; ix_of_mybool <<- logical(nbmodels)
} else {
check4dupl <<- function(mybool){}; dupl.possible <<- FALSE; ix_of_mybool <<- FALSE
}
}
)
class(retlist)="topmod"
return(retlist)
}
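# Minimal usage sketch of the topmod object (hypothetical values; parsed but not run):
if (FALSE) {
  tm = .top10(nmaxregressors=4, nbmodels=2)
  tm$addmodel(mylik=-10, vec01=c(1,0,0,1)) # model with regressors 1 and 4
  tm$addmodel(mylik=-9,  vec01=c(0,1,0,0)) # better model, ranked first
  tm$addmodel(mylik=-10, vec01=c(1,0,0,1)) # duplicate: only its ncount is incremented
  tm$lik()          # -9 -10
  tm$ncount()       #  1   2
  tm$bool_binary()  # 0-1 matrix, columns sorted by likelihood
}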
############################################################
#auxiliary functions for topmod object
#' @export
"[.topmod" <- function(tm,idx) {
# this function (applied as topmod[idx] ) provides a topmodel object with only the models indicated by idx
# e.g. topmod[1] contains only the best model, topmod[-(90:100)] eliminates the models ranked 90 to 100
if (any(is.na(suppressWarnings(as.integer(idx))))) idx=1:length(tm$lik())
if (length(tm$betas_raw())>1) {
bbeta=TRUE
bet=as.vector(tm$betas()[,idx])
bet=bet[bet!=0]
} else {
bbeta=FALSE
bet=numeric(0)
}
if (length(tm$betas2_raw())>1) {
bbeta2=TRUE
bet2=as.vector(tm$betas2()[,idx])
bet2=bet2[bet2!=0]
} else {
bbeta2=FALSE
bet2=numeric(0)
}
fixvec=tm$fixed_vector()
if (!length(as.vector(fixvec))) fixvec=numeric(0) else fixvec=as.vector(t(fixvec[,idx]))
.top10(nmaxregressors=tm$nregs,nbmodels=tm$nbmodels,bbeta=bbeta,lengthfixedvec=nrow(tm$fixed_vector()),bbeta2=bbeta2,
inivec_lik=tm$lik()[idx],inivec_bool=tm$bool()[idx],inivec_count=tm$ncount()[idx],inivec_vbeta=bet,
inivec_vbeta2=bet2,inivec_veck=tm$kvec_raw()[idx],inivec_fixvec=fixvec)
}
#' @export
"[.bma" <- function(bmao,idx) {
# bma[idx] should have the same effect as applying the index to the topmod, for convenience
bmao$topmod <- bmao$topmod[idx]
return(bmao)
}
#' Printing topmod Objects
#'
#' Print method for objects of class 'topmod', typically the best models stored
#' in a 'bma' object
#'
#' See \code{\link{pmp.bma}} for an explanation of likelihood vs. MCMC
#' frequency concepts
#'
#' @param x an object of class 'topmod' - see \code{\link{topmod}}
#' @param \dots additional arguments passed to \code{\link{print}}
#' @return if \code{x} contains more than one model, then the function returns
#' a 2-column matrix: \item{Row Names}{show the model binaries in hexcode
#' } \item{Column 'Marg.Log.Lik'}{shows the
#' marginal log-likelihoods of the models in \code{x}} \item{Column 'MCMC
#' Freq'}{shows the MCMC frequencies of the models in \code{x}}
#'
#' if \code{x} contains only one model, then more detailed information is shown
#' for this model: \item{first line}{'Model Index' provides the model binary in
#' hexcode, 'Marg.Log.Lik' its marginal log likelihood, 'Sampled Freq.' how
#' often it was accepted (function \code{ncount()} in \code{\link{topmod}})}
#' \item{Estimates}{first column: covariate indices included in the model,
#' second column: posterior expected value of the coefficients, third column:
#' their posterior standard deviations (excluded if no coefficients were stored
#' in the topmod object - cf. argument \code{bbeta} in \code{\link{topmod}}) }
#' \item{Included Covariates}{the model binary} \item{Additional
#' Statistics}{any custom additional statistics saved with the model}
#'
#' @seealso \code{\link{topmod}} for creating topmod objects, \code{\link{bms}}
#' for their typical use, \code{\link{pmp.bma}} for comparing posterior model
#' probabilities
#'
#' Check \url{http://bms.zeugner.eu} for additional help.
#' @keywords print
#' @examples
#'
#' # do some small-scale BMA for demonstration
#' data(datafls)
#' mm=bms(datafls[,1:10],nmodel=20)
#'
#' #print info on the best 20 models
#' print(mm$topmod)
#' print(mm$topmod,digits=10)
#'
#' #equivalent:
#' cbind(mm$topmod$lik(),mm$topmod$ncount())
#'
#'
#'
#' #now print info only for the second-best model:
#' print(mm$topmod[2])
#'
#' #compare 'Included Covariates' to:
#' topmodels.bma(mm[2])
#'
#' #and to
#' as.vector(mm$topmod[2]$bool_binary())
#'
#'
#' @export
print.topmod <- function(x,...) {
#this function prints the matrix of logliks and MCMC frequencies
#if topmod contains only one model, more detiled infomration is given
# try e.g. topmod, or topmod[1:3] or topmod[1]
tm=x
if (length(tm$lik())==1) {
infomat=c(tm$bool(), tm$lik(),tm$ncount())
names(infomat)=c("Model Index","Marg.Log.Lik.","Sampled Freq.")
print(infomat)
betamat=cbind(as.vector(tm$betas_raw()),sqrt(as.vector(tm$betas2_raw())-as.vector(tm$betas_raw())^2))
if (nrow(betamat)!=0) {
if (ncol(betamat)==1) {colnames(betamat)="Coef."} else {colnames(betamat)=c("Coef.","Std.Dev.")}
rownames(betamat)=which(as.logical(as.vector(tm$bool_binary())))
cat("\nEstimates:\n")
print(betamat)
}
bin=as.vector(tm$bool_binary())
names(bin)=1:length(bin)
cat("\nIncluded Covariates:\n")
print(bin)
cat("\nAdditional Statistics:\n")
print(as.vector(tm$fixed_vector()))
} else {
mout=cbind(tm$lik(),tm$ncount())
colnames(mout)=c("Marg.Log.Lik", "MCMC Freq")
rownames(mout)=tm$bool()
print(mout,...)
}
}
#' Topmodel Object
#'
#' Create or use an updateable list keeping the best x models it encounters
#' (for advanced users)
#'
#' A 'topmod' object (as created by \code{topmod}) holds three basic vectors:
#' \code{lik()} (for the (log) likelihood of models or similar), \code{bool()}
#' for a hexcode presentation of the model binaries (cf. \code{\link{bin2hex}})
#' and \code{ncount()} for the number of times the models have been drawn.\cr
#' All these vectors are sorted in decreasing order of \code{lik()}, and are of
#' the same length. The
#' maximum length is limited by the argument \code{nbmodels}.
#'
#' If \code{tmo} is a topmod object, then a call to \code{tmo$addmodel} (e.g.
#' \code{tmo$addmodel(mylik=4,vec01=c(T,F,F,T))} updates the object \code{tmo}
#' by a model represented by \code{vec01} (here the one including the first and
#' fourth regressor) and the marginal (log) likelihood \code{lik} (here: 4).
#'
#' If this model is already part of \code{tmo}, then its respective
#' \code{ncount} entry is incremented by one; else it is inserted into a
#' position according to the ranking of \code{lik}.
#'
#' In addition, there is the possibility to save (the first moments of)
#' coefficients of a model (\code{betas}) and their second moments
#' (\code{betas2}), as well as an arbitrary vector of statistics per model
#' (\code{fixed_vector}).
#'
#' \code{is.topmod} returns \code{TRUE} if the argument is of class 'topmod'
#'
#' @aliases topmod is.topmod
#' @param nbmodels The maximum number of models to be retained by the topmod
#' object
#' @param nmaxregressors The maximum number of covariates the models in the
#' topmod object are allowed to have
#' @param bbeta if \code{bbeta=TRUE}, then first and second moments of model
#' coefficients are stored in addition to basic model statistics (Note: if
#' \code{bbeta<0} then only the first moments are saved)
#' @param lengthfixedvec The length of an optional fixed vector adhering to
#' each model (for instance R-squared, etc). If \code{lengthfixedvec=0} then no
#' additional fixed vector will be stored.
#' @param liks optional vector of log-likelihoods to initialize topmod object
#' with (length must be \code{<=nbmodels}) - see example below
#' @param ncounts optional vector of MCMC frequencies to initialize topmod
#' object with (same length as \code{liks}) - see example below
#' @param modelbinaries optional matrix whose columns detail model binaries to
#' initialize topmod object with (same nb columns as \code{liks}, nb rows as
#' \code{nmaxregressors}) - see example below
#' @param betas optional matrix whose columns are coefficients to initialize
#' topmod object with (same dimensions as \code{modelbinaries}) - see example
#' below
#' @param betas2 optional matrix whose columns are coefficients' second moments
#' to initialize topmod object with (same dimensions as \code{modelbinaries}) -
#' see example below
#' @param fixed_vector optional matrix whose columns are fixed vectors to
#' initialize the topmod object with (same \code{ncol} as \code{modelbinaries}) -
#' see example below
#' @return a call to \code{topmod} returns a list of class "topmod" with the
#' following elements:
#' \item{addmodel(mylik,vec01,vbeta=numeric(0),vbeta2=numeric(0),fixedvec=numeric(0))}{function
#' that adjusts the list of models in the 'topmod' object (see Details).
#' \code{mylik} is the basic selection criterion (usually log likelihood),
#' \code{vec01} is the model binary (logical or numeric) indicating which
#' regressors are included.\cr \code{vbeta} is a vector of length equal to
#' \code{sum(vec01)}, containing only the non-zero coefficients (only accounted
#' for if \code{bbeta!=FALSE}). \code{vbeta2} is a similar vector of second
#' moments etc. (only accounted for if \code{bbeta=TRUE}); \code{fixedvec} is
#' an arbitrary vector of length \code{lengthfixedvec} (see above)}
#' \item{lik()}{A numeric vector of the best models' (log) likelihoods, in
#' decreasing order} \item{bool()}{A character vector of hexmode expressions
#' for the model binaries (cf. \code{\link{bin2hex}}), sorted by \code{lik()} }
#' \item{ncount()}{A numeric vector of MCMC frequencies for the best models
#' (i.e. how often the respective model was introduced by \code{addmodel})}
#' \item{nbmodels}{Returns the argument \code{nbmodels}} \item{nregs}{Returns
#' the argument \code{nmaxregressors}} \item{bool_binary()}{Returns a matrix
#' whose columns present the models conforming to \code{lik()} in binary form}
#' \item{betas()}{a matrix whose columns are the coefficients conforming to
#' \code{bool_binary()} (Note that these include zero coefficients due to
#' non-inclusion of covariates); Note: if \code{bbeta=FALSE} this returns an
#' empty matrix} \item{betas2()}{similar to \code{betas()}, but for the second
#' moments of coefficients. Note: if \code{bbeta<=0}, this returns an empty
#' matrix} \item{fixed_vector()}{The columns of this matrix return the
#' \code{fixed_vector} statistics conforming to \code{lik()} (see Details);
#' Note: if \code{lengthfixedvec=0} this returns an empty matrix}
#' @note \code{topmod} is rather intended as a building block for programming;
#' it has no direct application for a user of the BMS package.
#'
#' @seealso the object resulting from \code{\link{bms}} includes an element of
#' class 'topmod'
#'
#' Check \url{http://bms.zeugner.eu} for additional help.
#' @keywords models
#' @examples
#'
#' #standard use
#' tm= topmod(2,4,TRUE,0) #should keep a maximum two models
#' tm$addmodel(-2.3,c(1,1,1,1),1:4,5:8) #update with some model
#' tm$addmodel(-2.2,c(0,1,1,1),1:3,5:7) #add another model
#' tm$addmodel(-2.2,c(0,1,1,1),1:3,5:7) #add it again -> adjust ncount
#' tm$addmodel(-2.5,c(1,0,0,1),1:2,5:6) #add another model
#'
#' #read out
#' tm$lik()
#' tm$ncount()
#' tm$bool_binary()
#' tm$betas()
#'
#' is.topmod(tm)
#'
#' #extract a topmod object only containing the second best model
#' tm2=tm[2]
#'
#'
#'
#' #advanced: should return the same result as
#' #initialize
#' tm2= topmod(2,4,TRUE,0, liks = c(-2.2,-2.3), ncounts = c(2,1),
#' modelbinaries = cbind(c(0,1,1,1),c(1,1,1,1)), betas = cbind(0:3,1:4),
#' betas2 = cbind(c(0,5:7),5:8))
#'
#' #update
#' tm2$addmodel(-2.5,c(1,0,0,1),1:2,5:6) #add another model
#'
#' #read out
#' tm2$lik()
#' tm2$ncount()
#' tm2$bool_binary()
#' tm2$betas()
#'
#' @export
topmod <- function(nbmodels,nmaxregressors=NA,bbeta=FALSE,lengthfixedvec=0,liks=numeric(0),
ncounts=numeric(0),modelbinaries=matrix(0,0,0),betas=matrix(0,0,0),
betas2=matrix(0,0,0),fixed_vector=matrix(0,0,0)) {
#user-friendly function to create a 'topmod' object
#nbmodels: integer, maximum number of models to be retained
#nmaxregressors: integer, maximum possible number of covariates (optional if arguments modelbinaries or betas are provided)
#bbeta: If TRUE, model coefficients (first & second moments) are to be saved by the resulting object; if FALSE not; (if bbeta<0 then only the first moments are to be saved)
#lengthfixedvec: If lengthfixedvec>0 then the resulting topmod object saves a numeric of length 'lengthfixedvec' for each added model; if lengthfixedvec=0, it does not; can be omitted if fixed_vector is supplied
#
#optional arguments to initialize the topmod object directly
#liks: a vector of (log) likelihoods (does not need to be sorted)
#ncounts: an (optional) vector of MCMC frequencies for each model (if provided, needs to have the same length as liks)
#modelbinaries: a logical or numeric matrix denoting the models to initialize the topmod object with; With nrow(modelbinaries) equal to nmaxregressors and ncol(modelbinaries) equal to length(liks) (can be omitted if argument betas is provided)
#betas: a numeric matrix with the (expected values of) coefficients of the models; with nrow(betas) equal to nmaxregressors and ncol(betas) equal to length(liks); note that providing betas automatically enables saving of first moments
#betas2: a numeric matrix with the second moments of the coefficients of the models; with nrow(betas2) equal to nmaxregressors and ncol(betas2) equal to length(liks); note that providing betas2 automatically enables saving of second moments
#fixed_vector: an optional fixed vector to be saved with each model
if (!is.numeric(nbmodels)) stop("argument 'nbmodels' needs to be a non-negative integer")
nbmodels=floor(nbmodels[[1]])
if (nbmodels[[1]]<0) stop("argument 'nbmodels' needs to be a non-negative integer")
if (bbeta>0) bbeta2=TRUE else bbeta2=FALSE
bbeta=as.logical(bbeta)
if (!bbeta&(length(betas)>0)) bbeta=TRUE
if (!bbeta2&(length(betas2)>0)) bbeta2=TRUE
if (is.na(lengthfixedvec[1])) lengthfixedvec=0
if ((lengthfixedvec==0)&length(fixed_vector)>0) {lengthfixedvec=nrow(fixed_vector)}
if (length(liks)>nbmodels) stop("liks longer than nbmodels allows")
if (length(ncounts)>nbmodels) stop("ncounts longer than nbmodels allows")
if ((length(modelbinaries)==0)&(length(betas)>0)) {modelbinaries=as.logical(betas);dim(modelbinaries)=dim(betas) }
if (ncol(modelbinaries)>nbmodels) stop("modelbinaries has more columns than nbmodels allows")
bindim=dim(modelbinaries); modelbinaries=as.logical(modelbinaries); dim(modelbinaries)=bindim;
if ((is.na(nmaxregressors[1]))&(length(modelbinaries)>0)) {nmaxregressors=nrow(modelbinaries) }
if (is.na(nmaxregressors)) stop("argument 'nmaxregressors' is missing")
nmaxregressors=floor(nmaxregressors[[1]])
if (nmaxregressors<=0) stop("argument 'nmaxregressors' needs to be a positive integer")
if ((length(ncounts)==0)&(length(liks)>0)) {ncounts=rep(1,length(liks)) }
#check conformance with K
if (length(modelbinaries)>0) if (nmaxregressors!=nrow(modelbinaries)) stop("nrow() of modelbinaries does not match nmaxregressors")
if (bbeta&(length(betas)>0)) if (nmaxregressors!=nrow(betas)) stop("nrow() of betas does not match nmaxregressors")
if (bbeta2&(length(betas2)>0)) if (nmaxregressors!=nrow(betas2)) stop("nrow() of betas2 does not match nmaxregressors")
#check that all have the same model length
N=length(liks)
if (length(ncounts)!=length(liks)) stop("lengths of arguments 'liks' and 'ncounts' do not conform")
if (ncol(modelbinaries)!=length(liks)) stop("ncol of argument 'modelbinaries' does not conform to length of argument 'liks'")
if (bbeta) if (ncol(betas)!=length(liks)) stop("ncol of argument 'betas' does not conform to length of argument 'liks'")
if (bbeta2) if (ncol(betas2)!=length(liks)) stop("ncol of argument 'betas2' does not conform to length of argument 'liks'")
if (lengthfixedvec) if (ncol(fixed_vector)!=length(liks)) stop("ncol of argument 'fixed_vector' does not conform to length of argument 'liks'")
morder=order(liks,decreasing=TRUE)
liks=liks[morder]
modelbinaries=modelbinaries[,morder,drop=FALSE]
ncounts=ncounts[morder]
if (bbeta) { betas=betas[,morder,drop=FALSE] }
if (bbeta2) { betas2=betas2[,morder,drop=FALSE] }
if (lengthfixedvec) { fixed_vector=fixed_vector[,morder,drop=FALSE] }
hexobj=.hexcode.binvec.convert(nmaxregressors)
bools=as.vector(sapply(as.list(as.data.frame(modelbinaries)),hexobj$as.hexcode))
if (length(bools)==0) {bools=character(0)}
veck=numeric(0); betas_raw=numeric(0); betas2_raw=numeric(0);
if (bbeta) {
veck=colSums(modelbinaries)
betas_raw=as.vector(betas)[as.vector(modelbinaries)]
}
if (bbeta2) {
veck=colSums(modelbinaries)
betas2_raw=as.vector(betas2)[as.vector(modelbinaries)]
}
fixedvec=as.vector(fixed_vector)
.top10(nmaxregressors=nmaxregressors,nbmodels=nbmodels,bbeta=bbeta,lengthfixedvec=lengthfixedvec,bbeta2=bbeta2,
inivec_lik=liks,inivec_bool=bools,inivec_count=ncounts,inivec_vbeta=betas_raw, inivec_vbeta2=betas2_raw,inivec_veck=veck,inivec_fixvec=fixedvec)
}
.cor.topmod <- function(tmo) {
if (is.bma(tmo)) tmo=tmo$topmod
pmp.10=pmp.bma(tmo,oldstyle=TRUE)
if (nrow(pmp.10)==1|suppressWarnings(length(grep("error",class(try(cor(pmp.10[,1],pmp.10[,2]),silent=TRUE)))))) {
corr.pmp=NA
} else {
if (var(pmp.10[,2])==0) corr.pmp=NA else corr.pmp=cor(pmp.10[,1],pmp.10[,2])
}
return(corr.pmp)
}
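# Usage sketch (not run; 'mm' denotes a hypothetical bma object from bms()):
#   .cor.topmod(mm) # correlation between exact (marg. lik.) and MCMC-frequency PMPs
#                   # of the best models, cf. plotConv; NA if only one model is
#                   # retained or the MCMC frequencies have zero variance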
.topmod.as.bbetaT <- function (tm,gprior.info=NULL,yXdata=NULL, addr2=FALSE) {
#this is a small function to convert a topmod object with bbeta=FALSE into one with bbeta=TRUE
#CAUTION: this is not a generic function, but tailored to very specific topmod objects in conjunction with bms()
# tm: a topmod object as the one resulting from a call to bms()
# gprior.info: an object as the one resulting from a call to bms();
# optionally you can easily create one with the function .choose.gprior()
# yXdata: the original data, just like the object X.data resulting from a call to bms()
# if addr2=TRUE, then a first row containing the R-squareds is prepended to the fixed_vector
# returns an adjusted topmod object
#
# Optionally, tm can be a bma object, then this function returns an adjusted bma object
is.bmao=FALSE
if (is.bma(tm)) {
#in case tm is a bma object...
is.bmao=TRUE
bmao=tm;
yXdata=bmao$arguments$X.data;
gprior.info=bmao$gprior.info;
tm =bmao$topmod
}
#retrieve necessary info
yXdata=as.matrix(yXdata);
N=nrow(yXdata); K=ncol(yXdata)-1
yXdata=yXdata-matrix(colMeans(yXdata),N,K+1,byrow=TRUE)
if (length(tm$lik())<1) { if (is.bmao) return(bmao) else return(tm)}
if (!addr2) if ((length(tm$betas_raw())>0)&(ncol(as.matrix(tm$betas()))==length(tm$lik()))) { if (is.bmao) return(bmao) else return(tm)}
bools=(tm$bool_binary())
yty=c(crossprod(yXdata[,1]))
positions=lapply(lapply(as.list(as.data.frame(bools)),as.logical),which)
#calculate all the OLS results for all the models
olsmodels=lapply(lapply(positions,.ols.terms2,yty=yty,N=N,K=K,XtX.big=crossprod(yXdata[,-1]),Xty.big=c(crossprod(yXdata[,-1],yXdata[,1]))),function (x) x$full.results())
#initialize the right gprior-function
# if (gprior.info$is.constant) {
# lprobo=.lprob.constg.init(g=gprior.info$g,N=N,K=K,yty=yty)
# } else if (gprior.info$gtype=="EBL") {
# lprobo=.lprob.eblocal.init(g=gprior.info$g,N=N,K=K,yty=yty)
# } else if (gprior.info$gtype=="hyper") {
# lprobo=.lprob.hyperg.init(g=gprior.info$g,N=N,K=K,yty=yty,f21a=gprior.info$hyper.parameter)
# } else {
# stop("gprior not recognizeable")
# }
lprobo=gprior.info$lprobcalc
#calculate the posterior statistics
lpl=lapply(olsmodels,function(x) lprobo$lprob(x$ymy,length(x$bhat),x$bhat,x$diag.inverse)) # caution: lprobs are not the same due to missing prior model probs
veck=as.vector(unlist(lapply(lapply(lpl,"[[","b1new"),length)))
b1raw=as.vector(unlist(lapply(lpl,"[[","b1new")))
b2raw=as.vector(unlist(lapply(lpl,"[[","b2new")))
fixedvecmat=tm$fixed_vector()
if (addr2) { # add R-squared as the first elements among the fixed vectors
r2=1-sapply(olsmodels,function(x) x$ymy)/yty
if (nrow(fixedvecmat)==0) {
fixedvecmat=matrix(0,0,length(veck))
} else if ( mean(abs(r2-fixedvecmat[1,]))< 1e-17 ) {
fixedvecmat=fixedvecmat[-1,,drop=FALSE]
}
fixedvecmat = rbind(r2,fixedvecmat)
}
lengthfixedvec=nrow(fixedvecmat)
#now create new topmod with bbeta=TRUE
tm<-.top10(nmaxregressors=tm$nregs,nbmodels=tm$nbmodels,bbeta=TRUE,lengthfixedvec=lengthfixedvec,bbeta2=TRUE,
inivec_lik=tm$lik(),inivec_bool=tm$bool(),inivec_count=tm$ncount(),inivec_vbeta=b1raw, inivec_vbeta2=b2raw,inivec_veck=veck,inivec_fixvec=c(fixedvecmat))
if (is.bmao) {
bmao$topmod <- tm
# bmao$arguments$beta.save=TRUE
return(bmao)
}
return(tm)
}
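# Usage sketch (not run; a hedged example with a hypothetical bma object 'mm'
# whose topmod was sampled without storing coefficients):
#   mm2 <- .topmod.as.bbetaT(mm, addr2=TRUE) # recompute coefficient moments via OLS
#   mm2$topmod$betas()                       # first moments, now non-empty
#   mm2$topmod$fixed_vector()[1,]            # first row now holds the models' R-squareds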
combine_chains <- function(...) {
#to combine outputs of the function bms
#EXAMPLE:
#bma1<-bms(X.data=t5.within[,1:20],burn=100,iter=1000,g=TRUE,nmodel=10,logfile=TRUE,beta.save=FALSE,start.value=41,step=1000)
#bma2<-bms(X.data=X.data,burn=1000,iter=10000,g=TRUE,nmodel=10,logfile=TRUE,beta.save=FALSE,start.value=41,step=1000)
#out=combine_chains(bma1,bma2)
### or: combine_chains(bma1,bma2,bma3,bma4)...
# output is a standard bma object
combine_topmods <- function(topmodobj1,topmodobj2) {
#to combine top10 models objects of function bms()
#e.g. ppp=combine_topmods(test1$topmod,test2$topmod)
# output: a topmodel object
#retrieve the necessary properties
nregs1=topmodobj1$nregs
nregs2=topmodobj2$nregs
if (nregs1!=nregs2) {stop("The number of regressors in both BMA chains has to be the same!")}
k1=length(topmodobj1$ncount())
k2=length(topmodobj2$ncount())
#read out the important stuff
nbmodels1=topmodobj1$nbmodels
nbmodels2=topmodobj2$nbmodels
ncount1=topmodobj1$ncount()
ncount2=topmodobj2$ncount()
lik1=topmodobj1$lik()
lik2=topmodobj2$lik()
bool1=topmodobj1$bool()
bool2=topmodobj2$bool()
betas1=topmodobj1$betas()
betas2=topmodobj2$betas()
betas2_1=topmodobj1$betas2()
betas2_2=topmodobj2$betas2()
fv1=topmodobj1$fixed_vector()
fv2=topmodobj2$fixed_vector()
if (all(betas1==0)|all(betas2==0)) {dobetas=FALSE} else {dobetas=TRUE}
if (all(betas2_1==0)|all(betas2_2==0)) {dobetas2=FALSE} else {dobetas2=TRUE}
#first look which models of 1 are already in 2 and
#for these just update the ncounts (note: this is quite easy, since this subset in 1 has the same order as in 2)
idxin2_boolof1in2=match(bool1,bool2)
idxin1_boolof1in2=which(!is.na(idxin2_boolof1in2))
idxin2_boolof1in2=idxin2_boolof1in2[!is.na(idxin2_boolof1in2)]
ncount2[idxin2_boolof1in2]=ncount2[idxin2_boolof1in2]+ncount1[idxin1_boolof1in2]
if (any(idxin1_boolof1in2)) { # in case there are models in 1 that also show up in 2
#strip 1 of all the models that were already in 2
ncount1=ncount1[-idxin1_boolof1in2]
lik1=lik1[-idxin1_boolof1in2]
bool1=bool1[-idxin1_boolof1in2]
}
#now do A u (B\(AnB))
lika=c(lik2,lik1)
orderlika=order(lika,decreasing=TRUE)
lika=lika[orderlika]
ncounta=c(ncount2,ncount1)[orderlika]
boola=c(bool2,bool1)[orderlika]
if (dobetas) {
# if there are betas, do the same for betas
if (any(idxin1_boolof1in2)) betas1=betas1[,-idxin1_boolof1in2]
betasa=cbind(betas2,betas1)[,orderlika]
betasa_not0=betasa!=0
vecka=colSums(betasa_not0)
vbetaa=as.vector(betasa[as.vector(betasa_not0)])
} else {
vecka=0;vbetaa=numeric(0)
}
if (dobetas2) {
# if there are betas^2, do the same for betas^2
if (any(idxin1_boolof1in2)) betas2_1=betas2_1[,-idxin1_boolof1in2]
betasa2=cbind(betas2_2,betas2_1)[,orderlika]
vbetaa2=as.vector(betasa2[as.vector(betasa_not0)]);
} else {
vbetaa2=numeric(0)
}
fva=numeric(0); lfv=0;
if ( (nrow(fv1)==nrow(fv2)) & ((nrow(fv1)>0) & (nrow(fv2)>0)) ) {
# if there is a fixed vector then combine it the same way
if (any(idxin1_boolof1in2)) fv1=fv1[,-idxin1_boolof1in2]
if (!is.matrix(fv1)) fv1=t(fv1)
fva=as.vector(cbind(fv2,fv1)[,orderlika])
lfv=nrow(fv1)
}
return(.top10(nmaxregressors=nregs1,nbmodels=length(lika),bbeta=dobetas,lengthfixedvec=lfv,bbeta2=dobetas2,inivec_lik=lika,inivec_bool=boola,inivec_count=ncounta,inivec_vbeta=vbetaa,inivec_vbeta2=vbetaa2,inivec_veck=vecka,inivec_fixvec=fva))
}
combine_2chains <- function(flso1,flso2) {
# if (!exists("sPath")) sPath=""
# source(paste(sPath,"aux_inner.r",sep=""),local=TRUE)
# if (flso1$arguments$beta.save & flso2$arguments$beta.save) {
# beta.save = TRUE
# } else {
# beta.save = FALSE
# }
# combine the topmod objects
topmod.combi=combine_topmods(flso1$topmod,flso2$topmod)
#prepare the gprior.info object for post.calc
gpi <- flso1$gprior.info
gpi$shrinkage.moments=numeric(length(gpi$shrinkage.moments))
# use post.calc to compute info, gprior.info, and reg.names
io1=flso1$info; io2 = flso2$info
obj.combi=.post.calc(gprior.info=gpi,add.otherstats=io1$add.otherstats + io2$add.otherstats,k.vec=(io1$k.vec[-1]+io2$k.vec[-1]),null.count=(io1$k.vec[1]+io2$k.vec[1]),
flso1$arguments$X.data,topmods=topmod.combi,b1mo=io1$b1mo + io2$b1mo,b2mo=io1$b2mo + io2$b2mo,iter=io1$iter + io2$iter,burn=io1$burn + io2$burn,
inccount=io1$inccount + io2$inccount,models.visited=io1$models.visited + io2$models.visited,K=io1$K,N=io1$N,msize=io1$msize + io2$msize,
timed=io1$timed + io2$timed,cumsumweights=io1$cumsumweights + io2$cumsumweights,mcmc=flso1$arguments$mcmc,possign=io1$pos.sign+io2$pos.sign)
# concatenate start.positions by cbinding them
stpos1=as.matrix(flso1$start.pos);stpos2=as.matrix(flso2$start.pos)
startpos.combi =cbind(rbind(stpos1,matrix(0,max(0,nrow(stpos2)-nrow(stpos1)),ncol(stpos1))),rbind(stpos2,matrix(0,max(0,nrow(stpos1)-nrow(stpos2)),ncol(stpos2))))
# concatenate bms.calls in list
call.combi= c(flso1$bms.call,flso2$bms.call)
# combine arguments
args.combi = flso1$arguments; args2=flso2$arguments
args.combi$burn = args.combi$burn + args2$burn
args.combi$iter = args.combi$iter + args2$iter
if ((length(args.combi$mprior.size)==1)|(length(args2$mprior.size)==1)) {args.combi$mprior.size = mean(c(args.combi$mprior.size,args2$mprior.size))}
args.combi$nmodel = topmod.combi$nbmodels
# args.combi$beta.save = (args.combi$beta.save & args2$beta.save)
args.combi$user.int = (args.combi$user.int & args2$user.int)
args.combi$g.stats = (args.combi$g.stats & args2$g.stats)
#model prior object
mp1=flso1$mprior.info; mp2=flso2$mprior.info
if (mp1$mp.mode!=mp2$mp.mode) {mpall=list()} else {
mpall=mp1
mpall$mp.msize = .5*mp1$mp.msize+.5*mp2$mp.msize
mpall$origargs$mpparam = .5*mp1$origargs$mpparam+.5*mp2$origargs$mpparam
mpall$mp.Kdist= .5*mp1$mp.Kdist + .5*mp2$mp.Kdist
}
result=list(info=obj.combi$info,arguments=args.combi, topmod=topmod.combi,start.pos=startpos.combi,
gprior.info=obj.combi$gprior.info, mprior.info=mpall, X.data=flso1$arguments$X.data,reg.names=obj.combi$reg.names,bms.call=call.combi)
class(result)="bma"
return(result)
}
#############################################################################################################
#this is the rest of the combine function; the combine function is iteratively used to combine as many chains
#as are specified by (...)
############################################################################################################
arglist=list(...)
if ( !all(unlist(lapply(arglist,is.bma)))) stop("All of the input arguments must be BMA objects!")
if ( !all(lapply(arglist,function (xx) xx$info$K)==arglist[[1]]$info$K) ) stop("All of the input BMA objects must have an equal number of max regressors (i.e. the same X.data)!")
if ( !all(lapply(arglist,function (xx) xx$info$N)==arglist[[1]]$info$N) ) stop("All of the input BMA objects must have equal X.data!")
if ( !all(lapply(arglist,function (xx) xx$gprior.info$gtype)==arglist[[1]]$gprior.info$gtype) ) stop("All of the input BMA objects must have the same type of g-prior (bms-argument g)")
if ( length(arglist)==1) return(arglist[[1]])
combined_output <- combine_2chains(arglist[[1]],arglist[[2]])
if (nargs()>2) {
for (inarg in 3:nargs()) {
combined_output <- combine_2chains(arglist[[inarg]],combined_output)
}
}
############################################################################################################
return(combined_output)
}
#' Concatenate bma objects
#'
#' Combines bma objects (resulting from \code{\link{bms}}). Can be used to
#' split estimation over several machines, or combine the MCMC results obtained
#' from different starting points.
#'
#' Aggregates the information obtained from several chains. The result is a
#' 'bma' object (cf. 'Values' in \code{\link{bms}}) that can be used just as a
#' standard 'bma' object.\cr Note that \code{combine_chains} helps in
#' particular to parallelize the enumeration of the total model space: A model
#' with \eqn{K} regressors has \eqn{2^K} potential covariate combinations: With
#' \eqn{K} large (more than 25), this can be pretty time intensive. With the
#' \code{\link{bms}} arguments \code{start.value} and \code{iter}, sampling can
#' be done in steps: cf. example 'enumeration' below.
#'
#' @aliases combine_chains c.bma
#' @param \dots At least two 'bma' objects (cf. \code{\link{bms}})
#' @param recursive retained for compatibility with \code{\link{c}} method
#'
#' @seealso \code{\link{bms}} for creating bma objects
#'
#' Check \url{http://bms.zeugner.eu} for additional help.
#' @keywords models
#' @examples
#'
#' data(datafls)
#'
#' #MCMC case ############################
#' model1=bms(datafls,burn=1000,iter=4000,mcmc="bd",start.value=c(20,30,35))
#' model2=bms(datafls,burn=1500,iter=7000,mcmc="bd",start.value=c(1,10,15))
#'
#' model_all=c(model1,model2)
#' coef(model_all)
#' plot(model_all)
#'
#'
#'
#' #splitting enumeration ########################
#'
#' #standard case with 12 covariates (4096 different combinations):
#' enum0=bms(datafls[,1:13],mcmc="enumerate")
#'
#' # now split the task:
#' # enum1 does everything from model zero (the first model) to model 1999
#' enum1=bms(datafls[,1:13],mcmc="enumerate",start.value=0,iter=1999)
#'
#' # enum2 does models from index 2000 to the index 3000 (in total 1001 models)
#' enum2=bms(datafls[,1:13],mcmc="enumerate",start.value=2000,iter=1000)
#'
#' # enum3 does models from index 3001 to the end
#' enum3=bms(datafls[,1:13],mcmc="enumerate",start.value=3001)
#'
#' enum_combi=c(enum1,enum2,enum3)
#' coef(enum_combi)
#' coef(enum0)
#' #both enum_combi and enum0 have exactly the same results
#' #(one difference: enum_combi has more 'top models' (1500 instead of 500))
#'
#' @export
c.bma <- function(...,recursive=FALSE) {
#simple wrapper; 'recursive' has no meaning and is retained for compatibility
combine_chains(...)
}
########################################################################
# auxiliary functions for the topmodel object ##########################
########################################################################
.hexcode.binvec.convert <- function(length.of.binvec) {
#function to initialise conversion between a logical vector (such as c(1,0,0,0)) and a character hexcode vector (such as "f")
#length.of.binvec is the desired length of the inserted and resulting logical vectors;
#the initialisation prepends some leading zeros to make the vector convertible into hexcode (length of bin. vector as a multiple of 4)
if (length(length.of.binvec)>1) length.of.binvec=length(length.of.binvec)
addpositions=4-length.of.binvec%%4; positionsby4=(length.of.binvec+addpositions)/4;
hexvec=c(0:9,"a","b","c","d","e","f"); #lookup list for converting from binary to hexadecimal
hexcodelist=list("0"=numeric(4),"1"=c(0,0,0,1),"2"=c(0,0,1,0),"3"=c(0,0,1,1),"4"=c(0,1,0,0),"5"=c(0,1,0,1),"6"=c(0,1,1,0),
"7"=c(0,1,1,1),"8"=c(1,0,0,0),"9"=c(1,0,0,1),"a"=c(1,0,1,0),"b"=c(1,0,1,1),"c"=c(1,1,0,0),"d"=c(1,1,0,1),"e"=c(1,1,1,0),"f"=c(1,1,1,1));
#lookup list for converting from hexadecimal to binary
return(list(
as.hexcode = function(binvec) {
#convert logical vector to hexcode character
incl=c(numeric(addpositions),binvec);dim(incl)=c(4,positionsby4); #split into elements of four positions
return(paste(hexvec[crossprod(incl,2L^(3:0))+1],collapse=""));
},
as.binvec = function(hexcode) {
#convert hexcode character to numeric vector (e.g. "a" to c(1,0,1,0))
return(unlist(hexcodelist[unlist(strsplit(hexcode,"",fixed=TRUE),recursive=FALSE,use.names=FALSE)],recursive=FALSE,use.names=FALSE)[-(1:addpositions)])
}))
}
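# Round-trip sketch (not run; illustrates the converter pair for vectors of length 5):
#   h <- .hexcode.binvec.convert(5) # pads 3 leading zeros internally (5+3=8 bits)
#   h$as.hexcode(c(1,0,1,0,1))      # "15", i.e. the nibbles 0001 and 0101
#   h$as.binvec("15")               # back to c(1,0,1,0,1)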
#' @rdname bin2hex
#' @export
hex2bin<-function(hexcode) {
#user-friendly function to convert some hexcode character to numeric vector (e.g. "a" to c(1,0,1,0))
if (!is.character(hexcode)) stop("please input a character like '0af34c'");
hexcode <- paste("0",tolower(hexcode),sep="")
hexobj<-.hexcode.binvec.convert(length(hexcode)*16L)
return(hexobj$as.binvec(hexcode))
}
#' Converting Binary Code to and from Hexadecimal Code
#'
#' A simple-to-use function for converting a logical ('binary') vector into hex
#' code and reverse.
#'
#' The argument is an integer in binary form (such as "101"), provided as a
#' logical (\code{c(T,F,T)}) or numeric vector (\code{c(1,0,1)}).\cr
#' \code{bin2hex} then returns a character denoting this number in hexcode (in
#' this case "5").
#'
#' The function \code{hex2bin} does the reverse operation, e.g.
#' \code{hex2bin("5")} gives (\code{c(1,0,1)}).
#'
#' @aliases bin2hex hex2bin
#' @param binvec a logical vector (alternatively a vector coercible into
#' logical)
#' @param hexcode a single-element character denoting an integer in hexcode
#' (admissible characters: 0 to 9, a to f)
#' @return \code{bin2hex} returns a single element character; \code{hex2bin}
#' returns a numeric vector equivalent to a logical vector
#'
#' @seealso \code{\link{hex2bin}} for converting hexcode into binary vectors,
#' \code{\link{format.hexmode}} for a related R function.
#'
#' Check \url{http://bms.zeugner.eu} for additional help.
#' @keywords arith
#' @examples
#'
#' bin2hex(c(TRUE,FALSE,TRUE,FALSE,TRUE,TRUE))
#' bin2hex(c(1,0,1,0,1,1))
#' hex2bin("b8a")
#' bin2hex(hex2bin("b8a"))
#'
#' @export
bin2hex<-function(binvec) {
#user-friendly function to convert some logical vector to hexcode character(e.g. c(1,0,1,0) or c(T,F,T,F) to "a")
if (!is.logical(binvec)) {if (is.numeric(binvec)) {binvec=as.logical(binvec)} else {stop("need to supply a logical vector like c(T,F) or c(1,0)")} }
hexobj<-.hexcode.binvec.convert(length(binvec))
hexcode=hexobj$as.hexcode(binvec)
if (nchar(hexcode)>(floor((length(binvec)-1)/4)+1)) {hexcode=substring(hexcode,2)}
return(hexcode)
}
#######################
# FUNCTIONS FOR USERS #
#########################################################################
#' Plot Model Size Distribution
#'
#' Plots posterior and prior model size distribution
#'
#'
#' @param bmao a 'bma' object (cf. \code{\link{bms}})
#' @param exact if \code{TRUE}, then the posterior model distribution is based
#' on the best models of \code{bmao} and their marginal likelihoods;\cr if
#' \code{FALSE} (default) then the distribution is based on all encountered
#' models and their MCMC frequencies (cf. 'Details' in \code{\link{coef.bma}})
#' @param ksubset integer vector detailing for which model sizes the plot
#' should be done
#' @param include.legend if \code{TRUE}, a small legend is included via the
#' low-level command \code{\link{legend}}
#' @param do.grid if \code{TRUE}, a \code{\link{grid}} is added to the plot
#' (with a simple \code{grid()}).
#' @param \dots parameters passed on to \code{\link{matplot}} with sensible
#' defaults
#' @return As a default, \code{plotModelsize} plots the posterior model size
#' distribution as a blue line, and the prior model distribution as a dashed
#' red line.\cr In addition, it returns a list with the following elements:
#' \item{mean}{The posterior expected value of model size} \item{var}{The
#' variance of the posterior model size distribution} \item{dens}{A vector
#' detailing the posterior model size distribution from model size \eqn{0} (the
#' first element) to \eqn{K} (the last element)}
#'
#' @seealso See also \code{\link{bms}}, \code{\link{image.bma}},
#' \code{\link{density.bma}}, \code{\link{plotConv}}
#'
#' Check \url{http://bms.zeugner.eu} for additional help.
#' @keywords hplot
#' @examples
#'
#' data(datafls)
#' mm=bms(datafls,burn=1500, iter=5000, nmodel=200,mprior="fixed",mprior.size=6)
#'
#' #plot Nb.1 based on aggregate results
#' postdist= plotModelsize(mm)
#'
#' #plot based only on 30 best models
#' plotModelsize(mm[1:30],exact=TRUE,include.legend=FALSE)
#'
#' #plot based on all best models, but showing distribution only for model sizes 1 to 20
#' plotModelsize(mm,exact=TRUE,ksubset=1:20)
#'
#' # create a plot similar to plot Nb. 1
#' plot(postdist$dens,type="l")
#' lines(mm$mprior.info$mp.Kdist)
#'
#'
#' @export
plotModelsize<-function(bmao,exact=FALSE,ksubset=NULL,include.legend=TRUE, do.grid=TRUE, ...) { #,lwd=1.5,xaxt="n",col=c("steelblue3","tomato"),main=NULL,cex.main=0.8,xlab="Model Size",ylab=""){
#plots posterior vs. prior model size distribution
# bma: bma object
# exact: whether posterior distribution is to be based on MC3 results or on best 'top' models (as in bmao$topmod)
# ksubset: an integer vector detailing for which model sizes the plot should be done
# include.legend: if TRUE, a small legend is included via legend()
# ... parameters passed on to ?matplot (Note: defaults are as above, default 'main' is 'Posterior Model Size Distribution Mean: x' where x is the actual mean)
# besides plotting, the function returns a list with the mean and variance of the posterior model size distribution (not altered by ksubset)
dotargs = match.call(expand.dots=FALSE)$...
if (length(exact)>1) { topmodidx=exact; exact=TRUE} else {topmodidx=NA}
# get required information
K=bmao$info$K
if (is.element("mprior.info",names(bmao))) m=bmao$mprior.info$mp.msize else m=bmao$arguments$prior.msize
pmp.10=pmp.bma(bmao$topmod[topmodidx],oldstyle=TRUE)
if(exact){
modelSmean=sum(apply(.post.topmod.bma(bmao$topmod[topmodidx]),2,function(x) length(which(x==1)))*pmp.10[,1])
modelS.var=sum(apply(.post.topmod.bma(bmao$topmod[topmodidx]),2,function(x) length(which(x==1)))^2*pmp.10[,1])-modelSmean^2
x = apply(.post.topmod.bma(bmao$topmod[topmodidx]),2,function(x) length(which(x==1)))
y = pmp.10[,1]
result = c()
for( i in sort(unique(x)) )
result = c(result, sum(y[which(x==i)]))
names(result) = sort(unique(x))
kvec=rep(0,(K+1))
kvec[(as.numeric(names(result))+1)]=result
}
else{
k.vec=bmao$info$k.vec
#calculate expected value
summi=sum(k.vec)
#modelSmean -1 because we have to deduct the constant from the mean model size
modelSmean=sum((1:length(k.vec))*(k.vec/summi))-1
kvec=k.vec/sum(k.vec)
modelSmean.sq=sum(((1:length(k.vec))^2)*(k.vec/summi))
modelS.var=modelSmean.sq-modelSmean^2 #var(x)=E(X^2)-(E(X))^2
}
upper=min(ceiling(modelSmean+5*modelS.var), K)
lower=max(floor(modelSmean-5*modelS.var),0)
if (is.element("mp.Kdist",names(bmao$mprior.info))) {
prior = bmao$mprior.info$mp.Kdist
} else if (is.element("theta", names(bmao$arguments))) { #kept for historical reasons
theta=bmao$arguments$theta
if(theta=="random"){
beta.bin=function(a=1,b=(K-m)/m,K=K,w=0:K){
return(lgamma(a+b)-(lgamma(a)+lgamma(b)+lgamma(a+b+K))+log(choose(K,w))+lgamma(a+w)+lgamma(b+K-w))
}
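# beta.bin returns the log of the beta-binomial model-size pmf
# P(size=w) = choose(K,w)*B(a+w,b+K-w)/B(a,b); with a=1 and b=(K-m)/m the
# prior expected model size is K*a/(a+b) = m (the 'random' theta case)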
prior=exp(beta.bin(a=1,b=(K-m)/m,K=K,w=0:K))
}
if(theta!="random"){ prior=stats::dbinom(x=0:K,size=K,prob=m/K,log=FALSE)}
} else {
prior = rep(NA,length(kvec))
}
mat=cbind(kvec,prior)
#do the plot
upper.ylim=max(kvec,prior, na.rm=TRUE)
if(is.null(ksubset)){ksubset=(lower:upper)}
dotargs=.adjustdots(dotargs,type="l",ylim=c(0,1.1*upper.ylim),lwd=1.5,xaxt="n",col=c("steelblue3","tomato"),main=paste("Posterior Model Size Distribution","\n","Mean:",round(modelSmean,4)),cex.main=0.8,xlab="Model Size",ylab="", lty=1:2, pch=4, cex.axis=.9)
matsubset=mat[ksubset+1,]
eval(as.call(c(list(as.name("matplot"),as.name("matsubset")),as.list(dotargs))))
#matplot(mat[ksubset,],type="l",xaxt=xaxt,col=col,
# main=ifelse(is.null(main),paste("Posterior Model Size Distribution","\n","Mean:",
# round(modelSmean,4)),main),cex.main=cex.main,xlab=xlab,
# ylim=c(0,1.1*upper.ylim),lwd=lwd,ylab=ylab,...)
if (as.logical(do.grid)) graphics::grid()
graphics::points(kvec[ksubset+1],cex=0.8,pch=eval(dotargs$pch))
#if(lower==0){
# graphics::axis(1, las=1, at =1:length(lower:upper), label = c(0:K)[lower:(upper+1)],cex.axis=0.7)
#}
#else{
# axis(1, las=1, at =1:length(lower:upper), label = c(0:K)[lower:upper],cex.axis=0.7)
#}
graphics::axis(1, las=1, at =1:length(ksubset), labels = ksubset, cex.axis=eval(dotargs$cex.axis))
if (include.legend) {
if (is.null(prior)||all(is.na(prior))) {
graphics::legend(x="topright",lty=eval(dotargs$lty),legend=c("Posterior"),col=eval(dotargs$col),ncol=1,bty="n",lwd=eval(dotargs$lwd))
} else {
graphics::legend(x="topright",lty=eval(dotargs$lty),legend=c("Posterior","Prior"),col=eval(dotargs$col),ncol=2,bty="n",lwd=eval(dotargs$lwd))
}
}
return(invisible(list(mean=modelSmean,var=modelS.var,dens=kvec)))
}
#' Coefficient Marginal Posterior Densities
#'
#' Calculates the mixture marginal posterior densities for the coefficients
#' from a BMA object and plots them
#'
#' The argument \code{addons} specifies what additional information should be
#' added to the plot(s) via the low-level commands \code{\link{lines}} and
#' \code{\link{legend}}:\cr \code{"e"} for the posterior expected value (EV) of
#' coefficients conditional on inclusion (see argument \code{exact=TRUE} in
#' \code{\link{coef.bma}}),\cr \code{"s"} for 2 times posterior standard
#' deviation (SD) bounds,\cr \code{"m"} for the posterior median,\cr \code{"b"}
#' for posterior expected values of the individual models over which the density
#' is averaged,\cr \code{"E"} for posterior EV under MCMC frequencies (see
#' argument \code{exact=FALSE} in \code{\link{coef.bma}}),\cr \code{"S"} for
#' the corresponding SD bounds (MCMC),\cr \code{"p"} for plotting the Posterior
#' Inclusion Probability above the density plot,\cr \code{"l"} for including a
#' \code{\link{legend}}, \code{"z"} for a zero line, \code{"g"} for adding a
#' \code{\link{grid}}
#'
#' Any combination of these letters will give the desired result. Use
#' \code{addons=""} for not using any of these.\cr In case of
#' \code{density.zlm}, only the letters \code{e}, \code{s}, \code{l}, \code{z},
#' and \code{g} will have an effect.
#'
#' @aliases density.bma density.zlm
#' @param x A bma object (see \code{\link{bms}}) or a \code{\link{zlm}} object.
#' @param reg A scalar integer or character detailing which covariate's
#' coefficient should be plotted. If \code{reg=NULL} (default), then all
#' regressors are plotted one after the other, waiting for user interaction.
#' @param addons character. Specifies which additional information should be
#' added to the plot via low-level commands (see 'Details' below).
#' @param std.coefs logical. If \code{TRUE} then the posterior density is
#' estimated for standardized coefficients (representing the case where all
#' variables have mean zero and standard deviation 1) - default is
#' \code{FALSE}.
#' @param n numeric. the number of equally spaced points at which the density
#' is to be estimated.
#' @param plot logical. If \code{TRUE} (default), the density is plotted; if
#' \code{FALSE} then \code{density.bma} only returns the estimated posterior
#' densities without plotting.
#' @param hnbsteps even integer, default 30. The number of numerical
#' integration steps to be used in case of a hyper-g prior (cf. argument
#' \code{g} in \code{\link{bms}}). Increase this number to increase accuracy.
#' @param addons.lwd scalar, default 1.5. Line width to be used for the
#' low-level plotting commands specified by \code{addons}. Cf. argument
#' \code{lwd} in \code{\link{par}}
#' @param \dots Additional arguments for \code{\link{plot.default}} with
#' sensible defaults
#' @return The function returns a list containing objects of the class
#' \code{\link{density}} detailing the marginal posterior densities for each
#' coefficient provided in \code{reg}.\cr In case of \code{density.zlm}, simple
#' marginal posterior coefficient densities are computed, while
#' \code{density.bma} calculates their mixtures over models according to
#' posterior model probabilities.\cr These densities contain only the density
#' points apart from the origin. (see 'Note' below)
#'
#' As long as \code{plot=TRUE}, the densities are plotted too. Note that (for
#' \code{density.bma}) if the posterior inclusion probability of a covariate is
#' zero, then it will not be plotted, and the returned density will be
#' \code{list(x=numeric(n),y=numeric(n))}.
#' @note The computed marginal posterior densities from \code{density.bma} are
#' a Bayesian Model Averaging mixture of the marginal posterior densities of
#' the individual models. The accuracy of the result therefore depends on the
#' number of 'best' models contained in \code{x} (cf. argument \code{nmodel} in
#' \code{\link{bms}}).
#'
#' The marginal posterior density can be interpreted as 'conditional on
#' inclusion': If the posterior inclusion probability of a variable is smaller
#' than one, then some of its posterior density is Dirac at zero. Therefore
#' the integral of the returned density vector adds up to the posterior
#' inclusion probability, i.e. the probability that the coefficient is not
#' zero.
#'
#' Correspondingly, the posterior EV and SD specified by \code{addons="es"} are
#' based on 'best' model likelihoods ('exact') and are conditional on
#' inclusion. They correspond to the results from command
#' \code{coef.bma(x,exact=TRUE,condi.coef=TRUE,order.by.pip=FALSE)} (cf. the
#' example below).
#'
#' The low-level commands enacted by the argument \code{addons} rely on colors
#' of the \code{\link{palette}}: color 2 for \code{"e"} and \code{"s"}, color 3
#' for \code{"m"}, color 8 for \code{"b"}, color 4 for \code{"E"} and
#' \code{"S"}. The default colors may be changed by a call to
#' \code{\link{palette}}.
#'
#' Up to BMS version 0.3.0, \code{density.bma} may only cope with built-in
#' \code{gprior}s, not with any user-defined priors.
#'
#' @seealso \code{\link{quantile.coef.density}} for extracting quantiles,
#' \code{\link{coef.bma}} for similar concepts, \code{\link{bms}} for creating
#' bma objects
#'
#' Check \url{http://bms.zeugner.eu} for additional help.
#' @keywords aplot utilities
#' @examples
#'
#'
#' data(datafls)
#' mm=bms(datafls)
#'
#' density(mm,reg="SubSahara")
#' density(mm,reg=7,addons="lbz")
#' density(mm,1:9)
#' density(mm,reg=2,addons="zgSE",addons.lwd=2,std.coefs=TRUE)
#'
#' # plot the posterior density only for the very best model
#' density(mm[1],reg=1,addons="esz")
#'
#'
#' #using the calculated density for other purposes...
#' dd=density(mm,reg="SubSahara")
#' plot(dd)
#'
#' dd_list=density(mm,reg=1:3,plot=FALSE,n=400)
#' plot(dd_list[[1]])
#'
#'
#' #Note that the shown density is only the part that is not zero
#' dd=density(mm,reg="Abslat",addons="esl")
#' pip_Abslat=sum(dd$y)*diff(dd$x)[1]
#'
#' #this pip and the EV conform to what is done by the follwing command
#' coef(mm,exact=TRUE,condi.coef=TRUE)["Abslat",]
#'
#' @export
density.bma <- function(x,reg=NULL,addons="lemsz",std.coefs=FALSE,n=300,plot=TRUE,hnbsteps=30,addons.lwd=1.5,...) {
# x: a bma object
# reg: the covariate to be calculated; can be character or integer
# addons: low-level additions to the plot: "e" for post. expected value, "s" for 2 times Std.Dev. bounds,
# "l" for including a legend, "m" for the median, "z" for a zero line
# "b" for posterior exp. values of the individual models
# "E" for post. exp. value under MCMC frequencies, "S" for the corresponding SD bounds (MCMC)
# ?"g" for adding a grid()
# "p" for an additional box drawing the PIP
# n: the number of points for which the density should be estimated
# std.coefs: If TRUE then plot post. dist. of standardized coefficients
# plot: if FALSE, then only the density is returned;
# hnbsteps: steps for numerical integration in case of a hyper-g prior
# addons.lwd: lty for addons lines
#...: parameters passed on to plot
#
#returns a list of class "density" (cf. ?density)
dtcm=function(x,df,ncp,varp) {
#a wrapper for the univariate location-scale t-distribution: location ncp, squared scale varp
# (the resulting variance is df/(df-2)*varp)
sqvarp=sqrt(varp)
stats::dt((x-ncp)/sqvarp,df=df)/sqvarp
}
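# e.g. dtcm(x, df, ncp, varp) is the density at x of ncp + sqrt(varp)*T with T ~ t(df)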
dsgivenykernel <- function(sf,kpa,N,z) {
#the posterior density of the shrinkage factor, f(s|Y), times the constant F((N-1)/2,1,(k+a)/2,R2)
(kpa-2)/2*(1-sf)^((kpa-4)/2)*(1-sf*z)^(-(N-1)/2)
}
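# note: per the comment above, this kernel equals the normalized shrinkage density
# f(s|Y) times the Gauss hypergeometric constant F((N-1)/2,1,(k+a)/2,R2), with
# kpa=k+a; dividing by the numerically integrated constant ('intconst' in sss()
# below) therefore recovers f(s|Y) itself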
#check user input and get basic info
dotargs = match.call(expand.dots=FALSE)$...
bmao=x
if (!is.bma(bmao)) stop("Argument bmao needs to be a bma object")
if (hnbsteps%%2) stop("Argument nbsteps needs to be an even integer")
nbsteps=max(hnbsteps,2)
n=max(ceiling(n),1)
N=bmao$info$N; K=bmao$info$K
if(is.null(reg)) reg=1:K
nameix=1:K; names(nameix)=bmao$reg.names; reg=nameix[reg]
#if (is.na(reg)) stop("Argument reg is out of bounds")
ishyper=(bmao$gprior$gtype=="hyper")
tm=bmao$topmod
bools=(tm$bool_binary())
betas=tm$betas()
betas2=tm$betas2()
if (std.coefs) {
#if standardized coefficients are wanted, then adjust moments accordingly
sddata=apply(as.matrix(bmao$arguments$X.data),2,stats::sd)
betas=diag(sddata[-1])%*%betas/sddata[1]
betas2=diag(sddata[-1]^2)%*%betas2/sddata[1]^2
}
sigmadiag=(betas2-betas^2)*(N-3)/(N-1) #the variance parameters for the t-dist
pmps=pmp.bma(bmao$topmod,oldstyle=TRUE)[,1] #calc post model probs (exact)
#the stuff below is similar to estimates.bma(bmao, condi.coef=TRUE,exact=TRUE)
pips=c(tcrossprod(bools,t(pmps)))
Eb1=c(tcrossprod(betas,t(pmps)))/pips
Ebsd=sqrt(c(tcrossprod(betas2,t(pmps)))/pips-Eb1^2); Ebsd[is.nan(Ebsd)]=0; Eb1[is.nan(Eb1)]=0
Eball=cbind(Eb1,Ebsd) #conditional coefficients
if ((any(grep("E",addons,ignore.case=FALSE)))|(any(grep("S",addons,ignore.case=FALSE)))) {
#in case the user wants to include MCMC results (see estimates.bma(,exact=FALSE),
#then compute them here
Eb1.mcmc = bmao$info$b1mo/bmao$info$inccount
Ebsd.mcmc = sqrt(bmao$info$b2mo/bmao$info$inccount-Eb1.mcmc^2)
if (std.coefs) {
sddata=apply(as.matrix(bmao$arguments$X.data),2,stats::sd)
Eb1.mcmc=Eb1.mcmc*sddata[-1]/sddata[1];
Ebsd.mcmc=Ebsd.mcmc*sddata[-1]/sddata[1];
}
}
if (ishyper) {
#in case of hyper-g, we cannot rely on sigmadiag, but have to numerically integrate over
#different shrinkages; for that we need the following
yXdata=as.matrix(bmao$arguments$X.data); yXdata=yXdata-matrix(colMeans(yXdata),N,K+1,byrow=TRUE)
if (std.coefs) yXdata=yXdata%*%diag(1/sddata)
yty=c(crossprod(yXdata[,1]))
positions=lapply(lapply(as.list(as.data.frame(bools)),as.logical),which)
olsmodels=lapply(lapply(positions,.ols.terms2,yty=yty,N=N,K=K,XtX.big=crossprod(yXdata[,-1]),Xty.big=c(crossprod(yXdata[,-1],yXdata[,1]))),function (x) x$full.results())
f21a=bmao$gprior.info$hyper.parameter
}
plotndens <- function(ix,doplot=FALSE) {
#this function does marginal density and plot for a specific covariate
#depends heavily on parent scope!
sss=function(lbound,uboundp1,nbsteps) {
#simple composite Simpson integration over the shrinkage factor
#this is just a convenience function and depends on variables in parent scope, only used in case of hyper-g
s.seq=seq(lbound,uboundp1,(uboundp1-lbound)/nbsteps)[-nbsteps]
tmat=sapply(as.list(s.seq),function(ss) { dtcm(seqs,N-1,ss*bhati,invdiagi*ss*(1-ss*z)/(N-1)*yty)}) #matrix of t-densities for different s
smat=sapply(as.list(s.seq),dsgivenykernel, kpa=k+f21a,N=N,z=z) #vector of posterior densities for the different s
if (any(is.infinite(smat))) smat[is.infinite(smat)]=0
intconst=(4*sum(smat[c(FALSE,TRUE)])+2*sum(smat[c(TRUE,FALSE)])-3*smat[nbsteps]-smat[1])*(s.seq[nbsteps]-s.seq[1])/nbsteps/3 #calc the value of F((N-1)/2,1,(k+a)/2,R2)
return(list(dv=c(4*tmat[,c(FALSE,TRUE)]%*%smat[c(FALSE,TRUE)]+2*tmat[,c(TRUE,FALSE)]%*%smat[c(TRUE,FALSE)]-3*tmat[,nbsteps]*smat[nbsteps]-tmat[,1]*smat[1])*(s.seq[nbsteps]-s.seq[1])/nbsteps/3, ic=intconst))
#return the estimated density and a normalization constant
#is done in parts because it may be recomposed later
}
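# sss() applies a composite Simpson-type rule (alternating weights 4 and 2 with
# endpoint corrections) jointly to the t-density mixture and its normalizing constant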
if (pips[ix]==0) {
reslist=list(x=numeric(n),y=numeric(n),n=n,call=sys.call(),data.name=names(nameix)[ix],has.na=FALSE); class(reslist)=c("density", "coef.density")
return(reslist)
}
#the x vector
lbound=min(betas[ix,as.logical(bools[ix,])])-3*Eball[ix,2]; ubound=max(betas[ix,as.logical(bools[ix,])])+3*Eball[ix,2];
seqs=seq(lbound,ubound,(ubound-lbound)/(n-1))
densvec=numeric(length(seqs))
#loop through models and calc post dens
for (m in 1:length(pmps)) {
if (bools[ix,m]) {
if (ishyper) {
ixadj=sum(bools[1:ix,m])
bhati=olsmodels[[m]]$bhat[[ixadj]]; invdiagi=olsmodels[[m]]$diag.inverse[[ixadj]]; k=sum(bools[,m]);
Esf=betas[ix,m]/bhati; z=1-olsmodels[[m]]$ymy/yty; midpoint=1-(1-Esf)*4
if (midpoint<.5) {
dvl=sss(.0001,.9999999,nbsteps*2); addvec=dvl$dv/dvl$ic
} else {
dvl1=sss(.0001,midpoint,nbsteps); dvl2=sss(midpoint,1,nbsteps)
addvec=(dvl1$dv+dvl2$dv)/(dvl1$ic+dvl2$ic)
}
} else {
addvec=dtcm(seqs,N-1,betas[ix,m],sigmadiag[ix,m])
}
densvec=densvec+pmps[m]*addvec
}
}
reslist=list(x=seqs,y=densvec,bw=NULL,n=n,call=sys.call(),data.name=names(nameix)[ix],has.na=FALSE); class(reslist)="density"
if (!doplot) { return(reslist) }
#plot stuff
main_default=paste("Marginal Density:",names(nameix)[ix],"(PIP",round(c(crossprod(pmps,bools[ix,]))*100,2),"%)")
if (any(grep("p",addons,ignore.case=TRUE))) {
decr=.12;
parplt=graphics::par()$plt;
parplt_temp=parplt; parplt_temp[4]=(1-decr)*parplt[4] +decr*parplt[3]; graphics::par(plt=parplt_temp)
main_temp=main_default; main_default=NULL
# graphics::layout(1:2,heights=c(.1,1))
# opm=par()$mar
# par(mar=c(0,opm[2],1,opm[4]))
# plot(0,type="n",xlim=0:1,ylim=0:1,xaxt="n",yaxt="n",bty="n",xlab="",ylab="")
# mtext("PIP",side=2,las=2)
# rect(0,0,1,1,col=8)
# rect(0,0,pips[ix],1,col=9)
# par(mar=opm)
}
dotargs=.adjustdots(dotargs,type="l", col="steelblue4", main=main_default,
xlab=if (std.coefs) "Standardized Coefficient" else "Coefficient", ylab = "Density");
# if (!is.element('type',names(dotargs))) { dotargs$type <- "l" }
# if (!is.element('col',names(dotargs))) { dotargs$col <- "steelblue4" }
# if (!is.element('main',names(dotargs))) { dotargs$main=paste("Marginal Density:",names(nameix)[ix],"(PIP",round(c(crossprod(pmps,bools[ix,]))*100,2),"%)") }
# if (!is.element('xlab',names(dotargs))) { dotargs$xlab <- if (std.coefs) "Standardized Coefficient" else "Coefficient" }
# if (!is.element('ylab',names(dotargs))) { dotargs$ylab <- "Density" }
#
eval(as.call(c(list(as.name("plot"),x=as.name("seqs"),y=as.name("densvec")),as.list(dotargs))))
leg.col=numeric(0);leg.lty=numeric(0); leg.legend=character(0)
if (any(grep("g",addons,ignore.case=TRUE))) { # grid
graphics::grid()
}
if (any(grep("b",addons,ignore.case=TRUE))) { # post exp values of the individual models
for (m in 1:length(pmps)) {
Ebm=betas[ix,m] ;
if (as.logical(Ebm)) {
Ebheight=min(densvec[max(sum(seqs<Ebm),1)],densvec[sum(seqs<Ebm)+1])
graphics::lines(x=rep(Ebm,2),y=c(0,Ebheight),col=8)
}
}
leg.col=c(leg.col,8);leg.lty=c(leg.lty,1);leg.legend=c(leg.legend,"EV Models")
}
if (any(grep("e",addons,ignore.case=FALSE))) { # posterior mean
graphics::abline(v=Eball[ix,1],col=2,lwd=addons.lwd)
leg.col=c(leg.col,2);leg.lty=c(leg.lty,1); leg.legend=c(leg.legend,"Cond. EV")
}
if (any(grep("s",addons,ignore.case=FALSE))) { # posterior SD bounds
graphics::abline(v=Eball[ix,1]-2*Eball[ix,2],col=2,lty=2,lwd=addons.lwd)
graphics::abline(v=Eball[ix,1]+2*Eball[ix,2],col=2,lty=2,lwd=addons.lwd)
leg.col=c(leg.col,2);leg.lty=c(leg.lty,2);leg.legend=c(leg.legend,"2x Cond. SD")
}
if (any(grep("m",addons,ignore.case=TRUE))) { # posterior median
median_index=sum(cumsum(densvec)<sum(densvec)/2)
graphics::abline(v=(seqs[median_index]+seqs[median_index+1])/2,col=3,lwd=addons.lwd)
leg.col=c(leg.col,3);leg.lty=c(leg.lty,1);leg.legend=c(leg.legend,"Median")
}
if (any(grep("z",addons,ignore.case=TRUE))) { #zero line
graphics::abline(h=0,col="gray",lwd=addons.lwd)
}
if (any(grep("E",addons,ignore.case=FALSE))) { #post exp value of MCMC results (see estimates.bma(,exact=F)
graphics::abline(v=Eb1.mcmc[ix],col=4,lwd=addons.lwd)
leg.col=c(leg.col,4);leg.lty=c(leg.lty,1); leg.legend=c(leg.legend,"Cond. EV (MCMC)")
}
if (any(grep("S",addons,ignore.case=FALSE))) { #2 times post SD of MCMC results (see estimates.bma(,exact=F)
graphics::abline(v=Eb1.mcmc[ix]-2*Ebsd.mcmc[ix],col=4,lty=2,lwd=addons.lwd)
graphics::abline(v=Eb1.mcmc[ix]+2*Ebsd.mcmc[ix],col=4,lty=2,lwd=addons.lwd)
leg.col=c(leg.col,4);leg.lty=c(leg.lty,2); leg.legend=c(leg.legend,"2x SD (MCMC)")
}
if (any(grep("l",addons,ignore.case=TRUE))&(length(leg.col)>0)) { #legend
leg.pos="topright"; if (Eball[ix,1]>seqs[floor(n/2)]) leg.pos="topleft";
graphics::legend(x=leg.pos,lty=leg.lty,col=leg.col,legend=leg.legend,box.lwd=0,bty="n",lwd=addons.lwd)
}
if (any(grep("p",addons,ignore.case=TRUE))) {
pusr=graphics::par()$usr
graphics::rect(pusr[1],pusr[4]*(1+decr*.2), pusr[2], pusr[4]*(1+decr),xpd=TRUE,col=8)
graphics::rect(pusr[1],pusr[4]*(1+decr*.2), pips[ix]*pusr[2]+(1-pips[ix])*pusr[1], pusr[4]*(1+decr),xpd=TRUE,col=9)
graphics::mtext("PIP:",side=2, las=2,line=1, at=pusr[4]*(1+decr*.6))
graphics::par(plt=parplt)
graphics::title(main_temp)
}
return(reslist)
}
densres=list()
oldask=graphics::par()$ask
plots=0
for (vbl in 1:length(reg)) {
doplot=(if (as.logical(pips[reg[vbl]])) plot else FALSE)
plots=plots+doplot
if (plots==2) {graphics::par(ask=TRUE)}
densres[[nameix[vbl]]]=plotndens(reg[vbl],doplot)
densres[[nameix[vbl]]]$call=sys.call() #call("density.bma",bmao=bmao,reg=reg,n=300,hnbsteps=30)
}
graphics::par(ask=oldask)
if (length(densres)==1) densres=densres[[1]] else class(densres) = c("coef.density",class(densres))
if (!plot) return(densres)
if (plot&(plots==0)) {warning("No plot produced as PIPs of provided variables are zero under 'exact' estimation.")}
return(invisible(densres))
}
#' Posterior Density of the Shrinkage Factor
#'
#' Calculates the mixture marginal posterior density for the shrinkage factor
#' (g/(1+g)) from a BMA object under the hyper-g prior and plots it
#'
#' The function \code{gdensity} estimates and plots the posterior density for
#' the shrinkage factor \eqn{g/(1+g)}\cr This is evidently only possible if the
#' shrinkage factor is not fixed, i.e. if the bma object \code{x} was estimated
#' with a hyper-g prior - cf. argument \code{g} in \code{\link{bms}}\cr The
#' density is based only on the best models retained in the bma object
#' \code{x}, cf. argument \code{nmodel} in \code{\link{bms}}\cr A note on
#' argument \code{n}: The points at which the density is estimated start at
#' \eqn{max(0,E-5*SD)}, where \eqn{E} and \eqn{SD} are the expected value and
#' standard deviation of the shrinkage factor, respectively. For plotting the
#' entire domain \eqn{(0,1)} use \code{xlim=c(0,1)} as an argument for
#' \code{gdensity}.
#'
#' The argument \code{addons} specifies what additional information should be
#' added to the plot(s) via the low-level commands \code{\link{lines}} and
#' \code{\link{legend}}:\cr \code{"e"} for the posterior expected value (EV) of
#' the shrinkage factor,\cr \code{"s"} for 2 times posterior standard deviation
#' (SD) bounds,\cr \code{"m"} for the posterior median,\cr \code{"f"} for
#' posterior expected values of the individual models over which the density
#' is averaged,\cr \code{"z"} for a zero line, \code{"l"} for including a
#' \code{\link{legend}}\cr The following two are only possible if the bma
#' object collected statistics on shrinkage, cf. argument \code{g.stats} in
#' \code{\link{bms}} \code{"E"} for posterior expected value under MCMC
#' frequencies (see argument \code{exact} in \code{\link{coef.bma}}),\cr
#' \code{"S"} for the corresponding 2 times standard deviation bounds
#' (MCMC),\cr
#'
#' Any combination of these letters will give the desired result. Use
#' \code{addons=""} for not using any of these.
#'
#' @param x A bma object (see \code{\link{bms}}).
#' @param n The integer number of equally spaced points at which the density is
#' to be estimated. see 'Details' below
#' @param addons character, defaulting to \code{"zles"}. Specifies which
#' additional information should be added to the plot via low-level commands
#' (see 'Details' below).
#' @param plot logical. If \code{TRUE} (default), the density is plotted; if
#' \code{FALSE} then \code{gdensity} only returns the estimated posterior
#' density without plotting.
#' @param addons.lwd scalar, default 1.5. Line width to be used for the
#' low-level plotting commands specified by \code{addons}. Cf. argument
#' \code{lwd} in \code{\link{par}}
#' @param \dots Additional arguments for \code{\link{plot.default}} with
#' sensible defaults
#' @return \code{gdensity} returns an object of the class \code{\link{density}}
#' detailing the posterior mixture density of the shrinkage factor.
#' @note The computed marginal posterior density is a Bayesian Model Averaging
#' mixture of the marginal posterior densities of the shrinkage factor under
#' individual models. The accuracy of the result therefore depends on the
#' number of 'best' models contained in \code{x} (cf. argument \code{nmodel} in
#' \code{\link{bms}}).
#'
#' Correspondingly, the posterior EV and SD specified by \code{addons="es"} are
#' based on 'best' model likelihoods ('exact') and are conditional on
#' inclusion.
#'
#' The low-level commands enacted by the argument \code{addons} rely on colors
#' of the \code{\link{palette}}: color 2 for \code{"e"} and \code{"s"}, color 3
#' for \code{"m"}, color 8 for \code{"f"}, color 4 for \code{"E"} and
#' \code{"S"}. The default colors may be changed by a call to
#' \code{\link{palette}}.
#'
#' @seealso \code{\link{density.bma}} for computing coefficient densities,
#' \code{\link{bms}} for creating bma objects, \code{\link{density}} for the
#' general method
#'
#' Check \url{http://bms.zeugner.eu} for additional help.
#' @keywords aplot utilities
#' @examples
#'
#'
#' data(datafls)
#' mm=bms(datafls,g="hyper=UIP")
#'
#' gdensity(mm) # default plotting
#'
#' # the grey bars represent expected shrinkage factors of the individual models
#' gdensity(mm,addons="lzfes")
#'
#' #plotting the median 'm' and the posterior mean and bounds based on MCMC results:
#' gdensity(mm,addons="zSEm",addons.lwd=2)
#'
#' # plot the posterior shrinkage density only for the very best model
#' gdensity(mm[1],addons="esz")
#'
#'
#' #using the calculated density for other purposes...
#' dd=gdensity(mm,plot=FALSE)
#' plot(dd)
#'
#' @export
gdensity <- function(x,n=512,plot=TRUE,addons="zles",addons.lwd=1.5,...) { #main="Posterior Density of the Shrinkage Factor",type="l",xlab="Shrinkage factor",ylab="Density",col="steelblue4"
# plots posterior density of shrinkage factor for hyper-g bma objects
# INPUTS:
# x: bma object
# n: number of equally spaced points where density is computed
# plot: whether to do a plot in addition or not
# addons: low-level plot commands: draw "z"=zero line, "l"=legend, "e"=exact exp. val, "s"=2x St dev bounds (exact), "E"=MCMC exp. val., "S"=MCMC SD bounds, "f"=shrinkage exp. vals for individual models
# addons.lwd: line width for the addons
# ... : arguments passed on to plot.default
dsgivenykernel <- function(kpazvec,sf,N) {
#the post. density of the shrinkage factor f(s|Y)*F((N-1)/2,1,(k+a)/2,R2)
#kpazvec is a vector with two elements: first element is k+a, second is z (the R-squared)
#sf: a vector of shrinkage values
#N: sample size
(kpazvec[[1]]-2)/2*(1-sf)^((kpazvec[[1]]-4)/2)*(1-sf*kpazvec[[2]])^(-(N-1)/2)
#mode is at sf=(N-k-a+3)/(N-1-z*(k+a-4))
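#note: this kernel is unnormalized; it is normalized numerically further below (cf. 'intconsts')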
}
#user checks
if (!is.bma(x)) stop("argument needs to an object of class 'bma'")
if (!(x$gprior$gtype=="hyper")) stop("g prior density makes only sense for hyper-g prior.")
if (n<2) stop("n needs to be at least 2")
n=floor(n); #if (!(n%%2)) {n=n+1}
dotargs = match.call(expand.dots=FALSE)$...
#extract info
N=x$info$N; K=x$info$K
tm=x$topmod
bools=tm$bool_binary()
betas=tm$betas()
betas2=tm$betas2()
smoments = tm$fixed_vector()
#re-compute the R-squareds and other stuff for the topmodels
yXdata=as.matrix(x$arguments$X.data); yXdata=yXdata-matrix(colMeans(yXdata),N,K+1,byrow=TRUE)
yty=c(crossprod(yXdata[,1]))
positions=lapply(lapply(as.list(as.data.frame(bools)),as.logical),which) #vector of who is in where
ymyvec=unlist(lapply(lapply(positions,.ols.terms2,yty=yty,N=N,K=K,XtX.big=crossprod(yXdata[,-1]),Xty.big=c(crossprod(yXdata[,-1],yXdata[,1]))),function (x) x$full.results()$ymy)) # vector of SSResid
kvec=tm$kvec_raw() #vector of parameter number
zvec=1-ymyvec/yty #vector of r.-squared
pmpexact=pmp.bma(x,oldstyle=TRUE)[,1] #vector of 'exact' posterior model probs
f21a=x$gprior.info$hyper.parameter #hyper parameter
if (length(smoments)==0) { #if not there, re-compute individual moments for models
#lprob=.lprob.hyperg.init(N=N,K=K,yty=yty,f21a=f21a, return.gmoments=TRUE)
lprob = x$gprior.info$lprobcalc
smoments = sapply(lapply(as.list(as.data.frame(rbind(kvec,ymyvec))),function(x) lprob$lprob.all(ymy=x[2],k=x[1],bhat=numeric(x[1]),diag.inverse=rep(1,x[1]))),"[[","otherstats")
}
Es=c(crossprod(smoments[1,],pmpexact)) #exp val
Es2=c(crossprod(smoments[2,],pmpexact))
Esd = sqrt(Es2-Es^2) #st dev
nbsteps=n
cutoff=max(0,Es-5*Esd) #this is to concentrate on where the mass is
sdiff=(1-cutoff)/(nbsteps+1)
s.seq=seq(sdiff+cutoff,cutoff+nbsteps*sdiff,sdiff)
sdensl=lapply(as.list(as.data.frame(rbind(kvec+f21a,zvec))),dsgivenykernel,sf=s.seq,N=N)
intconsts=lapply(lapply(sdensl,sum),"*",sdiff) # a crude numerical integration to save time
sdensvecs=mapply("/",sdensl,intconsts) #normalize by integration constants
sdens=sdensvecs%*%pmpexact #mixture density
reslist = list(x=s.seq,y=sdens,bw=NULL,n=n,call=sys.call(),data.name="Shrinkage",has.na=FALSE);
class(reslist)="density"
if (!plot) {return(reslist)}
##### PLOTTING #########################
#Main plot
dotargs=.adjustdots(dotargs,ylab="Density", xlab = "Shrinkage factor", main = "Posterior Density of the Shrinkage Factor", type="l", col="steelblue4")
eval(as.call(c(list(as.name("plot"),as.name("s.seq"),as.name("sdens")),as.list(dotargs))))
#preparing stuff for addons
leg.col=numeric(0);leg.lty=numeric(0); leg.legend=character(0)
if (any(grep("f",addons,ignore.case=TRUE))) { # post exp values of the individual models
for (m in 1:length(pmpexact)) {
Esm=smoments[1,m] ;
if (as.logical(Esm)) {
ixlower=max(sum(s.seq<Esm),1)
Esheight=(sdens[ixlower+1]-sdens[ixlower])*(Esm-s.seq[ixlower])+sdens[ixlower]
graphics::lines(x=rep(Esm,2),y=c(0,Esheight),col=8,lwd=addons.lwd)
}
}
leg.col=c(leg.col,8);leg.lty=c(leg.lty,1);leg.legend=c(leg.legend,"EV Models")
}
if (any(grep("e",addons,ignore.case=FALSE))) { # posterior mean
graphics::abline(v=Es,col=2,lwd=addons.lwd)
leg.col=c(leg.col,2);leg.lty=c(leg.lty,1); leg.legend=c(leg.legend,"EV")
}
if (any(grep("s",addons,ignore.case=FALSE))) { # posterior SD bounds
if (!(Es-2*Esd)<0) graphics::abline(v=Es-2*Esd,col=2,lty=2,lwd=addons.lwd)
if (!(Es+2*Esd)>1) graphics::abline(v=Es+2*Esd,col=2,lty=2,lwd=addons.lwd)
leg.col=c(leg.col,2);leg.lty=c(leg.lty,2);leg.legend=c(leg.legend,"2x SD")
}
if (any(grep("m",addons,ignore.case=TRUE))) { # posterior median
median_index=sum(cumsum(sdens)<sum(sdens)/2)
graphics::abline(v=(s.seq[median_index]+s.seq[median_index+1])/2,col=3,lwd=addons.lwd)
leg.col=c(leg.col,3);leg.lty=c(leg.lty,1);leg.legend=c(leg.legend,"Median")
}
if (any(grep("z",addons,ignore.case=TRUE))) { #zero line
graphics::abline(h=0,col="gray",lwd=addons.lwd)
}
if (any(grep("E",addons,ignore.case=FALSE))) { #post exp value of MCMC results (see estimates.bma(,exact=F)
if (all(x$gprior.info$shrinkage.moments==0)) warning("bma object needs to contain posterior g statistics - cf. argument 'g.stats' in 'help(bms)'") else {
graphics::abline(v=x$gprior.info$shrinkage.moments[1],col=4,lwd=addons.lwd)
leg.col=c(leg.col,4);leg.lty=c(leg.lty,1); leg.legend=c(leg.legend,"EV (MCMC)")
}
}
if (any(grep("S",addons,ignore.case=FALSE))) { #2 times post SD of MCMC results (see estimates.bma(,exact=F)
if (!all(x$gprior.info$shrinkage.moments==0)) {
ES=x$gprior.info$shrinkage.moments[1]; SDs=sqrt(x$gprior.info$shrinkage.moments[2]-x$gprior.info$shrinkage.moments[1]^2)
if (ES-2*SDs>0) graphics::abline(v=ES-2*SDs,col=4,lty=2,lwd=addons.lwd)
if (ES+2*SDs<1) graphics::abline(v=ES+2*SDs,col=4,lty=2,lwd=addons.lwd)
leg.col=c(leg.col,4);leg.lty=c(leg.lty,2); leg.legend=c(leg.legend,"2x SD (MCMC)")
}
}
if (any(grep("l",addons,ignore.case=TRUE))&(length(leg.col)>0)) { #legend
leg.pos="topleft";
graphics::legend(x=leg.pos,lty=leg.lty,col=leg.col,legend=leg.legend,box.lwd=0,bty="n")
}
return(invisible(reslist))
}
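# Usage sketch (an illustration; assumes a hyper-g bma object 'mm' as in the examples above):
#   dd = gdensity(mm, plot=FALSE)              # 'density' object for g/(1+g)
#   quantile.density(dd, probs=c(.05,.5,.95))  # quantiles of the shrinkage factor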
#' @rdname quantile.pred.density
#' @export
quantile.density = function(x, probs=seq(.25,.75,.25), names=TRUE, normalize=TRUE, ...) {
# a generic function for objects of class density or lists whose elements are densities
# the actual subfunction for object of class "density"
my.quantile.density = function(x, probs, names, normalize, ...) {
ycs=(cumsum(x$y)-(x$y-x$y[[1]])/2)*diff(x$x[1:2])
if (normalize) ycs=ycs/(ycs[[length(ycs)]])
xin=x$x; maxi=length(ycs)
qqs=sapply(as.list(probs), function(qu) {iii=sum(ycs<=qu); if (iii==maxi) return(Inf) else if (iii==0L) return(-Inf) else { return(
xin[[iii+1]] + ( (ycs[[iii+1]]-qu)/(ycs[[iii+1]]-ycs[[iii]]) ) *(xin[[iii]]-xin[[iii+1]]) )
}})
if (as.logical(names)) names(qqs)= paste(format(100 * probs, trim = TRUE, digits = max(2L, getOption("digits"))), "%", sep = "")#paste(signif(probs,5),"%",sep="")
return(qqs)
}
# user checks
probs=as.vector(probs)
if (is.element("density",class(x))) return(my.quantile.density(x=x, probs=probs, names=names, normalize=normalize))
if (!all(sapply(x,function(dd) is.element("density",class(dd))))) stop("x needs to be a density or list of densities")
if (length(x)==1L) return(my.quantile.density(x=x[[1]], probs=probs, names=names, normalize=normalize))
#combining a list of densities
qout=sapply(x, my.quantile.density, probs=probs, names=FALSE, normalize=normalize)
#formatting the output into a matrix
if (!is.matrix(qout)) { #some formatting
if (length(probs)>1) return(qout)
qout=as.matrix(qout)
} else qout=t(qout)
if (as.logical(names)) colnames(qout)= paste(format(100 * probs, trim = TRUE, digits = max(2L, getOption("digits"))), "%", sep = "")
return(qout)
}
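# Illustration of the inverse-CDF logic above (a sketch, not additional package API):
# a trapezoid-type cumulative sum over the density grid is inverted by linear interpolation:
#   dd = density(rnorm(1000))
#   ycs = (cumsum(dd$y) - (dd$y - dd$y[1])/2) * diff(dd$x[1:2])  # approximate CDF values
#   quantile.density(dd, probs=.5)  # approximately the sample median, up to smoothing error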
.quantile.density=quantile.density
#' @rdname quantile.pred.density
#' @export
quantile.coef.density = function(x, probs=seq(.25,.75,.25), names=TRUE, ...) {
#customizing quantile.density to stuff resulting from density.bma
quout= .quantile.density(x, probs=probs, names=names, normalize=TRUE)
if (is.matrix(quout)&&as.logical(names)) rownames(quout) <- sapply(x, function(lx) lx[["data.name"]])
return(quout)
}
#' Extract Quantiles from 'density' Objects
#'
#' Quantiles for objects of class "density", "pred.density" or "coef.density"
#'
#' The methods \code{quantile.coef.density} and \code{quantile.pred.density}
#' both apply \code{quantile.density} to densities nested within objects of
#' class \code{coef.density} or \code{pred.density}.\cr The function
#' \code{quantile.density} applies generically to the built-in class
#' \code{\link{density}} (at least for versions where there is no such method
#' in the pre-configured packages).\cr Note that \code{quantile.density} relies
#' on trapezoidal integration in order to compute the cumulative densities
#' necessary for the calculation of quantiles.
#'
#' @aliases quantile.pred.density quantile.coef.density quantile.density
#' @param x an object of class \code{\link{pred.density}}, \code{coef.density},
#' \code{\link{density}}, or a list of densities.
#' @param probs numeric vector of probabilities with values in [0,1] - elements
#' very close to the boundaries return \code{Inf} or \code{-Inf}
#' @param names logical; if \code{TRUE}, the result has a \code{names}
#' attribute, resp. \code{rownames} and \code{colnames} attributes. Set to
#' \code{FALSE} for speedup with many probs.
#' @param normalize logical; if \code{TRUE} then the values in \code{x$y} are
#' multiplied with a factor such that their integral is equal to one.
#' @param \dots further arguments passed to or from other methods.
#' @return If \code{x} is of class \code{density} (or a list with exactly one
#' element), a vector with quantiles.\cr If \code{x} is a \code{\link{list}} of
#' densities with more than one element (e.g. as resulting from
#' \code{pred.density} or \code{coef.density}), then the output is a matrix of
#' quantiles, with each matrix row corresponding to the respective density.
#' @author Stefan Zeugner
#' @seealso \code{\link{quantile.default}} for a comparable function,
#' \code{\link{pred.density}} and \code{\link{density.bma}} for the
#' BMA-specific objects.
#'
#' Check \url{http://bms.zeugner.eu} for additional help.
#' @keywords utilities
#' @examples
#'
#' data(datafls)
#' mm = bms(datafls[1:70,], user.int=FALSE)
#'
#' #predict last two observations with preceding 70 obs:
#' pmm = pred.density(mm, newdata=datafls[71:72,], plot=FALSE)
#' #'standard error' quantiles
#' quantile(pmm, c(.05, .95))
#'
#' #Posterior density for Coefficient of "GDP60"
#' cmm = density(mm, reg="GDP60", plot=FALSE)
#' quantile(cmm, probs=c(.05, .95))
#'
#'
#' #application to generic density:
#' dd1 = density(rnorm(1000))
#' quantile(dd1)
#'
#'\dontrun{
#' #application to list of densities:
#' quantile.density( list(density(rnorm(1000)), density(rnorm(1000))) )
#' }
#'
#' @export
quantile.pred.density = function(x, probs=seq(.25,.75,.25), names=TRUE, ...) {
#customizing quantile.density to stuff resulting from pred.density
quout= .quantile.density(x$densities(), probs=probs, names=names, normalize=FALSE)
if (is.matrix(quout)&&as.logical(names)) rownames(quout) <- names(x$fit)
return(quout)
}
#' Plot Convergence of BMA Sampler
#'
#' Plots the posterior model probabilities based on 1) marginal likelihoods and
#' 2) MCMC frequencies for the best models in a 'bma' object and details the
#' sampler's convergence by their correlation
#'
#' A call to bms with an MCMC sampler (e.g.
#' \code{bms(datafls,mcmc="bd",nmodel=100)}) uses a Metropolis-Hastings
#' algorithm to sample through the model space: the frequency of how often
#' models are drawn converges to the distribution of their posterior marginal
#' likelihoods.\cr While sampling, each 'bma' object stores the best models
#' encountered by its sampling chain with their marginal likelihood and their
#' MCMC frequencies.\cr \code{plotConv} compares the MCMC frequencies to
#' marginal likelihoods, and thus visualizes how well the sampler has
#' converged.
#'
#' @param bmao an object of class 'bma' - see \code{\link{bms}}
#' @param include.legend whether to include a \code{\link{legend}} in the plot
#' @param add.grid whether to include a \code{\link{grid}} in the plot
#' @param \dots other parameters for \code{\link{matplot}}
#' @note \code{plotConv} is also used by \code{\link{plot.bma}}
#'
#' @seealso \code{\link{pmp.bma}} for posterior model probabilites based on the
#' two concepts, \code{\link{bms}} for creating objects of class 'bma'
#'
#' Check \url{http://bms.zeugner.eu} for additional help.
#' @keywords aplot
#' @examples
#'
#'
#' data(datafls)
#' mm=bms(datafls[,1:12],user.int=FALSE)
#'
#' plotConv(mm)
#'
#' #is similar to
#' matplot(pmp.bma(mm),type="l")
#'
#' @export
plotConv<-function(bmao,include.legend=TRUE,add.grid=TRUE,...){
#function that compares MCMC and exact PMP's for the models in bmao$topmod
# bmao: bma object
# include.legend: TRUE or FALSE
# the plot title defaults to "Posterior Model Probabilities (Corr: x)", where x is the correlation between exact PMPs and MCMC frequencies; override it via argument 'main' in '...'
# other parameters: cf. help(matplot)
if (!is.bma(bmao)) stop("submit an object of class bma")
# now get PMP analytical and MCMC
mat=pmp.bma(bmao,oldstyle=TRUE)
norm_const = sum(mat[,1])/sum(mat[,2])
mat=cbind(mat[,2]*norm_const,mat[,1])
if (length(bmao$topmod$lik())==0L) {
stop("plotConv needs at least one model stored in topmod in order to produce a plot")
}
cor.pmp=format(round(.cor.topmod(bmao$topmod),4),nsmall=4)
dotargs = match.call(graphics::plot,expand.dots=FALSE)$...
dotargs=.adjustdots(dotargs, lwd=2,main=paste("Posterior Model Probabilities\n(Corr: ",cor.pmp,")",sep=""),lty=1,col=c("steelblue3","tomato"),cex.main=0.8,xlab="Index of Models",ylab="",type="l")
eval(as.call(c(list(as.name("matplot"),as.name("mat")),as.list(dotargs))))
# if (is.null(main)) main=paste("Posterior Model Probabilities\n(Corr: ",cor.pmp,")",sep="")
# matplot(mat,type="l",lty=lty,col=col,lwd=lwd,main=main,xlab=xlab,cex.main=cex.main,ylab=ylab,...)
if (as.logical(add.grid)) graphics::grid()
if (as.logical(include.legend)) graphics::legend("topright",lty=eval(dotargs$lty),legend=c("PMP (MCMC)", "PMP (Exact)"),col=eval(dotargs$col),ncol=2,bty="n",cex=1,lwd=eval(dotargs$lwd));
}
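# Sketch of the underlying comparison (an illustration; assumes 'mm' from bms()):
#   mat = pmp.bma(mm, oldstyle=TRUE)  # col 1: likelihood-based PMPs, col 2: MCMC frequencies
#   cor(mat[,1], mat[,2])             # essentially the correlation shown in the plot title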
#' Compare Two or More bma Objects
#'
#' Plots a comparison of posterior inclusion probabilities, coefficients or
#' their standard deviation between various bma objects
#'
#'
#' @param \dots one or more objects of class 'bma' to be compared.
#' \code{plotComp} passes on any other parameters in \code{\dots{}} to
#' \code{\link{matplot}}.
#' @param varNr optionally, covariate indices to be included in the plot, can
#' be either integer vector or character vector - see examples
#' @param comp a character denoting what should be compared: \code{comp="PIP"}
#' (default) for posterior inclusion probabilities, \code{comp="Post Mean"} for
#' coefficients, \code{comp="Post SD"} for their standard deviations,
#' \code{comp="Std Mean"} or standardized coefficients, or \code{comp="Std SD"}
#' for standardized standard deviations
#' @param exact if \code{FALSE}, the statistics to be compared are based on
#' aggregate bma statistics, if \code{TRUE}, they are based solely on the best
#' models retained in the bma objects
#' @param include.legend whether to include a default legend in the plot
#' (custom legends can be added with the command \code{\link{legend}})
#' @param add.grid whether to add a \code{\link{grid}} to the plot
#' @param do.par whether to adjust \code{par("mar")} in order to fit in the
#' tick labels on the x-axis
#' @param cex.xaxis font size scaling parameter for the x-axis - cf. argument
#' \code{cex.axis} in \code{\link{par}}
#'
#' @seealso \code{\link{coef.bma}} for the underlying function
#'
#' Check \url{http://bms.zeugner.eu} for additional help.
#' @keywords hplot
#' @examples
#'
#' ## sample two simple bma objects
#' data(datafls)
#' mm1=bms(datafls[,1:15])
#' mm2=bms(datafls[,1:15])
#'
#' #compare PIPs
#' plotComp(mm1,mm2)
#'
#' #compare standardized coefficients
#' plotComp(mm1,mm2,comp="Std Mean")
#'
#' #...based on the likelihoods of the best models
#' plotComp(mm1,mm2,comp="Std Mean",exact=TRUE)
#'
#' #plot only PIPs for first four covariates
#' plotComp(mm1,mm2,varNr=1:4, col=c("black","red"))
#'
#' #plot only coefficients for covariates 'GDP60 ' and 'LifeExp'
#' plotComp(mm1,mm2,varNr=c("GDP60", "LifeExp"),comp="Post Mean")
#'
#'
#'
#' @export
plotComp <-function(...,varNr=NULL,comp="PIP",exact=FALSE,include.legend=TRUE,add.grid=TRUE,do.par=TRUE,cex.xaxis=0.8) { #,main=NULL,type="p",lty=1:5, lwd=1.5, pch=NULL,col=NULL,cex=NULL,bg=NA,xlab="",ylab=NULL){
# this plot compares results from different bma specifications, for example different W matrices
# for a SAR bma, or a newly coded routine whose results are to be checked against those
# of an established one.
# In case you plug in results for different sets of regressors (e.g. once with interaction
# sampling of some interacted variables, once without), the plot only considers the
# coefficients / SDs for the set of variables contained in all specifications; so you can
# answer questions like: if I include this set of variables, how does that impact the remaining coefficients?
# Submit the bma objects to be compared via '...'; varNr selects the variables to appear in the plot,
# comp is one of "PIP", "Post Mean", "Post SD", "Std Mean" or "Std SD" (standardized moments);
# names assigned in the call (e.g. plotComp(newModel=mm1, oldModel=mm2)) appear in the legend,
# and cex.xaxis scales the labeling of the regressor names on the x-axis
col_default =c("#1B9E77", "#D95F02", "#7570B3", "#E7298A", "#66A61E" ,"#E6AB02", "#A6761D", "#666666")
#bmaList = list(...)
bmaList=list(...); bmaix=sapply(bmaList,is.bma); bmaList=bmaList[bmaix]
#user check:
bmaNr=length(bmaList)
if(!all(sapply(bmaList,is.bma))){
stop("Submit only bma objects to compare results (no other objects)")
}
dotargs = match.call(expand.dots=FALSE)$...;
dotargs=.adjustdots(dotargs,ylab=paste(comp),pch=1:bmaNr,col=col_default,type="p",lty=1:5,lwd=1.5,xlab="",xaxt="n")
dotargs=dotargs[!c(bmaix,logical(length(dotargs)-length(bmaix)))]
# take care of name order and check whether we compare results for the same set of variables
xMat=lapply(bmaList,function(x) rownames(estimates.bma(x,exact=exact)))
xNames=xMat[[1]]
ind=as.numeric(unlist(lapply(xMat,function(x) length(x))))
# in case we do not have the same set of vars in each submitted specification
if(length(unique(ind))>1){
smallestSet=which.min(ind)
indMat=array(0:0,dim=c(length(xMat[[smallestSet]]),length(ind)))
for(i in 1:length(ind)){
indMat[,i]=as.numeric(xMat[[smallestSet]] %in% xMat[[i]])
}
xNamesInd=which(rowSums(indMat)==bmaNr)
xNames=xMat[[smallestSet]][xNamesInd]
}
compNames=c(colnames(estimates.bma(bmaList[[1]])),"Std Mean", "Std SD")
if(is.null(xNames)){
stop("the bma objects have to have (the same) rownames attached to them")
}
if(!(comp %in% compNames)){
stop("Please specify comp as one of PIP, Post Mean, Post SD, Std Mean, or Std Coef")
}
if(comp=="Std Mean"){
compMatrix=sapply(bmaList,function(x) estimates.bma(x,std.coefs=TRUE,exact=exact)[xNames,"Post Mean"])
comp="Standardized Coefficients"
} else if (comp=="Std SD") {
compMatrix=sapply(bmaList,function(x) estimates.bma(x,std.coefs=TRUE,exact=exact)[xNames,"Post SD"])
comp="Standardized SD"
} else{
compMatrix=sapply(bmaList,function(x) estimates.bma(x,exact=exact)[xNames,comp])
}
bmaNames=names(list(...))[bmaix]
colnames(compMatrix)=paste("Model", 1:bmaNr)
if(!is.null(bmaNames) && (length(bmaNames)==ncol(compMatrix))) {
for (bix in 1:bmaNr) {
colnames(compMatrix)[[bix]] <- ifelse(bmaNames[[bix]]=="",paste("Model", bix), bmaNames[[bix]])
}
}
# in case you do not want to plot the whole stuff but only the first varNr regressors
if(!is.null(varNr)){
compMatrix=compMatrix[varNr,,drop=FALSE]
}
# do the plot ####################
if (as.logical(do.par)) {
oldmar=graphics::par()$mar
spaceforxaxis=graphics::strwidth(rownames(compMatrix)[which.max(nchar(rownames(compMatrix)))],units="inches", cex=cex.xaxis)*(graphics::par("mar")/graphics::par("mai"))[[2]]
tempmar=oldmar; tempmar[1]=min(max(oldmar[1],spaceforxaxis+oldmar[1]/3), .5*graphics::par("fin")[[2]]*(graphics::par("mar")/graphics::par("mai"))[[1]])
graphics::par(mar=tempmar)
}
eval(as.call(c(list(as.name("matplot"),as.name("compMatrix")),as.list(dotargs))))
#matplot(compMatrix,main=main,type=type,col=col,cex=cex,bg=bg,ylab=ylab,xlab=xlab,xaxt="n",pch=pch,lty=lty,lwd=lwd)
if (as.logical(include.legend)) {
extractfromdotargs=function(...) { dal=list(...); return(list(col=dal$col,pch=dal$pch)) }
myargs=eval(as.call(c(list(as.name("extractfromdotargs")),as.list(dotargs))))
graphics::legend("topright", colnames(compMatrix),pch=myargs$pch,col=myargs$col,bty="n")
}
if (as.logical(add.grid)) graphics::grid()
graphics::axis(1, las=2, at = 1:nrow(compMatrix), labels = rownames(compMatrix),cex.axis=cex.xaxis)
#graphics::layout(matrix(1))
if (as.logical(do.par)) graphics::par(mar=oldmar)
}
#' Plot Posterior Model Size and Model Probabilities
#'
#' Produces a combined plot: upper row shows prior and posterior model size
#' distribution, lower row shows posterior model probabilities for the best
#' models
#'
#'
#' @param x an object of class 'bma'
#' @param \dots additional arguments for \code{\link{matplot}}
#' @return combines the plotting functions \code{\link{plotModelsize}} and
#' \code{\link{plotConv}}
#' @note The upper plot shows the prior and posterior distribution of model
#' sizes (\code{\link{plotModelsize}}).\cr The lower plot is an indicator of
#' how well the bma object has converged (\code{\link{plotConv}}).
#' @seealso \code{\link{plotModelsize}} and \code{\link{plotConv}}
#'
#' Check \url{http://bms.zeugner.eu} for additional help.
#' @keywords hplot
#' @examples
#'
#' data(datafls)
#' mm=bms(datafls,user.int=FALSE)
#'
#' plot(mm)
#' @export
plot.bma <-function(x,...) {
# does a combined plot of plotConv and plotModelsize for bma object bmao
# bmao: bma object
# ... passes on parameters to these two functions
if (!is.bma(x)) stop("Need to provide object of class 'bma'!")
if (x$arguments$nmodel<3) {
try(plotModelsize(x,...),silent=TRUE)
} else {
graphics::layout(matrix(1:2,2,1))
try(plotModelsize(x,...),silent=TRUE)
try(plotConv(x,...),silent=TRUE)
graphics::layout(1)
}
}
#' Plot Signs of Best Models
#'
#' Plots a grid with signs and inclusion of coefficients vs. posterior model
#' probabilities for the best models in a 'bma' object:
#'
#' Under default settings, blue corresponds to positive sign, red to a negative
#' sign, white to non-inclusion.
#'
#' @param x a list of class bma (cf. \code{\link{bms}} for further details)
#' @param yprop2pip if \code{yprop2pip=TRUE} then the grid lines on the
#' vertical axis are scaled according to the coefficients' inclusion
#' probabilities.\cr If \code{yprop2pip=FALSE} (default) then the grid lines on
#' the vertical axis are equidistant.
#' @param order.by.pip with \code{order.by.pip=TRUE} (default), coefficients
#' are sorted according to their posterior inclusion probabilities along the
#' vertical axis. If \code{order.by.pip=FALSE} they are ordered as they were
#' provided to \code{\link{bms}}.
#' @param do.par Defaults to \code{do.par=TRUE}, which adjusts
#' \code{\link{par}()$mar} for optimal positioning. Set \code{do.par=FALSE} for
#' customizing \code{par} yourself.
#' @param do.grid \code{do.grid=TRUE} (default) plots grid lines among the
#' chart's boxes, akin to the low level command \code{\link{grid}}.
#' \code{do.grid=FALSE} omits the grid lines.
#' @param do.axis \code{do.axis=TRUE} (default) plots axis tick marks and
#' labels (cf. \code{\link{axis}}). \code{do.axis=FALSE} omits them.
#' @param cex.axis font size for the axes (cf. \code{\link{axis}}), defaults to
#' 1
#' @param \dots Parameters to be passed on to \code{\link{image.default}}.
#'
#' @seealso \link{coef.bma} for the coefficients in matrix form, \link{bms} for
#' creating 'bma' objects.
#'
#' Check \url{http://bms.zeugner.eu} for additional help.
#' @keywords hplot
#' @examples
#'
#' data(datafls)
#'
#' model=bms(datafls,nmodel=200)
#'
#' #plot all models
#' image(model,order.by.pip=FALSE)
#' image(model,order.by.pip=TRUE,cex.axis=.8)
#'
#' #plot best 7 models, with other colors
#' image(model[1:7],yprop2pip=TRUE,col=c("black","lightgrey"))
#'
#' @export
image.bma <- function(x,yprop2pip=FALSE,order.by.pip=TRUE,do.par=TRUE,do.grid=TRUE,do.axis=TRUE,cex.axis=1,...) { #,main=NULL,col=c("tomato","blue"),xlab="Cumulative Model Probabilities",ylab=""
#does a 'grid plot' of bmao's best models (contained in bmao$topmod)
#putting the variables' (on the y-axis) coefficient signs per model vs. the post. model probs (x-axis)
# bmao: bma object
# yprop2pip: if TRUE, then horizontal grid lines (on the y axis) are proportional to the variables' PIPs; if FALSE, they are equidistant
# do.par: if TRUE adjust par temporarily to fit labels properly; if FALSE, please take care of the par()$mar parameter yourself
# do.grid: if TRUE, highlights the boundaries between coefficient areas via the grid() function; if FALSE, it does not
# do.axis: if TRUE, plots default axis tickmarks and labels; if FALSE, it does not (consider using axis() afterwards)
# cex.axis: denotes label font size (cf. ?axis )
# ... parameters passed on to image.default (Note: default colors are col=c(4,2), default axis labels are empty strings)
dotargs = match.call(expand.dots=FALSE)$...
ests=estimates.bma(x,exact=TRUE,order.by.pip=order.by.pip,include.constant=FALSE)
ests=ests[nrow(ests):1,]
pips=ests[,"PIP"]
idx=ests[,"Idx"]
pmp.res=pmp.bma(x,oldstyle=TRUE)
pmps=pmp.res[,1]
normali_factor=sum(pmp.res[,2])
betasigns=beta.draws.bma(x)[idx,,drop=FALSE]
betasigns=betasigns[as.logical(pips),]
betasigns=sign(betasigns)/2+.5
betasigns[betasigns==.5]=NA
pips=pips[as.logical(pips)]
if (yprop2pip) {
pipbounds=(c(0,cumsum(pips)))
} else {
pipbounds=0:length(pips)
names(pipbounds)=c("",names(pips))
}
pmpbounds=(c(0,cumsum(pmps)))
if (do.par) {
oldmar=graphics::par()$mar
spaceforyaxis=graphics::strwidth(names(pipbounds)[which.max(nchar(names(pipbounds)))],units="inches")*(graphics::par("mar")/graphics::par("mai"))[[2]]
tempmar=oldmar; tempmar[2]=min(spaceforyaxis+oldmar[2]/2, .5*graphics::par("fin")[[1]]*(graphics::par("mar")/graphics::par("mai"))[[2]])
graphics::par(mar=tempmar)
}
dotargs=.adjustdots(dotargs, ylab="", xlab="Cumulative Model Probabilities", col=c("tomato", "blue"), main = paste("Model Inclusion Based on Best ",length(pmps), " Models"))
dotargs$axes <- FALSE
# if (!is.element("ylab",names(dotargs))) { dotargs$ylab <- "" }
# if (!is.element("xlab",names(dotargs))) { dotargs$xlab <- "Cumulative Model Probabilities" }
# if (!is.element("col",names(dotargs))) { dotargs$col <- c("tomato", "blue") }
# if (!is.element("main",names(dotargs))) { dotargs$main <- paste("Model Inclusion Based on Best ",length(pmps), " Models") }
tbetasigns=t(betasigns)
eval(as.call(c(list(as.name("image.default"),as.name("pmpbounds"),as.name("pipbounds"),as.name("tbetasigns")),as.list(dotargs))))
#if (is.null(main)) { main=paste("Model Inclusion Based on Best ",length(pmps), " Models") }
#image.default(pmpbounds,pipbounds,t(betasigns),col=col,axes=FALSE,xlab=xlab,ylab=ylab,main=main,...)
if (do.axis) {
graphics::axis(1,at=pmpbounds, labels=round(normali_factor*pmpbounds,2),cex.axis=cex.axis)
graphics::axis(2,at=pipbounds,labels=FALSE,line=FALSE)
graphics::axis(2,at=pipbounds[-1]-diff(pipbounds)/2,labels=names(pipbounds[-1]),tick=FALSE,las=1,cex.axis=cex.axis)
}
if (do.grid) {
graphics::abline(v=round(pmpbounds,2),lty="dotted",col="grey")
graphics::abline(h=round(pipbounds,2),lty="dotted",col="grey")
}
if (do.par) {graphics::par(mar=oldmar)}
}
##########################
# LINEAR MODELS (NO BMA) #
###########################################################################
#' Bayesian Linear Model with Zellner's g
#'
#' Used to fit the Bayesian normal-conjugate linear model with Zellner's g
#' prior and mean zero coefficient priors. Provides an object similar to the
#' \code{\link{lm}} class.
#'
#' \code{zlm} estimates the coefficients of the following model \eqn{y = \alpha
#' + X \beta + \epsilon} where \eqn{\epsilon} ~ \eqn{N(0,\sigma^2)} and \eqn{X}
#' is the design matrix\cr The priors on the intercept \eqn{\alpha} and the
#' variance \eqn{\sigma} are improper: \eqn{\alpha \propto 1}, \eqn{\sigma
#' \propto \sigma^{-1}} \cr Zellner's g affects the prior on coefficients:
#' \eqn{\beta} ~ \eqn{N(0, \sigma^2 g (X'X)^{-1})}. \cr Note that the prior mean
#' of coefficients is set to zero by default and cannot be adjusted. Note
#' moreover that \code{zlm} always includes an intercept.
#'
#' @param formula an object of class "formula" (or one that can be coerced to
#' that class), such as a data.frame - cf. \code{\link{lm}}
#' @param data an optional \code{\link{data.frame}} (or one that can be coerced
#' to that class): cf. \code{\link{lm}}
#' @param subset an optional vector specifying a subset of observations to be
#' used in the fitting process.
#' @param g specifies the hyperparameter on Zellner's g-prior for the
#' regression coefficients.\cr \code{g="UIP"} corresponds to \eqn{g=N}, the
#' number of observations (default); \code{g="BRIC"} corresponds to the
#' benchmark prior suggested by Fernandez, Ley and Steel (2001), i.e.
#' \eqn{g=max(N, K^2)}, where K is the total number of covariates;\cr
#' \code{g="EBL"} estimates a local empirical Bayes g-parameter (as in Liang et
#' al. (2008));\cr \code{g="hyper"} takes the 'hyper-g' prior distribution (as
#' in Liang et al., 2008) with the default hyper-parameter \eqn{a=3}; This
#' hyperparameter can be adjusted (between \eqn{2<a<=4}) by setting
#' \code{g="hyper=2.9"}, for instance.\cr Alternatively, \code{g="hyper=UIP"}
#' sets the prior expected value of the shrinkage factor equal to that of UIP
#' (above), \code{g="hyper=BRIC"} sets it according to BRIC
#' @return Returns a list of class \code{zlm} that contains at least the
#' following elements (cf. \code{\link{lm}}):
#'
#' \item{coefficients}{a named vector of posterior coefficient expected values}
#' \item{residuals}{the residuals, that is response minus fitted values}
#' \item{fitted.values}{the fitted mean values} \item{rank}{the numeric rank of
#' the fitted linear model} \item{df.residual}{the residual degrees of freedom}
#' \item{call}{the matched call} \item{terms}{the \code{\link{terms}} object
#' used} \item{model}{the model frame used} \item{coef2moments}{a named vector
#' of coefficient posterior second moments} \item{marg.lik}{the log marginal
#' likelihood of the model} \item{gprior.info}{a list detailing information on
#' the g-prior, cf. output value \code{gprior.info} in \code{\link{bms}}}
#' @author Stefan Zeugner
#' @seealso The methods \code{\link{summary.zlm}} and \code{\link{predict.lm}}
#' provide additional insights into \code{zlm} output.\cr The function
#' \code{\link{as.zlm}} extracts a single model out of a \code{bma} object (as
#' e.g. created through \code{\link{bms}}).\cr Moreover, \code{\link{lm}} for
#' the standard OLS object, \code{\link{bms}} for the application of \code{zlm}
#' in Bayesian model averaging.
#'
#' Check \url{http://bms.zeugner.eu} for additional help.
#' @references The representation follows Fernandez, C. E. Ley and M. Steel
#' (2001): Benchmark priors for Bayesian model averaging. Journal of
#' Econometrics 100(2), 381--427
#'
#' See also \url{http://bms.zeugner.eu} for additional help.
#' @keywords models
#' @examples
#'
#'
#' data(datafls)
#'
#' #simple example
#' foo = zlm(datafls)
#' summary(foo)
#'
#' #example with formula and subset
#' foo2 = zlm(y~GDP60+LifeExp, data=datafls, subset=2:70) #basic model, omitting three countries
#' summary(foo2)
#'
#'
#' @export
zlm <- function(formula, data=NULL, subset=NULL, g="UIP") {
#does a normal-gamma linear Bayesian model with Zellner's g prior
#INPUTS:
# formula, data, subset: cf help(lm)
# g: a g prior character (cf help(bms)), or a gprior object as from .choose.gprior
#OUTPUT:
# an object class c("zlm","lm") that is comparable to the lm object
thiscall=match.call()
mf <- match.call(expand.dots = FALSE)
m <- match(c("formula", "data", "subset"), names(mf), 0L)
mf <- mf[c(1L, m)]
mf$drop.unused.levels <- TRUE
mf[[1L]] <- as.name("model.frame")
if (is.matrix(formula)) {
mf <- model.frame(as.data.frame(formula,drop.unused.levels=TRUE))
} else {
mf <- eval(mf, parent.frame())
}
yXdata=as.matrix(mf);
N=nrow(yXdata); K=ncol(yXdata)-1
dmdata=yXdata-matrix(colMeans(yXdata),N,K+1,byrow=TRUE)
yty=c(crossprod(dmdata[,1]));
olsres=.ols.terms2(positions=rep(TRUE,K),yty=yty,N=N,K=K,XtX.big=crossprod(dmdata[,-1,drop=FALSE]),Xty.big=c(crossprod(dmdata[,-1,drop=FALSE],dmdata[,1])))$full.results()
#get gprior
if (is.list(g)) {
if (any(is.element(names(g),"gtype"))) gprior.info=g else stop("Please provide a proper g-prior. see help(zlm)")
}
gprior.info=.choose.gprior(g=g,N=N,K=K,return.g.stats=TRUE,yty=yty)
lprobcalc=gprior.info$lprobcalc
# if (gprior.info$gtype=="EBL") {
# lprobcalc=.lprob.eblocal.init(N=N,K=K,yty=yty,return.g=gprior.info$return.g.stats)
# } else if (gprior.info$gtype=="hyper") {
# lprobcalc=.lprob.hyperg.init(N=N,K=K,yty=yty,f21a=gprior.info$hyper.parameter,return.gmoments=gprior.info$return.g.stats)
# } else {
# lprobcalc=.lprob.constg.init(g=gprior.info$g,N=N,K=K,yty=yty)
# }
zres = lprobcalc$lprob.all(ymy=olsres$ymy, k=K, bhat=olsres$bhat, diag.inverse=olsres$diag.inverse)
betas = c(zres$b1)
betas2 = c(zres$b2)
alpha = mean(yXdata[,1]) - c(crossprod(betas, colMeans(yXdata)[-1]))
fitval = c(yXdata[, -1,drop=FALSE]%*%betas)+alpha
resids = yXdata[,1]-fitval
if (gprior.info$is.constant) {
gprior.info$shrinkage.moments = 1 - 1/(1+gprior.info$g)
} else {
gprior.info$shrinkage.moments = zres$otherstats
}
#lm-like stuff
mt <- attr(mf, "terms")
alphabeta=c(alpha,betas)
names(alphabeta) <- c("(Intercept)", attr(mt,"term.labels"))
res=list()
res$coefficients <- alphabeta
res$residuals <- resids
res$rank <- K+1
res$fitted.values <- fitval
res$df.residual <- N-K-1
res$xlevels <- stats::.getXlevels(mt, mf)
res$call <- thiscall
res$terms <- mt
res$model <- mf
res$na.action <- attr(mf,"na.action")
res$coef2moments <- c(NA,betas2)
res$marg.lik <- zres$lprob
res$gprior.info <- gprior.info
res$olsres <- olsres
res$zres <- zres
class(res)=c("zlm","lm")
return(res)
}
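# Sketch of the shrinkage algebra (an illustration of standard Zellner g-prior results,
# not additional package API): with a fixed g, the posterior coefficient means equal
# the OLS estimates scaled by g/(1+g); under the default g="UIP" (g=N) this gives,
# up to numerical error:
#   foo = zlm(datafls); ols = lm(y ~ ., data=datafls); N = nrow(datafls)
#   all.equal(unname(coef(foo)[-1]), unname(coef(ols)[-1]) * N/(N+1))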
#' Summarizing Linear Models under Zellner's g
#'
#' summary method for class "\code{zlm}"
#'
#' \code{summary.zlm} prints out coefficients expected values and their
#' standard deviations, as well as information on the gprior and the log
#' marginal likelihood. However, it invisibly returns a list with elements as
#' described below:
#'
#' @param object an object of class \code{zlm}: see "Examples" below
#' @param printout If \code{TRUE} (default), then information is printed to the
#' console in a neat form
#' @param \dots further arguments passed to or from other methods
#' @return A \code{\link{list}} with the following elements \item{residuals}{
#' The expected value of residuals from the model} \item{coefficients}{The
#' posterior expected values of coefficients (including the intercept) }
#' \item{coef.sd}{Posterior standard deviations of the coefficients (the
#' intercept SD is \code{NA}, since an improper prior was used)}
#' \item{gprior}{The g prior as it has been submitted to \code{object}}
#' \item{E.shrinkage}{the shrinkage factor \eqn{g/(1+g)}, respectively its
#' posterior expected value in case of a hyper-g prior}
#' \item{SD.shrinkage}{(Optionally) the shrinkage factor's posterior standard
#' deviation (in case of a hyper-g prior)} \item{log.lik}{The log marginal
#' likelihood of the model}
#' @author Stefan Zeugner
#' @seealso \code{\link{zlm}} for creating \code{zlm} objects,
#' \code{\link{summary.lm}} for a similar function on OLS models
#'
#' See also \url{http://bms.zeugner.eu} for additional help.
#' @keywords utilities
#' @examples
#'
#' data(datafls)
#'
#' #simple example
#' foo = zlm(datafls)
#' summary(foo)
#'
#' sfoo = summary(foo,printout=FALSE)
#' print(sfoo$E.shrinkage)
#'
#' @export
summary.zlm <- function(object, printout=TRUE, ...) {
#prints a summary for a zlm object: coefficients, std devs, shrinkage stats, marg.lik.
betas=object$coefficients
betas2= object$coef2moments
sds= sqrt(betas2-betas^2)
ests=cbind(betas,sds)
gi=object$gprior.info
gi.choice = gi$gtype
if (gi$gtype=="hyper") {gi.choice=paste(gi$gtype," (a=",2+signif(gi$hyper.parameter-2,digits=4),")",sep="")}
gi.sd=-1; gi.sdtext=""
if (length(gi$shrinkage.moments)>1) {
gi.sd=sqrt(gi$shrinkage.moments[[2]]-gi$shrinkage.moments[[1]]^2)
gi.sdtext=paste(" St.Dev.:", round(gi.sd,3))
}
rownames(ests) = c("(Intercept)",attr(object$terms,"term.labels"))
colnames(ests)=c("Exp.Val.","St.Dev.")
cat("Coefficients\n")
print(ests)
cat("\n Log Marginal Likelihood:\n")
cat(object$marg.lik)
cat(paste("\n g-Prior:", gi.choice,"\n"))
cat(paste("Shrinkage Factor",ifelse(gi$is.constant,": ", " Exp.Val: "), round(gi$shrinkage.moments[[1]],3), gi.sdtext, "\n",sep=""))
res=list()
res$residuals <- object$residuals
res$coefficients <- object$coefficients
res$coef.sd <- sds
res$gprior <- gi.choice
res$E.shrinkage <- gi$shrinkage.moments[[1]]
if (gi.sd>-1) {res$SD.shrinkage <- gi.sd}
res$log.lik <- object$marg.lik
return(invisible(res))
}
#' Extract a Model from a bma Object
#'
#' Extracts a model out of a \code{bma} object's saved models and converts it
#' to a \code{\link{zlm}} linear model
#'
#' A bma object stores several 'best' models it encounters (cf. argument
#' \code{nmodel} in \code{\link{bms}}). \code{as.zlm} extracts a single model
#' and converts it to an object of class \code{\link{zlm}}, which represents a
#' linear model estimated under Zellner's g prior.\cr The utility
#' \code{\link{model.frame}} allows one to transform a \code{zlm} model into an OLS
#' model of class \code{\link{lm}}.
#'
#' @param bmao A \code{bma} object, e.g. resulting from a call to
#' \code{\link{bms}}
#' @param model The model index, in one of the following forms:\cr An integer,
#' denoting the rank of the model (1 for best, 2 for second-best, ...)\cr A
#' numeric or logical vector of length K describing which covariates are
#' contained in the model\cr A hexcode character describing which covariates
#' are contained in the model\cr A character vector of variable names
#' denoting the covariates contained in the model
#' @return a list of class \code{\link{zlm}}
#' @author Stefan Zeugner
#' @seealso \code{\link{bms}} for creating \code{bma} objects,
#' \code{\link{zlm}} for creating \code{zlm} objects,
#' \code{\link{pmp.bma}} for displaying the
#' topmodels in a \code{bma} object
#'
#' Check \url{http://bms.zeugner.eu} for additional help.
#' @keywords models
#' @examples
#'
#' data(datafls)
#'
#' mm=bms(datafls[,1:6],mcmc="enumeration") # do a small BMA chain
#' topmodels.bma(mm)[,1:5] #display the best 5 models
#'
#' m2a=as.zlm(mm,4) #extract the fourth best model
#' summary(m2a)
#'
#' # Bayesian Model Selection:
#' # transform the best model into an OLS model:
#' lm(model.frame(as.zlm(mm)))
#'
#' # extract the model only containing the 5th regressor
#' m2b=as.zlm(mm,c(0,0,0,0,1))
#'
#' # extract the model only containing the 5th regressor in hexcode
#' print(bin2hex(c(0,0,0,0,1)))
#' m2c=as.zlm(mm,"01")
#'
#'
#'
#'
#' @export
as.zlm <- function(bmao, model=1) {
#this function extracts a single topmodel from a bma object and converts it to zlm format
#Inputs:
# bmao: bma object
# model: index of the model
#Output: zlm object
thiscall=match.call()
if (!is.bma(bmao)) stop("bmao needs to be a bma object")
bools=bmao$topmod$bool()
if (all(is.character(model))&&length(model)==1) {
model=(1:length(bools))[bools==model[[1]]]
if (length(model)==0) stop("Provided model hex-index was not found in bmao object topmodels")
} else if (all(is.character(model))&&(length(model)>1) ) {
mix=match(model, bmao$reg.names)
if (any(is.na(mix))) stop("Provided variable names do not conform to bma object")
ll=logical(bmao$info$K); ll[mix]=TRUE; model=(1:length(bools))[bools==bin2hex(ll)]; rm(ll,mix)
if (length(model)==0) stop("Model conforming to provided variable names was not found in bmao object topmodels")
} else if ((length(model)==bmao$info$K)&&(is.numeric(model)||is.logical(model))) {
model=(1:length(bools))[bools==bin2hex(model)]
if (length(model)==0) stop("Provided binary model index was not found in bmao object topmodels")
} else if ((length(model)==1)&&(is.numeric(model)||is.logical(model))) {
if (model<1|model>length(bools)) stop("Provided numeric model index was not found in bmao object topmodels")
} else stop("model needs to be an integer, logical or character model index representation (hexcode or variable names)")
inclvbls= as.logical(bmao$topmod$bool_binary()[,model, drop=TRUE])
yXdf =as.data.frame(bmao$arguments$X.data)
zlmres=zlm(as.formula(yXdf[,c(TRUE,inclvbls)]),data=yXdf,g=bmao$gprior.info)
zlmres$call <- thiscall
return(zlmres)
}
#' Predict Method for zlm Linear Model
#'
#' Expected value (And standard errors) of predictions based on 'zlm' linear
#' Bayesian model under Zellner's g prior
#'
#'
#' @param object a zlm linear model object - see \code{\link{zlm}}
#' @param newdata An optional data.frame, matrix or vector containing variables
#' with which to predict. If omitted, then (the expected values of) the fitted
#' values are returned.
#' @param se.fit A switch indicating if the standard deviations for the
#' predicted variables are required.
#' @param \dots further arguments passed to or from other methods.
#' @return A vector with (expected values of) fitted values.\cr If
#' \code{se.fit} is \code{TRUE}, then the output is a list with the following
#' elements: \item{fit}{ a vector with the expected values of fitted values}
#' \item{std.err}{ a vector with the standard deviations of fitted values}
#' \item{se.fit}{ a vector with the standard errors without the residual scale
#' akin to \code{se.fit} in \code{\link{predict.lm}} } \item{residual.scale}{
#' The part from the standard deviations that involves the identity matrix.
#' Note that \code{sqrt(se.fit^2+residual.scale^2)} yields \code{std.err}. }
#'
#' @seealso \code{\link{bms}} for creating zlm objects,
#' \code{\link{predict.lm}} for a comparable function,
#' \code{\link{predict.bma}} for predicting with bma objects
#'
#' Check \url{http://bms.zeugner.eu} for additional help.
#' @keywords utilities
#' @examples
#'
#' data(datafls)
#' mm=zlm(datafls,g="EBL")
#'
#' predict(mm) #fitted values
#' predict(mm, newdata=1:41) #prediction based on a 'new data point'
#'
#' #prediction based on a 'new data point', with 'standard errors'
#' predict(mm, newdata=datafls[1,], se.fit=TRUE)
#'
#' @export
predict.zlm <- function(object, newdata=NULL, se.fit=FALSE, ...) {
# does basic fitting in expected values, cf. predict.lm
# object: a zlm object
# newdata: newdata to be supplied (just as in predict.lm)
# se.fit: whether standard errors should be calculated
# output: a vector with fitted values, if se.fit=T, then a list with fit, standard errors and residual scale
if (!is(object,"zlm")) {stop("you need to provide a zlm object"); return()}
#get the betas as required
betas=object$coefficients[-1,drop=FALSE]
alpha=object$coefficients[[1]]
#check the newdata argument
if (is.null(newdata)) {
newX<-as.matrix(object$model[,-1,drop=FALSE])
} else {
newX=as.matrix(newdata)
if (!is.numeric(newX)) stop("newdata must be numeric!")
if (is.vector(newdata)) newX=matrix(newdata,1)
if (ncol(newX)!=length(betas)) {
if (ncol(newX)==length(betas)+1) {
newX=newX[,-1,drop=FALSE] # this is to achieve a behavior similar to predict.lm in this case
} else {
stop("newdata must be a matrix or data.frame with ", length(betas), " columns.")
}
}
}
#if se.fit==FALSE, we are done now:
if (!se.fit) return(as.vector(newX%*%betas)+alpha)
#################################################
#if se.fit==TRUE, get additional info to compute the standard errors
yXdata <- as.matrix(object$model)
oldXraw <- yXdata[,-1,drop=FALSE]
if (!is.null(colnames(newX))&& !is.null(colnames(oldXraw))) { if ( all(colnames(oldXraw) %in% colnames(newX)) && !all(colnames(oldXraw) == colnames(newX)) ) { #this is another user check, when the stuff came in malformed
warning("argument newdata had to be reordered according to its column names. Consider submitting the columns of newdata in the right order.")
newX=newX[,colnames(oldXraw), drop=FALSE]
} }
yraw <- yXdata[,1,drop=TRUE]
N <- length(yraw)
k <- ncol(oldXraw)
oldXmeans <- colMeans(oldXraw)
oldXdm <- oldXraw-matrix(oldXmeans,N,k,byrow=TRUE)
newXdm <- newX-matrix(oldXmeans,nrow(newX),k,byrow=TRUE)
#compute x_f' (X'X)^-1 x_f
xtxinv=chol2inv(chol(crossprod(oldXdm)))
xtxinv_xf= tcrossprod(xtxinv,newXdm)
xf_xx_xf=unlist(lapply(1:nrow(newXdm),function(x) {crossprod(newXdm[x,],xtxinv_xf[,x])[[1L]]} ) )
#these are some factors multipliying the above
bvar=object$coef2moments[-1]-object$coefficients[-1]^2
bvar_factor=bvar[[1L]]/xtxinv[[1L]]
yty=as.vector(crossprod(yraw)-N*mean(yraw)^2)
r2 = 1-object$olsres$ymy/yty
if (object$gprior.info$gtype=="hyper") {
f21a=object$gprior.info$hyper.parameter
f21_recover = exp( (object$marg.lik) + (N-1)/2*log(yty) + log((k+f21a-2)/(f21a-2)) ) #this might be not so accurate due to numerical errors, but in this case the part involving F21 is usually very very small...
res_scale = yty/(N-3)/(N-1-k-f21a)*( (N-3)*(1-r2) - (k+f21a-2)/f21_recover)
svar_woscale = res_scale/N + bvar_factor*xf_xx_xf
#svar_woscale is the standard error without the residual scale (the identity matrix part)
} else {
sf=object$gprior.info$shrinkage.moments[[1]]
res_scale = (1-sf*r2)*yty/(N-3)
svar_woscale = res_scale/N + bvar_factor*xf_xx_xf
}
#produce output
reslist=list()
reslist$fit <- as.vector(newX%*%betas)+alpha
reslist$std.err <- sqrt(svar_woscale+res_scale)
reslist$se.fit <- sqrt(svar_woscale)
reslist$residual.scale <- sqrt(res_scale)
return(reslist)
}
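# Sketch verifying the variance decomposition noted in the docs (an illustration):
#   pr = predict(zlm(datafls), newdata=datafls[1,], se.fit=TRUE)
#   all.equal(pr$std.err, sqrt(pr$se.fit^2 + pr$residual.scale^2))  # TRUE by construction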
#' @export
density.zlm <- function(x,reg=NULL,addons="lesz",std.coefs=FALSE,n=300,plot=TRUE,hnbsteps=30,addons.lwd=1.5,...) {
#this function does just the same as density.bma, but for an object of class zlm
#permitted values for addons: e, s, l, z, g
addons=gsub("E","",addons,ignore.case=FALSE)
addons=gsub("S","",addons,ignore.case=FALSE)
addons=gsub("b","",addons,ignore.case=FALSE)
addons=gsub("m","",addons,ignore.case=FALSE)
addons=gsub("p","",addons,ignore.case=FALSE)
N=length(x$residuals); K=length(x$coefficients)-1
tmo=topmod(1,nmaxregressors=K, bbeta=TRUE,liks=x$marg.lik, ncounts=1, modelbinaries=matrix(rep(1,K),K,1), betas=matrix(as.vector(x$coefficients[-1]),K), betas2=matrix(as.vector(x$coef2moments[-1]),K))
tokenbma=list(info=list(K=K,N=N),arguments=list(),topmod=tmo,start.pos=integer(0), gprior.info=x$gprior.info, X.data=x$model, reg.names=names(x$coefficients)[-1], bms.call=new("call"))
class(tokenbma)="bma"
return(density.bma(tokenbma,reg=reg, addons=addons, std.coefs=std.coefs, n=n, plot=plot, hnbsteps=hnbsteps, addons.lwd=addons.lwd,...))
}
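# Usage sketch (an illustration): the density() generic dispatches here for zlm objects:
#   foo = zlm(datafls)
#   density(foo, reg="GDP60")  # posterior coefficient density under this single model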
.adjustdots = function(dotargs,...) {
# helper function to adjust default arguments for passing on to other functions
# in particular to adjust type, col, main etc. when passing on to plotting functions
defargs=list(...); defargnames=names(defargs);
dotargs=as.list(dotargs);
if (is.null(dotargs)) { dotargs= list() }
for (di in seq_len(length(defargs))) {
if (!is.element(defargnames[[di]],names(dotargs))) {
dotargs[[defargnames[[di]]]] <- defargs[[di]];
}
}
return(dotargs);
}
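# Illustration of the merging rule (a sketch; .adjustdots is internal):
#   .adjustdots(list(col="red"), col="blue", lwd=2)
#   # yields list(col="red", lwd=2): user-supplied values win, missing ones get defaults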
#####################################
# PREDICTIVE DENSITY ######
#####################################
#' Predictive Densities for bma Objects
#'
#' Predictive densities for conditional forecasts
#'
#' The predictive density is a mixture density based on the \code{nmodels} best
#' models in a \code{bma} object (cf. \code{nmodel} in \code{\link{bms}}).\cr
#' The number of 'best models' to retain is therefore vital and should be set
#' quite high for accuracy.
#'
#' @aliases pred.density pred.density-class print.pred.density
#' @param object a bma object - see \code{\link{bms}}, alternatively a
#' \code{\link{zlm}} object
#' @param newdata A data.frame, matrix or vector containing variables with
#' which to predict.
#' @param n The integer number of equally spaced points at which the density is
#' to be estimated.
#' @param hnbsteps The number of numerical integration steps to be used in case
#' of a hyper-g prior (cf. argument \code{g} in \code{\link{bms}}). Increase
#' this number to increase accuracy. Must be an even integer.
#' @param \dots arguments to be passed on to \code{\link{plot.density}}.
#' @return \code{pred.density} returns a list of class \code{pred.density} with
#' the following elements \item{densities()}{a list whose elements each contain
#' the estimated density for each forecasted observation} \item{fit}{a vector
#' with the expected values of the predictions (the 'point forecasts')}
#' \item{std.err}{a vector with the standard deviations of the predictions (the
#' 'standard errors')} \item{dyf(realized.y, predict_index=NULL)}{Returns the
#' densities of realized response variables provided in \code{realized.y}. \cr
#' If \code{realized.y} is a matrix, then each row corresponds to a forecast
#' observation in \code{newdata}\cr if not left empty, \code{predict_index}
#' specifies to which observations in newdata the realized.y should apply}
#' \item{lps(realized.y, predict_index=NULL)}{Computes the log predictive score
#' for the response variable provided in \code{realized.y} (cf.
#' \code{\link{lps.bma}}) -\cr Note that the LPS equals minus the mean of the
#' logarithmized results from \code{dyf}) } \item{plot(x, predict_index =
#' NULL, addons = "eslz", realized.y = NULL, addons.lwd = 1.5, ...)}{the same
#' as \code{plot.pred.density}} \item{n}{The number of equally spaced
#' points for which the density (under \code{densities()}) was computed.}
#' \item{nmodel}{The number of best models predictive densities are based
#' upon.} \item{call}{the call that created this \code{pred.density} object}
#' @note In BMS version 0.3.0, \code{pred.density} may only cope with built-in
#' \code{gprior}s, not with any user-defined priors.
#'
#' @seealso \code{\link{predict.bma}} for simple point forecasts,
#' \code{plot.pred.density} for plotting predictive densities,
#' \code{\link{lps.bma}} for calculating the log predictive score
#' independently, \code{\link{quantile.pred.density}} for extracting quantiles
#'
#' Check \url{http://bms.zeugner.eu} for additional help.
#' @keywords utilities
#' @examples
#'
#' data(datafls)
#' mm=bms(datafls,user.int=FALSE)
#'
#' #predictive density for two 'new' data points
#' pd=pred.density(mm,newdata=datafls[1:2,])
#'
#'
#' #fitted values based on best models, same as predict(mm, exact=TRUE)
#' pd$fit
#'
#' #plot the density for the first forecast observation
#' plot(pd,1)
#'
#' # the same plot 'naked'
#' plot(pd$densities()[[1]])
#'
#'
#' #predict density for the first forecast observation if the dep. variable is 0
#' pd$dyf(0,1)
#'
#' #predict densities for both forecasts for the realizations 0 and 0.5
#' pd$dyf(rbind(c(0,.5),c(0,.5)))
#'
#' # calc. Log Predictive Score if both forecasts are realized at 0:
#' lps.bma(pd,c(0,0))
#'
#'
#' @export
pred.density <- function(object, newdata=NULL, n=300, hnbsteps=30, ...) {
dtcm=function(x,df,ncp,varp) {
#a wrapper for univariate t-dist with non-centrality parameter and variance parameter
# (variance is df/(df-2)*varp)
sqvarp=sqrt(varp)
stats::dt((x-ncp)/sqvarp,df=df)/sqvarp
}
dsgivenykernel <- function(sf,kpa,N,z) {
#the post. density of the shrinkage factor f(s|Y)*F((N-1)/2,1,(k+a)/2,R2)
(kpa-2)/2*(1-sf)^((kpa-4)/2)*(1-sf*z)^(-(N-1)/2)
}
#user checks
nbsteps=max(hnbsteps,2)
n=max(ceiling(n),1)
#read out data from object
is.hyper=(object$gprior.info$gtype=="hyper")
if (is.hyper) f21a=object$gprior.info$hyper.parameter
if (is.bma(object)) {
K=object$info$K; N=object$info$N
yXdata=as.matrix(object$arguments$X.data)
tmo <- object$topmod
} else if (is(object,"zlm")) {
yXdata=as.matrix(object$model)
K=ncol(yXdata)-1; N=nrow(yXdata)
tmo <- topmod(1,nmaxregressors=K, bbeta=TRUE,liks=object$marg.lik, ncounts=1, modelbinaries=matrix(rep(1,K),K,1), betas=matrix(as.vector(object$coefficients[-1]),K), betas2=matrix(as.vector(object$coef2moments[-1]),K))
} else stop("argument 'object' requires class 'bma' or 'zlm'")
rm(object)
#check the newdata argument, checks are small as newdata has already survived predict.zlm
if (missing(newdata)) {
stop("You must provide the argument newdata")
} else {
newX=as.matrix(newdata)
if (!is.numeric(newX)) stop("newdata must be numeric!")
if (is.vector(newdata)) newX=matrix(newdata,1)
if (ncol(newX)!=K) {
if (ncol(newX)==K+1) {
newX=newX[,-1,drop=FALSE] # this is to achieve a behavior similar to predict.lm in this case
} else {
stop("newdata must be a matrix or data.frame with ", K, " columns.")
}
}
orinames=colnames(yXdata[,-1,drop=FALSE]) #this is a user check whether columns had been submitted in the wrong order
if (!is.null(colnames(newX)) && !is.null(orinames)) {
if (all(orinames %in% colnames(newX) ) && !all(orinames == colnames(newX)) ) {
warning("argument newdata had to be reordered according to its column names. Consider submitting the columns of newdata in the right order.")
newX=newX[,orinames, drop=FALSE]
}
}
}
if(!is.null(rownames(newX))) {
newXnames=rownames(newX)
} else {
newXnames=as.character(1:nrow(newX))
}
rnew = nrow(newX)
y.mean=mean(yXdata[,1])
y<-yXdata[,1]-matrix(y.mean,N,1,byrow=TRUE)
X<-yXdata[,-1,drop=FALSE]-matrix(colMeans(yXdata[,-1,drop=FALSE]),N,K,byrow=TRUE)
XtX.big=crossprod(X)
Xty.big=as.vector(crossprod(X,y))
yty = crossprod(y)[[1]]
newXdm= newX-matrix(colMeans(yXdata[,-1,drop=FALSE]),rnew,K,byrow=TRUE)
hexobject<-.hexcode.binvec.convert(K)
make_xfxxxf = function(hex) {
syminv <- function(symmat, ndim=ncol(symmat)) {
#this does the same as chol2inv(chol.default(x)), but is stripped-down for speed purposes
# Caution: symmat must always have length(symmat)>0!!!
if (!is.matrix(symmat)) {symmat=as.matrix(symmat)}
if (dim(symmat)[[1]]==0) return(matrix(numeric(0),0,0))
return( chol2inv(chol(symmat), size=ndim) )
}
boolvec = as.logical(hexobject$as.binvec(hex))
if (!any(boolvec)) return(c(numeric(rnew),numeric(rnew),Inf,Inf,0))
newXsub= newXdm[,boolvec,drop=FALSE]
xtxinv = syminv(XtX.big[boolvec,boolvec,drop=FALSE])
xty=Xty.big[boolvec]
betas = as.vector(crossprod(xtxinv,xty),mode="numeric")
r2=crossprod(xty,betas)[[1]]/yty
xtxinv_xf = tcrossprod(xtxinv,newXsub)
xf_xx_xf = unlist(lapply(1:nrow(newXsub),function(x) {crossprod(newXsub[x,],xtxinv_xf[,x])[[1L]]} ) )
xf_bhat = as.vector(newXsub%*%betas)
return(c(xf_xx_xf, xf_bhat, xtxinv[[1L]], betas[[1L]],r2))
}
pmps=pmp.bma(tmo,oldstyle=TRUE)[,1,drop=TRUE]
bools=tmo$bool()
nmodel=length(bools)
linvres=lapply(bools,make_xfxxxf)
mat_xfxxxf = array(unlist(lapply(linvres,"[",1:rnew)),dim=c(rnew,nmodel))
mat_xfbhat = array(unlist(lapply(linvres,"[",rnew+(1:rnew))),dim=c(rnew,nmodel))
xtxinv_elem1 = unlist(lapply(linvres,"[[",rnew*2+1))
betahat_elem1 = unlist(lapply(linvres,"[[",rnew*2+2))
r2 = unlist(lapply(linvres,"[[",rnew*2+3))
kvec=tmo$kvec_raw()
kvec_cs=c(1,cumsum(kvec)+1); kvec_cs=kvec_cs[-length(kvec_cs)]
firstbetas=tmo$betas_raw()[kvec_cs]
firstbetas2=tmo$betas2_raw()[kvec_cs]
Es = firstbetas / betahat_elem1
varmult = (firstbetas2-firstbetas^2) / xtxinv_elem1
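# Es is each model's posterior-mean shrinkage factor, recovered as the ratio of
# the stored posterior first moment of the first coefficient over its OLS
# counterpart; varmult rescales (X'X)^(-1)-based variances via the second moments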
if (is.hyper) {
first_factor= yty/(N-3)*(N+1)/N* (1+2/(N-kvec-f21a-1)-r2*Es )
} else {
first_factor= yty/(N-3)*(1-Es*r2)*(N+1)/N
}
Sigmas = (matrix((N-3)/(N-1)*first_factor,rnew,nmodel,byrow=TRUE)+t(t(mat_xfxxxf)*((N-3)/(N-1)*varmult))) #by recycling this multiplies each column with varmult
Evals_minusy = t(t(mat_xfbhat)*Es)
Eyf = as.vector(Evals_minusy %*%pmps+y.mean)
Varyf = as.vector(Sigmas%*%pmps)*(N-1)/(N-3)
premultfactor=yty/(N-1) #needed for hyper
interceptfactor=(N+1)/N #needed for hyper
####################
calcdensvec = function(xf_index, seqy, m_index) {
sss=function(lbound,uboundp1,nbsteps,seqs,xf.index) {
#simple simpson integration over shrinkage factor
#this is just a convenience function and depends on variables in parent scope, only used in case of hyper-g
#caution: seqs needs to have at least two elements!
s.seq=seq(lbound,uboundp1,(uboundp1-lbound)/nbsteps)[-nbsteps]
tmat=array(unlist(lapply(as.list(s.seq),function(ss) { dtcm(seqs,N-1,y.mean+ss*myev,premultfactor*(1-ss*myr2)*(interceptfactor+ss*myxfxxxf))})),dim=c(length(seqs),nbsteps)) #matrix of t-densities for different s
smat=sapply(as.list(s.seq),dsgivenykernel, kpa=myk+f21a,N=N,z=myr2) #vector of posterior densities for the different s
if (any(is.infinite(smat))) smat[is.infinite(smat)]=0
intconst=(4*sum(smat[c(FALSE,TRUE)])+2*sum(smat[c(TRUE,FALSE)])-3*smat[nbsteps]-smat[1])*(s.seq[nbsteps]-s.seq[1])/nbsteps/3 #calc the integration constant
return(list(dv=c(4*tmat[,c(FALSE,TRUE)]%*%smat[c(FALSE,TRUE)]+2*tmat[,c(TRUE,FALSE)]%*%smat[c(TRUE,FALSE)]-3*tmat[,nbsteps]*smat[nbsteps]-tmat[,1]*smat[1])*(s.seq[nbsteps]-s.seq[1])/nbsteps/3, ic=intconst))
#return the estimated density and a normalization constant
#is done in parts because it may be recomposed later
}
if (any(is.na(newX[xf_index,]))) {
densvec=numeric(0)
}
if (is.hyper) {
myev=mat_xfbhat[xf_index,m_index]; myxfxxxf=mat_xfxxxf[xf_index,m_index];
myk=kvec[[m_index]]; myr2=r2[[m_index]]
midpoint=1-(1-Es[[m_index]])*4
if (midpoint<.5) {
dvl=sss(.0001,.9999999,nbsteps*2,seqy,xf_index); densvec=dvl$dv/dvl$ic
} else {
dvl1=sss(.0001,midpoint,nbsteps,seqy,xf_index); dvl2=sss(midpoint,1,nbsteps,seqy,xf_index)
densvec=(dvl1$dv+dvl2$dv)/(dvl1$ic+dvl2$ic)
}
} else {
densvec=dtcm(seqy,N-1,Evals_minusy[xf_index,m_index]+y.mean,Sigmas[xf_index,m_index])
}
return(densvec)
}
##########################
##########################
dens_yf = function(yfr,xf_indices=NULL) {
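# evaluates the predictive density at realized values yfr (one row per forecast
# observation), averaging over the best models with their PMPs as weights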
if (is.null(xf_indices)) xf_indices=seq_len(rnew)
yfdens=array(NA,dim=dim(yfr))
for (myxf in 1:length(xf_indices)) {
allm_dens=sapply(seq_len(nmodel), function(x) calcdensvec(xf_indices[[myxf]],yfr[myxf,],x))
yfdens[myxf,]=as.vector(allm_dens%*%pmps)
}
yfdens[!is.finite(yfdens)]=NA
if (ncol(yfdens)==1) dim(yfdens) <- NULL
return(yfdens)
}
################################################
emptydens=list(x=numeric(0), y=numeric(0), bw=NULL, n=0, has.na=TRUE)
class(emptydens)="density"
dlist = lapply(vector("list",nrow(newX)),function (x) emptydens)
densities_calculated <- FALSE
calc_alldens = function() {
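# evaluates each forecast's predictive density once on a grid of n points
# spanning +/- 4 posterior std.errs around the point forecast, and caches the
# result in dlist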
if (densities_calculated) return(NULL)
for (xf.index in 1:rnew) {
if (!any(is.na(newX[xf.index,]))) {
lbound=Eyf[[xf.index]]-sqrt(Varyf[[xf.index]])*4; ubound=Eyf[[xf.index]]+sqrt(Varyf[[xf.index]])*4
seqs=seq(lbound,ubound,(ubound-lbound)/(n-1))
allm_dens=sapply(seq_len(nmodel), function(x) calcdensvec(xf.index,seqs,x))
myy=as.vector(tcrossprod(t(as.matrix(pmps)),allm_dens))
mydens=list(x=seqs,y=myy,bw=NULL, n=n, has.na=FALSE)
class(mydens)="density"
dlist[[xf.index]] <<- mydens
}
}
densities_calculated <<- TRUE
}
########################################
consistent.yf = function(yf, xf.indices=NULL) {
#a user check function testing whether a realized y conforms to requirements
xf_series=seq_len(rnew)
wasnull=FALSE
if (is.null(xf.indices)) {
wasnull=TRUE
xf.indices=xf_series
} else {
if (!all(xf.indices %in% xf_series)) stop(paste("predict_index needs to be an integer between 1 and ",rnew,"!",sep=""))
}
if (!is.numeric(yf)) stop("realized.y must be a numeric matrix or vector!")
if (!is.matrix(yf)) yf <- as.matrix(yf)
if ((length(xf.indices)==1)&(nrow(yf)>1)&(ncol(yf)==1)) yf <- t(yf)
if (nrow(newX[xf.indices,,drop=FALSE])!=nrow(yf)) {
if (wasnull) stop(paste("realized.y must have", rnew , "elements/rows corresponding to newdata"))
else stop("The number of rows/elements in realized.y must have the same length as predict_index!")
}
return(yf)
}
consistent_predict_index =function(pix) {
# a user check function to convert character predict_index into numeric
if (is.character(pix)) {
if (all(pix %in% newXnames)) {
return( match( pix, newXnames) )
} else {
stop("Forecast IDs provided in predict_index do not conform to rownames of predicted data");
}
} else return(pix)
}
########################################
plot.preddens = function(xf.index=1, addons="eslz", yf.addons=NULL, predict_index=NULL, addons.lwd=1.5, ...) {
dotargs = match.call(expand.dots=FALSE)$...
if (rnew>1) {
main_default <- paste("Predictive Density Obs ", newXnames[[xf.index]], " (", nmodel, " Models)", sep="")
} else {
main_default <- paste("Predictive Density", " (", nmodel, " Models)",sep="")
}
dotargs=.adjustdots(dotargs,xlab="Response variable",main=main_default,col=4, zero.line=FALSE)
thingy=dlist[[xf.index]]
eval(as.call(c(list(as.name("plot"),as.name("thingy")),as.list(dotargs))))
leg.col=numeric(0);leg.lty=numeric(0); leg.legend=character(0)
if (any(grep("g",addons,ignore.case=TRUE))) { # grid
graphics::grid()
}
if (any(grep("e",addons,ignore.case=FALSE))) { # posterior mean
graphics::abline(v=fit[[xf.index]],col=2,lwd=addons.lwd)
leg.col=c(leg.col,2);leg.lty=c(leg.lty,1); leg.legend=c(leg.legend,"Exp. Value")
}
if (any(grep("s",addons,ignore.case=FALSE))) { # standard error bounds
graphics::abline(v=fit[[xf.index]]-2*stderrs[[xf.index]],col=2,lty=2,lwd=addons.lwd)
graphics::abline(v=fit[[xf.index]]+2*stderrs[[xf.index]],col=2,lty=2,lwd=addons.lwd)
leg.col=c(leg.col,2);leg.lty=c(leg.lty,2);leg.legend=c(leg.legend,"2x Std.Errs")
}
if (any(grep("z",addons,ignore.case=TRUE))) { #zero line
graphics::abline(h=0,col="gray",lwd=addons.lwd)
}
if (!is.null(yf.addons)&&is.numeric(yf.addons)) { # add a vertical line at the realized y value
yfs=as.vector(yf.addons)
if (!is.na(yfs[[xf.index]])) {
graphics::abline(v=yfs[[xf.index]],col=1,lwd=addons.lwd, lty=2)
leg.col=c(leg.col,1);leg.lty=c(leg.lty,2);leg.legend=c(leg.legend,"Realized y")
} else warning("yf.addons must be a vector with the same number of elements as rows in newdata!")
}
if (any(grep("l",addons,ignore.case=TRUE))&(length(leg.col)>0)) { #legend
graphics::legend(x="topright",lty=leg.lty,col=leg.col,legend=leg.legend,box.lwd=0,bty="n",lwd=addons.lwd)
}
}
#######################################
fit=Eyf; names(fit)=newXnames
stderrs=sqrt(Varyf); names(stderrs)=newXnames
reslist= list()
reslist$densities = function() { calc_alldens(); return(dlist) }
reslist$fit = fit
reslist$std.err = stderrs
reslist$dyf = function(realized.y, predict_index=NULL) {
predict_index=consistent_predict_index(predict_index)
if (missing(realized.y)) { stop("You must provide a realization of the dependent variable in realized.y")}
return(dens_yf(consistent.yf(realized.y, predict_index), predict_index))
}
reslist$lps = function(realized.y, predict_index=NULL) {
predict_index=consistent_predict_index(predict_index)
if (missing(realized.y)) { stop("You must provide a realization of the dependent variable in realized.y")}
yf=consistent.yf(realized.y, predict_index); if (ncol(yf)!=1) stop("realized.y must have only one column!")
yf.dens=dens_yf(yf, predict_index)
return(-sum(log(yf.dens[!is.na(yf.dens)]))/length(yf))
}
reslist$plot = function(predict_index=NULL, addons="eslz", realized.y=NULL, addons.lwd=1.5, ...) {
dotargs = match.call(expand.dots=FALSE)$...
xf_series=seq_len(rnew)
#user checks
predict_index=consistent_predict_index(predict_index)
if (is.null(predict_index)) { predict_index=xf_series
} else if (!all(predict_index%in%xf_series)) stop(paste("predict_index needs to be an integer between 1 and ",rnew,"!",sep=""))
if (!(is.null(realized.y))) {
if (length(realized.y)!=length(predict_index)) {
stop("realized.y must be a vector with the same number of elements as rows in newdata (or predict_index)!")
}
}
if (!is.null(realized.y)) realized.y <- consistent.yf(realized.y,predict_index);
calc_alldens()
oldask = graphics::par()$ask
plotnb=0
for (xf_index in predict_index) {
doplot=!dlist[[xf_index]]$has.na; plotnb=plotnb+doplot
if (plotnb==2) graphics::par(ask=TRUE)
dotargs=.adjustdots(dotargs,main=NULL,col="steelblue4", xlab="Response variable")
if (doplot) {
eval(as.call(c(list(as.name("plot.preddens"),as.name("xf_index"),addons=as.name("addons"),yf.addons=as.name("realized.y"),addons.lwd=as.name("addons.lwd")),as.list(dotargs))))
}
}
graphics::par(ask=oldask)
}
reslist$n=n
reslist$nmodel=nmodel
reslist$call=sys.call(0)
class(reslist) ="pred.density"
rm(betahat_elem1, bools, emptydens, firstbetas, firstbetas2, linvres)
return(reslist)
}
#' Log Predictive Score
#'
#' Computes the Log Predictive Score to evaluate a forecast based on a bma
#' object
#'
#' The log predictive score is an indicator for the likelihood of several
#' forecasts.\cr It is defined as minus the arithmetic mean of the logarithms
#' of the point densities for \code{realized.y} given \code{newdata}.\cr Note
#' that in most cases it is more efficient to first compute the predictive density
#' object via a call to \code{\link{pred.density}} and only then pass the
#' result on to \code{lps.bma}.
#'
#' @param object an object of class \code{\link{pred.density}}, or class
#' \code{bma} (cf. \code{\link{bms}}), or class \code{\link{zlm}}
#' @param realized.y a vector with realized values of the dependent variable
#' at which the predictive density is to be evaluated; its length must
#' conform to \code{newdata}
#' @param newdata Needs to be provided if \code{object} is not of class
#' \code{\link{pred.density}}: a data.frame, matrix or vector containing
#' variables with which to predict.
#' @return A scalar denoting the log predictive score
#'
#' @seealso \code{\link{pred.density}} for constructing predictive densities,
#' \code{\link{bms}} for creating \code{bma} objects, \code{\link{density.bma}}
#' for plotting coefficient densities
#'
#' Check \url{http://bms.zeugner.eu} for additional help.
#' @keywords utilities
#' @examples
#'
#' data(datafls)
#' mm=bms(datafls,user.int=FALSE,nmodel=100)
#'
#' #LPS for actual values under the used data (static forecast)
#' lps.bma(mm, realized.y=datafls[,1] , newdata=datafls[,-1])
#'
#' #the same result via pred.density
#' pd=pred.density(mm, newdata=datafls[,-1])
#' lps.bma(pd,realized.y=datafls[,1])
#'
#' # similarly for a linear model (not BMA)
#' zz = zlm(datafls)
#' lps.bma(zz, realized.y=datafls[,1] , newdata=datafls[,-1])
#'
#' @export
lps.bma <- function(object, realized.y, newdata=NULL) {
if (!any(class(object) %in% c("pred.density","bma","zlm"))) stop("object must be of class 'pred.density', 'bma' or 'zlm'!")
if (any(class(object) %in% c("bma","zlm"))) {
if (is.null(newdata)) stop("newdata must be provided if object is of class 'bma' or 'zlm'.")
object=pred.density(object,newdata=newdata)
}
return(object$lps(realized.y))
}
#' @export
plot.pred.density <- function(x, predict_index=NULL, addons="eslz", realized.y=NULL, addons.lwd=1.5, ...) {
if (!is(x,"pred.density")) stop("x must be of class 'pred.density'!")
x$plot(predict_index, realized.y=realized.y, addons=addons, addons.lwd=addons.lwd, ...)
}
#' @export
print.pred.density <-function(x, digits=NULL, ...) {
outmat=matrix(numeric(0),length(x$fit),2)
colnames(outmat)=c("Exp.Val.","Std.Err.")
rownames(outmat)=names(x$fit)
outmat[,1]=x$fit; outmat[,2]=x$std.err
cat("Call:\n")
print(x$call)
cat(paste("\nDensities for conditional forecast(s)\n",x$n, " data points, based on ", x$nmodel, " models;\n",sep=""))
print(outmat, digits=digits, ...)
}
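# Illustrative workflow (a sketch, not run; 'datafls' is the dataset shipped
# with this package, and the holdout rows below are chosen arbitrarily):
#   mm <- bms(datafls[1:70,], user.int=FALSE)
#   pd <- pred.density(mm, newdata=datafls[71:72, -1])
#   pd$fit                                      # point forecasts
#   pd$std.err                                  # posterior standard errors
#   pd$dyf(datafls[71:72, 1])                   # density at the realized values
#   lps.bma(pd, realized.y=datafls[71:72, 1])   # log predictive score
#   plot(pd, 1)                                 # chart the first predictive density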
#####################################
# NEW UTILITIES ######
#####################################
#' @export
deviance.bma = function(object, exact=FALSE, ...) {
#calculates (N-1)*posterior variance of a bma object = effective residual sum of squares
# also works for objects of class zlm (and in principle for lm)
#akin to method 'deviance'
if (is.bma(object)) {
xx=as.matrix(object$arguments$X.data);
ebeta = estimates.bma(object,order.by.pip=FALSE,exact=exact)[,2,drop=TRUE]
} else if (is(object,"lm")) {
xx=as.matrix(object$model)
ebeta = coef(object); if (length(ebeta)==ncol(xx) ) ebeta=ebeta[-1]
} else stop("Required input is an object of class 'bma' or 'lm'/'zlm'.")
xx =xx - matrix(colMeans(xx), nrow(xx), ncol(xx),byrow=TRUE)
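# explained sum of squares as E(beta)'X'y on demeaned data; the deviance is
# then y'y - ESS, i.e. the effective residual sum of squares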
ess=as.vector(crossprod(ebeta,as.vector(crossprod(xx[,-1,drop=FALSE],xx[,1]))))
return((as.vector(crossprod(xx[,1,drop=TRUE]))-ess))
}
#' @export
deviance.zlm=function(object, ...) deviance.bma(object)
#' @export
model.frame.bma = function(formula, ...) {
#akin to method 'model.frame'
if (!is.bma(formula)) stop("argument 'formula' needs to be a bma object")
return(as.data.frame(formula$arguments$X.data))
}
#' Variable names and design matrix
#'
#' Simple utilities retrieving variable names and design matrix from a bma
#' object
#'
#' All functions are \code{bma}-functions for the generic methods
#' \code{\link{variable.names}}, \code{\link{deviance}}, and
#' \code{\link{model.frame}}.
#'
#' @aliases variable.names.bma model.frame.bma
#' @param object A \code{bma} object (as produced by \code{\link{bms}})
#' @param ... further arguments passed to or from other methods
#'
#' @seealso \code{\link{bms}} for creating bma objects
#'
#' Check \url{http://bms.zeugner.eu} for additional help.
#' @keywords utilities
#' @examples
#'
#' data(datafls)
#' bma_enum=bms(datafls[1:20,1:10])
#'
#' model.frame(bma_enum) # similar to
#' bma_enum$arguments$X.data
#'
#' variable.names(bma_enum)[-1] # is equivalent to
#' bma_enum$reg.names
#'
#' @export
variable.names.bma = function(object, ...) {
#akin to method 'variable.names'
if (!is.bma(object)) stop("argument 'object' needs to be a bma object")
return(c("(Intercept)", object$reg.names))
}
#' Variable names and design matrix
#'
#' Simple utilities retrieving variable names and design matrix from a bma
#' object
#'
#' \code{variable.names.zlm}: method \code{\link{variable.names}} for a
#' \code{\link{zlm}} model. \cr \code{vcov.zlm}: the posterior
#' variance-covariance matrix of the coefficients of a \code{\link{zlm}} model
#' - cf. \code{\link{vcov}} \cr \code{logLik.zlm}: a \code{\link{zlm}} model's
#' log-likelihood \code{p(y|M)} according to the implementation of the
#' respective coefficient prior \cr
#'
#' @aliases variable.names.zlm vcov.zlm logLik.zlm
#' @param object A \code{bma} object (as produced by \code{\link{bms}})
#' @param ... further arguments passed to or from other methods
#'
#' @seealso \code{\link{zlm}} for creating \code{zlm} objects
#'
#' Check \url{http://bms.zeugner.eu} for additional help.
#' @keywords utilities
#' @examples
#'
#' data(datafls)
#'
#' zz=zlm(datafls)
#' variable.names(zz)
#' vcov(zz)
#' logLik(zz)
#'
#' @export
variable.names.zlm = function(object, ...) {
#akin to method 'variable.names'
if (!is(object,"zlm")) stop("argument 'object' needs to be a zlm object")
return(names(object$coefficients))
}
#' @export
logLik.zlm = function(object,...) {
#marginal likelihood of a 'zlm' model, akin to method 'logLik'
if (!is(object,"zlm")) stop("argument 'object' needs to be a zlm object")
ret = object$marg.lik
attr(ret, "df") = object$rank+1
attr(ret, "nbobs") = object$rank+object$df.residual
class(ret)="logLik"
return(ret)
}
#' @export
vcov.zlm = function(object, include.const = FALSE, ...) {
#akin to vcov.lm
#get initial stuff
Xmat=as.matrix(model.frame(object)[,-1,drop=FALSE]);
if (ncol(Xmat)<1) stop("Needs at least one non-constant regressor")
regnames= colnames(Xmat)
Xmat=Xmat-matrix(colMeans(Xmat),nrow(Xmat),ncol(Xmat),byrow=TRUE)
#complement the diagonal VCOV with off-diagonal elements, which are proportional to OLS-VCOV
xxinv=chol2inv(chol(crossprod(Xmat)))
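# scale (X'X)^(-1) by the posterior variance of the first slope coefficient
# over its corresponding (X'X)^(-1) element: since the posterior VCOV is
# taken as proportional to (X'X)^(-1), this pins down the whole matrix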
outmat=((object$coef2moments[[2]]-object$coefficients[[2]]^2)/xxinv[[1]]) * xxinv
if (include.const) {
outmat=rbind(rep(NA,nrow(outmat)+1), cbind(rep(NA,ncol(outmat)),outmat))
regnames=c("(Intercept)", regnames)
}
colnames(outmat)<-rownames(outmat) <- regnames
return(outmat)
}
#' Posterior Variance and Deviance
#'
#' Returns posterior residual variance, deviance, or pseudo R-squared,
#' according to the chosen prior structure
#'
#' \code{post.var}: Posterior residual variance as according to the prior
#' definitions contained in \code{object} \cr \code{post.pr2}: A
#' pseudo-R-squared corresponding to unity minus posterior variance over
#' dependent variance. \cr \code{deviance.bma}: returns the
#' \code{\link{deviance}} of a \code{bma} model as returned from
#' \code{\link{bms}}. \cr \code{deviance.zlm}: returns the
#' \code{\link{deviance}} of a \code{\link{zlm}} model.
#'
#' @aliases post.var post.pr2 deviance.bma deviance.zlm
#' @param object A \code{bma} object (as produced by \code{\link{bms}}) or a
#' \code{\link{zlm}} object.
#' @param exact When \code{exact=FALSE}, then \code{deviance} will be based on
#' MCMC frequencies, if \code{exact=TRUE} then it will be based on\cr
#' analytical posterior model probabilities - cf. argument \code{exact} in
#' \code{\link{coef.bma}}.
#' @param ... further arguments passed to or from other methods
#'
#' @seealso \code{\link{bms}} for creating \code{bma} objects and priors,
#' \code{\link{zlm}} for creating \code{zlm} objects.
#'
#' Check \url{http://bms.zeugner.eu} for additional help.
#' @keywords utilities
#' @examples
#'
#' data(datafls)
#'
#' mm=bms(datafls[,1:10])
#' deviance(mm)/nrow(datafls) # is equivalent to
#' post.var(mm)
#'
#' post.pr2(mm) # is equivalent to
#' 1 - post.var(mm) / ( var(datafls[,1])*(1-1/nrow(datafls)) )
#'
#' @export
post.var= function(object,exact=FALSE,...) {
#calculates the expected posterior residual variance based on the effective residual sum of squares, for objects of class 'bma', 'zlm', 'lm', ...
if (!(is.bma(object) | is(object,"lm"))) stop("Required input is an object of class 'bma' or 'lm'/'zlm'.")
od=deviance(object, exact=exact)
oy=model.frame(object)[,1,drop=TRUE]
ret=od/length(oy)
attr(ret,"nobs") = length(oy)
return(ret)
}
#' @export
post.pr2= function(object,exact=FALSE) {
#calculates a pseudo-R-squared based on effective Residual sum of squares, for objects of class 'bma', 'zlm', 'lm', ...
if (!(is.bma(object) | is(object,"lm"))) stop("Required input is an object of class 'bma' or 'lm'/'zlm'.")
od=deviance(object, exact=exact)
oy=model.frame(object)[,1,drop=TRUE]
return(1-(od/crossprod(oy-mean(oy))[[1]]))
}
######################################
# Bayesian Model Averaging Program #
######################################
# This version: adjusted on 2011-05-05
# Martin Feldkircher
# [email protected], http://feldkircher.gzpace.net
# Stefan Zeugner
# [email protected], http://www.zeugner.eu
#####################
# The main code starts at line 169 with the function "bms=function(....)" and is written
# by Martin Feldkircher and Stefan Zeugner as part of their work at the Institute for Advanced Studies (IHS),
# Vienna in 2006/2007. Descriptions of the algorithms and priors used can be found in
# Gary Koop ("Bayesian Econometrics", Wiley & Sons), Fernandez, C., E. Ley and M.F.J. Steel (2001b)
# "Model Uncertainty in Cross-Country Growth Regressions," Journal of Applied Econometrics,
# Fernandez, C., E. Ley and M.F.J. Steel (2001a) "Benchmark Priors for Bayesian Model Averaging,"
# Journal of Econometrics, 100: 381-427, and
# Liang, F., R. Paulo, G. Molina, M.A. Clyde, and J.O. Berger (2008): "Mixtures of g Priors for Bayesian Variable Selection",
# Journal of the American Statistical Association, 103: 410-423
####################
# USAGE #
###################################################################################################
# bms <-function(X.data,burn=1000,iter=NA,nmodel=500,mcmc="bd",g="UIP",mprior="random",mprior.size=NA,user.int=TRUE,
#                start.value=NA,g.stats=TRUE,logfile=FALSE,logstep=10000,force.full.ols=FALSE,fixed.reg=numeric(0))
#
######################
# FUNCTION ARGUMENTS #
###################################################################################################
#X.data data.frame or matrix: submit a data frame or a matrix, where the first column corresponds to the dependent variable
# followed by the covariates; including a constant term is not necessary since y and X are
# demeaned automatically.
#burn integer >=0: is the number of burn-in draws, default 1000 (not taken into account if mcmc="enumerate")
#iter integer >=0: is the number of posterior draws, default 3000 (not taken into account if mcmc="enumerate");
# if mcmc="enumerate", then iter is the number of models to be sampled, starting from 0 (default 2^K-1) - cf. start.value
#nmodel integer >=0: is the number of best models for which information is stored. Convergence analysis of
# the sampler by means of the correlation between analytic posterior model probabilities
# (pmp's) and the pmp's based on MCMC frequencies relies on the number you have set in nmodel. Also
# if you want to save the regression coefficients (beta.save=T), they are taken from the
# nmodel best models. Setting nmodel=500 slows down the MCMC sampler. Note that posterior
# inclusion probabilities and mean calculations are based on the MCMC frequencies as opposed
# to exact analytical calculations (as in Fernandez, Ley and Steel).
# Set nmodel=0 to speed up sampling (if topmodel information is not needed)
#mcmc character: * MC3 samplers: default is "bd" which corresponds to a birth / death MCMC algorithm. You can choose
# also "rev.jump" where we have implemented a true reversible jump algorithm adding a "move" step to the birth / death steps from "bd".
# * enumeration sampler: moreover there is mcmc="enumerate"/"enum" which will enumerate all possible regressor combinations (not advisable for K>23)
# * interaction sampler: adding an ".int" to an MC3 sampler (e.g. mcmc="bd.int") does an interaction sampler
# interaction terms will only be sampled along with their component variables; in the column names of X.data interaction terms need to be
# denominated by names consisting of the base terms separated by "#" (e.g. an interaction term of base variables "A", "B" and "C" needs column name "A#B#C")
#g character or integer: is the hyperparameter on Zellner's g-prior for the regression coefficients. You can specify g="UIP", corresponding to g=N (default)
# g="bric" corresponding to the benchmark prior suggestion from FLS (2001), i.e g=max(N, K^2)
# with K denoting the total number of regressors and N the number of observations, g="EBL" estimates a
# local empirical Bayes g-parameter (as in Liang et al. (2008), JASA). g="hyper" takes the 'hyper-g'
# prior distribution (As in Liang et al.) with the default hyper-parameter a=3; This hyperparameter can
# be adjusted (between 2<a<=4) by setting g="hyper=2.9", for instance.
#mprior regards the prior on model size. It can be either "random" (default), "fixed", "uniform", "customk", or "pip" and is based on the
# working paper "On the Effect of Prior Assumptions in Bayesian Model Averaging with Applications
# to Growth Regression", by Ley and Steel (2008). Mprior denotes the a priori inclusion probability
# of a regressor. Their suggestion is to use a binomial-beta hyperprior on mprior (i.e. mprior="random")
# in order to be noninformative on model size. You can use mprior="fixed" if you have strong prior
# information on model size. mprior="uniform" employs the uniform model prior;
# mprior="customk" allows for custom model size priors (cf. mprior.size)
# mprior="pip" allows for custom prior inclusion probabilities (cf mprior.size)
# Note that the prior on models with more than N-3 regressors is automatically zero; these models will not be sampled.
#mprior.size corresponds to the expected value of the model size prior (default K/2). For mprior=random there is little
# impact on results by varying mprior.size. For a fixed (i.e. informative) model prior this is a
# sensible choice.
# if mprior="customk" then a custom model size prior can be provided as K+1 vector detailing the priors from model size 0 to K
# (e.g. rep(1,K+1) for the uniform model prior)
# if mprior="pip" then custom prior inclusion probabilities can be provided as a vector of size K, with elements in the interval (0,1]
#user.int 'interactive mode': prints out results to the command line after ending the routine and does two charts
#start.value specifies the starting model. You can choose either a specific model by the corresponding
# column indices (e.g. start.value=numeric(K) starts from the null model including
# solely a constant term) or you set a number (e.g. start.value=20). In the latter case
# 20 covariates are chosen randomly and the starting model is identified by those regressors
# with an OLS t-statistic>0.2.
# The default value start.value=NA corresponds to start.value=min(nrow(X)-2,ncol(X),nrow(X)-3)
# start.value=0 or start.value=NULL starts from the null model
# * if mcmc="enumerate" then start.value is the index to start the iteration (default=0). Any number between 0 and 2^K-1 is admissible
#g.stats TRUE or FALSE whether statistics on the shrinkage factor should be collected. default=TRUE
# set g.stats=FALSE for faster iteration
#logfile setting logfile=TRUE produces a logfile named "test.log" in your current working directory,
# in order to keep track of the sampling procedure.
# setting logfile equal to some filepath (like logfile="subfolder/bla.txt") puts the logfile
# into that specified position. (default: logfile=FALSE)
# Note that logfile="" implies log printouts on the console
#logstep specifies at which number of posterior draws information is written to the log file; default: 10 000 iterations
#force.full.ols default FALSE. If force.full.ols=TRUE, the ols estimation part of the sampling procedure relies on slower matrix inversion,
# instead of streamlined routines. force.full.ols=TRUE can slow down sampling but may deal better with highly collinear data
#beta.save obsolete
#int obsolete, see "mcmc"
#exact obsolete: see estimates.bma()
#ask.set obsolete, with no replacement
#printRes obsolete; see user.int
#return.g.stats renamed into 'g.stats'
#theta renamed into 'mprior'
#prior.msize renamed into 'mprior.size'
#
################
# OUTPUT #
###################################################################################################
# A call to bms returns a list/"bma" object with the following elements:
#info a list containing some posterior statistics. it has the following elements
# iter (numeric(1)) The number of iteration runs in the sampler (without burn-ins)
# burn (numeric(1)) The number of burn-in iterations
# inccount (numeric(K)) The (weighted) cumulative sum of the binary vector with regressor inclusions
# models.visited (numeric(1)) the number of model candidates that have been accepted (including burn-ins)
# b1mo (numeric(K)) the (weighted) cumulative sum of first posterior moment of beta: Sum p(M)E(beta|X,y,M)
# b2mo (numeric(K)) the (weighted) cumulative sum of second posterior moment of beta: Sum p(M)E(beta^2|X,y,M)
# add.otherstats (numeric(0 or some integer)) cumulative sum of some additional statistics (such as posterior moments of g)
# cumsumweights (numeric(1)) the denominator to turn cumulative sums into (weighted) averages
# K (numeric(1)) number of covariates in X.data
# N (numeric(1)) number of observations in X.data
# corr.pmp (numeric(1)) correlation between analytical and MCMC frequencies for the best nmodel models; if mcmc="enum", then this is NA
# msize (numeric(1)) the cumulative sum of model sizes
# timed (difftime) time taken for the main sampling routine in seconds
# k.vec (numeric(K+1)) cumulative sum of the different model sizes, from zero to K
# cons (numeric(1)) scalar value of E(constant|X,y), same as BMAOBJECT$cons
# pos.sign (numeric(K)) cumulative sum of the coefficients>0, from 1 to K
# arguments named list with the all the function arguments to the function bms, after being adjusted for inconsistencies
# topmod object/list containing the best nmodel models; for more information see help on function .top10
# start.pos (numeric(<=K)) the indexes of covariates in the actual starting model, that started the MCMC sampling chain
# gprior.info (list) A list containing information on the gprior: its type ("BRIC", "EBL", "hyper" or "numeric"), whether it is constant, and its
# first moment (in case of the hyper-prior, also the second moment)
# X.data (data.frame) the data in the bms function argument X.data; the same as BMAOBJECT$arguments$X.data, retained for backward compatibility
# reg.names (character(K)) the covariates' column names (generic ones, if no names were provided with X.data)
# bms.call (call) the function call to bms() as it was entered into the command line (regularized by match.call())
#
###############################
# Deprecated output elements #
###################################################################################################
# in order to convert your bma object into a bma object of versions before this version: 20090612,
# use the function as.oldbma(BMAOBJECT)
# ----- The following are deprecated elements of BMAOBJECT =bms(...) -------
# estimates use estimates.bma(BMAOBJECT)
# estimates when exact=TRUE: use estimates.bma(BMAOBJECT, exact=TRUE)
# info use info.bma(BMAOBJECT)
# topmodels use topmodels.bma(BMAOBJECT)
# beta.draws use beta.draws.bma(BMAOBJECT)
# pmp.10 use pmp.bma(BMAOBJECT)
#
#
#
###### BMS MAIN FUNCTION #########################
#' Bayesian Model Sampling and Averaging
#'
#' Given data and prior information, this function samples all possible model
#' combinations via MC3 or enumeration and returns aggregate results.
#'
#' Ad \code{mcmc}: \cr Interaction sampler: adding an ".int" to an MC3 sampler
#' (e.g. "mcmc="bd.int") provides for special treatment of interaction terms.
#' Interaction terms will only be sampled along with their component variables:
#' In the column names of X.data, interaction terms need to be denominated by
#' names consisting of the base terms separated by \code{#} (e.g. an
#' interaction term of base variables \code{"A"}, \code{"B"} and \code{"C"}
#' needs column name \code{"A#B#C"}). Then variable \code{"A#B#C"} will only be
#' included in a model if all of the component variables ("A", "B", and "C")
#' are included.
#'
#' The MC3 samplers "\code{bd}", "\code{rev.jump}", "\code{bd.int}" and
#' "\code{rev.jump.int}", iterate away from a starting model by adding,
#' dropping or swapping (only in the case of rev.jump) covariates.
#'
#' In an MCMC fashion, they thus randomly draw a candidate model and then move
#' to it in case its marginal likelihood (marg.lik.) is superior to the
#' marg.lik. of the current model.
#'
#' In case the candidate's marg.lik is inferior, it is randomly accepted or
#' rejected according to a probability formed by the ratio of candidate
#' marg.lik over current marg.lik. Over time, the sampler should thus converge
#' to a sensible distribution. For aggregate results based on these MC3
#' frequencies, the first few iterations are typically disregarded (the
#' 'burn-ins').
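#' In the simplest case, the acceptance probability described above amounts to
#' \eqn{min(1, p(y|M')/p(y|M))} for a candidate model \eqn{M'} against the
#' current model \eqn{M}; differing model priors enter the odds analogously.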
#'
#' Ad \code{g} and the hyper-g prior: The hyper-g prior introduced by Liang et
#' al. (2008) puts a prior distribution on the shrinkage factor \eqn{g/(1+g)},
#' namely a Beta distribution \eqn{Beta(1, a/2-1)} that is governed by the
#' parameter \eqn{a}. \eqn{a=4} means a uniform prior distribution of the
#' shrinkage factor, while values of \eqn{a} close to 2 concentrate the prior
#' shrinkage factor close to one. \cr The prior expected value is
#' \eqn{E(g/(1+g)) = 2/a}. In this sense \code{g="hyper=UIP"} and
#' \code{g="hyper=BRIC"} set the prior expected shrinkage such that it conforms
#' to a fixed UIP-g (\eqn{g=N}) or BRIC-g (\eqn{g=max(K^2,N)}).
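#' Matching the prior mean to UIP, for instance, means solving
#' \eqn{2/a = N/(N+1)} for \eqn{a}, i.e. setting \eqn{a = 2 + 2/N}.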
#'
#'
#' @param X.data a data frame or a matrix, with the dependent variable in the
#' first column, followed by the covariates (alternatively, \code{X.data} can
#' also be provided as a \code{\link{formula}}). Note that \code{bms}
#' automatically estimates a constant, therefore including constant terms is
#' not necessary.
#' @param burn The (positive integer) number of burn-in draws for the MC3
#' sampler, defaults to 1000. (Not taken into account if mcmc="enumerate")
#' @param iter If mcmc is set to an MC3 sampler, then this is the number of
#' iteration draws to be sampled (ex burn-ins), default 3000 draws. \cr If
#' \code{mcmc="enumerate"}, then iter is the number of models to be sampled,
#' starting from 0 (defaults to \eqn{2^K-1}) - cf. \code{start.value}.
#' @param nmodel the number of best models for which information is stored
#' (default 500). Best models are used for convergence analysis between
#' likelihoods and MCMC frequencies, as well as likelihood-based inference.\cr
#' Note that a very high value for \code{nmodel} slows down the sampler
#' significantly. Set nmodel=0 to speed up sampling (if best model information
#' is not needed).
#' @param mcmc a character denoting the model sampler to be used.\cr The MC3
#' sampler \code{mcmc="bd"} corresponds to a birth/death MCMC algorithm.
#' \code{mcmc="rev.jump"} enacts a reversible jump algorithm adding a "swap"
#' step to the birth / death steps from "bd".\cr Alternatively, the entire
#' model space may be fully enumerated by setting \code{mcmc="enumerate"} which
#' will iterate all possible regressor combinations (Note: consider that this
#' means \eqn{2^K} iterations, where K is the number of covariates.)\cr Default
#' is full enumeration (\code{mcmc="enumerate"}) with fewer than 15 covariates,
#' and the birth-death MC3 sampler (\code{mcmc="bd"}) with 15 covariates or
#' more. Cf. section 'Details' for more options.
#' @param g the hyperparameter on Zellner's g-prior for the regression
#' coefficients.\cr \code{g="UIP"} corresponds to \eqn{g=N}, the number of
#' observations (default);\cr \code{g="BRIC"} corresponds to the benchmark
#' prior suggested by Fernandez, Ley and Steel (2001), i.e \eqn{g=max(N, K^2)},
#' where K is the total number of covariates;\cr \code{g="RIC"} sets
#' \eqn{g=K^2} and conforms to the risk inflation criterion by George and
#' Foster (1994)\cr \code{g="HQ"} sets \eqn{g=log(N)^3} and asymptotically
#' mimics the Hannan-Quinn criterion with \eqn{C_{HQ}=3} (cf. Fernandez, Ley
#' and Steel, 2001, p.395)\cr \code{g="EBL"} estimates a local empirical Bayes
#' g-parameter (as in Liang et al. (2008));\cr \code{g="hyper"} takes the
#' 'hyper-g' prior distribution (as in Liang et al., 2008) with the default
#' hyper-parameter \eqn{a} set such that the prior expected shrinkage factor
#' conforms to 'UIP';\cr This hyperparameter \eqn{a} can be adjusted (between
#' \eqn{2<a<=4}) by setting \code{g="hyper=2.9"}, for instance.\cr
#' Alternatively, \code{g="hyper=UIP"} sets the prior expected value of the
#' shrinkage factor equal to that of UIP (default), \code{g="hyper=BRIC"} sets
#' it according to BRIC \cr cf. section 'Details' for more on the hyper-g prior
#' @param mprior a character denoting the model prior choice, defaulting to
#' "random":\cr \code{mprior="fixed"} denotes fixed common prior inclusion
#' probabilities for each regressor as e.g. in Sala-i-Martin, Doppelhofer, and
#' Miller (2004) - for their fine-tuning, cf. \code{mprior.size}. Preferable to
#' \code{mprior="random"} if strong prior information on model size exists;\cr
#' \code{mprior="random"} (default) triggers the 'random theta' prior by Ley
#' and Steel (2008), who suggest a binomial-beta hyperprior on the a priori
#' inclusion probability;\cr \code{mprior="uniform"} employs the uniform model
#' prior;\cr \code{mprior="customk"} allows for custom model size priors (cf.
#' \code{mprior.size});\cr \code{mprior="pip"} allows for custom prior
#' inclusion probabilities (cf. \code{mprior.size});\cr Note that the prior on
#' models with more than N-3 regressors is automatically zero: these models
#' will not be sampled.
#' @param mprior.size if \code{mprior} is "fixed" or "random",
#' \code{mprior.size} is a scalar that denotes the prior expected value of the
#' model size prior (default K/2).\cr If \code{mprior="customk"} then a custom
#' model size prior can be provided as a K+1 vector detailing the priors from
#' model size 0 to K (e.g. rep(1,K+1) for the uniform model prior);\cr if
#' \code{mprior="pip"}, then custom prior inclusion probabilities can be
#' provided as a vector of size K, with elements in the interval (0,1)
#' @param user.int 'interactive mode': print out results to console after
#' ending the routine and plots a chart (default TRUE).
#' @param start.value specifies the starting model of the iteration chain. For
#' instance a specific model by the corresponding column indices (e.g.
#' start.value=numeric(K) starts from the null model including solely a
#' constant term) or \code{start.value=c(3,6)} for a starting model only
#' including covariates 3 and 6.\cr If \code{start.value} is set to an integer
#' (e.g. \code{start.value=15}) then that number of covariates (here: 15
#' covariates) is randomly chosen and the starting model is identified by those
#' regressors with an OLS t-statistic>0.2.\cr The default value
#' \code{start.value=NA} corresponds to
#' \code{start.value=min(ncol(X.data),nrow(X.data)-3)}. Note that
#' \code{start.value=0} or \code{start.value=NULL} starts from the null
#' model.\cr If \code{mcmc="enumerate"} then \code{start.value} is the index to
#' start the iteration (default: 0, the null model) . Any number between 0 and
#' \eqn{2^K-1} is admissible.
#' @param g.stats \code{TRUE} if statistics on the shrinkage factor g/(1+g)
#' should be collected, defaulting to TRUE (Note: set \code{g.stats=FALSE} for
#' faster iteration.)
#' @param logfile setting \code{logfile=TRUE} produces a logfile named
#' \code{"test.log"} in your current working directory, in order to keep track
#' of the sampling procedure. \code{logfile} equal to some filepath (like
#' \code{logfile="subfolder/log.txt"}) puts the logfile into that specified
#' position. (default: \code{logfile=FALSE}). Note that \code{logfile=""}
#' implies log printouts on the console.
#' @param logstep specifies at which number of posterior draws information is
#' written to the log file; default: 10 000 iterations
#' @param force.full.ols default FALSE. If \code{force.full.ols=TRUE}, the OLS
#' estimation part of the sampling procedure relies on slower matrix inversion,
#' instead of streamlined routines. \code{force.full.ols=TRUE} can slow down
#' sampling but may deal better with highly collinear data
#' @param fixed.reg indices or variable names of \code{X.data} that are fixed
#' regressors to be always included in every sampled model. Note: the parameter
#' \code{mprior.size} refers to prior model size including these fixed
#' regressors.
#' @return A list of class \code{bma}, that may be displayed using e.g.
#' \code{\link{summary.bma}} or \code{\link{coef.bma}}. The list contains the
#' following elements: \item{info}{a list of aggregate statistics: \code{iter}
#' is the number of iterations, \code{burn} the number of burn-ins.\cr The
#' following have to be divided by \code{cumsumweights} to get posterior
#' expected values: \code{inccount} are the posterior inclusion probabilities,
#' \code{b1mo} and \code{b2mo} the first and second moment of coefficients,
#' \code{add.otherstats} other statistics of interest (typically the moments of
#' the shrinkage factor), \code{msize} is the post. expected model size,
#' \code{k.vec} the posterior model size distribution, \code{pos.sign} the
#' unconditional post. probability of positive coefficients, \code{corr.pmp} is
#' the correlation between the best models' MCMC frequencies and their marg.
#' likelihoods.\cr \code{timed} is the time that was needed for MCMC sampling,
#' \code{cons} is the posterior expected value of the constant. \code{K} and
#' \code{N} are the maximum number of covariates and the sample size,
#' respectively.} \item{arguments}{a list of the evaluated function arguments
#' provided to \code{bms} (see above)} \item{topmod}{a 'topmod' object
#' containing the best drawn models. see \code{\link{topmod}} for more details}
#' \item{start.pos}{the positions of the starting model. If bmao is a 'bma'
#' object this corresponds to covariates bmao$reg.names[bmao$start.pos]. If
#' bmao is a chain that resulted from several starting models (cf.
#' \code{\link{c.bma}}), then \code{start.pos} is a list detailing all of them.}
#' \item{gprior.info}{a list of class \code{\link{gprior-class}}, detailing
#' information on the g-prior: \code{gtype} corresponds to argument \code{g}
#' above, \code{is.constant} is FALSE if \code{gtype} is either "hyper" or
#' "EBL", \code{return.g.stats} corresponds to argument \code{g.stats} above,
#' \code{shrinkage.moments} contains the first and second moments of the
#' shrinkage factor (only if \code{return.g.stats==TRUE}), \code{g} details the
#' fixed g (if \code{is.constant==TRUE}), \code{hyper.parameter} corresponds to
#' the hyper-g parameter \eqn{a} as in Liang et al. (2008) }
#' \item{mprior.info}{a list of class \code{\link{mprior-class}}, detailing
#' information on the model prior: \code{origargs} lists the original arguments
#' to \code{mprior} and \code{mprior.size} above; \code{mp.msize} denotes the
#' prior mode size; \code{mp.Kdist} is a (K+1) vector with the prior model size
#' distribution from 0 to K} \item{X.data}{data.frame or matrix: corresponds to
#' argument \code{X.data} above, possibly cleaned for NAs}
#' \item{reg.names}{character vector: the covariate names to be used for X.data
#' (corresponds to \code{\link{variable.names.bma}} } \item{bms.call}{the
#' original call to the \code{bms} function}
#' @note There are several ways to speed-up sampling: \code{nmodel=10} saves
#' only the ten best models, at most a marginal improvement. \code{nmodels=0}
#' does not save the best (500) models, however then posterior convergence and
#' likelihood-based inference are not possible. %\code{beta.save=FALSE} saves
#' the best models, but not their coefficients, which renders the use of
#' \code{image.bma} and the parameter \code{exact=TRUE} in functions such as
#' \code{coef.bma} infeasible. \code{g.stats=FALSE} saves some time by not
#' retaining the shrinkage factors for the MC3 chain (and the best models).
#' \code{force.full.ols=TRUE}, in contrast, slows sampling down significantly
#' if \code{mcmc="enumerate"}.
#' @section Theoretical background: The models analyzed are Bayesian
#' normal-gamma conjugate models with improper constant and variance priors
#' akin to Fernandez, Ley and Steel (2001): A model \eqn{M} can be described as
#' follows, with \eqn{\epsilon \sim N(0,\sigma^2 I)}:
#' \deqn{y = \alpha + X \beta + \epsilon}
#' \deqn{f(\beta | \sigma, M, g) \sim N(0, g \sigma^2 (X'X)^{-1})}
#'
#' Moreover, the (improper) prior on the constant \eqn{f(\alpha)} is put
#' proportional to 1. Similarly, the variance prior \eqn{f(\sigma)} is
#' proportional to \eqn{1/\sigma}.
#' @author Martin Feldkircher, Paul Hofmarcher, and Stefan Zeugner
#' @seealso \code{\link{coef.bma}}, \code{\link{plotModelsize}} and
#' \code{\link{density.bma}} for some operations on the resulting 'bma' object,
#' \code{\link{c.bma}} for integrating separate MC3 chains and splitting of
#' sampling over several runs.
#'
#' Check \url{http://bms.zeugner.eu} for additional help.
#' @references
#' \url{http://bms.zeugner.eu}: BMS package homepage with help and tutorials
#'
#' Feldkircher, M. and S. Zeugner (2015): Bayesian Model Averaging Employing
#' Fixed and Flexible Priors: The BMS Package for R, Journal of Statistical Software 68(4).
#'
#' Feldkircher, M. and S. Zeugner (2009): Benchmark Priors
#' Revisited: On Adaptive Shrinkage and the Supermodel Effect in Bayesian Model
#' Averaging, IMF Working Paper 09/202.
#'
#' Fernandez, C. E. Ley and M. Steel (2001): Benchmark priors for Bayesian
#' model averaging. Journal of Econometrics 100(2), 381--427
#'
#' Ley, E. and M. Steel (2008): On the Effect of Prior Assumptions in Bayesian
#' Model Averaging with Applications to Growth Regressions. working paper
#'
#' Liang, F., Paulo, R., Molina, G., Clyde, M. A., and Berger, J. O. (2008).
#' Mixtures of g Priors for Bayesian Variable Selection. Journal of the
#' American Statistical Association 103, 410-423.
#'
#' Sala-i-Martin, X. and G. Doppelhofer and R.I. Miller (2004): Determinants of
#' long-term growth: a Bayesian averaging of classical estimates (BACE)
#' approach. American Economic Review 94(4), 813--835
#' @keywords models
#' @examples
#'
#' data(datafls)
#' #estimating a standard MC3 chain with 1000 burn-ins and 2000 iterations and uniform model priors
#' bma1 = bms(datafls,burn=1000, iter=2000, mprior="uniform")
#'
#' ##standard coefficients based on exact likelihoods of the 100 best models:
#' coef(bma1,exact=TRUE, std.coefs=TRUE)
#'
#' #suppressing user-interactive output, using a customized starting value, and not saving the best
#' # ...models for only 20 observations (but 41 covariates)
#' bma2 = bms(datafls[20:39,],burn=1000, iter=2000, nmodel=0, start.value=c(1,4,7,30),
#' user.int=FALSE)
#' coef(bma2)
#'
#' #MC3 chain with a hyper-g prior (custom coefficient a=2.1), saving only the 20 best models,
#' # ...and an alternative sampling procedure; putting a log entry to console every 1000th step
#' bma3 = bms(datafls,burn=1000, iter=5000, nmodel=20, g="hyper=2.1", mcmc="rev.jump",
#' logfile="",logstep=1000)
#' image(bma3) #showing the coefficient signs of the 20 best models
#'
#' #enumerating with 10 covariates (= 1024 models), keeping the shrinkage factors
#' # ...of the best 200 models
#' bma4 = bms(datafls[,1:11],mcmc="enumerate",nmodel=200,g.stats=TRUE)
#'
#' #using an interaction sampler for two interaction terms
#' dataint=datafls
#' dataint=cbind(datafls,datafls$LifeExp*datafls$Abslat/1000,
#' datafls$Protestants*datafls$Brit*datafls$Muslim)
#' names(dataint)[ncol(dataint)-1]="LifeExp#Abslat"
#' names(dataint)[ncol(dataint)]="Protestants#Brit#Muslim"
#' bma5 = bms(X.data=dataint,burn=1000,iter=9000,start.value=0,mcmc="bd.int")
#'
#' density(bma5,reg="English") # plot posterior density for covariate "English"
#'
#' # a matrix as X.data argument
#' bms(matrix(rnorm(1000),100,10))
#'
#' # keeping a set of fixed regressors:
#' bms(datafls, mprior.size=7, fixed.reg = c("PrScEnroll", "LifeExp", "GDP60"))
#' # Note that mprior.size=7 means a prior model size of 3 fixed plus 4 'uncertain' regressors
#'
#' @export
bms <-function(X.data,burn=1000,iter=NA,nmodel=500,mcmc="bd",g="UIP",mprior="random",mprior.size=NA,user.int=TRUE,
start.value=NA,g.stats=TRUE,logfile=FALSE,logstep=10000,force.full.ols=FALSE, fixed.reg=numeric(0)) {
# beta.save=TRUE,exact=NA,int=NA,printRes=NA,ask.set=NA,return.g.stats=NA,theta=NULL,prior.msize=NULL #deprecated function arguments, retained for compatibility with older versions
### getting data dimensions ####################
if (class(X.data)[[1]]=="formula") { X.data=stats::model.frame(X.data); if (!is.null(ncol(X.data[[2]]))) X.data=cbind(X.data[[1]],X.data[[2]][,-1]) }
if (any(is.na(X.data))) {
X.data=na.omit(X.data)
if (nrow(X.data)<3) {stop("Too few data observations. Please provide at least three data rows without NA entries.") }
warning("Argument 'X.data' contains NAs. The corresponding rows have not been taken into account.")
}
N<-nrow(X.data)
K=ncol(X.data)-1
maxk=N-3 #maximum number of admissible k per model
############################################################################################################################################
#### User Checks: ##########################################################################################################################
############################################################################################################################################
# check for deprecated arguments
# if (!is.na(exact)) { warning("Function argument 'exact' has been deprecated, please refer to function 'estimates.bma' instead.") }
# if (!is.na(int)) { mcmc=paste(mcmc,".int",sep=""); warning("Function argument 'int' has been deprecated, please add an 'int' to the argument 'mcmc' instead.") }
# if (!is.na(printRes)) { user.int=printRes; warning("Function argument 'printRes' has been deprecated, please refer to the argument 'user.int' instead.") }
# if (!is.na(ask.set)) { warning("Function argument 'ask.set' has been deprecated, with no replacement.") }
# if (!is.na(return.g.stats)) { g.stats=return.g.stats; warning("Function argument 'return.g.stats' has been renamed into 'g.stats'.") }
# if (!is.null(theta)) { mprior=theta; warning("Function argument 'theta' has been renamed into 'mprior'.") }
# if (!is.null(prior.msize)) { mprior.size=prior.msize; warning("Function argument 'prior.msize' has been renamed into 'mprior.size'.") }
# return.g.stats=g.stats; #theta=mprior; prior.msize=mprior.size
if (is.null(nmodel[1])||is.na(nmodel[1])||nmodel[1]<=0) {dotop=FALSE;nmodel=0} else {dotop=TRUE}
###########################################################################
nameix=1:K; names(nameix)=colnames(X.data[,-1,drop=FALSE]); fixed.pos=nameix[fixed.reg]; rm(nameix)
######################################################################################################################################
#assign the sampling procedure
if (missing(mcmc)&&((K-length(fixed.pos))<15)) {mcmc="enum"}
int=FALSE; is.enum=FALSE #int: whether interaction sampler is wanted; is.enum: whether the sampler is enumeration
if (is.function(mcmc)) {
samplingfun=mcmc
mcmc="custom"
} else {
if (length(grep("int",mcmc,ignore.case=TRUE))) {int=TRUE}
if (length(grep("enum",mcmc,ignore.case=TRUE))) {
is.enum=TRUE; samplingfun=.iterenum
if (K>maxk) samplingfun=.iterenum.KgtN
} else if(length(grep("bd",mcmc,ignore.case=TRUE))){
samplingfun=switch(int+1,.fls.samp,.fls.samp.int)
} else {
samplingfun=switch(int+1,.rev.jump,.rev.jump.int)
}
}
if (int&&(length(fixed.pos)>0L)) { warning("interaction sampler does not allow for non-zero argument fixed.pos; consequently it was set fixed.pos=0"); fixed.pos=numeric(0); }
sampling=.fixedset.sampler(samplingfun,fullK=K,fixed.pos=fixed.pos, X.data=X.data);
######################################################################################################################################
# specific enumeration user checks & init
if (is.enum) {
#check for a start.value index to start enumeration from somewhere in between (and not do all possible models)
burn=0; int=FALSE; mcmc="enum"; is.enum=TRUE
tmp=.enum_startend(iter=iter, start.value=start.value, K=K, maxk=maxk, fixed.pos=fixed.pos); iter=tmp$iter; start.value=tmp$start.value
} else {
if (is.na(iter)) {iter=3000}; #if no enumeration and iter not given, set to default value 3000
}
######################################################################################################################################
# generate logfile if desired
if(logfile!=FALSE){
if (is.character(logfile)) {
sfilename=logfile}
else {
sfilename="test.log"
}
if (nchar(sfilename)>0) file.create(sfilename)
logfile=TRUE
cat(as.character(Sys.time()),": starting loop ... \n",append=TRUE, file=sfilename) #write one line
if (logstep!=10000) fact=logstep else fact=max(floor((burn+iter)/100),logstep)
}
######################################################################################################################################
######################################################################################################################################
# subtract mean from all regressors as in FLS
y<-as.matrix(X.data[,1])
X<-as.matrix(X.data[,2:ncol(X.data)])
y<-y-mean(y)
X<-X-matrix(colMeans(X),N,K,byrow=TRUE)
# multiply the whole matrix stuff out before going into the simulation loops
XtX.big=crossprod(X)
Xty.big=crossprod(X,y)
yty = as.vector(crossprod(y))
# user check: whether we have to use force.full.ols
coreig=eigen(cor(X),symmetric=TRUE,only.values=TRUE)$values
if (!force.full.ols) { #this line was added due to feature requests
if (sum(coreig>1e-7)<min(K,(N-1))) { force.full.ols=TRUE }
}
if (any(coreig[1:min(K,(N-1))] <1e-16)) { warning(paste("data seems to be rank-deficient: its rank seems to be only ", sum(coreig>1e-13))) }
######################################################################################################################################
# for the case that X contains interaction terms
if(int){
if(length(grep("#",colnames(X.data),fixed=TRUE))==0) stop("Please separate column names of interaction terms by # (e.g. A#B)")
mPlus=.constr.intmat(X,K)
}
else{ mPlus<-NA }
######################################################################################################################################
#Prior Initialization
#model prior: outsourced to stupid function for cleanliness
pmplist=.choose.mprior(mprior,mprior.size,K=K,X.data=X.data,fixed.pos=fixed.pos)
mprior=pmplist$mp.mode;
#g-prior
gprior.info = .choose.gprior(g,N,K,return.g.stats=g.stats,yty=yty,X.data=X.data) # gprior.info is a list that summarizes info about the choice of the g-prior
lprobcalc = gprior.info$lprobcalc
######################################################################################################################################
#The function .starter randomly selects a start matrix and runs a
#regression. From this regression, the
#starting design matrix is formed by those regressors whose t-stats are >0.2, so we
#avoid starting from a very bad start point.
start.list=.starter(K,start.value,y,N=N,XtX.big=XtX.big,Xty.big=Xty.big,X=X,fixed.pos=fixed.pos)
molddraw=start.list$molddraw; start.position=start.list$start.position
kold=sum(molddraw)
position=(1:K)[molddraw==1]
########################################################################################################################################
################################################################################################
# Initializing #
################################################################################################
# initialize sampler-specific variables ########################################
# these are to select additional statistics (such as g)
collect.otherstats=FALSE
otherstats=numeric(0)
add.otherstats=numeric(0)
# initialize the vector for collecting the empirical shrinkage factor moments
if (gprior.info$return.g.stats & !(gprior.info$is.constant)) { add.otherstats=gprior.info$shrinkage.moments; collect.otherstats=TRUE }
cumsumweights=iter
null.lik=lprobcalc$just.loglik(ymy=yty,k=0) # calculate Likelihood for NullModel
if (K < N-3) {
mid.lik=lprobcalc$just.loglik(ymy=yty*(1-as.vector(crossprod(crossprod(chol2inv(chol(XtX.big)),Xty.big),Xty.big)/yty)),k=ceiling(K/2)) # calculate likelihood of a mid-sized reference model
} else {
mid.lik=lprobcalc$just.loglik(ymy=yty*.001,k=ceiling(K/2)) # calculate likelihood of a mid-sized reference model
}
if (!is.finite(mid.lik)) { mid.lik=sapply(as.list(seq(.1,.9,.1)),function(x) lprobcalc$just.loglik(ymy=yty*x,k=ceiling(K/2))); mid.lik=max(mid.lik[is.finite(mid.lik)]) }
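# mid.lik serves only as a scaling constant: the enumeration weights below are
# computed as exp(log-posterior - mid.lik) to keep exp() within floating-point
# range; the constant cancels after division by cumsumweights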
#adding up posterior stats has been outsourced to sub-functions for speed reasons
if (collect.otherstats) {
addup<- function() {
inccount <<- inccount + molddraw #PIPs
msize<<-msize + kold # average size of models
#for speed reasons, iterative adding with indexing should be done in one stacked vector
if (kold!=0) {
bm[c(position,K+position,2*K+kold,3*K+position)]=c(b1,b2,1,b1>0); bmo <<- bmo+bm
#bmo is partitioned: first K entries have cum. b1 ("b1mo"), second K entries have cum. b2 ("b2mo"), third K entries have model size dist ("k.vec"), and fourth K entries are like inccount for positive betas (add up pos. sign covariate selections)
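#note: the plain '=' subassignment to bm creates a local copy inside addup, so the enclosing all-zero bm is untouched and every call starts from a clean slate
#e.g. with K=2 and a model containing only regressor 2 (kold=1): bm[c(2,4,5,8)]=c(b1,b2,1,b1>0)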
} else {
null.count<<-null.count+1
}
# collect additional per-model statistics (e.g. the estimated shrinkage factor g)
otherstats<<-lik.list[["otherstats"]]; add.otherstats<<-add.otherstats + otherstats
}
} else {
addup <- function() {
inccount <<- inccount + molddraw #PIPs
msize<<-msize + kold # average size of models
#for speed reasons, iterative adding with indexing should be done in one stacked vector
if (kold!=0) {
bm[c(position,K+position,2*K+kold,3*K+position)]=c(b1,b2,1,b1>0); bmo <<- bmo+bm
#bmo is partitioned: first K entries have cum. b1 ("b1mo"), second K entries have cum. b2 ("b2mo"), third K entries have model size dist ("k.vec"), and fourth K entries are like inccount for positive betas (add up pos. sign covariate selections)
} else {
null.count<<-null.count+1
}
}
}
if (is.enum) {
cumsumweights=0
if (collect.otherstats) {
addup<- function() {
weight= exp(pmpold+lprobold-mid.lik)
inccount <<- inccount + weight*molddraw #PIPs
msize<<-msize + weight*kold # average size of models
cumsumweights<<-cumsumweights+weight #denominator to get at sum of PMPs=1
#for speed reasons, iterative adding with indexing should be done in one stacked vector
if (kold!=0) {
bm[c(position,K+position,2*K+kold,3*K+position)]=weight*c(b1,b2,1,b1>0); bmo <<- bmo+bm
#bmo is partitioned: first K entries have cum. b1 ("b1mo"), second K entries have cum. b2 ("b2mo"), third K entries have model size dist ("k.vec"), and fourth K entries are like inccount for positive betas (add up pos. sign covariate selections)
} else {
null.count<<-null.count+weight
}
otherstats<<-lik.list[["otherstats"]]; add.otherstats<<-add.otherstats + weight*otherstats
}
} else {
addup <- function() {
weight= exp(pmpold+lprobold-mid.lik)
inccount <<- inccount + weight*molddraw #PIPs
msize<<-msize + weight*kold # average size of models
cumsumweights<<-cumsumweights+weight #denominator to get at sum of PMPs=1
#for speed reasons, iterative adding with indexing should be done in one stacked vector
if (kold!=0) {
bm[c(position,K+position,2*K+kold,3*K+position)]=weight*c(b1,b2,1,b1>0); bmo <<- bmo+bm
#bmo is partitioned: first K entries have cum. b1 ("b1mo"), second K entries have cum. b2 ("b2mo"), third K entries have model size dist ("k.vec"), and fourth K entries are like inccount for positive betas (add up pos. sign covariate selections)
} else {
null.count<<-null.count+weight
}
}
}
}
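# make sure the addup closure encloses the sampler's local environment, so that its '<<-' assignments update the accumulators defined below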
environment(addup) <- environment()
##################################################################################
##initialize model variables with the starter model ###################################
ols.object=.ols.terms2(positions=(1:K)[molddraw==1],yty=yty,k=kold,N,K=K,XtX.big=XtX.big,Xty.big=Xty.big) #OLS results from starter model
lik.list=lprobcalc$lprob.all(ymy=ols.object$ymy, k=kold, bhat=ols.object$bhat, diag.inverse=ols.object$diag.inverse) #likelihood and expected values for starter model
lprobold=lik.list$lprob
b1=lik.list$b1new
b2=lik.list$b2new
## calculate the posterior model probability for the first model
pmpold=pmplist$pmp(ki=kold,mdraw=molddraw)
##################################################################################
## initialize top 10 function ####################################################
#topmods=.top10(nmaxregressors=K,nbmodel=nmodel,bbeta=FALSE,bbeta2=FALSE,lengthfixedvec=length(add.otherstats))
topmods=topmod(nbmodels=nmodel,nmaxregressors=K,bbeta=FALSE,lengthfixedvec=length(add.otherstats))
if (mcmc=="enum") { try(topmods$duplicates_possible(FALSE), silent=TRUE) }
if (dotop && (burn==0L)) topmods$addmodel(mylik=pmpold+lprobold,vec01=molddraw,fixedvec=lik.list$otherstats)
##################################################################################
## Initialize the rest ###########################################################
null.count=0 #number of times the null model has been drawn
models.visited=0 #how often a model has been accepted (in burn-ins and iterations)
inccount=numeric(K) #how often the respective covariate has been included
msize=0 #average model size
k.vec=numeric(K) #how often the respective model size has been accepted
b1mo=numeric(K) #holds aggregate first moment of all coefficients
ab=numeric(K) #Initialize them here
b2mo=numeric(K) #holds aggregate second moment of all coefficients
bb=numeric(K)
possign=inccount # the number of times the respective coefficient has been positive
mnewdraw=numeric(K) #holds the binary vector denoting the proposed model
if (force.full.ols) {candi.is.full.object=TRUE} else {candi.is.full.object=FALSE} #candi.is.full: if TRUE, standard OLS, else OLS via Frisch-Waugh tricks
bmo=numeric(4*K); bm=bmo #common placeholder for b1mo, b2mo, k.vec and possign
if (is.enum) { addup() } # in case the sampler is enumeration then count the starting value as well (no burn-ins)
if (!is.finite(pmpold)) pmpold = -1e90 # guard in case an MCMC sampler's starting model has zero prior model probability
###############################################################################################################
###############################################################################################################
#############################################################################################
set.seed(as.numeric(Sys.time())) #Set Seed randomly for number generator
t1<-Sys.time() #Save time before going into the loop
###########################################################################################
#START MAIN LOOP
###########################################################################################
nrep=burn+iter; i=0;
while(i<nrep) {
i=i+1;
if(logfile){ if (i %% fact==0) { cat(as.character(Sys.time()),":",i,"current draw \n",append=TRUE, file=sfilename)} } #write one line
##########################################################################################
#Start sampling program
###########################################################################################
#sample a model
a=sampling(molddraw=molddraw,K=K,mPlus=mPlus,maxk=maxk,oldk=kold)
mnewdraw=a[["mnewdraw"]]; positionnew=a[["positionnew"]]; knew=length(positionnew)
#calculate prior model prob
pmpnew=pmplist[["pmp"]](ki=knew,mdraw=mnewdraw) # get the (log) model prior prob
if (!is.enum) {
if (int) {if (length(c(a$dropi,a$addi))>2|i<3|force.full.ols) {candi.is.full.object=TRUE} else {candi.is.full.object=FALSE}}
#candi.is.full.object = TRUE if there were multiple regs dropped or added due to interaction terms
if (candi.is.full.object) {
ols.candidate = .ols.terms2(positions=positionnew,yty=yty,k=knew,N,K=K,XtX.big=XtX.big,Xty.big=Xty.big) #in case of changing interaction terms, draw the big OLS stuff
ymy.candi =ols.candidate[["ymy"]]
} else {
ymy.candi=ols.object[["child.ymy"]](a$addi,a$dropi,k=knew) #if standard sampling, use Frisch-Waugh to get the new ResidSS (faster)
}
if ( (ymy.candi<0) | is.na(ymy.candi) ) stop("stumbled on rank-deficient model")
lprobnew = lprobcalc[["just.loglik"]](ymy=ymy.candi,k=knew) # get the log-likelihood out of the ResidSS
#Now decide whether to accept candidate draw
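# Metropolis-Hastings step: accept with probability min(1, posterior odds of candidate vs. current model), evaluated on the log scale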
accept.candi = as.logical(log(stats::runif(1,0,1))< lprobnew-lprobold + pmpnew-pmpold)
} else {
accept.candi=TRUE
candi.is.full.object=FALSE
}
if(accept.candi){
if (!candi.is.full.object) {
#if Frisch-Waugh updating was used and the new model got accepted,
#recompute the 'real' inverse to avoid accumulating numerical errors
ols.res = ols.object[["mutate"]](addix=a$addi, dropix=a$dropi, newpos=positionnew, newk=knew)
} else {
ols.object = ols.candidate
ols.res = ols.candidate[["full.results"]]()
}
lik.list = lprobcalc[["lprob.all"]](max(0,ols.res$ymy), knew, ols.res$bhat, ols.res$diag.inverse)
lprobold=lik.list[["lprob"]]
position = positionnew
pmpold=pmpnew #get posterior odds for new model if accepted
molddraw=mnewdraw
kold=knew
models.visited=models.visited+1 #does not account for revisiting models
}
# Collect Posterior Draws
########################################################################
if (i>burn){
b1=lik.list[["b1new"]]; b2=lik.list[["b2new"]];
addup() #addup does iterative, cumulative sums of quantities of interest (betas, model size, etc.)
# add log(lik)*p(M) to topmodels
if (dotop) topmods[["addmodel"]](mylik=pmpold+lprobold,vec01=molddraw,fixedvec=otherstats)
}
}
###########################################################################################
#END MAIN LOOP
###########################################################################################
###########################################################################################
#adjust the topmod object and calculate all the betas after sampling
#similar to having set bbeta=TRUE and bbeta2=TRUE in the call to .top10 above
if (dotop) topmods=.topmod.as.bbetaT(topmods,gprior.info,X.data)
###########################################################################################
###########################################################################################
timed<-difftime(Sys.time(),t1)
# do aggregating calculations
if (is.enum) {iter=iter+1; models.visited=models.visited+1}
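# unpack the stacked accumulator bmo into its four K-vectors (cf. the addup functions above)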
bmo=matrix(bmo,4,byrow=TRUE); b1mo=bmo[1,]; b2mo=bmo[2,]; k.vec=bmo[3,]; possign=bmo[4,]; rm(bmo)
post.inf=.post.calc(gprior.info,add.otherstats,k.vec,null.count,X.data,topmods,b1mo,b2mo,iter,burn,inccount,models.visited,K,N,msize,timed,cumsumweights,mcmc,possign)
result=list(info=post.inf$info,arguments=.construct.arglist(bms, environment()),topmod=topmods,start.pos=sort(start.position),gprior.info=post.inf$gprior.info,mprior.info=pmplist,reg.names=post.inf$reg.names,bms.call=try(match.call(bms,sys.call(0)),silent=TRUE))
if (!is.null(result$X.data)) { result$X.data<-NULL }
class(result)=c("bma")
###########################################################################################
# print results to console
if(user.int){
print(result)
print(timed)
plot.bma(result) # do modelsize plot
}
return(invisible(result))
}
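# minimal usage sketch, kept as a comment (assumes the datafls example data shipped with this package):
# data(datafls)
# mm <- bms(datafls, burn = 1000, iter = 5000, nmodel = 10, mcmc = "bd", user.int = FALSE)
# coef(mm) # posterior inclusion probabilities and coefficient moments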
### source file: /scratch/gouwar.j/cran-all/cranData/BMS/R/bma.R
#' FLS (2001) growth data
#'
#' The economic growth data set from Fernandez, Ley and Steel, Journal of Applied Econometrics 2001
#'
#' @format A data frame with 72 observations on the following 42 variables.
#' \describe{
#' \item{\code{y}}{numeric: Economic growth 1960-1992 as from the Penn World Tables Rev 6.0}
#' \item{\code{Abslat}}{numeric: Absolute latitude}
#' \item{\code{Spanish}}{numeric: Spanish colony dummy}
#' \item{\code{French}}{numeric: French colony dummy}
#' \item{\code{Brit}}{numeric: British colony dummy}
#' \item{\code{WarDummy}}{numeric: War dummy}
#' \item{\code{LatAmerica}}{numeric: Latin America dummy}
#' \item{\code{SubSahara}}{numeric: Sub-Sahara dummy}
#' \item{\code{OutwarOr}}{numeric: Outward Orientation}
#' \item{\code{Area}}{numeric: Area surface}
#' \item{\code{PrScEnroll}}{numeric: Primary school enrolment}
#' \item{\code{LifeExp}}{numeric: Life expectancy}
#' \item{\code{GDP60}}{numeric: Initial GDP in 1960}
#' \item{\code{Mining}}{numeric: Fraction of GDP in mining}
#' \item{\code{EcoOrg}}{numeric: Degree of capitalism}
#' \item{\code{YrsOpen}}{numeric: Number of years having an open economy}
#' \item{\code{Age}}{numeric: Age}
#' \item{\code{Buddha}}{numeric: Fraction Buddhist}
#' \item{\code{Catholic}}{numeric: Fraction Catholic}
#' \item{\code{Confucian}}{numeric: Fraction Confucian}
#' \item{\code{EthnoL}}{numeric: Ethnolinguistic fractionalization}
#' \item{\code{Hindu}}{numeric: Fraction Hindu}
#' \item{\code{Jewish}}{numeric: Fraction Jewish}
#' \item{\code{Muslim}}{numeric: Fraction Muslim}
#' \item{\code{PrExports}}{numeric: Primary exports 1970}
#' \item{\code{Protestants}}{numeric: Fraction Protestants}
#' \item{\code{RuleofLaw}}{numeric: Rule of law}
#' \item{\code{Popg}}{numeric: Population growth}
#' \item{\code{WorkPop}}{numeric: Workers per inhabitant}
#' \item{\code{LabForce}}{numeric: Size of labor force}
#' \item{\code{HighEnroll}}{numeric: Higher education enrolment}
#' \item{\code{PublEdupct}}{numeric: Public education share}
#' \item{\code{RevnCoup}}{numeric: Revolutions and coups}
#' \item{\code{PolRights}}{numeric: Political rights}
#' \item{\code{CivlLib}}{numeric: Civil liberties}
#' \item{\code{English}}{numeric: Fraction speaking English}
#' \item{\code{Foreign}}{numeric: Fraction speaking foreign language}
#' \item{\code{RFEXDist}}{numeric: Exchange rate distortions}
#' \item{\code{EquipInv}}{numeric: Equipment investment}
#' \item{\code{NequipInv}}{numeric: Non-equipment investment}
#' \item{\code{stdBMP}}{numeric: stand. dev. of black market premium}
#' \item{\code{BlMktPm}}{numeric: black market premium}
#' }
#' @source
#' Fernandez, C., Ley, E., and Steel, M. F. (2001). Model Uncertainty in Cross-Country Growth Regressions. Journal of Applied Econometrics, 16:563-576.
#' Data set from \url{https://warwick.ac.uk/fac/sci/statistics/staff/academic-research/steel/steel_homepage/software}.
#'
#' A working paper version of Fernandez, Ley and Steel (2001) is available via \url{https://econpapers.repec.org/article/jaejapmet/v_3a16_3ay_3a2001_3ai_3a5_3ap_3a563-576.htm}.
"datafls"
### source file: /scratch/gouwar.j/cran-all/cranData/BMS/R/data.R
#' Class "topmod"
#'
#' An updateable list keeping the best x models it encounters in any kind of
#' model iteration
#'
#'
#' @name topmod-class
#' @docType class
#' @section Objects from the Class: Objects can be created by calls to
#' \code{\link{topmod}}, or indirectly by calls to \code{\link{bms}}.\cr
#'
#' A 'topmod' object (as created by \code{topmod}) holds three basic vectors:
#' \code{lik} (the (log) likelihood of models or similar), \code{bool}
#' (a hexcode representation of the model binaries, cf. \code{\link{bin2hex}}),
#' and \code{ncount} (the number of times each model has been drawn).\cr All these
#' vectors are sorted in descending order of \code{lik}, and are of the same length.
#' The maximum length is limited by the argument \code{nbmodels}.
#'
#' If \code{tmo} is a topmod object, then a call to \code{tmo$addmodel} (e.g.
#' \code{tmo$addmodel(mylik=4,vec01=c(T,F,F,T))} updates the object \code{tmo}
#' by a model represented by \code{vec01} (here the one including the first and
#' fourth regressors) and the marginal (log) likelihood \code{lik} (here: 4).\cr
#' If this model is already part of \code{tmo}, then its respective
#' \code{ncount} entry is incremented by one; else it is inserted into a
#' position according to the ranking of \code{lik}.\cr In addition, there is
#' the possibility to save (the first moments of) coefficients of a model
#' (\code{betas}) and their second moments (\code{betas2}), as well as an
#' arbitrary vector of statistics per model (\code{fixed_vector}).\cr
#' @author Martin Feldkircher and Stefan Zeugner
#' @seealso \code{\link{topmod}} to create \code{topmod} objects and a more
#' detailed description,
#' \code{\link{is.topmod}} to test for this class
#' @references \url{http://bms.zeugner.eu}
#' @keywords classes
#' @examples
#'
#' tm= topmod(2,4,TRUE,0) #should keep a maximum two models
#' tm$addmodel(-2.3,c(1,1,1,1),1:4,5:8) #update with some model
#' tm$addmodel(-2.2,c(0,1,1,1),1:3,5:7) #add another model
#' tm$addmodel(-2.2,c(0,1,1,1),1:3,5:7) #add it again -> adjust ncount
#' tm$addmodel(-2.5,c(1,0,0,1),1:2,5:6) #add another model
#'
#' #read out
#' tm$lik()
#' tm$ncount()
#' tm$bool_binary()
#' tm$betas()
#'
setClass("topmod",representation(addmodel="function", lik="function", bool="function", ncount="function", nbmodels="function", nregs="function", betas_raw="function", betas2_raw="function", kvec_raw="function", bool_binary="function", betas="function", betas2="function", fixed_vector="function"))
#' Class "bma"
#'
#' A list holding results from a BMA iteration chain
#'
#'
#' @name bma-class
#' @docType class
#' @section Objects from the Class: Objects can be created via calls to
#' \code{\link{bms}}, but indirectly also via \code{\link{c.bma}}\cr A
#' \code{bma} object is a list whose elements hold information on input and
#' output for a Bayesian Model Averaging iteration chain, such as from a call
#' to \code{\link{bms}}:
#' @author Martin Feldkircher and Stefan Zeugner
#' @seealso \code{\link{bms}} for creating \code{bma} objects,\cr or
#' \code{\linkS4class{topmod}} for the topmod object
#' @references \url{http://bms.zeugner.eu}
#' @keywords classes
#' @examples
#'
#' data(datafls)
#' mm=bms(datafls)
#' #show posterior model size
#' print(mm$info$msize/mm$info$cumsumweights)
#' #is the same number as in
#' summary(mm)
#'
#'
setClass("bma",representation(info="list",arguments="list",topmod="topmod",start.pos="integer",gprior.info="list",mprior.info="list",X.data="data.frame",reg.names="character",bms.call="call"))
#' Class "zlm"
#'
#' A list holding output from the Bayesian Linear Model under Zellner's g prior
#' akin to class 'lm'
#'
#'
#' @name zlm-class
#' @docType class
#' @section Objects from the Class: Objects can be created via calls to
#' \code{\link{zlm}}, but indirectly also via \code{\link{as.zlm}}.\cr
#' \code{\link{zlm}} estimates a Bayesian Linear Model under Zellner's g prior
#' - its output is very similar to objects of class \code{\link{lm}} (cf.
#' section 'Value')
#' @author Martin Feldkircher and Stefan Zeugner
#' @seealso \code{\link{zlm}} and \code{\link{as.zlm}} for creating \code{zlm}
#' objects,\cr \code{\link{density.zlm}}, \code{\link{predict.zlm}} and
#' \code{\link{summary.zlm}} for other posterior results
#' @references \url{http://bms.zeugner.eu}
#' @keywords classes
setClass("zlm",representation(coefficients="numeric",residuals="numeric",rank="numeric",fitted.values="numeric",df.residual="numeric",call="call",terms="formula",model="data.frame",coef2moments="numeric",marg.lik="numeric",gprior.info="list"))
#' Class "gprior"
#'
#' An object pertaining to a coefficient prior
#'
#'
#' @name gprior-class
#' @docType class
#' @section Objects from the Class: A \code{gprior} object holds descriptions
#' and subfunctions pertaining to coefficient priors. Functions such as
#' \code{\link{bms}} or \code{\link{zlm}} rely on this class to 'convert' the
#' output of OLS results into posterior expressions for a Bayesian Linear
#' Model. Post-processing functions such as \code{\link{density.bma}} also
#' resort to gprior objects.\cr There are currently three coefficient prior
#' structures built into the BMS package, generated by the following functions
#' (cf. Feldkircher and Zeugner, 2009):\cr \code{gprior.constg.init}: creates
#' a Zellner's g-prior object with constant \code{g}.\cr
#' \code{gprior.eblocal.init}: creates an Empirical Bayes Zellner's g-prior.\cr
#' \code{gprior.hyperg.init}: creates a hyper g-prior with a Beta-prior on the
#' shrinkage parameter.\cr The following describes the necessary slots
#' @author Martin Feldkircher and Stefan Zeugner
#' @seealso \code{\link{bms}} and \code{\link{zlm}} for creating \code{bma} or
#' \code{zlm} objects. \cr Check the appendix of \code{vignette(BMS)} for a
#' more detailed description of built-in priors.\cr Check
#' \url{http://bms.zeugner.eu/custompriors.php} for examples.
#' @references Feldkircher, M. and S. Zeugner (2009): Benchmark Priors
#' Revisited: On Adaptive Shrinkage and the Supermodel Effect in Bayesian Model
#' Averaging, IMF Working Paper 09/202.
#' @keywords classes
#' @examples
#'
#'
#' data(datafls)
#' mm1=bms(datafls[,1:10], g="EBL")
#' gg=mm1$gprior.info # is the g-prior object, augmented with some posterior statistics
#'
#' mm2=bms(datafls[,1:10], g=gg) #produces the same result
#'
#' mm3=bms(datafls[,1:10], g=BMS:::.gprior.eblocal.init)
#' #this passes BMS's internal Empirical Bayes g-prior object as the coefficient prior
#' # - any other object might be used as well
#'
#'
#'
setClass("gprior")
#' Class "mprior"
#'
#' An object pertaining to a BMA model prior
#'
#'
#' @name mprior-class
#' @docType class
#' @section Objects from the Class: An \code{mprior} object holds descriptions
#' and subfunctions pertaining to model priors. The BMA functions
#' \code{\link{bms}} and post-processing functions rely on this class. \cr
#' There are currently five model prior structures built into the BMS package,
#' generated by the following functions (cf. the appendix of
#' \code{vignette(BMS)}): \cr \code{mprior.uniform.init}: creates a uniform
#' model prior object.\cr \code{mprior.fixedt.init}: creates the popular
#' binomial model prior object with common inclusion probabilities.\cr
#' \code{mprior.randomt.init}: creates a beta-binomial model prior object.\cr
#' \code{mprior.pip.init}: creates a binomial model prior object that allows
#' for defining individual prior inclusion probabilities.\cr
#' \code{mprior.customk.init}: creates a model prior object that allows for
#' defining a custom prior for each model parameter size.\cr The following
#' describes the necessary slots:
#' @author Martin Feldkircher and Stefan Zeugner
#' @seealso \code{\link{bms}} for creating \code{bma} objects. \cr Check the
#' appendix of \code{vignette(BMS)} for a more detailed description of built-in
#' priors.\cr Check \url{http://bms.zeugner.eu/custompriors.php} for examples.
#' @keywords classes
setClass('mprior')
#' @importFrom stats density
.flsresultlist =function(item=NULL) {
if (is.null(item)) return(.flsresults)
return(.flsresults[[item]])
}
### source file: /scratch/gouwar.j/cran-all/cranData/BMS/R/zzz.R
### R code from vignette source 'bmsmanual.Rnw'
###################################################
### code chunk number 1: bmsmanual.Rnw:16-17
###################################################
options(width=75)
###################################################
### code chunk number 2: bmsmanual.Rnw:64-65
###################################################
data(attitude)
###################################################
### code chunk number 3: bmsmanual.Rnw:68-69
###################################################
library(BMS)
###################################################
### code chunk number 4: bmsmanual.Rnw:72-73
###################################################
att = bms(attitude, mprior = "uniform", g="UIP", user.int=F)
###################################################
### code chunk number 5: bmsmanual.Rnw:79-80
###################################################
coef(att)
###################################################
### code chunk number 6: bmsmanual.Rnw:87-88
###################################################
coef(att, std.coefs=T, order.by.pip=F, include.constant=T)
###################################################
### code chunk number 7: bmsmanual.Rnw:94-95
###################################################
summary(att)
###################################################
### code chunk number 8: bmsmanual.Rnw:100-101
###################################################
topmodels.bma(att)[,1:3]
###################################################
### code chunk number 9: bmsmanual.Rnw:106-107
###################################################
image(att)
###################################################
### code chunk number 10: bmsmanual.Rnw:115-116
###################################################
sum(coef(att)[,1])
###################################################
### code chunk number 11: bmsmanual.Rnw:120-121
###################################################
plotModelsize(att)
###################################################
### code chunk number 12: bmsmanual.Rnw:132-133
###################################################
att_fixed = bms(attitude, mprior="fixed", mprior.size=2, user.int=T)
###################################################
### code chunk number 13: bmsmanual.Rnw:139-140
###################################################
att_pip = bms(attitude, mprior="pip", mprior.size=c(.01,.5,.5,.5,.5,.5), user.int=F)
###################################################
### code chunk number 14: bmsmanual.Rnw:146-147
###################################################
plotModelsize(att_fixed)
###################################################
### code chunk number 15: bmsmanual.Rnw:153-155
###################################################
att_random = bms(attitude, mprior="random", mprior.size=3, user.int=F)
plotModelsize(att_random)
###################################################
### code chunk number 16: bmsmanual.Rnw:161-162 (eval = FALSE)
###################################################
## plotComp(Uniform=att, Fixed=att_fixed, PIP=att_pip, Random=att_random)
###################################################
### code chunk number 17: bmsmanual.Rnw:165-166
###################################################
plotComp(Uniform=att, Fixed=att_fixed, PIP=att_pip, Random=att_random, cex=2)
###################################################
### code chunk number 18: bmsmanual.Rnw:199-201 (eval = FALSE)
###################################################
## data(datafls)
## fls1 = bms(datafls, burn=50000, iter=100000, g="BRIC", mprior="uniform", nmodel=2000, mcmc="bd", user.int=F)
###################################################
### code chunk number 19: bmsmanual.Rnw:203-204
###################################################
fls1 = BMS:::.flsresultlist('fls1')
###################################################
### code chunk number 20: bmsmanual.Rnw:207-208
###################################################
summary(fls1)
###################################################
### code chunk number 21: bmsmanual.Rnw:212-213
###################################################
plotConv(fls1)
###################################################
### code chunk number 22: bmsmanual.Rnw:217-218
###################################################
plotConv(fls1[1:100])
###################################################
### code chunk number 23: bmsmanual.Rnw:226-227
###################################################
pmp.bma(fls1)[1:5,]
###################################################
### code chunk number 24: bmsmanual.Rnw:230-231
###################################################
colSums(pmp.bma(fls1))
###################################################
### code chunk number 25: bmsmanual.Rnw:234-235
###################################################
coef(fls1)[1:5,]
###################################################
### code chunk number 26: bmsmanual.Rnw:238-239
###################################################
coef(fls1,exact=TRUE)[1:5,]
###################################################
### code chunk number 27: bmsmanual.Rnw:250-251
###################################################
fls2 = BMS:::.flsresultlist('fls2')
###################################################
### code chunk number 28: bmsmanual.Rnw:253-254 (eval = FALSE)
###################################################
## fls2= bms(datafls, burn=20000, iter=50000, g="BRIC", mprior="uniform", mcmc="rev.jump", start.value=0, user.int=F)
###################################################
### code chunk number 29: bmsmanual.Rnw:256-257
###################################################
summary(fls2)
###################################################
### code chunk number 30: bmsmanual.Rnw:261-263
###################################################
fls_combi = c(fls1,fls2)
summary(fls_combi)
###################################################
### code chunk number 31: bmsmanual.Rnw:277-278
###################################################
fls_g5 = BMS:::.flsresultlist('fls_g5')
###################################################
### code chunk number 32: bmsmanual.Rnw:280-281 (eval = FALSE)
###################################################
## fls_g5 = bms(datafls, burn=20000, iter=50000, g=5, mprior="uniform", user.int=F)
###################################################
### code chunk number 33: bmsmanual.Rnw:283-285
###################################################
coef(fls_g5)[1:5,]
summary(fls_g5)
###################################################
### code chunk number 34: bmsmanual.Rnw:300-301
###################################################
fls_ebl = BMS:::.flsresultlist('fls_ebl')
###################################################
### code chunk number 35: bmsmanual.Rnw:303-304 (eval = FALSE)
###################################################
## fls_ebl = bms(datafls, burn=20000, iter=50000, g="EBL", mprior="uniform", nmodel=1000, user.int=F)
###################################################
### code chunk number 36: bmsmanual.Rnw:306-307
###################################################
summary(fls_ebl)
###################################################
### code chunk number 37: bmsmanual.Rnw:310-311
###################################################
plot(fls_ebl)
###################################################
### code chunk number 38: bmsmanual.Rnw:320-321
###################################################
fls_hyper = BMS:::.flsresultlist('fls_hyper')
###################################################
### code chunk number 39: bmsmanual.Rnw:323-324 (eval = FALSE)
###################################################
## fls_hyper = bms(datafls, burn=20000, iter=50000, g="hyper=UIP", mprior="random", mprior.size=7, nmodel=1000, user.int=F)
###################################################
### code chunk number 40: bmsmanual.Rnw:326-327
###################################################
summary(fls_hyper)
###################################################
### code chunk number 41: bmsmanual.Rnw:331-332
###################################################
gdensity(fls_hyper)
###################################################
### code chunk number 42: bmsmanual.Rnw:336-337
###################################################
image(fls_hyper)
###################################################
### code chunk number 43: bmsmanual.Rnw:345-346
###################################################
density(fls_combi,reg="Muslim")
###################################################
### code chunk number 44: bmsmanual.Rnw:350-351
###################################################
coef(fls_combi,exact=T,condi.coef=T)["Muslim",]
###################################################
### code chunk number 45: bmsmanual.Rnw:357-358
###################################################
dmuslim=density(fls_hyper,reg="Muslim",addons="Eebl")
###################################################
### code chunk number 46: bmsmanual.Rnw:364-365
###################################################
quantile(dmuslim, c(0.025, 0.975))
###################################################
### code chunk number 47: bmsmanual.Rnw:372-375
###################################################
fcstbma= bms(datafls[1:70,], mprior="uniform", burn=20000, iter=50000, user.int=FALSE)
pdens = pred.density(fcstbma, newdata=datafls[71:72,])
###################################################
### code chunk number 48: bmsmanual.Rnw:381-382
###################################################
plot(pdens, 2)
###################################################
### code chunk number 49: bmsmanual.Rnw:388-389
###################################################
quantile(pdens, c(0.05, 0.95))
###################################################
### code chunk number 50: bmsmanual.Rnw:395-396
###################################################
pdens$dyf(datafls[71:72,1])
###################################################
### code chunk number 51: bmsmanual.Rnw:400-401
###################################################
plot(pdens, "ZM", realized.y=datafls["ZM",1])
###################################################
### code chunk number 52: bmsmanual.Rnw:408-409
###################################################
lps.bma(pdens, datafls[71:72,1])
###################################################
### code chunk number 53: bmsmanual.Rnw:498-499
###################################################
data(attitude)
###################################################
### code chunk number 54: bmsmanual.Rnw:502-503
###################################################
att_full = zlm(attitude,g="UIP")
###################################################
### code chunk number 55: bmsmanual.Rnw:506-507
###################################################
summary(att_full)
###################################################
### code chunk number 56: bmsmanual.Rnw:511-513
###################################################
att_best = as.zlm(att,model=1)
summary(att_best)
###################################################
### code chunk number 57: bmsmanual.Rnw:518-520
###################################################
att_bestlm = lm(model.frame(as.zlm(att)))
summary(att_bestlm)
###################################################
### code chunk number 58: bmsmanual.Rnw:529-530
###################################################
att_learn = bms(attitude,mprior="uniform", fixed.reg=c("complaints", "learning") )
###################################################
### code chunk number 59: bmsmanual.Rnw:539-540
###################################################
fls_culture = bms(datafls,fixed.reg=c(1,8:16,24,26:41), mprior="random", mprior.size=28, mcmc="enumeration", user.int=F)
###################################################
### code chunk number 60: bmsmanual.Rnw:544-545
###################################################
coef(fls_culture)[28:41, ]
###################################################
### code chunk number 61: bmsmanual.Rnw:549-550
###################################################
plotModelsize(fls_culture, ksubset=27:41)
### source file: /scratch/gouwar.j/cran-all/cranData/BMS/inst/doc/bmsmanual.R
#' @title The BMT-Phi Distribution.
#' @description Density, distribution function, quantile function, random number
#' generation for the BMT-Phi distribution with mean equal to \code{mean} and
#' standard deviation equal to \code{sd}.
#' @rdname BMT.Phi
#' @name BMT.Phi
#' @aliases dBMT.Phi
#' @aliases pBMT.Phi
#' @aliases qBMT.Phi
#' @aliases rBMT.Phi
#'
#' @details If \code{mean} or \code{sd} are not specified they assume the
#' default values of 0 and 1, respectively.
#'
#' The BMT-Phi distribution is the BMT distribution with \eqn{\kappa_l =
#' \kappa_r = 0.58029164978583758}. The BMT-Phi cumulative distribution
#' function (cdf) is the closest BMT cdf to the normal cdf with the same mean and standard deviation.
#'
#' @param x,q vector of quantiles.
#' @param p vector of probabilities.
#' @param n number of observations. If \code{length(n) > 1}, the length is taken
#' to be the number required.
#' @param mean vector of means.
#' @param sd vector of standard deviations.
#' @param log,log.p logical; if TRUE, probabilities p are given as log(p).
#' @param lower.tail logical; if TRUE (default), probabilities are \eqn{P[X \le
#' x]}, otherwise, \eqn{P[X > x]}.
#'
#' @return \code{dBMT.Phi} gives the density, \code{pBMT.Phi} the distribution
#' function, \code{qBMT.Phi} the quantile function, and \code{rBMT.Phi}
#' generates random deviates.
#'
#' The length of the result is determined by \code{n} for \code{rBMT.Phi}, and
#' is the maximum of the lengths of the numerical arguments for the other
#' functions.
#'
#' The numerical arguments other than \code{n} are recycled to the length of
#' the result. Only the first elements of the logical arguments are used.
#'
#' \code{sd <= 0} is an error and returns \code{NaN}.
#'
#' @references Torres-Jimenez, C. J. (2018), \emph{The BMT Item Response Theory model: A
#' new skewed distribution family with bounded domain and an IRT model based
#' on it}, PhD thesis, Doctorado en ciencias - Estadistica, Universidad
#' Nacional de Colombia, Sede Bogota.
#'
#' @seealso \link{Distributions} for other standard distributions.
#' \code{\link{pBMT}} for the BMT distribution and \code{\link{pBMT.Psi}} for
#' the BMT-Psi distribution.
#'
#' @author Camilo Jose Torres-Jimenez [aut,cre] \email{[email protected]}
#'
#' @examples
#'
#' layout(matrix(1:4,2,2))
#'
#' curve(pnorm(x), -4, 4, col = "red", lty = 2, ylab = "cdf")
#' curve(pBMT.Phi(x), add = TRUE, col = "blue", lty = 3)
#' legend("topleft", legend = c("norm(0,1)","BMT-Phi(0,1)"),
#' bty = "n", col = c("red","blue"), lty = 2:3)
#'
#' curve(pnorm(x)-pBMT.Phi(x), -4, 4)
#'
#' curve(qnorm(x), col = "red", lty = 2, xlab = "p", ylab = "qf")
#' curve(qBMT.Phi(x), add = TRUE, col = "blue", lty = 3)
#'
#' hist(rBMT.Phi(10000), freq = FALSE, breaks = seq(-4,4,0.25), border = "blue")
#' curve(dnorm(x), add = TRUE, col = "red", lty = 2)
#' curve(dBMT.Phi(x), add = TRUE, col = "blue", lty = 3)
#' @rdname BMT.Phi
#' @export dBMT.Phi
dBMT.Phi <- function(x, mean = 0, sd = 1, log = FALSE) {
# The length of the result is determined by the maximum of the lengths of the
# numerical arguments. The numerical arguments are recycled to the length of
# the result.
len <- max(length(x), length(mean), length(sd))
x <- rep(x, len = len)
mean <- rep(mean, len = len)
sd <- rep(sd, len = len)
# Control location-scale parameters
sd <- replace(sd, sd <= 0, NaN)
# Transform x to 0,1 given location-scale parameters
range <- sd / .sd.BMT.DD01
x <- (x - mean) / range + .mean.BMT.DD01
# NaN handling: summing the arguments propagates any NaN in x, mean, or sd into y
y <- x + mean + sd
if (any(is.nan(y))) {
warning("NaNs found or produced")
}
ind <- !is.na(y)
# For transformed x outside 0,1
y[ind] <- 0
# For transformed x inside 0,1
ind[ind] <- x[ind] > 0 & x[ind] < 1
if (any(ind)) {
# inv.x.t.Phi
t <- .inv.x.t.Phi(x[ind])
# yf.t.Phi
y[ind] <- .yf.t.Phi(t) / range[ind]
}
# density values y are given as log(y)
if (log)
y <- log(y)
return(y)
}
#' @rdname BMT.Phi
#' @export pBMT.Phi
pBMT.Phi <- function(q, mean = 0, sd = 1, lower.tail = TRUE, log.p = FALSE){
# The length of the result is determined by the maximum of the lengths of the
# numerical arguments. The numerical arguments are recycled to the length of
# the result.
len <- max(length(q), length(mean), length(sd))
q <- rep(q, len=len)
mean <- rep(mean, len=len)
sd <- rep(sd, len=len)
# Control location-scale parameters
sd <- replace(sd, sd <= 0, NaN)
# Transform q to 0,1 given location-scale parameters
range <- sd / .sd.BMT.DD01
q <- (q - mean) / range + .mean.BMT.DD01
# NaNs
p <- q + mean + sd
if(any(is.nan(p)))
warning("NaNs founded or produced")
ind <- !is.na(p)
# For transformed q outside 0,1
p[ind] <- 0
ind2 <- ind
ind2[ind] <- q[ind] >= 1
p[ind2] <- 1
# For transformed q inside 0,1
ind[ind] <- q[ind] > 0 & q[ind] < 1
if(any(ind)){
# inv.x.t.Phi
t <- .inv.x.t.Phi(q[ind])
# yF.t.Phi
p[ind] <- .yF.t.Phi(t)
}
# probabilities are \eqn{P[X > x]}
if(!lower.tail)
p <- 1 - p
# probabilities p are given as log(p)
if(log.p)
p <- log(p)
return(p)
}
#' @rdname BMT.Phi
#' @export qBMT.Phi
qBMT.Phi <- function(p, mean = 0, sd = 1, lower.tail = TRUE, log.p = FALSE){
# probabilities p are given as log(p)
if(log.p)
p <- exp(p)
# probabilities are \eqn{P[X > x]}
if(!lower.tail)
p <- 1 - p
# The length of the result is determined by the maximum of the lengths of the
# numerical arguments. The numerical arguments are recycled to the length of
# the result.
len <- max(length(p),length(mean),length(sd))
p <- rep(p, len=len)
mean <- rep(mean, len=len)
sd <- rep(sd, len=len)
# Control location-scale parameters
sd <- replace(sd, sd <= 0, NaN)
# NaNs
q <- p + mean + sd
if(any(is.nan(q)))
warning("NaNs founded or produced")
ind <- !is.na(q)
# For p outside (0,1)
q <- rep(NaN,len)
ind2 <- ind
ind2[ind] <- p[ind] == 0
q[ind2] <- 0
ind2 <- ind
ind2[ind] <- p[ind] == 1
q[ind2] <- 1
# For p inside (0,1)
ind[ind] <- p[ind] > 0 & p[ind] < 1
if(any(ind)){
# inv.yF.t
t <- .inv.yF.t.Phi(p[ind])
# x.t
q[ind] <- .x.t.Phi(t)
}
# Transform q
range <- sd / .sd.BMT.DD01
q <- (q - .mean.BMT.DD01) * range + mean
return(q)
}
#' @rdname BMT.Phi
#' @export rBMT.Phi
rBMT.Phi <- function(n, mean = 0, sd = 1){
#
len <- length(n)
if(len > 1)
n <- len
else
n <- trunc(n)
if(n < 1)
stop("invalid arguments")
# Method of inversion
p <- runif(n)
x <- qBMT.Phi(p, mean, sd)
return(x)
}
## Global constants
.epsilon <- 1e-10
.zero <- 0-.epsilon
.one <- 1+.epsilon
# X \sim BMT.Phi(mean, sd) is the same X \sim BMT(.D, .D, "t w", mean, sd, "l-s")
# .D <- BMTfit.mge(qnorm(1:1e6/(1e6+1)), "KS", start=list(p4=0.5), fix.arg=list(p1=0, p2=1, p3=0),
# type.p.3.4="a-s", type.p.1.2="l-s", custom.optim="nlminb")$estimate
.D <- 0.58029164978583758
# Mean of X \sim BMT(.D, .D, "t w", 0, 1, "c-d")
# .mean.BMT.DD01 <- BMTmean(.D, .D)
.mean.BMT.DD01 <- 0.5
# Standard deviation of X \sim BMT(.D, .D, "t w", 0, 1, "c-d")
# .sd.BMT.DD01 <- BMTsd(.D, .D)
.sd.BMT.DD01 <- 0.17733001242558785
# Maximum of X \sim BMT.Phi(0, 1)
# .d.BMT.Phi <- BMTchangepars(.D, .D, "t w", 0, 1, "l-s")$p2
.d.BMT.Phi <- 2.8196016746449653
# Coefficients of polynomial x.t = a_3 * t^3 + a_2 * t^2 + a_1 * t
# .a_3 <- 6*.D - 2
.a_3 <- 1.4817498987150257
# .a_2 <- -9*.D + 3
.a_2 <- -2.2226248480725381
# .a_1 <- 3*.D
.a_1 <- 1.7408749493575129
# For the real root of x.t = x
# .a <- .a_2/.a_3
.a <- -1.5
# .b <- .a_1/.a_3
.b <- 1.1748777245520317
# .Q <- (.a*.a - 3*.b)/9
.Q <- -0.14162590818401061
# .auxR <- (2*.a*.a - 9*.b)*.a / 54
.auxR <- 0.16871943113800789
# x.t.Phi
.x.t.Phi <- function(t){
x <- ((.a_3*t + .a_2)*t + .a_1)*t
return(x)
}
# Inverse function for x.t.Phi
.inv.x.t.Phi <- function(x){
# Press W.H., Teukolsky S.A., Vetterling W.T. & Flannery B.P. 2007.
# Numerical recipes: The art of scientific computing
# Section 5.6: Quadratic and Cubic Equations. Page 228.
len <- length(x)
c <- -x/.a_3
R <- .auxR + 0.5*c
# One real root
A <- - sign(R) * (abs(R) + sqrt(R*R-.Q*.Q*.Q))^(1/3)
B <- rep(0, len)
B[A!=0] <- .Q/A[A!=0]
r <- (A + B) - .a/3
r[r < 0] <- 0
r[r > 1] <- 1
return(r)
}
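# quick round-trip check for .inv.x.t.Phi, kept commented out in line with this
# file's convention for verification code (uses only the functions above):
# tt <- seq(0, 1, by = 0.01)
# max(abs(.inv.x.t.Phi(.x.t.Phi(tt)) - tt)) # should be numerically ~0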
# yF.t.Phi (same yF.t)
.yF.t.Phi <- function(t){
yF <- (-2*t + 3)*t*t
return(yF)
}
# inv.yF.t.Phi (same inv.yF.t)
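# closed-form inverse of the cdf kernel yF(t) = 3*t^2 - 2*t^3, using the
# trigonometric method for a cubic with three real roots; the branch below is
# the root lying in [0,1]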
.inv.yF.t.Phi <- function(yF){
t <- 0.5-cos((acos(2*yF-1)-2*pi)/3)
return(t)
}
# yf.t.Phi
.yf.t.Phi <- function(t){
yf <- ((-6*t + 6)*t) / ((3*.a_3*t + 2*.a_2)*t + .a_1)
return(yf)
}
### source file: /scratch/gouwar.j/cran-all/cranData/BMT/R/BMT.Phi.R
#' @title The BMT-Psi Distribution.
#' @description Density, distribution function, quantile function, random number
#' generation for the BMT-Psi distribution with mean equal to \code{mean} and
#' standard deviation equal to \code{sd}.
#' @rdname BMT.Psi
#' @name BMT.Psi
#' @aliases dBMT.Psi
#' @aliases pBMT.Psi
#' @aliases qBMT.Psi
#' @aliases rBMT.Psi
#'
#' @details If \code{mean} or \code{sd} are not specified they assume the
#' default values of 0 and 1, respectively.
#'
#' The BMT-Psi distribution is the BMT distribution with \eqn{\kappa_l =
#' \kappa_r = 0.63355781127887611515}. The BMT-Psi cumulative distribution
#' function (cdf) is the closest BMT cdf to the logistic cdf with scale =
#' 1 / d and d = 1.70174439 (Camilli, 1994, p. 295).
#'
#' @param x,q vector of quantiles.
#' @param p vector of probabilities.
#' @param n number of observations. If \code{length(n) > 1}, the length is taken
#' to be the number required.
#' @param mean vector of means.
#' @param sd vector of standard deviations.
#' @param log,log.p logical; if TRUE, probabilities p are given as log(p).
#' @param lower.tail logical; if TRUE (default), probabilities are \eqn{P[X \le
#' x]}, otherwise, \eqn{P[X > x]}.
#'
#' @return \code{dBMT.Psi} gives the density, \code{pBMT.Psi} the distribution
#' function, \code{qBMT.Psi} the quantile function, and \code{rBMT.Psi}
#' generates random deviates.
#'
#' The length of the result is determined by \code{n} for \code{rBMT.Psi}, and
#' is the maximum of the lengths of the numerical arguments for the other
#' functions.
#'
#' The numerical arguments other than \code{n} are recycled to the length of
#' the result. Only the first elements of the logical arguments are used.
#'
#' \code{sd <= 0} is an error and returns \code{NaN}.
#'
#' @references Torres-Jimenez, C. J. (2018), \emph{The BMT Item Response Theory
#' model: A new skewed distribution family with bounded domain and an IRT
#' model based on it}, PhD thesis, Doctorado en ciencias - Estadistica,
#' Universidad Nacional de Colombia, Sede Bogota.
#'
#' Camilli, G. (1994). Teacher's corner: origin of the scaling constant d= 1.7
#' in item response theory. Journal of Educational Statistics, 19(3), 293-295.
#'
#' @seealso \link{Distributions} for other standard distributions.
#' \code{\link{pBMT}} for the BMT distribution and \code{\link{pBMT.Phi}} for
#' the BMT-Phi distribution.
#'
#' @author Camilo Jose Torres-Jimenez [aut,cre] \email{[email protected]}
#'
#' @examples
#'
#' layout(matrix(1:4, 2, 2))
#'
#' curve(plogis(x, scale = 1 / 1.70174439), -4, 4, col = "red", lty = 2, ylab = "cdf")
#' curve(pBMT.Psi(x), add = TRUE, col = "blue", lty = 3)
#' legend("topleft", legend = c("logis(0, 1 / 1.70174439)","BMT-Psi(0,1)"),
#' bty = "n", col = c("red","blue"), lty = 2:3)
#'
#' curve(plogis(x, scale = 1 / 1.70174439)-pBMT.Psi(x), -4, 4)
#'
#' curve(qlogis(x, scale = 1 / 1.70174439), col = "red", lty = 2, xlab = "p", ylab = "qf")
#' curve(qBMT.Psi(x), add = TRUE, col = "blue", lty = 3)
#'
#' hist(rBMT.Psi(10000), freq = FALSE, breaks = seq(-4, 4, 0.25), border = "blue")
#' curve(dlogis(x, scale = 1 / 1.70174439), add = TRUE, col = "red", lty = 2)
#' curve(dBMT.Psi(x), add = TRUE, col = "blue", lty = 3)
#' @rdname BMT.Psi
#' @export dBMT.Psi
dBMT.Psi <- function(x, mean = 0, sd = 1, log = FALSE) {
# The length of the result is determined by the maximum of the lengths of the
# numerical arguments. The numerical arguments are recycled to the length of
# the result.
len <- max(length(x), length(mean), length(sd))
x <- rep(x, len = len)
mean <- rep(mean, len = len)
sd <- rep(sd, len = len)
# Control location-scale parameters
sd <- replace(sd, sd <= 0, NaN)
# Transform x to 0,1 given location-scale parameters
range <- sd / .sd.BMT.Psi.DD01
x <- (x - mean) / range + .mean.BMT.Psi.DD01
# NaNs
y <- x + mean + sd
if (any(is.nan(y))) {
warning("NaNs founded or produced")
}
ind <- !is.na(y)
# For transformed x outside 0,1
y[ind] <- 0
# For transformed x inside 0,1
ind[ind] <- x[ind] > 0 & x[ind] < 1
if (any(ind)) {
# inv.x.t.Psi
t <- .inv.x.t.Psi(x[ind])
# yf.t.Psi
y[ind] <- .yf.t.Psi(t) / range[ind]
}
# density values y are given as log(y)
if (log)
y <- log(y)
return(y)
}
#' @rdname BMT.Psi
#' @export pBMT.Psi
pBMT.Psi <- function(q, mean = 0, sd = 1, lower.tail = TRUE, log.p = FALSE){
# The length of the result is determined by the maximum of the lengths of the
# numerical arguments. The numerical arguments are recycled to the length of
# the result.
len <- max(length(q), length(mean), length(sd))
q <- rep(q, len=len)
mean <- rep(mean, len=len)
sd <- rep(sd, len=len)
# Control location-scale parameters
sd <- replace(sd, sd <= 0, NaN)
# Transform q to 0,1 given location-scale parameters
range <- sd / .sd.BMT.Psi.DD01
q <- (q - mean) / range + .mean.BMT.Psi.DD01
# NaNs
p <- q + mean + sd
if(any(is.nan(p)))
warning("NaNs founded or produced")
ind <- !is.na(p)
# For transformed q outside 0,1
p[ind] <- 0
ind2 <- ind
ind2[ind] <- q[ind] >= 1
p[ind2] <- 1
# For transformed q inside 0,1
ind[ind] <- q[ind] > 0 & q[ind] < 1
if(any(ind)){
# inv.x.t.Psi
t <- .inv.x.t.Psi(q[ind])
# yF.t.Psi
p[ind] <- .yF.t.Psi(t)
}
# probabilities are \eqn{P[X > x]}
if(!lower.tail)
p <- 1 - p
# probabilities p are given as log(p)
if(log.p)
p <- log(p)
return(p)
}
#' @rdname BMT.Psi
#' @export qBMT.Psi
qBMT.Psi <- function(p, mean = 0, sd = 1, lower.tail = TRUE, log.p = FALSE){
# probabilities p are given as log(p)
if(log.p)
p <- exp(p)
# probabilities are \eqn{P[X > x]}
if(!lower.tail)
p <- 1 - p
# The length of the result is determined by the maximum of the lengths of the
# numerical arguments. The numerical arguments are recycled to the length of
# the result.
len <- max(length(p),length(mean),length(sd))
p <- rep(p, len=len)
mean <- rep(mean, len=len)
sd <- rep(sd, len=len)
# Control location-scale parameters
sd <- replace(sd, sd <= 0, NaN)
# NaNs
q <- p + mean + sd
if(any(is.nan(q)))
warning("NaNs founded or produced")
ind <- !is.na(q)
# For p outside (0,1)
q <- rep(NaN,len)
ind2 <- ind
ind2[ind] <- p[ind] == 0
q[ind2] <- 0
ind2 <- ind
ind2[ind] <- p[ind] == 1
q[ind2] <- 1
# For p inside (0,1)
ind[ind] <- p[ind] > 0 & p[ind] < 1
if(any(ind)){
# inv.yF.t
t <- .inv.yF.t.Psi(p[ind])
# x.t
q[ind] <- .x.t.Psi(t)
}
# Transform q
range <- sd / .sd.BMT.Psi.DD01
q <- (q - .mean.BMT.Psi.DD01) * range + mean
return(q)
}
#' @rdname BMT.Psi
#' @export rBMT.Psi
rBMT.Psi <- function(n, mean = 0, sd = 1){
#
len <- length(n)
if(len > 1)
n <- len
else
n <- trunc(n)
if(n < 1)
stop("invalid arguments")
# Method of inversion
p <- runif(n)
x <- qBMT.Psi(p, mean, sd)
return(x)
}
## Global constants
.epsilon <- 1e-10
.zero <- 0 - .epsilon
.one <- 1 + .epsilon
# X \sim BMT.Psi(mean, sd) is the same X \sim BMT(.D.Psi, .D.Psi, "t w", mean, sd, "l-s")
# .D.Psi <- BMTfit.mge(qlogis(1:1e6/(1e6+1), scale=1/1.70174439),
# "KS", start=list(p4=0.5), fix.arg=list(p1=0, p2=1, p3=0),
# type.p.3.4="a-s", type.p.1.2="l-s", custom.optim="nlminb")$estimate
.D.Psi <- 0.63355781127887611515
# Mean of X \sim BMT(.D.Psi, .D.Psi, "t w", 0, 1, "c-d")
# .mean.BMT.Psi.DD01 <- BMTmean(.D.Psi, .D.Psi)
.mean.BMT.Psi.DD01 <- 0.5
# Standard deviation of X \sim BMT(.D.Psi, .D.Psi, "t w", 0, 1, "c-d")
# .sd.BMT.Psi.DD01 <- BMTsd(.D.Psi, .D.Psi)
.sd.BMT.Psi.DD01 <- 0.16771818811837588270
# Maximum of X \sim BMT.Psi(0, 1)
# .d.BMT.Psi <- BMTchangepars(.D.Psi, .D.Psi, "t w", 0, 1, "l-s")$p2
.d.BMT.Psi <- 2.98119128050142556674
# Coefficients of polynomial x.t = a_3.Psi * t^3 + a_2.Psi * t^2 + a_1.Psi * t
# .a_3.Psi <- 6*.D.Psi - 2
.a_3.Psi <- 1.80134686767325646883
# .a_2.Psi <- -9*.D.Psi + 3
.a_2.Psi <- -2.70202030150988470325
# .a_1.Psi <- 3*.D.Psi
.a_1.Psi <- 1.90067343383662823442
# For the real root of x.t = x
# .a.Psi <- .a_2.Psi/.a_3.Psi = -1.5
.a.Psi <- -1.5
# .b.Psi <- .a_1.Psi/.a_3.Psi
.b.Psi <- 1.05514016647536013060
# .Q.Psi <- (.a.Psi*.a.Psi - 3*.b.Psi)/9
.Q.Psi <- -0.10171338882512001578
# .auxR.Psi <- (2*.a.Psi*.a.Psi - 9*.b.Psi)*.a.Psi / 54
.auxR.Psi <- 0.13878504161884000490
# x.t.Psi
.x.t.Psi <- function(t){
x <- ((.a_3.Psi*t + .a_2.Psi)*t + .a_1.Psi)*t
return(x)
}
# Inverse function for x.t.Psi
.inv.x.t.Psi <- function(x){
# Press W.H., Teukolsky S.A., Vetterling W.T. & Flannery B.P. 2007.
# Numerical recipes: The art of scientific computing
# Section 5.6: Quadratic and Cubic Equations. Page 228.
len <- length(x)
c <- -x/.a_3.Psi
R <- .auxR.Psi + 0.5*c
# One real root
A <- - sign(R) * (abs(R) + sqrt(R*R-.Q.Psi*.Q.Psi*.Q.Psi))^(1/3)
B <- rep(0, len)
B[A!=0] <- .Q.Psi/A[A!=0]
r <- (A + B) - .a.Psi/3
r[r < 0] <- 0
r[r > 1] <- 1
return(r)
}
# yF.t.Psi (same yF.t)
.yF.t.Psi <- function(t){
yF <- (-2*t + 3)*t*t
return(yF)
}
# inv.yF.t.Psi (same inv.yF.t)
.inv.yF.t.Psi <- function(yF){
t <- 0.5-cos((acos(2*yF-1)-2*pi)/3)
return(t)
}
# yf.t.Psi
.yf.t.Psi <- function(t){
yf <- ((-6*t + 6)*t) / ((3*.a_3.Psi*t + 2*.a_2.Psi)*t + .a_1.Psi)
return(yf)
}
### source file: /scratch/gouwar.j/cran-all/cranData/BMT/R/BMT.Psi.R
#' @title The BMT Distribution.
#'
#' @description Density, distribution function, quantile function, and random number
#' generation for the BMT distribution, with \code{p3} and \code{p4} tails
#' weights (\eqn{\kappa_l} and \eqn{\kappa_r}) or asymmetry-steepness
#' parameters (\eqn{\zeta} and \eqn{\xi}) and \code{p1} and \code{p2} domain
#' (minimum and maximum) or location-scale (mean and standard deviation)
#' parameters.
#'
#' @rdname BMT
#' @name BMT
#' @aliases dBMT
#' @aliases pBMT
#' @aliases qBMT
#' @aliases rBMT
#'
#' @details The BMT distribution with tails weights and domain parametrization
#' (\code{type.p.3.4 = "t w"} and \code{type.p.1.2 = "c-d"}) has quantile
#' function \deqn{(d - c) [3 t_p ( 1 - t_p )^2 \kappa_l - 3 t_p^2 ( 1 - t_p )
#' \kappa_r + t_p^2 ( 3 - 2 t_p ) ] + c} where \eqn{0 \le p \le 1}, \eqn{t_p =
#' 1/2 - \cos ( [\arccos ( 2 p - 1 ) - 2 \pi] / 3 )}, and \eqn{0 < \kappa_l <
#' 1} and \eqn{0 < \kappa_r < 1} are, respectively, related to left and right
#' tail weights or curvatures.
#'
#' The BMT coefficient of asymmetry \eqn{-1 < \zeta < 1} is \deqn{\kappa_r -
#' \kappa_l}
#'
#' The BMT coefficient of steepness \eqn{0 < \xi < 1} is \deqn{(\kappa_r +
#' \kappa_l - |\kappa_r - \kappa_l|) / (2 (1 - |\kappa_r - \kappa_l|))} for
#' \eqn{|\kappa_r - \kappa_l| < 1}.
#'
#' @param x,q vector of quantiles.
#' @param p vector of probabilities.
#' @param n number of observations. If \code{length(n) > 1}, the length is taken
#' to be the number required.
#' @param p3,p4 tails weights (\eqn{\kappa_l} and \eqn{\kappa_r}) or
#' asymmetry-steepness (\eqn{\zeta} and \eqn{\xi}) parameters of the BMT
#' distribution.
#' @param type.p.3.4 type of parametrization associated with p3 and p4. "t w" means
#' tails weights parametrization (default) and "a-s" means asymmetry-steepness
#' parametrization.
#' @param p1,p2 domain (minimum and maximum) or location-scale (mean and
#' standard deviation) parameters of the BMT distribution.
#' @param type.p.1.2 type of parametrization associated with p1 and p2. "c-d" means
#' domain parametrization (default) and "l-s" means location-scale
#' parametrization.
#' @param log,log.p logical; if TRUE, probabilities p are given as log(p).
#' @param lower.tail logical; if TRUE (default), probabilities are \eqn{P[X \le
#' x]}, otherwise, \eqn{P[X > x]}.
#'
#' @return \code{dBMT} gives the density, \code{pBMT} the distribution function,
#' \code{qBMT} the quantile function, and \code{rBMT} generates random
#' deviates.
#'
#' The length of the result is determined by \code{n} for \code{rBMT}, and is
#' the maximum of the lengths of the numerical arguments for the other
#' functions.
#'
#' The numerical arguments other than \code{n} are recycled to the length of
#' the result. Only the first elements of the logical arguments are used.
#'
#' If \code{type.p.3.4 == "t w"}, \code{p3 < 0} and \code{p3 > 1} are errors
#' and return \code{NaN}.
#'
#' If \code{type.p.3.4 == "a-s"}, \code{p3 < -1} and \code{p3 > 1} are errors
#' and return \code{NaN}.
#'
#' \code{p4 < 0} and \code{p4 > 1} are errors and return \code{NaN}.
#'
#' If \code{type.p.1.2 == "c-d"}, \code{p1 >= p2} is an error and returns
#' \code{NaN}.
#'
#' If \code{type.p.1.2 == "l-s"}, \code{p2 <= 0} is an error and returns
#' \code{NaN}.
#'
#' @references Torres-Jimenez, C. J. and Montenegro-Diaz, A. M. (2017, September),
#' \emph{An alternative to continuous univariate distributions supported on a
#' bounded interval: The BMT distribution}. ArXiv e-prints.
#'
#' Torres-Jimenez, C. J. (2017, September), \emph{Comparison of estimation methods
#' for the BMT distribution}. ArXiv e-prints.
#'
#' Torres-Jimenez, C. J. (2018), \emph{The BMT Item Response Theory model: A
#' new skewed distribution family with bounded domain and an IRT model based
#' on it}, PhD thesis, Doctorado en ciencias - Estadistica, Universidad
#' Nacional de Colombia, Sede Bogota.
#'
#' @seealso \code{\link{BMTcentral}}, \code{\link{BMTdispersion}},
#' \code{\link{BMTskewness}}, \code{\link{BMTkurtosis}},
#' \code{\link{BMTmoments}} for descriptive measures or moments.
#' \code{\link{BMTchangepars}} for parameter conversion between different
#' parametrizations.
#'
#' @author Camilo Jose Torres-Jimenez [aut,cre] \email{[email protected]}
#' and Alvaro Mauricio Montenegro Diaz [ths]
#'
#' @examples
#' # BMT on [0,1] with left tail weight equal to 0.25 and
#' # right tail weight equal to 0.75
#' z <- seq(0, 1, length.out = 100)
#' F1 <- pBMT(z, 0.25, 0.75, "t w")
#' Q1 <- qBMT(F1, 0.25, 0.75, "t w")
#' max(abs(z - Q1))
#' f1 <- dBMT(z, 0.25, 0.75, "t w")
#' r1 <- rBMT(100, 0.25, 0.75, "t w")
#' layout(matrix(c(1,2,1,3), 2, 2))
#' hist(r1, freq = FALSE, xlim = c(0,1))
#' lines(z, f1)
#' plot(z, F1, type="l")
#' plot(F1, Q1, type="l")
#'
#' # BMT on [0,1] with asymmetry coefficient equal to 0.5 and
#' # steepness coefficient equal to 0.5
#' F2 <- pBMT(z, 0.5, 0.5, "a-s")
#' Q2 <- qBMT(F2, 0.5, 0.5, "a-s")
#' f2 <- dBMT(z, 0.5, 0.5, "a-s")
#' r2 <- rBMT(100, 0.5, 0.5, "a-s")
#' max(abs(f1 - f2))
#' max(abs(F1 - F2))
#' max(abs(Q1 - Q2))
#'
#' # BMT on [-1.783489, 3.312195] with
#' # left tail weight equal to 0.25 and
#' # right tail weight equal to 0.75
#' x <- seq(-1.783489, 3.312195, length.out = 100)
#' F3 <- pBMT(x, 0.25, 0.75, "t w", -1.783489, 3.312195, "c-d")
#' Q3 <- qBMT(F3, 0.25, 0.75, "t w", -1.783489, 3.312195, "c-d")
#' max(abs(x - Q3))
#' f3 <- dBMT(x, 0.25, 0.75, "t w", -1.783489, 3.312195, "c-d")
#' r3 <- rBMT(100, 0.25, 0.75, "t w", -1.783489, 3.312195, "c-d")
#' layout(matrix(c(1,2,1,3), 2, 2))
#' hist(r3, freq = FALSE, xlim = c(-1.783489,3.312195))
#' lines(x, f3)
#' plot(x, F3, type="l")
#' plot(F3, Q3, type="l")
#'
#' # BMT with mean equal to 0, standard deviation equal to 1,
#' # asymmetry coefficient equal to 0.5 and
#' # steepness coefficient equal to 0.5
#' f4 <- dBMT(x, 0.5, 0.5, "a-s", 0, 1, "l-s")
#' F4 <- pBMT(x, 0.5, 0.5, "a-s", 0, 1, "l-s")
#' Q4 <- qBMT(F4, 0.5, 0.5, "a-s", 0, 1, "l-s")
#' r4 <- rBMT(100, 0.5, 0.5, "a-s", 0, 1, "l-s")
#' max(abs(f3 - f4))
#' max(abs(F3 - F4))
#' max(abs(Q3 - Q4))
#'
#' @keywords distribution
#' @rdname BMT
#' @export dBMT
dBMT <- function(x, p3, p4, type.p.3.4 = "t w",
p1 = 0, p2 = 1, type.p.1.2 = "c-d",
log = FALSE) {
# Control type.p.3.4
TYPE.P.3.4 <- c("t w", "a-s") # tail weights or asymmetry-steepness
int.type.p.3.4 <- pmatch(type.p.3.4, TYPE.P.3.4)
if (is.na(int.type.p.3.4))
stop("invalid type of parametrization for parameters 3 and 4")
if (int.type.p.3.4 == -1)
stop("ambiguous type of parametrization for parameters 3 and 4")
# Control type.p.1.2
TYPE.P.1.2 <- c("c-d", "l-s") # domain or location-scale
int.type.p.1.2 <- pmatch(type.p.1.2, TYPE.P.1.2)
if (is.na(int.type.p.1.2))
stop("invalid type of parametrization for parameters 1 and 2")
if (int.type.p.1.2 == -1)
stop("ambiguous type of parametrization for parameters 1 and 2")
# The length of the result is determined by the maximum of the lengths of the
# numerical arguments. The numerical arguments are recycled to the length of
# the result.
len <- max(length(x),length(p1),length(p2),length(p3),length(p4))
x <- rep(x, len = len)
p1 <- rep(p1, len = len)
p2 <- rep(p2, len = len)
p3 <- rep(p3, len = len)
p4 <- rep(p4, len = len)
# Transform x to 0,1 given domain or location-scale parameters
if (int.type.p.1.2 == 1) {
# domain parametrization
# Control domain parameters
min <- replace(p1, p1 >= p2, NaN)
max <- replace(p2, p1 >= p2, NaN)
# Transform x
range <- max - min
x <- (x - min) / range
}
else{
# location-scale parametrization
# Control location-scale parameters
mu <- p1
sigma <- replace(p2, p2 <= 0, NaN)
# Transform x
range <- sigma / BMTsd(p3, p4, type.p.3.4)
x <- (x - mu) / range + BMTmean(p3, p4, type.p.3.4)
}
# Obtain coefficients of polynomials x.t and yf.t given tail weights or
# asymmetry-steepness parameters
if (int.type.p.3.4 == 1) {
# tail weights parametrization
# Control tail weights parameters
kappa_l <- replace(p3, p3 < 0 | p3 > 1, NaN)
kappa_r <- replace(p4, p4 < 0 | p4 > 1, NaN)
# Coefficients a_3*t^3+a_2*t^2+a_1*t+a_0
a_3 <- 3 * kappa_l + 3 * kappa_r - 2
a_2 <- (-6 * kappa_l - 3 * kappa_r + 3)
a_1 <- (3 * kappa_l)
}
else{
# asymmetry-steepness parametrization
# Control asymmetry-steepness parameters
zeta <- replace(p3, p3 < -1 | p3 > 1, NaN)
xi <- replace(p4, p4 < 0 | p4 > 1, NaN)
# Coefficients a_3*t^3+a_2*t^2+a_1*t+a_0
abs.zeta <- abs(zeta)
aux1 <- 0.5 - xi
a_3 <- 6 * (xi + abs.zeta * aux1) - 2
a_2 <- -9 * (xi + abs.zeta * aux1) + 1.5 * zeta + 3
a_1 <- 3 * (xi + abs.zeta * aux1) - 1.5 * zeta
}
# Propagate NaNs from x and the polynomial coefficients
y <- x + a_3 + a_2 + a_1
if (any(is.nan(y))) {
warning("NaNs found or produced")
}
ind <- !is.na(y)
# Transformed x outside 0,1
y[ind] <- 0
# Transformed x inside 0,1
ind[ind] <- x[ind] > 0 & x[ind] < 1
if (any(ind)) {
# inv.x.t
t <- .inv.x.t(x[ind], a_3[ind], a_2[ind], a_1[ind])
# yf.t
y[ind] <- .yf.t(t, a_3[ind], a_2[ind], a_1[ind]) / range[ind]
}
# density values y are given as log(y)
if (log)
y <- log(y)
return(y)
}
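# Illustrative sketch (not part of the original package source): a quick
# numerical check that dBMT returns a proper density, i.e. that it
# integrates to ~1 over the domain. The parameter values are arbitrary.
if (FALSE) {
  integrate(dBMT, 0, 1, p3 = 0.25, p4 = 0.75, type.p.3.4 = "t w")$value  # ~1
}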
#' @rdname BMT
#' @export pBMT
pBMT <- function(q, p3, p4, type.p.3.4 = "t w",
p1 = 0, p2 = 1, type.p.1.2 = "c-d",
lower.tail = TRUE, log.p = FALSE){
# Control type.p.3.4
TYPE.P.3.4 <- c("t w", "a-s") # tail weights or asymmetry-steepness
int.type.p.3.4 <- pmatch(type.p.3.4, TYPE.P.3.4)
if (is.na(int.type.p.3.4))
stop("invalid type of parametrization for parameters 3 and 4")
if (int.type.p.3.4 == -1)
stop("ambiguous type of parametrization for parameters 3 and 4")
# Control type.p.1.2
TYPE.P.1.2 <- c("c-d", "l-s") # domain or location-scale
int.type.p.1.2 <- pmatch(type.p.1.2, TYPE.P.1.2)
if (is.na(int.type.p.1.2))
stop("invalid type of parametrization for parameters 1 and 2")
if (int.type.p.1.2 == -1)
stop("ambiguous type of parametrization for parameters 1 and 2")
# The length of the result is determined by the maximum of the lengths of the
# numerical arguments. The numerical arguments are recycled to the length of
# the result.
len <- max(length(q),length(p1),length(p2),length(p3),length(p4))
q <- rep(q, len=len)
p1 <- rep(p1, len=len)
p2 <- rep(p2, len=len)
p3 <- rep(p3, len=len)
p4 <- rep(p4, len=len)
# Transform q to 0,1 given domain or location-scale parameters
if(int.type.p.1.2 == 1){ # domain parametrization
# Control domain parameters
min <- replace(p1, p1 >= p2, NaN)
max <- replace(p2, p1 >= p2, NaN)
# Transform q
range <- max - min
q <- (q - min)/range
}
else{ # location-scale parametrization
# Control location-scale parameters
mu <- p1
sigma <- replace(p2, p2 <= 0, NaN)
# Transform q
range <- sigma/BMTsd(p3, p4, type.p.3.4)
q <- (q - mu)/range + BMTmean(p3, p4, type.p.3.4)
}
# Obtain coefficients of polynomials x.t and yf.t given tail weights or
# asymmetry-steepness parameters
if(int.type.p.3.4 == 1){ # tail weights parametrization
# Control tail weights parameters
kappa_l <- replace(p3, p3 < 0 | p3 > 1, NaN)
kappa_r <- replace(p4, p4 < 0 | p4 > 1, NaN)
# Coefficients a_3*t^3+a_2*t^2+a_1*t+a_0
a_3 <- 3*kappa_l+3*kappa_r-2
a_2 <- (-6*kappa_l-3*kappa_r+3)
a_1 <- (3*kappa_l)
}
else{ # asymmetry-steepness parametrization
# Control asymmetry-steepness parameters
zeta <- replace(p3, p3 < -1 | p3 > 1, NaN)
xi <- replace(p4, p4 < 0 | p4 > 1, NaN)
# Coefficients a_3*t^3+a_2*t^2+a_1*t+a_0
abs.zeta <- abs(zeta)
aux1 <- 0.5-xi
a_3 <- 6*(xi+abs.zeta*aux1)-2
a_2 <- -9*(xi+abs.zeta*aux1)+1.5*zeta+3
a_1 <- 3*(xi+abs.zeta*aux1)-1.5*zeta
}
# Propagate NaNs from q and the polynomial coefficients
p <- q+a_3+a_2+a_1
if(any(is.nan(p)))
warning("NaNs found or produced")
ind <- !is.na(p)
# Transformed q outside 0,1
p[ind] <- 0
ind2 <- ind
ind2[ind] <- q[ind] >= 1
p[ind2] <- 1
# Transformed q inside 0,1
ind[ind] <- q[ind] > 0 & q[ind] < 1
if(any(ind)){
# inv.x.t
t <- .inv.x.t(q[ind], a_3[ind], a_2[ind], a_1[ind])
# yF.t
p[ind] <- .yF.t(t)
}
# probabilities are P[X > x]
if(!lower.tail)
p <- 1 - p
# probabilities p are given as log(p)
if(log.p)
p <- log(p)
return(p)
}
#' @rdname BMT
#' @export qBMT
qBMT <- function(p, p3, p4, type.p.3.4 = "t w",
p1 = 0, p2 = 1, type.p.1.2 = "c-d",
lower.tail = TRUE, log.p = FALSE){
# probabilities p are given as log(p)
if(log.p)
p <- exp(p)
# probabilities are P[X > x]
if(!lower.tail)
p <- 1 - p
# Control type.p.3.4
TYPE.P.3.4 <- c("t w", "a-s") # tail weights or asymmetry-steepness
int.type.p.3.4 <- pmatch(type.p.3.4, TYPE.P.3.4)
if (is.na(int.type.p.3.4))
stop("invalid type of parametrization for parameters 3 and 4")
if (int.type.p.3.4 == -1)
stop("ambiguous type of parametrization for parameters 3 and 4")
# Control type.p.1.2
TYPE.P.1.2 <- c("c-d", "l-s") # domain or location-scale
int.type.p.1.2 <- pmatch(type.p.1.2, TYPE.P.1.2)
if (is.na(int.type.p.1.2))
stop("invalid type of parametrization for parameters 1 and 2")
if (int.type.p.1.2 == -1)
stop("ambiguous type of parametrization for parameters 1 and 2")
# The length of the result is determined by the maximum of the lengths of the
# numerical arguments. The numerical arguments are recycled to the length of
# the result.
len <- max(length(p),length(p1),length(p2),length(p3),length(p4))
p <- rep(p, len=len)
p1 <- rep(p1, len=len)
p2 <- rep(p2, len=len)
p3 <- rep(p3, len=len)
p4 <- rep(p4, len=len)
# Obtain coefficients of polynomials x.t and yf.t given tail weights or
# asymmetry-steepness parameters
if(int.type.p.3.4 == 1){ # tail weights parametrization
# Control tail weights parameters
kappa_l <- replace(p3, p3 < 0 | p3 > 1, NaN)
kappa_r <- replace(p4, p4 < 0 | p4 > 1, NaN)
# Coefficients a_3*t^3+a_2*t^2+a_1*t+a_0
a_3 <- 3*kappa_l+3*kappa_r-2
a_2 <- (-6*kappa_l-3*kappa_r+3)
a_1 <- (3*kappa_l)
}
else{ # asymmetry-steepness parametrization
# Control asymmetry-steepness parameters
zeta <- replace(p3, p3 < -1 | p3 > 1, NaN)
xi <- replace(p4, p4 < 0 | p4 > 1, NaN)
# Coefficients a_3*t^3+a_2*t^2+a_1*t+a_0
abs.zeta <- abs(zeta)
aux1 <- 0.5-xi
a_3 <- 6*(xi+abs.zeta*aux1)-2
a_2 <- -9*(xi+abs.zeta*aux1)+1.5*zeta+3
a_1 <- 3*(xi+abs.zeta*aux1)-1.5*zeta
}
# Propagate NaNs from p and the polynomial coefficients
q <- p+a_3+a_2+a_1
if(any(is.nan(q)))
warning("NaNs found or produced")
ind <- !is.na(q)
# q outside (0,1)
q <- rep(NaN,len)
ind2 <- ind
ind2[ind] <- p[ind] == 0
q[ind2] <- 0
ind2 <- ind
ind2[ind] <- p[ind] == 1
q[ind2] <- 1
# q inside (0,1)
ind[ind] <- p[ind] > 0 & p[ind] < 1
if(any(ind)){
# inv.yF.t
t <- .inv.yF.t(p[ind])
# x.t
q[ind] <- .x.t(t, a_3[ind], a_2[ind], a_1[ind])
}
# Transform q to [c,d] given domain or location-scale parameters
if(int.type.p.1.2 == 1){ # domain parametrization
# Control domain parameters
min <- replace(p1, p1 >= p2, NaN)
max <- replace(p2, p1 >= p2, NaN)
# Transform q
range <- max - min
q <- q * range + min
}
else{ # location-scale parametrization
# Control location-scale parameters
mu <- p1
sigma <- replace(p2, p2 <= 0, NaN)
# Transform q
range <- sigma/BMTsd(p3, p4, type.p.3.4)
q <- (q - BMTmean(p3, p4, type.p.3.4)) * range + mu
}
return(q)
}
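# Illustrative sketch (not part of the original package source): pBMT and
# qBMT should be inverses of each other away from the boundaries.
if (FALSE) {
  pr <- seq(0.01, 0.99, by = 0.01)
  max(abs(pr - pBMT(qBMT(pr, 0.25, 0.75, "t w"), 0.25, 0.75, "t w")))  # ~0
}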
#' @rdname BMT
#' @export rBMT
rBMT <- function(n, p3, p4, type.p.3.4 = "t w",
p1 = 0, p2 = 1, type.p.1.2 = "c-d"){
#
len <- length(n)
if(len > 1)
n <- len
else
n <- trunc(n)
if(n < 1)
stop("invalid arguments")
# Method of inversion
p <- runif(n)
x <- qBMT(p, p3, p4, type.p.3.4, p1, p2, type.p.1.2)
return(x)
}
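# Illustrative sketch (not part of the original package source): since rBMT
# uses the inversion method, the empirical CDF of a large sample should
# track the theoretical CDF pBMT.
if (FALSE) {
  set.seed(1)
  smp <- rBMT(1e4, 0.25, 0.75, "t w")
  grd <- seq(0, 1, by = 0.01)
  max(abs(ecdf(smp)(grd) - pBMT(grd, 0.25, 0.75, "t w")))  # small, O(n^(-1/2))
}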
# Global constants
.epsilon <- 1e-10
.zero <- 0-.epsilon
.one <- 1+.epsilon
# x.t
.x.t <- function(t, a_3, a_2, a_1){
x <- ((a_3*t + a_2)*t + a_1)*t
return(x)
}
# Inverse function for x.t
.inv.x.t <- function(x, a_3, a_2, a_1){
# Press W.H., Teukolsky S.A., Vetterling W.T. & Flannery B.P. 2007.
# Numerical recipes: The art of scientific computing
# Section 5.6: Quadratic and Cubic Equations. Page 228.
len <- length(x)
a <- a_2/a_3
b <- a_1/a_3
c <- -x/a_3
Q <- (a*a - 3*b)/9
R <- ((2*a*a - 9*b)*a + 27*c)/54
r <- rep(0,len)
# All real roots
ind.r <- Q*Q*Q-R*R > 0
a.v <- a[ind.r]
Q.v <- Q[ind.r]
R.v <- R[ind.r]
theta <- acos(R.v/sqrt(Q.v*Q.v*Q.v))
aux1 <- -2*sqrt(Q.v)
aux2 <- a.v/3
r.v <- aux1*cos(theta/3)-aux2
ind.no01 <- r.v < .zero | r.v > .one
r.v[ind.no01] <- aux1[ind.no01]*cos((theta[ind.no01]+2*pi)/3)-aux2[ind.no01]
ind.no01 <- r.v < .zero | r.v > .one
r.v[ind.no01] <- aux1[ind.no01]*cos((theta[ind.no01]-2*pi)/3)-aux2[ind.no01]
r[ind.r] <- r.v
# Two complex roots
a.v <- a[!ind.r]
Q.v <- Q[!ind.r]
R.v <- R[!ind.r]
aux2 <- a.v/3
A <- -sign(R.v)*(abs(R.v)+sqrt(R.v*R.v-Q.v*Q.v*Q.v))^(1/3)
B <- rep(0,length(A))
B[A!=0] <- Q.v[A!=0]/A[A!=0]
r.v <- (A+B)-aux2
ind.no01 <- r.v < .zero | r.v > .one
r.v[ind.no01] <- -0.5*(r.v[ind.no01])-1.5*aux2[ind.no01]
r[!ind.r] <- r.v
# Considering an epsilon, all roots in [0,1]
r[r >= .zero & r < 0] <- 0
r[r > 1 & r <= .one] <- 1
return(r)
}
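# Illustrative sketch (not part of the original package source): .inv.x.t
# should invert .x.t. The coefficients below correspond to kappa_l = 0.25,
# kappa_r = 0.75 (a_3 = 1, a_2 = -0.75, a_1 = 0.75); they are passed as
# vectors because .inv.x.t subsets its coefficient arguments elementwise.
if (FALSE) {
  t0 <- seq(0, 1, by = 0.05)
  n0 <- length(t0)
  x0 <- .x.t(t0, 1, -0.75, 0.75)
  max(abs(t0 - .inv.x.t(x0, rep(1, n0), rep(-0.75, n0), rep(0.75, n0))))  # ~0
}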
# yF.t
.yF.t <- function(t){
yF <- (-2*t + 3)*t*t
return(yF)
}
# inv.yF.t
.inv.yF.t <- function(yF){
t <- 0.5-cos((acos(2*yF-1)-2*pi)/3)
return(t)
}
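# Illustrative sketch (not part of the original package source): .yF.t and
# .inv.yF.t are exact inverses on [0, 1], up to floating point error.
if (FALSE) {
  t0 <- seq(0, 1, by = 0.05)
  max(abs(t0 - .inv.yF.t(.yF.t(t0))))  # ~0
}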
# yf.t
.yf.t <- function(t, a_3, a_2, a_1){
yf <- ((-6*t + 6)*t) / ((3*a_3*t + 2*a_2)*t + a_1)
return(yf)
}
# ---- source file: /scratch/gouwar.j/cran-all/cranData/BMT/R/BMT.R ----
#' @title The BMT Distribution Descriptive Measures - Central Tendency.
#' @description Mean, median and mode for the BMT distribution, with \code{p3}
#' and \code{p4} tail weights (\eqn{\kappa_l} and \eqn{\kappa_r}) or
#' asymmetry-steepness parameters (\eqn{\zeta} and \eqn{\xi}) and \code{p1}
#' and \code{p2} domain (minimum and maximum) or location-scale (mean and
#' standard deviation) parameters.
#' @rdname BMTcentral
#' @name BMTcentral
#' @aliases BMTmean
#' @aliases BMTmedian
#' @aliases BMTmode
#'
#' @details See References.
#'
#' @param p3,p4 tail weights (\eqn{\kappa_l} and \eqn{\kappa_r}) or
#' asymmetry-steepness (\eqn{\zeta} and \eqn{\xi}) parameters of the BMT
#' distribution.
#' @param type.p.3.4 type of parametrization associated to p3 and p4. "t w" means
#' tail weights parametrization (default) and "a-s" means asymmetry-steepness
#' parametrization.
#' @param p1,p2 domain (minimum and maximum) or location-scale (mean and
#' standard deviation) parameters of the BMT distribution.
#' @param type.p.1.2 type of parametrization associated to p1 and p2. "c-d" means
#' domain parametrization (default) and "l-s" means location-scale
#' parametrization.
#'
#' @return \code{BMTmean} gives the mean, \code{BMTmedian} the median and
#' \code{BMTmode} the mode for the BMT distribution.
#'
#' The arguments are recycled to the length of the result. Only the first
#' elements of \code{type.p.3.4} and \code{type.p.1.2} are used.
#'
#' If \code{type.p.3.4 == "t w"}, \code{p3 < 0} and \code{p3 > 1} are errors
#' and return \code{NaN}.
#'
#' If \code{type.p.3.4 == "a-s"}, \code{p3 < -1} and \code{p3 > 1} are errors
#' and return \code{NaN}.
#'
#' \code{p4 < 0} and \code{p4 > 1} are errors and return \code{NaN}.
#'
#' If \code{type.p.1.2 == "c-d"}, \code{p1 >= p2} is an error and returns
#' \code{NaN}.
#'
#' If \code{type.p.1.2 == "l-s"}, \code{p2 <= 0} is an error and returns
#' \code{NaN}.
#'
#' @references Torres-Jimenez, C. J. and Montenegro-Diaz, A. M. (2017, September),
#' \emph{An alternative to continuous univariate distributions supported on a
#' bounded interval: The BMT distribution}. ArXiv e-prints.
#'
#' Torres-Jimenez, C. J. (2018), \emph{The BMT Item Response Theory model: A
#' new skewed distribution family with bounded domain and an IRT model based
#' on it}, PhD thesis, Doctorado en ciencias - Estadistica, Universidad
#' Nacional de Colombia, Sede Bogota.
#'
#' @seealso \code{\link{BMTdispersion}}, \code{\link{BMTskewness}},
#' \code{\link{BMTkurtosis}}, \code{\link{BMTmoments}} for other descriptive
#' measures or moments.
#'
#' @author Camilo Jose Torres-Jimenez [aut,cre] \email{[email protected]}
#'
#' @examples
#' # BMT on [0,1] with left tail weight equal to 0.25 and
#' # right tail weight equal to 0.75
#' BMTmean(0.25, 0.75, "t w")
#' BMTmedian(0.25, 0.75, "t w")
#' BMTmode(0.25, 0.75, "t w")
#'
#' # BMT on [0,1] with asymmetry coefficient equal to 0.5 and
#' # steepness coefficient equal to 0.5
#' BMTmean(0.5, 0.5, "a-s")
#' BMTmedian(0.5, 0.5, "a-s")
#' BMTmode(0.5, 0.5, "a-s")
#'
#' # BMT on [-1.783489,3.312195] with
#' # left tail weight equal to 0.25 and
#' # right tail weight equal to 0.75
#' BMTmean(0.25, 0.75, "t w", -1.783489, 3.312195, "c-d")
#' BMTmedian(0.25, 0.75, "t w", -1.783489, 3.312195, "c-d")
#' BMTmode(0.25, 0.75, "t w", -1.783489, 3.312195, "c-d")
#'
#' # BMT with mean equal to 0, standard deviation equal to 1,
#' # asymmetry coefficient equal to 0.5 and
#' # steepness coefficient equal to 0.5
#' BMTmean(0.5, 0.5, "a-s", 0, 1, "l-s")
#' BMTmedian(0.5, 0.5, "a-s", 0, 1, "l-s")
#' BMTmode(0.5, 0.5, "a-s", 0, 1, "l-s")
#' @rdname BMTcentral
#' @export BMTmean
BMTmean <- function(p3, p4, type.p.3.4 = "t w",
p1 = 0, p2 = 1, type.p.1.2 = "c-d"){
# The length of the result is determined by the maximum of the lengths of the
# numerical arguments. The numerical arguments are recycled to the length of
# the result.
len <- max(length(p1),length(p2),length(p3),length(p4))
p1 <- rep(p1, len=len)
p2 <- rep(p2, len=len)
p3 <- rep(p3, len=len)
p4 <- rep(p4, len=len)
# Control type.p.3.4
TYPE.P.3.4 <- c("t w", "a-s")
int.type.p.3.4 <- pmatch(type.p.3.4, TYPE.P.3.4)
if (is.na(int.type.p.3.4))
stop("invalid type of parametrization for parameters 3 and 4")
if (int.type.p.3.4 == -1)
stop("ambiguous type of parametrization for parameters 3 and 4")
# Control type.p.1.2
TYPE.P.1.2 <- c("c-d", "l-s")
int.type.p.1.2 <- pmatch(type.p.1.2, TYPE.P.1.2)
if (is.na(int.type.p.1.2))
stop("invalid type of parametrization for parameters 1 and 2")
if (int.type.p.1.2 == -1)
stop("ambiguous type of parametrization for parameters 1 and 2")
# domain or location-scale parametrization
if(int.type.p.1.2 == 1){ # domain parametrization
# Control domain parameters
min <- replace(p1, p1 >= p2, NaN)
max <- replace(p2, p1 >= p2, NaN)
# range
range <- max - min
# tail weights or asymmetry-steepness parametrization
if(int.type.p.3.4 == 1){ # tail weights parametrization
# Control tail weights parameters
kappa_l <- replace(p3, p3 < 0 | p3 > 1, NaN)
kappa_r <- replace(p4, p4 < 0 | p4 > 1, NaN)
# mean
m <- 0.3*(kappa_l - kappa_r) + 0.5
}
else{ # asymmetry-steepness parametrization
# Control asymmetry-steepness parameters
zeta <- replace(p3, p3 < -1 | p3 > 1, NaN)
xi <- replace(p4, p4 < 0 | p4 > 1, NaN)
# mean
m <- -0.3*zeta + 0.5
}
# scaled and shifted mean
m <- range*m + min
}
else{ # location-scale parametrization
# mean
m <- p1
}
return(m)
}
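# Illustrative sketch (not part of the original package source): the
# closed-form mean should agree with the numerical first moment of dBMT.
if (FALSE) {
  integrate(function(x) x * dBMT(x, 0.25, 0.75, "t w"), 0, 1)$value
  BMTmean(0.25, 0.75, "t w")  # 0.3*(0.25 - 0.75) + 0.5 = 0.35
}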
#' @rdname BMTcentral
#' @export BMTmedian
BMTmedian <- function(p3, p4, type.p.3.4 = "t w",
p1 = 0, p2 = 1, type.p.1.2 = "c-d"){
# The length of the result is determined by the maximum of the lengths of the
# numerical arguments. The numerical arguments are recycled to the length of
# the result.
len <- max(length(p1),length(p2),length(p3),length(p4))
p1 <- rep(p1, len=len)
p2 <- rep(p2, len=len)
p3 <- rep(p3, len=len)
p4 <- rep(p4, len=len)
# Control type.p.3.4
TYPE.P.3.4 <- c("t w", "a-s")
int.type.p.3.4 <- pmatch(type.p.3.4, TYPE.P.3.4)
if (is.na(int.type.p.3.4))
stop("invalid type of parametrization for parameters 3 and 4")
if (int.type.p.3.4 == -1)
stop("ambiguous type of parametrization for parameters 3 and 4")
# Control type.p.1.2
TYPE.P.1.2 <- c("c-d", "l-s")
int.type.p.1.2 <- pmatch(type.p.1.2, TYPE.P.1.2)
if (is.na(int.type.p.1.2))
stop("invalid type of parametrization for parameters 1 and 2")
if (int.type.p.1.2 == -1)
stop("ambiguous type of parametrization for parameters 1 and 2")
# tail weights or asymmetry-steepness parametrization
if(int.type.p.3.4 == 1){ # tail weights parametrization
# Control tail weights parameters
kappa_l <- replace(p3, p3 < 0 | p3 > 1, NaN)
kappa_r <- replace(p4, p4 < 0 | p4 > 1, NaN)
# median
m <- 0.375*(kappa_l - kappa_r) + 0.5
}
else{ # asymmetry-steepness parametrization
# Control asymmetry-steepness parameters
zeta <- replace(p3, p3 < -1 | p3 > 1, NaN)
xi <- replace(p4, p4 < 0 | p4 > 1, NaN)
# median
m <- -0.375*zeta + 0.5
}
# domain or location-scale parametrization
if(int.type.p.1.2 == 1){ # domain parametrization
# Control domain parameters
min <- replace(p1, p1 >= p2, NaN)
max <- replace(p2, p1 >= p2, NaN)
# range
range <- max - min
# scaled and shifted median
m <- range*m + min
}
else{ # location-scale parametrization
# Control location-scale parameters
mu <- p1
sigma <- replace(p2, p2 <= 0, NaN)
# range
range <- sigma/BMTsd(p3, p4, type.p.3.4)
# scaled and shifted median
m <- range*(m - BMTmean(p3, p4, type.p.3.4)) + mu
}
return(m)
}
#' @rdname BMTcentral
#' @export BMTmode
BMTmode <- function(p3, p4, type.p.3.4 = "t w",
p1 = 0, p2 = 1, type.p.1.2 = "c-d"){
# The length of the result is determined by the maximum of the lengths of the
# numerical arguments. The numerical arguments are recycled to the length of
# the result.
len <- max(length(p1),length(p2),length(p3),length(p4))
p1 <- rep(p1, len=len)
p2 <- rep(p2, len=len)
p3 <- rep(p3, len=len)
p4 <- rep(p4, len=len)
# Control type.p.3.4
TYPE.P.3.4 <- c("t w", "a-s")
int.type.p.3.4 <- pmatch(type.p.3.4, TYPE.P.3.4)
if (is.na(int.type.p.3.4))
stop("invalid type of parametrization for parameters 3 and 4")
if (int.type.p.3.4 == -1)
stop("ambiguous type of parametrization for parameters 3 and 4")
# Control type.p.1.2
TYPE.P.1.2 <- c("c-d", "l-s")
int.type.p.1.2 <- pmatch(type.p.1.2, TYPE.P.1.2)
if (is.na(int.type.p.1.2))
stop("invalid type of parametrization for parameters 1 and 2")
if (int.type.p.1.2 == -1)
stop("ambiguous type of parametrization for parameters 1 and 2")
# tail weights or asymmetry-steepness parametrization
if(int.type.p.3.4 == 1){ # tail weights parametrization
# Control tail weights parameters
kappa_l <- replace(p3, p3 < 0 | p3 > 1, NaN)
kappa_r <- replace(p4, p4 < 0 | p4 > 1, NaN)
}
else{ # asymmetry-steepness parametrization
# change parametrization
p <- BMTchangepars(p3, p4, type.p.3.4)
kappa_l <- p$p3
kappa_r <- p$p4
}
# mode
aux1 <- sqrt(kappa_l*kappa_r)
m <- ifelse(kappa_l==kappa_r,0.5,
(kappa_l^2 - 5*kappa_l*kappa_r + kappa_l*aux1 + 3*kappa_r*aux1 +
9*kappa_l*kappa_r^2 + 3*kappa_l^2*kappa_r - 3*kappa_r^2*aux1 -
9*kappa_l*kappa_r*aux1)*(kappa_l-aux1)/(kappa_l-kappa_r)^3)
# domain or location-scale parametrization
if(int.type.p.1.2 == 1){ # domain parametrization
# Control domain parameters
min <- replace(p1, p1 >= p2, NaN)
max <- replace(p2, p1 >= p2, NaN)
# range
range <- max - min
# scaled and shifted mode
m <- range*m + min
}
else{ # location-scale parametrization
# Control location-scale parameters
mu <- p1
sigma <- replace(p2, p2 <= 0, NaN)
# range
range <- sigma/BMTsd(p3, p4, type.p.3.4)
# scaled and shifted mode
m <- range*(m - BMTmean(p3, p4, type.p.3.4)) + mu
}
return(m)
}
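# Illustrative sketch (not part of the original package source): the mode
# should be the maximizer of the density; stats::optimize gives an
# independent numerical check.
if (FALSE) {
  optimize(dBMT, c(0, 1), p3 = 0.25, p4 = 0.75, maximum = TRUE)$maximum
  BMTmode(0.25, 0.75, "t w")  # both should agree
}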
# ---- source file: /scratch/gouwar.j/cran-all/cranData/BMT/R/BMTcentral.R ----
#' @title The BMT Distribution Parameter Conversion.
#' @description Parameter conversion for different parameterizations for the BMT
#' distribution, with \code{p3} and \code{p4} tail weights (\eqn{\kappa_l}
#' and \eqn{\kappa_r}) or asymmetry-steepness parameters (\eqn{\zeta} and
#' \eqn{\xi}) and \code{p1} and \code{p2} domain (minimum and maximum) or
#' location-scale (mean and standard deviation) parameters.
#' @name BMTchangepars
#' @aliases BMTchangepars
#'
#' @details The BMT coefficient of asymmetry \eqn{-1 < \zeta < 1} is
#' \deqn{\kappa_r - \kappa_l}
#'
#' The BMT coefficient of steepness \eqn{0 < \xi < 1} is \deqn{(\kappa_r +
#' \kappa_l - |\kappa_r - \kappa_l|) / (2 (1 - |\kappa_r - \kappa_l|))} for
#' \eqn{|\kappa_r - \kappa_l| < 1}.
#'
#' The BMT distribution has mean \eqn{( d - c ) BMTmean(\kappa_l, \kappa_r) +
#' c} and standard deviation \eqn{( d - c ) BMTsd(\kappa_l, \kappa_r)}
#'
#' From these equations, we can go back and forth with each parameterization.
#'
#' @param p3,p4 tail weights (\eqn{\kappa_l} and \eqn{\kappa_r}) or
#' asymmetry-steepness (\eqn{\zeta} and \eqn{\xi}) parameters of the BMT
#' distribution.
#' @param type.p.3.4 type of parametrization associated to p3 and p4. "t w" means
#' tail weights parametrization (default) and "a-s" means asymmetry-steepness
#' parametrization.
#' @param p1,p2 domain (minimum and maximum) or location-scale (mean and
#' standard deviation) parameters of the BMT distribution.
#' @param type.p.1.2 type of parametrization associated to p1 and p2. "c-d" means
#' domain parametrization (default) and "l-s" means location-scale
#' parametrization.
#'
#' @return \code{BMTchangepars} reparametrizes \code{p3}, \code{p4}, \code{p1},
#' \code{p2} according to the alternative parameterizations from the given
#' \code{type.p.3.4} and \code{type.p.1.2}. \code{BMTchangepars} returns a
#' list with the alternative arguments to those received.
#'
#' The arguments are recycled to the length of the result. Only the first
#' elements of \code{type.p.3.4} and \code{type.p.1.2} are used.
#'
#' If \code{type.p.3.4 == "t w"}, \code{p3 < 0} and \code{p3 > 1} are errors
#' and return \code{NaN}.
#'
#' If \code{type.p.3.4 == "a-s"}, \code{p3 < -1} and \code{p3 > 1} are errors
#' and return \code{NaN}.
#'
#' \code{p4 < 0} and \code{p4 > 1} are errors and return \code{NaN}.
#'
#' If \code{type.p.1.2 == "c-d"}, \code{p1 >= p2} is an error and returns
#' \code{NaN}.
#'
#' If \code{type.p.1.2 == "l-s"}, \code{p2 <= 0} is an error and returns
#' \code{NaN}.
#'
#' @references Torres-Jimenez, C. J. (2018), \emph{The BMT Item Response Theory
#' model: A new skewed distribution family with bounded domain and an IRT
#' model based on it}, PhD thesis, Doctorado en ciencias - Estadistica,
#' Universidad Nacional de Colombia, Sede Bogota.
#'
#' @seealso \code{\link{BMT}} for the BMT density, distribution, quantile
#' function and random deviates.
#'
#' @author Camilo Jose Torres-Jimenez [aut,cre] \email{[email protected]}
#' and Alvaro Mauricio Montenegro Diaz [ths]
#'
#' @examples
#' # BMT on [0,1] with left tail weight equal to 0.25 and
#' # right tail weight equal to 0.75
#' parameters <- BMTchangepars(0.25, 0.75, "t w")
#' parameters # Parameters of the BMT in the asymmetry-steepness parametrization
#'
#' # BMT with mean equal to 0, standard deviation equal to 1,
#' # asymmetry coefficient equal to 0.5 and
#' # steepness coefficient equal to 0.5
#' parameters <- BMTchangepars(0.5, 0.5, "a-s", 0, 1, "l-s")
#' parameters # Parameters of the BMT in the tail weight and domain parametrization
#' @rdname BMTchangepars
#' @export BMTchangepars
BMTchangepars <- function(p3, p4, type.p.3.4 = "t w",
p1 = NULL, p2 = NULL, type.p.1.2 = NULL){
# The length of the result is determined by the maximum of the lengths of the
# numerical arguments. The numerical arguments are recycled to the length of
# the result.
if(is.null(p1) || is.null(p2)){
len <- max(length(p3),length(p4))
p3 <- rep(p3, len=len)
p4 <- rep(p4, len=len)
}
else{
len <- max(length(p1),length(p2),length(p3),length(p4))
p1 <- rep(p1, len=len)
p2 <- rep(p2, len=len)
p3 <- rep(p3, len=len)
p4 <- rep(p4, len=len)
}
# Control type.p.3.4
TYPE.P.3.4 <- c("t w", "a-s")
int.type.p.3.4 <- pmatch(type.p.3.4, TYPE.P.3.4)
if (is.na(int.type.p.3.4))
stop("invalid type of parametrization for parameters 3 and 4")
if (int.type.p.3.4 == -1)
stop("ambiguous type of parametrization for parameters 3 and 4")
# tail weights or asymmetry-steepness parametrization
if(int.type.p.3.4 == 1){ # tail weights parametrization
# Control tail weights parameters
kappa_l <- replace(p3, p3 < 0 | p3 > 1, NaN)
kappa_r <- replace(p4, p4 < 0 | p4 > 1, NaN)
# Parameter conversion
zeta <- BMTasymm(kappa_l, kappa_r, type.p.3.4)
xi <- BMTsteep(kappa_l, kappa_r, type.p.3.4)
#
zeta <- replace(zeta, zeta > 1 & zeta < .one, 1)
zeta <- replace(zeta, zeta < -1 & zeta > -.one, -1)
xi <- replace(xi, xi > 1 & xi < .one, 1)
xi <- replace(xi, xi < 0 & xi > .zero, 0)
#
p <- list(p3=zeta, p4=xi, type.p.3.4="a-s")
if(is.null(type.p.1.2)){
return(p)
}
}
else{ # asymmetry-steepness parametrization
# Control asymmetry-steepness parameters
zeta <- replace(p3, p3 < -1 | p3 > 1, NaN)
xi <- replace(p4, p4 < 0 | p4 > 1, NaN)
# Parameter conversion
kappa_r <- xi + abs(zeta)*(0.5 - xi) + 0.5*zeta
kappa_l <- kappa_r - zeta
#
kappa_r <- replace(kappa_r, kappa_r > 1 & kappa_r < .one, 1)
kappa_r <- replace(kappa_r, kappa_r < 0 & kappa_r > .zero, 0)
kappa_l <- replace(kappa_l, kappa_l > 1 & kappa_l < .one, 1)
kappa_l <- replace(kappa_l, kappa_l < 0 & kappa_l > .zero, 0)
#
p <- list(p3=kappa_l, p4=kappa_r, type.p.3.4="t w")
if(is.null(type.p.1.2)){
return(p)
}
}
# Control type.p.1.2
TYPE.P.1.2 <- c("c-d", "l-s")
int.type.p.1.2 <- pmatch(type.p.1.2, TYPE.P.1.2)
if (is.na(int.type.p.1.2))
stop("invalid type of parametrization for parameters 1 and 2")
if (int.type.p.1.2 == -1)
stop("ambiguous type of parametrization for parameters 1 and 2")
# domain or location-scale parametrization
if(int.type.p.1.2 == 1){ # domain parametrization
# Control domain parameters
min <- replace(p1, p1 >= p2, NaN)
max <- replace(p2, p1 >= p2, NaN)
# range
range <- max - min
#
p$p1 <- range*BMTmean(p3, p4, type.p.3.4) + min
p$p2 <- range*BMTsd(p3, p4, type.p.3.4)
p$type.p.1.2 <- "l-s"
}
else{ # location-scale parametrization
# Control location-scale parameters
mu <- p1
sigma <- replace(p2, p2 <= 0, NaN)
# range
range <- sigma/BMTsd(p3, p4, type.p.3.4)
#
p$p1 <- mu - range*BMTmean(p3, p4, type.p.3.4)
p$p2 <- range + p$p1
p$type.p.1.2 <- "c-d"
}
return(p)
}
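# Illustrative sketch (not part of the original package source): converting
# tail weights to asymmetry-steepness and back should recover the original
# parameters.
if (FALSE) {
  p <- BMTchangepars(0.25, 0.75, "t w")    # zeta = 0.5, xi = 0.5
  BMTchangepars(p$p3, p$p4, p$type.p.3.4)  # p3 ~ 0.25, p4 ~ 0.75
}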
# ---- source file: /scratch/gouwar.j/cran-all/cranData/BMT/R/BMTchangepars.R ----
#' @title The BMT Distribution Descriptive Measures - Dispersion.
#' @description Variance, standard deviation and interquartile range for the BMT
#' distribution, with \code{p3} and \code{p4} tail weights (\eqn{\kappa_l}
#' and \eqn{\kappa_r}) or asymmetry-steepness parameters (\eqn{\zeta} and
#' \eqn{\xi}) and \code{p1} and \code{p2} domain (minimum and maximum) or
#' location-scale (mean and standard deviation) parameters.
#' @rdname BMTdispersion
#' @name BMTdispersion
#' @aliases BMTvar
#' @aliases BMTsd
#' @aliases BMTiqr
#'
#' @details See References.
#'
#' @param p3,p4 tail weights (\eqn{\kappa_l} and \eqn{\kappa_r}) or
#' asymmetry-steepness (\eqn{\zeta} and \eqn{\xi}) parameters of the BMT
#' distribution.
#' @param type.p.3.4 type of parametrization associated to p3 and p4. "t w" means
#' tail weights parametrization (default) and "a-s" means asymmetry-steepness
#' parametrization.
#' @param p1,p2 domain (minimum and maximum) or location-scale (mean and
#' standard deviation) parameters of the BMT distribution.
#' @param type.p.1.2 type of parametrization associated to p1 and p2. "c-d" means
#' domain parametrization (default) and "l-s" means location-scale
#' parametrization.
#'
#' @return \code{BMTvar} gives the variance, \code{BMTsd} the standard deviation
#' and \code{BMTiqr} the interquartile range for the BMT distribution.
#'
#' The arguments are recycled to the length of the result. Only the first
#' elements of \code{type.p.3.4} and \code{type.p.1.2} are used.
#'
#' If \code{type.p.3.4 == "t w"}, \code{p3 < 0} and \code{p3 > 1} are errors
#' and return \code{NaN}.
#'
#' If \code{type.p.3.4 == "a-s"}, \code{p3 < -1} and \code{p3 > 1} are errors
#' and return \code{NaN}.
#'
#' \code{p4 < 0} and \code{p4 > 1} are errors and return \code{NaN}.
#'
#' If \code{type.p.1.2 == "c-d"}, \code{p1 >= p2} is an error and returns
#' \code{NaN}.
#'
#' If \code{type.p.1.2 == "l-s"}, \code{p2 <= 0} is an error and returns
#' \code{NaN}.
#'
#' @references Torres-Jimenez, C. J. and Montenegro-Diaz, A. M. (2017, September),
#' \emph{An alternative to continuous univariate distributions supported on a
#' bounded interval: The BMT distribution}. ArXiv e-prints.
#'
#' Torres-Jimenez, C. J. (2018), \emph{The BMT Item Response Theory model: A
#' new skewed distribution family with bounded domain and an IRT model based
#' on it}, PhD thesis, Doctorado en ciencias - Estadistica, Universidad
#' Nacional de Colombia, Sede Bogota.
#'
#' @seealso \code{\link{BMTcentral}}, \code{\link{BMTskewness}},
#' \code{\link{BMTkurtosis}}, \code{\link{BMTmoments}} for other descriptive
#' measures or moments.
#'
#' @author Camilo Jose Torres-Jimenez [aut,cre] \email{[email protected]}
#'
#' @examples
#' # BMT on [0,1] with left tail weight equal to 0.25 and
#' # right tail weight equal to 0.75
#' BMTvar(0.25, 0.75, "t w")
#' BMTsd(0.25, 0.75, "t w")
#' BMTiqr(0.25, 0.75, "t w")
#'
#' # BMT on [0,1] with asymmetry coefficient equal to 0.5 and
#' # steepness coefficient equal to 0.5
#' BMTvar(0.5, 0.5, "a-s")
#' BMTsd(0.5, 0.5, "a-s")
#' BMTiqr(0.5, 0.5, "a-s")
#'
#' # BMT on [-1.783489,3.312195] with left tail weight equal to 0.25 and
#' # right tail weight equal to 0.75
#' BMTvar(0.25, 0.75, "t w", -1.783489, 3.312195, "c-d")
#' BMTsd(0.25, 0.75, "t w", -1.783489, 3.312195, "c-d")
#' BMTiqr(0.25, 0.75, "t w", -1.783489, 3.312195, "c-d")
#'
#' # BMT with mean equal to 0, standard deviation equal to 1,
#' # asymmetry coefficient equal to 0.5 and
#' # steepness coefficient equal to 0.5
#' BMTvar(0.5, 0.5, "a-s", 0, 1, "l-s")
#' BMTsd(0.5, 0.5, "a-s", 0, 1, "l-s")
#' BMTiqr(0.5, 0.5, "a-s", 0, 1, "l-s")
#' @rdname BMTdispersion
#' @export BMTvar
BMTvar <- function(p3, p4, type.p.3.4 = "t w",
p1 = 0, p2 = 1, type.p.1.2 = "c-d"){
# The length of the result is determined by the maximum of the lengths of the
# numerical arguments. The numerical arguments are recycled to the length of
# the result.
len <- max(length(p1),length(p2),length(p3),length(p4))
p1 <- rep(p1, len=len)
p2 <- rep(p2, len=len)
p3 <- rep(p3, len=len)
p4 <- rep(p4, len=len)
# Control type.p.3.4
TYPE.P.3.4 <- c("t w", "a-s")
int.type.p.3.4 <- pmatch(type.p.3.4, TYPE.P.3.4)
if (is.na(int.type.p.3.4))
stop("invalid type of parametrization for parameters 3 and 4")
if (int.type.p.3.4 == -1)
stop("ambiguous type of parametrization for parameters 3 and 4")
# Control type.p.1.2
TYPE.P.1.2 <- c("c-d", "l-s")
int.type.p.1.2 <- pmatch(type.p.1.2, TYPE.P.1.2)
if (is.na(int.type.p.1.2))
stop("invalid type of parametrization for parameters 1 and 2")
if (int.type.p.1.2 == -1)
stop("ambiguous type of parametrization for parameters 1 and 2")
# domain or location-scale parametrization
if(int.type.p.1.2 == 1){ # domain parametrization
# Control domain parameters
min <- replace(p1, p1 >= p2, NaN)
max <- replace(p2, p1 >= p2, NaN)
range <- max - min
# tail weights or asymmetry-steepness parametrization
if(int.type.p.3.4 == 1){ # tail weights parametrization
# Control tail weights parameters
kappa_l <- replace(p3, p3 < 0 | p3 > 1, NaN)
kappa_r <- replace(p4, p4 < 0 | p4 > 1, NaN)
# variance
m <- ((36*kappa_l - 120 + 18*kappa_r)*kappa_l + (36*kappa_r - 120)*kappa_r + 175) / 2100
}
else{ # asymmetry-steepness parametrization
# Control asymmetry-steepness parameters
zeta <- replace(p3, p3 < -1 | p3 > 1, NaN)
xi <- replace(p4, p4 < 0 | p4 > 1, NaN)
# variance
m <- (zeta^2*((90*xi-90)*xi+36)+abs(zeta)*((-180*xi+330)*xi-120)+((90*xi-240)*xi+175)) / 2100
}
# scaled variance
m <- range^2*m
}
else{ # location-scale parametrization
# Control scale parameter
sigma <- replace(p2, p2 <= 0, NaN)
# variance
m <- sigma^2
}
return(m)
}
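# Illustrative sketch (not part of the original package source): the
# closed-form variance should agree with the numerical second central
# moment of dBMT.
if (FALSE) {
  mu0 <- BMTmean(0.25, 0.75, "t w")
  integrate(function(x) (x - mu0)^2 * dBMT(x, 0.25, 0.75, "t w"), 0, 1)$value
  BMTvar(0.25, 0.75, "t w")  # both should agree
}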
#' @rdname BMTdispersion
#' @export BMTsd
BMTsd <- function(p3, p4, type.p.3.4 = "t w",
p1 = 0, p2 = 1, type.p.1.2 = "c-d"){
m <- sqrt(BMTvar(p3, p4, type.p.3.4, p1, p2, type.p.1.2))
return(m)
}
#' @rdname BMTdispersion
#' @export BMTiqr
BMTiqr <- function(p3, p4, type.p.3.4 = "t w",
p1 = 0, p2 = 1, type.p.1.2 = "c-d"){
# The length of the result is determined by the maximum of the lengths of the
# numerical arguments. The numerical arguments are recycled to the length of
# the result.
len <- max(length(p1),length(p2),length(p3),length(p4))
p1 <- rep(p1, len=len)
p2 <- rep(p2, len=len)
p3 <- rep(p3, len=len)
p4 <- rep(p4, len=len)
# Control type.p.3.4
TYPE.P.3.4 <- c("t w", "a-s")
int.type.p.3.4 <- pmatch(type.p.3.4, TYPE.P.3.4)
if (is.na(int.type.p.3.4))
stop("invalid type of parametrization for parameters 3 and 4")
if (int.type.p.3.4 == -1)
stop("ambiguous type of parametrization for parameters 3 and 4")
# Control type.p.1.2
TYPE.P.1.2 <- c("c-d", "l-s")
int.type.p.1.2 <- pmatch(type.p.1.2, TYPE.P.1.2)
if (is.na(int.type.p.1.2))
stop("invalid type of parametrization for parameters 1 and 2")
if (int.type.p.1.2 == -1)
stop("ambiguous type of parametrization for parameters 1 and 2")
# tail weights or asymmetry-steepness parametrization
if(int.type.p.3.4 == 1){ # tail weights parametrization
# Control tail weights parameters
kappa_l <- replace(p3, p3 < 0 | p3 > 1, NaN)
kappa_r <- replace(p4, p4 < 0 | p4 > 1, NaN)
# iqr
m <- 3*(cos(4*pi/9) - 0.25)*(kappa_l + kappa_r) + 0.5
}
else{ # asymmetry-steepness parametrization
# Control asymmetry-steepness parameters
zeta <- replace(p3, p3 < -1 | p3 > 1, NaN)
xi <- replace(p4, p4 < 0 | p4 > 1, NaN)
# iqr
abs.zeta <- abs(zeta)
m <- 3*(cos(4*pi/9) - 0.25)*(abs.zeta + 2*xi*(1-abs.zeta)) + 0.5
}
# domain or location-scale parametrization
if(int.type.p.1.2 == 1){ # domain parametrization
# Control domain parameters
min <- replace(p1, p1 >= p2, NaN)
max <- replace(p2, p1 >= p2, NaN)
# range
range <- max - min
}
else{ # location-scale parametrization
# Control location-scale parameters
mu <- p1
sigma <- replace(p2, p2 <= 0, NaN)
# range
range <- sigma/BMTsd(p3, p4, type.p.3.4)
}
# scaled iqr
m <- range*m
return(m)
}
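# Illustrative sketch (not part of the original package source): the
# closed-form IQR should equal the difference between the 0.75 and 0.25
# quantiles computed with qBMT.
if (FALSE) {
  qBMT(0.75, 0.25, 0.75, "t w") - qBMT(0.25, 0.25, 0.75, "t w")
  BMTiqr(0.25, 0.75, "t w")  # both ~0.271
}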
# ---- source file: /scratch/gouwar.j/cran-all/cranData/BMT/R/BMTdispersion.R ----
#' @title Fit of the BMT Distribution to Non-censored Data.
#'
#' @description Fit of the BMT distribution to non-censored data by maximum
#' likelihood (mle), moment matching (mme), quantile matching (qme), maximum
#' goodness-of-fit (mge), also known as minimum distance, maximum product of
#' spacing (mpse), also called maximum spacing, and minimum quantile distance
#' (mqde), which can also be called maximum quantile goodness-of-fit.
#'
#' @rdname BMTfit
#' @name BMTfit
#'
#' @details This function is based on the function \code{\link{fitdist}} from
#' the package \code{\link{fitdistrplus}} but it focuses on the parameter
#' estimation for the BMT distribution (see \code{\link{BMT}} for details). It
#' has six possible fitting methods: maximum likelihood (mle), moment matching
#' (mme), quantile matching (qme), maximum goodness-of-fit (mge), also known
#' as minimum distance, maximum product of spacing (mpse), also called maximum
#' spacing, and minimum quantile distance (mqde), which can also be called
#' maximum quantile goodness-of-fit. These fitting methods are carried out in
#' \code{\link{BMTfit.mle}}, \code{\link{BMTfit.mme}},
#' \code{\link{BMTfit.qme}}, \code{\link{BMTfit.mge}},
#' \code{\link{BMTfit.mpse}}, and \code{\link{BMTfit.mqde}}, respectively (see
#' each function for details). \code{BMTfit} returns an object of class
#' \code{"fitdist"} (see \code{\link{fitdist}} for details). Therefore, it
#' benefits of all the developed functions and methods for that class (see
#' \code{\link{fitdistrplus}} for details).
#'
#' Generic methods of a \code{\link{fitdist}} object are \code{print},
#' \code{plot}, \code{summary}, \code{quantile}, \code{logLik}, \code{vcov}
#' and \code{coef}.
#'
#' @param data A numeric vector with the observed values for non-censored data.
#' @param method A character string coding for the fitting method: \code{"mle"}
#' for 'maximum likelihood estimation', \code{"mme"} for 'moment matching
#' estimation', \code{"qme"} for 'quantile matching estimation', \code{"mge"}
#' for 'maximum goodness-of-fit estimation', \code{"mpse"} for 'maximum
#' product of spacing estimation', and \code{"mqde"} for 'minimum quantile
#' estimation'.
#' @param start A named list giving the initial values of parameters of the BMT
#' distribution or a function of data computing initial values and returning a
#' named list. (see the 'details' section of
#' \code{\link{mledist}}).
#' @param fix.arg An optional named list giving the values of fixed parameters
#' of the BMT distribution or a function of data computing (fixed) parameter
#' values and returning a named list. Parameters with fixed value are thus NOT
#' estimated. (see the 'details' section of
#' \code{\link{mledist}}).
#' @param type.p.3.4 Type of parametrization associated to p3 and p4. "t w" means
#' tail weights parametrization (default) and "a-s" means asymmetry-steepness
#' parametrization.
#' @param type.p.1.2 Type of parametrization associated to p1 and p2. "c-d" means
#' domain parametrization (default) and "l-s" means location-scale
#' parametrization.
#' @param optim.method \code{"default"} (see the 'details' section of
#' \code{\link{mledist}}) or optimization method to pass to
#' \code{\link{optim}}.
#' @param custom.optim A function carrying the optimization (see the 'details'
#' section of \code{\link{mledist}}).
#' @param keepdata A logical. If \code{TRUE}, dataset is returned, otherwise
#' only a sample subset is returned.
#' @param keepdata.nb When \code{keepdata=FALSE}, the length (>1) of the subset
#' returned.
#' @param \dots Further arguments to be passed to generic functions, or to one
#' of the functions \code{"BMTfit.mle"}, \code{"BMTfit.mme"},
#' \code{"BMTfit.qme"}, \code{"BMTfit.mge"}, \code{"BMTfit.mpse"}, or
#' \code{"BMTfit.mqde"} depending of the chosen method. See
#' \code{\link{BMTfit.mle}}, \code{\link{BMTfit.mme}},
#' \code{\link{BMTfit.qme}}, \code{\link{BMTfit.mge}},
#' \code{\link{BMTfit.mpse}}, \code{\link{BMTfit.mqde}} for details on
#' parameter estimation.
#'
#' @return \code{fitdist} returns an object of class \code{"fitdist"} with the
#' following components:
#'
#' \item{estimate }{ the parameter estimates.}
#'
#' \item{method }{ the character string coding for the fitting method :
#' \code{"mle"} for 'maximum likelihood estimation', \code{"mme"} for 'moment
#' matching estimation', \code{"qme"} for 'quantile matching estimation',
#' \code{"mge"} for 'maximum goodness-of-fit estimation', \code{"mpse"} for
#' 'maximum product of spacing estimation', and \code{"mqde"} for 'minimum
#' quantile estimation'.}
#'
#' \item{sd}{ the estimated standard errors, \code{NA} if numerically not
#' computable or \code{NULL} if not available.}
#'
#' \item{cor}{ the estimated correlation matrix, \code{NA} if numerically not
#' computable or \code{NULL} if not available.}
#'
#' \item{vcov}{ the estimated variance-covariance matrix, \code{NULL} if not
#' available.}
#'
#' \item{loglik}{ the log-likelihood.}
#'
#' \item{aic}{ the Akaike information criterion.}
#'
#' \item{bic}{ the so-called BIC or SBC (Schwarz Bayesian criterion).}
#'
#' \item{n}{ the length of the data set.}
#'
#' \item{data}{ the data set.}
#'
#' \item{distname}{ the name of the distribution (BMT).}
#'
#' \item{fix.arg}{ the named list giving the values of parameters of the named
#' distribution that must be kept fixed rather than estimated or \code{NULL}
#' if there are no such parameters. }
#'
#' \item{fix.arg.fun}{the function used to set the value of \code{fix.arg} or
#' \code{NULL}.}
#'
#' \item{discrete}{ the input argument or the automatic definition by the
#' function to be passed to functions \code{\link{gofstat}},
#' \code{\link{plotdist}} and \code{\link{cdfcomp}}. }
#'
#' \item{dots}{ the list of further arguments passed in \dots to be used in
#' \code{\link{bootdist}} in iterative calls to \code{\link{mledist}},
#' \code{\link{mmedist}}, \code{\link{qmedist}}, \code{\link{mgedist}},
#' \code{\link{mpsedist}}, \code{\link{mqdedist}} or \code{NULL} if no such
#' arguments.}
#'
#' \item{weights}{the vector of weights used in the estimation process or
#' \code{NULL}.}
#'
#' @references Torres-Jimenez, C. J. (2017, September), \emph{Comparison of
#' estimation methods for the BMT distribution}. ArXiv e-prints.
#'
#' Torres-Jimenez, C. J. (2018), \emph{The BMT Item Response Theory model: A
#' new skewed distribution family with bounded domain and an IRT model based
#' on it}, PhD thesis, Doctorado en ciencias - Estadistica, Universidad
#' Nacional de Colombia, Sede Bogota.
#'
#' @seealso See \code{\link{BMT}} for the BMT density, distribution, quantile
#' function and random deviates. See \code{\link{BMTfit.mle}},
#' \code{\link{BMTfit.mme}}, \code{\link{BMTfit.qme}},
#' \code{\link{BMTfit.mge}}, \code{\link{BMTfit.mpse}} and
#' \code{\link{BMTfit.mqde}} for details on parameter estimation. See
#' \code{\link{fitdist}} for details on the object fitdist and its methods
#' \code{print}, \code{plot}, \code{summary}, \code{quantile}, \code{logLik},
#' \code{vcov} and \code{coef}, and \code{\link{fitdistrplus}} for an overview
#' of the package to which that object belongs to.
#'
#' @author Camilo Jose Torres-Jimenez [aut,cre] \email{[email protected]}
#'
#' @source Based on the function \code{\link{fitdist}} of the R package:
#' \code{\link{fitdistrplus}}
#'
#' Delignette-Muller ML and Dutang C (2015), \emph{fitdistrplus: An R Package
#' for Fitting Distributions}. Journal of Statistical Software, 64(4), 1-34.
#'
#' @examples
#' # (1) fit of the BMT distribution by maximum likelihood estimation
#' data(groundbeef)
#' serving <- groundbeef$serving
#' fit.mle <- BMTfit(serving)
#' summary(fit.mle)
#' plot(fit.mle)
#' plot(fit.mle, demp = TRUE)
#' plot(fit.mle, histo = FALSE, demp = TRUE)
#' cdfcomp(fit.mle, addlegend=FALSE)
#' denscomp(fit.mle, addlegend=FALSE)
#' ppcomp(fit.mle, addlegend=FALSE)
#' qqcomp(fit.mle, addlegend=FALSE)
#'
#' # (2) Comparison of various estimation methods
#' fit.mme <- BMTfit(serving, method="mme")
#' fit.mpse <- BMTfit(serving, method="mpse")
#' fit.mqde <- BMTfit(serving, method="mqde")
#' summary(fit.mme)
#' summary(fit.mpse)
#' summary(fit.mqde)
#' cdfcomp(list(fit.mle, fit.mme, fit.mpse, fit.mqde),
#' legendtext=c("mle", "mme", "mpse", "mqde"))
#' denscomp(list(fit.mle, fit.mme, fit.mpse, fit.mqde),
#' legendtext=c("mle", "mme", "mpse", "mqde"))
#' qqcomp(list(fit.mle, fit.mme, fit.mpse, fit.mqde),
#' legendtext=c("mle", "mme", "mpse", "mqde"))
#' ppcomp(list(fit.mle, fit.mme, fit.mpse, fit.mqde),
#' legendtext=c("mle", "mme", "mpse", "mqde"))
#' gofstat(list(fit.mle, fit.mme, fit.mpse, fit.mqde),
#' fitnames=c("mle", "mme", "mpse", "mqde"))
#'
#' # (3) how to change the optimisation method?
#' BMTfit(serving, optim.method="Nelder-Mead")
#' BMTfit(serving, optim.method="L-BFGS-B")
#' BMTfit(serving, custom.optim="nlminb")
#'
#' # (4) estimation of the tails weights parameters of the BMT distribution
#' # with domain fixed at [9,201] using Kolmogorov-Smirnov
#' fit.KS <- BMTfit(serving, method="mge", gof="KS",
#' start=list(p3=0.5, p4=0.5), fix.arg=list(p1=9, p2=201))
#' summary(fit.KS)
#' plot(fit.KS)
#'
#' # (5) estimation of the asymmetry-steepness parameters of the BMT
#' # distribution with domain fixed at [9,201] using minimum quantile distance
#' # with a closed formula (optim.method="CD")
#' fit.mqde.CD <- BMTfit(serving, method="mqde", optim.method="CD",
#' start=list(p3=0.5, p4=0.5), type.p.3.4 = "a-s",
#' fix.arg=list(p1=9, p2=201))
#' summary(fit.mqde.CD)
#' plot(fit.mqde.CD)
#'
#' @keywords distribution
#################
#' @rdname BMTfit
#' @export BMTfit
#' @import stats
#' @import utils
#' @import partitions
#' @import fitdistrplus
BMTfit <- function(data, method = c("mle","mme","qme","mge","mpse","mqde"),
start = list(p3 = 0.5, p4 = 0.5, p1 = min(data) - 0.1, p2 = max(data) + 0.1),
fix.arg = NULL, type.p.3.4 = "t w", type.p.1.2 = "c-d",
optim.method = "Nelder-Mead", custom.optim = NULL,
keepdata = TRUE, keepdata.nb = 100, ...) {
# Control keepdata and keepdata.nb
if (!is.logical(keepdata) || !is.numeric(keepdata.nb) || keepdata.nb < 2)
stop("wrong arguments 'keepdata' and 'keepdata.nb'")
# Further arguments to be passed
my3dots <- list(...)
if (length(my3dots) == 0)
my3dots <- NULL
# Length of data
n <- length(data)
# Control method
method <- match.arg(method, c("mle","mme","qme","mge","mpse","mqde"))
# Separation for each estimation method
res <- switch (method,
mle = BMTfit.mle(data, start=start, fix.arg=fix.arg,
type.p.3.4=type.p.3.4, type.p.1.2=type.p.1.2,
optim.method=optim.method, custom.optim=custom.optim, ...),
mme = BMTfit.mme(data, start=start, fix.arg=fix.arg,
type.p.3.4=type.p.3.4, type.p.1.2=type.p.1.2,
optim.method=optim.method, custom.optim=custom.optim, ...),
qme = BMTfit.qme(data, start=start, fix.arg=fix.arg,
type.p.3.4=type.p.3.4, type.p.1.2=type.p.1.2,
optim.method=optim.method, custom.optim=custom.optim, ...),
mge = BMTfit.mge(data, start=start, fix.arg=fix.arg,
type.p.3.4=type.p.3.4, type.p.1.2=type.p.1.2,
optim.method=optim.method, custom.optim=custom.optim, ...),
mpse = BMTfit.mpse(data, start=start, fix.arg=fix.arg,
type.p.3.4=type.p.3.4, type.p.1.2=type.p.1.2,
optim.method=optim.method, custom.optim=custom.optim, ...),
mqde = BMTfit.mqde(data, start=start, fix.arg=fix.arg,
type.p.3.4=type.p.3.4, type.p.1.2=type.p.1.2,
optim.method=optim.method, custom.optim=custom.optim, ...))
# Optimization method message
if(!is.null(res$optim.message))
cat("\noptim.message:",res$optim.message,"\n\n")
# Unsuccessful convergence
if (res$convergence > 0)
stop("Unsuccessful convergence with the error code ", res$convergence,
".\nAnother optimization method could succeed.\n")
# Parameters covariance, correlation and standard error
sd <- correl <- varcovar <- NULL
# Maximum likelihood and maximum product of spacing
if (method == "mle" || method == "mpse"){
if (!is.null(res$hessian)){
# check for NA values and invertible Hessian
if (all(!is.na(res$hessian)) && qr(res$hessian)$rank == NCOL(res$hessian)) {
# Parameters covariance, correlation and standard error
varcovar <- solve(res$hessian)
sd <- sqrt(diag(varcovar))
correl <- cov2cor(varcovar)
}
else
sd <- correl <- varcovar <- NA
}
else
sd <- correl <- varcovar <- NA
}
# Object fitdist
if (!keepdata){
n2keep <- min(keepdata.nb, n) - 2
imin <- which.min(data)
imax <- which.max(data)
subdata <- data[sample((1:n)[-c(imin, imax)], size = n2keep, replace = FALSE)]
data <- c(subdata, data[c(imin, imax)])
}
npar <- length(res$estimate)
aic <- -2 * res$loglik + 2 * npar
bic <- -2 * res$loglik + log(n) * npar
# Optimization method goes to dots
if(is.null(custom.optim)){
my3dots$optim.method <- optim.method
}
else
my3dots$custom.optim <- custom.optim
reslist <- list(estimate = res$estimate, method = method, sd = sd, cor = correl,
vcov = varcovar, loglik = res$loglik, aic = aic, bic = bic,
n = n, data = data, distname = "BMT", fix.arg = res$fix.arg,
fix.arg.fun = res$fix.arg.fun, dots = my3dots,
convergence = res$convergence, discrete = FALSE, weights = res$weights)
return(structure(reslist, class = "fitdist"))
}
############################
##### Hidden functions #####
# Wrapper for function nlminb in order to work as a custom.optim
.m.nlminb <- function(fn, par, ...) {
opt <- nlminb(start = par, objective = fn, ...)
return(list(par = opt$par, convergence = opt$convergence, value = opt$objective,
hessian = NULL, counts = as.vector(opt$evaluations),
message = opt$message))
}
# Compute log-likelihood
.loglik <- function(par, fix.arg, obs, ddistnam) {
sum(log(do.call(ddistnam, c(list(obs), as.list(par), as.list(fix.arg)))))
}
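# Illustrative sketch (not part of the original package source): .loglik
# evaluates the log-likelihood of a density given by name; the data and
# parameter values below are arbitrary examples.
if (FALSE) {
  set.seed(1)
  smp <- rBMT(50, 0.25, 0.75)
  .loglik(c(p3 = 0.25, p4 = 0.75), list(p1 = 0, p2 = 1), smp, "dBMT")
}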
# ---- source file: /scratch/gouwar.j/cran-all/cranData/BMT/R/BMTfit.R ----
#'@title Maximum Goodness-of-fit Fit of the BMT Distribution to Non-censored
#' Data.
#'
#'@description Fit of the BMT distribution to non-censored data by maximum
#' goodness-of-fit estimation (mge), also known as minimum distance estimation.
#'
#'@rdname BMTfit.mge
#'@name BMTfit.mge
#'
#'@details This function is not intended to be called directly but is internally
#' called in \code{\link{BMTfit}} when used with the maximum goodness-of-fit
#' method.
#'
#' \code{BMTfit.mge} is based on the function \code{\link{mgedist}} but it
#' focuses on the maximum goodness-of-fit parameter estimation for the BMT
#' distribution (see \code{\link{BMT}} for details about the BMT distribution
#' and \code{\link{mgedist}} for details about maximum goodness-of-fit fit of
#' univariate distributions).
#'
#'@param data A numeric vector with the observed values for non-censored data.
#'@param gof A character string coding for the name of the goodness-of-fit
#' distance used: "CvM" for Cramer-von Mises distance, "KS" for
#' Kolmogorov-Smirnov distance, "AD" for Anderson-Darling distance, "ADR",
#' "ADL", "AD2R", "AD2L" and "AD2" for variants of Anderson-Darling distance
#' described by Luceno (2006).
#'@param start A named list giving the initial values of parameters of the BMT
#' distribution or a function of data computing initial values and returning a
#' named list. (see the 'details' section of
#' \code{\link{mledist}}).
#'@param fix.arg An optional named list giving the values of fixed parameters of
#' the BMT distribution or a function of data computing (fixed) parameter
#' values and returning a named list. Parameters with fixed value are thus NOT
#' estimated. (see the 'details' section of
#' \code{\link{mledist}}).
#'@param type.p.3.4 Type of parametrization associated to p3 and p4. "t w" means
#' tail weights parametrization (default) and "a-s" means asymmetry-steepness
#' parametrization.
#'@param type.p.1.2 Type of parametrization associated to p1 and p2. "c-d" means
#' domain parametrization (default) and "l-s" means location-scale
#' parametrization.
#'@param optim.method \code{"default"} (see the 'details' section of
#' \code{\link{mledist}}) or optimization method to pass to
#' \code{\link{optim}}.
#'@param custom.optim A function carrying the optimization (see the 'details'
#' section of \code{\link{mledist}}).
#'@param silent A logical to remove or show warnings when bootstrapping.
#'@param \dots Further arguments to be passed to generic functions or to the
#' function \code{"mgedist"}. See \code{\link{mgedist}} for details.
#'
#'@return \code{BMTfit.mge} returns a list with following components,
#'
#' \item{estimate}{ the parameter estimates.}
#'
#' \item{convergence}{ an integer code for the convergence of
#' \code{\link{optim}}/\code{\link{constrOptim}} defined as below or defined by
#' the user in the user-supplied optimization function.
#'
#' \code{0} indicates successful convergence.
#'
#' \code{1} indicates that the iteration limit of \code{\link{optim}} has been
#' reached.
#'
#' \code{10} indicates degeneracy of the Nelder-Mead simplex.
#'
#' \code{100} indicates that \code{\link{optim}} encountered an internal error.
#' }
#'
#' \item{value}{the value of the corresponding objective function of the
#' estimation method at the estimate.}
#'
#' \item{hessian}{a symmetric matrix computed by \code{\link{optim}} as an
#' estimate of the Hessian at the solution found or computed in the
#' user-supplied optimization function.}
#'
#' \item{loglik}{the log-likelihood value.}
#'
#' \item{gof}{ the code of the goodness-of-fit distance maximized. }
#'
#' \item{optim.function}{the name of the optimization function used for maximum
#' goodness-of-fit.}
#'
#' \item{optim.method}{when \code{\link{optim}} is used, the name of the
#' algorithm used, \code{NULL} otherwise.}
#'
#' \item{fix.arg}{the named list giving the values of parameters of the named
#' distribution that must be kept fixed rather than estimated or \code{NULL} if
#' there are no such parameters. }
#'
#' \item{fix.arg.fun}{the function used to set the value of \code{fix.arg} or
#' \code{NULL}.}
#'
#' \item{weights}{the vector of weights used in the estimation process or
#' \code{NULL}.}
#'
#' \item{counts}{A two-element integer vector giving the number of calls to the
#' log-likelihood function and its gradient respectively. This excludes those
#' calls needed to compute the Hessian, if requested, and any calls to
#' log-likelihood function to compute a finite-difference approximation to the
#' gradient. \code{counts} is returned by \code{\link{optim}} or the
#' user-supplied function or set to \code{NULL}.}
#'
#' \item{optim.message}{A character string giving any additional information
#' returned by the optimizer, or \code{NULL}. To understand exactly the
#' message, see the source code.}
#'
#'@references Torres-Jimenez, C. J. (2017, September), \emph{Comparison of estimation
#' methods for the BMT distribution}. ArXiv e-prints.
#'
#' Torres-Jimenez, C. J. (2018), \emph{The BMT Item Response Theory model: A
#' new skewed distribution family with bounded domain and an IRT model based on
#' it}, PhD thesis, Doctorado en ciencias - Estadistica, Universidad Nacional
#' de Colombia, Sede Bogota.
#'
#'@seealso See \code{\link{BMT}} for the BMT density, distribution, quantile
#' function and random deviates. See \code{\link{BMTfit.mme}},
#' \code{\link{BMTfit.qme}}, \code{\link{BMTfit.mle}},
#' \code{\link{BMTfit.mpse}} and \code{\link{BMTfit.mqde}} for other estimation
#' methods. See \code{\link{optim}} and \code{\link{constrOptim}} for
#' optimization routines. See \code{\link{BMTfit}} and \code{\link{fitdist}}
#' for functions that return an object of class \code{"fitdist"}.
#'
#'@author Camilo Jose Torres-Jimenez [aut,cre] \email{[email protected]}
#'
#'@source Based on the function \code{\link{mgedist}} of the R package:
#' \code{\link{fitdistrplus}}
#'
#' Delignette-Muller ML and Dutang C (2015), \emph{fitdistrplus: An R Package
#' for Fitting Distributions}. Journal of Statistical Software, 64(4), 1-34.
#'
#' @examples
#' # (1) basic fit by maximum goodness-of-fit estimation
#' set.seed(1234)
#' x1 <- rBMT(n=100, p3 = 0.25, p4 = 0.75)
#' BMTfit.mge(x1)
#'
#' # (2) how to change the goodness-of-fit statistic/distance?
#' BMTfit.mge(x1, gof="KS")
#' BMTfit.mge(x1, gof="AD2R")
#'
#' # (3) how to change the optimisation method?
#' BMTfit.mge(x1, optim.method="L-BFGS-B")
#' BMTfit.mge(x1, custom.optim="nlminb")
#'
#' # (4) estimation of the tails weights parameters of the BMT
#' # distribution with domain fixed at [0,1]
#' BMTfit.mge(x1, start=list(p3=0.5, p4=0.5), fix.arg=list(p1=0, p2=1))
#'
#' # (5) estimation of the asymmetry-steepness parameters of the BMT
#' # distribution with domain fixed at [0,1]
#' BMTfit.mge(x1, start=list(p3=0, p4=0.5), type.p.3.4 = "a-s",
#' fix.arg=list(p1=0, p2=1))
#'
#'@keywords distribution
#####################
#' @rdname BMTfit.mge
#' @export BMTfit.mge
BMTfit.mge <- function(data, gof = "CvM",
start = list(p3 = 0.5, p4 = 0.5, p1 = min(data) - 0.1, p2 = max(data) + 0.1),
fix.arg = NULL, type.p.3.4 = "t w", type.p.1.2 = "c-d",
optim.method = "Nelder-Mead", custom.optim = NULL, silent = TRUE, ...){
# Control data
if (!(is.vector(data) & is.numeric(data) & length(data) > 1))
stop("data must be a numeric vector of length greater than 1")
# Further arguments to be passed
my3dots <- list(...)
if (length(my3dots) == 0)
my3dots <- NULL
# Control weights
if(!is.null(my3dots$weights))
stop("Estimation with weights is not considered yet")
#
if(type.p.1.2 != "c-d" && gof %in% c("AD","ADR","ADL","AD2","AD2R","AD2L"))
stop("Anderson-Darling distance and variants only allow parametrization \"c-d\"")
# Control type.p.3.4. It allows partial match.
TYPE.P.3.4 <- c("t w", "a-s") # tail weights or asymmetry-steepness
int.type.p.3.4 <- pmatch(type.p.3.4, TYPE.P.3.4)
if (is.na(int.type.p.3.4))
stop("invalid type of parametrization for parameters 3 and 4")
if (int.type.p.3.4 == -1)
stop("ambiguous type of parametrization for parameters 3 and 4")
# Control type.p.1.2. It allows partial match.
TYPE.P.1.2 <- c("c-d", "l-s") # domain or location-scale
int.type.p.1.2 <- pmatch(type.p.1.2, TYPE.P.1.2)
if (is.na(int.type.p.1.2))
stop("invalid type of parametrization for parameters 1 and 2")
if (int.type.p.1.2 == -1)
stop("ambiguous type of parametrization for parameters 1 and 2")
# Types of parametrization are passed as fixed parameters
fix.arg$type.p.3.4 <- type.p.3.4
fix.arg$type.p.1.2 <- type.p.1.2
# Establish box constraints according to parameters in start
stnames <- names(start)
m <- length(stnames)
# Initialize all box constraints: (-Inf, Inf)
lower <- rep(-Inf, m)
upper <- rep(Inf, m)
# domain parametrization
if (int.type.p.1.2 == 1) {
# c has to be inside (-Inf, min(data))
upper[stnames == "p1"] <- min(data) - .epsilon
# d has to be inside (max(data), Inf)
lower[stnames == "p2"] <- max(data) + .epsilon
}
# location-scale parametrization
else{
# sigma has to be inside (0, Inf)
lower[stnames == "p2"] <- 0 + .epsilon
}
# tail weights parametrization
if (int.type.p.3.4 == 1) {
# Both tail weights have to be inside (0,1)
lower[stnames == "p3" | stnames == "p4"] <- 0 + .epsilon
upper[stnames == "p3" | stnames == "p4"] <- 1 - .epsilon
}
# asymmetry-steepness parametrization
else{
# asymmetry has to be inside (-1, 1)
# steepness has to be inside (0, 1)
lower[stnames == "p3"] <- -1 + .epsilon
lower[stnames == "p4"] <- 0 + .epsilon
upper[stnames == "p3" | stnames == "p4"] <- 1 - .epsilon
}
# nlminb optimization method
if(!is.null(custom.optim))
if(custom.optim=="nlminb")
custom.optim <- .m.nlminb
# mgedist function of fitdistrplus
mge <- fitdistrplus::mgedist(data, "BMT", gof = gof, start = start, fix.arg = fix.arg,
optim.method = optim.method, lower = lower, upper = upper,
custom.optim = custom.optim, silent = silent, ...)
# Estimation with the location-scale parametrization might leave data outside the estimated domain
par <- append(mge$estimate,fix.arg)
if (int.type.p.1.2 == 2)
par <- BMTchangepars(par$p3, par$p4, par$type.p.3.4, par$p1, par$p2, par$type.p.1.2)
n.obs <- sum(data < par$p1 | data > par$p2)
if(n.obs > 0){
text <- paste("The resultant estimated domain is [",round(par$p1,4),",",round(par$p2,4),
"] and there are ",n.obs," observations out of it.",sep="")
warning(text)
}
return(mge)
}
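# Illustrative sketch (added, not part of the original package code): with the
# "l-s" parametrization the domain is only implicit, so the fit may place
# observations outside the estimated domain and trigger the warning above.
# The start values below are ad hoc assumptions.
if (FALSE) {
  set.seed(1234)
  x <- rBMT(n = 100, p3 = 0.25, p4 = 0.75)
  fit.ls <- BMTfit.mge(x, gof = "CvM",
                       start = list(p3 = 0.5, p4 = 0.5, p1 = mean(x), p2 = sd(x)),
                       type.p.1.2 = "l-s")
  fit.ls$estimate
}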
# ---- /scratch/gouwar.j/cran-all/cranData/BMT/R/BMTfit.mge.R ----
#'@title Maximum Likelihood Fit of the BMT Distribution to Non-censored Data.
#'
#'@description Fit of the BMT distribution to non-censored data by maximum
#' likelihood estimation (mle).
#'
#'@rdname BMTfit.mle
#'@name BMTfit.mle
#'
#'@details This function is not intended to be called directly but is internally
#' called in \code{\link{BMTfit}} when used with the maximum likelihood method.
#'
#' \code{BMTfit.mle} is based on the function \code{\link{mledist}} from the
#' package \code{\link{fitdistrplus}} but it focuses on the maximum likelihood
#' parameter estimation for the BMT distribution (see \code{\link{BMT}} for
#' details about the BMT distribution and \code{\link{mledist}} for details
#' about maximum likelihood fit of univariate distributions).
#'
#'@param data A numeric vector with the observed values for non-censored data.
#'@param start A named list giving the initial values of parameters of the BMT
#' distribution or a function of data computing initial values and returning a
#' named list. (see the 'details' section of
#' \code{\link{mledist}}).
#'@param fix.arg An optional named list giving the values of fixed parameters of
#' the BMT distribution or a function of data computing (fixed) parameter
#' values and returning a named list. Parameters with fixed value are thus NOT
#' estimated. (see the 'details' section of
#' \code{\link{mledist}}).
#'@param type.p.3.4 Type of parametrization associated with p3 and p4. "t w" means
#' tail weights parametrization (default) and "a-s" means asymmetry-steepness
#' parametrization.
#'@param type.p.1.2 Type of parametrization associated with p1 and p2. "c-d" means
#' domain parametrization (default) and "l-s" means location-scale
#' parametrization.
#'@param optim.method \code{"default"} (see the 'details' section of
#' \code{\link{mledist}}) or optimization method to pass to
#' \code{\link{optim}}.
#'@param custom.optim A function carrying the optimization (see the 'details'
#' section of \code{\link{mledist}}).
#'@param silent A logical to remove or show warnings when bootstrapping.
#'@param \dots Further arguments to be passed to generic functions or to the
#' function \code{"mledist"}. See \code{\link{mledist}} for details.
#'
#'@return \code{BMTfit.mle} returns a list with following components,
#'
#' \item{estimate}{ the parameter estimates.}
#'
#' \item{convergence}{ an integer code for the convergence of
#' \code{\link{optim}}/\code{\link{constrOptim}} defined as below or defined by
#' the user in the user-supplied optimization function.
#'
#' \code{0} indicates successful convergence.
#'
#' \code{1} indicates that the iteration limit of \code{\link{optim}} has been
#' reached.
#'
#' \code{10} indicates degeneracy of the Nelder-Mead simplex.
#'
#' \code{100} indicates that \code{\link{optim}} encountered an internal error.
#' }
#'
#' \item{loglik}{the log-likelihood value.}
#'
#' \item{hessian}{a symmetric matrix computed by \code{\link{optim}} as an
#' estimate of the Hessian at the solution found or computed in the
#' user-supplied optimization function. It is used in \code{\link{BMTfit}} to estimate
#' standard errors. }
#'
#' \item{optim.function}{the name of the optimization function used for maximum
#' likelihood.}
#'
#' \item{optim.method}{when \code{\link{optim}} is used, the name of the
#' algorithm used, \code{NULL} otherwise.}
#'
#' \item{fix.arg}{the named list giving the values of parameters of the named
#' distribution that must be kept fixed rather than estimated, or \code{NULL} if there are no such parameters. }
#'
#' \item{fix.arg.fun}{the function used to set the value of \code{fix.arg} or
#' \code{NULL}.}
#'
#' \item{weights}{the vector of weights used in the estimation process or
#' \code{NULL}.}
#'
#' \item{counts}{A two-element integer vector giving the number of calls to the
#' log-likelihood function and its gradient respectively. This excludes those
#' calls needed to compute the Hessian, if requested, and any calls to
#' log-likelihood function to compute a finite-difference approximation to the
#' gradient. \code{counts} is returned by \code{\link{optim}} or the
#' user-supplied function or set to \code{NULL}.}
#'
#' \item{optim.message}{A character string giving any additional information
#' returned by the optimizer, or \code{NULL}. To understand exactly the
#' message, see the source code.}
#'
#'@references Torres-Jimenez, C. J. (2017, September), \emph{Comparison of estimation
#' methods for the BMT distribution}. ArXiv e-prints.
#'
#' Torres-Jimenez, C. J. (2018), \emph{The BMT Item Response Theory model: A
#' new skewed distribution family with bounded domain and an IRT model based on
#' it}, PhD thesis, Doctorado en ciencias - Estadistica, Universidad Nacional
#' de Colombia, Sede Bogota.
#'
#'@seealso See \code{\link{BMT}} for the BMT density, distribution, quantile
#' function and random deviates. See \code{\link{BMTfit.mme}},
#' \code{\link{BMTfit.qme}}, \code{\link{BMTfit.mge}},
#' \code{\link{BMTfit.mpse}} and \code{\link{BMTfit.mqde}} for other estimation
#' methods. See \code{\link{optim}} and \code{\link{constrOptim}} for
#' optimization routines. See \code{\link{BMTfit}} and \code{\link{fitdist}}
#' for functions that return an object of class \code{"fitdist"}.
#'
#'@author Camilo Jose Torres-Jimenez [aut,cre] \email{[email protected]}
#'
#'@source Based on the function \code{\link{mledist}} of the R package:
#' \code{\link{fitdistrplus}}
#'
#' Delignette-Muller ML and Dutang C (2015), \emph{fitdistrplus: An R Package
#' for Fitting Distributions}. Journal of Statistical Software, 64(4), 1-34.
#'
#' @examples
#' # (1) basic fit by maximum likelihood estimation
#' set.seed(1234)
#' x1 <- rBMT(n=100, p3 = 0.25, p4 = 0.75)
#' BMTfit.mle(x1)
#'
#' # (2) how to change the optimisation method?
#' BMTfit.mle(x1, optim.method="L-BFGS-B")
#' BMTfit.mle(x1, custom.optim="nlminb")
#'
#' # (3) estimation of the tail weights parameters of the BMT
#' # distribution with domain fixed at [0,1]
#' BMTfit.mle(x1, start=list(p3=0.5, p4=0.5), fix.arg=list(p1=0, p2=1))
#'
#' # (4) estimation of the asymmetry-steepness parameters of the BMT
#' # distribution with domain fixed at [0,1]
#' BMTfit.mle(x1, start=list(p3=0, p4=0.5), type.p.3.4 = "a-s",
#' fix.arg=list(p1=0, p2=1))
#'
#'@keywords distribution
#####################
#' @rdname BMTfit.mle
#' @export BMTfit.mle
BMTfit.mle <- function(data,
start = list(p3 = 0.5, p4 = 0.5, p1 = min(data) - 0.1, p2 = max(data) + 0.1),
fix.arg = NULL, type.p.3.4 = "t w", type.p.1.2 = "c-d",
optim.method = "Nelder-Mead", custom.optim = NULL, silent = TRUE, ...){
# Control data
if (!(is.vector(data) & is.numeric(data) & length(data) > 1))
stop("data must be a numeric vector of length greater than 1")
# Further arguments to be passed
my3dots <- list(...)
if (length(my3dots) == 0)
my3dots <- NULL
# Control weights
if(!is.null(my3dots$weights))
stop("Estimation with weights is not considered yet")
# Control type.p.3.4. It allows partial match.
TYPE.P.3.4 <- c("t w", "a-s") # tail weights or asymmetry-steepness
int.type.p.3.4 <- pmatch(type.p.3.4, TYPE.P.3.4)
if (is.na(int.type.p.3.4))
stop("invalid type of parametrization for parameters 3 and 4")
if (int.type.p.3.4 == -1)
stop("ambiguous type of parametrization for parameters 3 and 4")
# mle only allows parametrization "c-d"
# because all data have to be inside the estimated domain.
if(type.p.1.2 != "c-d")
stop("maximum likelihood estimation only allows parametrization \"c-d\"")
# Types of parametrization are passed as fixed parameters
fix.arg$type.p.3.4 <- type.p.3.4
fix.arg$type.p.1.2 <- "c-d"
# Establish box constraints according to parameters in start
stnames <- names(start)
m <- length(stnames)
# Initialize all box constraints: (0, 1)
lower <- rep(0 + .epsilon, m)
upper <- rep(1 - .epsilon, m)
# domain parametrization
# c has to be inside (-Inf, min(data))
lower[stnames == "p1"] <- -Inf
upper[stnames == "p1"] <- min(data) - .epsilon
# d has to be inside (max(data), Inf)
lower[stnames == "p2"] <- max(data) + .epsilon
upper[stnames == "p2"] <- Inf
# asymmetry-steepness parametrization
if(int.type.p.3.4 == 2) {
# asymmetry has to be inside (-1, 1)
lower[stnames == "p3"] <- -1 + .epsilon
}
# nlminb optimization method
if(!is.null(custom.optim))
if(custom.optim=="nlminb")
custom.optim <- .m.nlminb
# mledist function of fitdistrplus
mle <- fitdistrplus::mledist(data, "BMT", start = start, fix.arg = fix.arg,
optim.method = optim.method, lower = lower, upper = upper,
custom.optim = custom.optim, silent = silent, ...)
return(mle)
}
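# Quick check (added, illustrative): because mle enforces the "c-d" box
# constraints p1 < min(data) and p2 > max(data), the fitted domain always
# contains every observation, unlike the moment- or quantile-based methods.
if (FALSE) {
  set.seed(1234)
  x <- rBMT(n = 100, p3 = 0.25, p4 = 0.75)
  fit <- BMTfit.mle(x)
  est <- fit$estimate
  all(x > est["p1"] & x < est["p2"])  # TRUE by construction
}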
# ---- /scratch/gouwar.j/cran-all/cranData/BMT/R/BMTfit.mle.R ----
#'@title Moment Matching Fit of the BMT Distribution to Non-censored Data.
#'
#'@description Fit of the BMT distribution to non-censored data by moment
#' matching (mme).
#'
#'@rdname BMTfit.mme
#'@name BMTfit.mme
#'
#'@details This function is not intended to be called directly but is internally
#' called in \code{\link{BMTfit}} when used with the moment matching method.
#'
#' \code{BMTfit.mme} is based on the function \code{\link{mmedist}} but it
#' focuses on the moment matching parameter estimation for the BMT distribution
#' (see \code{\link{BMT}} for details about the BMT distribution and
#' \code{\link{mmedist}} for details about moment matching fit of univariate
#' distributions).
#'
#' For each parameter of the BMT distribution we choose a moment or measure.
#' Mean for \code{p1}, standard deviation for \code{p2}, Pearson's skewness for
#' \code{p3}, and Pearson's kurtosis for \code{p4}.
#'
#'@param data A numeric vector with the observed values for non-censored data.
#'@param start A named list giving the initial values of parameters of the BMT
#' distribution or a function of data computing initial values and returning a
#' named list. (see the 'details' section of
#' \code{\link{mledist}}).
#'@param fix.arg An optional named list giving the values of fixed parameters of
#' the BMT distribution or a function of data computing (fixed) parameter
#' values and returning a named list. Parameters with fixed value are thus NOT
#' estimated. (see the 'details' section of
#' \code{\link{mledist}}).
#'@param type.p.3.4 Type of parametrization associated with p3 and p4. "t w" means
#' tail weights parametrization (default) and "a-s" means asymmetry-steepness
#' parametrization.
#'@param type.p.1.2 Type of parametrization associated with p1 and p2. "c-d" means
#' domain parametrization (default) and "l-s" means location-scale
#' parametrization.
#'@param optim.method \code{"default"} (see the 'details' section of
#' \code{\link{mledist}}) or optimization method to pass to
#' \code{\link{optim}}.
#'@param custom.optim A function carrying the optimization (see the 'details'
#' section of \code{\link{mledist}}).
#'@param silent A logical to remove or show warnings when bootstrapping.
#'@param \dots Further arguments to be passed to generic functions or to the
#' function \code{"mmedist"}. See \code{\link{mmedist}} for details.
#'
#'@return \code{BMTfit.mme} returns a list with following components,
#'
#' \item{estimate}{ the parameter estimates.}
#'
#' \item{convergence}{ an integer code for the convergence of
#' \code{\link{optim}}/\code{\link{constrOptim}} defined as below or defined by
#' the user in the user-supplied optimization function.
#'
#' \code{0} indicates successful convergence.
#'
#' \code{1} indicates that the iteration limit of \code{\link{optim}} has been
#' reached.
#'
#' \code{10} indicates degeneracy of the Nelder-Mead simplex.
#'
#' \code{100} indicates that \code{\link{optim}} encountered an internal error.
#' }
#'
#' \item{value}{the value of the corresponding objective function of the
#' estimation method at the estimate.}
#'
#' \item{hessian}{a symmetric matrix computed by \code{\link{optim}} as an
#' estimate of the Hessian at the solution found or computed in the
#' user-supplied optimization function.}
#'
#' \item{loglik}{the log-likelihood value.}
#'
#' \item{order}{the vector of moment(s) matched: mean (1), standard deviation
#' (2), Pearson's skewness (3), Pearson's kurtosis (4).}
#'
#' \item{memp}{the empirical moment function. }
#'
#' \item{optim.function}{the name of the optimization function used for
#' moment matching.}
#'
#' \item{optim.method}{when \code{\link{optim}} is used, the name of the
#' algorithm used, \code{NULL} otherwise.}
#'
#' \item{fix.arg}{the named list giving the values of parameters of the named
#' distribution that must be kept fixed rather than estimated, or \code{NULL} if
#' there are no such parameters. }
#'
#' \item{fix.arg.fun}{the function used to set the value of \code{fix.arg} or
#' \code{NULL}.}
#'
#' \item{weights}{the vector of weights used in the estimation process or
#' \code{NULL}.}
#'
#' \item{counts}{A two-element integer vector giving the number of calls to the
#' log-likelihood function and its gradient respectively. This excludes those
#' calls needed to compute the Hessian, if requested, and any calls to
#' log-likelihood function to compute a finite-difference approximation to the
#' gradient. \code{counts} is returned by \code{\link{optim}} or the
#' user-supplied function or set to \code{NULL}.}
#'
#' \item{optim.message}{A character string giving any additional information
#' returned by the optimizer, or \code{NULL}. To understand exactly the
#' message, see the source code.}
#'
#'@references Torres-Jimenez, C. J. (2017, September), \emph{Comparison of estimation
#' methods for the BMT distribution}. ArXiv e-prints.
#'
#' Torres-Jimenez, C. J. (2018), \emph{The BMT Item Response Theory model: A
#' new skewed distribution family with bounded domain and an IRT model based on
#' it}, PhD thesis, Doctorado en ciencias - Estadistica, Universidad Nacional
#' de Colombia, Sede Bogota.
#'
#'@seealso See \code{\link{BMT}} for the BMT density, distribution, quantile
#' function and random deviates. See \code{\link{BMTfit.qme}},
#' \code{\link{BMTfit.mle}}, \code{\link{BMTfit.mge}},
#' \code{\link{BMTfit.mpse}} and \code{\link{BMTfit.mqde}} for other estimation
#' methods. See \code{\link{optim}} and \code{\link{constrOptim}} for
#' optimization routines. See \code{\link{BMTfit}} and \code{\link{fitdist}}
#' for functions that return an object of class \code{"fitdist"}.
#'
#'@author Camilo Jose Torres-Jimenez [aut,cre] \email{[email protected]}
#'
#'@source Based on the function \code{\link{mmedist}} of the R package:
#' \code{\link{fitdistrplus}}
#'
#' Delignette-Muller ML and Dutang C (2015), \emph{fitdistrplus: An R Package
#' for Fitting Distributions}. Journal of Statistical Software, 64(4), 1-34.
#'
#' @examples
#' # (1) basic fit by moment matching estimation
#' set.seed(1234)
#' x1 <- rBMT(n=100, p3=0.25, p4=0.75)
#' BMTfit.mme(x1)
#'
#' # (2) how to change the optimisation method?
#' BMTfit.mme(x1, optim.method="L-BFGS-B")
#' BMTfit.mme(x1, custom.optim="nlminb")
#'
#' # (3) estimation of the tail weights parameters of the BMT
#' # distribution with domain fixed at [0,1]
#' BMTfit.mme(x1, start=list(p3=0.5, p4=0.5), fix.arg=list(p1=0, p2=1))
#'
#' # (4) estimation of the asymmetry-steepness parameters of the BMT
#' # distribution with domain fixed at [0,1]
#' BMTfit.mme(x1, start=list(p3=0, p4=0.5), type.p.3.4 = "a-s",
#' fix.arg=list(p1=0, p2=1))
#'
#'@keywords distribution
#####################
#' @rdname BMTfit.mme
#' @export BMTfit.mme
BMTfit.mme <- function(data,
start = list(p3 = 0.5, p4 = 0.5, p1 = min(data) - 0.1, p2 = max(data) + 0.1),
fix.arg = NULL, type.p.3.4 = "t w", type.p.1.2 = "c-d",
optim.method = "Nelder-Mead", custom.optim = NULL, silent = TRUE, ...){
# Control data
if (!(is.vector(data) & is.numeric(data) & length(data) > 1))
stop("data must be a numeric vector of length greater than 1")
# Further arguments to be passed
my3dots <- list(...)
if (length(my3dots) == 0)
my3dots <- NULL
# Control weights
if(!is.null(my3dots$weights))
stop("Estimation with weights is not considered yet")
# Control type.p.3.4. It allows partial match.
TYPE.P.3.4 <- c("t w", "a-s") # tail weights or asymmetry-steepness
int.type.p.3.4 <- pmatch(type.p.3.4, TYPE.P.3.4)
if (is.na(int.type.p.3.4))
stop("invalid type of parametrization for parameters 3 and 4")
if (int.type.p.3.4 == -1)
stop("ambiguous type of parametrization for parameters 3 and 4")
# Control type.p.1.2. It allows partial match.
TYPE.P.1.2 <- c("c-d", "l-s") # domain or location-scale
int.type.p.1.2 <- pmatch(type.p.1.2, TYPE.P.1.2)
if (is.na(int.type.p.1.2))
stop("invalid type of parametrization for parameters 1 and 2")
if (int.type.p.1.2 == -1)
stop("ambiguous type of parametrization for parameters 1 and 2")
# Types of parametrization are passed as fixed parameters
fix.arg$type.p.3.4 <- type.p.3.4
fix.arg$type.p.1.2 <- type.p.1.2
# Establish box constraints according to parameters in start
stnames <- names(start)
m <- length(stnames)
# Initialize all box constraints: (-Inf, Inf)
lower <- rep(-Inf, m)
upper <- rep(Inf, m)
# domain parametrization
if (int.type.p.1.2 == 1) {
# c has to be inside (-Inf, min(data))
upper[stnames == "p1"] <- min(data) - .epsilon
# d has to be inside (max(data), Inf)
lower[stnames == "p2"] <- max(data) + .epsilon
}
# location-scale parametrization
else{
# sigma has to be inside (0, Inf)
lower[stnames == "p2"] <- 0 + .epsilon
}
# tail weights parametrization
if (int.type.p.3.4 == 1) {
# Both tail weights have to be inside (0,1)
lower[stnames == "p3" | stnames == "p4"] <- 0 + .epsilon
upper[stnames == "p3" | stnames == "p4"] <- 1 - .epsilon
}
# asymmetry-steepness parametrization
else{
# asymmetry has to be inside (-1, 1)
# steepness has to be inside (0, 1)
lower[stnames == "p3"] <- -1 + .epsilon
lower[stnames == "p4"] <- 0 + .epsilon
upper[stnames == "p3" | stnames == "p4"] <- 1 - .epsilon
}
# nlminb optimization method
if(!is.null(custom.optim))
if(custom.optim=="nlminb")
custom.optim <- .m.nlminb
# order of moments to be used (p1-mean, p2-sd, p3-skew, p4-kurt)
order <- as.integer(substr(stnames,2,2))
# memp unbiased sample moments
memp <- function(x, order){
n <- length(x)
s.mean <- mean(x)
s.sd <- sd(x)
res <- switch(order,
s.mean,
s.sd,
n / ((n-1) * (n-2)) * sum((x - s.mean)^3) / s.sd^3,
(n-1) / ((n-2) * (n-3)) * ((n+1) * n / (n-1)^2 * sum((x - s.mean)^4) / s.sd^4 - 3 * (n-1)) + 3)
return(res)
}
# mmedist function of fitdistrplus
mme <- fitdistrplus::mmedist(data, "BMT", order = order, memp = memp, start = start, fix.arg = fix.arg,
optim.method = optim.method, lower = lower, upper = upper,
custom.optim = custom.optim, silent = silent, ...)
# Estimation with the location-scale parametrization might leave data outside the estimated domain
par <- append(mme$estimate,fix.arg)
if (int.type.p.1.2 == 2)
par <- BMTchangepars(par$p3, par$p4, par$type.p.3.4, par$p1, par$p2, par$type.p.1.2)
n.obs <- sum(data < par$p1 | data > par$p2)
if(n.obs > 0){
text <- paste("The resultant estimated domain is [",round(par$p1,4),",",round(par$p2,4),
"] and there are ",n.obs," observations out of it.",sep="")
warning(text)
}
return(mme)
}
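# Illustrative sketch (added): the unbiased sample moments used by memp()
# inside BMTfit.mme can be reproduced directly; order 3 is the adjusted
# Fisher-Pearson skewness and order 4 the corresponding unbiased kurtosis.
if (FALSE) {
  x <- rBMT(n = 100, p3 = 0.25, p4 = 0.75)
  n <- length(x); m <- mean(x); s <- sd(x)
  g1 <- n / ((n - 1) * (n - 2)) * sum((x - m)^3) / s^3
  g2 <- (n - 1) / ((n - 2) * (n - 3)) *
    ((n + 1) * n / (n - 1)^2 * sum((x - m)^4) / s^4 - 3 * (n - 1)) + 3
  c(mean = m, sd = s, skew = g1, kurt = g2)
}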
# ---- /scratch/gouwar.j/cran-all/cranData/BMT/R/BMTfit.mme.R ----
#'@title Maximum Product of Spacing Fit of the BMT Distribution to Non-censored
#' Data.
#'
#'@description Fit of the BMT distribution to non-censored data by maximum
#' product of spacing estimation (mpse), also called maximum spacing
#' estimation.
#'
#'@rdname BMTfit.mpse
#'@name BMTfit.mpse
#'
#'@details This function is not intended to be called directly but is internally
#' called in \code{\link{BMTfit}} when used with the maximum product of spacing
#' method.
#'
#' \code{BMTfit.mpse} is based on the function \code{\link{mpsedist}} but it
#' focuses on the maximum product of spacing parameter estimation for the BMT
#' distribution (see \code{\link{BMT}} for details about the BMT distribution
#' and \code{\link{mpsedist}} for details about maximum product of spacing fit
#' of univariate distributions).
#'
#'@param data A numeric vector with the observed values for non-censored data.
#'@param start A named list giving the initial values of parameters of the BMT
#' distribution or a function of data computing initial values and returning a
#' named list. (see the 'details' section of
#' \code{\link{mledist}}).
#'@param fix.arg An optional named list giving the values of fixed parameters of
#' the BMT distribution or a function of data computing (fixed) parameter
#' values and returning a named list. Parameters with fixed value are thus NOT
#' estimated. (see the 'details' section of
#' \code{\link{mledist}}).
#'@param type.p.3.4 Type of parametrization associated with p3 and p4. "t w" means
#' tail weights parametrization (default) and "a-s" means asymmetry-steepness
#' parametrization.
#'@param type.p.1.2 Type of parametrization associated with p1 and p2. "c-d" means
#' domain parametrization (default) and "l-s" means location-scale
#' parametrization.
#'@param optim.method \code{"default"} (see the 'details' section of
#' \code{\link{mledist}}) or optimization method to pass to
#' \code{\link{optim}}.
#'@param custom.optim A function carrying the optimization (see the 'details'
#' section of \code{\link{mledist}}).
#'@param silent A logical to remove or show warnings when bootstrapping.
#'@param \dots Further arguments to be passed to generic functions or to the
#' function \code{"mpsedist"}. See \code{\link{mpsedist}} for details.
#'
#'@return \code{BMTfit.mpse} returns a list with following components,
#'
#' \item{estimate}{ the parameter estimates.}
#'
#' \item{convergence}{ an integer code for the convergence of
#' \code{\link{optim}}/\code{\link{constrOptim}} defined as below or defined by
#' the user in the user-supplied optimization function.
#'
#' \code{0} indicates successful convergence.
#'
#' \code{1} indicates that the iteration limit of \code{\link{optim}} has been
#' reached.
#'
#' \code{10} indicates degeneracy of the Nelder-Mead simplex.
#'
#' \code{100} indicates that \code{\link{optim}} encountered an internal error.
#' }
#'
#' \item{value}{the value of the corresponding objective function of the
#' estimation method at the estimate.}
#'
#' \item{loglik}{the log-likelihood value.}
#'
#' \item{hessian}{a symmetric matrix computed by \code{\link{optim}} as an
#' estimate of the Hessian at the solution found or computed in the
#' user-supplied optimization function.}
#'
#' \item{optim.function}{the name of the optimization function used for maximum
#' product of spacing.}
#'
#' \item{optim.method}{when \code{\link{optim}} is used, the name of the
#' algorithm used, \code{NULL} otherwise.}
#'
#' \item{fix.arg}{the named list giving the values of parameters of the named
#' distribution that must be kept fixed rather than estimated, or \code{NULL} if
#' there are no such parameters. }
#'
#' \item{fix.arg.fun}{the function used to set the value of \code{fix.arg} or
#' \code{NULL}.}
#'
#' \item{weights}{the vector of weights used in the estimation process or
#' \code{NULL}.}
#'
#' \item{counts}{A two-element integer vector giving the number of calls to the
#' log-likelihood function and its gradient respectively. This excludes those
#' calls needed to compute the Hessian, if requested, and any calls to
#' log-likelihood function to compute a finite-difference approximation to the
#' gradient. \code{counts} is returned by \code{\link{optim}} or the
#' user-supplied function or set to \code{NULL}.}
#'
#' \item{optim.message}{A character string giving any additional information
#' returned by the optimizer, or \code{NULL}. To understand exactly the
#' message, see the source code.}
#'
#'@references Torres-Jimenez, C. J. (2017, September), \emph{Comparison of estimation
#' methods for the BMT distribution}. ArXiv e-prints.
#'
#' Torres-Jimenez, C. J. (2018), \emph{The BMT Item Response Theory model: A
#' new skewed distribution family with bounded domain and an IRT model based on
#' it}, PhD thesis, Doctorado en ciencias - Estadistica, Universidad Nacional
#' de Colombia, Sede Bogota.
#'
#'@seealso See \code{\link{BMT}} for the BMT density, distribution, quantile
#' function and random deviates. See \code{\link{BMTfit.mme}},
#' \code{\link{BMTfit.qme}}, \code{\link{BMTfit.mge}}, \code{\link{BMTfit.mle}}
#' and \code{\link{BMTfit.mqde}} for other estimation methods. See
#' \code{\link{optim}} and \code{\link{constrOptim}} for optimization routines.
#' See \code{\link{BMTfit}} and \code{\link{fitdist}} for functions that return
#' an object of class \code{"fitdist"}.
#'
#'@author Camilo Jose Torres-Jimenez [aut,cre] \email{[email protected]}
#'
#'@source Based on the function \code{\link{mpsedist}} which in turn is based on
#' the function \code{\link{mledist}} of the R package:
#' \code{\link{fitdistrplus}}
#'
#' Delignette-Muller ML and Dutang C (2015), \emph{fitdistrplus: An R Package
#' for Fitting Distributions}. Journal of Statistical Software, 64(4), 1-34.
#'
#' @examples
#' # (1) basic fit by maximum product of spacing estimation
#' set.seed(1234)
#' x1 <- rBMT(n=100, p3 = 0.25, p4 = 0.75)
#' BMTfit.mpse(x1)
#'
#' # (2) how to change the optimisation method?
#' BMTfit.mpse(x1, optim.method="L-BFGS-B")
#' BMTfit.mpse(x1, custom.optim="nlminb")
#'
#' # (3) estimation of the tail weights parameters of the BMT
#' # distribution with domain fixed at [0,1]
#' BMTfit.mpse(x1, start=list(p3=0.5, p4=0.5), fix.arg=list(p1=0, p2=1))
#'
#' # (4) estimation of the asymmetry-steepness parameters of the BMT
#' # distribution with domain fixed at [0,1]
#' BMTfit.mpse(x1, start=list(p3=0, p4=0.5), type.p.3.4 = "a-s",
#' fix.arg=list(p1=0, p2=1))
#'
#'@keywords distribution
######################
#' @rdname BMTfit.mpse
#' @export BMTfit.mpse
BMTfit.mpse <- function(data,
start = list(p3 = 0.5, p4 = 0.5, p1 = min(data) - 0.1, p2 = max(data) + 0.1),
fix.arg = NULL, type.p.3.4 = "t w", type.p.1.2 = "c-d",
optim.method = "Nelder-Mead", custom.optim = NULL, silent = TRUE, ...){
# Control data
if (!(is.vector(data) & is.numeric(data) & length(data) > 1))
stop("data must be a numeric vector of length greater than 1")
# Further arguments to be passed
my3dots <- list(...)
if (length(my3dots) == 0)
my3dots <- NULL
# Control weights
if(!is.null(my3dots$weights))
stop("Estimation with weights is not considered yet")
# Control type.p.3.4. It allows partial match.
TYPE.P.3.4 <- c("t w", "a-s") # tail weights or asymmetry-steepness
int.type.p.3.4 <- pmatch(type.p.3.4, TYPE.P.3.4)
if (is.na(int.type.p.3.4))
stop("invalid type of parametrization for parameters 3 and 4")
if (int.type.p.3.4 == -1)
stop("ambiguous type of parametrization for parameters 3 and 4")
# mpse only allows parametrization "c-d"
# because all data have to be inside the estimated domain.
if(type.p.1.2 != "c-d")
stop("maximum product of spacing estimation only allows parametrization \"c-d\"")
# Types of parametrization are passed as fixed parameters
fix.arg$type.p.3.4 <- type.p.3.4
fix.arg$type.p.1.2 <- "c-d"
# Establish box constraints according to parameters in start
stnames <- names(start)
m <- length(stnames)
# Initialize all box constraints: (0, 1)
lower <- rep(0 + .epsilon, m)
upper <- rep(1 - .epsilon, m)
# domain parametrization
# c has to be inside (-Inf, min(data))
lower[stnames == "p1"] <- -Inf
upper[stnames == "p1"] <- min(data) - .epsilon
# d has to be inside (max(data), Inf)
lower[stnames == "p2"] <- max(data) + .epsilon
upper[stnames == "p2"] <- Inf
# asymmetry-steepness parametrization
if(int.type.p.3.4 == 2) {
# asymmetry has to be inside (-1, 1)
lower[stnames == "p3"] <- -1 + .epsilon
}
# nlminb optimization method
if(!is.null(custom.optim))
if(custom.optim=="nlminb")
custom.optim <- .m.nlminb
# mpsedist function (based on mledist of fitdistrplus)
mpse <- mpsedist(data, "BMT", start = start, fix.arg = fix.arg,
optim.method = optim.method, lower = lower, upper = upper,
custom.optim = custom.optim, silent = silent, ...)
return(mpse)
}
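# Background sketch (added, illustrative): mpsedist maximizes the sum of
# log-spacings of the fitted CDF at the ordered sample. Shown here for a
# fixed parameter vector, assuming pBMT takes the same p1-p4 arguments as
# rBMT above.
if (FALSE) {
  x <- rBMT(n = 100, p3 = 0.25, p4 = 0.75)
  Fx <- pBMT(sort(x), p3 = 0.25, p4 = 0.75, p1 = min(x) - 0.1, p2 = max(x) + 0.1)
  sp <- diff(c(0, Fx, 1))  # n + 1 spacings between consecutive CDF values
  sum(log(sp))             # the maximum product of spacing criterion
}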
# ---- /scratch/gouwar.j/cran-all/cranData/BMT/R/BMTfit.mpse.R ----
#'@title Minimum Quantile Distance Fit of the BMT Distribution to Non-censored
#' Data.
#'
#'@description Fit of the BMT distribution to non-censored data by minimum
#' quantile distance (mqde), which can also be called maximum quantile
#' goodness-of-fit.
#'
#'@rdname BMTfit.mqde
#'@name BMTfit.mqde
#'
#'@details This function is not intended to be called directly but is internally
#' called in \code{\link{BMTfit}} when used with the minimum quantile distance
#' method.
#'
#' \code{BMTfit.mqde} is based on the function \code{\link{mqdedist}} but it
#' focuses on the minimum quantile distance parameter estimation for the BMT
#' distribution (see \code{\link{BMT}} for details about the BMT distribution
#' and \code{\link{mqdedist}} for details about minimum quantile distance fit
#' of univariate distributions).
#'
#' Given the closed-form expression of the quantile
#' function, two optimization methods are added when the Euclidean distance is
#' selected: coordinate descent (\code{"CD"}) and Newton-Raphson (\code{"NR"}).
#'
#'@param data A numeric vector with the observed values for non-censored data.
#'@param probs A numeric vector of the probabilities for which the minimum
#' quantile distance estimation is done. \eqn{p[k] = (k - 0.5) / n} (default).
#'@param qtype The quantile type used by the R \code{\link{quantile}} function
#' to compute the empirical quantiles. Type 5 (default), i.e. \eqn{x[k]} is
#' both the \eqn{k}th order statistic and the type 5 sample quantile of
#' \eqn{p[k] = (k - 0.5) / n}.
#'@param dist The distance measure between observed and theoretical quantiles to
#' be used. This must be one of "euclidean" (default), "maximum", or
#' "manhattan". Any unambiguous substring can be given.
#'@param start A named list giving the initial values of parameters of the BMT
#' distribution or a function of data computing initial values and returning a
#' named list. (see the 'details' section of
#' \code{\link{mledist}}).
#'@param fix.arg An optional named list giving the values of fixed parameters of
#' the BMT distribution or a function of data computing (fixed) parameter
#' values and returning a named list. Parameters with fixed value are thus NOT
#' estimated. (see the 'details' section of
#' \code{\link{mledist}}).
#'@param type.p.3.4 Type of parametrization associated with p3 and p4. "t w" means
#' tail weights parametrization (default) and "a-s" means asymmetry-steepness
#' parametrization.
#'@param type.p.1.2 Type of parametrization associated with p1 and p2. "c-d" means
#' domain parametrization (default) and "l-s" means location-scale
#' parametrization.
#'@param optim.method \code{"default"} (see the 'details' section of
#' \code{\link{mledist}}) or optimization method to pass to
#' \code{\link{optim}}. Given the closed-form expression of the quantile
#' function, two optimization methods are added when the Euclidean distance is
#' selected: coordinate descent (\code{"CD"}) and Newton-Raphson (\code{"NR"}).
#'@param custom.optim A function carrying the optimization (see the 'details'
#' section of \code{\link{mledist}}).
#'@param weights an optional vector of weights to be used in the fitting process.
#' Should be \code{NULL} or a numeric vector with strictly positive numbers.
#' If non-\code{NULL}, weighted mqde is used, otherwise ordinary mqde.
#'@param silent A logical to remove or show warnings when bootstrapping.
#'@param \dots Further arguments to be passed to generic functions or to the
#' function \code{"mqdedist"}. See \code{\link{mqdedist}} for details.
#'
#'@return \code{BMTfit.mqde} returns a list with following components,
#'
#' \item{estimate}{ the parameter estimates.}
#'
#' \item{convergence}{ an integer code for the convergence of
#' \code{\link{optim}}/\code{\link{constrOptim}} defined as below or defined by
#' the user in the user-supplied optimization function.
#'
#' \code{0} indicates successful convergence.
#'
#' \code{1} indicates that the iteration limit of \code{\link{optim}} has been
#' reached.
#'
#' \code{10} indicates degeneracy of the Nelder-Mead simplex.
#'
#' \code{100} indicates that \code{\link{optim}} encountered an internal error.
#' }
#'
#' \item{value}{the value of the corresponding objective function of the
#' estimation method at the estimate.}
#'
#' \item{hessian}{a symmetric matrix computed by \code{\link{optim}} as an
#' estimate of the Hessian at the solution found or computed in the
#' user-supplied optimization function.}
#'
#' \item{loglik}{the log-likelihood value.}
#'
#' \item{probs}{ the probability vector on which observed and theoretical
#' quantiles were calculated. }
#'
#' \item{dist}{ the name of the distance between observed and theoretical
#' quantiles used. }
#'
#' \item{optim.function}{the name of the optimization function used for
#' minimum quantile distance.}
#'
#' \item{optim.method}{when \code{\link{optim}} is used, the name of the
#' algorithm used, \code{NULL} otherwise.}
#'
#' \item{fix.arg}{the named list giving the values of parameters of the named
#' distribution that must be kept fixed rather than estimated, or \code{NULL} if
#' there are no such parameters. }
#'
#' \item{fix.arg.fun}{the function used to set the value of \code{fix.arg} or
#' \code{NULL}.}
#'
#' \item{weights}{the vector of weights used in the estimation process or
#' \code{NULL}.}
#'
#' \item{counts}{A two-element integer vector giving the number of calls to the
#' log-likelihood function and its gradient respectively. This excludes those
#' calls needed to compute the Hessian, if requested, and any calls to
#' log-likelihood function to compute a finite-difference approximation to the
#' gradient. \code{counts} is returned by \code{\link{optim}} or the
#' user-supplied function or set to \code{NULL}.}
#'
#' \item{optim.message}{A character string giving any additional information
#' returned by the optimizer, or \code{NULL}. To understand exactly the
#' message, see the source code.}
#'
#'@references Torres-Jimenez, C. J. (2017, September), \emph{Comparison of estimation
#' methods for the BMT distribution}. ArXiv e-prints.
#'
#' Torres-Jimenez, C. J. (2018), \emph{The BMT Item Response Theory model: A
#' new skewed distribution family with bounded domain and an IRT model based on
#' it}, PhD thesis, Doctorado en ciencias - Estadistica, Universidad Nacional
#' de Colombia, Sede Bogota.
#'
#'@seealso See \code{\link{BMT}} for the BMT density, distribution, quantile
#' function and random deviates. See \code{\link{BMTfit.mme}},
#' \code{\link{BMTfit.mle}}, \code{\link{BMTfit.mge}},
#' \code{\link{BMTfit.mpse}} and \code{\link{BMTfit.qme}} for other estimation
#' methods. See \code{\link{optim}} and \code{\link{constrOptim}} for
#' optimization routines. See \code{\link{BMTfit}} and \code{\link{fitdist}}
#' for functions that return an object of class \code{"fitdist"}.
#'
#'@author Camilo Jose Torres-Jimenez [aut,cre] \email{[email protected]}
#'
#'@source Based on the function \code{\link{mqdedist}} which in turn is based on
#' the function \code{\link{mledist}} of the R package:
#' \code{\link{fitdistrplus}}
#'
#' Delignette-Muller ML and Dutang C (2015), \emph{fitdistrplus: An R Package
#' for Fitting Distributions}. Journal of Statistical Software, 64(4), 1-34.
#'
#' @examples
#' # (1) basic fit by minimum quantile distance estimation
#' set.seed(1234)
#' x1 <- rBMT(n=100, p3=0.25, p4=0.75)
#' BMTfit.mqde(x1)
#'
#' # (2) quantile matching is a particular case of minimum quantile distance
#' BMTfit.mqde(x1, probs=c(0.2,0.4,0.6,0.8), qtype=7)
#'
#' # (3) maximum or manhattan instead of euclidean distance
#' BMTfit.mqde(x1, dist="maximum")
#' BMTfit.mqde(x1, dist="manhattan")
#'
#' # (4) how to change the optimisation method?
#' BMTfit.mqde(x1, optim.method="L-BFGS-B")
#' BMTfit.mqde(x1, custom.optim="nlminb")
#'
#' # (5) estimation of the tail weights parameters of the BMT
#' # distribution with domain fixed at [0,1]
#' BMTfit.mqde(x1, start=list(p3=0.5, p4=0.5), fix.arg=list(p1=0, p2=1))
#'
#' # (6) estimation of the asymmetry-steepness parameters of the BMT
#' # distribution with domain fixed at [0,1]
#' BMTfit.mqde(x1, start=list(p3=0, p4=0.5), type.p.3.4 = "a-s",
#' fix.arg=list(p1=0, p2=1))
#'
#'@keywords distribution
######################
#' @rdname BMTfit.mqde
#' @export BMTfit.mqde
BMTfit.mqde <- function(data, probs = (1:length(data)-0.5)/length(data), qtype = 5, dist = "euclidean",
start = list(p3 = 0.5, p4 = 0.5, p1 = min(data) - 0.1, p2 = max(data) + 0.1),
fix.arg = NULL, type.p.3.4 = "t w", type.p.1.2 = "c-d",
optim.method = "Nelder-Mead", custom.optim = NULL, weights = NULL, silent = TRUE, ...){
# Control data
if (!(is.vector(data) & is.numeric(data) & length(data) > 1))
stop("data must be a numeric vector of length greater than 1")
# Control probs, qtype
if (!(is.vector(probs) & is.numeric(probs)) | anyNA(probs) | any(probs < 0 | probs > 1))
stop("probs must be a numeric vector with all elements greater than zero and less than one")
probs <- unique(sort(probs))
if (qtype < 0 || qtype > 9)
stop("wrong type for the R quantile function")
# Control dist
int.dist <- pmatch(dist, c("euclidean", "maximum", "manhattan"))
if (is.na(int.dist))
stop("invalid distance measure to be used")
if (int.dist == -1)
stop("ambiguous distance measure to be used")
# Control optim.method
if (is.null(custom.optim))
optim.method <- match.arg(optim.method, c("default", "Nelder-Mead", "BFGS", "CG",
"L-BFGS-B", "SANN", "Brent", "CD","NR"))
# Control start and fix.arg
start.arg <- start
if (is.vector(start.arg))
start.arg <- as.list(start.arg)
stnames <- names(start.arg)
fixnames <- names(fix.arg)
# Further arguments to be passed
my3dots <- list(...)
if (length(my3dots) == 0)
my3dots <- NULL
if (is.vector(data)) {
n <- length(data)
if (!(is.numeric(data) & n > 1))
stop("data must be a numeric vector of length greater than 1")
}
else
stop("Minimum quantile distance estimation is not yet available for censored data.")
# Control weights
if (!is.null(weights)) {
if (any(weights <= 0))
stop("weights should be a vector of numbers greater than 0")
if (length(weights) != n)
stop("weights should be a vector with a length equal to the observation number")
w <- sum(weights)
}
# Control maximum number of iterations
maxit <- ifelse(is.null(my3dots$control$maxit),3000,my3dots$control$maxit)
# Control type.p.3.4. It allows partial match.
TYPE.P.3.4 <- c("t w", "a-s") # tail weights or asymmetry-steepness
int.type.p.3.4 <- pmatch(type.p.3.4, TYPE.P.3.4)
if (is.na(int.type.p.3.4))
stop("invalid type of parametrization for parameters 3 and 4")
if (int.type.p.3.4 == -1)
stop("ambiguous type of parametrization for parameters 3 and 4")
# Control type.p.1.2. It allows partial match.
TYPE.P.1.2 <- c("c-d", "l-s") # domain or location-scale
int.type.p.1.2 <- pmatch(type.p.1.2, TYPE.P.1.2)
if (is.na(int.type.p.1.2))
stop("invalid type of parametrization for parameters 1 and 2")
if (int.type.p.1.2 == -1)
stop("ambiguous type of parametrization for parameters 1 and 2")
# Types of parametrization are passed as fixed parameters
fix.arg$type.p.3.4 <- type.p.3.4
fix.arg$type.p.1.2 <- type.p.1.2
# Establish box constraints according to parameters in start
npar <- length(stnames)
# Initialize all box constraints: (-Inf, Inf)
if(is.null(my3dots$lower)){
lower <- rep(-Inf, npar)
# domain parametrization
if (int.type.p.1.2 == 1) {
# c has to be inside (-Inf, min(data))
# d has to be inside (max(data), Inf)
lower[stnames == "p2"] <- max(data) + .epsilon
}
# location-scale parametrization
else{
# sigma has to be inside (0, Inf)
lower[stnames == "p2"] <- 0 + .epsilon
}
# tail weights parametrization
if (int.type.p.3.4 == 1) {
# Both tail weights have to be inside (0,1)
lower[stnames == "p3" | stnames == "p4"] <- 0 + .epsilon
}
# asymmetry-steepness parametrization
else{
# asymmetry has to be inside (-1, 1)
# steepness has to be inside (0, 1)
lower[stnames == "p3"] <- -1 + .epsilon
lower[stnames == "p4"] <- 0 + .epsilon
}
}
else{
lower <- my3dots$lower
my3dots$lower <- NULL
}
if(is.null(my3dots$upper)){
upper <- rep(Inf, npar)
# domain parametrization
if (int.type.p.1.2 == 1) {
# c has to be inside (-Inf, min(data))
upper[stnames == "p1"] <- min(data) - .epsilon
# d has to be inside (max(data), Inf)
}
# location-scale parametrization
# sigma has to be inside (0, Inf)
# tail weights parametrization
if (int.type.p.3.4 == 1) {
# Both tail weights have to be inside (0,1)
upper[stnames == "p3" | stnames == "p4"] <- 1 - .epsilon
}
# asymmetry-steepness parametrization
else{
# asymmetry has to be inside (-1, 1)
# steepness has to be inside (0, 1)
upper[stnames == "p3" | stnames == "p4"] <- 1 - .epsilon
}
}
else{
upper <- my3dots$upper
my3dots$upper <- NULL
}
names(upper) <- names(lower) <- stnames
# nlminb optimization method
if(!is.null(custom.optim)){
if(custom.optim=="nlminb")
custom.optim <- .m.nlminb
# mqdedist function
mqde <- do.call(mqdedist, append(list(data, "BMT", probs = probs, qtype = qtype, dist = dist,
start = start, fix.arg = fix.arg, optim.method = optim.method,
lower = lower, upper = upper, custom.optim = custom.optim,
weights = weights, silent = silent), my3dots))
}
else{
if(optim.method == "CD"){
if(int.dist != 1)
stop("Coordinate descend (CD) optimization metod is only considered with euclidean distance")
# Change p3 and p4 to the tail weights parametrization
par <- append(start.arg, fix.arg)
if(int.type.p.3.4 != 1){
c.par <- BMTchangepars(par$p3, par$p4, par$type.p.3.4, par$p1, par$p2, par$type.p.1.2)
par$p3 <- c.par$p3
par$p4 <- c.par$p4
par$type.p.3.4 <- c.par$type.p.3.4
}
# Sample quantiles
if(qtype == 0)
sq <- data
else
sq <- quantile(data, probs = probs, type = qtype, names = FALSE)
t <- 0.5-cos((acos(2*probs-1)-2*pi)/3)
a <- 3 * t * (t - 1)^2
b <- 3 * t * t * (t - 1)
c <- t * t * (3 - 2 * t)
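# Added note: a, b and c are the cubic Bezier basis terms at t, so the model
# quantile a*p3 + b*p4 + c is linear in (p3, p4); this is what makes each
# coordinate-descent update below a closed-form least-squares step.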
#
flag <- FALSE
iter <- 0
conv <- 0
message <- NULL
if(is.null(weights)){
# theoretical quantiles
theoq <- do.call("qBMT", c(list(p = probs), as.list(par[stnames]), as.list(par[names(fix.arg)])))
# objective function
value <- sum((sq - theoq)^2)
repeat{
value.old <- value
iter <- iter + 1
if(!("p3" %in% fixnames) | !("p4" %in% fixnames)){
sx <- (sq - par$p1)/(par$p2 - par$p1)
if(!("p4" %in% fixnames)){
a.b <- sum(a*b)
b.b <- sum(b*b)
b.c_sx <- sum(b*(c-sx))
if(!("p3" %in% fixnames)){
a.a <- sum(a*a)
a.c_sx <- sum(a*(c-sx))
par$p3 <- (a.b * b.c_sx - b.b * a.c_sx ) / (a.a * b.b - a.b * a.b)
par$p3 <- unname(ifelse(par$p3 > upper["p3"], upper["p3"], par$p3))
par$p3 <- unname(ifelse(par$p3 < lower["p3"], lower["p3"], par$p3))
}
par$p4 <- (- par$p3 * a.b - b.c_sx) / b.b
par$p4 <- unname(ifelse(par$p4 > upper["p4"], upper["p4"], par$p4))
par$p4 <- unname(ifelse(par$p4 < lower["p4"], lower["p4"], par$p4))
}
else{
if(!("p3" %in% fixnames)){
par$p3 <- (- par$p4 * sum(a*b) - sum(a*(c-sx))) / sum(a*a)
par$p3 <- unname(ifelse(par$p3 > upper["p3"], upper["p3"], par$p3))
par$p3 <- unname(ifelse(par$p3 < lower["p3"], lower["p3"], par$p3))
}
}
}
else
flag <- TRUE
if(!("p1" %in% fixnames) | !("p2" %in% fixnames)){
tx <- a * par$p3 + b * par$p4 + c
mean.sq <- mean(sq)
mean.tx <- mean(tx)
if(!("p2" %in% fixnames)){
mean.tx2 <- mean(tx^2)
mean.sq.tx <- mean(sq * tx)
if(!("p1" %in% fixnames)){
par$p1 <- (mean.sq * mean.tx2 - mean.sq.tx * mean.tx) / (mean.tx2 - mean.tx^2)
if(int.type.p.1.2 == 1)
par$p1 <- unname(ifelse(par$p1 > upper["p1"], upper["p1"], par$p1))
}
par$p2 <- par$p1 + (mean.sq.tx - par$p1 * mean.tx) / mean.tx2
if(int.type.p.1.2 == 1)
par$p2 <- unname(ifelse(par$p2 < lower["p2"], lower["p2"], par$p2))
}
else{
if(!("p1" %in% fixnames)){
par$p1 <- (mean.sq - par$p2 * mean.tx) / (1 - mean.tx)
if(int.type.p.1.2 == 1)
par$p1 <- unname(ifelse(par$p1 > upper["p1"], upper["p1"], par$p1))
}
}
}
else
flag <- TRUE
# theoretical quantiles
theoq <- do.call("qBMT", c(list(p = probs), as.list(par[stnames]), as.list(par[names(fix.arg)])))
# objective function
value <- sum((sq - theoq)^2)
if(abs(value - value.old) < 1e-12 | iter == maxit | flag == TRUE)
break
}
}
else{
# theoretical quantiles
theoq <- do.call("qBMT", c(list(p = probs), as.list(par[stnames]), as.list(par[names(fix.arg)])))
# objective function
value <- sum(weights*(sq - theoq)^2)
repeat{
value.old <- value
iter <- iter + 1
if(!("p3" %in% fixnames) | !("p4" %in% fixnames)){
sx <- (sq - par$p1)/(par$p2 - par$p1)
if(!("p4" %in% fixnames)){
a.b <- sum(weights*a*b)
b.b <- sum(weights*b*b)
b.c_sx <- sum(weights*b*(c-sx))
if(!("p3" %in% fixnames)){
a.a <- sum(weights*a*a)
a.c_sx <- sum(weights*a*(c-sx))
par$p3 <- (a.b * b.c_sx - b.b * a.c_sx ) / (a.a * b.b - a.b * a.b)
par$p3 <- unname(ifelse(par$p3 > upper["p3"], upper["p3"], par$p3))
par$p3 <- unname(ifelse(par$p3 < lower["p3"], lower["p3"], par$p3))
}
par$p4 <- (- par$p3 * a.b - b.c_sx) / b.b
par$p4 <- unname(ifelse(par$p4 > upper["p4"], upper["p4"], par$p4))
par$p4 <- unname(ifelse(par$p4 < lower["p4"], lower["p4"], par$p4))
}
else{
if(!("p3" %in% fixnames)){
par$p3 <- (- par$p4 * sum(weights*a*b) - sum(weights*a*(c-sx))) / sum(weights*a*a)
par$p3 <- unname(ifelse(par$p3 > upper["p3"], upper["p3"], par$p3))
par$p3 <- unname(ifelse(par$p3 < lower["p3"], lower["p3"], par$p3))
}
}
}
else
flag <- TRUE
if(!("p1" %in% fixnames) | !("p2" %in% fixnames)){
tx <- a * par$p3 + b * par$p4 + c
mean.sq <- sum(weights * sq) / w
mean.tx <- sum(weights * tx) / w
if(!("p2" %in% fixnames)){
mean.tx2 <- sum(weights * tx^2) / w
mean.sq.tx <- sum(weights * sq * tx) / w
if(!("p1" %in% fixnames)){
par$p1 <- (mean.sq * mean.tx2 - mean.sq.tx * mean.tx) / (mean.tx2 - mean.tx^2)
if(int.type.p.1.2 == 1)
par$p1 <- unname(ifelse(par$p1 > upper["p1"], upper["p1"], par$p1))
}
par$p2 <- par$p1 + (mean.sq.tx - par$p1 * mean.tx) / mean.tx2
if(int.type.p.1.2 == 1)
par$p2 <- unname(ifelse(par$p2 < lower["p2"], lower["p2"], par$p2))
}
else{
if(!("p1" %in% fixnames)){
par$p1 <- (mean.sq - par$p2 * mean.tx) / (1 - mean.tx)
if(int.type.p.1.2 == 1)
par$p1 <- unname(ifelse(par$p1 > upper["p1"], upper["p1"], par$p1))
}
}
}
else
flag <- TRUE
# theoretical quantiles
theoq <- do.call("qBMT", c(list(p = probs), as.list(par[stnames]), as.list(par[names(fix.arg)])))
# objective function
value <- sum(weights*(sq - theoq)^2)
if(abs(value - value.old) < 1e-12 | iter == maxit | flag == TRUE)
break
}
}
if(iter == maxit){
conv <- 1
message <- "Maximum number of iterations"
}
# Restore to the given parameterization
if(int.type.p.3.4 != 1){
c.par <- BMTchangepars(par$p3, par$p4, par$type.p.3.4, par$p1, par$p2, par$type.p.1.2)
par$p3 <- c.par$p3
par$p4 <- c.par$p4
par$type.p.3.4 <- c.par$type.p.3.4
}
mqde <- list(estimate = unlist(par[stnames]), convergence = conv, value = value,
hessian = NULL, probs = probs, dist = dist,
optim.function = NULL, fix.arg = fix.arg,
loglik = .loglik(par[stnames], fix.arg, data, "dBMT"),
optim.method = "CD", fix.arg.fun = NULL,
counts = c(iter, NA), optim.message = message)
}
else if(optim.method == "NR"){
if(int.dist != 1)
stop("Newton-Raphson (NR) optimization method is only considered with euclidean distance")
# Change to the tail weights and domain parametrizations
par <- append(start.arg, fix.arg)
if(int.type.p.3.4 != 1 | int.type.p.1.2 != 1){
c.par <- BMTchangepars(par$p3,par$p4,par$type.p.3.4,par$p1,par$p2,par$type.p.1.2)
if(int.type.p.3.4 != 1){
par$p3 <- c.par$p3
par$p4 <- c.par$p4
par$type.p.3.4 <- c.par$type.p.3.4
}
if(int.type.p.1.2 != 1){
par$p1 <- c.par$p1
par$p2 <- c.par$p2
par$type.p.1.2 <- c.par$type.p.1.2
}
}
# Sample quantiles
if(qtype == 0)
sq <- data
else
sq <- quantile(data, probs = probs, type = qtype, names = FALSE)
t <- 0.5-cos((acos(2*probs-1)-2*pi)/3)
a <- 3 * t * (t - 1)^2
b <- 3 * t * t * (t - 1)
c <- t * t * (3 - 2 * t)
#
iter <- 0
conv <- 0
message <- NULL
if(is.null(weights)){
repeat{
iter <- iter + 1
R <- par$p2 - par$p1
sx <- (sq - par$p1) / R
tx <- a * par$p3 + b * par$p4 + c
sx_tx <- sx - tx
sxsx_tx <- 2*sx - tx
a.sx_tx <- sum(a * sx_tx)
b.sx_tx <- sum(b * sx_tx)
sx.sx_tx <- sum(sx * sx_tx)
kl.1_kl <- par$p3 * (1 - par$p3)
kr.1_kr <- par$p4 * (1 - par$p4)
gradient <- -2 * c(kl.1_kl * a.sx_tx,
kr.1_kr * b.sx_tx,
sx.sx_tx,
sum(sx_tx) / R)
hessian <- diag(c(kl.1_kl * ((2 * par$p3 - 1) * a.sx_tx + kl.1_kl * sum(a*a)),
kr.1_kr * ((2 * par$p4 - 1) * b.sx_tx + kr.1_kr * sum(b*b)),
sum(sx * sxsx_tx),
n / R^2))
hessian[lower.tri(hessian)] <- 2*c(kl.1_kl * kr.1_kr * sum(a * b),
kl.1_kl * sum(a * sx),
kl.1_kl * sum(a) / R,
kr.1_kr * sum(b * sx),
kr.1_kr * sum(b) / R,
sum(sxsx_tx) / R)
hessian <- hessian + t(hessian)
delta <- solve(hessian,gradient)
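# Added note: the Newton step is taken in unconstrained coordinates (logit
# of the tail weights p3 and p4, log of the range R = p2 - p1, and p1
# itself); mapping back via plogis()/exp() keeps every iterate inside the
# box constraints without explicit projection.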
theta <- c(log(par$p3/(1-par$p3)), log(par$p4/(1-par$p4)), log(R), par$p1)
theta <- theta - delta
par$p3 <- plogis(theta[1])
par$p4 <- plogis(theta[2])
par$p1 <- theta[4]
par$p2 <- par$p1 + exp(theta[3])
if(all(abs(delta) < 1e-12) | iter == maxit)
break
}
}
else{
# weighted Newton-Raphson update not implemented yet
}
# theoretical quantiles
theoq <- do.call("qBMT", c(list(p = probs), as.list(par[stnames]), as.list(fix.arg)))
# objective function
value <- sum((sq - theoq)^2)
if(iter == maxit){
conv <- 1
message <- "Maximum number of iterations"
}
# Restore to the given parameterization
if(int.type.p.3.4 != 1 | int.type.p.1.2 != 1){
c.par <- BMTchangepars(par$p3,par$p4,par$type.p.3.4,par$p1,par$p2,par$type.p.1.2)
if(int.type.p.3.4 != 1){
par$p3 <- c.par$p3
par$p4 <- c.par$p4
par$type.p.3.4 <- c.par$type.p.3.4
}
if(int.type.p.1.2 != 1){
par$p1 <- c.par$p1
par$p2 <- c.par$p2
par$type.p.1.2 <- c.par$type.p.1.2
}
}
mqde <- list(estimate = unlist(par[stnames]), convergence = conv, value = value,
hessian = NULL, probs = probs, dist = dist,
optim.function = NULL, fix.arg = fix.arg,
loglik = .loglik(par[stnames], fix.arg, data, "dBMT"),
optim.method = "NR", fix.arg.fun = NULL,
counts = c(iter, iter), optim.message = message)
}
else{
# mqdedist function
mqde <- mqdedist(data, "BMT", probs = probs, qtype = qtype, dist = dist,
start = start, fix.arg = fix.arg, optim.method = optim.method,
lower = lower, upper = upper, custom.optim = custom.optim,
weights = weights, silent = silent, ...)
}
}
# Estimation with the location-scale parametrization might leave data outside the estimated domain
par <- append(mqde$estimate,fix.arg)
if (int.type.p.1.2 == 2)
par <- BMTchangepars(par$p3, par$p4, par$type.p.3.4, par$p1, par$p2, par$type.p.1.2)
n.obs <- sum(data < par$p1 | data > par$p2)
if(n.obs > 0){
text <- paste("The resultant estimated domain is [", round(par$p1, 4), ",", round(par$p2, 4),
"] and there are ", n.obs, " observations out of it.", sep="")
warning(text)
}
return(mqde)
}
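# Background sketch (added, illustrative): under the tail weights and domain
# parametrizations the BMT quantile function is linear in (p3, p4) once t(p)
# is known, which is what makes the CD and NR updates above closed form.
# Assuming qBMT's default domain is [0, 1]:
if (FALSE) {
  p <- 0.3
  t <- 0.5 - cos((acos(2 * p - 1) - 2 * pi) / 3)
  a <- 3 * t * (t - 1)^2; b <- 3 * t^2 * (t - 1); cc <- t^2 * (3 - 2 * t)
  a * 0.25 + b * 0.75 + cc  # should match qBMT(p, 0.25, 0.75)
}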
# ---- /scratch/gouwar.j/cran-all/cranData/BMT/R/BMTfit.mqde.R ----
#'@title Quantile Matching Fit of the BMT Distribution to Non-censored Data.
#'
#'@description Fit of the BMT distribution to non-censored data by quantile
#' matching estimation (qme).
#'
#'@rdname BMTfit.qme
#'@name BMTfit.qme
#'
#'@details This function is not intended to be called directly but is internally
#' called in \code{\link{BMTfit}} when used with the quantile matching method.
#'
#' \code{BMTfit.qme} is based on the function \code{\link{qmedist}} but it
#' focuses on the quantile matching parameter estimation for the BMT
#' distribution (see \code{\link{BMT}} for details about the BMT distribution
#' and \code{\link{qmedist}} for details about quantile matching fit of
#' univariate distributions).
#'
#'@param data A numeric vector with the observed values for non-censored data.
#'@param probs A numeric vector of the probabilities for which the quantile
#' matching is done. The length of this vector must be equal to the number of
#' parameters to estimate.
#'@param qtype The quantile type used by the R \code{\link{quantile}} function
#' to compute the empirical quantiles, (default 7 corresponds to the default
#' quantile method in R).
#'@param start A named list giving the initial values of parameters of the BMT
#' distribution or a function of data computing initial values and returning a
#' named list. (see the 'details' section of
#' \code{\link{mledist}}).
#'@param fix.arg An optional named list giving the values of fixed parameters of
#' the BMT distribution or a function of data computing (fixed) parameter
#' values and returning a named list. Parameters with fixed value are thus NOT
#' estimated. (see the 'details' section of
#' \code{\link{mledist}}).
#'@param type.p.3.4 Type of parametrization associated with p3 and p4. "t w" means
#' tail weights parametrization (default) and "a-s" means asymmetry-steepness
#' parametrization.
#'@param type.p.1.2 Type of parametrization associated with p1 and p2. "c-d" means
#' domain parametrization (default) and "l-s" means location-scale
#' parametrization.
#'@param optim.method \code{"default"} (see the 'details' section of
#' \code{\link{mledist}}) or optimization method to pass to
#' \code{\link{optim}}.
#'@param custom.optim A function carrying the optimization (see the 'details'
#' section of \code{\link{mledist}}).
#'@param silent A logical to remove or show warnings when bootstrapping.
#'@param \dots Further arguments to be passed to generic functions or to the
#' function \code{"qmedist"}. See \code{\link{qmedist}} for details.
#'
#'@return \code{BMTfit.qme} returns a list with following components,
#'
#' \item{estimate}{ the parameter estimates.}
#'
#' \item{convergence}{ an integer code for the convergence of
#' \code{\link{optim}}/\code{\link{constrOptim}} defined as below or defined by
#' the user in the user-supplied optimization function.
#'
#' \code{0} indicates successful convergence.
#'
#' \code{1} indicates that the iteration limit of \code{\link{optim}} has been
#' reached.
#'
#' \code{10} indicates degeneracy of the Nelder-Mead simplex.
#'
#' \code{100} indicates that \code{\link{optim}} encountered an internal error.
#' }
#'
#' \item{value}{the value of the corresponding objective function of the
#' estimation method at the estimate.}
#'
#' \item{hessian}{a symmetric matrix computed by \code{\link{optim}} as an
#' estimate of the Hessian at the solution found or computed in the
#' user-supplied optimization function.}
#'
#' \item{loglik}{the log-likelihood value.}
#'
#' \item{probs}{the probability vector on which quantiles are matched.}
#'
#' \item{optim.function}{the name of the optimization function used for
#' quantile matching.}
#'
#' \item{optim.method}{when \code{\link{optim}} is used, the name of the
#' algorithm used, \code{NULL} otherwise.}
#'
#' \item{fix.arg}{the named list giving the values of parameters of the named
#' distribution that must be kept fixed rather than estimated, or \code{NULL} if
#' there are no such parameters. }
#'
#' \item{fix.arg.fun}{the function used to set the value of \code{fix.arg} or
#' \code{NULL}.}
#'
#' \item{weights}{the vector of weights used in the estimation process or
#' \code{NULL}.}
#'
#' \item{counts}{A two-element integer vector giving the number of calls to the
#' log-likelihood function and its gradient respectively. This excludes those
#' calls needed to compute the Hessian, if requested, and any calls to
#' log-likelihood function to compute a finite-difference approximation to the
#' gradient. \code{counts} is returned by \code{\link{optim}} or the
#' user-supplied function or set to \code{NULL}.}
#'
#' \item{optim.message}{A character string giving any additional information
#' returned by the optimizer, or \code{NULL}. To understand exactly the
#' message, see the source code.}
#'
#'@references Torres-Jimenez, C. J. (2017, September), \emph{Comparison of estimation
#' methods for the BMT distribution}. ArXiv e-prints.
#'
#' Torres-Jimenez, C. J. (2018), \emph{The BMT Item Response Theory model: A
#' new skewed distribution family with bounded domain and an IRT model based on
#' it}, PhD thesis, Doctorado en ciencias - Estadistica, Universidad Nacional
#' de Colombia, Sede Bogota.
#'
#'@seealso See \code{\link{BMT}} for the BMT density, distribution, quantile
#' function and random deviates. See \code{\link{BMTfit.mme}},
#' \code{\link{BMTfit.mle}}, \code{\link{BMTfit.mge}},
#' \code{\link{BMTfit.mpse}} and \code{\link{BMTfit.mqde}} for other estimation
#' methods. See \code{\link{optim}} and \code{\link{constrOptim}} for
#' optimization routines. See \code{\link{BMTfit}} and \code{\link{fitdist}}
#' for functions that return an object of class \code{"fitdist"}.
#'
#'@author Camilo Jose Torres-Jimenez [aut,cre] \email{[email protected]}
#'
#'@source Based on the function \code{\link{qmedist}} of the R package:
#' \code{\link{fitdistrplus}}
#'
#' Delignette-Muller ML and Dutang C (2015), \emph{fitdistrplus: An R Package
#' for Fitting Distributions}. Journal of Statistical Software, 64(4), 1-34.
#'
#' @examples
#' # (1) basic fit by quantile matching estimation
#' set.seed(1234)
#' x1 <- rBMT(n=100, p3 = 0.25, p4 = 0.75)
#' BMTfit.qme(x1)
#'
#' # (2) changing the probability vector on which quantiles are matched
#' BMTfit.qme(x1, probs=c(0.1,0.3,0.5,0.75))
#'
#' # (3) how to change the optimisation method?
#' BMTfit.qme(x1, optim.method="L-BFGS-B")
#' BMTfit.qme(x1, custom.optim="nlminb")
#'
#' # (4) estimation of the tails weights parameters of the BMT
#' # distribution with domain fixed at [0,1]
#' BMTfit.qme(x1, start=list(p3=0.5, p4=0.5),
#' fix.arg=list(p1=0, p2=1), probs=c(1/3,2/3))
#'
#' # (5) estimation of the asymmetry-steepness parameters of the BMT
#' # distribution with domain fixed at [0,1]
#' BMTfit.qme(x1, start=list(p3=0, p4=0.5), type.p.3.4 = "a-s",
#' fix.arg=list(p1=0, p2=1), probs=c(1/3,2/3))
#'
#'@keywords distribution
#####################
#' @rdname BMTfit
#' @export BMTfit.qme
BMTfit.qme <- function(data, probs = c(0.2,0.4,0.6,0.8), qtype = 7,
start = list(p3 = 0.5, p4 = 0.5, p1 = min(data) - 0.1, p2 = max(data) + 0.1),
fix.arg = NULL, type.p.3.4 = "t w", type.p.1.2 = "c-d",
optim.method = "Nelder-Mead", custom.optim = NULL, silent = TRUE, ...){
# Control data
if (!(is.vector(data) & is.numeric(data) & length(data) > 1))
stop("data must be a numeric vector of length greater than 1")
# Further arguments to be passed
my3dots <- list(...)
if (length(my3dots) == 0)
my3dots <- NULL
# Control weights
if(!is.null(my3dots$weights))
stop("Estimation with weights is not considered yet")
# Control type.p.3.4. It allows partial match.
TYPE.P.3.4 <- c("t w", "a-s") # tail weights or asymmetry-steepness
int.type.p.3.4 <- pmatch(type.p.3.4, TYPE.P.3.4)
if (is.na(int.type.p.3.4))
stop("invalid type of parametrization for parameters 3 and 4")
if (int.type.p.3.4 == -1)
stop("ambiguous type of parametrization for parameters 3 and 4")
# Control type.p.1.2. It allows partial match.
TYPE.P.1.2 <- c("c-d", "l-s") # domain or location-scale
int.type.p.1.2 <- pmatch(type.p.1.2, TYPE.P.1.2)
if (is.na(int.type.p.1.2))
stop("invalid type of parametrization for parameters 1 and 2")
if (int.type.p.1.2 == -1)
stop("ambiguous type of parametrization for parameters 1 and 2")
# Type of parametrizations are fixed parameters
fix.arg$type.p.3.4 <- type.p.3.4
fix.arg$type.p.1.2 <- type.p.1.2
# Establish box constraints according to parameters in start
stnames <- names(start)
m <- length(stnames)
# Initialize all box constraints: (-Inf, Inf)
lower <- rep(-Inf, m)
upper <- rep(Inf, m)
# domain parametrization
if (int.type.p.1.2 == 1) {
# c has to be inside (-Inf, min(data))
upper[stnames == "p1"] <- min(data) - .epsilon
# d has to be inside (max(data), Inf)
lower[stnames == "p2"] <- max(data) + .epsilon
}
# location-scale parametrization
else{
# sigma has to be inside (0, Inf)
lower[stnames == "p2"] <- 0 + .epsilon
}
# tail weights parametrization
if (int.type.p.3.4 == 1) {
# Both tail weights have to be inside (0,1)
lower[stnames == "p3" | stnames == "p4"] <- 0 + .epsilon
upper[stnames == "p3" | stnames == "p4"] <- 1 - .epsilon
}
# asymmetry-steepness parametrization
else{
# asymmetry has to be inside (-1, 1)
# steepness has to be inside (0, 1)
lower[stnames == "p3"] <- -1 + .epsilon
lower[stnames == "p4"] <- 0 + .epsilon
upper[stnames == "p3" | stnames == "p4"] <- 1 - .epsilon
}
# nlminb optimization method
if(!is.null(custom.optim))
if(custom.optim=="nlminb")
custom.optim <- .m.nlminb
# qmedist function of fitdistrplus
qme <- fitdistrplus::qmedist(data, "BMT", probs = probs, qtype = qtype, start = start, fix.arg = fix.arg,
optim.method = optim.method, lower = lower, upper = upper,
custom.optim = custom.optim, silent = silent, ...)
# Estimation with location-scale parameterization might allow data outside the estimated domain
par <- append(qme$estimate,fix.arg)
if (int.type.p.1.2 == 2)
par <- BMTchangepars(par$p3, par$p4, par$type.p.3.4, par$p1, par$p2, par$type.p.1.2)
n.obs <- sum(data < par$p1 | data > par$p2)
if(n.obs > 0){
text <- paste("The resultant estimated domain is [",round(par$p1,4),",",round(par$p2,4),
"] and there are ",n.obs," observations outside it.",sep="")
warning(text)
}
return(qme)
}
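# A minimal interactive sketch (wrapped in `if (FALSE)` so it never runs when
# the file is sourced): with the location-scale parametrization the domain is
# only implied, so the post-fit check above may warn about observations
# outside it. The start values and object names here are illustrative.
if (FALSE) {
  set.seed(2018)
  x <- rBMT(100, 0.25, 0.75)
  # location-scale fit: p1 is the mean and p2 the standard deviation
  fit.ls <- BMTfit.qme(x, start = list(p3 = 0.5, p4 = 0.5,
                                       p1 = mean(x), p2 = sd(x)),
                       type.p.1.2 = "l-s")
  # recover the implied domain and count observations outside it
  par.cd <- BMTchangepars(fit.ls$estimate["p3"], fit.ls$estimate["p4"], "t w",
                          fit.ls$estimate["p1"], fit.ls$estimate["p2"], "l-s")
  sum(x < par.cd$p1 | x > par.cd$p2)
}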
# End of file: BMT/R/BMTfit.qme.R
#' @title The BMT Distribution Descriptive Measures - Kurtosis.
#' @description Kurtosis and steepness coefficient for the BMT distribution with
#' \code{p3} and \code{p4} tails weights (\eqn{\kappa_l} and \eqn{\kappa_r})
#' or asymmetry-steepness parameters (\eqn{\zeta} and \eqn{\xi}) and \code{p1}
#' and \code{p2} domain (minimum and maximum) or location-scale (mean and
#' standard deviation) parameters.
#' @rdname BMTkurtosis
#' @name BMTkurtosis
#' @aliases BMTkurt
#' @aliases BMTsteep
#'
#' @details See References.
#'
#' @param p3,p4 tails weights (\eqn{\kappa_l} and \eqn{\kappa_r}) or
#' asymmetry-steepness (\eqn{\zeta} and \eqn{\xi}) parameters of the BMT
#' distribution.
#' @param type.p.3.4 type of parametrization associated with p3 and p4. "t w" means
#' tails weights parametrization (default) and "a-s" means asymmetry-steepness
#' parametrization.
#' @param p1,p2 domain (minimum and maximum) or location-scale (mean and
#' standard deviation) parameters of the BMT distribution.
#' @param type.p.1.2 type of parametrization associated with p1 and p2. "c-d" means
#' domain parametrization (default) and "l-s" means location-scale
#' parametrization.
#'
#' @return \code{BMTkurt} gives the Pearson's kurtosis and \code{BMTsteep} the
#' proposed steepness coefficient for the BMT distribution.
#'
#' The arguments are recycled to the length of the result. Only the first
#' elements of \code{type.p.3.4} and \code{type.p.1.2} are used.
#'
#' If \code{type.p.3.4 == "t w"}, \code{p3 < 0} and \code{p3 > 1} are errors
#' and return \code{NaN}.
#'
#' If \code{type.p.3.4 == "a-s"}, \code{p3 < -1} and \code{p3 > 1} are errors
#' and return \code{NaN}.
#'
#' \code{p4 < 0} and \code{p4 > 1} are errors and return \code{NaN}.
#'
#' If \code{type.p.1.2 == "c-d"}, \code{p1 >= p2} is an error and returns
#' \code{NaN}.
#'
#' If \code{type.p.1.2 == "l-s"}, \code{p2 <= 0} is an error and returns
#' \code{NaN}.
#'
#' @references Torres-Jimenez, C. J. and Montenegro-Diaz, A. M. (2017, September),
#' \emph{An alternative to continuous univariate distributions supported on a
#' bounded interval: The BMT distribution}. ArXiv e-prints.
#'
#' Torres-Jimenez, C. J. (2018), \emph{The BMT Item Response Theory model: A
#' new skewed distribution family with bounded domain and an IRT model based
#' on it}, PhD thesis, Doctorado en ciencias - Estadistica, Universidad
#' Nacional de Colombia, Sede Bogota.
#'
#' @seealso \code{\link{BMTcentral}}, \code{\link{BMTdispersion}},
#' \code{\link{BMTskewness}}, \code{\link{BMTmoments}} for other descriptive
#' measures or moments.
#'
#' @author Camilo Jose Torres-Jimenez [aut,cre] \email{[email protected]}
#'
#' @examples
#' # BMT on [0,1] with left tail weight equal to 0.25 and
#' # right tail weight equal to 0.75
#' BMTkurt(0.25, 0.75, "t w")
#' BMTsteep(0.25, 0.75, "t w")
#'
#' # BMT on [0,1] with asymmetry coefficient equal to 0.5 and
#' # steepness coefficient equal to 0.5
#' BMTkurt(0.5, 0.5, "a-s")
#' BMTsteep(0.5, 0.5, "a-s")
#'
#' # domain or location-scale parameters do not affect
#' # the kurtosis and the steepness coefficient
#'
#' # BMT on [-1.783489,3.312195] with
#' # left tail weight equal to 0.25 and
#' # right tail weight equal to 0.75
#' BMTkurt(0.25, 0.75, "t w", -1.783489, 3.312195, "c-d")
#' BMTsteep(0.25, 0.75, "t w", -1.783489, 3.312195, "c-d")
#'
#' # BMT with mean equal to 0, standard deviation equal to 1,
#' # asymmetry coefficient equal to 0.5 and
#' # steepness coefficient equal to 0.5
#' BMTkurt(0.5, 0.5, "a-s", 0, 1, "l-s")
#' BMTsteep(0.5, 0.5, "a-s", 0, 1, "l-s")
#' @rdname BMTkurtosis
#' @export BMTkurt
BMTkurt <- function(p3, p4, type.p.3.4 = "t w",
p1 = 0, p2 = 1, type.p.1.2 = "c-d"){
# The length of the result is determined by the maximum of the lengths of the
# numerical arguments. The numerical arguments are recycled to the length of
# the result.
len <- max(length(p1),length(p2),length(p3),length(p4))
p1 <- rep(p1, len=len)
p2 <- rep(p2, len=len)
p3 <- rep(p3, len=len)
p4 <- rep(p4, len=len)
# Control type.p.3.4
TYPE.P.3.4 <- c("t w", "a-s")
int.type.p.3.4 <- pmatch(type.p.3.4, TYPE.P.3.4)
if (is.na(int.type.p.3.4))
stop("invalid type of parametrization for parameters 3 and 4")
if (int.type.p.3.4 == -1)
stop("ambiguous type of parametrization for parameters 3 and 4")
# tail weights or asymmetry-steepness parametrization
if(int.type.p.3.4 == 1){ # tail weights parametrization
# Control tail weights parameters
kappa_l <- replace(p3, p3 < 0 | p3 > 1, NaN)
kappa_r <- replace(p4, p4 < 0 | p4 > 1, NaN)
# kurtosis
m <- ((((6507*kappa_l - 43380)*kappa_l + 135900)*kappa_l - 150000)*kappa_l +
(((6507*kappa_r - 43380)*kappa_r + 135900)*kappa_r - 150000)*kappa_r +
((432*kappa_l - 28620 + 13122*kappa_r)*kappa_l +
(432*kappa_r - 28620)*kappa_r + 29700)*kappa_l*kappa_r + 125125) /
(10010000*BMTvar(kappa_l,kappa_r,type.p.3.4)^2)
}
else{ # asymmetry-steepness parametrization
# Control asymmetry-steepness parameters
zeta <- replace(p3, p3 < -1 | p3 > 1, NaN)
xi <- replace(p4, p4 < 0 | p4 > 1, NaN)
# kurtosis
abs.zeta <- abs(zeta)
m <- ((((((((27000*xi-54000)*xi+53460)*xi-26460)*xi+6507)*abs.zeta+
((((-108000*xi+306000)*xi-322920)*xi+185220)*xi-43380))*abs.zeta+
((((162000*xi-594000)*xi+786960)*xi-460260)*xi+135900))*abs.zeta+
((((-108000*xi+486000)*xi-819000)*xi+601500)*xi-150000))*abs.zeta+
((((27000*xi-144000)*xi+301500)*xi-300000)*xi+125125)) /
(10010000*BMTvar(zeta,xi,type.p.3.4)^2)
}
# domain or location-scale parameters do not affect the kurtosis
return(m)
}
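# Parametrization-invariance check (a sketch, kept inside `if (FALSE)` so it
# never runs on source): converting tail weights to asymmetry-steepness
# parameters with BMTchangepars should leave the kurtosis unchanged, since
# both branches above compute the same quantity.
if (FALSE) {
  p.as <- BMTchangepars(0.25, 0.75, "t w")
  all.equal(BMTkurt(0.25, 0.75, "t w"),
            BMTkurt(p.as$p3, p.as$p4, "a-s"))  # expected TRUE
}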
#' @rdname BMTkurtosis
#' @export BMTsteep
BMTsteep <- function(p3, p4, type.p.3.4 = "t w",
p1 = 0, p2 = 1, type.p.1.2 = "c-d"){
# The length of the result is determined by the maximum of the lengths of the
# numerical arguments. The numerical arguments are recycled to the length of
# the result.
len <- max(length(p1),length(p2),length(p3),length(p4))
p1 <- rep(p1, len=len)
p2 <- rep(p2, len=len)
p3 <- rep(p3, len=len)
p4 <- rep(p4, len=len)
# Control type.p.3.4
TYPE.P.3.4 <- c("t w", "a-s")
int.type.p.3.4 <- pmatch(type.p.3.4, TYPE.P.3.4)
if (is.na(int.type.p.3.4))
stop("invalid type of parametrization for parameters 3 and 4")
if (int.type.p.3.4 == -1)
stop("ambiguous type of parametrization for parameters 3 and 4")
# tail weights or asymmetry-steepness parametrization
if(int.type.p.3.4 == 1){ # tail weights parametrization
# Control tail weights parameters
kappa_l <- replace(p3, p3 < 0 | p3 > 1, NaN)
kappa_r <- replace(p4, p4 < 0 | p4 > 1, NaN)
# steepness coefficient
abs.dif <- abs(kappa_r - kappa_l)
m <- ifelse(abs.dif == 1, 1, (kappa_r + kappa_l - abs.dif) / (2*(1 - abs.dif)))
}
else{
# Control asymmetry-steepness parameters
xi <- replace(p4, p4 < 0 | p4 > 1, NaN)
# steepness coefficient
m <- xi
}
# domain or location-scale parameters do not affect the steepness coefficient
return(m)
}
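# Sketch (not run on source): in the asymmetry-steepness parametrization the
# steepness coefficient is xi itself, and after changing parametrization the
# tail-weights formula above should recover the same value, per the
# definition of xi in the references.
if (FALSE) {
  BMTsteep(0.3, 0.8, "a-s")  # 0.8
  p.tw <- BMTchangepars(0.3, 0.8, "a-s")
  all.equal(BMTsteep(p.tw$p3, p.tw$p4, "t w"), 0.8)  # expected TRUE
}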
# End of file: BMT/R/BMTkurtosis.R
#' @title The BMT Distribution Moments, Moment-Generating Function and
#' Characteristic Function.
#' @description Any raw, central or standardised moment, the moment-generating
#' function and the characteristic function for the BMT distribution, with
#' \code{p3} and \code{p4} tails weights (\eqn{\kappa_l} and \eqn{\kappa_r})
#' or asymmetry-steepness parameters (\eqn{\zeta} and \eqn{\xi}) and \code{p1}
#' and \code{p2} domain (minimum and maximum) or location-scale (mean and
#' standard deviation) parameters.
#' @rdname BMTmoments
#' @name BMTmoments
#' @aliases BMTmoment
#' @aliases BMTmgf
#' @aliases BMTchf
#'
#' @details See References.
#'
#' @param p3,p4 tails weights (\eqn{\kappa_l} and \eqn{\kappa_r}) or
#' asymmetry-steepness (\eqn{\zeta} and \eqn{\xi}) parameters of the BMT
#' distribution.
#' @param type.p.3.4 type of parametrization associated with p3 and p4. "t w" means
#' tails weights parametrization (default) and "a-s" means asymmetry-steepness
#' parametrization.
#' @param p1,p2 domain (minimum and maximum) or location-scale (mean and
#' standard deviation) parameters of the BMT distribution.
#' @param type.p.1.2 type of parametrization associated with p1 and p2. "c-d" means
#' domain parametrization (default) and "l-s" means location-scale
#' parametrization.
#' @param order order of the moment.
#' @param type type of the moment: raw, central or standardised (default).
#' @param method method to obtain the moment: exact formula or Gauss-Legendre
#' quadrature (default).
#' @param s variable for the moment-generating and characteristic functions.
#'
#' @return \code{BMTmoment} gives any raw, central or standardised moment,
#' \code{BMTmgf} the moment-generating function and \code{BMTchf} the
#' characteristic function.
#'
#' The arguments are recycled to the length of the result. Only the first
#' elements of \code{type.p.3.4}, \code{type.p.1.2}, \code{type} and
#' \code{method} are used.
#'
#' If \code{type.p.3.4 == "t w"}, \code{p3 < 0} and \code{p3 > 1} are errors
#' and return \code{NaN}.
#'
#' If \code{type.p.3.4 == "a-s"}, \code{p3 < -1} and \code{p3 > 1} are errors
#' and return \code{NaN}.
#'
#' \code{p4 < 0} and \code{p4 > 1} are errors and return \code{NaN}.
#'
#' If \code{type.p.1.2 == "c-d"}, \code{p1 >= p2} is an error and returns
#' \code{NaN}.
#'
#' If \code{type.p.1.2 == "l-s"}, \code{p2 <= 0} is an error and returns
#' \code{NaN}.
#'
#' @references Torres-Jimenez, C. J. and Montenegro-Diaz, A. M. (2017, September),
#' \emph{An alternative to continuous univariate distributions supported on a
#' bounded interval: The BMT distribution}. ArXiv e-prints.
#'
#' Torres-Jimenez, C. J. (2018), \emph{The BMT Item Response Theory model: A
#' new skewed distribution family with bounded domain and an IRT model based
#' on it}, PhD thesis, Doctorado en ciencias - Estadistica, Universidad
#' Nacional de Colombia, Sede Bogota.
#'
#' @seealso \code{\link{BMTcentral}}, \code{\link{BMTdispersion}},
#' \code{\link{BMTskewness}}, \code{\link{BMTkurtosis}} for specific
#' descriptive measures or moments.
#'
#' @author Camilo Jose Torres-Jimenez [aut,cre] \email{[email protected]}
#'
#' @examples
#' layout(matrix(1:4, 2, 2, TRUE))
#' s <- seq(-1, 1, length.out = 100)
#'
#' # BMT on [0,1] with left tail weight equal to 0.25 and
#' # right tail weight equal to 0.75
#' BMTmoment(0.25, 0.75, order = 5) # hyperskewness by Gauss-Legendre quadrature
#' BMTmoment(0.25, 0.75, order = 5, method = "exact") # hyperskewness by exact formula
#' mgf <- BMTmgf(s, 0.25, 0.75) # moment-generating function
#' plot(s, mgf, type="l")
#' chf <- BMTchf(s, 0.25, 0.75) # characteristic function
#'
#' # BMT on [0,1] with asymmetry coefficient equal to 0.5 and
#' # steepness coefficient equal to 0.5
#' BMTmoment(0.5, 0.5, "a-s", order = 5)
#' BMTmoment(0.5, 0.5, "a-s", order = 5, method = "exact")
#' mgf <- BMTmgf(s, 0.5, 0.5, "a-s")
#' plot(s, mgf, type="l")
#' chf <- BMTchf(s, 0.5, 0.5, "a-s")
#'
#' # BMT on [-1.783489, 3.312195] with
#' # left tail weight equal to 0.25 and
#' # right tail weight equal to 0.75
#' BMTmoment(0.25, 0.75, "t w", -1.783489, 3.312195, "c-d", order = 5)
#' BMTmoment(0.25, 0.75, "t w", -1.783489, 3.312195, "c-d", order = 5, method = "exact")
#' mgf <- BMTmgf(s, 0.25, 0.75, "t w", -1.783489, 3.312195, "c-d")
#' plot(s, mgf, type="l")
#' chf <- BMTchf(s, 0.25, 0.75, "t w", -1.783489, 3.312195, "c-d")
#'
#' # BMT with mean equal to 0, standard deviation equal to 1,
#' # asymmetry coefficient equal to 0.5 and
#' # steepness coefficient equal to 0.5
#' BMTmoment(0.5, 0.5, "a-s", 0, 1, "l-s", order = 5)
#' BMTmoment(0.5, 0.5, "a-s", 0, 1, "l-s", order = 5, method = "exact")
#' mgf <- BMTmgf(s, 0.5, 0.5, "a-s", 0, 1, "l-s")
#' plot(s, mgf, type="l")
#' chf <- BMTchf(s, 0.5, 0.5, "a-s", 0, 1, "l-s")
#' @rdname BMTmoments
#' @export BMTmoment
BMTmoment <- function(p3, p4, type.p.3.4 = "t w",
p1 = 0, p2 = 1, type.p.1.2 = "c-d",
order, type = "standardised", method = "quadrature"){
# Control order
is.wholenumber <- function(x, tol = .Machine$double.eps^0.5) abs(x - round(x)) < tol
if (any(!is.wholenumber(order)) || any(order < 1))
stop("order should be a vector of integers greater or equal than 1")
# Control type
type <- match.arg(type, c("raw","central","standardised"))
# Control method
method <- match.arg(method, c("quadrature","exact"))
# Control type.p.3.4
TYPE.P.3.4 <- c("t w", "a-s") # tail weights or asymmetry-steepness
int.type.p.3.4 <- pmatch(type.p.3.4, TYPE.P.3.4)
if (is.na(int.type.p.3.4))
stop("invalid type of parametrization for parameters 3 and 4")
if (int.type.p.3.4 == -1)
stop("ambiguous type of parametrization for parameters 3 and 4")
# Control type.p.1.2
TYPE.P.1.2 <- c("c-d", "l-s") # domain or location-scale
int.type.p.1.2 <- pmatch(type.p.1.2, TYPE.P.1.2)
if (is.na(int.type.p.1.2))
stop("invalid type of parametrization for parameters 1 and 2")
if (int.type.p.1.2 == -1)
stop("ambiguous type of parametrization for parameters 1 and 2")
# The length of the result is determined by the maximum of the lengths of the
# numerical arguments. The numerical arguments are recycled to the length of
# the result.
len1 <- max(length(p3),length(p4))
p3 <- rep(p3, len=len1)
p4 <- rep(p4, len=len1)
len2 <- max(length(p1),length(p2))
p1 <- rep(p1, len=len2)
p2 <- rep(p2, len=len2)
# domain or location-scale parametrization
if(int.type.p.1.2 == 1){ # domain parametrization
# Control domain parameters
min <- replace(p1, p1 >= p2, NaN)
max <- replace(p2, p1 >= p2, NaN)
# scale
a <- max - min
# shift
b <- min
}
else{ # location-scale parametrization
# Control location-scale parameters
mu <- p1
sigma <- replace(p2, p2 <= 0, NaN)
# scale
a <- sigma/BMTsd(p3, p4, type.p.3.4)
# shift
b <- mu - a * BMTmean(p3, p4, type.p.3.4)
}
# Obtain moments
if(method=="quadrature"){ # by quadrature
# Obtain coefficients of polynomials x.t and yf.t given tail weights or
# asymmetry-steepness parameters
if(int.type.p.3.4 == 1){ # tail weights parametrization
# Control tail weights parameters
kappa_l <- replace(p3, p3 < 0 | p3 > 1, NaN)
kappa_r <- replace(p4, p4 < 0 | p4 > 1, NaN)
# Coefficients a_3*t^3+a_2*t^2+a_1*t+a_0
a_3 <- 3*kappa_l+3*kappa_r-2
a_2 <- (-6*kappa_l-3*kappa_r+3)
a_1 <- (3*kappa_l)
}
else{ # asymmetry-steepness parametrization
# Control asymmetry-steepness parameters
zeta <- replace(p3, p3 < -1 | p3 > 1, NaN)
xi <- replace(p4, p4 < 0 | p4 > 1, NaN)
# Coefficients a_3*t^3+a_2*t^2+a_1*t+a_0
abs.zeta <- abs(zeta)
aux1 <- 0.5-xi
a_3 <- 6*(xi+abs.zeta*aux1)-2
a_2 <- -9*(xi+abs.zeta*aux1)+1.5*zeta+3
a_1 <- 3*(xi+abs.zeta*aux1)-1.5*zeta
}
# function
funct1 <- function(order,a_3,a_2,a_1,a,b){
# 10 points for the Gauss-Legendre quadrature over [0,1] (22 digits)
t <- 0.5*.GL.10.points + 0.5
# x.t
x.t <- .x.t(t, a_3, a_2, a_1)
# scaling and shifting (or minus mean for central or standardised)
if(a!=1)
x.t <- a * x.t
if(b!=0)
x.t <- x.t + b
# Derivative of yF.t
yFp.t <- 6*t*(1-t)
# Gauss-Legendre quadrature over [0,1]
return(0.5*sum(.GL.10.weights*(x.t^order)*yFp.t))
}
# by type of moment
if(type=="raw"){ # raw
# moments (vectorised form)
m <- mapply(funct1,order,a_3,a_2,a_1,a,b)
}
else{
# mean
mean <- BMTmean(p3, p4, type.p.3.4)
# moments (vectorised form)
m <- mapply(funct1,order,a_3,a_2,a_1,rep(1,len=len2),-mean)
if(type=="central"){ # central
# scaled moments
m <- a^order * m
}
else{ # standardised
# standard deviation
sigma <- BMTsd(p3, p4, type.p.3.4)
# standardised moments
m <- m / ( sigma^order )
}
}
}
else{ # by exact formula
# tail weights or asymmetry-steepness parametrization
if(int.type.p.3.4 == 1){ # tail weights parametrization
# Control tail weights parameters
kappa_l <- replace(p3, p3 < 0 | p3 > 1, NaN)
kappa_r <- replace(p4, p4 < 0 | p4 > 1, NaN)
}
else{ # asymmetry-steepness parametrization
# change parametrization
p <- BMTchangepars(p3, p4, type.p.3.4)
kappa_l <- p$p3
kappa_r <- p$p4
}
# function
funct2 <- function(kappa_l,kappa_r,order,a,b){
# Order 4 composition of order including zero
K <- partitions::compositions(order, 4, include.zero=TRUE)
# function for each term of the sum
term4 <- function(v,kappa_l,kappa_r,order,a,b){
term4 <- factorial(order) * 3^(v[2]+v[3]) *
ifelse(v[1]==0,1,(b)^v[1]) *
ifelse(v[2]==0,1,(a*kappa_l+b)^v[2]) *
ifelse(v[3]==0,1,(a*(1-kappa_r)+b)^v[3]) *
ifelse(v[4]==0,1,(a+b)^v[4]) /
factorial(v[1]) /
factorial(v[2]) /
factorial(v[3]) /
factorial(v[4]) /
choose((3*order+2),(1+v[2]+2*v[3]+3*v[4]))
return(term4)
}
return(2/(order+1) * sum(apply(K,2,term4,kappa_l,kappa_r,order,a,b)))
}
# by type of moment
if(type=="raw"){
# moment
m <- mapply(funct2,kappa_l,kappa_r,order,a,b)
}
else{
# mean
mean <- BMTmean(kappa_l, kappa_r)
# moment
m <- mapply(funct2,kappa_l,kappa_r,order,rep(1,len=len2),-mean)
if(type=="central"){ # central
# scaled moments
m <- a^order * m
}
else{ # standardised
# standard deviation
sigma <- BMTsd(kappa_l, kappa_r)
# standardised moments
m <- m / ( sigma^order )
}
}
}
return(m)
}
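# Consistency sketch (not run on source): the second central moment should
# coincide with BMTvar, and the exact formula should agree with the
# Gauss-Legendre quadrature, which is exact here since the integrand is a
# polynomial of degree 8.
if (FALSE) {
  all.equal(BMTmoment(0.25, 0.75, order = 2, type = "central"),
            BMTvar(0.25, 0.75))
  all.equal(BMTmoment(0.25, 0.75, order = 2, type = "central", method = "exact"),
            BMTmoment(0.25, 0.75, order = 2, type = "central"))
}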
#' @rdname BMTmoments
#' @export BMTmgf
BMTmgf <- function(s, p3, p4, type.p.3.4 = "t w",
p1 = 0, p2 = 1, type.p.1.2 = "c-d"){
# Control type.p.3.4
TYPE.P.3.4 <- c("t w", "a-s") # tail weights or asymmetry-steepness
int.type.p.3.4 <- pmatch(type.p.3.4, TYPE.P.3.4)
if (is.na(int.type.p.3.4))
stop("invalid type of parametrization for parameters 3 and 4")
if (int.type.p.3.4 == -1)
stop("ambiguous type of parametrization for parameters 3 and 4")
# Control type.p.1.2
TYPE.P.1.2 <- c("c-d", "l-s") # domain or location-scale
int.type.p.1.2 <- pmatch(type.p.1.2, TYPE.P.1.2)
if (is.na(int.type.p.1.2))
stop("invalid type of parametrization for parameters 1 and 2")
if (int.type.p.1.2 == -1)
stop("ambiguous type of parametrization for parameters 1 and 2")
# The length of the result is determined by the maximum of the lengths of the
# numerical arguments. The numerical arguments are recycled to the length of
# the result.
len1 <- max(length(p3),length(p4))
p3 <- rep(p3, len=len1)
p4 <- rep(p4, len=len1)
len2 <- max(length(s),length(p1),length(p2))
s <- rep(s, len=len2)
p1 <- rep(p1, len=len2)
p2 <- rep(p2, len=len2)
# Obtain coefficients of polynomials x.t and yf.t given tail weights or
# asymmetry-steepness parameters
if(int.type.p.3.4 == 1){ # tail weights parametrization
# Control tail weights parameters
kappa_l <- replace(p3, p3 < 0 | p3 > 1, NaN)
kappa_r <- replace(p4, p4 < 0 | p4 > 1, NaN)
# Coefficients a_3*t^3+a_2*t^2+a_1*t+a_0
a_3 <- 3*kappa_l+3*kappa_r-2
a_2 <- (-6*kappa_l-3*kappa_r+3)
a_1 <- (3*kappa_l)
}
else{ # asymmetry-steepness parametrization
# Control asymmetry-steepness parameters
zeta <- replace(p3, p3 < -1 | p3 > 1, NaN)
xi <- replace(p4, p4 < 0 | p4 > 1, NaN)
# Coefficients a_3*t^3+a_2*t^2+a_1*t+a_0
abs.zeta <- abs(zeta)
aux1 <- 0.5-xi
a_3 <- 6*(xi+abs.zeta*aux1)-2
a_2 <- -9*(xi+abs.zeta*aux1)+1.5*zeta+3
a_1 <- 3*(xi+abs.zeta*aux1)-1.5*zeta
}
#
funct3 <- function(s,a_3,a_2,a_1){
# 10 points for the Gauss-Legendre quadrature over [0,1] (22 digits)
t <- 0.5*.GL.10.points + 0.5
# x.t
x.t <- .x.t(t, a_3, a_2, a_1)
# Derivative of yF.t
yFp.t <- 6*t*(1-t)
# Gauss-Legendre quadrature over [0,1]
return(0.5*sum(.GL.10.weights*exp(s*x.t)*yFp.t))
}
# domain or location-scale parametrization
if(int.type.p.1.2 == 1){ # domain parametrization
# Control domain parameters
min <- replace(p1, p1 >= p2, NaN)
max <- replace(p2, p1 >= p2, NaN)
# range
range <- max - min
# scaled and shifted
y <- mapply(funct3,range*s,a_3,a_2,a_1)*exp(min*s)
}
else{ # location-scale parametrization
# Control location-scale parameters
mu <- p1
sigma <- replace(p2, p2 <= 0, NaN)
# range
range <- sigma/BMTsd(p3, p4, type.p.3.4)
# scaled and shifted
y <- mapply(funct3,range*s,a_3,a_2,a_1)*exp((mu-range*BMTmean(p3, p4, type.p.3.4))*s)
}
return(y)
}
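# Numerical sanity check (not run on source): the derivative of the
# moment-generating function at zero is the mean, so a central difference
# around s = 0 should approximate BMTmean. The step size h is illustrative.
if (FALSE) {
  h <- 1e-5
  deriv0 <- (BMTmgf(h, 0.25, 0.75) - BMTmgf(-h, 0.25, 0.75)) / (2 * h)
  all.equal(deriv0, BMTmean(0.25, 0.75), tolerance = 1e-6)  # M'(0) = E[X]
}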
#' @rdname BMTmoments
#' @export BMTchf
BMTchf <- function(s, p3, p4, type.p.3.4 = "t w",
p1 = 0, p2 = 1, type.p.1.2 = "c-d"){
y <- BMTmgf(1i*s, p3, p4, type.p.3.4, p1, p2, type.p.1.2)
return(y)
}
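# Basic properties of a characteristic function, checked only up to
# quadrature accuracy (a sketch, not run on source):
if (FALSE) {
  BMTchf(0, 0.25, 0.75)  # phi(0) = 1 (returned as a complex number)
  all(Mod(BMTchf(seq(-5, 5, by = 0.5), 0.25, 0.75)) <= 1 + 1e-8)  # |phi(s)| <= 1
}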
#' @rdname BMTmoments
#' @export mBMT
mBMT <- function(order, p3, p4, type.p.3.4, p1, p2, type.p.1.2){
fun <- switch(order,BMTmean,BMTsd,BMTskew,BMTkurt)
return(fun(p3, p4, type.p.3.4, p1, p2, type.p.1.2))
}
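# mBMT simply dispatches on `order`: 1 = mean, 2 = standard deviation,
# 3 = skewness, 4 = kurtosis. A one-line check (not run on source):
if (FALSE) {
  all.equal(mBMT(1, 0.25, 0.75, "t w", 0, 1, "c-d"), BMTmean(0.25, 0.75))
}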
# Global constants
# 10 points for the Gauss-Legendre quadrature over [-1,1] (22 digits)
.GL.10.points <- c(-0.973906528517171720078,
-0.8650633666889845107321,
-0.6794095682990244062343,
-0.4333953941292471907993,
-0.148874338981631210885,
0.1488743389816312108848,
0.433395394129247190799,
0.6794095682990244062343,
0.8650633666889845107321,
0.973906528517171720078)
# Weights for 10 points of the Gauss-Legendre quadrature
.GL.10.weights <- c(0.0666713443086881375936,
0.149451349150580593146,
0.2190863625159820439955,
0.2692667193099963550912,
0.295524224714752870174,
0.295524224714752870174,
0.2692667193099963550913,
0.219086362515982043995,
0.1494513491505805931458,
0.0666713443086881375936)
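# Sanity check for the quadrature constants (not run on source): a 10-point
# Gauss-Legendre rule is exact for polynomials up to degree 19; mapped to
# [0,1] it should reproduce, e.g., the integral of x^2, which is 1/3.
if (FALSE) {
  t <- 0.5 * .GL.10.points + 0.5
  0.5 * sum(.GL.10.weights * t^2)  # 0.3333333...
}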
# End of file: BMT/R/BMTmoments.R
#' @title The BMT Distribution Descriptive Measures - Skewness.
#' @description Skewness and an asymmetry coefficient for the BMT distribution,
#' with \code{p3} and \code{p4} tails weights (\eqn{\kappa_l} and
#' \eqn{\kappa_r}) or asymmetry-steepness parameters (\eqn{\zeta} and
#' \eqn{\xi}) and \code{p1} and \code{p2} domain (minimum and maximum) or
#' location-scale (mean and standard deviation) parameters.
#' @rdname BMTskewness
#' @name BMTskewness
#' @aliases BMTskew
#' @aliases BMTasymm
#'
#' @details See References.
#'
#' @param p3,p4 tails weights (\eqn{\kappa_l} and \eqn{\kappa_r}) or
#' asymmetry-steepness (\eqn{\zeta} and \eqn{\xi}) parameters of the BMT
#' distribution.
#' @param type.p.3.4 type of parametrization associated with p3 and p4. "t w" means
#' tails weights parametrization (default) and "a-s" means asymmetry-steepness
#' parametrization.
#' @param p1,p2 domain (minimum and maximum) or location-scale (mean and
#' standard deviation) parameters of the BMT distribution.
#' @param type.p.1.2 type of parametrization associated with p1 and p2. "c-d" means
#' domain parametrization (default) and "l-s" means location-scale
#' parametrization.
#'
#' @return \code{BMTskew} gives the Pearson's skewness and \code{BMTasymm} the
#' proposed asymmetry coefficient for the BMT distribution.
#'
#' The arguments are recycled to the length of the result. Only the first
#' elements of \code{type.p.3.4} and \code{type.p.1.2} are used.
#'
#' If \code{type.p.3.4 == "t w"}, \code{p3 < 0} and \code{p3 > 1} are errors
#' and return \code{NaN}.
#'
#' If \code{type.p.3.4 == "a-s"}, \code{p3 < -1} and \code{p3 > 1} are errors
#' and return \code{NaN}.
#'
#' \code{p4 < 0} and \code{p4 > 1} are errors and return \code{NaN}.
#'
#' If \code{type.p.1.2 == "c-d"}, \code{p1 >= p2} is an error and returns
#' \code{NaN}.
#'
#' If \code{type.p.1.2 == "l-s"}, \code{p2 <= 0} is an error and returns
#' \code{NaN}.
#'
#' @references Torres-Jimenez, C. J. and Montenegro-Diaz, A. M. (2017, September),
#' \emph{An alternative to continuous univariate distributions supported on a
#' bounded interval: The BMT distribution}. ArXiv e-prints.
#'
#' Torres-Jimenez, C. J. (2018), \emph{The BMT Item Response Theory model: A
#' new skewed distribution family with bounded domain and an IRT model based
#' on it}, PhD thesis, Doctorado en ciencias - Estadistica, Universidad
#' Nacional de Colombia, Sede Bogota.
#'
#' @seealso \code{\link{BMTcentral}}, \code{\link{BMTdispersion}},
#' \code{\link{BMTkurtosis}}, \code{\link{BMTmoments}} for other descriptive
#' measures or moments.
#'
#' @author Camilo Jose Torres-Jimenez [aut,cre] \email{[email protected]}
#'
#' @examples
#' # BMT on [0,1] with left tail weight equal to 0.25 and
#' # right tail weight equal to 0.75
#' BMTskew(0.25, 0.75, "t w")
#' BMTasymm(0.25, 0.75, "t w")
#'
#' # BMT on [0,1] with asymmetry coefficient equal to 0.5 and
#' # steepness coefficient equal to 0.5
#' BMTskew(0.5, 0.5, "a-s")
#' BMTasymm(0.5, 0.5, "a-s")
#'
#' # domain or location-scale parameters do not affect
#' # the skewness and the asymmetry coefficient
#'
#' # BMT on [-1.783489,3.312195] with
#' # left tail weight equal to 0.25 and
#' # right tail weight equal to 0.75
#' BMTskew(0.25, 0.75, "t w", -1.783489, 3.312195, "c-d")
#' BMTasymm(0.25, 0.75, "t w", -1.783489, 3.312195, "c-d")
#'
#' # BMT with mean equal to 0, standard deviation equal to 1,
#' # asymmetry coefficient equal to 0.5 and
#' # steepness coefficient equal to 0.5
#' BMTskew(0.5, 0.5, "a-s", 0, 1, "l-s")
#' BMTasymm(0.5, 0.5, "a-s", 0, 1, "l-s")
#' @rdname BMTskewness
#' @export BMTskew
BMTskew <- function(p3, p4, type.p.3.4 = "t w",
p1 = 0, p2 = 1, type.p.1.2 = "c-d"){
# The length of the result is determined by the maximum of the lengths of the
# numerical arguments. The numerical arguments are recycled to the length of
# the result.
len <- max(length(p1),length(p2),length(p3),length(p4))
p1 <- rep(p1, len=len)
p2 <- rep(p2, len=len)
p3 <- rep(p3, len=len)
p4 <- rep(p4, len=len)
# Control type.p.3.4
TYPE.P.3.4 <- c("t w", "a-s")
int.type.p.3.4 <- pmatch(type.p.3.4, TYPE.P.3.4)
if (is.na(int.type.p.3.4))
stop("invalid type of parametrization for parameters 3 and 4")
if (int.type.p.3.4 == -1)
stop("ambiguous type of parametrization for parameters 3 and 4")
# tail weights or asymmetry-steepness parametrization
if(int.type.p.3.4 == 1){ # tail weights parametrization
# Control tail weights parameters
kappa_l <- replace(p3, p3 < 0 | p3 > 1, NaN)
kappa_r <- replace(p4, p4 < 0 | p4 > 1, NaN)
# skewness
m <- 9*(kappa_r-kappa_l)*((13*kappa_l-65+4*kappa_r)*kappa_l+(13*kappa_r-65)*kappa_r+150) /
(77000*BMTvar(kappa_l,kappa_r,type.p.3.4)^1.5)
}
else{ # asymmetry-steepness parametrization
# Control asymmetry-steepness parameters
zeta <- replace(p3, p3 < -1 | p3 > 1, NaN)
xi <- replace(p4, p4 < 0 | p4 > 1, NaN)
# skewness
abs.zeta <- abs(zeta)
m <- 9*zeta*
((((30*xi-30)*xi+13)*abs.zeta+
((-60*xi+160)*xi-65))*abs.zeta+
((30*xi-130)*xi+150)) /
(77000*BMTvar(zeta,xi,type.p.3.4)^1.5)
}
# domain or location-scale parameters do not affect the skewness
return(m)
}
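# Antisymmetry sketch (not run on source): swapping the tail weights mirrors
# the distribution, so the skewness changes sign, and the symmetric case
# gives zero.
if (FALSE) {
  all.equal(BMTskew(0.25, 0.75, "t w"), -BMTskew(0.75, 0.25, "t w"))  # TRUE
  BMTskew(0.5, 0.5, "t w")  # 0
}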
#' @rdname BMTskewness
#' @export BMTasymm
BMTasymm <- function(p3, p4, type.p.3.4 = "t w",
p1 = 0, p2 = 1, type.p.1.2 = "c-d"){
# The length of the result is determined by the maximum of the lengths of the
# numerical arguments. The numerical arguments are recycled to the length of
# the result.
len <- max(length(p1),length(p2),length(p3),length(p4))
p1 <- rep(p1, len=len)
p2 <- rep(p2, len=len)
p3 <- rep(p3, len=len)
p4 <- rep(p4, len=len)
# Control type.p.3.4
TYPE.P.3.4 <- c("t w", "a-s")
int.type.p.3.4 <- pmatch(type.p.3.4, TYPE.P.3.4)
if (is.na(int.type.p.3.4))
stop("invalid type of parametrization for parameters 3 and 4")
if (int.type.p.3.4 == -1)
stop("ambiguous type of parametrization for parameters 3 and 4")
# tail weights or asymmetry-steepness parametrization
if(int.type.p.3.4 == 1){ # tail weights parametrization
# Control tail weights parameters
kappa_l <- replace(p3, p3 < 0 | p3 > 1, NaN)
kappa_r <- replace(p4, p4 < 0 | p4 > 1, NaN)
# asymmetry coefficient
m <- kappa_r - kappa_l
}
else{ # asymmetry-steepness parametrization
# Control asymmetry-steepness parameters
zeta <- replace(p3, p3 < -1 | p3 > 1, NaN)
# asymmetry coefficient
m <- zeta
}
# domain or location-scale parameters do not affect the asymmetry coefficient
return(m)
}
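# Quick illustration (not run on source) of the two branches above:
if (FALSE) {
  BMTasymm(0.25, 0.75, "t w")  # kappa_r - kappa_l = 0.5
  BMTasymm(0.5, 0.9, "a-s")    # zeta itself = 0.5
}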
# End of file: BMT/R/BMTskewness.R
#' @title Maximum Product of Spacing Fit of Univariate Distributions.
#' @description Fit of univariate distributions for non-censored data using
#' maximum product of spacing estimation (mpse), also called maximum spacing
#' estimation.
#' @rdname mpsedist
#' @name mpsedist
#'
#' @details The \code{mpsedist} function carries out the maximum product of
#' spacing estimation numerically, by maximization of the arithmetic mean of
#' the log-spacings \eqn{\log(F(x_{(k)}) - F(x_{(k-1)}))} of the ordered
#' sample, with \eqn{F(x_{(0)}) = 0} and \eqn{F(x_{(n+1)}) = 1}.
#'
#' The optimization process is the same as
#' \code{\link{mledist}}, see the 'details' section of that
#' function.
#'
#' Optionally, a vector of \code{weights} can be used in the fitting process.
#' By default (when \code{weights=NULL}), ordinary mpse is carried out,
#' otherwise the specified weights are used to compute a weighted arithmetic
#' mean.
#'
#' We believe this function should be added to the package
#' \code{\link{fitdistrplus}}. Until it is accepted and incorporated into that
#' package, it will remain in the package \code{\link{BMT}}. This function is
#' internally called in \code{\link{BMTfit.mpse}}.
#'
#' @param data A numeric vector with the observed values for non-censored data.
#' @param distr A character string \code{"name"} naming a distribution for which
#' the corresponding density function \code{dname} and the corresponding
#' distribution function \code{pname} must be classically defined.
#' @param start A named list giving the initial values of parameters of the
#' named distribution or a function of data computing initial values and
#' returning a named list. This argument may be omitted (default) for some
#' distributions for which reasonable starting values are computed (see the
#' 'details' section of \code{\link{mledist}}).
#' @param fix.arg An optional named list giving the values of fixed parameters
#' of the named distribution or a function of data computing (fixed) parameter
#' values and returning a named list. Parameters with fixed value are thus NOT
#' estimated.
#' @param optim.method \code{"default"} (see details) or an optimization method
#' to pass to \code{\link{optim}}.
#' @param lower Left bounds on the parameters for the \code{"L-BFGS-B"} method
#' (see \code{\link{optim}}) or the \code{\link{constrOptim}} function (as an
#' equivalent linear constraint).
#' @param upper Right bounds on the parameters for the \code{"L-BFGS-B"} method
#' (see \code{\link{optim}}) or the \code{\link{constrOptim}} function (as an
#' equivalent linear constraint).
#' @param custom.optim A function carrying the optimization (see details).
#' @param weights An optional vector of weights to be used in the fitting
#' process. Should be \code{NULL} or a numeric vector with strictly positive
#' numbers. If non-\code{NULL}, weighted mpse is used, otherwise ordinary
#' mpse.
#' @param silent A logical to remove or show warnings when bootstrapping.
#' @param gradient A function to return the gradient of the optimization
#' objective function for the \code{"BFGS"}, \code{"CG"} and \code{"L-BFGS-B"}
#' methods. If it is \code{NULL}, a finite-difference approximation will be
#' used, see \code{\link{optim}}.
#' @param \dots Further arguments passed to the \code{\link{optim}},
#' \code{\link{constrOptim}} or \code{custom.optim} function.
#'
#' @return \code{mpsedist} returns a list with the following components,
#'
#' \item{estimate}{ the parameter estimates.}
#'
#' \item{convergence}{ an integer code for the convergence of
#' \code{\link{optim}} defined as below or defined by the user in the
#' user-supplied optimization function.
#'
#' \code{0} indicates successful convergence.
#'
#' \code{1} indicates that the iteration limit of \code{\link{optim}} has been
#' reached.
#'
#' \code{10} indicates degeneracy of the Nelder-Mead simplex.
#'
#' \code{100} indicates that \code{\link{optim}} encountered an internal
#' error. }
#'
#' \item{value}{ the value of the optimization objective function at the solution found. }
#'
#' \item{loglik}{ the log-likelihood. }
#'
#' \item{hessian}{ a symmetric matrix computed by \code{\link{optim}} as an
#' estimate of the Hessian at the solution found or computed in the
#' user-supplied optimization function. }
#'
#' \item{optim.function }{ the name of the optimization function used. }
#'
#' \item{fix.arg}{ the named list giving the values of parameters of the named
#' distribution that must be kept fixed rather than estimated by maximum
#' likelihood or \code{NULL} if there are no such parameters. }
#'
#' \item{optim.method}{when \code{\link{optim}} is used, the name of the
#' algorithm used, \code{NULL} otherwise.}
#'
#' \item{fix.arg.fun}{the function used to set the value of \code{fix.arg} or
#' \code{NULL}.}
#'
#' \item{weights}{the vector of weights used in the estimation process or
#' \code{NULL}.}
#'
#' \item{counts}{A two-element integer vector giving the number of calls to
#' the log-likelihood function and its gradient respectively. This excludes
#' those calls needed to compute the Hessian, if requested, and any calls to
#' log-likelihood function to compute a finite-difference approximation to the
#' gradient. \code{counts} is returned by \code{\link{optim}} or the
#' user-supplied optimization function, or set to \code{NULL}.}
#'
#' \item{optim.message}{A character string giving any additional information
#' returned by the optimizer, or \code{NULL}. To understand exactly the
#' message, see the source code.}
#'
#' @references Cheng, R. and N. Amin (1983). \emph{Estimating parameters in
#' continuous univariate distributions with a shifted origin}. Journal of the
#' Royal Statistical Society. Series B (Methodological), 394-403.
#'
#' Ranneby, B. (1984). \emph{The maximum spacing method. an estimation method
#' related to the maximum likelihood method}. Scandinavian Journal of
#' Statistics, 93-112.
#'
#' @seealso \code{\link{mqdedist}}, \code{\link{mledist}},
#' \code{\link{mmedist}}, \code{\link{qmedist}},
#' \code{\link{mgedist}}, and \code{\link{optim}}.
#'
#' @author Camilo Jose Torres-Jimenez [aut,cre] \email{[email protected]}
#'
#' @source Based on the function mledist of the R package: fitdistrplus
#'
#' Delignette-Muller ML and Dutang C (2015), \emph{fitdistrplus: An R Package
#' for Fitting Distributions}. Journal of Statistical Software, 64(4), 1-34.
#'
#' Functions \code{checkparam} and \code{start.arg.default} are needed and
#' were copied from the same package (fitdistrplus version: 1.0-9).
#'
#' @examples
#' # (1) basic fit of a normal distribution
#' set.seed(1234)
#' x1 <- rnorm(n = 100)
#' mean(x1); sd(x1)
#' mpse1 <- mpsedist(x1, "norm")
#' mpse1$estimate
#'
#' # (2) defining your own distribution functions, here for the Gumbel
#' # distribution for other distributions, see the CRAN task view dedicated
#' # to probability distributions
#' dgumbel <- function(x, a, b) 1/b*exp((a-x)/b)*exp(-exp((a-x)/b))
#' pgumbel <- function(q, a, b) exp(-exp((a-q)/b))
#' qgumbel <- function(p, a, b) a-b*log(-log(p))
#' mpse1 <- mpsedist(x1, "gumbel", start = list(a = 10, b = 5))
#' mpse1$estimate
#'
#' # (3) fit a discrete distribution (Poisson)
#' set.seed(1234)
#' x2 <- rpois(n = 30, lambda = 2)
#' mpse2 <- mpsedist(x2, "pois")
#' mpse2$estimate
#'
#' # (4) fit a finite-support distribution (beta)
#' set.seed(1234)
#' x3 <- rbeta(n = 100, shape1 = 5, shape2 = 10)
#' mpse3 <- mpsedist(x3, "beta")
#' mpse3$estimate
#'
#' # (5) fit frequency distributions on USArrests dataset.
#' x4 <- USArrests$Assault
#' mpse4pois <- mpsedist(x4, "pois")
#' mpse4pois$estimate
#' mpse4nbinom <- mpsedist(x4, "nbinom")
#' mpse4nbinom$estimate
#'
#' # (6) weighted fit of a normal distribution
#' set.seed(1234)
#' w1 <- runif(101)
#' mpse1 <- mpsedist(x1, "norm", weights = w1)
#' mpse1$estimate
#'
#' @keywords distribution
###################
#' @rdname mpsedist
#' @export mpsedist
mpsedist <- function (data, distr, start = NULL, fix.arg = NULL, optim.method = "default",
lower = -Inf, upper = Inf, custom.optim = NULL, weights = NULL,
silent = TRUE, gradient = NULL, ...)
{
if (!is.character(distr))
stop("distr must be a character string naming a distribution")
else distname <- distr
ddistname <- paste("d", distname, sep = "")
if (!exists(ddistname, mode = "function"))
stop(paste("The ", ddistname, " function must be defined"))
pdistname <- paste("p", distname, sep = "")
if (!exists(pdistname, mode = "function"))
stop(paste("The ", pdistname, " function must be defined"))
if (is.null(custom.optim))
optim.method <- match.arg(optim.method, c("default",
"Nelder-Mead", "BFGS", "CG", "L-BFGS-B", "SANN",
"Brent"))
start.arg <- start
if (is.vector(start.arg))
start.arg <- as.list(start.arg)
txt1 <- "data must be a numeric vector of length greater than 1 for non censored data"
# txt2 <- "or a dataframe with two columns named left and right and more than one line for censored data"
if (!is.null(weights)) {
if (any(weights <= 0))
stop("weights should be a vector of numbers greater than 0")
if (length(weights) != NROW(data) + 1)
stop("weights should be a vector with a length equal to the number of observations plus one (one weight per spacing)")
warning("weights are not taken into account in the default initial values")
}
if (is.vector(data)) {
cens <- FALSE
if (!(is.numeric(data) & length(data) > 1))
stop(txt1)
}
else {
stop("Maximum product of spacing estimation is not yet available for censored data.")
# cens <- TRUE
# censdata <- data
# if (!(is.vector(censdata$left) & is.vector(censdata$right) &
# length(censdata[, 1]) > 1))
# stop(paste(txt1, txt2))
}
# if (cens) {
# irow.lcens <- is.na(censdata$left)
# lcens <- censdata[irow.lcens, ]$right
# if (any(is.na(lcens)))
# stop("An observation cannot be both right and left censored, coded with two NA values")
# irow.rcens <- is.na(censdata$right)
# rcens <- censdata[irow.rcens, ]$left
# irow.ncens <- censdata$left == censdata$right & !is.na(censdata$left) &
# !is.na(censdata$right)
# ncens <- censdata[irow.ncens, ]$left
# irow.icens <- censdata$left != censdata$right & !is.na(censdata$left) &
# !is.na(censdata$right)
# icens <- censdata[irow.icens, ]
# data <- c(rcens, lcens, ncens, (icens$left + icens$right)/2)
# }
argpdistname <- names(formals(pdistname))
chfixstt <- checkparam(start.arg = start.arg, fix.arg = fix.arg,
argdistname = argpdistname, errtxt = NULL,
data10 = head(data, 10), distname = distname)
if (!chfixstt$ok)
stop(chfixstt$txt)
if (is.function(chfixstt$start.arg))
vstart <- unlist(chfixstt$start.arg(data))
else vstart <- unlist(chfixstt$start.arg)
if (is.function(fix.arg)) {
fix.arg.fun <- fix.arg
fix.arg <- fix.arg(data)
}
else fix.arg.fun <- NULL
if (distname == "unif") {
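# For the uniform distribution the maximum product of spacing estimators
# have a closed form: the spacings are jointly maximized at
# min = (n*x_(1) - x_(n))/(n-1) and max = (n*x_(n) - x_(1))/(n-1),
# so no numerical optimization is needed.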
n <- length(data)
data <- sort(data)
par <- c(min = (n*data[1]-data[n])/(n-1), max = (n*data[n] - data[1])/(n-1))
par <- c(par[!names(par) %in% names(fix.arg)], unlist(fix.arg))
value <- unname(sum(log(diff(c(par["min"],data,par["max"])))) - (n+1)*log(par["max"]-par["min"]))
res <- list(estimate = par[!names(par) %in% names(fix.arg)], convergence = 0,
value = value,
loglik = .loglik(par[!names(par) %in% names(fix.arg)], fix.arg, data, ddistname),
hessian = NA, optim.function = NA, fix.arg = fix.arg)
return(res)
}
if (!cens && is.null(weights)) {
fnobj <- function(par, fix.arg, obs, pdistnam, ddistnam) {
obs <- sort(obs)
spacing <- diff(c(0, do.call(pdistnam, c(list(obs), as.list(par), as.list(fix.arg))), 1))
if(any(is.nan(spacing)))
return(NaN)
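# Zero (or numerically zero) spacings arise from tied observations and
# would make the log diverge; each such spacing is replaced by the
# density at the repeated observation, a standard treatment for ties
# (cf. Cheng & Amin, 1983, cited above).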
ind <- abs(spacing) < .epsilon
if(any(ind)){
aux <- c(obs[1],obs)[ind]
spacing[ind] <- do.call(ddistnam, c(list(aux), as.list(par), as.list(fix.arg)))
}
-sum(log(spacing))
}
}
else if (!cens && !is.null(weights)) {
fnobj <- function(par, fix.arg, obs, pdistnam, ddistnam) {
obs <- sort(obs)
spacing <- diff(c(0, do.call(pdistnam, c(list(obs), as.list(par), as.list(fix.arg))), 1))
if(any(is.nan(spacing)))
return(NaN)
ind <- abs(spacing) < .epsilon
if(any(ind)){
aux <- c(obs[1],obs)[ind]
spacing[ind] <- do.call(ddistnam, c(list(aux), as.list(par), as.list(fix.arg)))
}
-sum(weights * log(spacing))
}
}
# else if (cens && is.null(weights)) {
# argpdistname <- names(formals(pdistname))
# if (("log" %in% argddistname) & ("log.p" %in% argpdistname)) {
# fnobjcens <- function(par, fix.arg, rcens, lcens, icens, ncens, ddistnam, pdistnam) {
# - sum(do.call(ddistnam, c(list(x = ncens), as.list(par), as.list(fix.arg), list(log = TRUE)))) -
# sum(do.call(pdistnam, c(list(q = lcens), as.list(par), as.list(fix.arg), list(log = TRUE)))) -
# sum(do.call(pdistnam, c(list(q = rcens), as.list(par), as.list(fix.arg), list(lower.tail = FALSE), list(log = TRUE)))) -
# sum(log(do.call(pdistnam, c(list(q = icens$right), as.list(par), as.list(fix.arg))) -
# do.call(pdistnam, c(list(q = icens$left), as.list(par), as.list(fix.arg)))))
# }
# }
# else {
# fnobjcens <- function(par, fix.arg, rcens, lcens, icens, ncens, ddistnam, pdistnam) {
# -sum(log(do.call(ddistnam, c(list(x = ncens), as.list(par), as.list(fix.arg))))) -
# sum(log(do.call(pdistnam, c(list(q = lcens), as.list(par), as.list(fix.arg))))) -
# sum(log(1 - do.call(pdistnam, c(list(q = rcens), as.list(par), as.list(fix.arg))))) -
# sum(log(do.call(pdistnam, c(list(q = icens$right), as.list(par), as.list(fix.arg))) -
# do.call(pdistnam, c(list(q = icens$left), as.list(par), as.list(fix.arg)))))
# }
# }
# }
# else if (cens && !is.null(weights)) {
# fnobjcens <- function(par, fix.arg, rcens, lcens, icens, ncens, ddistnam, pdistnam) {
# p1 <- log(do.call(ddistnam, c(list(x = ncens), as.list(par), as.list(fix.arg))))
# p2 <- log(do.call(pdistnam, c(list(q = lcens), as.list(par), as.list(fix.arg))))
# p3 <- log(1 - do.call(pdistnam, c(list(q = rcens), as.list(par), as.list(fix.arg))))
# p4 <- log(do.call(pdistnam, c(list(q = icens$right), as.list(par), as.list(fix.arg))) -
# do.call(pdistnam, c(list(q = icens$left), as.list(par), as.list(fix.arg)))) -
# sum(weights[irow.ncens] * p1) - sum(weights[irow.lcens] * p2) -
# sum(weights[irow.rcens] * p3) - sum(weights[irow.icens] * p4)
# }
# }
owarn <- getOption("warn")
if (is.null(custom.optim)) {
hasbound <- any(is.finite(lower) | is.finite(upper))
if (optim.method == "default") {
meth <- ifelse(length(vstart) > 1, "Nelder-Mead",
"BFGS")
}
else meth <- optim.method
if (meth == "BFGS" && hasbound && is.null(gradient)) {
meth <- "L-BFGS-B"
txt1 <- "The BFGS method cannot be used with bounds without provided the gradient."
txt2 <- "The method is changed to L-BFGS-B."
warning(paste(txt1, txt2))
}
options(warn = ifelse(silent, -1, 0))
if (hasbound) {
if (!is.null(gradient)) {
opt.fun <- "constrOptim"
}
else {
if (meth == "Nelder-Mead")
opt.fun <- "constrOptim"
else if (meth %in% c("L-BFGS-B", "Brent"))
opt.fun <- "optim"
else {
txt1 <- paste("The method", meth, "cannot be used by constrOptim() nor optim() without gradient and bounds.")
txt2 <- "Only optimization methods L-BFGS-B, Brent and Nelder-Mead can be used in such case."
stop(paste(txt1, txt2))
}
}
if (opt.fun == "constrOptim") {
npar <- length(vstart)
lower <- as.double(rep_len(lower, npar))
upper <- as.double(rep_len(upper, npar))
haslow <- is.finite(lower)
Mat <- diag(npar)[haslow, ]
hasupp <- is.finite(upper)
Mat <- rbind(Mat, -diag(npar)[hasupp, ])
colnames(Mat) <- names(vstart)
rownames(Mat) <- paste0("constr", 1:NROW(Mat))
Bnd <- c(lower[is.finite(lower)], -upper[is.finite(upper)])
names(Bnd) <- paste0("constr", 1:length(Bnd))
initconstr <- Mat %*% vstart - Bnd
if (any(initconstr < 0))
stop("Starting values must be in the feasible region.")
# if (!cens) {
opttryerror <- try(opt <- constrOptim(theta = vstart,
f = fnobj, ui = Mat, ci = Bnd, grad = gradient,
fix.arg = fix.arg, obs = data,
pdistnam = pdistname, ddistnam = ddistname,
hessian = !is.null(gradient), method = meth,
...), silent = TRUE)
# }
# else opttryerror <- try(opt <- constrOptim(theta = vstart,
# f = fnobjcens, ui = Mat, ci = Bnd, grad = gradient,
# ddistnam = ddistname, rcens = rcens, lcens = lcens,
# icens = icens, ncens = ncens, pdistnam = pdistname,
# fix.arg = fix.arg, obs = data, hessian = !is.null(gradient),
# method = meth, ...), silent = TRUE)
if (!inherits(opttryerror, "try-error"))
if (length(opt$counts) == 1)
opt$counts <- c(opt$counts, NA)
}
else {
# if (!cens)
opttryerror <- try(opt <- optim(par = vstart,
fn = fnobj, fix.arg = fix.arg, obs = data,
pdistnam = pdistname, ddistnam = ddistname,
gr = gradient, hessian = TRUE,
method = meth, lower = lower, upper = upper,
...), silent = TRUE)
# else opttryerror <- try(opt <- optim(par = vstart,
# fn = fnobjcens, fix.arg = fix.arg, gr = gradient,
# rcens = rcens, lcens = lcens, icens = icens,
# ncens = ncens, ddistnam = ddistname, pdistnam = pdistname,
# hessian = TRUE, method = meth, lower = lower,
# upper = upper, ...), silent = TRUE)
}
}
else {
opt.fun <- "optim"
# if (!cens)
opttryerror <- try(opt <- optim(par = vstart,
fn = fnobj, fix.arg = fix.arg, obs = data,
pdistnam = pdistname, ddistnam = ddistname,
gr = gradient, hessian = TRUE,
method = meth, lower = lower, upper = upper,
...), silent = TRUE)
# else opttryerror <- try(opt <- optim(par = vstart,
# fn = fnobjcens, fix.arg = fix.arg, gr = gradient,
# rcens = rcens, lcens = lcens, icens = icens,
# ncens = ncens, ddistnam = ddistname, pdistnam = pdistname,
# hessian = TRUE, method = meth, lower = lower,
# upper = upper, ...), silent = TRUE)
}
options(warn = owarn)
if (inherits(opttryerror, "try-error")) {
warnings("The function optim encountered an error and stopped.")
if (getOption("show.error.messages"))
print(attr(opttryerror, "condition"))
return(list(estimate = rep(NA, length(vstart)),
convergence = 100, value=NA, loglik = NA, hessian = NA,
optim.function = opt.fun, fix.arg = fix.arg,
optim.method = meth, fix.arg.fun = fix.arg.fun,
counts = c(NA, NA)))
}
if (opt$convergence > 0) {
warnings("The function optim failed to converge, with the error code ",
opt$convergence)
}
if (is.null(names(opt$par)))
names(opt$par) <- names(vstart)
res <- list(estimate = opt$par, convergence = opt$convergence, value = -opt$value,
loglik = .loglik(opt$par, fix.arg, data, ddistname),
hessian = opt$hessian, optim.function = opt.fun,
fix.arg = fix.arg, optim.method = meth, fix.arg.fun = fix.arg.fun,
weights = weights, counts = opt$counts, optim.message = opt$message)
}
else {
options(warn = ifelse(silent, -1, 0))
# if (!cens)
opttryerror <- try(opt <- custom.optim(fn = fnobj,
fix.arg = fix.arg, obs = data,
pdistnam = pdistname, ddistnam = ddistname,
par = vstart, ...), silent = TRUE)
# else opttryerror <- try(opt <- custom.optim(fn = fnobjcens,
# fix.arg = fix.arg, rcens = rcens, lcens = lcens,
# icens = icens, ncens = ncens, ddistnam = ddistname,
# pdistnam = pdistname, par = vstart, ...), silent = TRUE)
options(warn = owarn)
if (inherits(opttryerror, "try-error")) {
warnings("The customized optimization function encountered an error and stopped.")
if (getOption("show.error.messages"))
print(attr(opttryerror, "condition"))
return(list(estimate = rep(NA, length(vstart)),
convergence = 100, value = NA, loglik = NA, hessian = NA,
optim.function = custom.optim, fix.arg = fix.arg,
fix.arg.fun = fix.arg.fun, counts = c(NA, NA)))
}
if (opt$convergence > 0) {
warnings("The customized optimization function failed to converge, with the error code ",
opt$convergence)
}
argdot <- list(...)
method.cust <- argdot[names(argdot) == "method"]
if (length(method.cust) == 0) {
method.cust <- NULL
}
if (is.null(names(opt$par)))
names(opt$par) <- names(vstart)
res <- list(estimate = opt$par, convergence = opt$convergence, value = -opt$value,
loglik = .loglik(opt$par, fix.arg, data, ddistname),
hessian = opt$hessian, optim.function = custom.optim,
fix.arg = fix.arg, method = method.cust, fix.arg.fun = fix.arg.fun,
weights = weights, counts = opt$counts, optim.message = opt$message)
}
return(res)
}
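# A minimal verification sketch (not run on source): the `value` component
# returned above is the maximized sum of log-spacings, so recomputing the
# objective by hand at the estimate should reproduce it. Object names are
# illustrative.
if (FALSE) {
  set.seed(1234)
  x <- rnorm(100)
  fit <- mpsedist(x, "norm")
  Fx <- pnorm(sort(x), fit$estimate["mean"], fit$estimate["sd"])
  sum(log(diff(c(0, Fx, 1))))  # should match fit$value
}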
# End of file: BMT/R/mpsedist.R
#' @title Minimum Quantile Distance Fit of Univariate Distributions.
#' @description Fit of univariate distributions for non-censored data using
#' minimum quantile distance estimation (mqde), which can also be called
#' maximum quantile goodness-of-fit estimation.
#' @rdname mqdedist
#' @name mqdedist
#'
#' @details The \code{mqdedist} function carries out the minimum quantile
#' distance estimation numerically, by minimization of a distance between
#' observed and theoretical quantiles.
#'
#' The optimization process is the same as
#' \code{\link{mledist}}, see the 'details' section of that
#' function.
#'
#' Optionally, a vector of \code{weights} can be used in the fitting process.
#' By default (when \code{weights=NULL}), ordinary mqde is carried out,
#' otherwise the specified weights are used to compute a weighted distance.
#'
#' We believe this function should be added to the package
#' \code{\link{fitdistrplus}}. Until it is accepted and incorporated into that
#' package, it will remain in the package \code{\link{BMT}}. This function is
#' internally called in \code{\link{BMTfit.mqde}}.
#'
#' @param data A numeric vector with the observed values for non-censored data.
#' @param distr A character string \code{"name"} naming a distribution for which
#' the corresponding quantile function \code{qname} and the corresponding
#' density distribution \code{dname} must be classically defined.
#' @param probs A numeric vector of the probabilities for which the minimum
#' quantile distance estimation is done. \eqn{p[k] = (k - 0.5) / n} (default).
#' @param qtype The quantile type used by the R \code{\link{quantile}} function
#' to compute the empirical quantiles. Type 5 (default), i.e. \eqn{x[k]} is
#' both the \eqn{k}th order statistic and the type 5 sample quantile of
#' \eqn{p[k] = (k - 0.5) / n}.
#' @param dist The distance measure between observed and theoretical quantiles
#' to be used. This must be one of "euclidean" (default), "maximum", or
#' "manhattan". Any unambiguous substring can be given.
#' @param start A named list giving the initial values of parameters of the
#' named distribution or a function of data computing initial values and
#' returning a named list. This argument may be omitted (default) for some
#' distributions for which reasonable starting values are computed (see the
#' 'details' section of \code{\link{mledist}}).
#' @param fix.arg An optional named list giving the values of fixed parameters
#' of the named distribution or a function of data computing (fixed) parameter
#' values and returning a named list. Parameters with fixed value are thus NOT
#' estimated.
#' @param optim.method \code{"default"} (see details) or optimization method to pass to
#' \code{\link{optim}}.
#' @param lower Left bounds on the parameters for the \code{"L-BFGS-B"} method
#' (see \code{\link{optim}}) or the \code{\link{constrOptim}} function (as an
#' equivalent linear constraint).
#' @param upper Right bounds on the parameters for the \code{"L-BFGS-B"} method
#' (see \code{\link{optim}}) or the \code{\link{constrOptim}} function (as an
#' equivalent linear constraint).
#' @param custom.optim A function carrying the optimization (see details).
#' @param weights An optional vector of weights to be used in the fitting
#' process. Should be \code{NULL} or a numeric vector with strictly positive
#' numbers. If non-\code{NULL}, weighted mqde is used, otherwise ordinary
#' mqde.
#' @param silent A logical to remove or show warnings when bootstrapping.
#' @param gradient A function to return the gradient of the optimization
#' objective function for the \code{"BFGS"}, \code{"CG"} and \code{"L-BFGS-B"}
#' methods. If it is \code{NULL}, a finite-difference approximation will be
#' used, see \code{\link{optim}}.
#' @param \dots Further arguments passed to the \code{\link{optim}},
#' \code{\link{constrOptim}} or \code{custom.optim} function.
#'
#' @return \code{mqdedist} returns a list with the following components,
#'
#' \item{estimate}{ the parameter estimates.}
#'
#' \item{convergence}{ an integer code for the convergence of
#' \code{\link{optim}} defined as below or defined by the user in the
#' user-supplied optimization function.
#'
#' \code{0} indicates successful convergence.
#'
#' \code{1} indicates that the iteration limit of \code{\link{optim}} has been
#' reached.
#'
#' \code{10} indicates degeneracy of the Nelder-Mead simplex.
#'
#' \code{100} indicates that \code{\link{optim}} encountered an internal
#' error. }
#'
#' \item{value}{the value of the optimization objective function at the solution found.}
#'
#' \item{hessian}{ a symmetric matrix computed by \code{\link{optim}} as an
#' estimate of the Hessian at the solution found or computed in the
#' user-supplied optimization function. }
#'
#' \item{probs}{ the probability vector on which observed and theoretical quantiles were calculated. }
#'
#' \item{dist}{ the name of the distance between observed and theoretical quantiles used. }
#'
#' \item{optim.function }{ the name of the optimization function used. }
#'
#' \item{fix.arg}{ the named list giving the values of parameters of the named
#' distribution that must be kept fixed rather than estimated, or \code{NULL}
#' if there are no such parameters. }
#'
#' \item{loglik}{ the log-likelihood. }
#'
#' \item{optim.method}{when \code{\link{optim}} is used, the name of the
#' algorithm used, \code{NULL} otherwise.}
#'
#' \item{fix.arg.fun}{the function used to set the value of \code{fix.arg} or
#' \code{NULL}.}
#'
#' \item{weights}{the vector of weights used in the estimation process or
#' \code{NULL}.}
#'
#' \item{counts}{A two-element integer vector giving the number of calls to
#' the objective function and its gradient respectively. This excludes
#' those calls needed to compute the Hessian, if requested, and any calls to
#' the objective function to compute a finite-difference approximation to the
#' gradient. \code{counts} is returned by \code{\link{optim}} or the
#' user-supplied optimization function, or set to \code{NULL}.}
#'
#' \item{optim.message}{A character string giving any additional information
#' returned by the optimizer, or \code{NULL}. To understand exactly the
#' message, see the source code.}
#'
#' @references LaRiccia, V. N. (1982). \emph{Asymptotic Properties of Weighted
#' \eqn{L^2} Quantile Distance Estimators}. The Annals of Statistics, 10(2),
#' 621-624.
#'
#' Torres-Jimenez, C. J. (2017, September), \emph{Comparison of estimation methods
#' for the BMT distribution}. ArXiv e-prints.
#'
#' @seealso \code{\link{mpsedist}}, \code{\link{mledist}},
#' \code{\link{mmedist}}, \code{\link{qmedist}},
#' \code{\link{mgedist}}, \code{\link{optim}},
#' \code{\link{constrOptim}}, and \code{\link{quantile}}.
#'
#' @author Camilo Jose Torres-Jimenez [aut,cre] \email{[email protected]}
#'
#' @source Based on the function mledist of the R package: fitdistrplus
#'
#' Delignette-Muller ML and Dutang C (2015), \emph{fitdistrplus: An R Package
#' for Fitting Distributions}. Journal of Statistical Software, 64(4), 1-34.
#'
#' Functions \code{checkparam} and \code{start.arg.default} are needed and
#' were copied from the same package (fitdistrplus version: 1.0-9).
#'
#' @examples
#' # (1) basic fit of a normal distribution
#' set.seed(1234)
#' x1 <- rnorm(n = 100)
#' mean(x1); sd(x1)
#' mqde1 <- mqdedist(x1, "norm")
#' mqde1$estimate
#'
#' # (2) defining your own distribution functions, here for the Gumbel
#' # distribution; for other distributions, see the CRAN task view dedicated
#' # to probability distributions
#' dgumbel <- function(x, a, b) 1/b*exp((a-x)/b)*exp(-exp((a-x)/b))
#' pgumbel <- function(q, a, b) exp(-exp((a-q)/b))
#' qgumbel <- function(p, a, b) a-b*log(-log(p))
#' mqde1 <- mqdedist(x1, "gumbel", start = list(a = 10, b = 5))
#' mqde1$estimate
#'
#' # (3) fit a discrete distribution (Poisson)
#' set.seed(1234)
#' x2 <- rpois(n = 30, lambda = 2)
#' mqde2 <- mqdedist(x2, "pois")
#' mqde2$estimate
#'
#' # (4) fit a finite-support distribution (beta)
#' set.seed(1234)
#' x3 <- rbeta(n = 100, shape1 = 5, shape2 = 10)
#' mqde3 <- mqdedist(x3, "beta")
#' mqde3$estimate
#'
#' # (5) fit frequency distributions on USArrests dataset.
#' x4 <- USArrests$Assault
#' mqde4pois <- mqdedist(x4, "pois")
#' mqde4pois$estimate
#' mqde4nbinom <- mqdedist(x4, "nbinom")
#' mqde4nbinom$estimate
#'
#' # (6) weighted fit of a normal distribution
#' set.seed(1234)
#' w1 <- runif(100)
#' weighted.mean(x1, w1)
#' mqde1 <- mqdedist(x1, "norm", weights = w1)
#' mqde1$estimate
#'
#' @keywords distribution
###################
#' @rdname mqdedist
#' @export mqdedist
mqdedist <- function (data, distr, probs = (1:length(data)-0.5)/length(data), qtype = 5,
dist = "euclidean", start = NULL, fix.arg = NULL, optim.method = "default",
lower = -Inf, upper = Inf, custom.optim = NULL, weights = NULL,
silent = TRUE, gradient = NULL, ...)
{
if (!is.character(distr))
stop("distr must be a character string naming a distribution")
else distname <- distr
qdistname <- paste("q", distname, sep = "")
if (!exists(qdistname, mode = "function"))
stop(paste("The ", qdistname, " function must be defined"))
ddistname <- paste("d", distname, sep = "")
if (!exists(ddistname, mode = "function"))
stop(paste("The ", ddistname, " function must be defined"))
if (!(is.vector(probs) & is.numeric(probs)) | anyNA(probs) | any(probs <= 0 | probs >= 1))
stop("probs must be a numeric vector with all elements greater than zero and less than one")
probs <- unique(sort(probs))
if (qtype < 0 || qtype > 9)
stop("wrong type for the R quantile function")
if (is.null(custom.optim))
optim.method <- match.arg(optim.method, c("default", "Nelder-Mead", "BFGS", "CG",
"L-BFGS-B", "SANN", "Brent"))
int.dist <- pmatch(dist, c("euclidean", "maximum", "manhattan"))
if (is.na(int.dist))
stop("invalid distance measure to be used")
if (int.dist == -1)
stop("ambiguous distance measure to be used")
start.arg <- start
if (is.vector(start.arg))
start.arg <- as.list(start.arg)
my3dots <- list(...)
if (!is.null(weights)) {
if (any(weights <= 0))
stop("weights should be a vector of numbers greater than 0")
if (length(weights) != NROW(probs))
stop("weights should be a vector with a length equal to the the probabilities probs")
warning("weights are not taken into account in the default initial values")
}
if (is.vector(data)) {
cens <- FALSE
if (!(is.numeric(data) & length(data) > 1))
stop("data must be a numeric vector of length greater than 1 for non-censored data")
# \n or a dataframe with two columns named left and right and more than one line for censored data")
}
else {
stop("Minimum quantile distance estimation is not yet available for censored data.")
# cens <- TRUE
# censdata <- data
# if (!(is.vector(censdata$left) & is.vector(censdata$right) &
# length(censdata[, 1]) > 1))
# stop("data must be a numeric vector of length greater than 1 for non censored data\n or a dataframe with two columns named left and right and more than one line for censored data")
# pdistname <- paste("p", distname, sep = "")
# if (!exists(pdistname, mode = "function"))
# stop(paste("The ", pdistname, " function must be defined to apply maximum likelihood to censored data"))
}
# if (cens) {
# lcens <- censdata[is.na(censdata$left), ]$right
# if (any(is.na(lcens)))
# stop("An observation cannot be both right and left censored, coded with two NA values")
# rcens <- censdata[is.na(censdata$right), ]$left
# ncens <- censdata[censdata$left == censdata$right &
# !is.na(censdata$left) & !is.na(censdata$right),
# ]$left
# icens <- censdata[censdata$left != censdata$right &
# !is.na(censdata$left) & !is.na(censdata$right),
# ]
# data <- c(rcens, lcens, ncens, (icens$left + icens$right)/2)
# }
argqdistname <- names(formals(qdistname))
chfixstt <- checkparam(start.arg = start.arg, fix.arg = fix.arg,
argdistname = argqdistname, errtxt = NULL,
data10 = head(data, 10), distname = distname)
if (!chfixstt$ok)
stop(chfixstt$txt)
if (is.function(chfixstt$start.arg))
vstart <- unlist(chfixstt$start.arg(data))
else vstart <- unlist(chfixstt$start.arg)
if (is.function(fix.arg)) {
fix.arg.fun <- fix.arg
fix.arg <- fix.arg(data)
}
else fix.arg.fun <- NULL
if(qtype == 0)
s <- data
else
s <- quantile(data, probs = probs, type = qtype, names = FALSE)
# if (!cens) {
if(is.null(weights)){
if (int.dist == 1) # euclidean
fnobj <- function(par, fix.arg, obs, qdistnam, probs, weights) {
theoq <- do.call(qdistnam, c(list(p = probs), as.list(par), as.list(fix.arg)))
sum((obs - theoq)^2)
}
else if (int.dist == 2) # maximum
fnobj <- function(par, fix.arg, obs, qdistnam, probs, weights) {
theoq <- do.call(qdistnam, c(list(p = probs), as.list(par), as.list(fix.arg)))
max(abs(obs - theoq))
}
else if (int.dist == 3) # manhattan
fnobj <- function(par, fix.arg, obs, qdistnam, probs, weights) {
theoq <- do.call(qdistnam, c(list(p = probs), as.list(par), as.list(fix.arg)))
sum(abs(obs - theoq))
}
}
else{
if (int.dist == 1) # euclidean
fnobj <- function(par, fix.arg, obs, qdistnam, probs, weights) {
theoq <- do.call(qdistnam, c(list(p = probs), as.list(par), as.list(fix.arg)))
sum(weights*(obs - theoq)^2)
}
else if (int.dist == 2) # maximum
fnobj <- function(par, fix.arg, obs, qdistnam, probs, weights) {
theoq <- do.call(qdistnam, c(list(p = probs), as.list(par), as.list(fix.arg)))
max(weights*abs(obs - theoq))
}
else if (int.dist == 3) # manhattan
fnobj <- function(par, fix.arg, obs, qdistnam, probs, weights) {
theoq <- do.call(qdistnam, c(list(p = probs), as.list(par), as.list(fix.arg)))
sum(weights*abs(obs - theoq))
}
}
# }
# else stop("Minimum quantile distance estimation is not yet available for censored data.")
owarn <- getOption("warn")
if (is.null(custom.optim)) {
hasbound <- any(is.finite(lower) | is.finite(upper))
if (optim.method == "default") {
meth <- ifelse(length(vstart) > 1, "Nelder-Mead", "BFGS")
}
else meth <- optim.method
if (meth == "BFGS" && hasbound && is.null(gradient)) {
meth <- "L-BFGS-B"
txt1 <- "The BFGS method cannot be used with bounds without provided the gradient."
txt2 <- "The method is changed to L-BFGS-B."
warning(paste(txt1, txt2))
}
options(warn = ifelse(silent, -1, 0))
if (hasbound) {
if (!is.null(gradient)) {
opt.fun <- "constrOptim"
}
else {
if (meth == "Nelder-Mead")
opt.fun <- "constrOptim"
else if (meth %in% c("L-BFGS-B", "Brent"))
opt.fun <- "optim"
else {
txt1 <- paste("The method", meth, "cannot be used by constrOptim() nor optim() without gradient and bounds.")
txt2 <- "Only optimization methods L-BFGS-B, Brent and Nelder-Mead can be used in such case."
stop(paste(txt1, txt2))
}
}
if (opt.fun == "constrOptim") {
npar <- length(vstart)
lower <- as.double(rep_len(lower, npar))
upper <- as.double(rep_len(upper, npar))
haslow <- is.finite(lower)
Mat <- diag(npar)[haslow, ]
hasupp <- is.finite(upper)
Mat <- rbind(Mat, -diag(npar)[hasupp, ])
colnames(Mat) <- names(vstart)
rownames(Mat) <- paste0("constr", 1:NROW(Mat))
Bnd <- c(lower[is.finite(lower)], -upper[is.finite(upper)])
names(Bnd) <- paste0("constr", 1:length(Bnd))
initconstr <- Mat %*% vstart - Bnd
if (any(initconstr < 0))
stop("Starting values must be in the feasible region.")
opttryerror <- try(opt <- constrOptim(theta = vstart,
f = fnobj, ui = Mat, ci = Bnd, grad = gradient,
fix.arg = fix.arg, obs = s, qdistnam = qdistname,
probs = probs, weights = weights,
hessian = !is.null(gradient), method = meth,
...), silent = TRUE)
if (!inherits(opttryerror, "try-error"))
if (length(opt$counts) == 1)
opt$counts <- c(opt$counts, NA)
}
else {
opttryerror <- try(opt <- optim(par = vstart,
fn = fnobj, fix.arg = fix.arg, obs = s,
gr = gradient, qdistnam = qdistname,
probs = probs, weights = weights, hessian = TRUE,
method = meth, lower = lower, upper = upper,
...), silent = TRUE)
}
}
else {
opt.fun <- "optim"
opttryerror <- try(opt <- optim(par = vstart, fn = fnobj,
fix.arg = fix.arg, obs = s, gr = gradient,
qdistnam = qdistname, probs = probs, weights = weights,
hessian = TRUE, method = meth,
lower = lower, upper = upper, ...), silent = TRUE)
}
options(warn = owarn)
if (inherits(opttryerror, "try-error")) {
warnings("The function optim encountered an error and stopped.")
if (getOption("show.error.messages"))
print(attr(opttryerror, "condition"))
      return(list(estimate = rep(NA, length(vstart)),
                  convergence = 100, value = NA, loglik = NA, hessian = NA))
}
    if (opt$convergence > 0) {
      warning("The function optim failed to converge, with the error code ",
              opt$convergence)
    }
if (is.null(names(opt$par)))
names(opt$par) <- names(vstart)
res <- list(estimate = opt$par, convergence = opt$convergence, value = opt$value,
hessian = opt$hessian, probs = probs, dist = dist,
optim.function = opt.fun, fix.arg = fix.arg,
loglik = .loglik(opt$par, fix.arg, data, ddistname),
optim.method = meth, fix.arg.fun = fix.arg.fun, weights = weights,
counts = opt$counts, optim.message = opt$message)
}
else {
options(warn = ifelse(silent, -1, 0))
# if (!cens)
opttryerror <- try(opt <- custom.optim(fn = fnobj,
fix.arg = fix.arg, obs = s, qdistnam = qdistname,
probs = probs, weights = weights,
par = vstart, ...), silent = TRUE)
# else stop("Maximum goodness-of-fit estimation is not yet available for censored data.")
options(warn = owarn)
if (inherits(opttryerror, "try-error")) {
warnings("The customized optimization function encountered an error and stopped.")
if (getOption("show.error.messages"))
print(attr(opttryerror, "condition"))
return(list(estimate = rep(NA, length(vstart)),
convergence = 100, value = NA, hessian = NA))
}
    if (opt$convergence > 0) {
      warning("The customized optimization function failed to converge, with the error code ",
              opt$convergence)
    }
if (is.null(names(opt$par)))
names(opt$par) <- names(vstart)
res <- list(estimate = opt$par, convergence = opt$convergence, value = opt$value,
probs = probs, dist = dist, hessian = opt$hessian,
optim.function = custom.optim, fix.arg = fix.arg,
loglik = .loglik(opt$par, fix.arg, data, ddistname),
optim.method = NULL, fix.arg.fun = fix.arg.fun, weights = weights,
counts = opt$counts, optim.message = opt$message)
}
return(res)
}
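# Illustrative usage (not run): a minimal sketch showing the alternative
# distance measures and box constraints; x1 is an assumed standard normal sample.
# x1 <- rnorm(100)
# mqdedist(x1, "norm", dist = "manhattan")$estimate
# mqdedist(x1, "norm", optim.method = "L-BFGS-B",
#          lower = c(-Inf, 1e-6))$estimate  # constrain sd to stay positive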
|
/scratch/gouwar.j/cran-all/cranData/BMT/R/mqdedist.R
|
#' @name score.math.booklet10.PISA2012
#' @docType data
#' @title A Performance Score in Mathematics from Booklet 10 of PISA 2012.
#' @description Classic performance score in mathematics, computed from the students' responses to Booklet 10 of the PISA test applied in 2012. We use the answers to the PISA 2012 questionnaire, taking the ``Scored cognitive item response data file''. We keep all questions with a binary score (correct/incorrect) and exclude those that could be scored with partial credit. The responses are then recoded: 1 for correct and 0 for incorrect. Finally, we obtain the percentage of correct answers, i.e., the classic performance score of each student. Note that the reported scores of PISA 2012 are estimated and scaled using the Rasch model of item response theory. Moreover, the booklets of the test can differ in the number of questions, the traits evaluated, and the participating countries, and the assignment of a booklet to a student is randomized. For these reasons, we choose a single arbitrary booklet, Booklet 10, and its mathematics questions.
#' @usage data(score.math.booklet10.PISA2012)
#' @format \code{score.math.booklet10.PISA2012} is a vector.
#' @source OECD Programme for International Student Assessment (PISA) (2012). Database -
#' PISA 2012. Online; accessed 2014-08-23.
#' \url{https://www.oecd.org/pisa/pisaproducts/pisa2012database-downloadabledata.htm}.
#' @author Camilo Jose Torres-Jimenez [aut,cre] \email{[email protected]}
#' @keywords datasets
NULL
|
/scratch/gouwar.j/cran-all/cranData/BMT/R/score.math.booklet10.PISA2012.R
|
## Taken from the fitdistrplus package
# checkparam function checks start.arg and fix.arg that parameters are named correctly
# INPUTS
# start.arg : starting values for optimization or the function to compute them from data
# fix.arg : fixed values of parameters or the function to compute them from data
# argdistname : parameter names of the distribution
# errtxt : error text messages
# data10 : the first ten values of data
# distname : name of the distribution
# OUTPUTS
# a named list with components: ok (TRUE or FALSE), txt (NULL or the error message),
# start.arg : a named list of starting values for optimization
# or a function to compute them from data
checkparam <- function(start.arg, fix.arg, argdistname, errtxt=NULL, data10, distname)
{
if(is.null(errtxt))
errtxt <- list(t0="Fixed values must be either a named list or a function returning a named list.",
t1="Starting values must be either a named list or a function returning a named list.",
t2="Starting and fixed values must be either a named list or a function returning a named list.",
t3="'start' must specify names which are arguments to 'distr'.",
t4="'fix.arg' must specify names which are arguments to 'distr'.",
t5="A distribution parameter cannot be specified both in 'start' and 'fix.arg'.")
#t6 = "Unknown starting values..."
#before any treatment
start.arg.was.null <- is.null(start.arg)
#if clause with 4 different cases:
#start.arg \ fix.arg | NULL | non NULL
# NULL | 1 | 2
# non NULL | 3 | 4
if(is.null(start.arg) && is.null(fix.arg)) #1
{ #default case from fitdist, mledist,...
start.arg <- start.arg.default(data10, distr=distname)
}else if(is.null(start.arg) && !is.null(fix.arg)) #2
{ #fix.arg should be a function or a named list
if(!is.list(fix.arg) && !is.function(fix.arg))
return(list(ok=FALSE, txt=errtxt$t0))
#get param names
if(is.function(fix.arg))
namarg <- names(fix.arg(data10))
else
namarg <- names(fix.arg)
start.arg <- start.arg.default(data10, distr=distname) #could return "Unknown starting values..."
start.arg <- start.arg[!names(start.arg) %in% namarg]
}else if(!is.null(start.arg) && is.null(fix.arg)) #3
{ #start should be a function or a named list
if(!is.list(start.arg) && !is.function(start.arg))
return(list(ok=FALSE, txt=errtxt$t1))
}else if(!is.null(start.arg) && !is.null(fix.arg)) #4
{
#fix.arg and start should be a function or a named list
if( (!is.list(fix.arg) && !is.function(fix.arg)) || (!is.list(start.arg) && !is.function(start.arg)) )
return(list(ok=FALSE, txt=errtxt$t2))
}else
stop("wrong implementation")
#check start
#start.arg : function() | list()
#start.arg cannot be null because set to a named list (by start.arg.default) when NULL
if(is.function(start.arg)) #a function
{
start2 <- start.arg(data10)
if(!is.list(start2) && is.null(names(start2))) #check a named list
return(list(ok=FALSE, txt=errtxt$t3))
vstart <- unlist(start2)
}else #a list
vstart <- unlist(start.arg)
m <- match(names(vstart), argdistname)
if (any(is.na(m))) #check unexpected names
return(list(ok=FALSE, txt=errtxt$t3))
#check fix.arg
#fix.arg : function() | list() | NULL
if(is.function(fix.arg)) #a function
{
fix.arg2 <- fix.arg(data10)
if(!is.list(fix.arg2) && is.null(names(fix.arg2))) #check a named list
return(list(ok=FALSE, txt=errtxt$t4))
vfix.arg <- unlist(fix.arg2)
}else if(is.list(fix.arg)) #a list
vfix.arg <- unlist(fix.arg)
else
vfix.arg <- NULL
mfix <- match(names(vfix.arg), argdistname)
if (any(is.na(mfix))) #check unexpected names
return(list(ok=FALSE, txt=errtxt$t4))
# check that some parameters are not both in fix.arg and start
minter <- match(names(vstart), names(vfix.arg))
if (any(!is.na(minter)))
return(list(ok=FALSE, txt=errtxt$t5))
#prepare the starg.arg for outputs, i.e. when start.arg=NULL,
# returns start.arg.default if not fixed param
# returns a subset of start.arg.default if fixed param
if(start.arg.was.null && is.null(fix.arg))
start.arg <- function(x) start.arg.default(x, distr=distname) #could return "Unknown starting values..."
else if(start.arg.was.null && !is.null(fix.arg))
{
if(is.function(fix.arg))
namarg <- names(fix.arg(data10))
else
namarg <- names(fix.arg)
start.arg <- function(x){
start.arg <- start.arg.default(x, distr=distname) #could return "Unknown starting values..."
start.arg[!names(start.arg) %in% namarg]
}
}
#otherwise start.arg is a named list or a function
return(list(ok=TRUE, txt=NULL, start.arg=start.arg))
}
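# Illustrative usage (not run): a minimal sketch of the internal call made by
# mqdedist; the argument values here are assumptions for illustration only.
# chk <- checkparam(start.arg = list(mean = 0, sd = 1), fix.arg = NULL,
#                   argdistname = names(formals(qnorm)),
#                   data10 = rnorm(10), distname = "norm")
# chk$ok         # TRUE when the names match the arguments of qnorm
# chk$start.arg  # the validated starting values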
|
/scratch/gouwar.j/cran-all/cranData/BMT/R/util-checkparam.R
|
# start.arg.default function returns initial values of parameters generally using moments or quantiles
# INPUTS
#x : data vector or matrix
#distr : the distribution name
# OUTPUTS
# a named list or raises an error
start.arg.default <- function(x, distr)
{
if (distr == "norm") {
n <- length(x)
sd0 <- sqrt((n - 1)/n) * sd(x)
mx <- mean(x)
start <- list(mean=mx, sd=sd0)
}else if (distr == "lnorm") {
if (any(x <= 0))
stop("values must be positive to fit a lognormal distribution")
n <- length(x)
lx <- log(x)
sd0 <- sqrt((n - 1)/n) * sd(lx)
ml <- mean(lx)
start <- list(meanlog=ml, sdlog=sd0)
}else if (distr == "pois") {
start <- list(lambda=mean(x))
}else if (distr == "exp") {
if (any(x < 0))
stop("values must be positive to fit an exponential distribution")
start <- list(rate=1/mean(x))
}else if (distr == "gamma") {
if (any(x < 0))
stop("values must be positive to fit an gamma distribution")
n <- length(x)
m <- mean(x)
v <- (n - 1)/n*var(x)
start <- list(shape=m^2/v, rate=m/v)
}else if (distr == "nbinom") {
n <- length(x)
m <- mean(x)
v <- (n - 1)/n*var(x)
size <- ifelse(v > m, m^2/(v - m), 100)
start <- list(size = size, mu = m)
}else if (distr == "geom" ) {
m <- mean(x)
prob <- ifelse(m>0, 1/(1+m), 1)
start <- list(prob=prob)
}else if (distr == "beta") {
if (any(x < 0) | any(x > 1))
stop("values must be in [0-1] to fit a beta distribution")
n <- length(x)
m <- mean(x)
v <- (n - 1)/n*var(x)
aux <- m*(1-m)/v - 1
start <- list(shape1=m*aux, shape2=(1-m)*aux)
}else if (distr == "weibull") {
if (any(x < 0))
stop("values must be positive to fit an Weibull distribution")
m <- mean(log(x))
v <- var(log(x))
shape <- 1.2/sqrt(v)
scale <- exp(m + 0.572/shape)
start <- list(shape = shape, scale = scale)
}else if (distr == "logis") {
n <- length(x)
m <- mean(x)
v <- (n - 1)/n*var(x)
start <- list(location=m, scale=sqrt(3*v)/pi)
}else if (distr == "cauchy") {
start <- list(location=median(x), scale=IQR(x)/2)
}else if (distr == "unif"){
start <- list(min=0, max=1)
}else if (distr == "invgamma")
{
if (any(x < 0))
stop("values must be positive to fit an inverse gamma distribution")
#http://en.wikipedia.org/wiki/Inverse-gamma_distribution
m1 <- mean(x)
m2 <- mean(x^2)
shape <- (2*m2-m1^2)/(m2-m1^2)
scale <- m1*m2/(m2-m1^2)
start <- list(shape=shape, scale=scale)
}else if (distr == "llogis")
{
if (any(x < 0))
stop("values must be positive to fit a log-logistic distribution")
p25 <- as.numeric(quantile(x, 0.25))
p75 <- as.numeric(quantile(x, 0.75))
shape <- 2*log(3)/(log(p75)-log(p25))
    # the scale of a log-logistic is its median, i.e. the geometric mean of symmetric quantiles
    scale <- exp((log(p75)+log(p25))/2)
start <- list(shape=shape, scale=scale)
}else if (distr == "invweibull")
{
if (any(x < 0))
stop("values must be positive to fit an inverse Weibull distribution")
g <- log(log(4))/(log(log(4/3)))
p25 <- as.numeric(quantile(x, 0.25))
p75 <- as.numeric(quantile(x, 0.75))
shape <- exp((g*log(p75)-log(p25))/(g-1))
    scale <- log(log(4))/(log(shape)-log(p25))
start <- list(shape=shape, scale=max(scale, 1e-9))
}else if (distr == "pareto1")
{
if (any(x < 0))
stop("values must be positive to fit a Pareto distribution")
#http://www.math.umt.edu/gideon/pareto.pdf
x1 <- min(x)
m1 <- mean(x)
n <- length(x)
shape <- (n*m1-x1)/(n*(m1-x1))
min <- x1*(n*shape - 1)/(n*shape)
start <- list(shape=shape, min=min)
}else if (distr == "pareto")
{
if (any(x < 0))
stop("values must be positive to fit a Pareto distribution")
m1 <- mean(x)
m2 <- mean(x^2)
scale <- (m1*m2)/(m2-2*m1^2)
shape <- 2*(m2-m1^2)/(m2-2*m1^2)
start <- list(shape=shape, scale=scale)
}else
stop(paste0("Unknown starting values for distribution ", distr, "."))
return(start)
}
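# Illustrative usage (not run): moment-matching starting values, a minimal sketch.
# set.seed(1)
# start.arg.default(rgamma(50, shape = 2, rate = 1), "gamma")
# # returns list(shape = m^2/v, rate = m/v) computed from the sample mean and variance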
|
/scratch/gouwar.j/cran-all/cranData/BMT/R/util-startarg.R
|
#==================================================================================================#
# Date:
# Description: Automatic Bayesian estimation of an MTAR model: fills in missing data,
#              selects the number of regimes and estimates all parameters
#==================================================================================================#
auto_mtar = function(Yt, Zt = NULL, Xt = NULL, l0_min = 2, l0_max = 3,
maxorders = list(pj = 2,qj = 0,dj = 0),
niter = 3000, chain = FALSE, method = 'KUO', parallel = FALSE) {
if (!is.logical(chain)) {stop('chain must be a logical object')}
if (!is.list(maxorders) | length(maxorders) != 3) {
stop('maxorders must be a list of length 3 list(pj, qj, dj)')
}else if (!{all(names(maxorders) %in% c('pj','qj','dj'))}) {
stop('maxorders must be a list of length 3 list(pj, qj, dj)')
}
data = tsregime(Yt,Zt,Xt)
k = data$k
nu = data$nu
results = vector('list')
if (is.null(nu)) {nu = 0}
pjmax = maxorders$pj
qjmax = maxorders$qj
djmax = maxorders$dj
if (any(is.na(data$Yt)) | any(is.na(data$Xt)) | any(is.na(data$Zt))) {
data_temp = data
meanY = apply(data_temp$Yt,2,mean,na.rm = T)
nasy = apply(data_temp$Yt,2,is.na)
meanZ = mean(data_temp$Zt,na.rm = T)
if (nu > 1) {
      meanX = apply(data_temp$Xt,2,mean,na.rm = T)
nasx = apply(data_temp$Xt,2,is.na)
}else{
meanX = mean(data_temp$Xt,na.rm = T)
}
for (i in 1:k) {
data_temp$Yt[nasy[,i],i] = meanY[i]
}
data_temp$Zt[is.na(data_temp$Zt)] = meanZ
if (nu > 1) {
for (i in 1:nu) {
data_temp$Xt[nasx[,i],i] = meanX[i]
}
}else{
data_temp$Xt[is.na(data_temp$Xt)] = meanX
}
data_temp = tsregime(data_temp$Yt,data_temp$Zt,data_temp$Xt)
initial = mtarinipars(tsregime_obj = data_temp,
list_model = list(l0_max = l0_max,l0_min = l0_min),method = method)
numregest_1 = mtarnumreg(ini_obj = initial,niter_m = niter,NAIC = TRUE,
ordersprev = list(maxpj = pjmax,maxqj = qjmax,maxdj = djmax),parallel = parallel)
l_1 = numregest_1$NAIC_final_m
estrucopt = numregest_1$list_m[[paste0('m',l_1)]]$par
initial = mtarinipars(tsregime_obj = data_temp,method = method,
list_model = list(pars = list(l = l_1),
orders = list(pj = estrucopt$orders$pj,qj = estrucopt$orders$qj,dj = estrucopt$orders$dj)))
est_1 = mtarstr(ini_obj = initial,niter = niter,chain = chain, parallel = parallel)
initial = mtarinipars(tsregime_obj = data_temp,
list_model = list(pars = list(l = l_1,r = est_1$r,
orders = list(pj = est_1$orders$pj, qj = est_1$orders$qj,dj = est_1$orders$dj))))
missingest = mtarmissing(ini_obj = initial,niter = niter)
results$missing = missingest
data_complete = missingest$tsregim
}else{
data_complete = data
}
initial = mtarinipars(tsregime_obj = data_complete,
list_model = list(l0_max = l0_max,l0_min = l0_min),method = method)
numregest_final = mtarnumreg(ini_obj = initial,niter_m = niter,chain_m = chain,list_m = TRUE,
ordersprev = list(maxpj = pjmax, maxqj = qjmax, maxdj = djmax), parallel = parallel)
lf = numregest_final$final_m
estrucopt = numregest_final$list_m[[paste0('m',lf)]]$par
initial = mtarinipars(tsregime_obj = data_complete,method = method,
list_model = list(pars = list(l = lf),
orders = list(pj = estrucopt$orders$pj,
qj = estrucopt$orders$qj,dj = estrucopt$orders$dj)))
est_final = mtarstr(ini_obj = initial,niter = niter,chain = chain,parallel = parallel)
results$tsregim = data_complete
results$numreg = numregest_final
results$pars = est_final
return(results)
}
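# Illustrative usage (not run): a minimal sketch on simulated data; the sizes
# and niter below are arbitrary assumptions chosen to keep the run short.
# Yt <- matrix(rnorm(400), ncol = 2)  # bivariate output process
# Zt <- rnorm(200)                    # univariate threshold process
# fit <- auto_mtar(Yt, Zt, niter = 500)
# names(fit)  # "tsregim", "numreg", "pars" (and "missing" if the data had NAs)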
|
/scratch/gouwar.j/cran-all/cranData/BMTAR/R/auto_mtar.R
|
#==================================================================================================#
# Date: 14/04/2020
# Description: Display some graphics for residual analysis
# Function:
#==================================================================================================#
diagnostic_mtar = function(regime_model, lagmax = NULL, alpha = '0.05'){
if (!requireNamespace('ggplot2', quietly = TRUE)) {
stop('ggplot2 is needed for this function to work')
}else {
if (!inherits(regime_model, 'regime_model')) {
stop('diagnostic.mtar requires a regime_model object')
}}
  if (is.numeric(alpha)) {alpha = format(alpha, nsmall = 2)}  # e.g. 0.1 becomes "0.10"
if (!{alpha %in% c('0.10','0.05','0.025','0.01','0.005')}) {
stop('alpha should take values in c(0.10,0.05,0.025,0.01,0.005)')
}
tablasq = data.frame('0.10' = c( 1.0729830,-0.6698868,-0.5816458),
'0.05' = c(1.2238734,-0.6700069,-0.7351697),
'0.025' = c(1.3581015,-0.6701218,-0.8858694),
'0.01' = c(1.5174271,-0.6702672,-1.0847745),
'0.005' = c(1.6276236,-0.6703724,-1.2365861))
tablac = data.frame('0.10' = 0.850,'0.05' = 0.948,'0.025' = 1.036,'0.01' = 1.143,'0.005' = 1.217)
e_k = tsregime(as.matrix(regime_model$residuals))
p1 = autoplot.tsregime(e_k) + ggplot2::geom_hline(yintercept = 0,color = "red") +
    ggplot2::ggtitle('Residual series plot')
e_data = as.data.frame(e_k$Yt)
time = seq(1,nrow(e_data))
dat = data.frame(label = 'Series.1',time = time,value = e_data[,1],
cusum = cumsum(e_data[,1])/stats::sd(e_data[,1]),
cumsq = c(cumsum(e_data[,1]^2)/sum(e_data[,1]^2)))
if (ncol(e_data) > 1) {
for (i in 2:ncol(e_data)) {
dat = rbind.data.frame(dat,data.frame(label = paste0('Series.',i),time = time,value = e_data[,i],
cusum = cumsum(e_data[,i])/stats::sd(e_data[,i]),
cumsq = c(cumsum(e_data[,i]^2)/sum(e_data[,i]^2))))
}
}
p2 = ggplot2::ggplot(ggplot2::aes_(x = ~value, color = ~label),data = dat) +
ggplot2::geom_density() + ggplot2::theme_bw()
p2 = p2 + ggplot2::stat_function(fun = stats::dnorm,color = "black")
p2 = p2 + ggplot2::ggtitle("Residual density plot")
Af = c(tablac[,paste0('X',alpha)])
LS = Af*sqrt(e_k$N) + 2*Af*c(1:e_k$N)/sqrt(e_k$N)
LI = -LS
p3 = ggplot2::ggplot(ggplot2::aes_(x = ~time, y = ~cusum,color = ~label),data = dat)
p3 = p3 + ggplot2::geom_ribbon(ggplot2::aes(ymin = rep(LS,e_k$k), ymax = rep(LI,e_k$k)),
fill = "gray",color = NA,alpha = 0.5)
p3 = p3 + ggplot2::geom_line() + ggplot2::theme_bw() + ggplot2::ggtitle('CUSUM statistic for residuals')
# Tabla CusumSQ
if (is.null(regime_model$data$nu)) {nu = 0}else{
nu = regime_model$data$nu
}
k = regime_model$data$k
if (is.null(regime_model$data$Zt)) {
Ind = rep(1,regime_model$data$N)
}else{
Ind = lists_ind(regime_model$r[1],regime_model$data$Zt,length(regime_model$r[1]) + 1)$Ind
}
etaj = 1 + regime_model$orders$pj*k + regime_model$orders$qj*nu + regime_model$orders$dj
ff = 1/2*(regime_model$data$N - etaj[Ind]) - 1
co = 1/ff^(1/2)*tablasq[1,paste0('X',alpha)] + 1/ff*tablasq[2,paste0('X',alpha)] + 1/ff^(3/2)*tablasq[3,paste0('X',alpha)]
LQS = co + (1:e_k$N)/e_k$N
LQI = -co + (1:e_k$N)/e_k$N
p4 = ggplot2::ggplot(ggplot2::aes_(x = ~time, y = ~cumsq,color = ~label),data = dat)
p4 = p4 + ggplot2::geom_ribbon(ggplot2::aes(ymin = rep(LQS,e_k$k), ymax = rep(LQI,e_k$k)),
fill = "gray",color = NA,alpha = 0.5)
p4 = p4 + ggplot2::geom_line() + ggplot2::theme_bw() + ggplot2::ggtitle('CUSUMSQ statistic for residuals')
acf_i = stats::acf(regime_model$residuals[,1],lag.max = lagmax,plot = FALSE,type = 'correlation')
  acf_Yt = data.frame(Lag = acf_i$lag, value = acf_i$acf,names = 'Series.1',type = 'ACF')
pacf_i = stats::acf(regime_model$residuals[,1],lag.max = lagmax,plot = FALSE,type = 'partial')
  pacf_Yt = data.frame(Lag = pacf_i$lag, value = pacf_i$acf,names = 'Series.1',type = 'PACF')
if (ncol(regime_model$residuals) > 1) {
for (i in 2:ncol(regime_model$residuals)) {
acf_i = stats::acf(regime_model$residuals[,i],lag.max = lagmax,plot = FALSE,type = 'correlation')
acf_Yt = rbind(acf_Yt,data.frame(Lag = acf_i$lag + 0.1*i, value = acf_i$acf,names = paste0('Series.',i),type = 'ACF'))
pacf_i = stats::acf(regime_model$residuals[,i],lag.max = lagmax,plot = FALSE,type = 'partial')
pacf_Yt = rbind(pacf_Yt,data.frame(Lag = pacf_i$lag + 0.1*i, value = pacf_i$acf,names = paste0('Series.',i),type = 'PACF'))
}
}
dat_cor = rbind.data.frame(acf_Yt,pacf_Yt)
p5 = ggplot2::ggplot(ggplot2::aes_(x = ~Lag, y = ~value),data = dat_cor[floor(dat_cor$Lag) != 0,])
p5 = p5 + ggplot2::geom_hline(yintercept = 0) + ggplot2::facet_grid(type~names)
p5 = p5 + ggplot2::geom_segment(ggplot2::aes(xend = dat_cor[floor(dat_cor$Lag) != 0,]$Lag,yend = 0)) + ggplot2::geom_point(color = "blue",size = 0.4)
  ci = stats::qnorm(1 - as.numeric(alpha)/2)/sqrt(nrow(regime_model$residuals))
p5 = p5 + ggplot2::geom_ribbon(ggplot2::aes(ymax = ci ,ymin = -ci),color = NA,fill = "blue",alpha = 0.2)
p5 = p5 + ggplot2::ggtitle('ACF and PACF plots for residuals series') + ggplot2::theme_bw()
return(list(p1,p2,p3,p4,p5))
}
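# Illustrative usage (not run): residual diagnostics; `fit` is assumed to be a
# regime_model object returned by mtarstr or mtarns.
# plots <- diagnostic_mtar(fit, lagmax = 20, alpha = '0.05')
# plots[[3]]  # CUSUM statistic for the residuals
# plots[[5]]  # ACF and PACF of the residual series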
|
/scratch/gouwar.j/cran-all/cranData/BMTAR/R/diagnostic_mtar.R
|
autoplot = function(object, ...) UseMethod("autoplot")
autoplot.regime_model = function(object, type = 1, ...) {
if (!requireNamespace('ggplot2', quietly = TRUE)) {
stop('ggplot2 is needed for this function to work')
}else {
if (!inherits(object, 'regime_model')) {
stop('autoplot.regime_model requires a regime_model object')
}}
  if (!{type %in% c(1:5)}) {stop('type should take values in c(1,2,3,4,5)')}
if (is.null(object$Chain)) {stop('There are no chains to graph')}
if (type == 1) {
if (is.null(object$Chain$r)) {stop('r unknown')}
Chain_r = t(object$Chain$r)
time = seq(1,nrow(Chain_r))
dat2 = data.frame(name = 'r1',time = time,value = Chain_r[,1])
if (ncol(Chain_r) > 1) {
for (i in 2:ncol(Chain_r)) {
dat2 = rbind(dat2,data.frame(name = paste0('r',i),
time = time,value = Chain_r[,i]))
}
}
p = ggplot2::ggplot(ggplot2::aes_(x = ~time,y = ~value),data = dat2)
p = p + ggplot2::geom_line() + ggplot2::facet_grid(name~.,scales = 'free')
p = p + ggplot2::theme_bw() + ggplot2::scale_y_continuous(labels = function(x) format(x, scientific = TRUE))
return(p)
}
if (type == 2) {
# Sigma Chains
Chain_Sig = object$Chain$Sigma
dat3l = vector(mode = 'list',length = length(Chain_Sig))
p3 = vector(mode = 'list',length = length(Chain_Sig))
names(p3) = names(Chain_Sig)
names(dat3l) = names(Chain_Sig)
for (j in names(Chain_Sig)) {
if (!is.matrix(Chain_Sig[[j]])) {
Chain_Sig[[j]] = t(as.matrix(Chain_Sig[[j]]))
}
time = seq(1,ncol(Chain_Sig[[j]]))
dat3 = data.frame(comp = '11',time = time,value = Chain_Sig[[j]][1,])
k = dim(object$regime[[j]]$sigma)[1]
names_sig = paste0(1:k,1)
for (i3 in 2:k) {names_sig = c(names_sig,paste0(1:k,i3))}
if (nrow(Chain_Sig[[j]]) > 1) {
ii = 1
for (i in names_sig[-1]) {
dat3 = rbind(dat3,data.frame(comp = i,time = time,value = Chain_Sig[[j]][ii,]))
ii = ii + 1
}
}
p3[[j]] = ggplot2::ggplot(ggplot2::aes_(x = ~time, y = ~value),data = dat3) +
ggplot2::geom_line() + ggplot2::facet_grid(dat3$comp~.,scales = 'free') +
ggplot2::theme_bw() +
ggplot2::labs(title = paste('Sigma chains',j)) +
ggplot2::scale_y_continuous(labels = function(x) format(x, scientific = TRUE))
}
return(p3)
}
if (type == 3) {
# Theta Chains
Chain_Theta = object$Chain$Theta
dat3l = vector(mode = 'list',length = length(Chain_Theta))
p4 = vector(mode = 'list',length = length(Chain_Theta))
names(p4) = names(Chain_Theta)
if (!is.matrix(Chain_Theta$R1)) {
Chain_Theta$R1 = t(as.matrix(Chain_Theta$R1))
}
time = seq(1,ncol(Chain_Theta$R1))
for (j in names(Chain_Theta)) {
dat3 = data.frame(comp = '1',time = time,value = Chain_Theta[[j]][1,])
      if (nrow(Chain_Theta[[j]]) > 1) {
for (i in 2:nrow(Chain_Theta[[j]])) {
dat3 = rbind(dat3,data.frame(comp = as.character(i),
time = time,value = Chain_Theta[[j]][i,]))
}
}
p4[[j]] = ggplot2::ggplot(ggplot2::aes_(x = ~time,y = ~value),data = dat3) +
ggplot2::theme_bw() +
ggplot2::geom_line() + ggplot2::facet_grid(comp~.,scales = 'free') +
ggplot2::labs(title = paste('Theta chains',j)) +
ggplot2::scale_y_continuous(labels = function(x) format(x, scientific = TRUE))
}
return(p4)
}
if (type == 4) {
# Gamma Chains
if (is.null(object$Chain$Gamma)) {
stop('Object $Chain$Gamma does not exist (known orders)')
}
Chain_Gamma = object$Chain$Gamma
dat3l = vector(mode = 'list',length = length(Chain_Gamma))
p5 = vector(mode = 'list',length = length(Chain_Gamma))
names(p5) = names(Chain_Gamma)
time = seq(1,ncol(Chain_Gamma$R1))
for (j in names(Chain_Gamma)) {
dat3 = data.frame(comp = '1',time = time,value = Chain_Gamma[[j]][1,])
      if (nrow(Chain_Gamma[[j]]) > 1) {
for (i in 2:nrow(Chain_Gamma[[j]])) {
dat3 = rbind(dat3,data.frame(comp = as.character(i),
time = time,value = Chain_Gamma[[j]][i,]))
}
}
p5[[j]] = ggplot2::ggplot(ggplot2::aes_(x = ~time,y = ~value),data = dat3) +
ggplot2::geom_line() + ggplot2::facet_grid(comp~.,scales = 'free') +
ggplot2::theme_bw() +
ggplot2::labs(title = paste('Gamma chains',j)) +
ggplot2::scale_y_continuous(labels = function(x) format(x, scientific = TRUE))
}
return(p5)
}
if (type == 5) {
Chain_Yt = as.data.frame(object$data$Yt)
Chain_fit = as.data.frame(object$fitted.values)
Chain_Yt = as.data.frame(Chain_Yt[-nrow(Chain_Yt),])
Chain_fit = as.data.frame(Chain_fit[-nrow(Chain_fit),])
time = seq(1,nrow(Chain_fit))
dat1 = data.frame(type = 'obs',name = 'Series.1',time = time, value = Chain_Yt[,1])
dat1 = rbind.data.frame(dat1,data.frame(type = 'fit',name = 'Series.1',time = time, value = Chain_fit[,1]))
if (ncol(Chain_Yt) > 1) {
for (i in 2:ncol(Chain_Yt)) {
dati = data.frame(type = 'obs',name = paste0('Series.',i),time = time,value = Chain_Yt[,i])
dati = rbind.data.frame(dati,data.frame(type = 'fit',name = paste0('Series.',i),time = time, value = Chain_fit[,i]))
dat1 = rbind.data.frame(dat1,dati)
}
}
p = ggplot2::ggplot(ggplot2::aes_(x = ~time,y = ~value, color = ~type),data = dat1)
p = p + ggplot2::geom_line() + ggplot2::facet_grid(name~.) + ggplot2::theme_bw()
p = p + ggplot2::labs(title = 'Output process')
p = p + ggplot2::scale_color_manual(values = c("black","blue")) +
ggplot2::scale_y_continuous(labels = function(x) format(x, scientific = TRUE))
return(p)
}
}
autoplot.regime_missing = function(object, type = 1, ...) {
if (!requireNamespace('ggplot2', quietly = TRUE)) {
stop('ggplot2 is needed for this function to work')
}else {
if (!inherits(object, 'regime_missing')) {
stop('autoplot.regime_missing requires a regime_missing object')
}}
if (is.null(object$Chains$Y)) {stop('There are no chains to graph')}
  if (!{type %in% c(1:3)}) {stop('type should take values in c(1,2,3)')}
if (type == 1) {
if (is.null(object$estimates$Yt)) {stop('Yt has no missing data')}
Chain_Yt = t(object$Chains$Yt)
time = seq(1,nrow(Chain_Yt))
names_yt = rownames(object$estimates$Yt)
dat2 = data.frame(name = names_yt[1],time = time,value = Chain_Yt[,1])
if (ncol(Chain_Yt) > 1) {
for (i in 2:ncol(Chain_Yt)) {
dat2 = rbind(dat2,data.frame(name = names_yt[i],time = time,value = Chain_Yt[,i]))
}
}
p = ggplot2::ggplot(ggplot2::aes_(x = ~time,y = ~value),data = dat2)
p = p + ggplot2::geom_line() + ggplot2::facet_grid(name~.,scales = 'free') + ggplot2::theme_bw()
p = p + ggplot2::labs(title = 'Missing data (Yt) chains')
return(p)
}
if (type == 2) {
if (is.null(object$estimates$Zt)) {stop('Zt has no missing data')}
Chain_Zt = t(object$Chains$Zt)
time = seq(1,nrow(Chain_Zt))
names_Zt = rownames(object$estimates$Zt)
dat2 = data.frame(name = names_Zt[1],time = time,value = Chain_Zt[,1])
if (ncol(Chain_Zt) > 1) {
for (i in 2:ncol(Chain_Zt)) {
dat2 = rbind(dat2,data.frame(name = names_Zt[i],time = time,value = Chain_Zt[,i]))
}
}
p = ggplot2::ggplot(ggplot2::aes_(x = ~time,y = ~value),data = dat2)
p = p + ggplot2::geom_line() + ggplot2::facet_grid(name~.,scales = 'free') + ggplot2::theme_bw()
p = p + ggplot2::labs(title = 'Missing data (Zt) chains')
return(p)
}
if (type == 3) {
if (is.null(object$estimates$Xt)) {stop('Xt has no missing data')}
Chain_Xt = t(object$Chains$Xt)
time = seq(1,nrow(Chain_Xt))
names_Xt = rownames(object$estimates$Xt)
dat2 = data.frame(name = names_Xt[1],time = time,value = Chain_Xt[,1])
if (ncol(Chain_Xt) > 1) {
for (i in 2:ncol(Chain_Xt)) {
dat2 = rbind(dat2,data.frame(name = names_Xt[i],time = time,value = Chain_Xt[,i]))
}
}
p = ggplot2::ggplot(ggplot2::aes_(x = ~time,y = ~value),data = dat2)
p = p + ggplot2::geom_line() + ggplot2::facet_grid(name~.,scales = 'free') + ggplot2::theme_bw()
p = p + ggplot2::labs(title = 'Missing data (Xt) chains')
return(p)
}
}
autoplot.tsregime = function(object, type = 1, ...) {
if (!requireNamespace('ggplot2', quietly = TRUE)) {
stop('ggplot2 is needed for this function to work')
}else {
if (!inherits(object, 'tsregime')) {
stop('autoplot.tsregime requires a tsregime object')
}}
  if (!{type %in% c(1:3)}) {stop('type should take values in c(1,2,3)')}
dats_Yt = as.data.frame(object$Yt)
time = seq(1,nrow(dats_Yt))
dat = data.frame(name = 'Series.1',time = time,value = dats_Yt[,1])
if (ncol(dats_Yt) > 1) {
for (i in 2:ncol(object$Yt)) {
dat = rbind(dat,data.frame(name = paste0('Series.',i),time = time,value = dats_Yt[,i]))
}
}
dat_NA = c()
N = length(dats_Yt[,1])
for (i in 1:object$k) {
xl = c(1:N)[is.na(dats_Yt[,i])]
dat_NA = rbind(dat_NA,data.frame(name = rep(paste0('Series.',i),length(xl)),xl = xl))
}
p = ggplot2::ggplot(ggplot2::aes_(x = ~time,y = ~value),data = dat)
p = p + ggplot2::geom_line() + ggplot2::theme_bw()
p = p + ggplot2::labs(title = 'Output process')
p = p + ggplot2::geom_vline(ggplot2::aes(xintercept = xl),color = "red",linetype = 'dashed',data = dat_NA)
p = p + ggplot2::facet_grid(name~.,scales = 'free_y')
if (!is.null(object$Zt)) {
dats_Zt = data.frame(time = time,value = object$Zt)
p2 = ggplot2::ggplot(ggplot2::aes_(x = ~time,y = ~value),data = dats_Zt)
p2 = p2 + ggplot2::geom_line() + ggplot2::theme_bw()
p2 = p2 + ggplot2::geom_vline(xintercept = dats_Zt$time[is.na(dats_Zt$value)],color = "red",linetype = 'dashed')
if (!is.null(object$r)) {
Nrg_plot = paste0(paste0(paste0('Reg_',1:object$l),'='),object$Summary_r$Prop_reg,'%')
p2 = p2 + ggplot2::labs(title = 'Threshold process',subtitle = paste0('(',paste(Nrg_plot,collapse = ','),')'))
for (i in c(object$r)) {
p2 = p2 + ggplot2::geom_hline(yintercept = i,linetype = 'dashed',color = 'blue')
}
}
}
if (!is.null(object$Xt)) {
dats_Xt = as.data.frame(object$Xt)
dat2 = data.frame(name = 'Series.1',time = time,value = dats_Xt[,1])
if (ncol(dats_Xt) > 1) {
for (i in 2:ncol(object$Xt)) {
dat2 = rbind(dat2,data.frame(name = paste0('Series.',i),
time = time,value = dats_Xt[,i]))
}
}
dat_NA = c()
for (i in 1:object$nu) {
      xl = c(1:N)[is.na(dats_Xt[,i])]
dat_NA = rbind(dat_NA,data.frame(name = rep(paste0('Series.',i),length(xl)),xl = xl))
}
p3 = ggplot2::ggplot(ggplot2::aes_(x = ~time,y = ~value),data = dat2)
p3 = p3 + ggplot2::geom_line() + ggplot2::theme_bw()
p3 = p3 + ggplot2::labs(title = 'Covariates process')
p3 = p3 + ggplot2::geom_vline(ggplot2::aes(xintercept = xl),color = "red",linetype = 'dashed',data = dat_NA)
p3 = p3 + ggplot2::facet_grid(name~.,scales = 'free_y')
}
if (type == 1) {
return(p)
}
if (type == 2) {
if (is.null(object$Zt)) {
stop('Threshold process does not exist')}
return(p2)
}
if (type == 3) {
if (is.null(object$Xt)) {
stop('Covariates process does not exist')}
return(p3)
}
}
print = function(object, ...) UseMethod('print')
print.tsregime = function(object, ...){
cat('Threshold time series:\n','N =',object$N,'\n')
dats = object
class(dats) = NULL
if (!is.null(object$r)) {
cat('======================','\n')
cat('r = ',object$r,'\n')
print(object$Summary_r)
cat('======================','\n')
}else{
if (!is.null(object$Zt)) {
cat('Unknown threshold values','\n')
}
}
utils::str(dats)
}
print.regime_model = function(object, ...) {
print(object$estimates)
}
print.regime_missing = function(object, ...) {
print(object$estimates)
}
print.regime_number = function(object, ...) {
print(object$estimates)
}
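# Illustrative usage (not run): the plotting methods above; `fit` is assumed to
# be a regime_model object estimated with chain = TRUE so the chains are kept.
# autoplot.regime_model(fit, type = 5)  # observed vs fitted output process
# autoplot.regime_model(fit, type = 1)  # chains of the threshold value r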
|
/scratch/gouwar.j/cran-all/cranData/BMTAR/R/methods_mtar.R
|
prodB = function(x){
prod = 1
for (a in 1:length(x)) {
prod = prod*x[a]
}
return(prod)
}
dmnormB = function(x, mean, sigma){
dist = Brobdingnag::as.brob(t(x - mean) %*% solve(sigma) %*% (x - mean))
cte = (2*pi)^{-nrow(sigma)/2}*determinant(sigma, logarithm = FALSE)$modulus^{-1/2}
return(cte*exp(-1/2*dist))
}
dwishartB = function(x, nu, S){
k = ncol(x)
producto = Brobdingnag::as.brob(1)
for (i in 1:k) {
producto = producto*exp(Brobdingnag::as.brob(lgamma((nu + 1 - i)/2)))
}
densidades = (Brobdingnag::as.brob(2)^(nu*k/2)*Brobdingnag::as.brob(pi^(k*(k - 1)/4))*producto)^(-1) *
Brobdingnag::as.brob(det((1/nu)*S))^(-nu/2)*Brobdingnag::as.brob(det(x))^((nu - k - 1)/2) *
exp(Brobdingnag::as.brob(-0.5*sum(diag(solve((1/nu)*S) %*% x))))
return(densidades)
}
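# Illustrative usage (not run): these helpers evaluate densities on Brobdingnag's
# "brob" scale so that products of many small values do not underflow; a minimal sketch.
# x <- matrix(c(0.1, -0.2), ncol = 1)
# d <- dmnormB(x, mean = matrix(0, 2, 1), sigma = diag(2))
# as.numeric(d)  # back to an ordinary double (requires Brobdingnag attached)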
|
/scratch/gouwar.j/cran-all/cranData/BMTAR/R/mtarAUX.R
|
#==================================================================================================#
# Date: 14/04/2020
# Description: Compute the NAIC (AIC normalized by the number of observations) of a regime_model object
# Function: mtarNAIC
#==================================================================================================#
mtarNAIC = function(regimemodel){
  if (!inherits(regimemodel, 'regime_model')) {
    stop('regimemodel must be an object of type (regime_model)')
  }
l = length(regimemodel$regime)
k = nrow(regimemodel$regime[[1]]$sigma)
nuaux = NULL
for (lj in 1:l) {
nuaux[lj] = length(regimemodel$regime[[lj]]$beta[[1]][1,])
}
nu = max(nuaux)
pj = qj = dj = vector(length = l)
for (lj in 1:l) {
pj[lj] = length(regimemodel$regime[[lj]]$phi)
qj[lj] = length(regimemodel$regime[[lj]]$beta)
dj[lj] = length(regimemodel$regime[[lj]]$delta)
}
etaj = 1 + pj*k + qj*nu + dj
Nj = c(regimemodel$Nj)
logLikj = as.numeric(regimemodel$logLikj)
  AICj = as.numeric(Nj*logLikj + 2*k*etaj)
NAIC = sum(AICj)/sum(Nj)
message('NAIC=',round(NAIC,4),'\n')
return(list(AICj = AICj,NAIC = NAIC))
}
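# Illustrative usage (not run): model comparison by NAIC; fit2 and fit3 are
# assumed regime_model objects estimated with 2 and 3 regimes respectively.
# mtarNAIC(fit2)$NAIC
# mtarNAIC(fit3)$NAIC  # the model with the smaller NAIC is preferred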
|
/scratch/gouwar.j/cran-all/cranData/BMTAR/R/mtarNAIC.R
|
#==================================================================================================#
# Date: 14/04/2020
# Comments:
#-> k was taken by default by Sigma dimensions
# Function:
# Function to repeat matrix
#==================================================================================================#
repM = function(M,r){lapply(rep(0,r),function(x){x*M})}
mtaregime = function(orders = list(p = 1,q = 0,d = 0), cs = NULL,
Phi, Beta = NULL, Delta = NULL, Sigma){
if (is.numeric(Sigma) & !is.matrix(Sigma)) {Sigma = as.matrix(Sigma)}
if (is.numeric(cs) & !is.matrix(cs)) {cs = as.matrix(cs)}
if (!is.list(orders)) {
stop('orders must be a list with names p (Not NULL), q or d')
}else if (!any(names(orders) %in% c('p','q','d'))) {
stop('orders must be a list with names p (Not NULL), q or d')
}
if (is.null(orders$p)) {stop('orders must have orders$p a positive integer')}
p = orders$p
q = ifelse(is.null(orders$q),0,orders$q)
d = ifelse(is.null(orders$d),0,orders$d)
# Validation of values
## structural parameters
  if (!{round(p) == p & p >= 1}) {stop('p must be a positive integer')}
if (!{round(q) == q & q >= 0}) {stop('q must be a positive integer or 0')}
if (!{round(d) == d & d >= 0}) {stop('d must be a positive integer or 0')}
# set matrix dimensions
k = ncol(Sigma)
## non-structural parameters
if (!is.list(Phi)) {
stop('Phi must be a list of real matrix of dimension kxk')
}else{
for (i in 1:length(Phi)) {
if (is.numeric(Phi[[i]]) & !is.matrix(Phi[[i]])) {Phi[[i]] = as.matrix(Phi[[i]])}
vl = all((is.numeric(Phi[[i]]) & {dim(Phi[[i]]) == c(k,k)}))
if (!vl) {stop('Phi must be a list of real matrix of dimension kxk')}
if (!is.matrix(Phi[[i]])) {stop('Phi[[i]] must be a matrix type object')}
if (substr(names(Phi[i]),1,3) != 'phi' | !{as.numeric(substr(names(Phi[i]),4,4)) %in% c(1:p)}) {
        stop('names in the list Phi must be \'phii\' with an integer i in 1:p')
}
}
if (max(as.numeric(sapply(names(Phi),substr,4,4))) != p) {
stop('p and Phi max order must match')
}
}
if (!is.null(Beta)) {
if (!is.list(Beta)) {
stop('Beta must be a list of real matrix of dimension kxnu')
} else{
if (is.matrix(Beta[[1]])) {nu = ncol(Beta[[1]])
}else{stop('Beta must be a list of real matrix of dimension kxnu')}
for (i in 1:length(Beta)) {
if (is.numeric(Beta[[i]]) & !is.matrix(Beta[[i]])) {Beta[[i]] = as.matrix(Beta[[i]])}
if (!is.matrix(Beta[[i]])) {stop('Beta[[i]] must be a matrix type object')}
vl = all(is.numeric(Beta[[i]]) & {dim(Beta[[i]]) == c(k,nu)})
if (!vl) {stop('Beta must be a list of real matrix of dimension kxnu')}
if (substr(names(Beta[i]),1,4) != 'beta' | !{
as.numeric(substr(names(Beta[i]),5,5)) %in% c(1:q)}) {
          stop('names in the list Beta must be \'betai\' with an integer i in 1:q')
}
}
if (max(as.numeric(sapply(names(Beta),substr,5,5))) != q) {
stop('q and Beta max order must match')
}
}
}else{
nu = 0
if (q > 0) {stop('q and Beta max order must match')}}
if (!is.null(Delta)) {
if (!is.list(Delta)) {
stop('Delta must be a list of real matrix of dimension kx1')
} else{
for (i in 1:length(Delta)) {
if (is.numeric(Delta[[i]]) & !is.matrix(Delta[[i]])) {Delta[[i]] = as.matrix(Delta[[i]])}
vl = all(is.numeric(Delta[[i]]) & {dim(Delta[[i]]) == c(k,1)})
if (!vl) {stop('Delta must be a list of real matrix of dimension kx1')}
if (!is.matrix(Delta[[i]])) {stop('Delta[[i]] must be a matrix type object')}
if (substr(names(Delta[i]),1,5) != 'delta' | !{
as.numeric(substr(names(Delta[i]),6,6)) %in% c(1:d)}) {
          stop('names in the list Delta must be \'deltai\' with an integer i in 1:d')
}
}
if (max(as.numeric(sapply(names(Delta),substr,6,6))) != d) {
stop('d and Delta max order must match')
}
}
} else if (d > 0) {
stop('Delta must be a list of real matrix of dimension kx1')
}
if (!is.null(cs)) {
if (!is.matrix(cs)) {
stop('cs must be a matrix type object')
}else{
vl = all(is.numeric(cs) & {dim(cs) == c(k,1)})
if (!vl) {stop('cs must be a real matrix of dimension kx1')}
}
}
if (!is.matrix(Sigma)) {
stop('Sigma must be a matrix type object')
}else if (is.numeric(Sigma)) {
vl = all(dim(Sigma) == c(k,k))
if (!vl) {stop('Sigma must be a real positive matrix of dimension kxk')}
vl = all(eigen(Sigma)$values >= 0)
if (!vl) {stop('Sigma must be a real positive matrix of dimension kxk')}
}else{
stop('Sigma must be a real positive matrix of dimension kxk')
}
# Create a list of regimes
Ri = vector('list')
  if (is.numeric(cs) & !is.matrix(cs)) {cs = as.matrix(cs)}
  if (is.null(cs)) {
    cs = rep(0,k)
  }
Ri$cs = cs
Ri$phi = vector('list', p)
names(Ri$phi) = paste0('phi',1:p)
Ri$phi[names(Phi)] = Phi
Ri$phi[names(Ri$phi)[!(names(Ri$phi) %in% names(Phi))]] = repM(matrix(0,k,k),
sum(!(names(Ri$phi) %in% names(Phi))))
if (q != 0) {
Ri$beta = vector('list', q)
names(Ri$beta) = paste0('beta',1:q)
Ri$beta[names(Beta)] = Beta
Ri$beta[names(Ri$beta)[!(names(Ri$beta) %in%
names(Beta))]] = repM(matrix(0,k,nu),
sum(!(names(Ri$beta) %in% names(Beta))))
}
if (d != 0) {
Ri$delta = vector('list', d)
names(Ri$delta) = paste0('delta',1:d)
Ri$delta[names(Delta)] = Delta
Ri$delta[names(Ri$delta)[!(names(Ri$delta) %in%
names(Delta))]] = repM(matrix(0,k,1),
sum(!(names(Ri$delta) %in% names(Delta))))
}
Ri$sigma = Sigma
# creation of object type regime
class(Ri) = 'regime'
return(Ri)
}
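# Illustrative usage (not run): a bivariate regime with one autoregressive lag;
# all parameter values below are arbitrary assumptions.
# R1 <- mtaregime(orders = list(p = 1, q = 0, d = 0),
#                 Phi = list(phi1 = diag(0.5, 2)),
#                 Sigma = diag(2))
# class(R1)  # "regime"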
|
/scratch/gouwar.j/cran-all/cranData/BMTAR/R/mtaregime.R
|
#==================================================================================================#
# Date: 14/04/2020
# Description: Functions for checking prior and other parameters for estimation
# Function:
#==================================================================================================#
mtarinipars = function(tsregime_obj,
list_model = list(pars = list(l = 2, orders = list(pj = c(1,1), qj = c(0,0), dj = c(0,0)),
r = NULL, Sigma = NULL),
orders = NULL,l0_min = NULL,l0_max = NULL),
method = NULL, theta_prior = NULL, sigma_prior = NULL, gamma_prior = NULL,
r_prior = NULL){
if (!inherits(tsregime_obj, 'tsregime')) {
stop('tsregime_obj must be a tsregime object')
}
k = tsregime_obj$k
nu = tsregime_obj$nu
if (is.null(nu)) {nu = 0}
if (!is.list(list_model)) {
stop('list_model must be a list type object with pars, orders, l0_min or l0_max')
message('If pars are unknown use mtarnumreg with l0_max maximum number of regimes','\n')
}else{
if (sum(names(list_model) %in% c('pars','orders','l0_min','l0_max')) == 0) {
stop('list_model must be a list type object with pars, orders, l0_min or l0_max')
message('If pars are unknown use mtarnumreg with l0_max maximum number of regimes','\n')
}else{
if (!is.null(list_model$orders)) {
if (!is.list(list_model$orders)) {stop('list_model$orders must be a list type object with names pj(Not NULL),qj,dj')
}else{
if (is.null(list_model$orders$pj)) {stop('list_model$orders must have orders$pj a positive integer')}
if (is.null(list_model$orders$qj)) {
list_model$orders$qj = list_model$orders$pj*0
}else{
if (any(list_model$orders$qj != 0) & is.null(tsregime_obj$Xt)) {
stop('For qj > 0 covariate process Xt must be in tsregime_obj')
}
}
if (is.null(list_model$orders$dj)) {
list_model$orders$dj = list_model$orders$pj*0
}else{
if (any(list_model$orders$dj != 0) & is.null(tsregime_obj$Zt)) {
stop('For dj > 0 threshold process Zt must be in tsregime_obj')
}
}
}
}
if (is.null(list_model$l0_max)) {
if (!is.null(list_model$pars)) {
if (!is.list(list_model$pars)) {
stop('list_model$pars must be a list type object with l,Sigma,r or orders')
}else{
if (sum(names(list_model$pars) %in% c('l','Sigma','r','orders')) == 0) {
stop('list_model$pars must be a list type object with l,Sigma,r or orders')
}else{
if (!is.null(list_model$pars$orders)) {
if (!is.list(list_model$pars$orders)) {stop('list_model$pars$orders must be a list type object with names pj(Not NULL),qj,dj')
}else{
if (is.null(list_model$pars$orders$pj)) {stop('list_model$pars$orders must have orders$pj a positive integer')}
if (is.null(list_model$pars$orders$qj)) {
list_model$pars$orders$qj = list_model$pars$orders$pj*0
}else{
if (any(list_model$pars$orders$qj != 0) & is.null(tsregime_obj$Xt)) {
stop('For qj > 0 covariate process Xt must be in tsregime_obj')
}
}
if (is.null(list_model$pars$orders$dj)) {
list_model$pars$orders$dj = list_model$pars$orders$pj*0
}else{
if (any(list_model$pars$orders$dj != 0) & is.null(tsregime_obj$Zt)) {
stop('For dj > 0 threshold process Zt must be in tsregime_obj')
}
}
}
}
l = list_model$pars$l
              if (round(l) != l | l <= 0) {stop('l must be an integer greater than 0')}
              if (l > 4) {stop('l must be at most 4')}
}
}
}
}else{
if (!is.null(list_model$pars) & is.list(list_model$pars)) {
if ('l' %in% names(list_model$pars)) {
          stop('If l is known, l0_min and l0_max are not necessary')
}
}
l = list_model$l0_max
l1 = list_model$l0_min
if (!is.null(l1)) {
      if (round(l1) != l1 | l1 <= 0) {stop('l0_min must be an integer greater than 0')}
      if (round(l) != l | l < l1) {stop('l0_max must be an integer greater than or equal to l0_min')}
    }else{
      if (round(l) != l | l <= 1) {stop('l0_max must be an integer greater than 1')}
    }
    if (l > 4) {stop('l0_max must be at most 4')}
if (is.null(method)) {stop('For l unknown method must be KUO or SSVS')}
if (!is.null(r_prior)) {
if (!is.list(r_prior)) {
stop('r_prior must be a list type object with names za, zb or val_rmh')
}else{
if (!all(names(r_prior) %in% c('za','zb','val_rmh'))) {
stop('r_prior must be a list type object with names za, zb or val_rmh')
}
if (!is.null(r_prior$za)) {
if (is.numeric(r_prior$za) & length(r_prior$za) == 1) {
if (!{r_prior$za > 0 & r_prior$za < 1}) {
stop('r_prior$za must be between 0 and 1. Note: suggestion not less than 0.2')
}
}else{stop('r_prior$za must be a real number between 0 and 1')}
}else{r_prior$za = NULL}
if (!is.null(r_prior$zb)) {
if (is.numeric(r_prior$zb) & length(r_prior$zb) == 1) {
if (!{r_prior$zb > 0 & r_prior$zb < 1}) {
stop('r_prior$zb must be between 0 and 1. Note: suggestion not less than 0.8')
}
}else{stop('r_prior$zb must be a real number between 0 and 1')}
}else{r_prior$zb = NULL}
if (!is.null(r_prior$val_rmh)) {
if (is.numeric(r_prior$val_rmh) & length(r_prior$val_rmh) == 1 & abs(r_prior$val_rmh) > 0 & abs(r_prior$val_rmh) < 1) {
}else{stop('abs(r_prior$val_rmh) must be a real number between 0 and 1')}
}else{
r_prior$val_rmh = 0.00375
}
}
}else{
r_prior$za = NULL
r_prior$zb = NULL
r_prior$val_rmh = 0.00375
}
message('If pars are unknown use mtarnumreg with l0_max maximum number of regimes','\n')
listf = list(tsregime_obj = tsregime_obj, l0_min = list_model$l0_min,l0_max = list_model$l0_max,method = method,init = list(r = r_prior))
class(listf) = 'regime_inipars'
return(listf)
}
}
}
  if (!is.null(r_prior$za) & !is.null(r_prior$zb)) {
    if (r_prior$za >= r_prior$zb) {
      stop('za must be less than zb')
    }
  }
# list_model
#orders
if (is.null(list_model$l0_min) & is.null(list_model$l0_max)) {
if (!is.null(list_model$pars$orders)) {
orders = list_model$pars$orders
      if (!is.null(method)) {message('For known orders, method is not necessary','\n')}
method = 'ns'
}else{
if (!is.null(list_model$orders)) {orders = list_model$orders}else{
        stop('For unknown orders, maximum orders must be entered in list_model$orders')
}
if (is.null(method)) {stop('For orders unknown method must be KUO or SSVS')}
}
}else{
orders = list(pj = NULL,qj = NULL,dj = NULL)
    message('For l unknown, orders are not necessary')
}
if (!is.list(orders) | length(orders) != 3) {
stop('orders must be a list with names pj (Not NULL), qj or dj')
}else if (!{all(names(orders) %in% c('pj','qj','dj'))}) {
stop('orders must be a list with names pj (Not NULL), qj or dj')
}
pj = orders$pj
qj = orders$qj
dj = orders$dj
if (!is.null(pj) & !is.null(dj) & !is.null(qj)) {
if (is.vector(pj) & is.vector(qj) & is.vector(dj)) {
if (!{length(pj) == l & length(qj) == l & length(dj) == l}) {
stop('pj qj and dj must have length l')
}else{
for (lj in 1:l) {
if (!{round(pj[lj]) == pj[lj] & pj[lj] >= 0}) {stop('pj must be a positive integer or 0 for each regime')}
if (!{round(qj[lj]) == qj[lj] & qj[lj] >= 0}) {stop('qj must be a positive integer or 0 for each regime')}
if (!{round(dj[lj]) == dj[lj] & dj[lj] >= 0}) {stop('dj must be a positive integer or 0 for each regime')}
          if (pj[lj] > 5) {stop('pj must be less than or equal to 5 for each regime')}
          if (qj[lj] > 5) {stop('qj must be less than or equal to 5 for each regime')}
          if (dj[lj] > 5) {stop('dj must be less than or equal to 5 for each regime')}
}
}
}else{stop('pj qj and dj must be of numeric type')}
}
eta = 1 + pj*k + qj*nu + dj
#
# Validar Sigma
Sigma = list_model$pars$Sigma
if (!is.null(Sigma)) {
if (!is.list(Sigma)) {
stop(paste('Sigma must be a list of length l with names', paste0('R',1:l,collapse = ', '),'of real positive matrix of dimension',k,'x',k))
}else{
if (length(Sigma) != l) {
stop(paste('Sigma must be a list of length l with names', paste0('R',1:l,collapse = ', '),'of real positive matrix of dimension',k,'x',k))
}else{
if (all(names(Sigma) %in% paste0('R',1:l))) {
for (lj in 1:l) {
if (is.numeric(Sigma[[paste0('R',lj)]])) {
if (!is.matrix(Sigma[[paste0('R',lj)]])) {stop(paste0('Sigma$R',lj,' must be a matrix type object'))}
vl = sum(dim(Sigma[[paste0('R',lj)]]) == c(k,k))
if (vl != 2) {stop(paste0('Sigma$R',lj,' must be a real positive matrix of dimension ',k,' x ',k))}
vl = sum(eigen(Sigma[[paste0('R',lj)]])$values >= 0)
if (vl != k) {stop(paste0('Sigma$R',lj,' must be a real positive matrix of dimension ',k,' x ',k))}
}else{stop(paste0('Sigma$R',lj,' must be a real positive matrix of dimension ',k,' x ',k))}
}
}else{stop(paste('Sigma must be a list of length l with names', paste0('R',1:l,collapse = ', '),'of real positive matrix of dimension',k,'x',k))}
}
}
}
# r
r = list_model$pars$r
if (is.null(r)) {
if (l > 1 & is.null(tsregime_obj$Zt)) {stop('Threshold process Zt must be entered')}
if (!is.null(r_prior)) {
if (!is.list(r_prior)) {
stop('r_prior must be a list type object with names za, zb or val_rmh')
}else{
if (sum(names(r_prior) %in% c('za','zb','val_rmh')) == 0) {
stop('r_prior must be a list type object with names za, zb or val_rmh')
}
if (!is.null(r_prior$za)) {
if (is.numeric(r_prior$za) & length(r_prior$za) == 1) {
if (!{r_prior$za > 0 & r_prior$za < 1}) {
stop('r_prior$za must be between 0 and 1. Note: suggestion not less than 0.2')
}
}else{stop('r_prior$za must be a real number between 0 and 1')}
}else{r_prior$za = NULL}
if (!is.null(r_prior$zb)) {
if (is.numeric(r_prior$zb) & length(r_prior$zb) == 1) {
if (!{r_prior$zb > 0 & r_prior$zb < 1}) {
stop('r_prior$zb must be between 0 and 1. Note: suggestion not less than 0.8')
}
}else{stop('r_prior$zb must be a real number between 0 and 1')}
}else{r_prior$zb = NULL}
if (!is.null(r_prior$val_rmh)) {
if (is.numeric(r_prior$val_rmh) & length(r_prior$val_rmh) == 1 & abs(r_prior$val_rmh) > 0 & abs(r_prior$val_rmh) < 1) {
}else{stop('abs(r_prior$val_rmh) must be a real number between 0 and 1')}
}else{
r_prior$val_rmh = 0.00375
}
}
}else{
r_prior$za = NULL
r_prior$zb = NULL
r_prior$val_rmh = 0.00375
}
}else{
if (l == 1) {stop('A model with one regime must not have a threshold')
}else{
if (length(r) != {l - 1}) {stop('A known r must be of length l-1')}
if (is.null(tsregime_obj$Zt)) {stop('Zt must be entered along with the threshold value')}
}
}
# validate initial values
#THETA
if (!is.null(theta_prior)) {
if (length(theta_prior) <= l) {
if (sum(names(theta_prior) %in% paste0('R',1:l)) != 0) {
for (lj in 1:l) {
if (!is.null(theta_prior[[paste0('R',lj)]])) {
if (!is.list(theta_prior[[paste0('R',lj)]])) {
stop(paste0('theta_prior$R',lj,' must be a list type object with names theta0j or cov0j'))
}else{
if (sum(c('theta0j','cov0j') %in% names(theta_prior[[paste0('R',lj)]])) == 0) {
stop(paste0('theta_prior$R',lj,' must be a list type object with names theta0j or cov0j'))
}else{
if (!is.null(theta_prior[[paste0('R',lj)]]$theta0j)) {
if (is.numeric(theta_prior[[paste0('R',lj)]]$theta0j)) {
if (!is.matrix(theta_prior[[paste0('R',lj)]]$theta0j)) {stop(paste0('theta_prior$R',lj,'$theta0j must be a matrix type object'))}
vl = sum(dim(theta_prior[[paste0('R',lj)]]$theta0j) == c(k*eta[lj],1))
if (vl != 2) {stop(paste0('theta_prior$R',lj,'$theta0j must be a matrix of dimension ',(k*eta[lj]),' x 1'))}
}else{stop(paste0('theta_prior$R',lj,'$theta0j must be a real positive matrix of dimension',(k*eta[lj]),'x 1'))}
}else{
theta_prior[[paste0('R',lj)]]$theta0j = rep(0,k*eta[lj])
}
if (!is.null(theta_prior[[paste0('R',lj)]]$cov0j)) {
if (is.numeric(theta_prior[[paste0('R',lj)]]$cov0j)) {
if (!is.matrix(theta_prior[[paste0('R',lj)]]$cov0j)) {stop(paste0('theta_prior$R',lj,'$cov0j must be a matrix type object'))}
vl = sum(dim(theta_prior[[paste0('R',lj)]]$cov0j) == c(k*eta[lj],k*eta[lj]))
if (vl != 2) {stop(paste0('theta_prior$R',lj,'$cov0j must be a matrix of dimension ',(k*eta[lj]),' x ',k*eta[lj]))}
vl = sum(eigen(theta_prior[[paste0('R',lj)]]$cov0j)$values >= 0)
if (vl != (k*eta[lj])) {stop(paste0('theta_prior$R',lj,'$cov0j must be a real positive matrix of dimension ',k*eta[lj],' x' ,k*eta[lj]))}
}
}else{
theta_prior[[paste0('R',lj)]]$cov0j = diag(k*eta[lj])
}
}
if (method == "KUO" & !all(names(theta_prior[[paste0('R',lj)]]) %in% c('theta0j','cov0j'))) {
stop(paste0('theta_prior$R',lj,' must be a list type object with names theta0j or cov0j only'))}
if (method == "SSVS" & !all(names(theta_prior[[paste0('R',lj)]]) %in% c('theta0j','cov0j','Cij','Tauij','R'))) {
stop(paste0('theta_prior$R',lj,' must be a list type object with names theta0j, cov0j, Cij, Tauij or R'))
}else{
Cij = theta_prior[[paste0('R',lj)]]$Cij
Tauij = theta_prior[[paste0('R',lj)]]$Tauij
R = theta_prior[[paste0('R',lj)]]$R
if (!is.null(Cij)) {
if (length(Cij) != k*eta[lj]) {stop(paste0('theta_prior$R',lj,'$Cij must be a vector of length ',k*eta[lj]))}
}else{
theta_prior[[paste0('R',lj)]]$Cij = rep(25,k*eta[lj])
}
if (!is.null(Tauij)) {
if (length(Tauij) != k*eta[lj]) {stop(paste0('theta_prior$R',lj,'$Tauij must be a vector of length ',k*eta[lj]))}
}else{
if (l == 2) {theta_prior[[paste0('R',lj)]]$Tauij = rep(1.25,k*eta[lj])
}else{theta_prior[[paste0('R',lj)]]$Tauij = rep(1.5,k*eta[lj])}
}
if (!is.null(R)) {
if (is.numeric(R)) {
if (sum(dim(R) == c(k*eta[lj],k*eta[lj])) != 2) {stop(paste0('theta_prior$R',lj,'$R must be a matrix of dimension ',k*eta[lj],' x ',k*eta[lj]))}
vl = sum(eigen(R)$values >= 0)
if (vl != k*eta[lj]) {stop(paste0('theta_prior$R',lj,'$R must be a real positive matrix of dimension ',k*eta[lj],' x ',k*eta[lj]))}
}else{stop(paste0('theta_prior$R',lj,'$R must be a real positive matrix of dimension ',k*eta[lj],' x ',k*eta[lj]))}
}else{
theta_prior[[paste0('R',lj)]]$R = diag(k*eta[lj])
}
}
}
}
}
}else{
stop(paste('theta_prior must be a list type object of length l or less with any name',paste0('R',1:l,collapse = ', ')))
}
}else{stop(paste('theta_prior must be a list type object of length l or less with any name',paste0('R',1:l,collapse = ', ')))}
}else{
theta_prior = vector('list')
for (lj in 1:l) {
theta_prior[[paste0('R',lj)]] = vector('list')
theta_prior[[paste0('R',lj)]]$theta0j = rep(0,k*eta[lj])
theta_prior[[paste0('R',lj)]]$cov0j = diag(k*eta[lj])
if (method == 'SSVS') {
theta_prior[[paste0('R',lj)]]$Cij = rep(25,k*eta[lj])
if (l == 2) {theta_prior[[paste0('R',lj)]]$Tauij = rep(1.25,k*eta[lj])
}else{theta_prior[[paste0('R',lj)]]$Tauij = rep(1.5,k*eta[lj])}
theta_prior[[paste0('R',lj)]]$R = diag(k*eta[lj])
}
}
}
#SIGMA
if (!is.null(sigma_prior)) {
if (!is.list(sigma_prior)) {
stop(paste('sigma_prior must be a list type object of length l or less with names',paste0('R',1:l,collapse = ', ')))
}else{
if (length(sigma_prior) <= l) {
if (sum(names(sigma_prior) %in% paste0('R',1:l)) != 0) {
for (lj in 1:l) {
if (!is.null(sigma_prior[[paste0('R',lj)]])) {
if (!is.list(sigma_prior[[paste0('R',lj)]])) {
stop(paste0('sigma_prior$R',lj,' must be a list type object with names S0j or nu0j only'))
}else{
if (sum(names(sigma_prior[[paste0('R',lj)]]) %in% c('S0j','nu0j')) != 0) {
if (!is.null(sigma_prior[[paste0('R',lj)]]$S0j)) {
if (is.numeric(sigma_prior[[paste0('R',lj)]]$S0j)) {
if (!is.matrix(sigma_prior[[paste0('R',lj)]]$S0j)) {
stop(paste0('sigma_prior$R',lj,'$S0j must be a real positive matrix type object of dimension ',k,' x ',k))
}
vl = sum(dim(sigma_prior[[paste0('R',lj)]]$S0j) == c(k,k))
if (vl != 2) {stop(paste0('sigma_prior$R',lj,'$S0j must be a real positive matrix of dimension ',k,' x ',k))}
vl = sum(eigen(sigma_prior[[paste0('R',lj)]]$S0j)$values >= 0)
if (vl != k) {stop(paste0('sigma_prior$R',lj,'$S0j must be a real positive matrix of dimension ',k,' x ',k))}
}else{stop(paste0('sigma_prior$R',lj,'$S0j must be a real positive matrix of dimension ',k,' x ',k))}
}else{
sigma_prior[[paste0('R',lj)]]$S0j = diag(k)
}
if (!is.null(sigma_prior[[paste0('R',lj)]]$nu0j)) {
if (is.numeric(sigma_prior[[paste0('R',lj)]]$nu0j) & length(sigma_prior[[paste0('R',lj)]]$nu0j) == 1) {
if (!{round(sigma_prior[[paste0('R',lj)]]$nu0j) == sigma_prior[[paste0('R',lj)]]$nu0j & sigma_prior[[paste0('R',lj)]]$nu0j >= k}) {
stop(paste0('sigma_prior$R',lj,'$nu0j must be an integer greater than or equal to ',k))
}
}else{stop(paste0('sigma_prior$R',lj,'$nu0j must be an integer greater than or equal to ',k))}
}else{
sigma_prior[[paste0('R',lj)]]$nu0j = k
}
}else{stop(paste0('sigma_prior$R',lj,' must be a list type object with names S0j or nu0j'))}
}
}
}
}else{stop(paste('sigma_prior must be a list type object of length l or less with any name',paste0('R',1:l,collapse = ', ')))}
}else{stop(paste('sigma_prior must be a list type object of length l or less with any name',paste0('R',1:l,collapse = ', ')))}
}
}else{
sigma_prior = vector('list')
for (lj in 1:l) {
sigma_prior[[paste0('R',lj)]] = vector('list')
sigma_prior[[paste0('R',lj)]]$S0j = diag(k)
sigma_prior[[paste0('R',lj)]]$nu0j = k
}
}
#GAMMA
if (method %in% c('KUO','SSVS')) {
if (!is.null(gamma_prior)) {
if (!is.list(gamma_prior)) {stop(paste('gamma_prior must be a list type object of length l or less with any name',paste0('R',1:l,collapse = ', ')))
}else{
if (sum(names(gamma_prior) %in% paste0('R',1:l)) != 0) {
if (length(gamma_prior) > l) {stop(paste('gamma_prior must be a list type object of length l or less with any name',paste0('R',1:l,collapse = ', ')))
}else{
for (lj in 1:l) {
if (!is.null(gamma_prior[[paste0('R',lj)]])) {
if (length(gamma_prior[[paste0('R',lj)]]) != k*eta[lj]) {stop(paste0('gamma_prior$R',lj,' must be a vector of length ',k*eta[lj]))}
if (sum(gamma_prior[[paste0('R',lj)]] >= 0 & gamma_prior[[paste0('R',lj)]] <= 1) != k*eta[lj]) {
stop(paste0('gamma_prior$R',lj,' values must be between 0 and 1'))}
}else{
gamma_prior[[paste0('R',lj)]] = rep(0.5,k*eta[lj])
}
}
}
}else{
stop(paste('gamma_prior must be a list type object of length l or less with any name',paste0('R',1:l,collapse = ', ')))
}
}
}else{
gamma_prior = vector('list')
for (lj in 1:l) {
gamma_prior[[paste0('R',lj)]] = rep(0.5,k*eta[lj])
}
}
}
# exits
if (method %in% c('KUO','SSVS')) {
listf = list(tsregime_obj = tsregime_obj, pars = list_model$pars, orders = list_model$orders,method = method,
init = list(r = r_prior, Theta = theta_prior, Sigma = sigma_prior, Gamma = gamma_prior))
}else{
if (!is.null(Sigma) & is.null(r)) {
listf = list(tsregime_obj = tsregime_obj, pars = list_model$pars,init = list(r = r_prior, Theta = theta_prior))
}else if (is.null(Sigma) & !is.null(r)) {
listf = list(tsregime_obj = tsregime_obj, pars = list_model$pars,init = list(Theta = theta_prior, Sigma = sigma_prior))
}else if (is.null(Sigma) & is.null(r)) {
listf = list(tsregime_obj = tsregime_obj, pars = list_model$pars,init = list(r = r_prior, Theta = theta_prior, Sigma = sigma_prior))
}else if (!is.null(Sigma) & !is.null(r)) {
listf = list(tsregime_obj = tsregime_obj, pars = list_model$pars,init = list(Theta = theta_prior))
}
}
class(listf) = 'regime_inipars'
return(listf)
}
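# A minimal usage sketch (hedged: 'Yt_sim'/'Zt_sim' are hypothetical simulated series and the
# tsregime() argument names are assumptions); wrapped in if (FALSE) so it never runs at load time.
if (FALSE) {
  Yt_sim = matrix(stats::rnorm(200*2), ncol = 2)   # bivariate output process (k = 2)
  Zt_sim = matrix(stats::rnorm(200), ncol = 1)     # univariate threshold process
  data_sim = tsregime(Yt_sim, Zt = Zt_sim)
  # 2 regimes with known autoregressive orders; all priors left at their defaults
  ini = mtarinipars(tsregime_obj = data_sim,
                    list_model = list(pars = list(l = 2,
                      orders = list(pj = c(1,1), qj = c(0,0), dj = c(0,0)))))
  class(ini) # "regime_inipars"
}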
|
/scratch/gouwar.j/cran-all/cranData/BMTAR/R/mtarinipars.R
|
#==================================================================================================#
# Date: 14/04/2020
# Description: Bayesian estimation of missing values in the output process Yt and in the covariate
# processes (Zt, Xt) of an MTAR model, using a state-space representation and Gibbs sampling.
# Function: mtarmissing
#==================================================================================================#
mtarmissing = function(ini_obj,niter = 1000, chain = FALSE, level = 0.95, burn = NULL, cU = 0.5, b = NULL) {
#checking
compiler::enableJIT(3)
if (!is.logical(chain)) {stop('chain must be a logical object')}
if (!inherits(ini_obj, 'regime_inipars')) {
stop('ini_obj must be a regime_inipars object')
}
#code
symm = function(x) {
x = (x + t(x)) / 2
return(x)
}
Yt = ini_obj$tsregime_obj$Yt
Ut = cbind(ini_obj$tsregime_obj$Zt,ini_obj$tsregime_obj$Xt)
# the sampler requires at least one NA in Yt or in (Zt,Xt)
if (!anyNA(Yt) & !anyNA(Ut)) {
stop('ini_obj$tsregime_obj contains no missing data')
}
k = ini_obj$tsregime_obj$k
N = ini_obj$tsregime_obj$N
nu = ini_obj$tsregime_obj$nu
if (is.null(nu)) {nu = 0}
l = ini_obj$pars$l
r = ini_obj$pars$r
pj = ini_obj$pars$orders$pj
qj = ini_obj$pars$orders$qj
dj = ini_obj$pars$orders$dj
burn = ifelse(is.null(burn),round(0.1*niter),burn)
pmax = max(pj)
qmax = max(qj)
dmax = max(dj)
pmax = ifelse(pmax == 1,2,pmax)
qmax = ifelse(qmax == 0,1,qmax)
dmax = ifelse(dmax == 0,1,dmax)
# first entries
Yt = t(Yt)
Ut = t(Ut)
b = ifelse(is.null(b),1,b)
Zt = Ut[1,]
if (nu == 0) {
Xt = matrix(0,ncol = N,nrow = 1)
qj = rep(0,l)
}else{
Xt = t(ini_obj$tsregime_obj$Xt)
}
etaj = 1 + k*pj + nu*qj + dj
PosNAMat = PosNAvec = PosNAvecT = vector(mode = 'list',2)
PosNAMat[[1]] = apply(Yt,2,is.na)
PosNAvec[[1]] = c(1:ncol(Yt))[apply(PosNAMat[[1]],2,any)]
PosNAvecT[[1]] = matrix(rep(c(1:N),k),nrow = k,ncol = N,byrow = T)[PosNAMat[[1]]]
if (nu == 0) {
PosNAMat[[2]] = t(as.matrix(apply(Ut,2,is.na)))
PosNAvec[[2]] = c(1:ncol(Ut))[PosNAMat[[2]]]
}else{
PosNAMat[[2]] = apply(Ut,2,is.na)
PosNAvec[[2]] = c(1:ncol(Ut))[apply(PosNAMat[[2]],2,any)]
}
PosNAvecT[[2]] = matrix(rep(c(1:N),nu + 1),nrow = nu + 1,ncol = N,byrow = T)[PosNAMat[[2]]]
# Fill in missing Ut values with the Ut means
if (length(PosNAvec[[2]]) != 0) {
meanU = apply(Ut,1,mean,na.rm = TRUE)
for (i in 1:nrow(Ut)) {
Ut[i,PosNAMat[[2]][i,]] = meanU[i]
}
}
# Fill missing values in Yt with 0 and permute in Yt and Kt (caution)
initialU = mtarinipars(tsregime(t(Ut)),list_model = list(pars = list(l = 1,orders = list(pj = b,qj = 0,dj = 0))))
message('Estimating model (Zt,Xt) \n')
modelU = mtarns(ini_obj = initialU,niter = 1000,chain = FALSE,burn = 1000)
modelU = modelU$regime$R1
#functions
lists = function(r, Yt, Ut,...){
Zt = Ut[1,]
if (nu == 0) {
Xt = matrix(0,ncol = N,nrow = 1)
}else{
Xt = matrix(Ut[-1,],nrow = nu,ncol = N,byrow = TRUE)
}
rj = matrix(nrow = 2,ncol = l)
if (l == 1) {
rj[,1] = c(-Inf,Inf)
}else{
rj[,1] = c(-Inf,r[1])
rj[,l] = c(rev(r)[1],Inf)
}
if (l > 2) {for (i2 in 2:{l - 1}) {rj[,i2] = c(r[i2 - 1],r[i2])}}
# indicator variable for the regime
Ind = vector(mode = 'numeric',length = N)
for (j in 1:l) {
Ind[Zt > rj[1,j] & Zt <= rj[2,j]] = j
}
Nrg = vector(mode = 'numeric')
listaWj = listaYj = vector('list', l)
Inj_W = function(ti,Yt,Zt,Xt,p,q,d){
yti = vector(mode = "numeric")
for (w in 1:p) {yti = c(yti,Yt[,ti - w])}
xti = vector(mode = "numeric")
for (w in 1:q) {xti = c(xti,Xt[,ti - w])}
zti = vector(mode = "numeric")
for (w in 1:d) {zti = c(zti,Zt[ti - w])}
if (q == 0 & d != 0) {
wtj = c(1,yti,zti)
}else if (d == 0 & q != 0) {
wtj = c(1,yti,xti)
}else if (d == 0 & q == 0) {
wtj = c(1,yti)
}else{
wtj = c(1,yti,xti,zti)}
return(wtj)
}
Inj_W = Vectorize(Inj_W,vectorize.args = "ti")
for (lj in 1:l) {
p = pj[lj]
q = qj[lj]
d = dj[lj]
maxj = max(p,q,d)
Inj = which(Ind == lj)
Inj = Inj[Inj > maxj]
Nrg[lj] = length(Inj)
Yj = matrix(Yt[,Inj],nrow = k,ncol = Nrg[lj])
# matrix Wj =(1,lagY,lagX,lagZ)
if (identical(Inj,integer(0))) {
Wj = matrix(nrow = etaj[lj],ncol = 0)
}else{
Wj = sapply(Inj,Inj_W,Yt = Yt,Zt = Zt,Xt = Xt,p = p,q = q,d = d)
}
listaWj[[lj]] = Wj
listaYj[[lj]] = Yj
}
return(list(Nrg = Nrg,listaW = listaWj,listaY = listaYj,Ind = Ind))
}
lists = compiler::cmpfun(lists)
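# Note on lists(): the thresholds r split the real line into intervals (r_{j-1}, r_j]; Ind marks
# the regime of each time point through Zt, and for each regime j the design vectors
# w_t = (1, Y_{t-1},...,Y_{t-pj}, X_{t-1},...,X_{t-qj}, Z_{t-1},...,Z_{t-dj}) are stacked into
# listaW[[j]], with listaY[[j]] holding the matching responses and Nrg[j] the regime sample size.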
ker = function(t, Ut, ...){
cs = modelU$cs
At = as.matrix(as.data.frame(modelU$phi))
Sig = as.matrix(modelU$sigma)
EU = solve(diag(nu + 1) - At) %*% cs
# stationary variance: vec(V) = (I - A %x% A)^{-1} vec(Sigma); the identity must be of order (nu+1)^2
vecVU = solve(diag((nu + 1)^2) - At %x% At) %*% c(Sig %*% Sig)
VU = ks::invvec(vecVU,ncol = nu + 1, nrow = nu + 1)
val = dmnormB(Ut[,t], EU, VU)
return(c(val))
}
transker = function(t, Ut, ...){
p = length(modelU$phi)
## create matrix
cs = modelU$cs
At = as.matrix(as.data.frame(modelU$phi))
Sig = as.matrix(modelU$sigma)
## make lags and calculate
uti = c()
for (w in 1:p) {uti = c(uti,Ut[,t - w])}
val = dmnormB(Ut[,t], cs + At %*% uti, Sig %*% Sig)
return(c(val))
}
kernU = compiler::cmpfun(Vectorize(ker,vectorize.args = 't'))
transkernU = compiler::cmpfun(Vectorize(transker,vectorize.args = 't'))
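# Note: kernU evaluates the stationary density of the fitted model for Ut, with mean
# EU = (I - A)^{-1} c and covariance recovered from vec(V) = (I - A %x% A)^{-1} vec(Sigma),
# while transkernU evaluates the one-step transition density N(c + A u_{t-1}, Sigma). Both
# enter the Metropolis-Hastings ratio used below to impute the missing Ut values.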
state_space = function(reg, iSS, theta, sigma, ...) {
p = pj[reg]
q = qj[reg]
d = dj[reg]
Aj = ks::invvec(theta[[reg]][,iSS],nrow = k, ncol = etaj[reg])
Ajf = Aj[,-1]
if (p >= 1) {phis = Ajf[,1:(k*p)]}else{phis = NULL}
if (q > 0) {
betas = Ajf[,(k*p + 1):(k*p + nu*q)]
}else{betas = NULL}
if (d > 0) {
deltas = Ajf[,(k*p + nu*q + 1):(k*p + nu*q + d)]
}else{deltas = NULL}
Aj = cbind(Aj[,1],phis, matrix(0,nrow = k, ncol = k*(pmax - p)),
betas,matrix(0,nrow = k, ncol = nu*(qmax - q)),
deltas,matrix(0,nrow = k,ncol = dmax - d))
R_zt = t(cbind(expm::sqrtm(sigma[[reg]][[iSS]]),matrix(0,k,(pmax - 1)*k + nu*qmax + dmax)))
L_zt = c(Aj[,1],rep(0,(pmax - 1)*k + nu*qmax + dmax))
hphi = cbind(diag(k*(pmax - 1)),matrix(0,nrow = k*(pmax - 1), ncol = k + qmax*nu + dmax))
hbeta = cbind(matrix(0,nrow = nu*(qmax - 1), ncol = k*pmax),diag(nu*(qmax - 1)),matrix(0,nrow = nu*(qmax - 1),ncol = nu + dmax))
hdelta = cbind(matrix(0,nrow = dmax - 1, ncol = k*pmax + qmax*nu),diag(dmax - 1),matrix(0,ncol = 1,nrow = dmax - 1))
H_zt = rbind(Aj[,-1],
hphi,
matrix(0,nrow = nu, ncol = ncol(Aj) - 1),
hbeta,
matrix(0,nrow = 1, ncol = ncol(Aj) - 1),
hdelta)
K_zt = cbind(diag(k),matrix(0,k,(pmax - 1)*k + nu*qmax + dmax))
M_zt = rbind(matrix(0,nrow = k*pmax, ncol = nu + 1),
cbind(matrix(0,nrow = nu, ncol = 1),diag(nu)),
matrix(0,nrow = nu*(qmax - 1), ncol = nu + 1),
c(1,rep(0,nu)),
matrix(0,nrow = (dmax - 1), ncol = nu + 1))
rownames(M_zt) = rownames(K_zt) = rownames(L_zt) = rownames(H_zt) = rownames(R_zt) = NULL
colnames(M_zt) = colnames(K_zt) = colnames(L_zt) = colnames(H_zt) = colnames(R_zt) = NULL
return(list(K = K_zt, L = L_zt, H = H_zt, M = M_zt, R = R_zt))
}
state_space = compiler::cmpfun(state_space)
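# Note: state_space() casts regime 'reg' in linear state-space form, zero-padding the coefficient
# blocks up to (pmax, qmax, dmax) so that all regimes share a common state dimension:
# observation Y_t = K alpha_t and state alpha_t = L + H alpha_{t-1} + M U_t + R e_t,
# where R is built from the square root of the regime covariance matrix.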
alphacond = function(t, iA, Ut, Yt, theta, sigma, ...) {
Zt = Ut[1,]
if (nu == 0) {
Xt = matrix(0,ncol = N,nrow = 1)
}else{
Xt = matrix(Ut[-1,],nrow = nu,ncol = N,byrow = TRUE)
}
rj = matrix(nrow = 2,ncol = l)
if (l == 1) {
rj[,1] = c(-Inf,Inf)
}else{
rj[,1] = c(-Inf,r[1])
rj[,l] = c(rev(r)[1],Inf)
}
if (l > 2) {for (i2 in 2:{l - 1}) {rj[,i2] = c(r[i2 - 1],r[i2])}}
# indicator variable for the regime
Ind = vector(mode = 'numeric',length = N)
for (j in 1:l) {
Ind[Zt > rj[1,j] & Zt <= rj[2,j]] = j
}
lj = Ind[t]
p = pj[lj]
q = qj[lj]
d = dj[lj]
Wj = matrix(0,nrow = etaj[lj],ncol = 1)
yti = c()
for (w in 1:p) {yti = c(yti,Yt[,t - w])}
xti = c()
for (w in 1:q) {xti = c(xti,Xt[,t - w])}
zti = c()
for (w in 1:d) {zti = c(zti,Zt[t - w])}
if (q == 0 & d != 0) {
wtj = c(1,yti,zti)
}else if (d == 0 & q != 0) {
wtj = c(1,yti,xti)
}else if (d == 0 & q == 0) {
wtj = c(1,yti)
}else{
wtj = c(1,yti,xti,zti)}
Wj[,1] = wtj
Hj = ks::invvec(theta[[lj]][,iA],nrow = k,ncol = etaj[lj])
val = dmnormB(Yt[,t], {Hj %*% Wj}, sigma[[lj]][[iA]])
return(val)
}
alphacond = compiler::cmpfun(Vectorize(alphacond,vectorize.args = 't'))
#objects for each regimen and iterations
theta_iter = sigma_iter = vector('list', l)
itheta0j = isigma0j = vector('list', l)
iS0j = inu0j = vector('list', l)
Yt_iter = matrix(ncol = niter + burn,nrow = sum(ks::vec(PosNAMat[[1]])))
Ut_iter = matrix(ncol = niter + burn,nrow = sum(ks::vec(PosNAMat[[2]])))
Ytr = Yt # copy of Yt to be updated during the process
Utr = Ut # copy of Ut to be updated during the process
#Ytr[PosNAMat[[1]]] = 0
Yt_iter[,1] = Ytr[PosNAMat[[1]]]
Ut_iter[,1] = Utr[PosNAMat[[2]]]
#set initial values for each regime in each chain
# creation of the chains for sigma and theta
for (lj in 1:l) {
theta_iter[[lj]] = matrix(ncol = niter + burn,nrow = k*etaj[lj])
itheta0j[[lj]] = ini_obj$init$Theta[[lj]]$theta0j
isigma0j[[lj]] = ini_obj$init$Theta[[lj]]$cov0j
theta_iter[[lj]][,1] = mvtnorm::rmvnorm(1,mean = itheta0j[[lj]],sigma = isigma0j[[lj]])
sigma_iter[[lj]] = vector('list',niter + burn)
iS0j[[lj]] = ini_obj$init$Sigma[[lj]]$S0j
inu0j[[lj]] = ini_obj$init$Sigma[[lj]]$nu0j
sigma_iter[[lj]][[1]] = MCMCpack::riwish(v = inu0j[[lj]],S = iS0j[[lj]])
}
#state-space model
K_zti = K_zt = vector('list')
R_zt = vector('list')
L_zt = vector('list')
H_zt = vector('list')
M_zt = vector('list')
# First permutations
for (lj in 1:l) {
listmatrix = state_space(lj, 1, theta_iter,sigma_iter)
R_zt[[lj]] = listmatrix$R
L_zt[[lj]] = listmatrix$L
H_zt[[lj]] = listmatrix$H
K_zt[[lj]] = listmatrix$K
M_zt[[lj]] = listmatrix$M
}
listj = lists(r, Ytr, Ut)
for (ij in 1:N) {
K_zti[[ij]] = K_zt[[listj$Ind[ij]]]
}
# permutations for the Yt chain:
for (ij in PosNAvec[[1]]) {
posNAi = PosNAMat[[1]][,ij]
if (!all(posNAi)) {
K_zti[[ij]] = K_zti[[ij]][order(posNAi),]
Ytr[,ij] = Ytr[,ij][order(posNAi)]
}
K_zti[[ij]][is.na(Ytr[,ij]),] = K_zti[[ij]][is.na(Ytr[,ij]),]*0
Ytr[,ij][is.na(Ytr[,ij])] = 0
}
sersalY = Ytr
# Sampling
message('Estimating missing data ...\n')
pb = utils::txtProgressBar(min = 2, max = niter + burn, style = 3)
for (i in 2:{niter + burn}) {
#State space model
PtC = AlphatC = vector('list',N)
QtC = ytC = vector('list',N)
Pt = Alphat = vector('list',N + 1)
Alphat[[1]] = matrix(0,nrow = k*pmax + nu*qmax + dmax,ncol = 1)
Pt[[1]] = 10*diag(k*pmax + nu*qmax + dmax)
# iterations
Indi = listj$Ind
for (i1 in 1:{N}) {
#Prediction Equations mt|t-1
AlphatC[[i1]] = H_zt[[Indi[i1]]] %*% Alphat[[i1]] + L_zt[[Indi[i1]]] + M_zt[[Indi[i1]]] %*% Ut[,i1]
R2 = R_zt[[Indi[i1]]] %*% diag(k) %*% t(R_zt[[Indi[i1]]])
PtC[[i1]] = H_zt[[Indi[i1]]] %*% Pt[[i1]] %*% t(H_zt[[Indi[i1]]]) + R2
ytC[[i1]] = K_zti[[i1]] %*% AlphatC[[i1]]
QtC[[i1]] = K_zti[[i1]] %*% PtC[[i1]] %*% t(K_zti[[i1]])
#Updating Equations mt
St = PtC[[i1]] %*% t(K_zti[[i1]]) %*% MASS::ginv(QtC[[i1]])
Alphat[[i1 + 1]] = AlphatC[[i1]] + St %*% {sersalY[,i1] - ytC[[i1]]}
Pt[[i1 + 1]] = PtC[[i1]] - St %*% K_zti[[i1]] %*% PtC[[i1]]
}
#sampling for state vector (pg37)
PT = AlphaT = vector('list',N + 1)
AlphaT[[N + 1]] = Alphat[[N + 1]]
PT[[N + 1]] = Pt[[N + 1]]
for (i1 in rev(1:{N})) {
Eig = eigen(Pt[[i1 + 1]])$values
Eig = any(Mod(Eig) > exp(-6))
if (Eig) {
estUp = MASS::mvrnorm(1,AlphaT[[i1 + 1]],PT[[i1 + 1]])
}else{
estUp = AlphaT[[i1 + 1]]
}
R2 = R_zt[[Indi[i1]]][1:k,] %*% diag(k) %*% t(R_zt[[Indi[i1]]][1:k,])
Qt = MASS::ginv(H_zt[[Indi[i1]]][1:k,] %*% Pt[[i1]] %*% t(H_zt[[Indi[i1]]][1:k,]) + R2)
Bt = Pt[[i1]] %*% t(H_zt[[Indi[i1]]][1:k,]) %*% Qt
if (nu == 0) {
Gt = estUp[1:k] - M_zt[[Indi[i1]]][1:k,]*Ut[,i1] - L_zt[[Indi[i1]]][1:k] - H_zt[[Indi[i1]]][1:k,] %*% Alphat[[i1]]
}else{
Gt = estUp[1:k] - M_zt[[Indi[i1]]][1:k,] %*% Ut[,i1] - L_zt[[Indi[i1]]][1:k] - H_zt[[Indi[i1]]][1:k,] %*% Alphat[[i1]]
}
AlphaT[[i1]] = Alphat[[i1]] + Bt %*% Gt
PT[[i1]] = Pt[[i1]] - Bt %*% H_zt[[Indi[i1]]][1:k,] %*% Pt[[i1]]
PT[[i1]] = symm(PT[[i1]])
}
# Simulation of missing data in Yt
for (i1 in PosNAvec[[1]]) {
Ysim = as.matrix(MASS::mvrnorm(1,AlphaT[[i1 + 1]],PT[[i1 + 1]]))
Yt_iter[,i][PosNAvecT[[1]] == i1] = Ysim[1:k][PosNAMat[[1]][,i1]]
Ytr[,i1] = Ysim[1:k]
AlphaT[[i1 + 1]] = Ysim
}
#random walk U
for (i1 in PosNAvec[[2]]) {
ek = mvtnorm::rmvnorm(1,mean = rep(0,nu + 1), sigma = cU*diag(nu + 1))
# Simulation of the proposal
Usim = Utr
Usim[,i1] = Utr[,i1] + c(ek)
Usim[!PosNAMat[[2]][,i1],i1] = Utr[!PosNAMat[[2]][,i1],i1]
# Compute the acceptance probability according to the case
# Numerator
if (i1 <= b) {
prod1N = Reduce('*',kernU(1:b,Usim))
prod2N = Reduce('*',alphacond(1:b,i - 1,Usim,Ytr,theta_iter,sigma_iter))
prod3N = Reduce('*',transkernU({b + 1}:{2*b},Usim))
prod1D = Reduce('*',kernU(1:b,Utr))
prod2D = Reduce('*',alphacond(1:b,i - 1,Utr,Ytr,theta_iter,sigma_iter))
prod3D = Reduce('*',transkernU({b + 1}:{2*b},Utr))
val = (prod1N*prod2N*prod3N)/(prod1D*prod2D*prod3D)
}else{
prod1N = alphacond(i1,i - 1,Usim,Ytr,theta_iter,sigma_iter)[[1]]
prod2N = Reduce('*',transkernU(i1:{i1 + b},Usim))
prod1D = alphacond(i1,i - 1,Utr,Ytr,theta_iter,sigma_iter)[[1]]
prod2D = Reduce('*',transkernU(i1:{i1 + b},Utr))
val = (prod1N*prod2N)/(prod1D*prod2D)
}
if (val >= stats::runif(1)) {
Utr = Usim
Ut_iter[,i][PosNAvecT[[2]] == i1] = Usim[PosNAMat[[2]][,i1],i1]
}else{
Ut_iter[,i][PosNAvecT[[2]] == i1] = Utr[,i1][PosNAMat[[2]][,i1]]
}
}
listj = lists(r, Ytr, Utr)
for (lj in 1:l) {
Wj = listj$listaW[[lj]]
Yj = listj$listaY[[lj]]
Nj = listj$Nrg[lj]
yj = c(Yj)
theta0j = itheta0j[[lj]]
sigma0j = isigma0j[[lj]]
S0j = iS0j[[lj]]
nu0j = inu0j[[lj]]
Vj = solve(Wj %*% t(Wj) %x% solve(sigma_iter[[lj]][[i - 1]]) + solve(sigma0j))
thetaj = Vj %*% {(Wj %x% solve(sigma_iter[[lj]][[i - 1]])) %*% yj + solve(sigma0j) %*% theta0j}
theta_iter[[lj]][,i] = mvtnorm::rmvnorm(1,mean = thetaj,sigma = Vj)
Hj = ks::invvec(theta_iter[[lj]][,i],nrow = k,ncol = etaj[lj])
Sj = (Yj - Hj %*% Wj) %*% t(Yj - Hj %*% Wj)
sigma_iter[[lj]][[i]] = MCMCpack::riwish(v = Nj + nu0j,S = Sj + S0j)
}
# Update the state-space matrices
for (lj in 1:l) {
listmatrix = state_space(lj, i, theta_iter,sigma_iter)
R_zt[[lj]] = listmatrix$R
L_zt[[lj]] = listmatrix$L
H_zt[[lj]] = listmatrix$H
K_zt[[lj]] = listmatrix$K
M_zt[[lj]] = listmatrix$M
}
utils::setTxtProgressBar(pb,i)
}
close(pb)
message('Saving results ... \n')
# exits
# names
Names_Yt = paste0("(",1:N,",",1,")")
if (k > 1) {
for (i in 2:k) {
Names_Yt = rbind(Names_Yt,paste0("(",1:N,",",i,")"))
}
}
Names_Zt = paste0("(",1:N,",",1,")")
if (nu != 0) {
Names_Xt = paste0("(",1:N,",",1,")")
if (nu > 1) {
for (i in 2:nu) {
Names_Xt = rbind(Names_Xt,paste0("(",1:N,",",i,")"))
}
}
}else{Names_Xt = NULL}
Names_Ut = rbind(Names_Zt,Names_Xt)
# Table of estimations
Yt_chains = Yt_iter[,-c(1:burn)]
if (nu == 0) {Ut_chains = as.matrix(Ut_iter[,-c(1:burn)])
}else{Ut_chains = Ut_iter[,-c(1:burn)]}
Test_Yt = matrix(nrow = nrow(Yt_iter),ncol = 3)
Test_Ut = matrix(nrow = nrow(Ut_iter),ncol = 3)
colnames(Test_Yt) = colnames(Test_Ut) = c(paste('lower limit ',(1 - level)/2*100,'%',sep = ''),'mean',paste('upper limit ',(1 + level)/2*100,'%',sep = ''))
Test_Yt[,1] = apply(Yt_chains,1,stats::quantile,probs = (1 - level)/2)
Test_Yt[,3] = apply(Yt_chains,1,stats::quantile,probs = (1 + level)/2)
est_Yt = apply(Yt_chains,1,mean)
Test_Yt[,2] = est_Yt
rownames(Test_Yt) = Names_Yt[PosNAMat[[1]]]
Test_Ut[,1] = apply(Ut_chains,1,stats::quantile,probs = (1 - level)/2)
Test_Ut[,3] = apply(Ut_chains,1,stats::quantile,probs = (1 + level)/2)
est_Ut = apply(Ut_chains,1,mean)
Test_Ut[,2] = est_Ut
rownames(Test_Ut) = Names_Ut[PosNAMat[[2]]]
if (nu == 0) {
Test_Zt = matrix(nrow = length(Names_Zt[PosNAMat[[2]]]),ncol = 3)
}else{
Test_Zt = matrix(nrow = length(Names_Zt[PosNAMat[[2]][1,]]),ncol = 3)
}
colnames(Test_Zt) = c(paste('lower limit ',(1 - level)/2*100,'%',sep = ''),'mean',paste('upper limit ',(1 + level)/2*100,'%',sep = ''))
if (nu == 0) {
tab_name_Zt = Names_Zt[PosNAMat[[2]]]
}else{
tab_name_Zt = Names_Zt[PosNAMat[[2]][1,]]
}
Test_Zt[,1] = Test_Ut[tab_name_Zt,1]
Test_Zt[,2] = Test_Ut[tab_name_Zt,2]
Test_Zt[,3] = Test_Ut[tab_name_Zt,3]
rownames(Test_Zt) = tab_name_Zt
if (nu != 0) {
Test_Xt = matrix(nrow = length(Names_Xt[PosNAMat[[2]][-1,]]),ncol = 3)
colnames(Test_Xt) = c(paste('lower limit ',(1 - level)/2*100,'%',sep = ''),'mean',paste('upper limit ',(1 + level)/2*100,'%',sep = ''))
tab_name_Xt = Names_Xt[PosNAMat[[2]][-1,]]
Test_Xt[,1] = Test_Ut[tab_name_Xt,1]
Test_Xt[,2] = Test_Ut[tab_name_Xt,2]
Test_Xt[,3] = Test_Ut[tab_name_Xt,3]
rownames(Test_Xt) = tab_name_Xt
}
ini_obj$tsregime_obj$Yt[PosNAvec[[1]],] = matrix(Test_Yt[,2],ncol = k,byrow = T)
ini_obj$tsregime_obj$Zt[PosNAvec[[2]],] = matrix(Test_Ut[,2],ncol = nu + 1,byrow = T)[,1]
ini_obj$tsregime_obj$Xt[PosNAvec[[2]],] = matrix(Test_Ut[,2],ncol = nu + 1,byrow = T)[,-1]
if (chain) {Chains = vector('list')}
if (any(is.na(Yt)) & any(is.na(Zt)) & any(is.na(Xt))) {
estimates = list(Yt = Test_Yt, Zt = Test_Zt, Xt = Test_Xt)
if (chain) {
Chains$Yt = Yt_chains
Chains$Zt = Ut_chains[1:sum(PosNAMat[[2]][1,]),]
Chains$Xt = Ut_chains[-c(1:sum(PosNAMat[[2]][1,])),]
}
}else if (any(is.na(Yt)) & any(is.na(Zt)) & !any(is.na(Xt))) {
estimates = list(Yt = Test_Yt, Zt = Test_Zt)
if (chain) {
Chains$Yt = Yt_chains
Chains$Zt = Ut_chains
}
}else if (any(is.na(Yt)) & !any(is.na(Zt)) & any(is.na(Xt))) {
estimates = list(Yt = Test_Yt, Xt = Test_Xt)
if (chain) {
Chains$Yt = Yt_chains
Chains$Xt = Ut_chains
}
}else if (!any(is.na(Yt)) & any(is.na(Zt)) & any(is.na(Xt))) {
estimates = list(Zt = Test_Zt, Xt = Test_Xt)
if (chain) {
Chains$Zt = Ut_chains[1:sum(PosNAMat[[2]][1,]),]
Chains$Xt = Ut_chains[-c(1:sum(PosNAMat[[2]][1,])),]
}
}else if (any(is.na(Yt)) & !any(is.na(Zt)) & !any(is.na(Xt))) {
estimates = list(Yt = Test_Yt)
if (chain) {
Chains$Yt = Yt_chains
}
}else if (!any(is.na(Yt)) & any(is.na(Zt)) & !any(is.na(Xt))) {
estimates = list(Zt = Test_Zt)
if (chain) {
Chains$Zt = Ut_chains
}
}else if (!any(is.na(Yt)) & !any(is.na(Zt)) & any(is.na(Xt))) {
estimates = list(Xt = Test_Xt)
if (chain) {
Chains$Xt = Ut_chains
}
}
compiler::enableJIT(0)
if (chain) {
result = list(tsregime = ini_obj$tsregime_obj, estimates = estimates, Chains = Chains)
}else{
result = list(tsregime = ini_obj$tsregime_obj, estimates = estimates)
}
class(result) = 'regime_missing'
return(result)
}
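# A minimal usage sketch (hedged: 'data_na' is a hypothetical tsregime object whose Yt, Zt or Xt
# contains NA values, and l, r and the orders are assumed known); wrapped in if (FALSE) so it
# never runs at load time.
if (FALSE) {
  ini = mtarinipars(tsregime_obj = data_na,
                    list_model = list(pars = list(l = 2, r = 0,
                      orders = list(pj = c(1,1), qj = c(0,0), dj = c(0,0)))))
  fit = mtarmissing(ini_obj = ini, niter = 500, chain = TRUE)
  fit$estimates$Yt # posterior means and credibility limits for the missing Yt entries
}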
|
/scratch/gouwar.j/cran-all/cranData/BMTAR/R/mtarmissing.R
|
#==================================================================================================#
# Date: 07/04/2019
# Description:
#-> When r_init is NULL, the default is the set of quantiles that separates the observations into l equal parts
#-> When r is unknown, a Metropolis-Hastings algorithm with a uniform proposal is used to sample from its posterior
# Function: mtarns
#==================================================================================================#
mtarns = function(ini_obj, level = 0.95, burn = NULL, niter = 1000, chain = FALSE, r_init = NULL){
if (!is.logical(chain)) {stop('chain must be a logical object')}
# checking
if (!inherits(ini_obj, 'regime_inipars')) {
stop('ini_obj must be a regime_inipars object')
}
# data
Yt = ini_obj$tsregime_obj$Yt
Ut = cbind(ini_obj$tsregime_obj$Zt,ini_obj$tsregime_obj$Xt)
k = ini_obj$tsregime_obj$k
N = ini_obj$tsregime_obj$N
nu = ini_obj$tsregime_obj$nu
if (is.null(nu)) {nu = 0}
# parameters
r = ini_obj$pars$r
l = ini_obj$pars$l
if (is.null(ini_obj$pars$orders)) {
stop('orders must be known and enter in list_model$pars$orders to use mtarns')
}
orders = ini_obj$pars$orders
# code
burn = ifelse(is.null(burn),round(0.3*niter),burn)
other = 100
pj = orders$pj
qj = orders$qj
dj = orders$dj
Yt = t(Yt)
if (l == 1) {
if (is.null(Ut)) {
Ut = matrix(0, ncol = N,nrow = 1)
}else{
Ut = rbind(matrix(0, ncol = N,nrow = 1),t(Ut)) # only for co-variable
}
}else{Ut = t(Ut)}
Zt = Ut[1,]
if (nu == 0) {
Xt = matrix(0,ncol = N,nrow = 1)
qj = rep(0,l)
}else{
Xt = t(ini_obj$tsregime_obj$Xt)
}
eta = 1 + pj*k + qj*nu + dj
# functions and values for r
dmunif = function(r,a,b){
names(a) = names(b) = NULL
volume = ((b - a)^{l - 1})/(factorial(l - 1))
# the prior is positive only if every threshold lies in [a,b] and the thresholds are increasing
prob = 1
for (i in 1:{l - 1}) {
if (!{r[i] >= a & r[i] <= b}) {prob = 0}
}
if (l > 2) {
for (j in 1:{l - 2}) {
if (r[j] >= r[j + 1]) {prob = 0}
}
}
rj = matrix(nrow = 2,ncol = l)
rj[,1] = c(-Inf,r[1])
rj[,l] = c(rev(r)[1],Inf)
if (l > 2) {
for (i2 in 2:{l - 1}) {rj[,i2] = c(r[i2 - 1],r[i2])}
}
Ind = c()
for (j in 1:l) {
for (w in 1:N) {
if (Zt[w] > rj[1,j] & Zt[w] <= rj[2,j]) {
Ind[w] = j
}
}
}
Nrg = c()
for (lj in 1:l) {
Nrg[lj] = length(Ind[Ind == lj])
}
if (sum(Nrg/sum(Nrg) > 0.2) == l) {prob = 1*prob}else{prob = 0*prob}
return(prob/volume)
}
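# Note: dmunif is the prior density of the thresholds, uniform on the ordered set
# {a <= r_1 < ... < r_{l-1} <= b} whose volume is (b - a)^{l-1}/(l-1)!, further restricted so
# that every regime keeps more than 20% of the observations; it returns 0 when any constraint fails.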
# initials values for r
rini = ini_obj$init$r
if (is.null(r)) {
a = ifelse(is.null(rini$za),min(Zt),stats::quantile(Zt,probs = rini$za))
b = ifelse(is.null(rini$zb),max(Zt),stats::quantile(Zt,probs = rini$zb))
}
lists = function(r,...){
rj = matrix(nrow = 2,ncol = l)
if (l == 1) {
rj[,1] = c(-Inf,Inf)
}else{
rj[,1] = c(-Inf,r[1])
rj[,l] = c(rev(r)[1],Inf)
}
if (l > 2) {for (i2 in 2:{l - 1}) {rj[,i2] = c(r[i2 - 1],r[i2])}}
# indicator variable for the regime
Ind = vector(mode = 'numeric',length = N)
for (j in 1:l) {
Ind[Zt > rj[1,j] & Zt <= rj[2,j]] = j
}
Nrg = vector(mode = 'numeric')
listaWj = listaYj = vector('list', l)
Inj_W = function(ti,Yt,Zt,Xt,p,q,d){
yti = vector(mode = "numeric")
for (w in 1:p) {yti = c(yti,Yt[,ti - w])}
xti = vector(mode = "numeric")
for (w in 1:q) {xti = c(xti,Xt[,ti - w])}
zti = vector(mode = "numeric")
for (w in 1:d) {zti = c(zti,Zt[ti - w])}
if (q == 0 & d != 0) {
wtj = c(1,yti,zti)
}else if (d == 0 & q != 0) {
wtj = c(1,yti,xti)
}else if (d == 0 & q == 0) {
wtj = c(1,yti)
}else{
wtj = c(1,yti,xti,zti)}
return(wtj)
}
Inj_W = Vectorize(Inj_W,vectorize.args = "ti")
for (lj in 1:l) {
p = pj[lj]
q = qj[lj]
d = dj[lj]
maxj = max(p,q,d)
Inj = which(Ind == lj)
Inj = Inj[Inj > maxj]
Nrg[lj] = length(Inj)
Yj = matrix(Yt[,Inj],nrow = k,ncol = Nrg[lj])
# matrix Wj =(1,lagY,lagX,lagZ)
if (identical(Inj,integer(0))) {
Wj = matrix(nrow = eta[lj],ncol = 0)
}else{
Wj = sapply(Inj,Inj_W,Yt = Yt,Zt = Zt,Xt = Xt,p = p,q = q,d = d)
}
listaWj[[lj]] = Wj
listaYj[[lj]] = Yj
}
return(list(Nrg = Nrg,listaW = listaWj,listaY = listaYj,Ind = Ind))
}
fycond = function(i2,listr,...){
acum = 0
Nrg = listr$Nrg
for (lj in 1:l) {
yj = c(listr$listaY[[lj]])
Wj = listr$listaW[[lj]]
if (is.null(Sigma)) {
acum = acum + t(yj - {t(Wj) %x% diag(k)} %*% theta_iter[[lj]][,i2]) %*% {
diag(Nrg[lj]) %x% solve(sigma_iter[[lj]][[i2]])} %*% (yj - {t(Wj) %x% diag(k)} %*% theta_iter[[lj]][,i2])
}else{
acum = acum + t(yj - {t(Wj) %x% diag(k)} %*% theta_iter[[lj]][,i2]) %*% {
diag(Nrg[lj]) %x% solve(sigma[[lj]])} %*% (yj - {t(Wj) %x% diag(k)} %*% theta_iter[[lj]][,i2])
}
}
if (is.null(Sigma)) {
sigmareg = lapply(sigma_iter,function(x){x[[i2]]})
val = prodB(Brobdingnag::as.brob(sapply(sigmareg,function(x){
return(c(determinant(x,logarithm = FALSE)$modulus))}))^{-Nrg/2})*exp(-1/2*Brobdingnag::as.brob(acum))
}else{
val = prodB(Brobdingnag::as.brob(sapply(sigma,function(x){
return(c(determinant(x,logarithm = FALSE)$modulus))}))^{-Nrg/2})*exp(-1/2*Brobdingnag::as.brob(acum))
}
return(val)
}
# objects for each regimen and iterations
Sigma = ini_obj$pars$Sigma
theta_iter = itheta0j = icov0j = vector('list')
if (is.null(Sigma)) {
sigma_iter = iS0j = inu0j = vector('list')
}else{
sigma = vector('list')
}
# set initial values for each regime in each chain
thetaini = ini_obj$init$Theta
for (lj in 1:l) {
theta_iter[[lj]] = matrix(ncol = niter + burn + other,nrow = k*eta[lj])
itheta0j[[lj]] = thetaini[[paste0('R',lj)]]$theta0j
icov0j[[lj]] = thetaini[[paste0('R',lj)]]$cov0j
theta_iter[[lj]][,1] = mvtnorm::rmvnorm(1,mean = itheta0j[[lj]],sigma = icov0j[[lj]])
if (is.null(Sigma)) {
sigma_iter[[lj]] = vector('list',length = niter + burn + other)
sigmaini = ini_obj$init$Sigma
iS0j[[lj]] = sigmaini[[paste0('R',lj)]]$S0j
inu0j[[lj]] = sigmaini[[paste0('R',lj)]]$nu0j
sigma_iter[[lj]][[1]] = MCMCpack::riwish(v = inu0j[[lj]],S = iS0j[[lj]])
}else{
sigma[[lj]] = Sigma[[paste0('R',lj)]]
}
}
# last check
if (is.null(r) & l == 1) {r = 0
}else if (is.null(r) & l != 1) {
r_iter = matrix(ncol = niter + burn + other,nrow = l - 1)
if (!is.null(r_init)) {
if (is.numeric(r_init) & length(r_init) == {l - 1}) {
if (dmunif(r_init,a,b) == 0) {
stop('r_init must be in Zt range and for l >= 2, r[i] < r[i+1]')
}
}else{stop('r_init must be a vector of length l - 1')}
}
if (l != 1) {
if (is.null(r_init)) {
r_iter[,1] = c(stats::quantile(Zt, probs = 1/l*(1:{l - 1})))
}else{
r_iter[,1] = r_init
}
}
}
# iterations gibbs and metropolis for r unknown
if (is.null(r)) {
message('Estimating non-structural parameters and threshold(s) ...','\n')
pb = utils::txtProgressBar(min = 2, max = niter + burn + other, style = 3)
acep = 0
for (i in 2:{niter + burn + other}) {
listj = lists(r_iter[,i - 1])
for (lj in 1:l) {
Wj = listj$listaW[[lj]]
Yj = listj$listaY[[lj]]
Nj = listj$Nrg[lj]
yj = c(Yj)
theta0j = itheta0j[[lj]]
sigma0j = icov0j[[lj]]
if (!is.null(Sigma)) {
Vj = solve(Wj %*% t(Wj) %x% solve(sigma[[lj]] %*% sigma[[lj]]) + solve(sigma0j))
thetaj = Vj %*% {(Wj %x% solve(sigma[[lj]] %*% sigma[[lj]])) %*% yj + solve(sigma0j) %*% theta0j}
theta_iter[[lj]][,i] = mvtnorm::rmvnorm(1,mean = thetaj,sigma = Vj)
}else{
S0j = iS0j[[lj]]
nu0j = inu0j[[lj]]
Vj = solve(Wj %*% t(Wj) %x% solve(sigma_iter[[lj]][[i - 1]]) + solve(sigma0j))
thetaj = Vj %*% {(Wj %x% solve(sigma_iter[[lj]][[i - 1]])) %*% yj + solve(sigma0j) %*% theta0j}
theta_iter[[lj]][,i] = mvtnorm::rmvnorm(1,mean = thetaj,sigma = Vj)
Hj = ks::invvec(theta_iter[[lj]][,i],nrow = k,ncol = eta[lj])
Sj = (Yj - Hj %*% Wj) %*% t(Yj - Hj %*% Wj)
sigma_iter[[lj]][[i]] = MCMCpack::riwish(v = Nj + nu0j,S = Sj + S0j)
}
}
# use of metropolis with random walk
if (i <= other) {
ek = mvtnorm::rmvnorm(1,mean = rep(0,l - 1),sigma = 0.5*diag(l - 1))
}else{
ek = stats::runif(l - 1,-abs(rini$val_rmh),abs(rini$val_rmh))
}
rk = r_iter[,i - 1] + ek
listrk = lists(rk)
pr = dmunif(rk,a,b)*fycond(i,listrk)
px = dmunif(r_iter[,i - 1],a,b)*fycond(i,listj)
alpha = min(1,as.numeric(pr/px))
if (alpha >= stats::runif(1)) {
r_iter[,i] = rk
acep = acep + 1
}else{
r_iter[,i] = r_iter[,i - 1]
}
utils::setTxtProgressBar(pb,i)
}
close(pb)
message('\n')
}else{#r known
listj = lists(r)
for (lj in 1:l) {
Wj = listj$listaW[[lj]]
Yj = listj$listaY[[lj]]
Nj = listj$Nrg[lj]
yj = c(Yj)
theta0j = itheta0j[[lj]]
sigma0j = icov0j[[lj]]
message('Estimating non-structural parameters with threshold(s) known ...',paste0('Reg_',lj),'\n')
pb = utils::txtProgressBar(min = 2, max = niter + burn + other, style = 3)
for (i in 2:{niter + burn + other}) {
if (!is.null(Sigma)) {
Vj = solve(Wj %*% t(Wj) %x% solve(sigma[[lj]] %*% sigma[[lj]]) + solve(sigma0j))
thetaj = Vj %*% {(Wj %x% solve(sigma[[lj]] %*% sigma[[lj]])) %*% yj + solve(sigma0j) %*% theta0j}
theta_iter[[lj]][,i] = mvtnorm::rmvnorm(1,mean = thetaj,sigma = Vj)
}else{
S0j = iS0j[[lj]]
nu0j = inu0j[[lj]]
Vj = solve(Wj %*% t(Wj) %x% solve(sigma_iter[[lj]][[i - 1]]) + solve(sigma0j))
thetaj = Vj %*% {(Wj %x% solve(sigma_iter[[lj]][[i - 1]])) %*% yj + solve(sigma0j) %*% theta0j}
theta_iter[[lj]][,i] = mvtnorm::rmvnorm(1,mean = thetaj,sigma = Vj)
Hj = ks::invvec(theta_iter[[lj]][,i],nrow = k,ncol = eta[lj])
Sj = (Yj - Hj %*% Wj) %*% t(Yj - Hj %*% Wj)
sigma_iter[[lj]][[i]] = MCMCpack::riwish(v = Nj + nu0j,S = Sj + S0j)
}
utils::setTxtProgressBar(pb,i)
}
message('\n')
}
close(pb)
}
message('Saving results ... \n')
# objects for chains and info in each regime
Rest = thetaest = thetachain = vector('list', l)
names(Rest) = names(thetaest) = names(thetachain) = paste0('R',1:l)
if (is.null(Sigma)) {
sigmaest = sigmachain = vector('list', l)
names(sigmaest) = names(sigmachain) = paste0('R',1:l)
}
# save chains and creation of the 'regime' type object
if (is.null(r)) {
if (l > 2) {
r_iter = r_iter[,-c(1:{other + burn})]
}else{
r_iter = r_iter[-c(1:{other + burn})]
}
rest = matrix(nrow = l - 1,ncol = 3)
colnames(rest) =
c(paste('lower limit ',(1 - level)/2*100,'%',sep = ''),'mean',paste('upper limit ',(1 + level)/2*100,'%',sep = ''))
rchain = matrix(r_iter,ncol = niter,nrow = l - 1)
rest[,1] = apply(rchain,1,stats::quantile,probs = (1 - level)/2)
rest[,3] = apply(rchain,1,stats::quantile,probs = (1 + level)/2)
rest[,2] = apply(rchain,1,mean)
rvec = c(rest[,2],'prop %' = acep/niter*100)
}else{
rvec = c('mean' = r)
}
# logLik
listj = lists(rvec[1:max(l - 1, 1)]) # use all l-1 estimated thresholds (rvec also carries the acceptance rate)
logLikj = vector(mode = "numeric")
for (lj in 1:l) {
theta_iter[[lj]] = theta_iter[[lj]][,-c(1:other)]
# save chains of theta
thetachain[[lj]] = theta_iter[[lj]][,-c(1:burn)]
# credibility intervals for theta
vectheta = matrix(nrow = k*eta[lj],ncol = 3)
colnames(vectheta) = c(paste0('lower limit ',(1 - level)/2*100,'%'),'mean',paste0('upper limit ',(1 + level)/2*100,'%'))
vectheta[,1] = apply(thetachain[[lj]],1,stats::quantile,probs = (1 - level)/2)
vectheta[,3] = apply(thetachain[[lj]],1,stats::quantile,probs = (1 + level)/2)
vectheta[,2] = apply(thetachain[[lj]],1,mean)
thetaest[[lj]] = vectheta
if (nu != 0 & qj[lj] != 0 & dj[lj] != 0) {
rownames(vectheta) =
rep(c('phi0',rep(paste0('phi',1:pj[lj]),each = k),rep(paste0('beta',1:qj[lj]),each = nu),paste0('delta',1:dj[lj])),k)
}else if (nu != 0 & qj[lj] != 0 & dj[lj] == 0) {
rownames(vectheta) =
rep(c('phi0',rep(paste0('phi',1:pj[lj]),each = k),rep(paste0('beta',1:qj[lj]),each = nu)),k)
}else if (qj[lj] == 0 & dj[lj] != 0) {
rownames(vectheta) =
rep(c('phi0',rep(paste0('phi',1:pj[lj]),each = k),paste0('delta',1:dj[lj])),k)
}else if (qj[lj] == 0 & dj[lj] == 0) {
rownames(vectheta) =
rep(c('phi0',rep(paste0('phi',1:pj[lj]),each = k)),k)
}
if (is.null(Sigma)) {
sigma_iter[[lj]] = sigma_iter[[lj]][-c(1:other)]
SigmaPrep = function(x){return(c(expm::sqrtm(matrix(x,k,k))))}
# save chains of sigma
sigmachain[[lj]] = sapply(sigma_iter[[lj]][-c(1:burn)], ks::vec)
# credibility intervals for sigma^1/2
vecsigma = matrix(nrow = k*k,ncol = 3)
colnames(vecsigma) = c(paste0('lower limit ',(1 - level)/2*100,'%'),'mean',paste0('upper limit ',(1 + level)/2*100,'%'))
rownames(vecsigma) = c(sapply(1:k, function(x){paste0(1:k,x)}))
if (k == 1) {
vecsigma[,1] = sqrt(stats::quantile(sigmachain[[lj]],probs = (1 - level)/2))
vecsigma[,3] = sqrt(stats::quantile(sigmachain[[lj]],probs = (1 + level)/2))
vecsigma[,2] = sqrt(mean(sigmachain[[lj]]))
}else{
vecsigma[,1] = SigmaPrep(apply(sigmachain[[lj]],1,stats::quantile,probs = (1 - level)/2))
vecsigma[,3] = SigmaPrep(apply(sigmachain[[lj]],1,stats::quantile,probs = (1 + level)/2))
vecsigma[,2] = SigmaPrep(apply(sigmachain[[lj]],1,mean))
}
sigmaest[[lj]] = vecsigma
}
# creation of the 'regime' type object
p = pj[lj]
q = qj[lj]
d = dj[lj]
if (q == 0 & d == 0) {
thetaind = c(0,(10 + (1:p)) %x% rep(1,k))
}else if (q != 0 & d == 0) {
thetaind = c(0,(10 + (1:p)) %x% rep(1,k),(20 + (1:q)) %x% rep(1,nu))
}else if (q == 0 & d != 0) {
thetaind = c(0,(10 + (1:p)) %x% rep(1,k),30 + (1:d))
}else{
thetaind = c(0,(10 + (1:p)) %x% rep(1,k),(20 + (1:q)) %x% rep(1,nu),30 + (1:d))
}
Thetaj = ks::invvec(thetaest[[lj]][,2],ncol = eta[lj],nrow = k)
Ri = vector('list')
Ri$cs = matrix(Thetaj[,thetaind == 0],nrow = k,ncol = 1)
Ri$phi = vector('list', p)
names(Ri$phi) = paste0('phi',1:p)
for (j in 1:p) {
Ri$phi[[j]] = matrix(Thetaj[,thetaind == (10 + j)],nrow = k,ncol = k)
}
if (q != 0) {
Ri$beta = vector('list', q)
names(Ri$beta) = paste0('beta',1:q)
for (j in 1:q) {Ri$beta[[j]] = matrix(Thetaj[,thetaind == (20 + j)],nrow = k,ncol = nu)}
}
if (d != 0) {
Ri$delta = vector('list', d)
names(Ri$delta) = paste0('delta',1:d)
for (j in 1:d) {Ri$delta[[j]] = matrix(Thetaj[,thetaind == (30 + j)],nrow = k,ncol = 1)}
}
if (is.null(Sigma)) {
Ri$sigma = ks::invvec(sigmaest[[lj]][,2],ncol = k,nrow = k)
}else{
Ri$sigma = sigma[[lj]]
}
Rest[[lj]] = mtaregime(orders = list(p = p,q = q,d = d),cs = Ri$cs,
Phi = Ri$phi,Beta = Ri$beta,Delta = Ri$delta,
Sigma = Ri$sigma)
Wj = listj$listaW[[lj]]
Yj = listj$listaY[[lj]]
Nj = listj$Nrg[lj]
yj = c(Yj)
Hj = ks::invvec(thetaest[[lj]][,2],nrow = k,ncol = eta[lj])
Sj = (Yj - Hj %*% Wj) %*% t(Yj - Hj %*% Wj)
logLikj[lj] = log(det(Sj/Nj))
}
# exits
## chain
if (chain) {
Chain = vector('list')
Chain$Theta = thetachain
if (is.null(r) & l != 1) {Chain$r = rchain}
if (is.null(Sigma)) {Chain$Sigma = sigmachain}
}
## estimates and credibility interval
estimates = vector('list')
estimates$Theta = thetaest
if (is.null(r) & l != 1) {estimates$r = rest}
if (is.null(Sigma)) {estimates$Sigma = sigmaest}
data = ini_obj$tsregime_obj
# fitted.values and residuals
Yt_fit = Yt_res = matrix(ncol = N,nrow = k)
for (t in 1:N) {
lj = listj$Ind[t]
p = pj[lj]
q = qj[lj]
d = dj[lj]
Wj = matrix(0,nrow = eta[lj],ncol = 1)
yti = vector(mode = "numeric")
for (w in 1:p) {
if (t - w > 0) {yti = c(yti,Yt[,t - w])
}else{yti = c(yti,rep(0,k))}}
if (identical(yti,numeric(0))) {yti = rep(0,k*p)}
xti = vector(mode = "numeric")
for (w in 1:q) {
if (t - w > 0) {xti = c(xti,Xt[,t - w])
}else{xti = c(xti,rep(0,nu))}}
if (identical(xti,numeric(0))) {xti = rep(0,nu*q)}
zti = vector(mode = "numeric")
for (w in 1:d) {
if (t - w > 0) {zti = c(zti,Zt[t - w])
}else{zti = c(zti,0)}}
if (identical(zti,numeric(0))) {zti = rep(0,d)}
if (q == 0 & d != 0) {
wtj = c(1,yti,zti)
}else if (d == 0 & q != 0) {
wtj = c(1,yti,xti)
}else if (d == 0 & q == 0) {
wtj = c(1,yti)
}else{
wtj = c(1,yti,xti,zti)}
Wj[,1] = wtj
Hj = ks::invvec(thetaest[[lj]][,2],nrow = k,ncol = eta[lj])
Sig = as.matrix(Rest[[lj]]$sigma)
Yt_fit[,t] = Hj %*% Wj
Yt_res[,t] = solve(Sig) %*% (Yt[,t] - Yt_fit[,t])
}
if (chain) {
results = list(Nj = listj$Nrg,estimates = estimates,regime = Rest,Chain = Chain,
residuals = t(Yt_res), fitted.values = t(Yt_fit),
logLikj = logLikj,data = data,r = rvec,orders = list(pj = pj,qj = qj,dj = dj))
}else{
results = list(Nj = listj$Nrg,estimates = estimates,regime = Rest,
residuals = t(Yt_res), fitted.values = t(Yt_fit),
logLikj = logLikj,data = data,r = rvec,orders = list(pj = pj,qj = qj,dj = dj))
}
class(results) = 'regime_model'
return(results)
}
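# A minimal usage sketch (hedged: 'data_sim' is a hypothetical tsregime object with a threshold
# process Zt); wrapped in if (FALSE) so it never runs at load time. With r omitted from pars,
# mtarns also samples the threshold via the Metropolis-Hastings step above.
if (FALSE) {
  ini = mtarinipars(tsregime_obj = data_sim,
                    list_model = list(pars = list(l = 2,
                      orders = list(pj = c(1,1), qj = c(0,0), dj = c(0,0)))))
  fit = mtarns(ini_obj = ini, niter = 2000, burn = 500, chain = TRUE)
  fit$r         # posterior mean of the threshold(s) plus the MH acceptance percentage
  fit$estimates # credibility intervals for Theta (and Sigma when it was unknown)
}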
|
/scratch/gouwar.j/cran-all/cranData/BMTAR/R/mtarns.R
|
#==================================================================================================#
# Date: 14/04/2020
# Description: Function to estimate the number of regimes when it is unknown. The default method is
# Metropolized Carlin and Chib; when NAIC is TRUE the number of regimes is chosen with the NAIC only.
# Function: mtarnumreg
#==================================================================================================#
mtarnumreg = function(ini_obj, level = 0.95, burn_m = NULL, niter_m = 1000,
iterprev = 500, chain_m = FALSE, list_m = FALSE, NAIC = FALSE,
ordersprev = list(maxpj = 2,maxqj = 0,maxdj = 0), parallel = FALSE){
compiler::enableJIT(3)
if (!is.logical(chain_m)) {stop('chain_m must be a logical object')}
if (!is.logical(parallel)) {stop('parallel must be a logical object')}
if (!is.logical(NAIC)) {stop('NAIC must be a logical object')}
# Checking
if (!inherits(ini_obj, 'regime_inipars')) {
stop('ini_obj must be a regime_inipars object')
}
if (is.null(ini_obj$tsregime_obj$Zt)) {
stop('Threshold process must be entered in ini_obj to evaluate up to l0_max regimes')}
if (is.null(ini_obj$l0_min)) {
message('l0_min NULL default 2 \n')
l0_min = 2
}else{
l0_min = ini_obj$l0_min
}
if (is.null(ini_obj$l0_max)) {
message('l0_max NULL default 3 \n')
l0 = 3
}else{
l0 = ini_obj$l0_max
}
if (is.null(ini_obj$method)) {
message('method NULL default KUO \n')
method = 'KUO'
}else{
method = ini_obj$method
}
# data
Yt = ini_obj$tsregime_obj$Yt
Ut = cbind(ini_obj$tsregime_obj$Zt,ini_obj$tsregime_obj$Xt)
k = ini_obj$tsregime_obj$k
N = ini_obj$tsregime_obj$N
nu = ini_obj$tsregime_obj$nu
if (is.null(nu)) {nu = 0}
maxpj = ifelse(is.null(ordersprev$maxpj),2,ordersprev$maxpj)
maxqj = ifelse(is.null(ordersprev$maxqj),0,ordersprev$maxqj)
maxdj = ifelse(is.null(ordersprev$maxdj),0,ordersprev$maxdj)
# Code
burn_m = ifelse(is.null(burn_m),round(0.1*niter_m),burn_m)
Yt = t(Yt)
Zt = Ut[,1]
if (nu == 0) {
Xt = matrix(0,ncol = N,nrow = 1)
}else{
Xt = t(ini_obj$tsregime_obj$Xt)
}
rini = ini_obj$init$r
a = ifelse(is.null(rini$za),min(Zt),stats::quantile(Zt,probs = rini$za))
b = ifelse(is.null(rini$zb),max(Zt),stats::quantile(Zt,probs = rini$zb))
### FUNCTIONS
dmunif = function(r, a, b){
l = length(r) + 1
names(a) = names(b) = NULL
volume = ((b - a)^{l - 1})/(factorial(l - 1))
# the prior is positive only if every threshold lies in [a,b] and the thresholds are increasing
prob = 1
for (i in 1:{l - 1}) {
if (!{r[i] >= a & r[i] <= b}) {prob = 0}
}
if (l > 2) {
for (j in 1:{l - 2}) {
if (r[j] >= r[j + 1]) {prob = 0}
}
}
rj = matrix(nrow = 2,ncol = l)
rj[,1] = c(-Inf,r[1])
rj[,l] = c(rev(r)[1],Inf)
if (l > 2) {
for (i2 in 2:{l - 1}) {rj[,i2] = c(r[i2 - 1],r[i2])}
}
Ind = c()
for (j in 1:l) {
for (w in 1:N) {
if (Zt[w] > rj[1,j] & Zt[w] <= rj[2,j]) {
Ind[w] = j
}
}
}
Nrg = c()
for (lj in 1:l) {
Nrg[lj] = length(Ind[Ind == lj])
}
if (sum(Nrg/sum(Nrg) > 0.2) == l) {prob = 1*prob}else{prob = 0*prob}
return(prob/volume)
}
dmunif = compiler::cmpfun(dmunif)
rdunif = function(m,l0,...){
sec = l0_min:l0
sec = sec[sec != m]
if (length(sec) == 1) {
muestra = sec
}else{
muestra = sample(x = sec, size = 1)
}
return(muestra)
}
rdunif = compiler::cmpfun(rdunif)
### Function to create lists
lists = function(l, r, pjmax, qjmax, djmax, etam, ...){
rj = matrix(nrow = 2,ncol = l)
rj[,1] = c(-Inf,r[1])
rj[,l] = c(rev(r)[1],Inf)
if (l > 2) {for (j2 in 2:{l - 1}) {rj[,j2] = c(r[j2 - 1],r[j2])}}
# indicator variable for the regime
Ind = vector(mode = 'numeric',length = N)
for (j in 1:l) {
Ind[Zt > rj[1,j] & Zt <= rj[2,j]] = j
}
Nrg = vector(mode = 'numeric')
listaXj = listaYj = list()
length(listaXj) = length(listaYj) = l
Inj_X = function(ti,Yt,Zt,Xt,p,q,d){
yti = vector(mode = "numeric")
for (w in 1:p) {yti = c(yti,Yt[,ti - w])}
xti = vector(mode = "numeric")
for (w in 1:q) {xti = c(xti,Xt[,ti - w])}
zti = vector(mode = "numeric")
for (w in 1:d) {zti = c(zti,Zt[ti - w])}
if (q == 0 & d != 0) {
wtj = c(1,yti,zti)
}else if (d == 0 & q != 0) {
wtj = c(1,yti,xti)
}else if (d == 0 & q == 0) {
wtj = c(1,yti)
}else{
wtj = c(1,yti,xti,zti)}
return(wtj)
}
Inj_X = Vectorize(Inj_X,vectorize.args = "ti")
for (lj in 1:l) {
p = pjmax[lj]
q = qjmax[lj]
d = djmax[lj]
maxj = max(p,q,d)
Inj = which(Ind == lj)
Inj = Inj[Inj > maxj]
Nrg[lj] = length(Inj)
Yj = matrix(Yt[,Inj],nrow = k,ncol = Nrg[lj])
if (identical(Inj,integer(0))) {
Xj = matrix(nrow = 0,ncol = etam[lj]*k)
}else{
Wj = sapply(Inj,Inj_X,Yt = Yt,Zt = Zt,Xt = Xt,p = p,q = q,d = d)
Xj = t(Wj) %x% diag(k)[1,]
if (k != 1) {for (s in 2:k) {Xj = cbind(Xj,t(Wj) %x% diag(k)[s,])}}
}
listaXj[[lj]] = Xj
listaYj[[lj]] = Yj
}
return(list(Nrg = Nrg,listaX = listaXj,listaY = listaYj))
}
lists = compiler::cmpfun(lists)
### Likelihood function for Yt
fycond = function(ir, listar, gamma, theta, sigma){
acum = 0
l = length(listar$listaY)
Nrg = listar$Nrg
for (lj in 1:l) {
yj = c(listar$listaY[[lj]])
Xj = listar$listaX[[lj]]
acum = acum + t(yj - Xj %*% diag(gamma[[lj]][,ir]) %*% theta[[lj]][,ir]) %*%
{diag(Nrg[lj]) %x% solve(sigma[[lj]][[ir]])} %*%
(yj - Xj %*% diag(gamma[[lj]][,ir]) %*% theta[[lj]][,ir])
}
sigmareg = lapply(sigma,function(x){x[[ir]]})
cte = prodB(Brobdingnag::as.brob(sapply(sigmareg,function(x){
return(c(determinant(x,logarithm = FALSE)$modulus))}))^{-Nrg/2})
val = cte*exp(-1/2*Brobdingnag::as.brob(acum))
return(val)
}
fycond = compiler::cmpfun(fycond)
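# Note: fycond evaluates the conditional likelihood of Yt given the thresholds, the indicator
# variables gamma and the regime parameters; determinants and the exponential term are kept as
# 'brob' numbers (Brobdingnag) so that the Metropolis-Hastings ratios do not underflow.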
### Previous iterations
fill = function(m, iter = 500, burn = 1000, ...){
i = 1
ordersm = list(pj = rep(maxpj,m),qj = rep(maxqj,m),dj = rep(maxdj,m))
etam = 1 + ordersm$pj*k + ordersm$qj*nu + ordersm$dj
ini_obj_m = mtarinipars(tsregime_obj = ini_obj$tsregime_obj,
list_model = list(pars = list('l' = m),orders = ordersm),method = method)
#Parameters priori
theta0Pm = lapply(ini_obj_m$init$Theta,function(x){x$theta0j})
sigma0Pm = lapply(ini_obj_m$init$Theta,function(x){x$cov0j})
S0Pm = lapply(ini_obj_m$init$Sigma,function(x){x$S0j})
nu0Pm = lapply(ini_obj_m$init$Sigma,function(x){x$nu0j})
pij0Pm = ini_obj_m$init$Gamma
# previous iterations
par = mtarstr(ini_obj = ini_obj_m, niter = iter, chain = TRUE,burn = burn)
#Parameters pseudo Theta
theta0Sm = lapply(par$estimates$Theta,function(x){x[,2]})
sigma0Sm = lapply(par$Chain$Theta,function(x){stats::cov(t(x))})
#Parameters pseudo Sigma
S0Sm = lapply(par$regime,function(x){x$sigma})
nu0Sm = lapply(1:m,function(x){1000})
#Parameters pseudo Gamma
pij0Sm = lapply(par$Chain$Gamma,function(x){apply(x,1,mean)})
#Parameters pseudo R
if (m != 1) {
rmean0Sm = par$estimates$r[,2]
rcov0Sm = stats::cov(t(par$Chain$r))
}else{
rmean0Sm = rcov0Sm = NULL
}
# chains and initial values
theta_iter = sigma_iter = gam_iter = Dj = Rj = vector('list',m)
if (method == 'SSVS') {
tauij = itauij = cij = vector('list',m)
}
if (m != 1) {
r_iter = matrix(ncol = niter_m + burn_m,nrow = m - 1)
r_iter[,1] = c(stats::quantile(Zt, probs = 1/m*(1:{m - 1})))
r0iter = r_iter[,1]
}else{
r_iter = NULL
r0iter = 0
}
for (lj in 1:m) {
theta_iter[[lj]] = gam_iter[[lj]] = matrix(ncol = niter_m + burn_m,nrow = k*etam[lj])
sigma_iter[[lj]] = vector('list',length = niter_m + burn_m)
gam_iter[[lj]][,1] = rep(1,k*etam[lj])
if (method == 'KUO') {
theta_iter[[lj]][,1] = mvtnorm::rmvnorm(1,mean = theta0Pm[[lj]],sigma = sigma0Pm[[lj]])
}
if (method == 'SSVS') {
cij[[lj]] = ini_obj_m$init$Theta[[paste0('R',lj)]]$Cij
tauij[[lj]] = ini_obj_m$init$Theta[[paste0('R',lj)]]$Tauij
itauij[[lj]] = cij[[lj]]*tauij[[lj]]
Dj[[lj]] = diag(itauij[[lj]])
Rj[[lj]] = diag(k*etam[lj])
theta_iter[[lj]][,1] = mvtnorm::rmvnorm(1,mean = rep(0,k*etam[lj]),sigma = Dj[[lj]] %*% Rj[[lj]] %*% Dj[[lj]])
}
sigma_iter[[lj]][[1]] = MCMCpack::riwish(v = nu0Pm[[lj]],S = S0Pm[[lj]])
}
# LISTS
if (method == 'SSVS') {
iniP = list(Theta = list(mean = theta0Pm, cov = sigma0Pm), Sigma = list(cov = S0Pm,gl = nu0Pm),
Gamma = list(prob = pij0Pm, itauij = itauij, tauij = tauij, cij = cij, Rj = Rj))
}else{
iniP = list(Theta = list(mean = theta0Pm, cov = sigma0Pm), Sigma = list(cov = S0Pm,gl = nu0Pm),
Gamma = list(prob = pij0Pm))
}
iniS = list(Theta = list(mean = theta0Sm,cov = sigma0Sm), Sigma = list(cov = S0Sm,gl = nu0Sm),
Gamma = list(prob = pij0Sm), r = list(mean = rmean0Sm, cov = rcov0Sm))
listchain = list(Theta = theta_iter, Sigma = sigma_iter,
Gamma = gam_iter, r = r_iter)
listr = lists(l = m,r = r0iter,pjmax = ordersm$pj,qjmax = ordersm$qj,djmax = ordersm$dj, etam)
return(list(i = i,orders = ordersm,Priori = iniP,Pseudo = iniS,Chain = listchain,listr = listr,par = par))
}
fill = compiler::cmpfun(fill)
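# Note: fill() runs a pilot chain with mtarstr() for a model with m regimes and stores, next to
# the prior ('Priori'), a moment-matched pseudo-prior ('Pseudo') built from the pilot posterior,
# as required by the Metropolized Carlin and Chib sampler over the number of regimes.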
### Function to compute posterioris
updatelist = function(l, ...){
rgamber = function(pos, reg, ig, ...){
gam_j = gam_iter
gam_j[[reg]][pos,ig] = 1
pycond1 = fycond(ig,listaj,gam_j,theta_iter,sigma_iter)
gam_j[[reg]][pos,ig] = 0
pycond0 = fycond(ig,listaj,gam_j,theta_iter,sigma_iter)
if (method == 'KUO') {
aij = pycond1*pij[[reg]][pos]
bij = pycond0*(1 - pij[[reg]][pos])
}else if (method == 'SSVS') {
Xj = listaj$listaX[[reg]]
yj = c(listaj$listaY[[reg]])
gam_j[[reg]][pos,ig] = 1
itauij[[reg]][gam_j[[reg]][,ig] == 0] = tauij[[reg]][gam_j[[reg]][,ig] == 0]
itauij[[reg]][gam_j[[reg]][,ig] == 1] =
cij[[reg]][gam_j[[reg]][,ig] == 1]*tauij[[reg]][gam_j[[reg]][,ig] == 1]
Dj[[reg]] = diag(itauij[[reg]])
pthetacond1 = dmnormB(x = theta_iter[[reg]][,ig],mean = rep(0,k*etam[reg]),sigma = Dj[[reg]] %*% Rj[[reg]] %*% Dj[[reg]])
aij = pycond1*pthetacond1*pij[[reg]][pos]
gam_j[[reg]][pos,ig] = 0
itauij[[reg]][gam_j[[reg]][,ig] == 0] = tauij[[reg]][gam_j[[reg]][,ig] == 0]
itauij[[reg]][gam_j[[reg]][,ig] == 1] =
cij[[reg]][gam_j[[reg]][,ig] == 1]*tauij[[reg]][gam_j[[reg]][,ig] == 1]
Dj[[reg]] = diag(itauij[[reg]])
pthetacond0 = dmnormB(x = theta_iter[[reg]][,ig],mean = rep(0,k*etam[reg]),sigma = Dj[[reg]] %*% Rj[[reg]] %*% Dj[[reg]])
bij = pycond0*pthetacond0*(1 - pij[[reg]][pos])
}
return(stats::rbinom(1,size = 1,prob = as.numeric((aij)/(aij + bij))))
}
rgamber = Vectorize(rgamber,"pos")
listPr = listm[[paste0('m',l)]]
Dj = vector('list')
Rj = vector('list')
i2 = listPr$i
# creating temporary variables
pjmax = listPr$orders$pj
qjmax = listPr$orders$qj
djmax = listPr$orders$dj
etam = 1 + pjmax*k + qjmax*nu + djmax
theta_iter = listPr$Chain$Theta
sigma_iter = listPr$Chain$Sigma
gam_iter = listPr$Chain$Gamma
r_iter = listPr$Chain$r
theta0j = listPr$Priori$Theta$mean
sigma0j = listPr$Priori$Theta$cov
S0j = listPr$Priori$Sigma$cov
nu0j = listPr$Priori$Sigma$gl
pij = listPr$Priori$Gamma$prob
if (method == 'SSVS') {
itauij = listPr$Priori$Gamma$itauij
tauij = listPr$Priori$Gamma$tauij
cij = listPr$Priori$Gamma$cij
Rj = listPr$Priori$Gamma$Rj
}
#iterations update
if (!is.null(r_iter)) {
listaj = lists(l,r_iter[,i2],pjmax,qjmax,djmax,etam)
}else{
listaj = lists(l,0,pjmax,qjmax,djmax,etam)
}
for (lj in 1:l) {
Xj = listaj$listaX[[lj]]
Yj = listaj$listaY[[lj]]
yj = c(Yj)
Nj = listaj$Nrg[lj]
if (method == 'SSVS') {
itauij[[lj]][gam_iter[[lj]][,i2] == 0] = tauij[[lj]][gam_iter[[lj]][,i2] == 0]
itauij[[lj]][gam_iter[[lj]][,i2] == 1] = cij[[lj]][gam_iter[[lj]][,i2] == 1]*tauij[[lj]][gam_iter[[lj]][,i2] == 1]
Dj[[lj]] = diag(itauij[[lj]])
theta0j = list()
theta0j[[lj]] = rep(0,k*etam[lj])
}else if (method == 'KUO') {
Dj[[lj]] = diag(k*etam[lj])
Rj[[lj]] = sigma0j[[lj]]}
Vj = solve(t(diag(gam_iter[[lj]][,i2])) %*% t(Xj) %*% {diag(Nj) %x% solve(sigma_iter[[lj]][[i2]])} %*% Xj %*% diag(gam_iter[[lj]][,i2]) + solve(Dj[[lj]] %*% Rj[[lj]] %*% Dj[[lj]]))
thetaj = Vj %*% {t(diag(gam_iter[[lj]][,i2])) %*% t(Xj) %*% {diag(Nj) %x% solve(sigma_iter[[lj]][[i2]])} %*% yj + solve(sigma0j[[lj]]) %*% theta0j[[lj]]}
theta_iter[[lj]][,i2 + 1] = mvtnorm::rmvnorm(1,mean = thetaj,sigma = Vj)
Hj = ks::invvec({Xj %*% diag(gam_iter[[lj]][,i2]) %*% theta_iter[[lj]][,i2 + 1]},nrow = k,ncol = Nj)
Sj = (Yj - Hj) %*% t(Yj - Hj)
sigma_iter[[lj]][[i2 + 1]] = MCMCpack::riwish(v = Nj + nu0j[[lj]],S = Sj + S0j[[lj]])
gam_iter[[lj]][,i2 + 1] = gam_iter[[lj]][,i2]
}
#gamma
for (jj in 1:l) {
gam_iter[[jj]][,i2 + 1] = rgamber(pos = 1:{k*etam[jj]},reg = jj,ig = i2 + 1)
}
#r
if (l != 1) {
if (i2 < 70) {
ek = mvtnorm::rmvnorm(1,mean = rep(0,l - 1),sigma = 0.5*diag(l - 1))
}else{
ek = stats::runif(l - 1,-abs(rini$val_rmh),abs(rini$val_rmh))
}
# ek = runif(l - 1,-abs(rini$val_rmh),abs(rini$val_rmh))
rk = r_iter[,i2] + ek
listark = lists(l,rk,pjmax,qjmax,djmax,etam)
pr = dmunif(rk,a,b)*fycond(i2 + 1,listark,gam_iter,theta_iter,sigma_iter)
px = dmunif(r_iter[,i2],a,b)*fycond(i2 + 1,listaj,gam_iter,theta_iter,sigma_iter)
alpha = min(1,as.numeric(pr/px))
if (alpha >= stats::runif(1)) {
r_iter[,i2 + 1] = rk
listr = listark
}else{
r_iter[,i2 + 1] = r_iter[,i2]
listr = listaj
}
}else{
listr = lists(l,0,pjmax,qjmax,djmax,etam)
}
listPr$Chain = list(Theta = theta_iter, Sigma = sigma_iter,Gamma = gam_iter, r = r_iter)
listPr$listr = listr
listPr$i = i2 + 1
return(listPr)
}
updatelist = compiler::cmpfun(updatelist)
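# Note: compiler::cmpfun() byte-compiles the sampler once up front, so the
# repeated calls inside the MCMC loop below run on byte code instead of being
# re-interpreted at every iteration; results are unchanged.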
rpseudo = function(l,...){
listPr = listm[[paste0('m',l)]]
i2 = listPr$i + 1
pjmax = listPr$orders$pj
qjmax = listPr$orders$qj
djmax = listPr$orders$dj
etam = 1 + pjmax*k + qjmax*nu + djmax
theta_iter = listPr$Chain$Theta
sigma_iter = listPr$Chain$Sigma
gam_iter = listPr$Chain$Gamma
r_iter = listPr$Chain$r
theta0jS = listPr$Pseudo$Theta$mean
sigma0jS = listPr$Pseudo$Theta$cov
S0jS = listPr$Pseudo$Sigma$cov
nu0jS = listPr$Pseudo$Sigma$gl
pijS = listPr$Pseudo$Gamma$prob
rmeanS = listPr$Pseudo$r$mean
rcovS = listPr$Pseudo$r$cov
for (lj in 1:l) {
theta_iter[[lj]][,i2] = mvtnorm::rmvnorm(1,mean = theta0jS[[lj]],sigma = sigma0jS[[lj]])
sigma_iter[[lj]][[i2]] = 1/nu0jS[[lj]]*MCMCpack::rwish(v = nu0jS[[lj]] ,S = S0jS[[lj]])
sigma_iter[[lj]][[i2]] = sigma_iter[[lj]][[i2]] %*% sigma_iter[[lj]][[i2]]
for (iga in 1:{k*etam[lj]}) {
gam_iter[[lj]][iga,i2] = stats::rbinom(n = 1,size = 1,prob = pijS[[lj]][iga])
}
}
if (l != 1) {
r_iter[,i2] = mvtnorm::rmvnorm(1,mean = rmeanS, sigma = as.matrix(rcovS))
listPr$listr = lists(l,r_iter[,i2],pjmax,qjmax,djmax,etam)
}else{
listPr$listr = lists(l,0,pjmax,qjmax,djmax,etam)
}
listPr$Chain = list(Theta = theta_iter, Sigma = sigma_iter,Gamma = gam_iter, r = r_iter)
listPr$i = i2
return(listPr)
}
rpseudo = compiler::cmpfun(rpseudo)
### Function to compute quotient
prodA = function(thetaym, thetaymp){
pgammaPn = pthetaPn = psigmaPn = Brobdingnag::as.brob(1)
pgammaPd = pthetaPd = psigmaPd = Brobdingnag::as.brob(1)
pgammaSn = pthetaSn = psigmaSn = Brobdingnag::as.brob(1)
pgammaSd = pthetaSd = psigmaSd = Brobdingnag::as.brob(1)
lm = length(thetaym$listr$Nrg)
lmp = length(thetaymp$listr$Nrg)
theta_iterm = thetaym$Chain$Theta
sigma_iterm = thetaym$Chain$Sigma
gam_iterm = thetaym$Chain$Gamma
r_iterm = thetaym$Chain$r
iAm = thetaym$i
iAmp = thetaymp$i
theta_itermp = thetaymp$Chain$Theta
sigma_itermp = thetaymp$Chain$Sigma
gam_itermp = thetaymp$Chain$Gamma
r_itermp = thetaymp$Chain$r
theta0jmp = thetaymp$Priori$Theta$mean
sigma0jmp = thetaymp$Priori$Theta$cov
S0jmp = thetaymp$Priori$Sigma$cov
nu0jmp = thetaymp$Priori$Sigma$gl
pijmp = thetaymp$Priori$Gamma$prob
theta0jm = thetaym$Priori$Theta$mean
sigma0jm = thetaym$Priori$Theta$cov
S0jm = thetaym$Priori$Sigma$cov
nu0jm = thetaym$Priori$Sigma$gl
pijm = thetaym$Priori$Gamma$prob
theta0jSm = thetaym$Pseudo$Theta$mean
sigma0jSm = thetaym$Pseudo$Theta$cov
S0jSm = thetaym$Pseudo$Sigma$cov
nu0jSm = thetaym$Pseudo$Sigma$gl
pijSm = thetaym$Pseudo$Gamma$prob
rmeanSm = thetaym$Pseudo$r$mean
rcovSm = thetaym$Pseudo$r$cov
theta0jSmp = thetaymp$Pseudo$Theta$mean
sigma0jSmp = thetaymp$Pseudo$Theta$cov
S0jSmp = thetaymp$Pseudo$Sigma$cov
nu0jSmp = thetaymp$Pseudo$Sigma$gl
pijSmp = thetaymp$Pseudo$Gamma$prob
rmeanSmp = thetaymp$Pseudo$r$mean
rcovSmp = thetaymp$Pseudo$r$cov
for (lj in 1:lmp) {
pgammaPn = pgammaPn*prodB(Brobdingnag::as.brob(stats::dbinom(gam_itermp[[lj]][,iAmp],size = 1,prob = pijmp[[lj]])))
pthetaPn = pthetaPn*dmnormB(theta_itermp[[lj]][,iAmp],mean = theta0jmp[[lj]], sigma = sigma0jmp[[lj]])
psigmaPn = psigmaPn*dwishartB(sigma_itermp[[lj]][[iAmp]], nu = nu0jmp[[lj]],S = solve(as.matrix(S0jmp[[lj]])))
pgammaSd = pgammaSd*prodB(Brobdingnag::as.brob(stats::dbinom(gam_itermp[[lj]][,iAmp],size = 1,prob = pijSmp[[lj]])))
pthetaSd = pthetaSd*dmnormB(theta_itermp[[lj]][,iAmp],mean = theta0jSmp[[lj]], sigma = sigma0jSmp[[lj]])
psigmaSd = psigmaSd*dwishartB(expm::sqrtm(sigma_itermp[[lj]][[iAmp]]), nu = nu0jSmp[[lj]],S = as.matrix(S0jSmp[[lj]]))
}
fn = fycond(iAmp,thetaymp$listr,thetaymp$Chain$Gamma,thetaymp$Chain$Theta,thetaymp$Chain$Sigma)
for (lj in 1:lm) {
pgammaPd = pgammaPd*prodB(Brobdingnag::as.brob(stats::dbinom(gam_iterm[[lj]][,iAm],size = 1,prob = pijm[[lj]])))
pthetaPd = pthetaPd*dmnormB(theta_iterm[[lj]][,iAm],mean = theta0jm[[lj]], sigma = sigma0jm[[lj]])
psigmaPd = psigmaPd*dwishartB(sigma_iterm[[lj]][[iAm]], nu = nu0jm[[lj]],S = solve(as.matrix(S0jm[[lj]])))
pgammaSn = pgammaSn*prodB(Brobdingnag::as.brob(stats::dbinom(gam_iterm[[lj]][,iAm],size = 1,prob = pijSm[[lj]])))
pthetaSn = pthetaSn*dmnormB(theta_iterm[[lj]][,iAm],mean = theta0jSm[[lj]], sigma = sigma0jSm[[lj]])
psigmaSn = psigmaSn*dwishartB(expm::sqrtm(sigma_iterm[[lj]][[iAm]]), nu = nu0jSm[[lj]],S = as.matrix(S0jSm[[lj]]))
}
if (lmp != 1) {
prPn = dmunif(r_itermp[,iAmp],a,b)
prSd = dmnormB(r_itermp[,iAmp],mean = rmeanSmp, sigma = as.matrix(rcovSmp))
}else{
prPn = prSd = 1
}
if (lm != 1) {
prPd = dmunif(r_iterm[,iAm],a,b)
prSn = dmnormB(r_iterm[,iAm],mean = rmeanSm, sigma = as.matrix(rcovSm))
}else{
prPd = prSn = 1
}
fd = fycond(iAm,thetaym$listr,thetaym$Chain$Gamma,thetaym$Chain$Theta,thetaym$Chain$Sigma)
# compute the value of the quotient
vald = fd*(pgammaPd*pthetaPd*psigmaPd*prPd)*(pgammaSd*pthetaSd*psigmaSd*prSd)
valn = fn*(pgammaPn*pthetaPn*psigmaPn*prPn)*(pgammaSn*pthetaSn*psigmaSn*prSn)
val = valn/vald
return(val)
}
prodA = compiler::cmpfun(prodA)
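# Why Brobdingnag in prodA: the likelihood and prior factors multiplied above
# can easily under- or overflow in double precision, so products are kept on
# the 'brob' (logarithmic) scale. A minimal illustration (values arbitrary):
#   x = Brobdingnag::as.brob(1e-300)
#   as.numeric(x * x)  # underflows to 0 in doubles
#   x * x              # still representable as a brob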
# Previous runs
message('Running previous chains ... \n')
listm = vector('list')
if (parallel) {
micluster = parallel::makeCluster(2)
doParallel::registerDoParallel(micluster)
funcParallel = function(i,iterprev){return(fill(m = i,iter = iterprev, burn = round(0.3*iterprev)))}
parallel::clusterEvalQ(micluster, library(BMTAR))
obj_S = list('ini_obj','burn_m','niter_m','chain_m','list_m',
'ordersprev','k','N','nu','method','fill','maxpj','maxqj','maxdj',
'Zt','Yt','Xt','Ut','lists','fycond','rdunif','dmunif','l0_min')
parallel::clusterExport(cl = micluster,varlist = obj_S,envir = environment())
s = parallel::parLapply(micluster,as.list(l0_min:l0), funcParallel, iterprev = iterprev)
cc = 1
for (o in l0_min:l0) {
listm[[paste0('m',o)]] = s[[cc]]
cc = cc + 1
}
parallel::stopCluster(micluster)
}else{
for (o in l0_min:l0) {
listm[[paste0('m',o)]] = fill(m = o,iter = iterprev, burn = round(0.3*iterprev))
}
}
if (NAIC) {
results = list(tsregim = ini_obj$tsregime_obj,list_m = listm)
for (i in l0_min:l0) {
results$NAIC[[paste0('m',i)]] = mtarNAIC(listm[[paste0('m',i)]]$par)
}
x_NAIC = unlist(sapply(results$NAIC,function(x){x$NAIC}))
results$NAIC_final_m = as.numeric(gsub('m','',names(x_NAIC[which.min(x_NAIC)])))
return(results)
}
# Code
m_iter = vector('numeric')
m_iter[1] = sample(l0_min:l0,1)
acepm = 0
pm_im = matrix(nrow = l0,ncol = niter_m + burn_m - 1)
message('Estimating number of regimes ... \n')
pb = utils::txtProgressBar(min = 2, max = niter_m + burn_m,style = 3)
for (im in 2:{niter_m + burn_m}) {
m_iter[im] = rdunif(m_iter[im - 1],l0)
# generate values for Theta_m and Theta_mp
listm[[paste0('m',m_iter[im - 1])]] = updatelist(l = m_iter[im - 1])
listm[[paste0('m',m_iter[im])]] = rpseudo(l = m_iter[im])
# compute the acceptance probability alpha
val = prodA(thetaym = listm[[paste0('m',m_iter[im - 1])]],
thetaymp = listm[[paste0('m',m_iter[im])]])
# evaluate the acceptance criterion
alpham = min(1,as.numeric(val))
if (alpham >= stats::runif(1)) {
acepm = acepm + 1
}else{
m_iter[im] = m_iter[im - 1]
}
pm_im[,im - 1] = table(factor(m_iter,levels = 1:l0))/im
utils::setTxtProgressBar(pb,im)
}
close(pb)
message('Saving results ... \n')
# outputs
m_iter = m_iter[-c(1:burn_m)]
vecm = sort(table(m_iter)/length(m_iter)*100,decreasing = TRUE)
namest = c('first freq','second freq','third freq','fourth freq')
ls = as.numeric(names(vecm))
names(vecm) = paste(namest[1:length(vecm)],paste0(round(vecm,2),'%'))
vecm[1:length(vecm)] = ls[1:length(vecm)]
if (chain_m & list_m) {
results = list(tsregim = ini_obj$tsregime_obj,
list_m = listm ,m_chain = m_iter,
prop = pm_im,
estimates = vecm,final_m = vecm[1])
}else if (chain_m & !list_m) {
results = list(tsregim = ini_obj$tsregime_obj,
m_chain = m_iter,
prop = pm_im,
estimates = vecm,final_m = vecm[1])
}else if (!chain_m & list_m) {
results = list(tsregim = ini_obj$tsregime_obj,
list_m = listm,
prop = pm_im,
estimates = vecm,final_m = vecm[1])
}else if (!chain_m & !list_m) {
results = list(tsregim = ini_obj$tsregime_obj,
prop = pm_im,
estimates = vecm,
final_m = vecm[1])
}
for (o in l0_min:l0) {
results$NAIC[[paste0('m',o)]] = mtarNAIC(listm[[paste0('m',o)]]$par)
}
compiler::enableJIT(0)
class(results) = 'regime_number'
return(results)
}
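# The returned 'regime_number' object always carries the running proportions of
# visits per number of regimes (prop), the ranked frequency table (estimates),
# the selected number of regimes (final_m) and per-candidate NAIC values;
# m_chain and list_m are attached only when chain_m and/or list_m are TRUE.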
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/BMTAR/R/mtarnumreg.R ----
#==================================================================================================#
# Date:
# Description:
# Coments:
#-> depends on the mvtnorm library.
#-> In the simulation the first values of Ut are set to zero and the first values
#- of Yt to normal noise. Additionally, a burn-in (1000 observations, see 'burn' below) is discarded.
#-> TODO: comment on the cases where the process explodes and clarify that 'stability' is not checked.
# We do not verify that Ut satisfies the properties of a Markov process.
# Function:
#==================================================================================================#
mtarsim = function(N, Rg, r = NULL, Xt = NULL, Zt = NULL, seed = NULL){
burn = 1000
if (!{round(N) == N & N > 1}) {stop('N must be an integer greater than 1')}
if (!is.null(Zt)) {
if (!is.numeric(Zt)) {stop('Zt must be a real matrix of dimension Nx1')}
if (!is.matrix(Zt)) {Zt = as.matrix(Zt)}
if (nrow(Zt) != N) {stop('Zt and Yt number of rows must match')}
Zt = t(Zt)
}
if (!is.null(Xt)) {
if (!is.numeric(Xt)) {stop('Xt must be a real matrix of dimension Nx(nu+1)')}
if (!is.matrix(Xt)) {Xt = as.matrix(Xt)}
if (nrow(Xt) != N) {stop('Xt and Yt number of rows must match')}
Xt = t(Xt)
}
Ut = rbind(Zt,Xt)
if (is.null(Ut)) {nu = 0}else{nu = nrow(Ut) - 1}
k = nrow(Rg[[1]]$sigma)
l = length(Rg)
if (l == 1) {
rj = matrix(c(-Inf,Inf),nrow = 2,ncol = l)
if (is.null(Ut)) {
Ut = matrix(0, ncol = N,nrow = 1)
}else{
Ut = rbind(matrix(0, ncol = N,nrow = 1),Xt) # covariates only
}
}
# Validations
if (!is.list(Rg)) {stop('Rg must be a list type object with objects of class regime')}
if (ncol(Ut) != N | !is.numeric(Ut) | !is.matrix(Ut)) {
stop(paste0('Ut must be a matrix of dimension ',N,'x',nu + 1))}
for (i in 1:l) {
if (!inherits(Rg[[i]], 'regime')) {stop('Rg must be a list of objects of class regime')}
}
if (l >= 2) {
if (length(r) < 1 | length(r) != (l - 1) | !is.numeric(r) | is.null(r)) {
stop(paste('r must be a numeric vector of length',length(Rg) - 1))}else{
if (l > 2) {for (i in 1:{l - 2}) {
if (r[i] >= r[i + 1]) {stop('r[i] must be smaller than r[i+1]')}}
}
}
}
# values by regime
pj = qj = dj = vector('numeric')
for (i in 1:l) {
pj[i] = length(Rg[[i]]$phi)
qj[i] = length(Rg[[i]]$beta)
dj[i] = length(Rg[[i]]$delta)
}
# create intervals
if (l != 1) {
rj = matrix(nrow = 2,ncol = l)
rj[,1] = c(-Inf,r[1])
rj[,l] = c(rev(r)[1],Inf)
if (l > 2) {
for (i in 2:{l - 1}) {rj[,i] = c(r[i - 1],r[i])}
}
}
# initial Vectors
maxj = max(pj,qj,dj)
Yt = matrix(0,nrow = k,ncol = N + maxj + burn)
if (!is.null(seed)) {set.seed(seed)}
et = t(mvtnorm::rmvnorm(N + maxj + burn,mean = rep(0,k),sigma = diag(k)))
Yt[,1:(maxj + burn)] = et[,1:(maxj + burn)]
Zt = c(rep(0,maxj + burn),Ut[1,])
if (nu == 0) {
Xt = matrix(0,ncol = N + maxj + burn,nrow = 1)
}else{
Xt = cbind(rep(0,nu) %x% matrix(1,ncol = maxj + burn),matrix(Ut[-1,],nrow = nu))
}
# iterations of the simulation
for (i in (maxj + burn + 1):(N + maxj + burn)) {
## evaluate regime
for (w in 1:l) {
if (Zt[i] > rj[1,w] & Zt[i] <= rj[2,w]) {Ri = Rg[[w]]}
}
## compute according to the selected regime
p = length(Ri$phi)
q = length(Ri$beta)
d = length(Ri$delta)
## create matrices
cs = Ri$cs
At = as.matrix(as.data.frame(Ri$phi))
if (q != 0) {
Bt = as.matrix(as.data.frame(Ri$beta))
}else{Bt = matrix(0,nrow = k,ncol = 1)}
if (d != 0) {
Dt = as.matrix(as.data.frame(Ri$delta))
}else{Dt = matrix(0,nrow = k,ncol = 1)}
Sig = as.matrix(Ri$sigma)
## make lags and calculate
yti = c()
for (w in 1:p) {yti = c(yti,Yt[,i - w])}
xti = c()
if (l == 1 & nrow(Ut) != 1) {
xti = c(xti,Xt[,i])
}else{
for (w in 1:ifelse(q == 0,1,q)) {xti = c(xti,Xt[,i - w])}
}
zti = c()
for (w in 1:ifelse(d == 0,1,d)) {zti = c(zti,Zt[i - w])}
Yt[,i] = cs + At %*% yti + Bt %*% xti + Dt %*% zti + Sig %*% et[,i]
}
# delete burn
if (k == 1) {
Yt = as.matrix(Yt[-(1:{maxj + burn})])
}else{
Yt = t(Yt[,-(1:{maxj + burn})])
}
Zt = Zt[-c(1:{maxj + burn})]
if (nu == 1) {
Xt = as.matrix(Xt[,-c(1:{maxj + burn})])
}else{
Xt = t(Xt[,-c(1:{maxj + burn})])
}
if (sum(Xt) != 0 & sum(Zt) != 0) {
sim = tsregime(Yt = Yt,Xt = Xt,Zt = Zt,r = r)
}else if (sum(Xt) == 0 & sum(Zt) != 0) {
sim = tsregime(Yt = Yt,Zt = Zt,r = r)
}else if (sum(Zt) == 0 & sum(Xt) != 0) {
sim = tsregime(Yt = Yt,Xt = Xt)
}else if (sum(Zt) == 0 & sum(Xt) == 0) {
sim = tsregime(Yt = Yt)
}
List_RS = list(Sim = sim, Reg = Rg,pj = pj,qj = qj,dj = dj)
class(List_RS) = 'mtarsim'
return(List_RS)
}
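# Usage sketch (illustrative values; 'mtaregime' is the package's regime
# constructor, called with the same argument pattern used in mtarstr):
#   R1 = mtaregime(orders = list(p = 1, q = 0, d = 0),
#                  Phi = list(phi1 = matrix(0.5, 1, 1)), Sigma = matrix(1))
#   R2 = mtaregime(orders = list(p = 1, q = 0, d = 0),
#                  Phi = list(phi1 = matrix(-0.4, 1, 1)), Sigma = matrix(1))
#   sim = mtarsim(N = 500, Rg = list(R1, R2), r = 0,
#                 Zt = as.matrix(stats::rnorm(500)), seed = 1)
#   class(sim$Sim)  # 'tsregime' object ready for estimation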
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/BMTAR/R/mtarsim.R ----
#==================================================================================================#
# Date: 17/04/2019
# Description:
#-> When r_init is NULL default is the proportion that separate the observations in l equal parts
#-> Bayesian estimation with MCMC in order: theta gibbs sampling, sigma gibbs sampling, gamma gibbs sampling and
# finally threshold metropolis-hasting with uniform proposal
# Function:
#==================================================================================================#
mtarstr = function(ini_obj, level = 0.95, niter = 1000, burn = NULL, chain = FALSE, r_init = NULL,
parallel = FALSE){
# Just-In-Time (JIT)
compiler::enableJIT(3)
# checking
if (!inherits(ini_obj, 'regime_inipars')) {
stop('ini_obj must be a regime_inipars object')
}
# data
Yt = ini_obj$tsregime_obj$Yt
Ut = cbind(ini_obj$tsregime_obj$Zt,ini_obj$tsregime_obj$Xt)
k = ini_obj$tsregime_obj$k
N = ini_obj$tsregime_obj$N
nu = ini_obj$tsregime_obj$nu
if (is.null(nu)) {nu = 0}
# parameters
if (is.null(ini_obj$pars$l)) {
message('l NULL default 2 \n')
l = 2
}else{
l = ini_obj$pars$l
}
if (any(names(ini_obj$pars) %in% c('orders','r','Sigma'))) {
message('Parameters other than l in list_model$pars will be ignored \n')
}
# method
if (is.null(ini_obj$method)) {
message('Method NULL default KUO \n')
method = 'KUO'
}else{
method = ini_obj$method
}
# unknown
if (is.null(ini_obj$orders)) {
message('orders NULL default for each regime: pj = 2, qj = 0, dj = 0 \n')
orders = list(pj = rep(2,l),qj = rep(0,l),dj = rep(0,l))
}else{
orders = ini_obj$orders
}
# code
burn = ifelse(is.null(burn),round(0.3*niter),burn)
other = 100
pjmax = orders$pj
qjmax = orders$qj
djmax = orders$dj
Yt = t(Yt)
if (l == 1) {
if (is.null(Ut)) {
Ut = matrix(0, ncol = N,nrow = 1)
}else{
Ut = rbind(matrix(0, ncol = N,nrow = 1),t(Ut)) # covariates only
}
}else{Ut = t(Ut)}
Zt = Ut[1,]
if (nu == 0) {
Xt = matrix(0,ncol = N,nrow = 1)
qjmax = rep(0,l)
}else{
Xt = t(ini_obj$tsregime_obj$Xt)
}
eta = 1 + pjmax*k + qjmax*nu + djmax
# objects for each regime and iterations
theta_iter = sigma_iter = gam_iter = pij = Dj = Rj = vector('list', l)
if (method == 'SSVS') {
tauij = itauij = cij = vector('list', l)
}
if (l != 1) {r_iter = matrix(ncol = niter + burn + other,nrow = l - 1)}else{r_iter = NULL}
# set initial values for each regime in each chain
itheta0j = isigma0j = iS0j = inu0j = vector('list',l)
thetaini = ini_obj$init$Theta
sigmaini = ini_obj$init$Sigma
gammaini = ini_obj$init$Gamma
for (lj in 1:l) {
theta_iter[[lj]] = gam_iter[[lj]] = matrix(ncol = niter + burn + other,nrow = k*eta[lj])
sigma_iter[[lj]] = vector('list',length = niter + burn + other)
itheta0j[[lj]] = thetaini[[paste0('R',lj)]]$theta0j
isigma0j[[lj]] = thetaini[[paste0('R',lj)]]$cov0j
iS0j[[lj]] = sigmaini[[paste0('R',lj)]]$S0j
inu0j[[lj]] = sigmaini[[paste0('R',lj)]]$nu0j
pij[[lj]] = gammaini[[paste0('R',lj)]]
gam_iter[[lj]][,1] = rep(1,k*eta[lj])
if (method == 'KUO') {
theta_iter[[lj]][,1] = mvtnorm::rmvnorm(1,mean = itheta0j[[lj]],sigma = isigma0j[[lj]])
}
if (method == 'SSVS') {
cij[[lj]] = ini_obj$init$Theta[[paste0('R',lj)]]$Cij
tauij[[lj]] = ini_obj$init$Theta[[paste0('R',lj)]]$Tauij
Rj[[lj]] = ini_obj$init$Theta[[paste0('R',lj)]]$R
itauij[[lj]] = cij[[lj]]*tauij[[lj]]
Dj[[lj]] = diag(itauij[[lj]])
theta_iter[[lj]][,1] = mvtnorm::rmvnorm(1,mean = rep(0,k*eta[lj]),sigma = Dj[[lj]] %*% Rj[[lj]] %*% Dj[[lj]])
}
sigma_iter[[lj]][[1]] = MCMCpack::riwish(v = inu0j[[lj]],S = iS0j[[lj]])
}
#
# objects to save regimes, chains and credibility intervals
Rest = thetaest = thetachain =
gamest = gamchain = sigmaest = sigmachain = vector('list', l)
names(Rest) = names(thetaest) = names(thetachain) = names(gamest) = names(gamchain) =
names(sigmaest) = names(sigmachain) = paste0('R',1:l)
# necessary functions
fycond = function(i2,listr,gamma,theta_iter,sigma_iter,l){
acum = 0
Nrg = listr$Nrg
for (lj in 1:l) {
yj = c(listr$listaY[[lj]])
Xj = listr$listaX[[lj]]
acum = acum + t(yj - Xj %*% diag(gamma[[lj]][,i2]) %*% theta_iter[[lj]][,i2]) %*%
{diag(Nrg[lj]) %x% solve(sigma_iter[[lj]][[i2]])} %*%
(yj - Xj %*% diag(gamma[[lj]][,i2]) %*% theta_iter[[lj]][,i2])
}
sigmareg = lapply(sigma_iter,function(x){x[[i2]]})
cte = prodB(Brobdingnag::as.brob(sapply(sigmareg,function(x){
return(c(determinant(x,logarithm = FALSE)$modulus))}))^{-Nrg/2})
val = cte*exp(-1/2*Brobdingnag::as.brob(acum))
return(val)
}
fycond = compiler::cmpfun(fycond)
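# fycond evaluates, up to a normalizing constant, the conditional likelihood
#   f(y | theta, Sigma, gamma, r) = prod_j |Sigma_j|^(-Nj/2) *
#     exp{-(1/2) sum_j (yj - Xj Gj theta_j)' (I_Nj %x% Sigma_j^(-1)) (yj - Xj Gj theta_j)},
# with Gj = diag(gamma_j); the Brobdingnag scale keeps the product of the
# per-regime determinant factors from underflowing when the Nj are large.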
dmunif = function(r,a,b){
names(a) = names(b) = NULL
volume = ((b - a)^{l - 1})/(factorial(l - 1))
# the prior is positive only if every threshold lies in [a,b] and the thresholds are ordered
prob = 1
for (i in 1:{l - 1}) {
if (r[i] < a | r[i] > b) {prob = 0}
}
if (l > 2) {
for (j in 1:{l - 2}) {
if (r[j] >= r[j + 1]) {prob = 0}
}
}
rj = matrix(nrow = 2,ncol = l)
rj[,1] = c(-Inf,r[1])
rj[,l] = c(rev(r)[1],Inf)
if (l > 2) {
for (i2 in 2:{l - 1}) {rj[,i2] = c(r[i2 - 1],r[i2])}
}
Ind = c()
for (j in 1:l) {
for (w in 1:N) {
if (Zt[w] > rj[1,j] & Zt[w] <= rj[2,j]) {
Ind[w] = j
}
}
}
Nrg = c()
for (lj in 1:l) {
Nrg[lj] = length(Ind[Ind == lj])
}
if (sum(Nrg/sum(Nrg) > 0.2) == l) {prob = 1*prob}else{prob = 0*prob}
return(prob/volume)
}
dmunif = compiler::cmpfun(dmunif)
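# dmunif is the prior on the ordered threshold vector: the uniform density
# (l-1)!/(b-a)^(l-1) on {a <= r_1 < ... < r_(l-1) <= b}, further truncated by
# the indicator (built from Nrg above) that every regime retains more than 20%
# of the observations, so degenerate regime splits receive zero prior mass.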
lists = function(r,...){
rj = matrix(nrow = 2,ncol = l)
if (l == 1) {
rj[,1] = c(-Inf,Inf)
}else{
rj[,1] = c(-Inf,r[1])
rj[,l] = c(rev(r)[1],Inf)
}
if (l > 2) {for (i2 in 2:{l - 1}) {rj[,i2] = c(r[i2 - 1],r[i2])}}
# indicator variable for the regime
Ind = vector(mode = 'numeric',length = N)
for (j in 1:l) {
Ind[Zt > rj[1,j] & Zt <= rj[2,j]] = j
}
Nrg = vector(mode = 'numeric')
listaXj = listaYj = vector('list', l)
Inj_X = function(ti,Yt,Zt,Xt,p,q,d){
yti = vector(mode = "numeric")
for (w in 1:p) {yti = c(yti,Yt[,ti - w])}
xti = vector(mode = "numeric")
for (w in 1:q) {xti = c(xti,Xt[,ti - w])}
zti = vector(mode = "numeric")
for (w in 1:d) {zti = c(zti,Zt[ti - w])}
if (q == 0 & d != 0) {
wtj = c(1,yti,zti)
}else if (d == 0 & q != 0) {
wtj = c(1,yti,xti)
}else if (d == 0 & q == 0) {
wtj = c(1,yti)
}else{
wtj = c(1,yti,xti,zti)}
return(wtj)
}
Inj_X = Vectorize(Inj_X,vectorize.args = "ti")
for (lj in 1:l) {
p = pjmax[lj]
q = qjmax[lj]
d = djmax[lj]
maxj = max(p,q,d)
Inj = which(Ind == lj)
Inj = Inj[Inj > maxj]
Nrg[lj] = length(Inj)
Yj = matrix(Yt[,Inj],nrow = k,ncol = Nrg[lj])
yj = c(Yj)
if (identical(Inj,integer(0))) {
Xj = matrix(nrow = 0,ncol = eta[lj]*k)
}else{
Wj = sapply(Inj,Inj_X,Yt = Yt,Zt = Zt,Xt = Xt,p = p,q = q,d = d)
Xj = t(Wj) %x% diag(k)[1,]
if (k != 1) {for (s in 2:k) {Xj = cbind(Xj,t(Wj) %x% diag(k)[s,])}}
}
listaXj[[lj]] = Xj
listaYj[[lj]] = Yj
}
return(list(Nrg = Nrg,listaX = listaXj,listaY = listaYj, Ind = Ind))
}
lists = compiler::cmpfun(lists)
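# Layout produced by lists(): for regime j, each usable time t contributes
#   w_t = (1, y_(t-1), ..., y_(t-p), x_(t-1), ..., x_(t-q), z_(t-1), ..., z_(t-d)),
# and Xj stacks t(Wj) %x% e_s over the k equations, so the stacked residual is
#   yj - Xj %*% diag(gamma_j) %*% theta_j, with theta_j of length k*eta_j.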
rgamber = function(pos,reg,i,listj,theta_iter,sigma_iter,gam_iter,...){
gam_j = gam_iter
gam_j[[reg]][pos,i] = 1
pycond1 = fycond(i,listj,gam_j,theta_iter,sigma_iter,l)
gam_j[[reg]][pos,i] = 0
pycond0 = fycond(i,listj,gam_j,theta_iter,sigma_iter,l)
if (method == 'KUO') {
aij = pycond1*pij[[reg]][pos]
bij = pycond0*(1 - pij[[reg]][pos])
}else if (method == 'SSVS') {
gam_j[[reg]][pos,i] = 1
itauij[[reg]][gam_j[[reg]][,i] == 0] = tauij[[reg]][gam_j[[reg]][,i] == 0]
itauij[[reg]][gam_j[[reg]][,i] == 1] =
cij[[reg]][gam_j[[reg]][,i] == 1]*tauij[[reg]][gam_j[[reg]][,i] == 1]
Dj[[reg]] = diag(itauij[[reg]])
pthetacond1 = dmnormB(x = theta_iter[[reg]][,i],mean = rep(0,k*eta[reg]),sigma = Dj[[reg]] %*% Rj[[reg]] %*% Dj[[reg]])
aij = pycond1*pthetacond1*pij[[reg]][pos]
gam_j[[reg]][pos,i] = 0
itauij[[reg]][gam_j[[reg]][,i] == 0] = tauij[[reg]][gam_j[[reg]][,i] == 0]
itauij[[reg]][gam_j[[reg]][,i] == 1] =
cij[[reg]][gam_j[[reg]][,i] == 1]*tauij[[reg]][gam_j[[reg]][,i] == 1]
Dj[[reg]] = diag(itauij[[reg]])
pthetacond0 = dmnormB(x = theta_iter[[reg]][,i],mean = rep(0,k*eta[reg]),sigma = Dj[[reg]] %*% Rj[[reg]] %*% Dj[[reg]])
bij = pycond0*pthetacond0*(1 - pij[[reg]][pos])
}
return(stats::rbinom(1,size = 1,prob = as.numeric((aij)/(aij + bij))))
}
rgamber = compiler::cmpfun(Vectorize(rgamber,"pos"))
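# rgamber is vectorized over 'pos', so all k*eta_j indicators of one regime can
# be updated in a single call; the parallel branch below exploits this by
# splitting the position vector across the cluster workers.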
# bounds for the thresholds' uniform prior (used by dmunif)
rini = ini_obj$init$r
a = ifelse(is.null(rini$za),min(Zt),stats::quantile(Zt,probs = rini$za))
b = ifelse(is.null(rini$zb),max(Zt),stats::quantile(Zt,probs = rini$zb))
#
# last check
if (!is.null(r_init)) {
if (is.numeric(r_init) & length(r_init) == {l - 1}) {
if (dmunif(r_init,a,b) == 0) {
stop('r_init must be in Zt range and for l >= 2, r[i] < r[i+1]')
}
}else{stop('r_init must be vector of length l - 1')}
}
if (l != 1) {
if (is.null(r_init)) {
r_iter[,1] = c(stats::quantile(Zt, probs = 1/l*(1:{l - 1})))
}else{
r_iter[,1] = r_init
}
}
list_m = list(r_iter = r_iter,theta_iter = theta_iter,sigma_iter = sigma_iter,gam_iter = gam_iter)
# iterations function
if (parallel) {
nclus = 2
micluster = parallel::makeCluster(nclus)
doParallel::registerDoParallel(micluster)
funcParallel = function(ik,iterprev,reg,i,listj,theta_iter,sigma_iter,gam_iter){
return(rgamber(pos = ik,reg,i = i,listj = listj,theta_iter,sigma_iter,gam_iter))
}
parallel::clusterEvalQ(micluster, library(BMTAR))
obj_S = list('method','dmnormB','k','eta','Rj','rgamber','fycond','lists','l','N','eta',
'pij')
parallel::clusterExport(cl = micluster,varlist = obj_S,envir = environment())
}
iter_str = function(i, list_m, ...){
r_iter = list_m$r_iter
theta_iter = list_m$theta_iter
sigma_iter = list_m$sigma_iter
gam_iter = list_m$gam_iter
if (l != 1) {
listj = lists(r_iter[,i - 1])
}else{
listj = lists(0)
}
for (lj in 1:l) {
Xj = listj$listaX[[lj]]
Yj = listj$listaY[[lj]]
yj = c(Yj)
Nj = listj$Nrg[lj]
theta0j = itheta0j[[lj]]
sigma0j = isigma0j[[lj]]
S0j = iS0j[[lj]]
nu0j = inu0j[[lj]]
if (method == 'SSVS') {
itauij[[lj]][gam_iter[[lj]][,i - 1] == 0] = tauij[[lj]][gam_iter[[lj]][,i - 1] == 0]
itauij[[lj]][gam_iter[[lj]][,i - 1] == 1] = cij[[lj]][gam_iter[[lj]][,i - 1] == 1]*tauij[[lj]][gam_iter[[lj]][,i - 1] == 1]
Dj[[lj]] = diag(itauij[[lj]])
theta0j = rep(0,k*eta[lj])
}else if (method == 'KUO') {
Dj[[lj]] = diag(k*eta[lj])
Rj[[lj]] = sigma0j
}
Vj = solve(t(diag(gam_iter[[lj]][,i - 1])) %*% t(Xj) %*% {diag(Nj) %x% solve(sigma_iter[[lj]][[i - 1]])} %*% Xj %*% diag(gam_iter[[lj]][,i - 1]) + solve(Dj[[lj]] %*% Rj[[lj]] %*% Dj[[lj]]))
thetaj = Vj %*% {t(diag(gam_iter[[lj]][,i - 1])) %*% t(Xj) %*% {diag(Nj) %x% solve(sigma_iter[[lj]][[i - 1]])} %*% yj + solve(sigma0j) %*% theta0j}
theta_iter[[lj]][,i] = mvtnorm::rmvnorm(1,mean = thetaj,sigma = Vj)
Hj = ks::invvec({Xj %*% diag(gam_iter[[lj]][,i - 1]) %*% theta_iter[[lj]][,i]},nrow = k,ncol = Nj)
Sj = (Yj - Hj) %*% t(Yj - Hj)
sigma_iter[[lj]][[i]] = MCMCpack::riwish(v = Nj + nu0j,S = Sj + S0j)
gam_iter[[lj]][,i] = gam_iter[[lj]][,i - 1]
}
for (jj in 1:l) {
if (parallel) {
count = 0
list_parallel = vector(mode = 'list',length = nclus)
range_clus = floor(k*eta[jj]/nclus) + 1
for (ih in 1:length(list_parallel)) {
if ({1 + count} > k*eta[jj]) {break()}
if ({range_clus + count} > k*eta[jj]) {
list_parallel[[ih]] = c({1 + count}:{k*eta[jj]})
}else{
list_parallel[[ih]] = c({1 + count}:{range_clus + count})
}
count = count + range_clus
}
list_parallel = list_parallel[!unlist(lapply(list_parallel,is.null))]
parallel::clusterExport(cl = micluster,varlist = list('jj'),envir = environment())
s = parallel::parLapply(micluster,list_parallel, funcParallel,
reg = jj,i = i,listj = listj,theta_iter = theta_iter,sigma_iter = sigma_iter,gam_iter = gam_iter)
gam_iter[[jj]][,i] = unlist(s)
}else{
gam_iter[[jj]][,i] = rgamber(pos = 1:{k*eta[jj]},reg = jj,i = i,listj = listj,theta_iter,sigma_iter,gam_iter)
}
}
if (l != 1) {
if (i <= other) {
ek = mvtnorm::rmvnorm(1,mean = rep(0,l - 1),sigma = 0.5*diag(l - 1))
}else{
ek = stats::runif(l - 1,-abs(rini$val_rmh),abs(rini$val_rmh))
}
rk = r_iter[,i - 1] + ek
listrk = lists(rk)
pr = dmunif(rk,a,b)*fycond(i,listrk,gam_iter,theta_iter,sigma_iter,l)
px = dmunif(r_iter[,i - 1],a,b)*fycond(i,listj,gam_iter,theta_iter,sigma_iter,l)
alpha = min(1,as.numeric(pr/px))
if (alpha >= stats::runif(1)) {
r_iter[,i] = rk
# '<<-' updates the acceptance counter kept in the enclosing mtarstr environment,
# so the acceptance proportion reported with the threshold estimates accumulates
acep <<- acep + 1
}else{
r_iter[,i] = r_iter[,i - 1]
}
}
list_m$r_iter = r_iter
list_m$theta_iter = theta_iter
list_m$sigma_iter = sigma_iter
list_m$gam_iter = gam_iter
return(list_m)
}
iter_str = compiler::cmpfun(iter_str)
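# One sweep of iter_str updates, in order: theta_j | rest from a multivariate
# normal, Sigma_j | rest from an inverse Wishart, gamma_j | rest via rgamber,
# and finally the thresholds r through a random-walk Metropolis-Hastings step
# (Gaussian proposals during the first 'other' warm-up iterations, uniform after).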
# iterations: gibbs and metropolis sampling
acep = 0
message('Estimating threshold(s), structural and non-structural parameters ...','\n')
pb = utils::txtProgressBar(min = 2, max = niter + burn + other,style = 3)
for (i in 2:{niter + burn + other}) {
iter_i = iter_str(i,list_m)
list_m = iter_i
utils::setTxtProgressBar(pb,i)
}
if (parallel) {parallel::stopCluster(micluster)}
close(pb)
message('Saving results ... \n')
if (l > 2) {
r_iter = list_m$r_iter[,-c(1:{other + burn})]
}else if (l == 2) {
r_iter = list_m$r_iter[-c(1:{other + burn})]
}else{
r_iter = NULL
}
theta_iter = list_m$theta_iter
sigma_iter = list_m$sigma_iter
gam_iter = list_m$gam_iter
# outputs
if (l != 1) {
rest = matrix(nrow = l - 1,ncol = 3)
colnames(rest) =
c(paste('lower limit ',(1 - level)/2*100,'%',sep = ''),'mean',paste('upper limit ',(1 + level)/2*100,'%',sep = ''))
rchain = matrix(r_iter,ncol = niter,nrow = l - 1)
rest[,1] = apply(rchain,1,stats::quantile,probs = (1 - level)/2)
rest[,3] = apply(rchain,1,stats::quantile,probs = (1 + level)/2)
rest[,2] = apply(rchain,1,mean)
rvec = c(rest[,2],'prop %' = round(acep/niter*100,4))
}else{
rvec = NULL
}
# save chains
# logLik
if (l >= 2) {
listj = lists(rest[,2])
}else{
listj = lists(0)
}
SigmaPrep = function(x){return(c(expm::sqrtm(matrix(x,k,k))))}
logLikj = vector(mode = 'numeric')
pf = qf = df = vector('numeric')
for (lj in 1:l) {
theta_iter[[lj]] = theta_iter[[lj]][,-c(1:other)]
gam_iter[[lj]] = gam_iter[[lj]][,-c(1:other)]
sigma_iter[[lj]] = sigma_iter[[lj]][-c(1:other)]
#
thetachain[[lj]] = theta_iter[[lj]][,-c(1:burn)]
gamchain[[lj]] = gam_iter[[lj]][,-c(1:burn)]
sigmachain[[lj]] = sapply(sigma_iter[[lj]][-c(1:burn)], ks::vec)
# credibility intervals
vecgamma = vectheta = matrix(nrow = k*eta[lj],ncol = 3)
vecsigma = matrix(nrow = k*k,ncol = 3)
colnames(vectheta) = colnames(vecsigma) =
c(paste0('lower limit ',(1 - level)/2*100,'%'),'mean',paste0('upper limit ',(1 + level)/2*100,'%'))
rownames(vecsigma) = c(sapply(1:k, function(x){paste0(1:k,x)}))
if (nu != 0 & qjmax[lj] != 0 & djmax[lj] != 0) {
rownames(vectheta) = rownames(vecgamma) =
rep(c('phi0',rep(paste0('phi',1:pjmax[lj]),each = k),rep(paste0('beta',1:qjmax[lj]),each = nu),paste0('delta',1:djmax[lj])),k)
}else if (nu != 0 & qjmax[lj] != 0 & djmax[lj] == 0) {
rownames(vectheta) = rownames(vecgamma) =
rep(c('phi0',rep(paste0('phi',1:pjmax[lj]),each = k),rep(paste0('beta',1:qjmax[lj]),each = nu)),k)
}else if (qjmax[lj] == 0 & djmax[lj] != 0) {
rownames(vectheta) = rownames(vecgamma) =
rep(c('phi0',rep(paste0('phi',1:pjmax[lj]),each = k),paste0('delta',1:djmax[lj])),k)
}else if (qjmax[lj] == 0 & djmax[lj] == 0) {
rownames(vectheta) = rownames(vecgamma) =
rep(c('phi0',rep(paste0('phi',1:pjmax[lj]),each = k)),k)
}
rownames(vecsigma) = c(sapply(1:k, function(x){paste0(1:k,x)}))
vectheta[,1] = apply(thetachain[[lj]],1,stats::quantile,probs = (1 - level)/2)
vectheta[,3] = apply(thetachain[[lj]],1,stats::quantile,probs = (1 + level)/2)
vectheta[,2] = apply(thetachain[[lj]],1,mean)
thetaest[[lj]] = vectheta
if (k == 1) {
vecsigma[,1] = sqrt(stats::quantile(sigmachain[[lj]],probs = (1 - level)/2))
vecsigma[,3] = sqrt(stats::quantile(sigmachain[[lj]],probs = (1 + level)/2))
vecsigma[,2] = sqrt(mean(sigmachain[[lj]]))
}else{
vecsigma[,1] = SigmaPrep(apply(sigmachain[[lj]],1,stats::quantile,probs = (1 - level)/2))
vecsigma[,3] = SigmaPrep(apply(sigmachain[[lj]],1,stats::quantile,probs = (1 + level)/2))
vecsigma[,2] = SigmaPrep(apply(sigmachain[[lj]],1,mean))
}
sigmaest[[lj]] = vecsigma
vec = apply(gamchain[[lj]],2,paste,collapse = '')
vecs = sort(table(vec), decreasing = TRUE)[1:3]
colnames(vecgamma) = c(paste('first freq',paste0(vecs[1]/niter*100,'%')),
paste('second freq',paste0(vecs[2]/niter*100,'%')),
paste('third freq',paste0(vecs[3]/niter*100,'%')))
vecgamma[,1] = gamchain[[lj]][,which(vec == names(vecs[1]))[1]]
vecgamma[,2] = gamchain[[lj]][,which(vec == names(vecs[2]))[1]]
vecgamma[,3] = gamchain[[lj]][,which(vec == names(vecs[3]))[1]]
gamest[[lj]] = vecgamma
# creation of the 'regime' type object
p = pjmax[lj]
q = qjmax[lj]
d = djmax[lj]
if (q == 0 & d == 0) {
thetaind = c(90,(10 + (1:p)) %x% rep(1,k))
}else if (q != 0 & d == 0) {
thetaind = c(90,(10 + (1:p)) %x% rep(1,k),(20 + (1:q)) %x% rep(1,nu))
}else if (q == 0 & d != 0) {
thetaind = c(90,(10 + (1:p)) %x% rep(1,k),30 + (1:d))
}else{
thetaind = c(90,(10 + (1:p)) %x% rep(1,k),(20 + (1:q)) %x% rep(1,nu),30 + (1:d))
}
Thetaj = t(ks::invvec(thetaest[[lj]][,2],ncol = k,nrow = eta[lj]))*t(ks::invvec(gamest[[lj]][,1],ncol = k,nrow = eta[lj]))
Ri = vector('list')
cs = matrix(Thetaj[,thetaind == 90],nrow = k,ncol = 1)
if (sum(cs == 0) != k) {Ri$cs = cs}
phiest = vector('list', p)
names(phiest) = paste0('phi',1:p)
for (j in 1:p) {phiest[[j]] = matrix(Thetaj[,thetaind == (10 + j)],nrow = k,ncol = k)}
pest = sapply(phiest,function(x){sum(x == 0) != k*k})
if (sum(pest) != 0) {
Ri$phi = phiest[pest]
}
if (q != 0) {
betaest = vector('list', q)
names(betaest) = paste0('beta',1:q)
for (j in 1:q) {betaest[[j]] = matrix(Thetaj[,thetaind == (20 + j)],nrow = k,ncol = nu)}
qest = sapply(betaest,function(x){sum(x == 0) != k*nu})
if (sum(qest) != 0) {
Ri$beta = betaest[qest]
}
}
if (d != 0) {
deltaest = vector('list', d)
names(deltaest) = paste0('delta',1:d)
for (j in 1:d) {deltaest[[j]] = matrix(Thetaj[,thetaind == (30 + j)],nrow = k,ncol = 1)}
dest = sapply(deltaest,function(x){sum(x == 0) != k})
if (sum(dest) != 0) {
Ri$delta = deltaest[dest]
}
}
Ri$sigma = ks::invvec(sigmaest[[lj]][,2],ncol = k,nrow = k)
if (!is.null(Ri$phi)) {
pf[lj] = max(as.numeric(sapply(names(Ri$phi),substr,4,4)))
}else{
pf[lj] = 1
Ri$phi = list(phi1 = matrix(0,k,k))
}
if (!is.null(Ri$beta)) {
qf[lj] = max(as.numeric(sapply(names(Ri$beta),substr,5,5)))
}else{qf[lj] = 0}
if (!is.null(Ri$delta)) {
df[lj] = max(as.numeric(sapply(names(Ri$delta),substr,6,6)))
}else{df[lj] = 0}
Rest[[lj]] = mtaregime(orders = list(p = pf[lj],q = qf[lj],d = df[lj]),cs = Ri$cs,
Phi = Ri$phi,Beta = Ri$beta,Delta = Ri$delta,
Sigma = Ri$sigma)
Xj = listj$listaX[[lj]]
Yj = listj$listaY[[lj]]
Nj = listj$Nrg[lj]
Hj = ks::invvec({Xj %*% diag(gamest[[lj]][,1]) %*% thetaest[[lj]][,2]},nrow = k,ncol = Nj)
Sj = (Yj - Hj) %*% t(Yj - Hj)
logLikj[lj] = log(det(Sj/Nj))
}
# outputs
## chain
if (chain) {
Chain = vector('list')
Chain$Theta = thetachain
Chain$Gamma = gamchain
Chain$Sigma = sigmachain
if (l != 1) {Chain$r = rchain}
}
## estimates
estimates = vector('list')
estimates$Theta = thetaest
estimates$Gamma = gamest
estimates$Sigma = sigmaest
data = ini_obj$tsregime_obj
orders = vector('list')
orders$pj = pf
orders$qj = qf
orders$dj = df
# fitted.values and residuals
Yt_fit = Yt_res = matrix(ncol = N,nrow = k)
for (t in 1:N) {
lj = listj$Ind[t]
p = pjmax[lj]
q = qjmax[lj]
d = djmax[lj]
yti = vector(mode = "numeric")
for (w in 1:p) {
if (t - w > 0) {yti = c(yti,Yt[,t - w])
}else{yti = c(yti,rep(0,k))}}
if (identical(yti,numeric(0))) {yti = rep(0,k*p)}
xti = vector(mode = "numeric")
for (w in 1:q) {
if (t - w > 0) {xti = c(xti,Xt[,t - w])
}else{xti = c(xti,rep(0,nu))}}
if (identical(xti,numeric(0))) {xti = rep(0,nu*q)}
zti = vector(mode = "numeric")
for (w in 1:d) {
if (t - w > 0) {zti = c(zti,Zt[t - w])
}else{zti = c(zti,0)}}
if (identical(zti,numeric(0))) {zti = rep(0,d)}
if (q == 0 & d != 0) {
wtj = c(1,yti,zti)
}else if (d == 0 & q != 0) {
wtj = c(1,yti,xti)
}else if (d == 0 & q == 0) {
wtj = c(1,yti)
}else{
wtj = c(1,yti,xti,zti)}
Xj = t(wtj) %x% diag(k)[1,]
if (k != 1) {for (s in 2:k) {Xj = cbind(Xj,t(wtj) %x% diag(k)[s,])}}
Yt_fit[,t] = Xj %*% diag(gamest[[lj]][,1]) %*% thetaest[[lj]][,2]
Sig = as.matrix(Rest[[lj]]$sigma)
Yt_res[,t] = solve(Sig) %*% (Yt[,t] - Yt_fit[,t])
}
if (l != 1) {estimates$r = rest}
if (chain) {
results = list(Nj = listj$Nrg,estimates = estimates,regime = Rest,Chain = Chain,
residuals = t(Yt_res), fitted.values = t(Yt_fit),
logLikj = logLikj,data = data,r = rvec,orders = orders)
}else{
results = list(Nj = listj$Nrg,estimates = estimates,regime = Rest,
residuals = t(Yt_res), fitted.values = t(Yt_fit),
logLikj = logLikj,data = data,r = rvec,orders = orders)
}
compiler::enableJIT(0)
class(results) = 'regime_model'
return(results)
}
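# Usage sketch (hedged: 'mtarinipars' is assumed to be the package's
# constructor for 'regime_inipars' objects; its exact arguments are defined
# elsewhere in the package):
#   ini = mtarinipars(tsregime_obj = tsregime(Yt, Zt),
#                     list_model = list(pars = list(l = 2)), method = 'KUO')
#   fit = mtarstr(ini_obj = ini, niter = 1000, chain = TRUE)
#   fit$estimates$r  # posterior mean and credibility limits for the threshold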
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/BMTAR/R/mtarstr.R ----
#==================================================================================================#
# Date: 14/04/2020
# Description:
# Function:
#==================================================================================================#
lists_ind = function(r,Zt,l,...){
N = length(Zt)
rj = matrix(nrow = 2,ncol = l)
if (l == 1) {
rj[,1] = c(-Inf,Inf)
}else{
rj[,1] = c(-Inf,r[1])
rj[,l] = c(rev(r)[1],Inf)
}
if (l > 2) {for (i2 in 2:{l - 1}) {rj[,i2] = c(r[i2 - 1],r[i2])}}
# indicator variable for the regime
if (any(is.na(Zt))) {
posZt = (1:N)[!is.na(Zt)]
}else{
posZt = 1:N
}
Ind = c()
for (j in 1:l) {
for (w in posZt) {
if (Zt[w] > rj[1,j] & Zt[w] <= rj[2,j]) {
Ind[w] = j
}
}
}
return(list(Ind = Ind))
}
tsregime = function(Yt, Zt = NULL, Xt = NULL, r = NULL){
list_result = vector('list')
if (!is.null(r)) {
if (!is.numeric(r)) {stop('r must be a numeric vector')}
l = length(r) + 1
if (l > 2) {for (i in 1:{l - 2}) {
if (r[i] >= r[i + 1]) {stop('r[i] must be smaller than r[i+1]')}}
}
list_result$l = l
if (is.null(Zt)) {
stop('Zt must be provided when a threshold value r is given')
}
}
if (!is.numeric(Yt)) {
stop('Yt must be a real matrix of dimension Nxk')
}
if (!is.matrix(Yt)) {Yt = as.matrix(Yt)}
if (!is.null(Zt)) {
if (!is.numeric(Zt)) {stop('Zt must be a real matrix of dimension Nx1')}
if (!is.matrix(Zt)) {Zt = as.matrix(Zt)}
if (nrow(Zt) != nrow(Yt)) {stop('Zt and Yt number of rows must match')}
}else{l = 1}
if (!is.null(Xt)) {
if (!is.numeric(Xt)) {stop('Xt must be a real matrix of dimension Nx(nu+1)')}
if (!is.matrix(Xt)) {Xt = as.matrix(Xt)}
if (nrow(Xt) != nrow(Yt)) {stop('Xt and Yt number of rows must match')}
nu = ncol(Xt)
list_result$nu = nu
}
k = ncol(Yt)
N = nrow(Yt)
list_result$Yt = Yt
list_result$Zt = Zt
if (!is.null(Xt)) {
list_result$Xt = Xt
}
if (any(is.na(Yt)) | any(is.na(Zt)) | any(is.na(Xt))) {
message('Yt, Zt or Xt contain NA values: use mtarmissing for estimation \n')
}
if (!is.null(r)) {
list_result$r = r
# compute which regime each observation belongs to
list_result$Ind = lists_ind(r,Zt,l)
Table_r = data.frame('N_reg' = c(table(list_result$Ind)),
'Prop_reg' = 100*c(prop.table(table(list_result$Ind))))
rownames(Table_r) = paste('Regime',1:l)
list_result$Summary_r = Table_r
}
list_result$N = N
list_result$k = k
class(list_result) = 'tsregime'
return(list_result)
}
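# Usage sketch (illustrative values):
#   Yt = matrix(stats::rnorm(200), ncol = 2)
#   Zt = stats::rnorm(100)
#   obj = tsregime(Yt = Yt, Zt = Zt, r = 0)
#   obj$Summary_r  # number and share of observations per regime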
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/BMTAR/R/tsregime.R ----
#' @title Balance a Panel Data Set
#'
#' @description This function drops observations from a data.frame
#' that are not part of a balanced panel data set.
#'
#' @param data data.frame used in function
#' @param idname unique id
#' @param tname time period name
#' @param return_data.table if TRUE, makeBalancedPanel will
#' return a data.table rather than a data.frame. Default
#' is FALSE.
#' @examples
#' id <- rep(seq(1,100), each = 2) # individual ids for setting up a two period panel
#' t <- rep(seq(1,2),100) # time periods
#' y <- rnorm(200) # outcomes
#' dta <- data.frame(id=id, t=t, y=y) # make into data frame
#' dta <- dta[-7,] # drop the 7th row from the dataset (which creates an unbalanced panel)
#' dta <- makeBalancedPanel(dta, idname="id", tname="t")
#'
#' @return data.frame that is a balanced panel
#' @export
makeBalancedPanel <- function(data,
idname,
tname,
return_data.table=FALSE) {
if (!inherits(data,"data.frame")) {
stop("data must be a data.frame")
}
data.table::setDT(data)
nt <- length(unique(data[[tname]]))
if (!return_data.table) {
return(as.data.frame(data[, if (.N==nt) .SD, by=idname]))
} else if (return_data.table) {
return(data[, if (.N == nt) .SD, by=idname])
}
}
#' @title Panel Data to Repeated Cross Sections
#'
#' @description panel2cs takes a 2 period dataset and turns it
#' into a cross sectional dataset. The data includes the
#' change in time varying variables between the
#' time periods. The default functionality
#' is to keep all the variables from period 1
#' and add all the variables listed by name in timevars
#' from period 2 to those.
#'
#' @param data data.frame used in function
#' @param timevars vector of names of variables to keep
#' @param idname unique id
#' @param tname time period name
#'
#' @return data.frame
#' @export
panel2cs <- function(data, timevars, idname, tname) {
#.Deprecated("panel2cs2")
if (length(unique(data[,tname])) != 2) {
stop("panel2cs only for 2 periods of panel data")
}
# balance the data, just in case
data <- makeBalancedPanel(data, idname, tname)
# put everything in the right order,
# so we can match it easily later on
data <- data[order(data[,idname], data[,tname]),]
tdta <- aggregate(data[,timevars], by=list(data[,idname]), FUN=function(x) { x[2] })
t1 <- unique(data[,tname])
t1 <- t1[order(t1)][1]
retdat <- subset(data, data[,tname]==t1)
retdat$yt1 <- tdta[,2]
retdat$dy <- retdat$yt1 - retdat$y
return(retdat)
}
#' @title Panel Data to Repeated Cross Sections
#'
#' @description panel2cs2 takes a 2 period dataset and turns it
#' into a cross sectional dataset; i.e., long to wide.
#' This function considers a particular case where there is some outcome
#' whose value can change over time. It returns the dataset from the first
#' period with the outcome in the second period and the change in outcomes
#' over time appended to it
#'
#' @param data data.frame used in function
#' @param yname name of outcome variable that can change over time
#' @param idname unique id
#' @param tname time period name
#' @param balance_panel whether to ensure that panel is balanced. Default is TRUE, but code runs somewhat
#' faster if this is set to be FALSE.
#'
#' @return data from first period with .y0 (outcome in first period),
#' .y1 (outcome in second period), and .dy (change in outcomes
#' over time) appended to it
#' @export
panel2cs2 <- function(data, yname, idname, tname, balance_panel=TRUE) {
# check that only 2 periods of data
if (length(unique(data[[tname]])) != 2) {
stop("panel2cs only for 2 periods of panel data")
}
# balance the data, just in case
if (balance_panel) {
data <- makeBalancedPanel(data, idname, tname)
}
# data.table sorting (fast and memory efficient)
data.table::setDT(data)
data.table::setorderv(data, cols = c(idname, tname))
# Trick to speed up by specializing for task at hand
# relies on being sorted by tname above
data$.y1 = data.table::shift(data[[yname]], -1)
data$.y0 = data[[yname]]
data$.dy = data$.y1 - data$.y0
# Subset to first row
first.period <- min(data[[tname]])
data = data[data[[tname]] == first.period,]
data
}
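# Usage sketch (illustrative two-period panel):
#   dta <- data.frame(id = rep(1:3, each = 2), t = rep(1:2, 3), y = rnorm(6))
#   panel2cs2(dta, yname = "y", idname = "id", tname = "t")
#   # returns the period-1 rows with .y0, .y1 and .dy appended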
#' @title Convert Vector of ids into Vector of Row Numbers
#'
#' @description ids2rownum takes a vector of ids and converts it to the right
#' row number in the dataset; ids should be unique in the dataset,
#' that is, don't pass the function panel data with duplicated ids
#'
#' @param ids vector of ids
#' @param data data frame
#' @param idname unique id
#'
#' @examples
#' ids <- seq(1,1000,length.out=100)
#' ids <- ids[order(runif(100))]
#' df <- data.frame(id=ids)
#' ids2rownum(df$id, df, "id")
#'
#' @return vector of row numbers
#' @export
ids2rownum <- function(ids, data, idname) {
vapply(ids, id2rownum, 1.0, data=data, idname=idname)
}
#' @title Take particular id and convert to row number
#'
#' @description id2rownum takes an id and converts it to the right
#' row number in the dataset; ids should be unique in the dataset,
#' that is, don't pass the function panel data with duplicated ids
#'
#' @param id a particular id
#' @param data data frame
#' @param idname unique id
#'
#' @keywords internal
id2rownum <- function(id, data, idname) {
which(data[,idname] == id)
}
#' @title Block Bootstrap
#'
#' @description make draws of all observations with the same id in a panel
#' data context. This is useful for bootstrapping with panel data.
#'
#' @param data data.frame from which you want to bootstrap
#' @param idname column in data which contains an individual identifier
#'
#' @return data.frame bootstrapped from the original dataset; this data.frame
#' will contain new ids
#'
#' @examples
#' \dontshow{ if(!requireNamespace("plm")) {
#' if(interactive() || is.na(Sys.getenv("_R_CHECK_PACKAGE_NAME_", NA))) {
#' stop("package 'plm' is required for this example")
#' } else q() }}
#' data("LaborSupply", package="plm")
#' bbs <- blockBootSample(LaborSupply, "id")
#' nrow(bbs)
#' head(bbs$id)
#'
#' @export
blockBootSample <- function(data, idname) {
n <- nrow(data)
ids <- sample(unique(data[,idname]), replace=TRUE)
newid <- seq_along(ids)
b1 <- lapply(1:length(ids), function(i) {
bd <- data[ data[,idname]==ids[i],]
bd[,idname] <- newid[i]
bd
})
do.call(rbind, b1)
}
#' @title Make a Distribution Function
#'
#' @description turn a vector of values and the corresponding distribution
#' function values into an ecdf. Vectors should be the same length and both increasing.
#'
#' @param x vector of values
#' @param Fx vector of the distribution function values
#' @param sorted boolean indicating whether or not x is already sorted;
#' computation is somewhat faster if already sorted
#' @param rearrange boolean indicating whether or not should monotize
#' distribution function
#' @param force01 boolean indicating whether or not to force the values of
#' the distribution function (i.e. Fx) to be between 0 and 1
#' @param method which method to pass to \code{approxfun} to approximate the
#' distribution function. Default is "constant"; other possible choice is
#' "linear". "constant" returns a step function, just like an empirical
#' cdf; "linear" linearly interpolates between neighboring points.
#'
#' @examples
#' y <- rnorm(100)
#' y <- y[order(y)]
#' u <- runif(100)
#' u <- u[order(u)]
#' F <- makeDist(y,u)
#'
#' @return ecdf
#' @export
makeDist <- function(x, Fx, sorted=FALSE, rearrange=FALSE, force01=FALSE, method="constant") {
if (!sorted) {
tmat <- cbind(x, Fx)
tmat <- tmat[order(x),]
x <- tmat[,1]
Fx <- tmat[,2]
}
if (force01) {
Fx <- sapply(Fx, function(Fxval) max(min(Fxval,1),0))
}
if (rearrange) {
Fx <- sort(Fx)
}
retF <- approxfun(x, Fx, method=method,
yleft=0, yright=1, f=0, ties="ordered")
class(retF) <- c("ecdf", "stepfun", class(retF))
assign("nobs", length(x), envir = environment(retF))
retF
}
#' @title Invert Ecdf
#'
#' @description take an ecdf object and invert it to get a step-quantile
#' function
#'
#' @param df an ecdf object
#'
#' @return stepfun object that contains the quantiles of the df
#'
#' @export
invertEcdf <- function(df) {
q <- knots(df)
tau <- df(q)
q <- c(q[1], q)
stepfun(tau, q)
}
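# Usage sketch: turning an ecdf into a step quantile function
#   Fy <- stats::ecdf(rnorm(100))
#   Qy <- invertEcdf(Fy)
#   Qy(0.5)  # roughly the sample median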
## ## TODO: fix this, can reference quantreg package
## ecdf2density <- function(df) {
## q <- knots(df)
## tau <- df(q)
## ## akjfun comes from rq package
## akjfun <- function(z, p, d = 10, g = 300, ...) {
## mz <- sum(z * p)
## sz <- sqrt(sum((z - mz)^2 * p))
## hz <- seq(mz - d * sz, mz + d * sz, length = g)
## fz <- quantreg::akj(z, hz, p = p, ...)$dens
## approxfun(hz, fz)
## }
## p <- diff(taus)
## akjfun(q, p)
## }
#' @title Check Function
#'
#' @description The check function used for optimizing to get quantiles
#'
#' @param a vector to compute quantiles for
#' @param tau between 0 and 1, ex. .5 implies get the median
#'
#' @examples
#' x <- rnorm(100)
#' x[which.min(checkfun(x, 0.5))] ##should be around 0
#'
#' @return numeric value
#' @export
checkfun <- function(a, tau) {
return(a*(tau - (1*(a<=0))))
}
#'@title Weighted Check Function
#'
#' @description Weights the check function
#'
#' @param q the value to check
#' @param cvec vector of data to compute quantiles for
#' @param tau between 0 and 1, ex. .5 implies get the median
#' @param weights the weights; the calling functions (e.g. getWeightedQuantile)
#' normalize them to have mean 1.
#'
#' @return numeric
#' @export
weighted.checkfun = function(q, cvec, tau, weights) {
w <- weights
retval <- mean(w*checkfun(cvec-q,tau))
return(retval)
}
#' @title Quantile of a Weighted Check Function
#'
#' @description Finds the quantile by optimizing the weighted check function
#'
#' @param tau between 0 and 1, ex. .5 implies get the median
#' @param cvec a vector to compute quantiles for
#' @param weights the weights; normalized to have mean 1 when \code{norm=TRUE}
#' before optimizing the weighted check function.
#' @param norm normalize the weights so that they have mean of 1, default is
#' to normalize
#'
#' @keywords internal
getWeightedQuantile <- function(tau, cvec, weights=NULL, norm=TRUE) {
if (is.null(weights)) {
weights <- 1
}
mw <- mean(weights)
if (norm) {
weights <- weights / mw
}
return(optimize(weighted.checkfun,
lower=min(cvec),
upper=max(cvec),
cvec=cvec, tau=tau, weights=weights)$minimum)
}
#' @title Get Weighted Quantiles
#'
#' @description Finds multiple quantiles by repeatedly calling
#' getWeightedQuantile
#'
#' @param tau a vector of values between 0 and 1
#' @param cvec a vector to compute quantiles for
#' @param weights the weights; normalized to have mean 1 when \code{norm=TRUE}
#' before optimizing the weighted check function.
#' @param norm normalize the weights so that they have mean of 1, default is
#' to normalize
#'
#' @return vector of quantiles
#' @export
getWeightedQuantiles <- function(tau, cvec, weights=NULL, norm=TRUE) {
vapply(tau, getWeightedQuantile, 1.0, cvec=cvec, weights=weights, norm=norm)
##wtd.quantile(cvec, weights=weights, probs=tau, normwt=T)
}
#' @title Weighted Mean
#'
#' @description Get the mean applying some weights
#'
#' @param y a vector to compute the mean for
#' @param weights the vector of weights, can be NULL, then will just return mean
#' @param norm normalize the weights so that they have mean of 1, default is
#' to normalize
#'
#' @return the weighted mean
#' @export
getWeightedMean <- function(y, weights=NULL, norm=TRUE) {
if (is.null(weights)) {
weights <- 1
}
mw <- mean(weights)
if (norm) {
weights <- weights/mw
}
mean(weights*y)
}
#' @title Weighted Distribution Function
#'
#' @description Get a distribution function from a vector of values
#' after applying some weights
#'
#' @param y a vector to compute the mean for
#' @param y.seq an optional vector of values to compute the distribution function
#' for; the default is to use all unique values of y
#' @param weights the vector of weights, can be NULL, then will just return mean
#' @param norm normalize the weights so that they have mean of 1, default is
#' to normalize
#'
#' @return ecdf
#' @export
getWeightedDf <- function(y, y.seq=NULL, weights=NULL, norm=TRUE) {
if (is.null(weights)) {
weights <- 1
}
mw <- mean(weights)
if (norm) {
weights <- weights/mw
}
if (is.null(y.seq)) {
y.seq <- unique(y)
y.seq <- y.seq[order(y.seq)]
}
dvals <- vapply(y.seq, FUN=function(x) { mean(weights*(y <= x)) }, 1.0)
makeDist(y.seq, dvals)
}
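# Usage sketch:
#   y <- rnorm(50)
#   w <- runif(50)
#   Fw <- getWeightedDf(y, weights = w)
#   Fw(0)  # weighted share of observations with y <= 0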
#' @title Cross Section to Panel
#'
#' @description Turn repeated cross sections data into panel data by
#' imposing rank invariance; does not require
#' that the inputs have the same length
#'
#' @param cs1 data frame, the first cross section
#' @param cs2 data frame, the second cross section
#' @param yname the name of the variable to calculate difference for (should be the same in each dataset)
#'
#' @return the change in outcomes over time
#' @export
cs2panel <- function(cs1, cs2, yname) {
nu <- min(nrow(cs1), nrow(cs2))
if (nu == nrow(cs2)) {
ut <- cs2[,yname]
ut <- ut[order(-ut)] ##orders largest to smallest
ps <- seq(1,0,length.out=length(ut)) ##orders largest to smallest
utmin1 <- quantile(cs1[,yname], probs=ps, type=1)
##F.untreated.change.t <- ecdf(ut-utmin1)
} else {
utmin1 <- cs2[,yname]
utmin1 <- utmin1[order(-utmin1)] ##orders largest to smallest
ps <- seq(1,0,length.out=length(utmin1)) ##orders largest to smallest
ut <- quantile(cs1[,yname], probs=ps, type=1)
##F.untreated.change.t <- ecdf(ut-utmin1)
}
return(ut - utmin1)
}
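# Usage sketch (illustrative): rank invariance is imposed by matching the
# ordered outcomes of one cross section to the same quantiles of the other
#   cs1 <- data.frame(y = rnorm(80))
#   cs2 <- data.frame(y = rnorm(100, mean = 1))
#   dy <- cs2panel(cs1, cs2, "y")  # quantile-matched changes in outcomes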
#' @title Compare Variables across Groups
#'
#' @description \code{compareBinary} takes in a variable, e.g. union,
#' and runs a bivariate regression of x on treatment (for summary statistics)
#'
#' @param x variables to run regression on
#' @param on binary variable
#' @param dta the data to use
#' @param w weights
#' @param report which type of report to make; diff is the difference between
#' the two variables by group
#'
#'
#' @return matrix of results
#' @export
compareBinary <- function(x, on, dta, w=rep(1,nrow(dta)), report=c("diff","levels","both")) {
if (inherits(dta[,x], "factor")) {
df <- model.matrix(as.formula(paste0("~",x,"-1")), dta)
vnames <- colnames(df)
df <- data.frame(cbind(df, dta[,on]))
colnames(df) <- c(vnames, "treat")
t(simplify2array(lapply(vnames, compareSingleBinary, on="treat", dta=df, w=w, report=report)))
} else {
compareSingleBinary(x, on, dta, w, report)
}
}
#' @title Compare a single variable across two groups
#'
#' @description \code{compareSingleBinary} takes in a variable, e.g. union,
#' and runs a bivariate regression of x on treatment (for summary statistics)
#'
#' @inheritParams compareBinary
#'
#' @return matrix of results
#'
#' @keywords internal
compareSingleBinary <- function(x, on, dta, w=rep(1,nrow(dta)), report=c("diff","levels","both")) {
coefmat <- summary(lm(as.formula(paste(x, on ,sep=" ~ ")), data=dta,
weights=w))$coefficients
if (report=="diff") {
return(c(coefmat[1,1] + coefmat[2,1], coefmat[1,1], abs(coefmat[2,3])>1.96))
} else if (report=="levels") { ## report the levels
return(c(coefmat[1,1] + coefmat[2,1], coefmat[1,1], abs(coefmat[2,3])>1.96))
} else if (report=="both") {
return(c(coefmat[1,1] + coefmat[2,1], coefmat[1,1], coefmat[2,1], round(coefmat[2,4],3)))
}
}
#-----------------------------------------------------------------------------
# functions for working with formulas
#-----------------------------------------------------------------------------
#' @title Right-hand Side Variables
#'
#' @description Take a formula and return a vector of the variables
#' on the right hand side
#'
#' @param formla a formula
#'
#' @examples
#' ff <- yvar ~ x1 + x2
#' rhs.vars(ff)
#'
#' ff <- y ~ x1 + I(x1^2)
#' rhs.vars(ff)
#'
#' @return vector of variable names
#' @export
rhs.vars <- function(formla) {
## allvars <- all.vars(formla)
## if (length(formla)==3) {
## allvars <- allvars[-1]
## }
labels(terms(formla))
}
#' @title Left-hand Side Variables
#'
#' @description Take a formula and return a vector of the variables
#' on the left hand side, it will return NULL for a one sided formula
#'
#' @inheritParams rhs.vars
#'
#' @examples
#' ff <- yvar ~ x1 + x2
#' lhs.vars(ff)
#' @return vector of variable names
#' @export
lhs.vars <- function(formla) {
if (length(formla) == 2) {
return(NULL) ## there is no lhs variable
}
all.vars(formla)[1]
}
#' @title Right-hand Side of Formula
#'
#' @description Take a formula and return the right hand side
#' of the formula
#'
#' @param formla a formula
#'
#' @examples
#' ff <- yvar ~ x1 + x2
#' rhs(ff)
#'
#' @return a one sided formula
#' @export
rhs <- function(formla) {
toformula(NULL,rhs.vars(formla))
}
#' @title Variable Names to Formula
#'
#' @description take a name for a y variable and a vector of names
#' for x variables and turn them into a formula
#'
#' @param yname the name of the y variable
#' @param xnames vector of names for x variables
#'
#' @examples
#' toformula("yvar", c("x1","x2"))
#'
#' ## should return yvar ~ 1
#' toformula("yvar", rhs.vars(~1))
#'
#' @return a formula
#' @export
toformula <- function(yname, xnames) {
if (length(xnames)==0) {
return(as.formula(paste0(yname," ~ 1")))
}
out <- paste0(yname,"~")
xpart <- paste0(xnames, collapse="+")
out <- paste0(out,xpart)
out <- as.formula(out)
out
}
#' @title Add a Covariate to a Formula
#' @description \code{addCovToFormla} adds some covariates to a formula;
#' covs should be a list of variable names
#'
#'
#' @param covs should be a list of variable names
#' @param formla which formula to add covariates to
#' @return formula
#'
#' @examples
#' formla <- y ~ x
#' addCovToFormla(list("w","z"), formla)
#'
#' formla <- ~x
#' addCovToFormla("z", formla)
#'
#' @export
addCovToFormla <- function(covs, formla) {
vs <- rhs.vars(formla) ## vector of x variable names
vs <- c(vs, covs)
formla <- toformula(lhs.vars(formla), vs)
return(formla)
}
#' @title Drop a Covariate from a Formula
#' @description \code{dropCovFromFormla} drops some covariates from a
#' formula; covs should be a list of variable names
#'
#'
#' @param covs should be a list of variable names
#' @param formla which formula to drop covariates from
#' @return formula
#'
#' @examples
#' formla <- y ~ x + w + z
#' dropCovFromFormla(list("w","z"), formla)
#'
#' dropCovFromFormla("z", formla)
#'
#' @export
dropCovFromFormla <- function(covs, formla) {
vs <- rhs.vars(formla)
vs <- vs[!(vs %in% covs)]
toformula(lhs.vars(formla), vs)
}
#' @title Combine Two Distribution Functions
#'
#' @description Combines two distribution functions with weights given by \code{pstrat}
#' @param y.seq sequence of possible y values
#' @param dflist list of distribution functions to combine
#' @param pstrat a vector of weights to put on each distribution function;
#' if weights are not provided then equal weight is given to each
#' distribution function
#' @param ... additional arguments that can be passed to BMisc::makeDist
#'
#' @examples
#' x <- rnorm(100)
#' y <- rnorm(100,1,1)
#' Fx <- ecdf(x)
#' Fy <- ecdf(y)
#' both <- combineDfs(seq(-2,3,0.1), list(Fx,Fy))
#' plot(Fx, col="green")
#' plot(Fy, col="blue", add=TRUE)
#' plot(both, add=TRUE)
#'
#' @return ecdf
#' @export
combineDfs <- function(y.seq, dflist, pstrat=NULL, ...) {
if (is.null(pstrat)) {
pstrat <- rep(1/length(dflist), length(dflist))
}
y.seq <- y.seq[order(y.seq)]
df.valslist <- lapply(dflist, function(ddff) {
ddff(y.seq)})
df.valsmat <- simplify2array(df.valslist)
for (i in 1:length(pstrat)) {
df.valsmat[,i] <- df.valsmat[,i]*pstrat[i]
}
df.vals <- rowSums(df.valsmat)
makeDist(y.seq, df.vals, ...)
}
#' @title Subsample of Observations from Panel Data
#'
#' @description returns a subsample of a panel data set; in particular drops
#' all observations that are not in \code{keepids}. If \code{keepids} is not set,
#' it randomly keeps \code{nkeep} observations.
#'
#' @param dta a data.frame which is a balanced panel
#' @param idname the name of the id variable
#' @param tname the name of the time variable
#' @param keepids which ids to keep
#' @param nkeep how many ids to keep (only used if \code{keepids}
#' is not set); the default is the number of unique ids
#'
#' @examples
#' \dontshow{ if(!requireNamespace("plm")) {
#' if(interactive() || is.na(Sys.getenv("_R_CHECK_PACKAGE_NAME_", NA))) {
#' stop("package 'plm' is required for this example")
#' } else q() }}
#' data("LaborSupply", package="plm")
#' nrow(LaborSupply)
#' unique(LaborSupply$year)
#' ss <- subsample(LaborSupply, "id", "year", nkeep=100)
#' nrow(ss)
#'
#' @return a data.frame that contains a subsample of \code{dta}
#'
#' @export
subsample <- function(dta, idname, tname, keepids=NULL, nkeep=NULL) {
ids <- unique(dta[,idname])
if (is.null(keepids)) {
if (is.null(nkeep)) nkeep <- length(ids)
keepids <- sample(ids, size=nkeep)
}
retdta <- dta[ dta[,idname] %in% keepids, ]
retdta
}
## THESE ARE THROWING ERRORS
## this should return the distribution function
## currently running ci.qte and then inverting, but probably
## would be better to calculate it directly
## ci.treated.Df <- function(data, y.seq, formla, xformla, probs, weights=NULL, se, iters, retEachIter, method="logit", pl, cores) {
## qp <- QTEparams(formla, xformla, t=NULL, tmin1=NULL, tmin2=NULL, tname=NULL, data=data, weights=weights, idname=NULL, probs=probs, iters=iters, alp=alp, method=method, plot=plot, se=se, retEachIter=retEachIter, bootstrapiter=FALSE, seedvec=NULL, pl=pl, cores=cores)
## setupData(qp)
## pscore.reg <- glm(data[,treat] ~ as.matrix(data[,x]),
## family=binomial(link=method))
## pscore <- fitted(pscore.reg)
## d <- data[,treat]
## y <- data[,yname]
## y.seq <- y.seq[order(y.seq)]
## df.vals <- vapply(y.seq, function(x) {
## mean((d/pscore)*(y <= x) / (mean(d/pscore))) }, 1.0)
## makeDist(y.seq, df.vals)
## }
## ## this should return the distribution function
## ## currently running ci.qte and then inverting, but probably
## ## would be better to calculate it directly
## ci.untreated.Df <- function(data, y.seq, formla, xformla, probs, weights=NULL, se, iters, retEachIter, method="logit", pl, cores) {
## ##OLD: using qte method
## ##cfirp <- ci.qte(formla=formla, xformla=xformla,
## ## probs=probs, weights=weights, se=se, iters=iters,
## ## retEachIter=RE, pl=pl, cores=cores, data=data)
## ##list(cfirp$F.treated.t, cfirp$F.treated.t.cf)
## qp <- QTEparams(formla, xformla, t=NULL, tmin1=NULL, tmin2=NULL, tname=NULL, data=data, weights=weights, idname=NULL, probs=probs, iters=iters, alp=alp, method=method, plot=plot, se=se, retEachIter=retEachIter, bootstrapiter=FALSE, seedvec=NULL, pl=pl, cores=cores)
## setupData(qp)
## pscore.reg <- glm(data[,treat] ~ as.matrix(data[,x]),
## family=binomial(link=method))
## pscore <- fitted(pscore.reg)
## d <- data[,treat]
## y <- data[,yname]
## y.seq <- y.seq[order(y.seq)]
## df.vals <- vapply(y.seq, function(x) {
## mean(((1-d)/(1-pscore))*(y <= x) / mean((1-d)/(1-pscore))) }, 1.0)
## makeDist(y.seq, df.vals)
## }
## ##get the distribution function
## ## under stratified random sampling
## strat.ci.df <- function(y.seq, stratvarname, pstrat, formla, xformla, data, probs, weights, se, iters, retEachIter, pl, cores) {
## browser()
## cdta <- lapply(unique(data[,stratvarname]),
## function(x) { data[data[,stratvarname]==x,] })
## ctreatedflist <- lapply(cdta, ci.treated.Df, y.seq=y.seq, formla=formla,
## xformla=xformla, probs=probs,
## se=se, iters=iters, retEachIter=retEachIter,
## pl=pl, cores=cores)
## treated.df <- combineDfs(y.seq, ctreatedflist, pstrat)
## cuntreatedflist <- lapply(cdta, ci.untreated.Df, y.seq=y.seq, formla=formla,
## xformla=xformla, probs=probs,
## se=se, iters=iters, retEachIter=retEachIter,
## pl=pl, cores=cores)
## untreated.df <- combineDfs(y.seq, cuntreatedflist, pstrat)
## return(c(treated.df, untreated.df))
## }
#' @title Return Particular Element from Each Element in a List
#' @description a function to take a list and get a particular part
#' out of each element in the list
#' @param listolists a list
#' @param whichone which item to get out of each list (can be numeric or name)
#'
#' @return list of all the elements 'whichone' from each list
#'
#' @examples
#' len <- 100 # number of elements in list
#' lis <- lapply(1:len, function(l) list(x=(-l), y=l^2) ) # create list
#' getListElement(lis, "x")[1] # should be equal to -1
#' getListElement(lis, 1)[1] # should be equal to -1
#'
#' @export
getListElement <- function(listolists, whichone=1) {
lapply(listolists, function(l) l[[whichone]])
}
#' @title source_all
#'
#' @description Source all the files in a folder
#'
#' @param fldr path to a folder (the path should end with a trailing "/")
#'
#' @export
source_all <- function(fldr) {
sapply(paste0(fldr,list.files(fldr)), source)
}
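# Illustrative sketch (not part of the package): since source_all pastes fldr
# directly onto the file names, the folder path should end with "/". The file
# name ex.R and the variable hello_from_file are hypothetical.
## tmp <- tempfile(); dir.create(tmp)
## writeLines("hello_from_file <- 42", file.path(tmp, "ex.R"))
## source_all(paste0(tmp, "/"))
## hello_from_file  # 42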
#' @title TorF
#' @description A function to replace NA's with FALSE in a vector of logicals
#' @param cond a vector of conditions to check
#' @param use_isTRUE whether or not to use a vectorized version
#' of isTRUE. This is generally slower but covers more cases.
#' @return logical vector
#'
#' @export
TorF <- function(cond, use_isTRUE=FALSE) {
if (!is.logical(cond)) stop("cond should be a logical vector")
if (use_isTRUE) {
cond <- sapply(cond, isTRUE)
} else {
cond[is.na(cond)] <- FALSE
}
cond
}
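# Illustrative sketch (not part of the package): TorF maps NA to FALSE and
# leaves TRUE/FALSE untouched, either by direct replacement or via a
# vectorized isTRUE.
## TorF(c(TRUE, NA, FALSE))                    # TRUE FALSE FALSE
## TorF(c(TRUE, NA, FALSE), use_isTRUE=TRUE)   # TRUE FALSE FALSE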
#' @title get_group_inner
#' @description Calculates the group for a particular unit
#' @param this_df a data.frame, for this function it should be specific to
#' a particular unit
#' @inheritParams get_group
#' @keywords internal
#' @export
get_group_inner <- function(this_df, tname, treatname) {
if ( all(this_df[,treatname] == 0) ) return(0)
as.numeric( this_df[ this_df[,treatname] == 1, ][1,tname] )
}
#' @title get_group
#' @description A function to calculate a unit's group in a panel data setting
#' with a binary treatment and staggered treatment adoption and where
#' there is a column in the data indicating whether or not a unit is treated
#' @param df the data.frame used in the function
#' @param idname name of column that holds the unit id
#' @param tname name of column that holds the time period
#' @param treatname name of column with the treatment indicator
#' @export
get_group <- function(df, idname, tname, treatname) {
group_vec <- df %>%
group_by(.data[[idname]]) %>%
group_map(~ rep(get_group_inner(.x, tname, treatname), nrow(.x))) %>%
unlist()
group_vec
}
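# Illustrative sketch (not part of the package, assumes dplyr is available):
# a hypothetical staggered-adoption panel with two units. Unit 1 is first
# treated in period 3 (group 3); unit 2 is never treated (group 0).
## toy <- data.frame(id=rep(1:2, each=3), year=rep(1:3, 2),
##                   treat=c(0,0,1, 0,0,0))
## get_group(toy, "id", "year", "treat")  # 3 3 3 0 0 0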
#' @title get_YiGmin1_inner
#' @description Calculates a unit's outcome (it can also be used for a covariate)
#' in the period right before it becomes treated. The unit's group must
#' be specified at this point. This function operates on a data.frame
#' that is already local to a particular unit.
#' @param this_df a data.frame, for this function it should be specific to
#' a particular unit
#' @inheritParams get_YiGmin1
#' @keywords internal
#' @export
get_YiGmin1_inner <- function(this_df, yname, tname, gname) {
this_df <- as.data.frame(this_df)
maxT <- max(this_df[,tname])
this_group <- unique(this_df[,gname])
YiGmin1 <- ifelse(this_group==0,
this_df[this_df[,tname]==maxT,yname],
this_df[this_df[,tname]==(this_group-1),yname])
YiGmin1
}
#' @title get_YiGmin1
#' @description A function to calculate outcomes for units in the period
#' right before they become treated (this function can also be used to recover
#' covariates, etc. in the period right before a unit becomes treated).
#' For units that do not
#' participate in the treatment (and therefore have group==0), they are
#' assigned their outcome in the last period.
#' @param yname name of column containing the outcome (or other variable)
#' for which to calculate its outcome in the immediate pre-treatment period
#' @param gname name of column containing the unit's group
#' @inheritParams get_group
#' @export
get_YiGmin1 <- function(df, idname, yname, tname, gname) {
YiGmin1_vec <- df %>%
group_by(.data[[idname]]) %>%
group_map(~ rep(get_YiGmin1_inner(.x, yname, tname, gname), nrow(.x))) %>%
unlist()
YiGmin1_vec
}
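# Illustrative sketch (not part of the package): continuing the hypothetical
# toy panel above, unit 1 (group 3) gets its period-2 outcome while the
# never-treated unit 2 (group 0) gets its last-period outcome.
## toy$y <- c(10, 11, 15, 20, 21, 22)
## toy$G <- get_group(toy, "id", "year", "treat")
## get_YiGmin1(toy, "id", "y", "year", "G")  # 11 11 11 22 22 22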
#' @title get_Yi1_inner
#' @description Calculates a unit's outcome in the first time period.
#' This function operates on a data.frame that is already local to a particular
#' unit.
#' @inheritParams get_YiGmin1_inner
#' @keywords internal
#' @export
get_Yi1_inner <- function(this_df, yname, tname, gname) {
this_df <- as.data.frame(this_df)
minT <- min(this_df[,tname])
Yi1 <- this_df[this_df[,tname]==minT,yname]
Yi1
}
#' @title get_Yi1
#' @description A function to calculate outcomes for units in the first time
#' period that is available in a panel data setting (this function can also
#' be used to recover covariates, etc. in the first period).
#' @inheritParams get_YiGmin1
#' @export
get_Yi1 <- function(df, idname, yname, tname, gname) {
Yi1_vec <- df %>%
group_by(.data[[idname]]) %>%
group_map(~ rep(get_Yi1_inner(.x, yname, tname, gname), nrow(.x))) %>%
unlist()
Yi1_vec
}
#' @title get_Yit_inner
#' @description Calculates a unit's outcome in some particular period `tp`.
#' This function operates on a data.frame that is already local to a particular
#' unit.
#' @inheritParams get_YiGmin1_inner
#' @param tp The time period for which to get the outcome
#' @keywords internal
#' @export
get_Yit_inner <- function(this_df, tp, yname, tname) {
this_df <- as.data.frame(this_df)
Yit <- this_df[this_df[,tname]==tp,yname]
Yit
}
#' @title get_Yit
#' @description A function to calculate outcomes for units in a particular
#' time period `tp` in a panel data setting (this function can also
#' be used to recover covariates, etc. in period `tp`).
#' @inheritParams get_YiGmin1
#' @inheritParams get_Yit_inner
#' @return a vector of outcomes in period t, the vector
#' will have the length nT (i.e., this is returned for
#' each element in the panel, not for a particular period)
#' @export
get_Yit <- function(df, tp, idname, yname, tname) {
Yit_vec <- df %>%
group_by(.data[[idname]]) %>%
group_map(~ rep(get_Yit_inner(.x, tp, yname, tname), nrow(.x))) %>%
unlist()
Yit_vec
}
#' @title get_Yibar_inner
#' @description Calculates a unit's average outcome across all periods.
#' This function operates on a data.frame that is already local to a particular
#' unit.
#' @inheritParams get_YiGmin1_inner
#' @keywords internal
#' @export
get_Yibar_inner <- function(this_df, yname) {
this_df <- as.data.frame(this_df)
mean(this_df[,yname])
}
#' @title get_Yibar
#' @description A function to calculate the average outcome across all time
#' periods separately for each unit in a panel data setting (this function can also
#' be used to recover covariates, etc.).
#' @inheritParams get_YiGmin1
#' @export
get_Yibar <- function(df, idname, yname) {
Yibar_vec <- df %>%
group_by(.data[[idname]]) %>%
group_map(~ rep(get_Yibar_inner(.x, yname), nrow(.x))) %>%
unlist()
Yibar_vec
}
#' @title get_Yibar_pre_inner
#' @description Calculates a unit's average outcome in pre-treatment periods
#' (it can also be used for a covariate). The unit's group must
#' be specified at this point. This function operates on a data.frame
#' that is already local to a particular unit.
#' @param this_df a data.frame, for this function it should be specific to
#' a particular unit
#' @inheritParams get_YiGmin1_inner
#' @keywords internal
#' @export
get_Yibar_pre_inner <- function(this_df, yname, tname, gname) {
this_df <- as.data.frame(this_df)
this_group <- unique(this_df[,gname])
Yibarpre <- ifelse(this_group==0,
mean(this_df[,yname]),
mean(this_df[this_df[,tname] < this_group, yname]) )
Yibarpre
}
#' @title get_Yibar_pre
#' @description A function to calculate average outcomes for units in
#' their pre-treatment periods (this function can also be used to recover
#' pre-treatment averages of covariates, etc.).
#' For units that do not
#' participate in the treatment (and therefore have group==0), the
#' function calculates their overall average outcome.
#' @param yname name of column containing the outcome (or other variable)
#' for which to calculate its outcome in the immediate pre-treatment period
#' @param gname name of column containing the unit's group
#' @inheritParams get_YiGmin1
#' @export
get_Yibar_pre <- function(df, idname, yname, tname, gname) {
Yibar_pre_vec <- df %>%
group_by(.data[[idname]]) %>%
group_map(~ rep(get_Yibar_pre_inner(.x, yname, tname, gname), nrow(.x))) %>%
unlist()
Yibar_pre_vec
}
#' @title get_lagYi
#' @description A function that calculates lagged outcomes in a panel data setting.
#' If the data.frame that is passed in has nxT rows, the resulting vector will
#' also have nxT elements, with the first \code{nlags} elements for each unit set to NA.
#' @inheritParams get_Yi1
#' @param nlags The number of periods to lag. The default is 1, which computes
#' the lag from the previous period.
#' @export
get_lagYi <- function(df, idname, yname, tname, nlags=1) {
df <- df %>%
dplyr::group_by(.data[[idname]]) %>%
dplyr::mutate(.lag=dplyr::lag(.data[[yname]],nlags,order_by=.data[[tname]]))
df$.lag
}
#' @title get_first_difference
#' @description A function that calculates the first difference in a panel data
#' setting. If the data.frame that is passed in has nxT rows, the resulting
#' vector will also have nxT elements, with the first element for each unit set
#' to NA.
#' @inheritParams get_lagYi
#' @export
get_first_difference <- function(df, idname, yname, tname) {
df$.lag <- get_lagYi(df, idname, yname, tname)
df[,yname] - df$.lag
}
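# Illustrative sketch (not part of the package): lags and first differences on
# the hypothetical toy panel above; the first observation of each unit has no
# lag, so its entry is NA.
## get_lagYi(toy, "id", "y", "year")             # NA 10 11 NA 20 21
## get_first_difference(toy, "id", "y", "year")  # NA  1  4 NA  1  1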
|
/scratch/gouwar.j/cran-all/cranData/BMisc/R/BMisc.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
#' @title element_wise_mult
#'
#' @description This is a function that takes in two matrices of dimension
#' nxB and nxk and returns a Bxk matrix that comes from
#' element-wise multiplication of every column
#' in the first matrix times the entire second matrix and then
#' averaging over the n-dimension. It is equivalent to (but faster
#' than) the following R code:
#' `sapply(1:biters, function(b) sqrt(n)*colMeans(Umat[,b]*inf.func))`.
#' This function is particularly useful for fast computations
#' using the multiplier bootstrap.
#'
#' @param U nxB matrix (e.g., this could be a matrix of
#' Rademacher weights for B bootstrap iterations using the
#' multiplier bootstrap)
#' @param inf_func nxk matrix (e.g., this could be a matrix
#' containing the influence function for different parameter
#' estimates)
#'
#' @return a Bxk matrix
#' @export
element_wise_mult <- function(U, inf_func) {
.Call('_BMisc_element_wise_mult', PACKAGE = 'BMisc', U, inf_func)
}
#' @title multiplier_bootstrap
#'
#' @description A function that takes in an influence function (an
#' nxk matrix) and the number of bootstrap iterations and
#' returns a Bxk matrix of bootstrap results. This function
#' uses Rademacher weights.
#'
#' @param inf_func nxk matrix (e.g., this could be a matrix
#' containing the influence function for different parameter
#' estimates)
#' @param biters the number of bootstrap iterations
#'
#' @return a Bxk matrix
#' @export
multiplier_bootstrap <- function(inf_func, biters) {
.Call('_BMisc_multiplier_bootstrap', PACKAGE = 'BMisc', inf_func, biters)
}
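# Illustrative sketch (not part of the package): a pure-R version of the
# computation these C++ routines speed up, using Rademacher weights. Umat and
# inf.func are hypothetical placeholders.
## n <- 200; k <- 2; biters <- 50
## inf.func <- matrix(rnorm(n*k), n, k)
## Umat <- matrix(sample(c(-1, 1), n*biters, replace=TRUE), n, biters)
## res <- t(sapply(1:biters, function(b) sqrt(n)*colMeans(Umat[,b]*inf.func)))
## # res is a biters x k matrix; per the equivalence noted in the docs above,
## # it matches element_wise_mult(Umat, inf.func)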
|
/scratch/gouwar.j/cran-all/cranData/BMisc/R/RcppExports.R
|
#' @title BMisc
#'
#' @description A set of miscellaneous helper functions
#'
#' @docType package
#' @name BMisc
#' @useDynLib BMisc
#' @importFrom Rcpp sourceCpp
#' @import graphics
#' @import stats
#' @importFrom dplyr group_by mutate %>% group_map
NULL
|
/scratch/gouwar.j/cran-all/cranData/BMisc/R/imports.R
|
# data.table is generally careful to minimize the scope for namespace
# conflicts (i.e., functions with the same name as in other packages);
# a more conservative approach using @importFrom should be careful to
# import any needed data.table special symbols as well, e.g., if you
# run DT[ , .N, by='grp'] in your package, you'll need to add
# @importFrom data.table .N to prevent the NOTE from R CMD check.
# See ?data.table::`special-symbols` for the list of such symbols
# data.table defines; see the 'Importing data.table' vignette for more
# advice (vignette('datatable-importing', 'data.table')).
#
#' @import data.table
NULL
|
/scratch/gouwar.j/cran-all/cranData/BMisc/R/utils-data-table.R
|
utils::globalVariables(c(".data"))
|
/scratch/gouwar.j/cran-all/cranData/BMisc/R/zzz.R
|
clusterplots <-
function(L,data){
# Function that plots the time series clusters generated by any of
# the functions: "tseriesca", "tseriescm" or "tseriescq".
#
# IN:
#
# L <- output list from the functions: "tseriesca", "tseriescm"
# or "tseriescq".
# data <- Data frame with the time series information.
#
# OUT:
#
# Plots of the time series clusters.
scale <- L$scale
data <- scaleandperiods(data,scale)
mydata <- data$mydata # Matrix with the scaled data.
periods <- data$periods # Array with the data periods.
T <- nrow(mydata)
fT <- floor(T/3)
auxtt <- matrix(0,fT,1)
for(i in 1:fT){
auxtt[i] <- 3*i
}
mstar <- L$mstar
gnstar <- L$gnstar
par(mfrow = c(2,2))
for(j in 1:mstar){
cc <- as.matrix(which(gnstar == j))
cl <- rainbow(nrow(cc))
if(scale==TRUE){
ts.plot(mydata[,cc],gpars=list(type = "l",main = paste("Group",j),xlab = "",xaxt = 'n',ylab = "Scaled variable in [0,1]",col = cl))
axis(1,at = auxtt,labels = periods[auxtt],las = 2,tck = 0)
}else{
ts.plot(mydata[,cc],gpars=list(type = "l",main = paste("Group",j),xlab = "",xaxt = 'n',ylab = "Clustering variable",col = cl))
axis(1,at = auxtt,labels = periods[auxtt],las = 2,tck = 0)
}
}
}
|
/scratch/gouwar.j/cran-all/cranData/BNPTSclust/R/clusterplots.R
|
comp11 <-
function(y){
# Function that computes the distinct observations in a numeric vector.
# It is based entirely on the "comp1" function from the BNPdensity
# package with the exception that it returns the variable that
# rearranges the numeric vector into one with its unique values only.
#
# IN:
#
# y <- numeric vector.
#
# OUT:
#
# jstar <- variable that rearranges "y" into a vector with its unique
# values.
# nstar <- frequency of each distinct observation in "y".
# rstar <- number of distinct observations in "y".
# gn <- variable that indicates the group number to which every
# entry in "y" belongs.
n <- length(y)
mat <- outer(y, y, "==")
jstar <- led <- rep(FALSE, n)
for (j in seq(n)) {
if (!led[j]) {
jstar[j] <- TRUE
if (j == n)
break
ji <- seq(j + 1, n)
tt <- mat[ji, j] %in% TRUE
led[ji] <- led[ji] | tt
}
if (all(led[-seq(j)]))
break
}
ystar <- y[jstar]
nstar <- apply(as.matrix(mat[, jstar]), 2, sum)
rstar <- length(nstar)
gn <- match(y, ystar)
return(list(jstar = jstar, nstar = nstar, rstar = rstar, gn = gn))
}
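# Illustrative sketch (not part of the package): tracing comp11 on a small
# vector. The distinct values of c(5,3,5,7) are 5, 3 and 7, so:
## comp11(c(5, 3, 5, 7))
## # $jstar: TRUE TRUE FALSE TRUE  (first occurrence of each distinct value)
## # $nstar: 2 1 1                 (frequencies of 5, 3, 7)
## # $rstar: 3                     (number of distinct values)
## # $gn:    1 2 1 3               (group label of each entry)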
|
/scratch/gouwar.j/cran-all/cranData/BNPTSclust/R/comp11.R
|
designmatrices <-
function(level,trend,seasonality,deg,T,n,fun){
# Function that generates the design matrices of the clustering
# algorithm based on the parameters that the user wants to consider,
# i.e. level, polinomial trend and/or seasonal components. It also
# returns the number of parameters that are considered and not
# considered for clustering. Since this function is for internal use,
# its arguments are taken directly from the clustering functions.
#
# IN:
#
# level <- Variable that indicates if the level of the time
# series will be considered for clustering. If
# level = 0, then it is omitted. If level = 1, then it
# is taken into account.
# trend <- Variable that indicates if the polynomial trend of
# the model will be considered for clustering. If
# trend = 0, then it is omitted. If trend = 1, then it
# is taken into account.
# seasonality <- Variable that indicates if the seasonal components
# of the model will be considered for clustering.
# If seasonality = 0, then they are omitted. If
# seasonality = 1, then they are taken into account.
# deg <- Degree of the polynomial trend of the model.
# T <- Number of periods of the time series.
# n <- Number of time series.
# fun <- Clustering function being used.
#
# OUT:
#
# Z <- Design matrix of the parameters not considered for clustering.
# X <- Design matrix of the parameters considered for clustering.
# p <- Number of parameters not considered for clustering.
# d <- Number of parameters considered for clustering.
if(fun == "tseriesca"){
M <- matrix(0,T,1+deg) # Matrix with all components.
M[,1] <- 1 # Level components.
for(i in 1:deg){ # Trend components.
M[,i+1] <- seq(T)^i
}
if(level == 0 & trend == 0){
p <- 1+deg
d <- 0
Z <- as.matrix(M)
return(list(p=p,d=d,Z=Z))
}
if(level == 1 & trend == 0){
p <- deg
d <- 1
Z <- as.matrix(M[,(2:(deg+1))])
X <- as.matrix(M[,1])
return(list(p=p,d=d,Z=Z,X=X))
}
if(level == 0 & trend == 1){
p <- 1
d <- deg
Z <- as.matrix(M[,1])
X <- as.matrix(M[,(2:(deg+1))])
return(list(p=p,d=d,Z=Z,X=X))
}
if(level == 1 & trend == 1){
p <- 0
d <- 1+deg
X <- as.matrix(M)
return(list(p=p,d=d,X=X))
}
}
if(fun == "tseriescm"){
M <- matrix(0,T,1+deg+11) # Matrix with all components.
M[,1] <- 1 # Level components.
for(i in 1:deg){ # Trend components.
M[,i+1] <- seq(T)^i
}
# Seasonal components
num <- floor(T/12) # Number of years present in the data
if (num < 1){ # If the number of months in the data is less than 12, the design matrix is filled this way
X2 <- diag(1,(T-1))
X2 <- cbind(X2,matrix(0,(T-1),1))
X <- rbind(X2,matrix(0,1,T))
}else{
X21 <- rbind(diag(1,11),matrix(0,1,11)) # Matrix that contains the indicator functions for the 11 months and one row of zeros to avoid singularity problems in the design matrix
X2 <- X21
resid <- T %% 12 # Number of leftover months beyond the last complete year
if (num >= 2){
for (i in 2:num){
X2 <- rbind(X2,X21)
}
}
}
M[,((deg+2):(1+deg+11))] <- rbind(X2,X21[0:resid,])
if(level == 0 & trend == 0 & seasonality == 0){
p <- 1+deg+11
d <- 0
Z <- as.matrix(M)
return(list(p=p,d=d,Z=Z))
}
if(level == 0 & trend == 0 & seasonality == 1){
p <- 1+deg
d <- 11
Z <- as.matrix(M[,(1:(deg+1))])
X <- as.matrix(M[,((deg+2):(1+deg+11))])
return(list(p=p,d=d,Z=Z,X=X))
}
if(level == 0 & trend == 1 & seasonality == 0){
p <- 1+11
d <- deg
Z <- as.matrix(cbind(M[,1],M[,(deg+2):(1+deg+11)]))
X <- as.matrix(M[,(2:(deg+1))])
return(list(p=p,d=d,Z=Z,X=X))
}
if(level == 1 & trend == 0 & seasonality == 0){
p <- deg+11
d <- 1
Z <- as.matrix(M[,(2:(1+deg+11))])
X <- as.matrix(M[,1])
return(list(p=p,d=d,Z=Z,X=X))
}
if(level == 1 & trend == 1 & seasonality == 0){
p <- 11
d <- 1+deg
Z <- as.matrix(M[,(deg+2):(1+deg+11)])
X <- as.matrix(M[,(1:(deg+1))])
return(list(p=p,d=d,Z=Z,X=X))
}
if(level == 1 & trend == 0 & seasonality == 1){
p <- deg
d <- 1+11
Z <- as.matrix(M[,(2:(deg+1))])
X <- as.matrix(cbind(M[,1],M[,((deg+2):(1+deg+11))]))
return(list(p=p,d=d,Z=Z,X=X))
}
if(level == 0 & trend == 1 & seasonality == 1){
p <- 1
d <- deg+11
Z <- as.matrix(M[,1])
X <- as.matrix(M[,(2:(1+deg+11))])
return(list(p=p,d=d,Z=Z,X=X))
}
if(level == 1 & trend == 1 & seasonality == 1){
p <- 0
d <- 1+deg+11
X <- as.matrix(M)
return(list(p=p,d=d,X=X))
}
}
if(fun == "tseriescq"){
M <- matrix(0,T,1+deg+3) # Matrix with all components.
M[,1] <- 1 # Level components.
for(i in 1:deg){ # Trend components.
M[,i+1] <- seq(T)^i
}
# Seasonal components
num <- floor(T/4) # Number of years present in the data
if (num < 1){ # If the number of quarters in the data is less than 4, the design matrix is filled this way
X2 <- diag(1,(T-1))
X2 <- cbind(X2,matrix(0,(T-1),1))
X <- rbind(X2,matrix(0,1,T))
}else{
X21 <- rbind(diag(1,3),matrix(0,1,3)) # Matrix that contains the indicator functions for the 3 quarters and one row of zeros to avoid singularity problems in the design matrix
X2 <- X21
resid <- T %% 4 # Number of leftover quarters beyond the last complete year
if (num >= 2){
for (i in 2:num){
X2 <- rbind(X2,X21)
}
}
}
M[,((deg+2):(1+deg+3))] <- rbind(X2,X21[0:resid,])
if(level == 0 & trend == 0 & seasonality == 0){
p <- 1+deg+3
d <- 0
Z <- as.matrix(M)
return(list(p=p,d=d,Z=Z))
}
if(level == 0 & trend == 0 & seasonality == 1){
p <- 1+deg
d <- 3
Z <- as.matrix(M[,(1:(deg+1))])
X <- as.matrix(M[,((deg+2):(1+deg+3))])
return(list(p=p,d=d,Z=Z,X=X))
}
if(level == 0 & trend == 1 & seasonality == 0){
p <- 1+3
d <- deg
Z <- as.matrix(cbind(M[,1],M[,(deg+2):(1+deg+3)]))
X <- as.matrix(M[,(2:(deg+1))])
return(list(p=p,d=d,Z=Z,X=X))
}
if(level == 1 & trend == 0 & seasonality == 0){
p <- deg+3
d <- 1
Z <- as.matrix(M[,(2:(1+deg+3))])
X <- as.matrix(M[,1])
return(list(p=p,d=d,Z=Z,X=X))
}
if(level == 1 & trend == 1 & seasonality == 0){
p <- 3
d <- 1+deg
Z <- as.matrix(M[,(deg+2):(1+deg+3)])
X <- as.matrix(M[,(1:(deg+1))])
return(list(p=p,d=d,Z=Z,X=X))
}
if(level == 1 & trend == 0 & seasonality == 1){
p <- deg
d <- 1+3
Z <- as.matrix(M[,(2:(deg+1))])
X <- as.matrix(cbind(M[,1],M[,((deg+2):(1+deg+3))]))
return(list(p=p,d=d,Z=Z,X=X))
}
if(level == 0 & trend == 1 & seasonality == 1){
p <- 1
d <- deg+3
Z <- as.matrix(M[,1])
X <- as.matrix(M[,(2:(1+deg+3))])
return(list(p=p,d=d,Z=Z,X=X))
}
if(level == 1 & trend == 1 & seasonality == 1){
p <- 0
d <- 1+deg+3
X <- as.matrix(M)
return(list(p=p,d=d,X=X))
}
}
}
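# Illustrative sketch (not part of the package): for the annual model with a
# level and a quadratic trend, all 1+deg = 3 columns of this internal helper's
# output enter the clustering design matrix X and none remain in Z.
## DM <- designmatrices(level=1, trend=1, seasonality=0, deg=2, T=4, n=1,
##                      fun="tseriesca")
## DM$p  # 0
## DM$d  # 3
## DM$X  # columns: intercept, t, t^2 for t = 1,...,4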
|
/scratch/gouwar.j/cran-all/cranData/BNPTSclust/R/designmatrices.R
|
diagplots <-
function(L){
# Function that produces the diagnostic plots to assess the convergence
# of the Markov Chains generated by any of the functions:
# "tseriesca", "tseriescm" or "tseriescq".
#
# IN:
#
# L <- output list from the functions: "tseriesca", "tseriescm"
# or "tseriescq".
#
# OUT:
#
# Trace plots, histograms and ergodic mean plots of the posterior
# distribution sample from the model parameters.
sig2epssample <- L$sig2epssample
sig2alphasample <- L$sig2alphasample
sig2betasample <- L$sig2betasample
sig2thesample <- L$sig2thesample
rhosample <- L$rhosample
asample <- L$asample
bsample <- L$bsample
msample <- L$msample
n <- length(msample)
# Trace plots
par(mfrow = c(2,2))
plot(1:n,sig2epssample[,1],type = "l",main = "Trace plot of sig2eps",xlab = "Iteration number",ylab = "Simulated value")
plot(1:n,sig2thesample,type = "l",main = "Trace plot of sig2theta",xlab = "Iteration number",ylab = "Simulated value")
if(is.null(sig2betasample) == TRUE){
plot(1:n,sig2alphasample[,1],type = "l",main = "Trace plot of sig2alpha",xlab = "Iteration number",ylab = "Simulated value")
}else if(is.null(sig2alphasample) == TRUE){
plot(1:n,sig2betasample[,1],type = "l",main = "Trace plot of sig2beta",xlab = "Iteration number",ylab = "Simulated value")
}else{
plot(1:n,sig2alphasample[,1],type = "l",main = "Trace plot of sig2alpha",xlab = "Iteration number",ylab = "Simulated value")
plot(1:n,sig2betasample[,1],type = "l",main = "Trace plot of sig2beta",xlab = "Iteration number",ylab = "Simulated value")
}
par(mfrow = c(2,2))
plot(1:n,rhosample,type = "l",main = "Trace plot of rho",xlab = "Iteration number",ylab = "Simulated value")
plot(1:n,asample,type = "l",main = "Trace plot of a",xlab = "Iteration number",ylab = "Simulated value")
plot(1:n,bsample,type = "l",main = "Trace plot of b",xlab = "Iteration number",ylab = "Simulated value")
plot(1:n,msample,type = "l",main = "Trace plot of m",xlab = "Iteration number",ylab = "Number of groups")
# Histograms
par(mfrow = c(2,2))
hist(sig2epssample[,1],main = "Hist. of sig2eps",xlab = "Simulated values")
hist(sig2thesample,main = "Hist. of sig2the",xlab = "Simulated values")
if(is.null(sig2betasample) == TRUE){
hist(sig2alphasample[,1],main = "Hist. of sig2alpha",xlab = "Simulated values")
}else if(is.null(sig2alphasample) == TRUE){
hist(sig2betasample[,1],main = "Hist. of sig2beta",xlab = "Simulated values")
}else{
hist(sig2alphasample[,1],main = "Hist. of sig2alpha",xlab = "Simulated values")
hist(sig2betasample[,1],main = "Hist. of sig2beta",xlab = "Simulated values")
}
par(mfrow = c(2,2))
hist(rhosample,main = "Hist. of rho",xlab = "Simulated values")
hist(asample,main = "Hist. of a",xlab = "Simulated values")
hist(bsample,main = "Hist. of b",xlab = "Simulated values")
hist(msample,main = "Hist. of m",xlab = "Number of groups")
# Ergodic means
par(mfrow = c(2,2))
plot(1:n,cumsum(sig2epssample[,1])/1:n,type = "l",main = "Ergodic mean of sig2eps",xlab = "Iteration number",ylab = "")
plot(1:n,cumsum(sig2thesample)/1:n,type = "l",main = "Ergodic mean of sig2the",xlab = "Iteration number",ylab = "")
if(is.null(sig2betasample) == TRUE){
plot(1:n,cumsum(sig2alphasample[,1])/1:n,type = "l",main = "Ergodic mean of sig2alpha",xlab = "Iteration number",ylab = "")
}else if(is.null(sig2alphasample) == TRUE){
plot(1:n,cumsum(sig2betasample[,1])/1:n,type = "l",main = "Ergodic mean of sig2beta",xlab = "Iteration number",ylab = "")
}else{
plot(1:n,cumsum(sig2alphasample[,1])/1:n,type = "l",main = "Ergodic mean of sig2alpha",xlab = "Iteration number",ylab = "")
plot(1:n,cumsum(sig2betasample[,1])/1:n,type = "l",main = "Ergodic mean of sig2beta",xlab = "Iteration number",ylab = "")
}
par(mfrow = c(2,2))
plot(1:n,cumsum(rhosample)/1:n,type = "l",main = "Ergodic mean of rho",xlab = "Iteration number",ylab = "")
plot(1:n,cumsum(asample)/1:n,type = "l",main = "Ergodic mean of a",xlab = "Iteration number",ylab = "")
plot(1:n,cumsum(bsample)/1:n,type = "l",main = "Ergodic mean of b",xlab = "Iteration number",ylab = "")
plot(1:n,cumsum(msample)/1:n,type = "l",main = "Ergodic mean of m",xlab = "Iteration number",ylab = "")
}
|
/scratch/gouwar.j/cran-all/cranData/BNPTSclust/R/diagplots.R
|
scaleandperiods <-
function(data,scale){
# Function that receives a data frame with the time series data and
# scales it in the [0,1] interval, if desired by the user.
# The function considers that the time periods of the data appear
# as row names.
#
# IN:
#
# data <- data frame with the time series information.
# scale <- Flag that indicates if the time series data should be scaled to the
# [0,1] interval with a linear transformation as proposed by
# Nieto-Barajas and Contreras-Cristan (2014). If TRUE, then the time
# series are scaled to the [0,1] interval. Its value comes directly from
# the "scale" argument of the clustering functions.
#
# OUT:
#
# periods <- array with the time periods of the data.
# mydata <- data frame with the time series data.
# cts <- variable that indicates if some time series were removed
# because they were constant in time. If no time series were
# removed, cts = 0. If there were time series removed, cts
# indicates the column of such time series.
n <- nrow(data)
m <- ncol(data)
periods <- rownames(data)
mydata <- data
colnames(mydata) <- colnames(data)
names <- colnames(mydata)
maxima <- matrix(0,m,1)
minima <- matrix(0,m,1)
for (i in 1:m){
maxima[i,1] = max(mydata[,i])
minima[i,1] = min(mydata[,i])
}
cts <- which(maxima == minima)
if(length(cts) != 0){
cat("Removing series ",names[cts]," since they are constant.","\n")
mydata <- mydata[,-cts]
n <- nrow(mydata)
m <- ncol(mydata)
maxima <- matrix(0,m,1)
minima <- matrix(0,m,1)
for (i in 1:m){
maxima[i,1] = max(mydata[,i])
minima[i,1] = min(mydata[,i])
}
}
if(scale==TRUE){
for (j in 1:m){
m1 = maxima[j,1] - minima[j,1]
for (k in 1:n){
mydata[k,j] = 1 + (1/m1)*(mydata[k,j] - maxima[j,1])
}
}
}
return(list(periods = periods, mydata = mydata, cts = cts))
}
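# Illustrative sketch (not part of the package): the linear transformation
# 1 + (x - max)/(max - min) sends each series' minimum to 0 and its maximum
# to 1. The one-column data frame below is hypothetical.
## ex <- data.frame(ts1 = c(2, 4, 6), row.names = c("2001", "2002", "2003"))
## scaleandperiods(ex, scale = TRUE)$mydata$ts1  # 0.0 0.5 1.0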
|
/scratch/gouwar.j/cran-all/cranData/BNPTSclust/R/scaleandperiods.R
|
tseriesca <-
function(data,maxiter=500,burnin=floor(0.1*maxiter),thinning=5,scale=TRUE,
level=FALSE,trend=TRUE,deg=2,c0eps=2,c1eps=1,c0beta=2,
c1beta=1,c0alpha=2,c1alpha=1,priora=TRUE,pia=0.5,q0a=1,
q1a=1,priorb=TRUE,q0b=1,q1b=1,a=0.25,b=0,indlpml=FALSE){
# Function that performs the time series clustering algorithm
# described in Nieto-Barajas and Contreras-Cristan (2014) "A Bayesian
# Non-Parametric Approach for Time Series Clustering". Bayesian
# Analysis, Vol. 9, No. 1 (2014) pp.147-170". This function is
# designed for annual time series data.
#
# IN:
#
# data <- Data frame with the time series information.
# maxiter <- Maximum number of iterations for Gibbs sampling.
# Default value = 500.
# burnin <- Burn-in period of the Markov Chain generated by Gibbs
# sampling.
# thinning <- Number that indicates how many Gibbs sampling simulations
# should be skipped to form the Markov Chain.
# scale <- Flag that indicates if the time series data should be scaled to the
# [0,1] interval with a linear transformation as proposed by
# Nieto-Barajas and Contreras-Cristan (2014). If TRUE, then the time
# series are scaled to the [0,1] interval.
# level <- Flag that indicates if the level of the time
# series will be considered for clustering. If TRUE, then it
# is taken into account.
# trend <- Flag that indicates if the polynomial trend of
# the model will be considered for clustering.
# If TRUE, then it is taken into account.
# deg <- Degree of the polynomial trend of the model.
# Default value = 2.
# c0eps <- Shape parameter of the hyper-prior distribution
# on sig2eps. Default value = 2.
# c1eps <- Rate parameter of the hyper-prior distribution
# on sig2eps. Default value = 1.
# c0beta <- Shape parameter of the hyper-prior distribution
# on sig2beta. Default value = 2.
# c1beta <- Rate parameter of the hyper-prior distribution
# on sig2beta. Default value = 1.
# c0alpha <- Shape parameter of the hyper-prior distribution
# on sig2alpha. Default value = 2.
# c1alpha <- Rate parameter of the hyper-prior distribution
# on sig2alpha. Default value = 1.
# priora <- Flag that indicates if a prior on parameter "a" is
# to be assigned. If TRUE, a prior on "a" is assigned.
# Default value = TRUE.
# pia <- Mixing proportion of the prior distribution on parameter
# "a". Default value = 0.5.
# q0a <- Shape parameter of the continuous part of the prior
# distribution on parameter "a". Default value = 1.
# q1a <- Shape parameter of the continuous part of the prior
# distribution on parameter "a". Default value = 1.
# priorb <- Flag that indicates if a prior on parameter "b" is
# to be assigned. If TRUE, a prior on "b" is assigned.
# Default value = TRUE.
# q0b <- Shape parameter of the prior distribution on parameter
# "b". Default value = 1.
# q1b <- Shape parameter of the prior distribution on parameter
# "b". Default value = 1.
# a <- Initial/fixed value of parameter "a".
# Default value = 0.25.
# b <- Initial/fixed value of parameter "b".
# Default value = 0.
# indlpml <- Flag that indicates if the LPML is to be calculated.
# If TRUE, LPML is calculated. Default value = FALSE.
#
# OUT:
#
# mstar <- Number of groups of the chosen cluster configuration.
# gnstar <- Array that contains the group number to which each time
# series belongs.
# HM <- Heterogeneity Measure of the chosen cluster configuration.
# arrho <- Acceptance rate of the parameter "rho".
# ara <- Acceptance rate of the parameter "a".
# arb <- Acceptance rate of the parameter "b".
# sig2epssample <- Matrix that in its columns contains the sample of each sig2eps_i's posterior distribution after Gibbs sampling.
# sig2alphasample <- Matrix that in its columns contains the sample of each sig2alpha_i's posterior distribution after Gibbs sampling.
# sig2betasample <- Matrix that in its columns contains the sample of each sig2beta_i's posterior distribution after Gibbs sampling.
# sig2thesample <- Vector that contains the sample of sig2the's posterior distribution after Gibbs sampling.
# rhosample <- Vector that contains the sample of rho's posterior distribution after Gibbs sampling.
# asample <- Vector that contains the sample of a's posterior distribution after Gibbs sampling.
# bsample <- Vector that contains the sample of b's posterior distribution after Gibbs sampling.
# msample <- Vector that contains the sample of the number of groups at each Gibbs sampling iteration.
# lpml <- If indlpml = TRUE, lpml contains the value of the LPML of the
# chosen model.
# scale <- Flag that indicates if the time series data were scaled to the
# [0,1] interval with a linear transformation. This will be taken as an input for the
# plotting functions.
if(level == TRUE){
level <- 1
}else{
level <- 0
}
if(trend == TRUE){
trend <- 1
}else{
trend <- 0
}
if(priora == TRUE){
priora <- 1
}else{
priora <- 0
}
if(priorb == TRUE){
priorb <- 1
}else{
priorb <- 0
}
if(indlpml == TRUE){
indlpml <- 1
}else{
indlpml <- 0
}
if(deg%%1 != 0 | deg <= 0){
stop("deg must be a positive integer number.")
}
if(maxiter%%1 != 0 | maxiter <= 0){
stop("maxiter must be a positive (large) integer number.")
}
if(burnin%%1 != 0 | burnin < 0){
stop("burnin must be a non-negative integer number.")
}
if(thinning%%1 != 0 | thinning < 0){
stop("thinning must be a non-negative integer number.")
}
if(maxiter <= burnin){
stop("maxiter cannot be less than or equal to burnin.")
}
if(c0eps <= 0 | c1eps <= 0 | c0beta <= 0 | c1beta <= 0 |
c0alpha <= 0 | c1alpha <= 0){
stop("c0eps,c1eps,c0beta,c1beta,c0alpha and c1alpha must be
positive numbers.")
}
if(pia <= 0 | pia >= 1){
stop("The mixing proportion pia must be a number in (0,1).")
}
if(q0a <= 0 | q1a <= 0){
stop("q0a and q1a must be positive numbers.")
}
if(a < 0 | a >= 1){
stop("'a' must be a number in [0,1).")
}
if(q0b <= 0 | q1b <= 0){
stop("q0b and q1b must be positive numbers.")
}
if(b <= -a){
stop("'b' must be greater than '-a'.")
}
data <- scaleandperiods(data,scale)
mydata <- as.matrix(data$mydata) # Matrix with the scaled data.
periods <- data$periods # Array with the data periods.
cts <- data$cts # Variable that indicates if any time series
# were removed from the original data set because they were constant.
##### CONSTRUCTION OF THE DESIGN MATRICES #####
T <- nrow(mydata) # Number of periods of the time series
n <- ncol(mydata) # Number of time series present in the data
DM <- designmatrices(level,trend,seasonality=0,deg,T,n,fun="tseriesca")
p <- DM$p
d <- DM$d
if((level+trend) == 0){
Z <- DM$Z
}else if((level+trend) == 2){
X <- DM$X
}else{
Z <- DM$Z
X <- DM$X
}
##### INITIAL VALUES FOR THE PARAMETERS THAT WILL BE PART OF THE GIBBS SAMPLING #####
sig2eps <- matrix(1,n,1) # Vector that has the diagonal entries of the variance-covariance matrix for every epsilon_i.
sig2the <- 1 # Initial value for sig2the.
rho <- 0 # Initial value for rho.
P <- matrix(0,T,T) # Initial matrix P.
for (j in seq(T)){
for (k in seq(T)){
P[j,k] <- rho^(abs(j-k))
}
}
R <- sig2the*P # Initial matrix R.
if((level+trend) == 0){
sig2alpha <- matrix(1,p,1) # Vector that has the diagonal entries of the variance-covariance matrix for alpha.
sigmaalpha <- diag(c(sig2alpha),p,p) # Variance-covariance matrix for alpha.
invsigmaalpha <- diag(1/c(sig2alpha),p,p) # Inverse variance-covariance matrix for alpha.
alpha <- matrix(mvrnorm(n,matrix(0,p,1),sigmaalpha),p,n) # alpha is a matrix with a vector value of alpha for every time series in its columns.
theta <- matrix(mvrnorm(n,matrix(0,T,1),R),T,n) # theta is a matrix with a vector value of theta for every time series in its columns.
gamma <- theta # gamma is the union by rows of the beta and theta matrices.
}else if((level+trend) == 2){
sig2beta <- matrix(1,d,1) # Vector that has the diagonal entries of the variance-covariance matrix for beta.
sigmabeta <- diag(c(sig2beta),d,d) # Variance-covariance matrix for beta.
invsigmabeta <- diag(1/c(sig2beta),d,d) # Inverse variance-covariance matrix for beta.
beta <- matrix(mvrnorm(n,matrix(0,d,1),sigmabeta),d,n) # beta is a matrix with a vector value of beta for every time series in its columns.
theta <- matrix(mvrnorm(n,matrix(0,T,1),R),T,n) # theta is a matrix with a vector value of theta for every time series in its columns.
gamma <- rbind(beta,theta) # gamma is the union by rows of the beta and theta matrices.
}else{
sig2beta <- matrix(1,d,1) # Vector that has the diagonal entries of the variance-covariance matrix for beta.
sigmabeta <- diag(c(sig2beta),d,d) # Variance-covariance matrix for beta.
invsigmabeta <- diag(1/c(sig2beta),d,d) # Inverse variance-covariance matrix for beta.
sig2alpha <- matrix(1,p,1) # Vector that has the diagonal entries of the variance-covariance matrix for alpha.
sigmaalpha <- diag(c(sig2alpha),p,p) # Variance-covariance matrix for alpha.
invsigmaalpha <- diag(1/c(sig2alpha),p,p) # Inverse variance-covariance matrix for alpha.
alpha <- matrix(mvrnorm(n,matrix(0,p,1),sigmaalpha),p,n) # alpha is a matrix with a vector value of alpha for every time series in its columns.
beta <- matrix(mvrnorm(n,matrix(0,d,1),sigmabeta),d,n) # beta is a matrix with a vector value of beta for every time series in its columns.
theta <- matrix(mvrnorm(n,matrix(0,T,1),R),T,n) # theta is a matrix with a vector value of theta for every time series in its columns.
gamma <- rbind(beta,theta) # gamma is the union by rows of the beta and theta matrices.
}
iter <- 0 # Counter for each Gibbs sampling iteration.
iter1 <- 0 # Counter for the number of iterations saved during the Gibbs sampling.
arrho <- 0 # Variable that will contain the acceptance rate of rho in the Metropolis-Hastings step.
ara <- 0 # Variable that will contain the acceptance rate of a in the Metropolis-Hastings step.
arb <- 0 # Variable that will contain the acceptance rate of b in the Metropolis-Hastings step.
sim <- matrix(0,n,n) # Initialization of the similarities matrix.
if(thinning == 0){
CL <- floor(maxiter-burnin)
}else{
CL <- floor((maxiter-burnin)/thinning)
}
memory <- matrix(0,CL*n,n) # Matrix that will contain the cluster configuration of every iteration that is saved during the Gibbs sampling.
memorygn <- matrix(0,CL,n) # Matrix that will save the group number to which each time series belongs in every iteration saved.
sig2epssample <- matrix(0,CL,n) # Matrix that in its columns will contain the sample of each sig2eps_i's posterior distribution after Gibbs sampling.
sig2thesample <- matrix(0,CL,1) # Vector that will contain the sample of sig2the's posterior distribution after Gibbs sampling.
rhosample <- matrix(0,CL,1) # Vector that will contain the sample of rho's posterior distribution after Gibbs sampling.
asample <- matrix(0,CL,1) # Vector that will contain the sample of a's posterior distribution after Gibbs sampling.
bsample <- matrix(0,CL,1) # Vector that will contain the sample of b's posterior distribution after Gibbs sampling.
msample <- matrix(0,CL,1) # Vector that will contain the sample of the number of groups at each Gibbs sampling iteration.
if((level+trend) == 0){
sig2alphasample <- matrix(0,CL,p) # Matrix that in its columns will contain the sample of each sig2alpha_i's posterior distribution after Gibbs sampling.
}else if((level+trend) == 2){
sig2betasample <- matrix(0,CL,d) # Matrix that in its columns will contain the sample of each sig2beta_i's posterior distribution after Gibbs sampling.
}else{
sig2alphasample <- matrix(0,CL,p) # Matrix that in its columns will contain the sample of each sig2alpha_i's posterior distribution after Gibbs sampling.
sig2betasample <- matrix(0,CL,d) # Matrix that in its columns will contain the sample of each sig2beta_i's posterior distribution after Gibbs sampling.
}
if(indlpml != 0){
iter2 <- 0
auxlpml <- matrix(0,floor((maxiter-burnin)/10),n)
}
##### BEGINNING OF GIBBS SAMPLING #####
while(iter < maxiter){
##### 1) SIMULATION OF ALPHA'S POSTERIOR DISTRIBUTION #####
if((level+trend) != 2){
if((level+trend) == 0){
for(i in 1:n){
sigmaeps <- diag(c(sig2eps[i]),T)
Q <- sigmaeps + R
Qinv <- chol2inv(chol(Q))
Winv <- Qinv
W <- Q
Valphainv <- (t(Z) %*% Winv %*% Z) + invsigmaalpha
Valpha <- chol2inv(chol(Valphainv))
mualpha <- Valpha %*% t(Z) %*% Winv %*% mydata[,i]
alpha[,i] <- mvrnorm(1,mualpha,Valpha)
}
}else{
for(i in 1:n){
sigmaeps <- diag(c(sig2eps[i]),T)
Q <- sigmaeps + R
Qinv <- chol2inv(chol(Q))
Vinv <- t(X) %*% Qinv %*% X + invsigmabeta
V <- chol2inv(chol(Vinv))
Winv <- Qinv + (Qinv %*% X %*% V %*% t(X) %*% Qinv)
W <- chol2inv(chol(Winv))
Valphainv <- (t(Z) %*% Winv %*% Z) + invsigmaalpha
Valpha <- chol2inv(chol(Valphainv))
mualpha <- Valpha %*% t(Z) %*% Winv %*% mydata[,i]
alpha[,i] <- mvrnorm(1,mualpha,Valpha)
}
}
}
##### 2) SIMULATION OF GAMMA'S = (BETA,THETA) POSTERIOR DISTRIBUTION #####
for(i in 1:n){
gr <- comp11(gamma[1,-i]) # Only the first entries of gamma[,-i] are compared to determine the cluster configuration
jstar <- gr$jstar # Object that contains the positions of the unique vectors in gamma[,-i]
gmi <- gamma[,-i] # Matrix with all the elements of gamma, except for the i-th element
gammastar <- as.matrix(gmi[,jstar]) # Matrix with the unique vectors in gamma(-i)
mi <- gr$rstar # Number of unique vectors in gamma(-i) (Number of groups)
nstar <- gr$nstar # Frequency of each unique vector in gamma(-i)
if((level+trend) == 0){
thetastar <- as.matrix(gammastar[(d+1):(T+d),])
}else{
if(d == 1){
betastar <- t(as.matrix(gammastar[1:d,]))
thetastar <- as.matrix(gammastar[(d+1):(T+d),])
}else{
betastar <- as.matrix(gammastar[1:d,]) # Separation of unique vectors between betastar and thetastar
thetastar <- as.matrix(gammastar[(d+1):(T+d),])
}
}
# Matrices necessary for the following steps
sigmaeps <- sig2eps[i]*diag(1,T)
invsigmaeps <- (1/sig2eps[i])*diag(1,T)
Q <- sigmaeps + R
Qinv <- chol2inv(chol(Q))
if((level+trend) == 0){
Winv <- Qinv
W <- Q
}else{
Vinv <- t(X) %*% Qinv %*% X + invsigmabeta
V <- chol2inv(chol(Vinv))
Winv <- Qinv + (Qinv %*% X %*% V %*% t(X) %*% Qinv)
W <- chol2inv(chol(Winv))
}
# Computing weigths for gamma(i)'s posterior distribution
if((level+trend) == 0){
dj <- matrix(0,mi,1)
d0 <- (b + a*mi)*dmvnorm(mydata[,i],(Z %*% alpha[,i]),W)
den <- 0
for(j in 1:mi){
dj[j] <- (nstar[j] - a)*dmvnorm(mydata[,i],(Z %*% alpha[,i] + thetastar[,j]),sigmaeps)
}
den <- d0 + sum(dj)
if(den == 0){
d0 <- log(b + a*mi) + dmvnorm(mydata[,i],(Z %*% alpha[,i]),W,log=TRUE)
for(j in 1:mi){
dj[j] <- log(nstar[j] - a) + dmvnorm(mydata[,i],(Z %*% alpha[,i] + thetastar[,j]),sigmaeps,log=TRUE)
}
dj <- rbind(dj,d0)
aa <- min(dj)
q <- (1+(dj-aa)+(dj-aa)^2/2)/sum(1+(dj-aa)+(dj-aa)^2/2)
}else{
q <- dj/den
q <- rbind(q,d0/den)
}
}else if((level+trend) == 2){
dj <- matrix(0,mi,1)
d0 <- (b + a*mi)*dmvnorm(mydata[,i],matrix(0,T,1),W)
den <- 0
for(j in 1:mi){
dj[j] <- (nstar[j] - a)*dmvnorm(mydata[,i],(X %*% betastar[,j] + thetastar[,j]),sigmaeps)
}
den <- d0 + sum(dj)
if(den == 0){
d0 <- log(b + a*mi) + dmvnorm(mydata[,i],matrix(0,T,1),W,log=TRUE)
for(j in 1:mi){
dj[j] <- log(nstar[j] - a) + dmvnorm(mydata[,i],(X %*% betastar[,j] + thetastar[,j]),sigmaeps,log=TRUE)
}
dj <- rbind(dj,d0)
aa <- min(dj)
q <- (1+(dj-aa)+(dj-aa)^2/2)/sum(1+(dj-aa)+(dj-aa)^2/2)
}else{
q <- dj/den
q <- rbind(q,d0/den)
}
}else{
dj <- matrix(0,mi,1)
d0 <- (b + a*mi)*dmvnorm(mydata[,i],(Z %*% alpha[,i]),W)
den <- 0
for(j in 1:mi){
dj[j] <- (nstar[j] - a)*dmvnorm(mydata[,i],(Z %*% alpha[,i] + X %*% betastar[,j] + thetastar[,j]),sigmaeps)
}
den <- d0 + sum(dj)
if(den == 0){
d0 <- log(b + a*mi) + dmvnorm(mydata[,i],(Z %*% alpha[,i]),W,log=TRUE)
for(j in 1:mi){
dj[j] <- log(nstar[j] - a) + dmvnorm(mydata[,i],(Z %*% alpha[,i] + X %*% betastar[,j] + thetastar[,j]),sigmaeps,log=TRUE)
}
dj <- rbind(dj,d0)
aa <- min(dj)
q <- (1+(dj-aa)+(dj-aa)^2/2)/sum(1+(dj-aa)+(dj-aa)^2/2)
}else{
q <- dj/den
q <- rbind(q,d0/den)
}
}
# Sampling a number between 1 and (mi+1) to determine what will be the simulated value for gamma(i)
# The probabilities of the sample are based on the weights previously computed
y <- sample((1:(mi+1)), size=1, prob = q)
# If sample returns the value (mi+1), a new vector from g0 will be simulated and assigned to gamma(i)
if (y == mi+1){
if((level+trend) == 0){
Sthetai <- chol2inv(chol(invsigmaeps + chol2inv(chol(R))))
muthetai <- Sthetai %*% invsigmaeps %*% (mydata[,i] - (Z %*% alpha[,i]))
theta0 <- matrix(mvrnorm(1,muthetai,Sthetai),T,1)
gamma[,i] <- theta0
}else if((level+trend) == 2){
Sthetai <- chol2inv(chol(invsigmaeps + chol2inv(chol(R))))
muthetai <- Sthetai %*% invsigmaeps %*% (mydata[,i] - (X %*% beta[,i]))
mubetai <- V %*% t(X) %*% Qinv %*% mydata[,i]
beta0 <- matrix(mvrnorm(1,mubetai,V),d,1)
theta0 <- matrix(mvrnorm(1,muthetai,Sthetai),T,1)
gamma[,i] <- rbind(beta0,theta0)
}else{
Sthetai <- chol2inv(chol(invsigmaeps + chol2inv(chol(R))))
muthetai <- Sthetai %*% invsigmaeps %*% (mydata[,i] - (Z %*% alpha[,i]) - (X %*% beta[,i]))
mubetai <- V %*% t(X) %*% Qinv %*% (mydata[,i] - (Z %*% alpha[,i]))
beta0 <- matrix(mvrnorm(1,mubetai,V),d,1)
theta0 <- matrix(mvrnorm(1,muthetai,Sthetai),T,1)
gamma[,i] <- rbind(beta0,theta0)
}
} else{
gamma[,i] = gammastar[,y] # Otherwise, column y from gammastar will be assigned to gamma(i)
}
}
##### 2.1) ACCELERATION STEP AND CONSTRUCTION OF SIMILARITIES MATRIX #####
gr <- comp11(gamma[1,]) # Computation of all latent classes of the gamma vectors after the simulation of their posterior distribution.
jstar <- gr$jstar
gammastar <- as.matrix(gamma[,jstar]) # Unique values of the gamma vectors.
m <- gr$rstar # Total number of latent classes (groups).
nstar <- gr$nstar # Frequency of each latent class (group).
gn <- gr$gn # Identifier of the group to which each time series belongs.
if((level+trend) == 0){
theta <- as.matrix(gamma[((d+1):(T+d)),])
thetastar <- as.matrix(gammastar[(d+1):(T+d),])
}else{
if(d == 1){
beta <- t(as.matrix(gamma[(1:d),])) # Splitting the gamma vectors in beta and theta.
theta <- as.matrix(gamma[((d+1):(T+d)),])
betastar <- t(as.matrix(gammastar[(1:d),]))
thetastar <- as.matrix(gammastar[((d+1):(T+d)),])
}else{
beta <- as.matrix(gamma[(1:d),]) # Splitting the gamma vectors in beta and theta.
theta <- as.matrix(gamma[((d+1):(T+d)),])
betastar <- as.matrix(gammastar[(1:d),])
thetastar <- as.matrix(gammastar[((d+1):(T+d)),])
}
}
for(j in 1:m){
if((level+trend) == 0){
cc <- which(gn == j) # Identifying the cluster configuration of each group.
aux <- matrix(0,T,T) # Calculating the necessary matrices for the simulation of the distributions for the acceleration step.
aux1 <- matrix(0,T,1)
aux2 <- matrix(0,T,1)
for(i in 1:nstar[j]){
aux <- aux + diag((1/sig2eps[cc[i]]),T)
aux1 <- aux1 + (diag((1/sig2eps[cc[i]]),T) %*% (mydata[,cc[i]] - Z %*% alpha[,cc[i]]))
}
Sthetastar <- chol2inv(chol(aux + chol2inv(chol(R))))
muthetastar <- Sthetastar %*% aux1
theta[,cc] <- mvrnorm(1,muthetastar,Sthetastar)
}else if((level+trend) == 2){
cc <- which(gn == j) # Identifying the cluster configuration of each group.
aux <- matrix(0,T,T) # Calculating the necessary matrices for the simulation of the distributions for the acceleration step.
aux1 <- matrix(0,T,1)
aux2 <- matrix(0,T,1)
for(i in 1:nstar[j]){
aux <- aux + diag((1/sig2eps[cc[i]]),T)
aux1 <- aux1 + (diag((1/sig2eps[cc[i]]),T) %*% (mydata[,cc[i]] - X %*% betastar[,j]))
aux2 <- aux2 + (diag((1/sig2eps[cc[i]]),T) %*% (mydata[,cc[i]] - thetastar[,j]))
}
Sthetastar <- chol2inv(chol(aux + chol2inv(chol(R))))
muthetastar <- Sthetastar %*% aux1
Sbetastar <- chol2inv(chol((t(X) %*% aux %*% X) + invsigmabeta))
mubetastar <- Sbetastar %*% t(X) %*% aux2
beta[,cc] <- mvrnorm(1,mubetastar,Sbetastar)
theta[,cc] <- mvrnorm(1,muthetastar,Sthetastar)
}else{
cc <- which(gn == j) # Identifying the cluster configuration of each group.
aux <- matrix(0,T,T) # Calculating the necessary matrices for the simulation of the distributions for the acceleration step.
aux1 <- matrix(0,T,1)
aux2 <- matrix(0,T,1)
for(i in 1:nstar[j]){
aux <- aux + diag((1/sig2eps[cc[i]]),T)
aux1 <- aux1 + (diag((1/sig2eps[cc[i]]),T) %*% (mydata[,cc[i]] - Z %*% alpha[,cc[i]] - X %*% betastar[,j]))
aux2 <- aux2 + (diag((1/sig2eps[cc[i]]),T) %*% (mydata[,cc[i]] - Z %*% alpha[,cc[i]] - thetastar[,j]))
}
Sthetastar <- chol2inv(chol(aux + chol2inv(chol(R))))
muthetastar <- Sthetastar %*% aux1
Sbetastar <- chol2inv(chol((t(X) %*% aux %*% X) + invsigmabeta))
mubetastar <- Sbetastar %*% t(X) %*% aux2
beta[,cc] <- mvrnorm(1,mubetastar,Sbetastar)
theta[,cc] <- mvrnorm(1,muthetastar,Sthetastar)
}
# Computation of similarities matrix and saving the cluster configuration of the current iteration.
if((iter %% thinning) == 0 & iter >= burnin){
for(i1 in 1:nstar[j]){
for(i2 in i1:nstar[j]){
sim[cc[i1],cc[i2]] <- sim[cc[i1],cc[i2]] + 1
sim[cc[i2],cc[i1]] <- sim[cc[i2],cc[i1]] + 1
memory[(cc[i1] + (n*iter1)),cc[i2]] <- memory[(cc[i1] + (n*iter1)),cc[i2]] + 1
memory[(cc[i2] + (n*iter1)),cc[i1]] <- memory[(cc[i2] + (n*iter1)),cc[i1]] + 1
}
}
}
}
if((level+trend) == 0){
gamma <- theta # Obtaining all gamma vectors after the acceleration step.
}else{
gamma <- rbind(beta,theta) # Obtaining all gamma vectors after the acceleration step.
}
gr <- comp11(gamma[1,]) # Obtaining all the latent classes in the gamma vectors.
jstar <- gr$jstar
gammastar <- as.matrix(gamma[,jstar]) # Unique values of the gamma vectors.
m <- gr$rstar # Number of groups after acceleration step.
nstar <- gr$nstar # Frequency of each group.
gn <- gr$gn # Identifier of the group to which each latent class belongs.
if((level+trend) == 0){
theta <- as.matrix(gamma[((d+1):(T+d)),])
thetastar <- as.matrix(gammastar[((d+1):(T+d)),])
}else{
if(d == 1){
beta <- t(as.matrix(gamma[(1:d),])) # Splitting the gamma vectors in beta and theta.
theta <- as.matrix(gamma[((d+1):(T+d)),])
betastar <- t(as.matrix(gammastar[(1:d),]))
thetastar <- as.matrix(gammastar[((d+1):(T+d)),])
}else{
beta <- as.matrix(gamma[(1:d),]) # Splitting the gamma vectors in beta and theta.
theta <- as.matrix(gamma[((d+1):(T+d)),])
betastar <- as.matrix(gammastar[(1:d),])
thetastar <- as.matrix(gammastar[((d+1):(T+d)),])
}
}
##### 6) SIMULATION OF SIG2THE'S POSTERIOR DISTRIBUTION #####
cholP <- chol(P) # Calculation of the Cholesky factorization of P.
Pinv <- chol2inv(cholP) # Obtaining the inverse of P.
s1 <- 0
# Calculating the sum necessary for the rate parameter of the posterior distribution.
for(j in 1:m){
s1 <- s1 + t(as.matrix(thetastar[,j])) %*% Pinv %*% as.matrix(thetastar[,j])
}
sig2the <- 1/rgamma(1,(m*T/2),(s1/2))
##### 7) SIMULATION OF RHO'S POSTERIOR DISTRIBUTION (Metropolis-Hastings step) #####
rhomh <- runif(1,-1,1) # Sampling from the proposal distribution.
Pmh <- matrix(0,T,T)
# Calculating the matrix P for the proposed value rhomh.
for (j in 1:T){
for (k in 1:T){
Pmh[j,k] <- rhomh^(abs(j-k))
}
}
cholPmh <- chol(Pmh) # Calculating the Cholesky factor of Pmh.
Pmhinv <- chol2inv(cholPmh) # Obtaining the inverse from Pmh
s <- 0
# Calculating the sum necessary for the computation of the acceptance probability.
for(j in 1:m){
s <- s + t(as.matrix(thetastar[,j])) %*% (Pmhinv-Pinv) %*% as.matrix(thetastar[,j])
}
# Computation of the acceptance probability.
q <- (-m/2)*(log(prod(diag(cholPmh)))- log(prod(diag(cholP)))) - ((1/(2*sig2the))*s) + (1/2)*(log(1 + rhomh*rhomh) - log(1 + rho*rho)) - log(1 - rhomh*rhomh) + log(1 - rho*rho)
# Definition of the acceptance probability.
quot <- min(0,q)
# Sampling a uniform random variable in [0,1] to determine if the proposal is accepted or not.
unif1 <- runif(1,0,1)
# Acceptance step.
if(log(unif1) <= quot){
rho <- rhomh
arrho <- arrho + 1
for (j in seq(T)){
for (k in seq(T)){
P[j,k] <- rho^(abs(j-k))
}
}
}
R <- sig2the*P
##### 3) SIMULATION OF SIG2EPS' POSTERIOR DISTRIBUTION #####
if((level+trend) == 0){
M <- t(mydata - Z%*%alpha - theta) %*% (mydata - Z%*%alpha - theta)
}else if((level+trend) == 2){
M <- t(mydata - X%*%beta - theta) %*% (mydata - X%*%beta - theta)
}else{
M <- t(mydata - Z%*%alpha - X%*%beta - theta) %*% (mydata - Z%*%alpha - X%*%beta - theta)
}
sig2eps <- 1/rgamma(n,(c0eps + T/2),(c1eps + diag(M)/2))
##### 4) SIMULATION OF SIGMAALPHA'S POSTERIOR DISTRIBUTION #####
if((level+trend) != 2){
sig2alpha <- 1/rgamma(p,(c0alpha + n/2),(c1alpha + rowSums(alpha^2)))
sigmaalpha <- diag(c(sig2alpha),p,p)
invsigmaalpha <- diag(1/c(sig2alpha),p,p)
}
##### 5) SIMULATION OF SIGMABETA'S POSTERIOR DISTRIBUTION #####
if((level+trend) != 0){
sig2beta <- 1/rgamma(d,(c0beta + m/2),(c1beta + colSums(betastar^2)/2))
sigmabeta <- diag(c(sig2beta),d,d)
invsigmabeta <- diag(1/c(sig2beta),d,d)
}
##### 8) SIMULATION OF A'S POSTERIOR DISTRIBUTION (METROPOLIS-HASTINGS WITH UNIFORM PROPOSALS) #####
if(priora == 1){
if (b < 0){
amh <- runif(1,-b,1)
} else{
unif2 <- runif(1,0,1)
if (unif2 <= 0.5){
amh <- 0
} else{
amh <- runif(1,0,1)
}
}
# If b is not greater than -a, then accept the proposal directly.
if ((a+b) <= 0){
a <- amh
} else{
quot1 <- 0
if(m > 1){
for (j in 1:(m-1)){
quot1 <- quot1 + log(b + j*amh) + log(gamma(nstar[j] - amh)) - log(gamma(1 - amh)) - log(b + j*a) - log(gamma(nstar[j] - a)) + log(gamma(1 - a))
}
}
quot1 <- quot1 + log(gamma((nstar[m] - amh))) - log(gamma(1 - amh)) - log(gamma((nstar[m] - a))) + log(gamma(1 - a))
if (a == 0){
fa <- 0.5
} else{
fa <- 0.5*dbeta(a,q0a,q1a)
}
if (amh == 0){
famh <- 0.5
} else{
famh <- 0.5*dbeta(amh,q0a,q1a)
}
# Quotient to evaluate the Metropolis-Hastings step in logs
quot1 <- quot1 + log(famh) - log(fa)
# Determination of the probability for the Metropolis-Hastings step
alphamh1 <- min(quot1,0)
unif3 <- runif(1,0,1)
# Acceptance step
if (log(unif3) <= alphamh1){
a <- amh
ara <- ara + 1
}
}
}
##### 9) SIMULATION OF B'S POSTERIOR DISTRIBUTION (METROPOLIS-HASTINGS WITH GAMMA PROPOSALS) #####
if(priorb == 1){
y1 <- rgamma(1,1,0.1)
bmh <- y1 - a
# If b is not greater than -a, then accept the proposal directly.
if ((a+b) <= 0){
b <- bmh
} else{
quot2 <- 0
if(m > 1){
for (j in 1:(m-1)){
quot2 <- quot2 + log(bmh + j*a) - log(b + j*a)
}
}
fb <- dgamma(a+b,q0b,q1b)
fbmh <- dgamma(y1,q0b,q1b)
# Quotient to evaluate the Metropolis-Hastings step in logs
quot2 <- quot2 + (log(gamma(bmh+1)) - log(gamma(bmh+n)) - log(gamma(b+1)) + log(gamma(b+n))) + (log(fbmh) - log(fb)) - 0.1*(b - bmh)
# Determination of the probability for the Metropolis-Hastings step
alphamh2 <- min(quot2,0)
unif4 <- runif(1,0,1)
# Acceptance step
if (log(unif4) <= alphamh2){
b <- bmh
arb <- arb + 1
}
}
}
if((iter %% thinning) == 0 & iter >= burnin){
iter1 <- iter1 + 1
sig2epssample[iter1,] <- sig2eps
sig2thesample[iter1] <- sig2the
rhosample[iter1] <- rho
asample[iter1] <- a
bsample[iter1] <- b
msample[iter1] <- m
memorygn[iter1,] <- gn
if((level+trend) == 0){
sig2alphasample[iter1,] <- sig2alpha
}else if((level+trend) == 2){
sig2betasample[iter1,] <- sig2beta
}else{
sig2alphasample[iter1,] <- sig2alpha
sig2betasample[iter1,] <- sig2beta
}
}
if(indlpml != 0){
if((iter %% 10) == 0 & iter >= burnin){
iter2 <- iter2 + 1
for(i in 1:n){
if((level+trend) == 0){
for(j in 1:m){
auxlpml[iter2,i] <- auxlpml[iter2,i] + ((nstar[j]-a)/(b+n))*dmvnorm(mydata[,i],(Z %*% alpha[,i] + thetastar[,j]),diag(sig2eps[i],T))
}
sigmaeps <- diag(sig2eps[i],T)
invsigmaeps <- diag((1/sig2eps[i]),T)
Q <- sigmaeps + R
Qinv <- solve(Q)
Winv <- Qinv
W <- Q
auxlpml[iter2,i] <- auxlpml[iter2,i] + ((b+(a*m))/(b+n))*dmvnorm(mydata[,i],(Z %*% alpha[,i]),W)
}else if((level+trend) == 2){
for(j in 1:m){
auxlpml[iter2,i] <- auxlpml[iter2,i] + ((nstar[j]-a)/(b+n))*dmvnorm(mydata[,i],(X %*% betastar[,j] + thetastar[,j]),diag(sig2eps[i],T))
}
sigmaeps <- diag(sig2eps[i],T)
invsigmaeps <- diag((1/sig2eps[i]),T)
Q <- sigmaeps + R
Qinv <- solve(Q)
Vinv <- t(X) %*% Qinv %*% X + invsigmabeta
V <- solve(Vinv)
Winv <- Qinv + (Qinv %*% X %*% V %*% t(X) %*% Qinv)
W <- solve(Winv)
auxlpml[iter2,i] <- auxlpml[iter2,i] + ((b+(a*m))/(b+n))*dmvnorm(mydata[,i],(matrix(0,T,1)),W)
}else{
for(j in 1:m){
auxlpml[iter2,i] <- auxlpml[iter2,i] + ((nstar[j]-a)/(b+n))*dmvnorm(mydata[,i],(Z %*% alpha[,i] + X %*% betastar[,j] + thetastar[,j]),diag(sig2eps[i],T))
}
sigmaeps <- diag(sig2eps[i],T)
invsigmaeps <- diag((1/sig2eps[i]),T)
Q <- sigmaeps + R
Qinv <- solve(Q)
Vinv <- t(X) %*% Qinv %*% X + invsigmabeta
V <- solve(Vinv)
Winv <- Qinv + (Qinv %*% X %*% V %*% t(X) %*% Qinv)
W <- solve(Winv)
auxlpml[iter2,i] <- auxlpml[iter2,i] + ((b+(a*m))/(b+n))*dmvnorm(mydata[,i],(Z %*% alpha[,i]),W)
}
}
}
}
iter <- iter + 1
if(iter %% 50 == 0){
cat("Iteration Number: ",iter,". Progress: ",(iter/maxiter)*100,"%","\n")
}
}
##### END OF GIBBS SAMPLING #####
# Calculation of acceptance rates and similarities matrix
arrho <- arrho/iter
ara <- ara/iter
arb <- arb/iter
sim <- sim/iter1
dist <- matrix(0,CL,1)
# Calculating the distance between each cluster configuration to the similarities matrix
for (i in 1:CL){
aux4 <- memory[(((i-1)*n)+1):(i*n),] - sim
dist[i] <- norm(aux4,"F")
}
# Determining which cluster configuration minimizes the distance to the similarities matrix
mstar <- msample[which.min(dist)]
gnstar <- memorygn[which.min(dist),]
##### HM MEASURE CALCULATION #####
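# Heterogeneity measure: for each group with members cc (size > 1), HM adds
# (2/(n_j - 1)) times the sum of squared Euclidean distances over all pairs
# of series in the group, so tighter groups contribute less to HM.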
HM <- 0
for(j in 1:mstar){
cc <- as.matrix(which(gnstar == j))
HM1 <- 0
if(length(cc) > 1){
for(i1 in 1:length(cc)){
for(i2 in 1:i1){
HM1 <- HM1 + sum((mydata[,cc[i2]] - mydata[,cc[i1]])^2)
}
}
HM <- HM + (2/(length(cc)-1))*HM1
}
}
names <- colnames(mydata)
cat("Number of groups of the chosen cluster configuration: ",mstar,"\n")
for(i in 1:mstar){
cat("Time series in group",i,":",names[which(gnstar == i)],"\n")
}
cat("HM Measure: ",HM,"\n")
if(indlpml != 0){
auxlpml <- 1/auxlpml
cpo <- colMeans(auxlpml)
cpo <- 1/cpo
lpml <- sum(log(cpo))
}
if(indlpml !=0){
if((level+trend) == 0){
return(list(mstar = mstar,gnstar = gnstar,HM = HM,arrho = arrho,ara = ara,arb = arb,lpml = lpml,
sig2epssample = sig2epssample,sig2alphasample = sig2alphasample,
sig2thesample = sig2thesample,rhosample = rhosample,asample = asample,
bsample = bsample,msample = msample,periods = periods,scale=scale))
}else if((level+trend) == 2){
return(list(mstar = mstar,gnstar = gnstar,HM = HM,arrho = arrho,ara = ara,arb = arb,lpml = lpml,
sig2epssample = sig2epssample,sig2betasample = sig2betasample,
sig2thesample = sig2thesample,rhosample = rhosample,asample = asample,
bsample = bsample,msample = msample,periods = periods,scale=scale))
}else{
return(list(mstar = mstar,gnstar = gnstar,HM = HM,arrho = arrho,ara = ara,arb = arb,lpml = lpml,
sig2epssample = sig2epssample,sig2alphasample = sig2alphasample,
sig2betasample = sig2betasample,sig2thesample = sig2thesample,rhosample = rhosample,
asample = asample,bsample = bsample,msample = msample,periods = periods,scale=scale))
}
}else{
if((level+trend) == 0){
return(list(mstar = mstar,gnstar = gnstar,HM = HM,arrho = arrho,ara = ara,arb = arb,
sig2epssample = sig2epssample,sig2alphasample = sig2alphasample,
sig2thesample = sig2thesample,rhosample = rhosample,asample = asample,
bsample = bsample,msample = msample,periods = periods,scale=scale))
}else if((level+trend) == 2){
return(list(mstar = mstar,gnstar = gnstar,HM = HM,arrho = arrho,ara = ara,arb = arb,
sig2epssample = sig2epssample,sig2betasample = sig2betasample,
sig2thesample = sig2thesample,rhosample = rhosample,asample = asample,
bsample = bsample,msample = msample,periods = periods,scale=scale))
}else{
return(list(mstar = mstar,gnstar = gnstar,HM = HM,arrho = arrho,ara = ara,arb = arb,
sig2epssample = sig2epssample,sig2alphasample = sig2alphasample,
sig2betasample = sig2betasample,sig2thesample = sig2thesample,rhosample = rhosample,
asample = asample,bsample = bsample,msample = msample,periods = periods,scale=scale))
}
}
}
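# Usage sketch (illustrative, not part of the package code; the data frame
# name `annual` is hypothetical and the arguments shown mirror tseriescm):
# fit <- tseriesca(annual, maxiter = 2000, burnin = 200, thinning = 5)
# fit$mstar   # number of groups of the chosen cluster configuration
# fit$gnstar  # group label assigned to each time series
# fit$HM      # heterogeneity measure of the chosen configuration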
|
/scratch/gouwar.j/cran-all/cranData/BNPTSclust/R/tseriesca.R
|
tseriescm <-
function(data,maxiter=500,burnin=floor(0.1*maxiter),thinning=5,scale=TRUE,
level=FALSE,trend=TRUE,seasonality=TRUE,deg=2,c0eps=2,c1eps=1,
c0beta=2,c1beta=1,c0alpha=2,c1alpha=1,priora=TRUE,pia=0.5,
q0a=1,q1a=1,priorb=TRUE,q0b=1,q1b=1,a=0.25,b=0,indlpml=FALSE){
# Function that performs the time series clustering algorithm
# described in Nieto-Barajas and Contreras-Cristan (2014) "A Bayesian
# Non-Parametric Approach for Time Series Clustering". Bayesian
# Analysis, Vol. 9, No. 1 (2014) pp.147-170". This function is
# designed for monthly time series data.
#
# IN:
#
# data <- Data frame with the time series information.
# maxiter <- Maximum number of iterations for Gibbs sampling.
# Default value = 500.
# burnin <- Burn-in period of the Markov Chain generated by Gibbs
# sampling. Default value = floor(0.1*maxiter).
# thinning <- Number that indicates how many Gibbs sampling simulations
# should be skipped to form the Markov Chain. Default value = 5.
# scale <- Flag that indicates if the time series data should be scaled to the
# [0,1] interval with a linear transformation as proposed by
# Nieto-Barajas and Contreras-Cristan (2014). If TRUE, then the time
# series are scaled to the [0,1] interval.
# level <- Flag that indicates if the level of the time
# series will be considered for clustering. If TRUE, then it
# is taken into account.
# trend <- Flag that indicates if the polynomial trend of
# the model will be considered for clustering.
# If TRUE, then it is taken into account.
# seasonality <- Flag that indicates if the seasonal components
# of the model will be considered for clustering.
# If TRUE, then they are taken into account.
# deg <- Degree of the polynomial trend of the model.
# Default value = 2.
# c0eps <- Shape parameter of the hyper-prior distribution
# on sig2eps. Default value = 2.
# c1eps <- Rate parameter of the hyper-prior distribution
# on sig2eps. Default value = 1.
# c0beta <- Shape parameter of the hyper-prior distribution
# on sig2beta. Default value = 2.
# c1beta <- Rate parameter of the hyper-prior distribution
# on sig2beta. Default value = 1.
# c0alpha <- Shape parameter of the hyper-prior distribution
# on sig2alpha. Default value = 2.
# c1alpha <- Rate parameter of the hyper-prior distribution
# on sig2alpha. Default value = 1.
# priora <- Flag that indicates if a prior on parameter "a" is
# to be assigned. If TRUE, a prior on "a" is assigned.
# Default value = TRUE.
# pia <- Mixing proportion of the prior distribution on parameter
# "a". Default value = 0.5.
# q0a <- Shape parameter of the continuous part of the prior
# distribution on parameter "a". Default value = 1.
# q1a <- Shape parameter of the continuous part of the prior
# distribution on parameter "a". Default value = 1.
# priorb <- Flag that indicates if a prior on parameter "b" is
# to be assigned. If TRUE, a prior on "b" is assigned.
# Default value = TRUE.
# q0b <- Shape parameter of the prior distribution on parameter
# "b". Default value = 1.
# q1b <- Rate parameter of the prior distribution on parameter
# "b". Default value = 1.
# a <- Initial/fixed value of parameter "a".
# Default value = 0.25.
# b <- Initial/fixed value of parameter "b".
# Default value = 0.
# indlpml <- Flag that indicates if the LPML is to be calculated.
# If TRUE, LPML is calculated. Default value = FALSE.
#
# OUT:
#
# mstar <- Number of groups of the chosen cluster configuration.
# gnstar <- Array that contains the group number to which each time
# series belongs.
# HM <- Heterogeneity Measure of the chosen cluster configuration.
# arrho <- Acceptance rate of the parameter "rho".
# ara <- Acceptance rate of the parameter "a".
# arb <- Acceptance rate of the parameter "b".
# sig2epssample <- Matrix that in its columns contains the sample of each sig2eps_i's posterior distribution after Gibbs sampling.
# sig2alphasample <- Matrix that in its columns contains the sample of each sig2alpha_i's posterior distribution after Gibbs sampling.
# sig2betasample <- Matrix that in its columns contains the sample of each sig2beta_i's posterior distribution after Gibbs sampling.
# sig2thesample <- Vector that contains the sample of sig2the's posterior distribution after Gibbs sampling.
# rhosample <- Vector that contains the sample of rho's posterior distribution after Gibbs sampling.
# asample <- Vector that contains the sample of a's posterior distribution after Gibbs sampling.
# bsample <- Vector that contains the sample of b's posterior distribution after Gibbs sampling.
# msample <- Vector that contains the sample of the number of groups at each Gibbs sampling iteration.
# lpml <- If indlpml = TRUE, lpml contains the value of the LPML of the
# chosen model.
# scale <- Flag that indicates if the time series data were scaled to the
# [0,1] interval with a linear transformation. This will be taken as an input for the
# plotting functions.
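# Usage sketch (illustrative only; `monthly` stands for a data frame whose
# columns are monthly time series):
# fit <- tseriescm(monthly, maxiter = 2000, burnin = 200, thinning = 5,
#                  level = FALSE, trend = TRUE, seasonality = TRUE)
# fit$gnstar  # cluster configuration closest to the similarities matrix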
if(level == TRUE){
level <- 1
}else{
level <- 0
}
if(trend == TRUE){
trend <- 1
}else{
trend <- 0
}
if(seasonality == TRUE){
seasonality <- 1
}else{
seasonality <- 0
}
if(priora == TRUE){
priora <- 1
}else{
priora <- 0
}
if(priorb == TRUE){
priorb <- 1
}else{
priorb <- 0
}
if(indlpml == TRUE){
indlpml <- 1
}else{
indlpml <- 0
}
if(deg%%1 != 0 | deg <= 0){
stop("deg must be a positive integer number.")
}
if(maxiter%%1 != 0 | maxiter <= 0){
stop("maxiter must be a positive (large) integer number.")
}
if(burnin%%1 != 0 | burnin < 0){
stop("burnin must be a non-negative integer number.")
}
if(thinning%%1 != 0 | thinning < 0){
stop("thinning must be a non-negative integer number.")
}
if(maxiter <= burnin){
stop("maxiter cannot be less than or equal to burnin.")
}
if(c0eps <= 0 | c1eps <= 0 | c0beta <= 0 | c1beta <= 0 |
c0alpha <= 0 | c1alpha <= 0){
stop("c0eps, c1eps, c0beta, c1beta, c0alpha and c1alpha must be positive numbers.")
}
if(pia <= 0 | pia >= 1){
stop("The mixing proportion pia must be a number in (0,1).")
}
if(q0a <= 0 | q1a <= 0){
stop("q0a and q1a must be positive numbers.")
}
if(a < 0 | a >= 1){
stop("'a' must be a number in [0,1).")
}
if(q0b <= 0 | q1b <= 0){
stop("q0b and q1b must be positive numbers.")
}
if(b <= -a){
stop("'b' must be greater than '-a'.")
}
data <- scaleandperiods(data,scale)
mydata <- as.matrix(data$mydata) # Matrix with the scaled data.
periods <- data$periods # Array with the data periods.
cts <- data$cts # Variable that indicates if any time series
# were removed from the original data set because they were constant.
##### CONSTRUCTION OF THE DESIGN MATRICES #####
T <- nrow(mydata) # Number of periods of the time series
n <- ncol(mydata) # Number of time series present in the data
DM <- designmatrices(level,trend,seasonality,deg,T,n,fun="tseriescm")
p <- DM$p
d <- DM$d
if((level+trend+seasonality) == 0){
Z <- DM$Z
}else if((level+trend+seasonality) == 3){
X <- DM$X
}else{
Z <- DM$Z
X <- DM$X
}
##### INITIAL VALUES FOR THE PARAMETERS THAT WILL BE PART OF THE GIBBS SAMPLING #####
sig2eps <- matrix(1,n,1) # Vector that has the diagonal entries of the variance-covariance matrix for every epsilon_i.
sig2the <- 1 # Initial value for sig2the.
rho <- 0 # Initial value for rho.
P <- matrix(0,T,T) # Initial matrix P.
for (j in seq(T)){
for (k in seq(T)){
P[j,k] <- rho^(abs(j-k))
}
}
R <- sig2the*P # Initial matrix R.
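# theta follows N(0, R) with R = sig2the * P, where P[j,k] = rho^|j-k| gives
# an AR(1)-type correlation structure across the T periods.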
if((level+trend+seasonality) == 0){
sig2alpha <- matrix(1,p,1) # Vector that has the diagonal entries of the variance-covariance matrix for alpha.
sigmaalpha <- diag(c(sig2alpha),p,p) # Variance-covariance matrix for alpha.
invsigmaalpha <- diag(1/c(sig2alpha),p,p) # Inverse variance-covariance matrix for alpha.
alpha <- matrix(mvrnorm(n,matrix(0,p,1),sigmaalpha),p,n) # alpha is a matrix with a vector value of alpha for every time series in its columns.
theta <- matrix(mvrnorm(n,matrix(0,T,1),R),T,n) # theta is a matrix with a vector value of theta for every time series in its columns.
gamma <- theta # With no beta component, gamma reduces to theta.
}else if((level+trend+seasonality) == 3){
sig2beta <- matrix(1,d,1) # Vector that has the diagonal entries of the variance-covariance matrix for beta.
sigmabeta <- diag(c(sig2beta),d,d) # Variance-covariance matrix for beta.
invsigmabeta <- diag(1/c(sig2beta),d,d) # Inverse variance-covariance matrix for beta.
beta <- matrix(mvrnorm(n,matrix(0,d,1),sigmabeta),d,n) # beta is a matrix with a vector value of beta for every time series in its columns.
theta <- matrix(mvrnorm(n,matrix(0,T,1),R),T,n) # theta is a matrix with a vector value of theta for every time series in its columns.
gamma <- rbind(beta,theta) # gamma is the union by rows of the beta and theta matrices.
}else{
sig2beta <- matrix(1,d,1) # Vector that has the diagonal entries of the variance-covariance matrix for beta.
sigmabeta <- diag(c(sig2beta),d,d) # Variance-covariance matrix for beta.
invsigmabeta <- diag(1/c(sig2beta),d,d) # Inverse variance-covariance matrix for beta.
sig2alpha <- matrix(1,p,1) # Vector that has the diagonal entries of the variance-covariance matrix for alpha.
sigmaalpha <- diag(c(sig2alpha),p,p) # Variance-covariance matrix for alpha.
invsigmaalpha <- diag(1/c(sig2alpha),p,p) # Inverse variance-covariance matrix for alpha.
alpha <- matrix(mvrnorm(n,matrix(0,p,1),sigmaalpha),p,n) # alpha is a matrix with a vector value of alpha for every time series in its columns.
beta <- matrix(mvrnorm(n,matrix(0,d,1),sigmabeta),d,n) # beta is a matrix with a vector value of beta for every time series in its columns.
theta <- matrix(mvrnorm(n,matrix(0,T,1),R),T,n) # theta is a matrix with a vector value of theta for every time series in its columns.
gamma <- rbind(beta,theta) # gamma is the union by rows of the beta and theta matrices.
}
iter <- 0 # Counter for each Gibbs sampling iteration.
iter1 <- 0 # Counter for the number of iterations saved during the Gibbs sampling.
arrho <- 0 # Variable that will contain the acceptance rate of rho in the Metropolis-Hastings step.
ara <- 0 # Variable that will contain the acceptance rate of a in the Metropolis-Hastings step.
arb <- 0 # Variable that will contain the acceptance rate of b in the Metropolis-Hastings step.
sim <- matrix(0,n,n) # Initialization of the similarities matrix.
if(thinning == 0){
CL <- floor(maxiter-burnin)
thinning <- 1 # thinning = 0 means "keep every iteration"; avoids NaN in iter %% thinning
}else{
CL <- floor((maxiter-burnin)/thinning)
}
memory <- matrix(0,CL*n,n) # Matrix that will contain the cluster configuration of every iteration that is saved during the Gibbs sampling.
memorygn <- matrix(0,CL,n) # Matrix that will save the group number to which each time series belongs in every iteration saved.
sig2epssample <- matrix(0,CL,n) # Matrix that in its columns will contain the sample of each sig2eps_i's posterior distribution after Gibbs sampling.
sig2thesample <- matrix(0,CL,1) # Vector that will contain the sample of sig2the's posterior distribution after Gibbs sampling.
rhosample <- matrix(0,CL,1) # Vector that will contain the sample of rho's posterior distribution after Gibbs sampling.
asample <- matrix(0,CL,1) # Vector that will contain the sample of a's posterior distribution after Gibbs sampling.
bsample <- matrix(0,CL,1) # Vector that will contain the sample of b's posterior distribution after Gibbs sampling.
msample <- matrix(0,CL,1) # Vector that will contain the sample of the number of groups at each Gibbs sampling iteration.
if((level+trend+seasonality) == 0){
sig2alphasample <- matrix(0,CL,p) # Matrix that in its columns will contain the sample of each sig2alpha_i's posterior distribution after Gibbs sampling.
}else if((level+trend+seasonality) == 3){
sig2betasample <- matrix(0,CL,d) # Matrix that in its columns will contain the sample of each sig2beta_i's posterior distribution after Gibbs sampling.
}else{
sig2alphasample <- matrix(0,CL,p) # Matrix that in its columns will contain the sample of each sig2alpha_i's posterior distribution after Gibbs sampling.
sig2betasample <- matrix(0,CL,d) # Matrix that in its columns will contain the sample of each sig2beta_i's posterior distribution after Gibbs sampling.
}
if(indlpml != 0){
iter2 <- 0
auxlpml <- matrix(0,floor((maxiter-burnin)/10),n)
}
##### BEGINNING OF GIBBS SAMPLING #####
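# Each sweep updates, in this order: alpha (step 1), the cluster-defining
# vectors gamma = (beta,theta) (step 2) with an acceleration step (2.1),
# sig2the (6), rho (7), sig2eps (3), sig2alpha (4), sig2beta (5), and the
# parameters a (8) and b (9).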
while(iter < maxiter){
##### 1) SIMULATION OF ALPHA'S POSTERIOR DISTRIBUTION #####
if((level+trend+seasonality) != 3){
if((level+trend+seasonality) == 0){
for(i in 1:n){
sigmaeps <- diag(c(sig2eps[i]),T)
Q <- sigmaeps + R
Qinv <- chol2inv(chol(Q))
Winv <- Qinv
W <- Q
Valphainv <- (t(Z) %*% Winv %*% Z) + invsigmaalpha
Valpha <- chol2inv(chol(Valphainv))
mualpha <- Valpha %*% t(Z) %*% Winv %*% mydata[,i]
alpha[,i] <- mvrnorm(1,mualpha,Valpha)
}
}else{
for(i in 1:n){
sigmaeps <- diag(c(sig2eps[i]),T)
Q <- sigmaeps + R
Qinv <- chol2inv(chol(Q))
Vinv <- t(X) %*% Qinv %*% X + invsigmabeta
V <- chol2inv(chol(Vinv))
Winv <- Qinv + (Qinv %*% X %*% V %*% t(X) %*% Qinv)
W <- chol2inv(chol(Winv))
Valphainv <- (t(Z) %*% Winv %*% Z) + invsigmaalpha
Valpha <- chol2inv(chol(Valphainv))
mualpha <- Valpha %*% t(Z) %*% Winv %*% mydata[,i]
alpha[,i] <- mvrnorm(1,mualpha,Valpha)
}
}
}
##### 2) SIMULATION OF GAMMA'S = (BETA,THETA) POSTERIOR DISTRIBUTION #####
for(i in 1:n){
gr <- comp11(gamma[1,-i]) # Only the first entries of gamma[,-i] are compared to determine the cluster configuration
jstar <- gr$jstar # Object that contains the positions of the unique vectors in gamma[,-i]
gmi <- gamma[,-i] # Matrix with all the elements of gamma, except for the i-th element
gammastar <- as.matrix(gmi[,jstar]) # Matrix with the unique vectors in gamma(-i)
mi <- gr$rstar # Number of unique vectors in gamma(-i) (Number of groups)
nstar <- gr$nstar # Frequency of each unique vector in gamma(-i)
if((level+trend+seasonality) == 0){
thetastar <- as.matrix(gammastar[(d+1):(T+d),])
}else{
if(d == 1){
betastar <- t(as.matrix(gammastar[1:d,]))
thetastar <- as.matrix(gammastar[(d+1):(T+d),])
}else{
betastar <- as.matrix(gammastar[1:d,]) # Separation of unique vectors between betastar and thetastar
thetastar <- as.matrix(gammastar[(d+1):(T+d),])
}
}
# Matrices necessary for the following steps
sigmaeps <- sig2eps[i]*diag(1,T)
invsigmaeps <- (1/sig2eps[i])*diag(1,T)
Q <- sigmaeps + R
Qinv <- chol2inv(chol(Q))
if((level+trend+seasonality) == 0){
Winv <- Qinv
W <- Q
}else{
Vinv <- t(X) %*% Qinv %*% X + invsigmabeta
V <- chol2inv(chol(Vinv))
Winv <- Qinv + (Qinv %*% X %*% V %*% t(X) %*% Qinv)
W <- chol2inv(chol(Winv))
}
# Computing weights for gamma(i)'s posterior distribution
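# Polya-urn weights: joining existing group j has weight proportional to
# (nstar[j] - a) * f(y_i | group j), opening a new group has weight
# proportional to (b + a*mi) * f0(y_i), where f0 integrates the group-level
# parameters out (covariance W). The den == 0 branch recomputes everything on
# the log scale to guard against all densities underflowing to zero.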
if((level+trend+seasonality) == 0){
dj <- matrix(0,mi,1)
d0 <- (b + a*mi)*dmvnorm(mydata[,i],(Z %*% alpha[,i]),W)
den <- 0
for(j in 1:mi){
dj[j] <- (nstar[j] - a)*dmvnorm(mydata[,i],(Z %*% alpha[,i] + thetastar[,j]),sigmaeps)
}
den <- d0 + sum(dj)
if(den == 0){
d0 <- log(b + a*mi) + dmvnorm(mydata[,i],(Z %*% alpha[,i]),W,log=TRUE)
for(j in 1:mi){
dj[j] <- log(nstar[j] - a) + dmvnorm(mydata[,i],(Z %*% alpha[,i] + thetastar[,j]),sigmaeps,log=TRUE)
}
dj <- rbind(dj,d0)
aa <- min(dj)
q <- (1+(dj-aa)+(dj-aa)^2/2)/sum(1+(dj-aa)+(dj-aa)^2/2)
}else{
q <- dj/den
q <- rbind(q,d0/den)
}
}else if((level+trend+seasonality) == 3){
dj <- matrix(0,mi,1)
d0 <- (b + a*mi)*dmvnorm(mydata[,i],matrix(0,T,1),W)
den <- 0
for(j in 1:mi){
dj[j] <- (nstar[j] - a)*dmvnorm(mydata[,i],(X %*% betastar[,j] + thetastar[,j]),sigmaeps)
}
den <- d0 + sum(dj)
if(den == 0){
d0 <- log(b + a*mi) + dmvnorm(mydata[,i],matrix(0,T,1),W,log=TRUE)
for(j in 1:mi){
dj[j] <- log(nstar[j] - a) + dmvnorm(mydata[,i],(X %*% betastar[,j] + thetastar[,j]),sigmaeps,log=TRUE)
}
dj <- rbind(dj,d0)
aa <- min(dj)
q <- (1+(dj-aa)+(dj-aa)^2/2)/sum(1+(dj-aa)+(dj-aa)^2/2)
}else{
q <- dj/den
q <- rbind(q,d0/den)
}
}else{
dj <- matrix(0,mi,1)
d0 <- (b + a*mi)*dmvnorm(mydata[,i],(Z %*% alpha[,i]),W)
den <- 0
for(j in 1:mi){
dj[j] <- (nstar[j] - a)*dmvnorm(mydata[,i],(Z %*% alpha[,i] + X %*% betastar[,j] + thetastar[,j]),sigmaeps)
}
den <- d0 + sum(dj)
if(den == 0){
d0 <- log(b + a*mi) + dmvnorm(mydata[,i],(Z %*% alpha[,i]),W,log=TRUE)
for(j in 1:mi){
dj[j] <- log(nstar[j] - a) + dmvnorm(mydata[,i],(Z %*% alpha[,i] + X %*% betastar[,j] + thetastar[,j]),sigmaeps,log=TRUE)
}
dj <- rbind(dj,d0)
aa <- min(dj)
q <- (1+(dj-aa)+(dj-aa)^2/2)/sum(1+(dj-aa)+(dj-aa)^2/2)
}else{
q <- dj/den
q <- rbind(q,d0/den)
}
}
# Sampling a number between 1 and (mi+1) to determine what will be the simulated value for gamma(i)
# The probabilities of the sample are based on the weights previously computed
y <- sample((1:(mi+1)), size=1, prob = q)
# If sample returns the value (mi+1), a new vector from g0 will be simulated and assigned to gamma(i)
if (y == mi+1){
if((level+trend+seasonality) == 0){
Sthetai <- chol2inv(chol(invsigmaeps + chol2inv(chol(R))))
muthetai <- Sthetai %*% invsigmaeps %*% (mydata[,i] - (Z %*% alpha[,i]))
theta0 <- matrix(mvrnorm(1,muthetai,Sthetai),T,1)
gamma[,i] <- theta0
}else if((level+trend+seasonality) == 3){
Sthetai <- chol2inv(chol(invsigmaeps + chol2inv(chol(R))))
muthetai <- Sthetai %*% invsigmaeps %*% (mydata[,i] - (X %*% beta[,i]))
mubetai <- V %*% t(X) %*% Qinv %*% mydata[,i]
beta0 <- matrix(mvrnorm(1,mubetai,V),d,1)
theta0 <- matrix(mvrnorm(1,muthetai,Sthetai),T,1)
gamma[,i] <- rbind(beta0,theta0)
}else{
Sthetai <- chol2inv(chol(invsigmaeps + chol2inv(chol(R))))
muthetai <- Sthetai %*% invsigmaeps %*% (mydata[,i] - (Z %*% alpha[,i]) - (X %*% beta[,i]))
mubetai <- V %*% t(X) %*% Qinv %*% (mydata[,i] - (Z %*% alpha[,i]))
beta0 <- matrix(mvrnorm(1,mubetai,V),d,1)
theta0 <- matrix(mvrnorm(1,muthetai,Sthetai),T,1)
gamma[,i] <- rbind(beta0,theta0)
}
} else{
gamma[,i] = gammastar[,y] # Otherwise, column y from gammastar will be assigned to gamma(i)
}
}
##### 2.1) ACCELERATION STEP AND CONSTRUCTION OF SIMILARITIES MATRIX #####
gr <- comp11(gamma[1,]) # Computation of all latent classes of the gamma vectors after the simulation of their posterior distribution.
jstar <- gr$jstar
gammastar <- as.matrix(gamma[,jstar]) # Unique values of the gamma vectors.
m <- gr$rstar # Total number of latent classes (groups).
nstar <- gr$nstar # Frequency of each latent class (group).
gn <- gr$gn # Identifier of the group to which each time series belongs.
if((level+trend+seasonality) == 0){
theta <- as.matrix(gamma[((d+1):(T+d)),])
thetastar <- as.matrix(gammastar[(d+1):(T+d),])
}else{
if(d == 1){
beta <- t(as.matrix(gamma[(1:d),])) # Splitting the gamma vectors into beta and theta.
theta <- as.matrix(gamma[((d+1):(T+d)),])
betastar <- t(as.matrix(gammastar[(1:d),]))
thetastar <- as.matrix(gammastar[((d+1):(T+d)),])
}else{
beta <- as.matrix(gamma[(1:d),]) # Splitting the gamma vectors into beta and theta.
theta <- as.matrix(gamma[((d+1):(T+d)),])
betastar <- as.matrix(gammastar[(1:d),])
thetastar <- as.matrix(gammastar[((d+1):(T+d)),])
}
}
for(j in 1:m){
if((level+trend+seasonality) == 0){
cc <- which(gn == j) # Identifying the cluster configuration of each group.
aux <- matrix(0,T,T) # Calculating the necessary matrices for the simulation of the distributions for the acceleration step.
aux1 <- matrix(0,T,1)
aux2 <- matrix(0,T,1)
for(i in 1:nstar[j]){
aux <- aux + diag((1/sig2eps[cc[i]]),T)
aux1 <- aux1 + (diag((1/sig2eps[cc[i]]),T) %*% (mydata[,cc[i]] - Z %*% alpha[,cc[i]]))
}
Sthetastar <- chol2inv(chol(aux + chol2inv(chol(R))))
muthetastar <- Sthetastar %*% aux1
theta[,cc] <- mvrnorm(1,muthetastar,Sthetastar)
}else if((level+trend+seasonality) == 3){
cc <- which(gn == j) # Identifying the cluster configuration of each group.
aux <- matrix(0,T,T) # Calculating the necessary matrices for the simulation of the distributions for the acceleration step.
aux1 <- matrix(0,T,1)
aux2 <- matrix(0,T,1)
for(i in 1:nstar[j]){
aux <- aux + diag((1/sig2eps[cc[i]]),T)
aux1 <- aux1 + (diag((1/sig2eps[cc[i]]),T) %*% (mydata[,cc[i]] - X %*% betastar[,j]))
aux2 <- aux2 + (diag((1/sig2eps[cc[i]]),T) %*% (mydata[,cc[i]] - thetastar[,j]))
}
Sthetastar <- chol2inv(chol(aux + chol2inv(chol(R))))
muthetastar <- Sthetastar %*% aux1
Sbetastar <- chol2inv(chol((t(X) %*% aux %*% X) + invsigmabeta))
mubetastar <- Sbetastar %*% t(X) %*% aux2
beta[,cc] <- mvrnorm(1,mubetastar,Sbetastar)
theta[,cc] <- mvrnorm(1,muthetastar,Sthetastar)
}else{
cc <- which(gn == j) # Identifying the cluster configuration of each group.
aux <- matrix(0,T,T) # Calculating the necessary matrices for the simulation of the distributions for the acceleration step.
aux1 <- matrix(0,T,1)
aux2 <- matrix(0,T,1)
for(i in 1:nstar[j]){
aux <- aux + diag((1/sig2eps[cc[i]]),T)
aux1 <- aux1 + (diag((1/sig2eps[cc[i]]),T) %*% (mydata[,cc[i]] - Z %*% alpha[,cc[i]] - X %*% betastar[,j]))
aux2 <- aux2 + (diag((1/sig2eps[cc[i]]),T) %*% (mydata[,cc[i]] - Z %*% alpha[,cc[i]] - thetastar[,j]))
}
Sthetastar <- chol2inv(chol(aux + chol2inv(chol(R))))
muthetastar <- Sthetastar %*% aux1
Sbetastar <- chol2inv(chol((t(X) %*% aux %*% X) + invsigmabeta))
mubetastar <- Sbetastar %*% t(X) %*% aux2
beta[,cc] <- mvrnorm(1,mubetastar,Sbetastar)
theta[,cc] <- mvrnorm(1,muthetastar,Sthetastar)
}
# Computation of similarities matrix and saving the cluster configuration of the current iteration.
if((iter %% thinning) == 0 & iter >= burnin){
for(i1 in 1:nstar[j]){
for(i2 in i1:nstar[j]){
sim[cc[i1],cc[i2]] <- sim[cc[i1],cc[i2]] + 1
sim[cc[i2],cc[i1]] <- sim[cc[i2],cc[i1]] + 1
memory[(cc[i1] + (n*iter1)),cc[i2]] <- memory[(cc[i1] + (n*iter1)),cc[i2]] + 1
memory[(cc[i2] + (n*iter1)),cc[i1]] <- memory[(cc[i2] + (n*iter1)),cc[i1]] + 1
}
}
}
}
if((level+trend+seasonality) == 0){
gamma <- theta # Obtaining all gamma vectors after the acceleration step.
}else{
gamma <- rbind(beta,theta) # Obtaining all gamma vectors after the acceleration step.
}
gr <- comp11(gamma[1,]) # Obtaining all the latent classes in the gamma vectors.
jstar <- gr$jstar
gammastar <- as.matrix(gamma[,jstar]) # Unique values of the gamma vectors.
m <- gr$rstar # Number of groups after acceleration step.
nstar <- gr$nstar # Frequency of each group.
gn <- gr$gn # Identifier of the group to which each time series belongs.
if((level+trend+seasonality) == 0){
theta <- as.matrix(gamma[((d+1):(T+d)),])
thetastar <- as.matrix(gammastar[((d+1):(T+d)),])
}else{
if(d == 1){
beta <- t(as.matrix(gamma[(1:d),])) # Splitting the gamma vectors into beta and theta.
theta <- as.matrix(gamma[((d+1):(T+d)),])
betastar <- t(as.matrix(gammastar[(1:d),]))
thetastar <- as.matrix(gammastar[((d+1):(T+d)),])
}else{
beta <- as.matrix(gamma[(1:d),]) # Splitting the gamma vectors into beta and theta.
theta <- as.matrix(gamma[((d+1):(T+d)),])
betastar <- as.matrix(gammastar[(1:d),])
thetastar <- as.matrix(gammastar[((d+1):(T+d)),])
}
}
##### 6) SIMULATION OF SIG2THE'S POSTERIOR DISTRIBUTION #####
cholP <- chol(P) # Calculation of the Cholesky factorization of P.
Pinv <- chol2inv(cholP) # Obtaining the inverse of P.
s1 <- 0
# Calculating the sum necessary for the rate parameter of the posterior distribution.
for(j in 1:m){
s1 <- s1 + t(as.matrix(thetastar[,j])) %*% Pinv %*% as.matrix(thetastar[,j])
}
sig2the <- 1/rgamma(1,(m*T/2),(s1/2))
##### 7) SIMULATION OF RHO'S POSTERIOR DISTRIBUTION (Metropolis-Hastings step) #####
rhomh <- runif(1,-1,1) # Sampling from the proposal distribution.
Pmh <- matrix(0,T,T)
# Calculating the matrix P for the proposed value rhomh.
for (j in 1:T){
for (k in 1:T){
Pmh[j,k] <- rhomh^(abs(j-k))
}
}
cholPmh <- chol(Pmh) # Calculating the Cholesky factor of Pmh.
Pmhinv <- chol2inv(cholPmh) # Obtaining the inverse from Pmh
s <- 0
# Calculating the sum necessary for the computation of the acceptance probability.
for(j in 1:m){
s <- s + t(as.matrix(thetastar[,j])) %*% (Pmhinv-Pinv) %*% as.matrix(thetastar[,j])
}
# Computation of the acceptance probability.
q <- (-m/2)*(sum(log(diag(cholPmh))) - sum(log(diag(cholP)))) - ((1/(2*sig2the))*s) + (1/2)*(log(1 + rhomh*rhomh) - log(1 + rho*rho)) - log(1 - rhomh*rhomh) + log(1 - rho*rho) # log-determinants via sum(log(diag(chol))) to avoid underflow
# Definition of the acceptance probability.
quot <- min(0,q)
# Sampling a uniform random variable in [0,1] to determine if the proposal is accepted or not.
unif1 <- runif(1,0,1)
# Acceptance step.
if(log(unif1) <= quot){
rho <- rhomh
arrho <- arrho + 1
for (j in seq(T)){
for (k in seq(T)){
P[j,k] <- rho^(abs(j-k))
}
}
}
R <- sig2the*P
##### 3) SIMULATION OF SIG2EPS' POSTERIOR DISTRIBUTION #####
if((level+trend+seasonality) == 0){
M <- t(mydata - Z%*%alpha - theta) %*% (mydata - Z%*%alpha - theta)
}else if((level+trend+seasonality) == 3){
M <- t(mydata - X%*%beta - theta) %*% (mydata - X%*%beta - theta)
}else{
M <- t(mydata - Z%*%alpha - X%*%beta - theta) %*% (mydata - Z%*%alpha - X%*%beta - theta)
}
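# Conjugate update: sig2eps_i | rest ~ InvGamma(c0eps + T/2, c1eps + r_i/2),
# where r_i = diag(M)[i] is the squared residual norm of series i.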
sig2eps <- 1/rgamma(n,(c0eps + T/2),(c1eps + diag(M)/2))
##### 4) SIMULATION OF SIGMAALPHA'S POSTERIOR DISTRIBUTION #####
if((level+trend+seasonality) != 3){
sig2alpha <- 1/rgamma(p,(c0alpha + n/2),(c1alpha + rowSums(alpha^2)/2)) # rate = c1alpha + sum_i alpha[k,i]^2 / 2, matching the other conjugate updates
sigmaalpha <- diag(c(sig2alpha),p,p)
invsigmaalpha <- diag(1/c(sig2alpha),p,p)
}
##### 5) SIMULATION OF SIGMABETA'S POSTERIOR DISTRIBUTION #####
if((level+trend+seasonality) != 0){
sig2beta <- 1/rgamma(d,(c0beta + m/2),(c1beta + rowSums(betastar^2)/2)) # rowSums: one rate term per coordinate of beta (betastar is d x m)
sigmabeta <- diag(c(sig2beta),d,d)
invsigmabeta <- diag(1/c(sig2beta),d,d)
}
##### 8) SIMULATION OF A'S POSTERIOR DISTRIBUTION (METROPOLIS-HASTINGS WITH UNIFORM PROPOSALS) #####
if(priora == 1){
if (b < 0){
amh <- runif(1,-b,1)
} else{
unif2 <- runif(1,0,1)
if (unif2 <= 0.5){
amh <- 0
} else{
amh <- runif(1,0,1)
}
}
# If b is not greater than -a, then accept the proposal directly.
if ((a+b) <= 0){
a <- amh
} else{
quot1 <- 0
if(m > 1){
for (j in 1:(m-1)){
quot1 <- quot1 + log(b + j*amh) + lgamma(nstar[j] - amh) - lgamma(1 - amh) - log(b + j*a) - lgamma(nstar[j] - a) + lgamma(1 - a) # lgamma avoids overflow of gamma() for large arguments
}
}
quot1 <- quot1 + lgamma(nstar[m] - amh) - lgamma(1 - amh) - lgamma(nstar[m] - a) + lgamma(1 - a)
if (a == 0){
fa <- 0.5
} else{
fa <- 0.5*dbeta(a,q0a,q1a)
}
if (amh == 0){
famh <- 0.5
} else{
famh <- 0.5*dbeta(amh,q0a,q1a)
}
# Quotient to evaluate the Metropolis-Hastings step in logs
quot1 <- quot1 + log(famh) - log(fa)
# Determination of the probability for the Metropolis-Hastings step
alphamh1 <- min(quot1,0)
unif3 <- runif(1,0,1)
# Acceptance step
if (log(unif3) <= alphamh1){
a <- amh
ara <- ara + 1
}
}
}
##### 9) SIMULATION OF B'S POSTERIOR DISTRIBUTION (METROPOLIS-HASTINGS WITH GAMMA PROPOSALS) #####
if(priorb == 1){
y1 <- rgamma(1,1,0.1)
bmh <- y1 - a
# If b is not greater than -a, then accept the proposal directly.
if ((a+b) <= 0){
b <- bmh
} else{
quot2 <- 0
if(m > 1){
for (j in 1:(m-1)){
quot2 <- quot2 + log(bmh + j*a) - log(b + j*a)
}
}
fb <- dgamma(a+b,q0b,q1b)
fbmh <- dgamma(y1,q0b,q1b)
# Quotient to evaluate the Metropolis-Hastings step in logs
quot2 <- quot2 + (lgamma(bmh+1) - lgamma(bmh+n) - lgamma(b+1) + lgamma(b+n)) + (log(fbmh) - log(fb)) - 0.1*(b - bmh) # lgamma avoids overflow of gamma() for large n
# Determination of the probability for the Metropolis-Hastings step
alphamh2 <- min(quot2,0)
unif4 <- runif(1,0,1)
# Acceptance step
if (log(unif4) <= alphamh2){
b <- bmh
arb <- arb + 1
}
}
}
if((iter %% thinning) == 0 & iter >= burnin){
iter1 <- iter1 + 1
sig2epssample[iter1,] <- sig2eps
sig2thesample[iter1] <- sig2the
rhosample[iter1] <- rho
asample[iter1] <- a
bsample[iter1] <- b
msample[iter1] <- m
memorygn[iter1,] <- gn
if((level+trend+seasonality) == 0){
sig2alphasample[iter1,] <- sig2alpha
}else if((level+trend+seasonality) == 3){
sig2betasample[iter1,] <- sig2beta
}else{
sig2alphasample[iter1,] <- sig2alpha
sig2betasample[iter1,] <- sig2beta
}
}
if(indlpml != 0){
if((iter %% 10) == 0 & iter >= burnin){
iter2 <- iter2 + 1
for(i in 1:n){
if((level+trend+seasonality) == 0){
for(j in 1:m){
auxlpml[iter2,i] <- auxlpml[iter2,i] + ((nstar[j]-a)/(b+n))*dmvnorm(mydata[,i],(Z %*% alpha[,i] + thetastar[,j]),diag(sig2eps[i],T))
}
sigmaeps <- diag(sig2eps[i],T)
invsigmaeps <- diag((1/sig2eps[i]),T)
Q <- sigmaeps + R
Qinv <- solve(Q)
Winv <- Qinv
W <- Q
auxlpml[iter2,i] <- auxlpml[iter2,i] + ((b+(a*m))/(b+n))*dmvnorm(mydata[,i],(Z %*% alpha[,i]),W)
}else if((level+trend+seasonality) == 3){
for(j in 1:m){
auxlpml[iter2,i] <- auxlpml[iter2,i] + ((nstar[j]-a)/(b+n))*dmvnorm(mydata[,i],(X %*% betastar[,j] + thetastar[,j]),diag(sig2eps[i],T))
}
sigmaeps <- diag(sig2eps[i],T)
invsigmaeps <- diag((1/sig2eps[i]),T)
Q <- sigmaeps + R
Qinv <- solve(Q)
Vinv <- t(X) %*% Qinv %*% X + invsigmabeta
V <- solve(Vinv)
Winv <- Qinv + (Qinv %*% X %*% V %*% t(X) %*% Qinv)
W <- solve(Winv)
auxlpml[iter2,i] <- auxlpml[iter2,i] + ((b+(a*m))/(b+n))*dmvnorm(mydata[,i],(matrix(0,T,1)),W)
}else{
for(j in 1:m){
auxlpml[iter2,i] <- auxlpml[iter2,i] + ((nstar[j]-a)/(b+n))*dmvnorm(mydata[,i],(Z %*% alpha[,i] + X %*% betastar[,j] + thetastar[,j]),diag(sig2eps[i],T))
}
sigmaeps <- diag(sig2eps[i],T)
invsigmaeps <- diag((1/sig2eps[i]),T)
Q <- sigmaeps + R
Qinv <- solve(Q)
Vinv <- t(X) %*% Qinv %*% X + invsigmabeta
V <- solve(Vinv)
Winv <- Qinv + (Qinv %*% X %*% V %*% t(X) %*% Qinv)
W <- solve(Winv)
auxlpml[iter2,i] <- auxlpml[iter2,i] + ((b+(a*m))/(b+n))*dmvnorm(mydata[,i],(Z %*% alpha[,i]),W)
}
}
}
}
iter <- iter + 1
if(iter %% 50 == 0){
cat("Iteration Number: ",iter,". Progress: ",(iter/maxiter)*100,"%","\n")
}
}
##### END OF GIBBS SAMPLING #####
# Calculation of acceptance rates and similarities matrix
arrho <- arrho/iter
ara <- ara/iter
arb <- arb/iter
sim <- sim/iter1
dist <- matrix(0,CL,1)
# Calculating the distance between each cluster configuration to the similarities matrix
for (i in 1:CL){
aux4 <- memory[(((i-1)*n)+1):(i*n),] - sim
dist[i] <- norm(aux4,"F")
}
# Determining which cluster configuration minimizes the distance to the similarities matrix
mstar <- msample[which.min(dist)]
gnstar <- memorygn[which.min(dist),]
##### HM MEASURE CALCULATION #####
HM <- 0
for(j in 1:mstar){
cc <- as.matrix(which(gnstar == j))
HM1 <- 0
if(length(cc) > 1){
for(i1 in 1:length(cc)){
for(i2 in 1:i1){
HM1 <- HM1 + sum((mydata[,cc[i2]] - mydata[,cc[i1]])^2)
}
}
HM <- HM + (2/(length(cc)-1))*HM1
}
}
names <- colnames(mydata)
cat("Number of groups of the chosen cluster configuration: ",mstar,"\n")
for(i in 1:mstar){
cat("Time series in group",i,":",names[which(gnstar == i)],"\n")
}
cat("HM Measure: ",HM,"\n")
if(indlpml != 0){
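# Column i of auxlpml holds f(y_i | draw) for each retained draw; CPO_i is
# the harmonic mean of these densities and LPML = sum_i log(CPO_i).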
auxlpml <- 1/auxlpml
cpo <- colMeans(auxlpml)
cpo <- 1/cpo
lpml <- sum(log(cpo))
}
if(indlpml !=0){
if((level+trend+seasonality) == 0){
return(list(mstar = mstar,gnstar = gnstar,HM = HM,arrho = arrho,ara = ara,arb = arb,lpml = lpml,
sig2epssample = sig2epssample,sig2alphasample = sig2alphasample,
sig2thesample = sig2thesample,rhosample = rhosample,asample = asample,
bsample = bsample,msample = msample,periods = periods,scale=scale))
}else if((level+trend+seasonality) == 3){
return(list(mstar = mstar,gnstar = gnstar,HM = HM,arrho = arrho,ara = ara,arb = arb,lpml = lpml,
sig2epssample = sig2epssample,sig2betasample = sig2betasample,
sig2thesample = sig2thesample,rhosample = rhosample,asample = asample,
bsample = bsample,msample = msample,periods = periods,scale=scale))
}else{
return(list(mstar = mstar,gnstar = gnstar,HM = HM,arrho = arrho,ara = ara,arb = arb,lpml = lpml,
sig2epssample = sig2epssample,sig2alphasample = sig2alphasample,
sig2betasample = sig2betasample,sig2thesample = sig2thesample,rhosample = rhosample,
asample = asample,bsample = bsample,msample = msample,periods = periods,scale=scale))
}
}else{
if((level+trend+seasonality) == 0){
return(list(mstar = mstar,gnstar = gnstar,HM = HM,arrho = arrho,ara = ara,arb = arb,
sig2epssample = sig2epssample,sig2alphasample = sig2alphasample,
sig2thesample = sig2thesample,rhosample = rhosample,asample = asample,
bsample = bsample,msample = msample,periods = periods,scale=scale))
}else if((level+trend+seasonality) == 3){
return(list(mstar = mstar,gnstar = gnstar,HM = HM,arrho = arrho,ara = ara,arb = arb,
sig2epssample = sig2epssample,sig2betasample = sig2betasample,
sig2thesample = sig2thesample,rhosample = rhosample,asample = asample,
bsample = bsample,msample = msample,periods = periods,scale=scale))
}else{
return(list(mstar = mstar,gnstar = gnstar,HM = HM,arrho = arrho,ara = ara,arb = arb,
sig2epssample = sig2epssample,sig2alphasample = sig2alphasample,
sig2betasample = sig2betasample,sig2thesample = sig2thesample,rhosample = rhosample,
asample = asample,bsample = bsample,msample = msample,periods = periods,scale=scale))
}
}
}
|
/scratch/gouwar.j/cran-all/cranData/BNPTSclust/R/tseriescm.R
|
tseriescq <-
function(data,maxiter=500,burnin=floor(0.1*maxiter),thinning=5,scale=TRUE,
level=FALSE,trend=TRUE,seasonality=TRUE,deg=2,c0eps=2,c1eps=1,
c0beta=2,c1beta=1,c0alpha=2,c1alpha=1,priora=TRUE,pia=0.5,
q0a=1,q1a=1,priorb=TRUE,q0b=1,q1b=1,a=0.25,b=0,indlpml=FALSE){
# Function that performs the time series clustering algorithm
# described in Nieto-Barajas and Contreras-Cristan (2014) "A Bayesian
# Non-Parametric Approach for Time Series Clustering". Bayesian
# Analysis, Vol. 9, No. 1, pp. 147-170. This function is
# designed for quarterly time series data.
#
# IN:
#
# data <- Data frame with the time series information.
# maxiter <- Maximum number of iterations for Gibbs sampling.
# Default value = 500.
# burnin <- Burn-in period of the Markov Chain generated by Gibbs
# sampling. Default value = floor(0.1*maxiter).
# thinning <- Number that indicates how many Gibbs sampling simulations
# should be skipped to form the Markov Chain. Default value = 5.
# scale <- Flag that indicates if the time series data should be scaled to the
# [0,1] interval with a linear transformation as proposed by
# Nieto-Barajas and Contreras-Cristan (2014). If TRUE, then the time
# series are scaled to the [0,1] interval.
# level <- Flag that indicates if the level of the time
# series will be considered for clustering. If TRUE, then it
# is taken into account.
# trend <- Flag that indicates if the polynomial trend of
# the model will be considered for clustering.
# If TRUE, then it is taken into account.
# seasonality <- Flag that indicates if the seasonal components
# of the model will be considered for clustering.
# If TRUE, then they are taken into account.
# deg <- Degree of the polynomial trend of the model.
# Default value = 2.
# c0eps <- Shape parameter of the hyper-prior distribution
# on sig2eps. Default value = 2.
# c1eps <- Rate parameter of the hyper-prior distribution
# on sig2eps. Default value = 1.
# c0beta <- Shape parameter of the hyper-prior distribution
# on sig2beta. Default value = 2.
# c1beta <- Rate parameter of the hyper-prior distribution
# on sig2beta. Default value = 1.
# c0alpha <- Shape parameter of the hyper-prior distribution
# on sig2alpha. Default value = 2.
# c1alpha <- Rate parameter of the hyper-prior distribution
# on sig2alpha. Default value = 1.
# priora <- Flag that indicates if a prior on parameter "a" is
# to be assigned. If TRUE, a prior on "a" is assigned.
# Default value = TRUE.
# pia <- Mixing proportion of the prior distribution on parameter
# "a". Default value = 0.5.
# q0a <- Shape parameter of the continuous part of the prior
# distribution on parameter "a". Default value = 1.
# q1a <- Shape parameter of the continuous part of the prior
# distribution on parameter "a". Default value = 1.
# priorb <- Flag that indicates if a prior on parameter "b" is
# to be assigned. If TRUE, a prior on "b" is assigned.
# Default value = TRUE.
# q0b <- Shape parameter of the prior distribution on parameter
# "b". Default value = 1.
# q1b <- Rate parameter of the prior distribution on parameter
# "b". Default value = 1.
# a <- Initial/fixed value of parameter "a".
# Default value = 0.25.
# b <- Initial/fixed value of parameter "b".
# Default value = 0.
# indlpml <- Flag that indicates if the LPML is to be calculated.
# If TRUE, LPML is calculated. Default value = FALSE.
#
# OUT:
#
# mstar <- Number of groups of the chosen cluster configuration.
# gnstar <- Array that contains the group number to which each time
# series belongs.
# HM <- Heterogeneity Measure of the chosen cluster configuration.
# arrho <- Acceptance rate of the parameter "rho".
# ara <- Acceptance rate of the parameter "a".
# arb <- Acceptance rate of the parameter "b".
# sig2epssample <- Matrix that in its columns contains the sample of each sig2eps_i's posterior distribution after Gibbs sampling.
# sig2alphasample <- Matrix that in its columns contains the sample of each sig2alpha_i's posterior distribution after Gibbs sampling.
# sig2betasample <- Matrix that in its columns contains the sample of each sig2beta_i's posterior distribution after Gibbs sampling.
# sig2thesample <- Vector that contains the sample of sig2the's posterior distribution after Gibbs sampling.
# rhosample <- Vector that contains the sample of rho's posterior distribution after Gibbs sampling.
# asample <- Vector that contains the sample of a's posterior distribution after Gibbs sampling.
# bsample <- Vector that contains the sample of b's posterior distribution after Gibbs sampling.
# msample <- Vector that contains the sample of the number of groups at each Gibbs sampling iteration.
# lpml <- If indlpml = TRUE, lpml contains the value of the LPML of the
# chosen model.
# scale <- Flag that indicates if the time series data were scaled to the
# [0,1] interval with a linear transformation. This will be taken as an input for the
# plotting functions.
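# Usage sketch (illustrative only; `quarterly` stands for a data frame whose
# columns are quarterly time series):
# fit <- tseriescq(quarterly, indlpml = TRUE)
# fit$lpml    # log pseudo marginal likelihood of the chosen model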
if(level == TRUE){
level <- 1
}else{
level <- 0
}
if(trend == TRUE){
trend <- 1
}else{
trend <- 0
}
if(seasonality == TRUE){
seasonality <- 1
}else{
seasonality <- 0
}
if(priora == TRUE){
priora <- 1
}else{
priora <- 0
}
if(priorb == TRUE){
priorb <- 1
}else{
priorb <- 0
}
if(indlpml == TRUE){
indlpml <- 1
}else{
indlpml <- 0
}
if(deg%%1 != 0 | deg <= 0){
stop("deg must be a positive integer number.")
}
if(maxiter%%1 != 0 | maxiter <= 0){
stop("maxiter must be a positive (large) integer number.")
}
if(burnin%%1 != 0 | burnin < 0){
stop("burnin must be a non-negative integer number.")
}
if(thinning%%1 != 0 | thinning < 0){
stop("thinning must be a non-negative integer number.")
}
if(maxiter <= burnin){
stop("maxiter cannot be less than or equal to burnin.")
}
if(c0eps <= 0 | c1eps <= 0 | c0beta <= 0 | c1beta <= 0 |
c0alpha <= 0 | c1alpha <= 0){
stop("c0eps, c1eps, c0beta, c1beta, c0alpha and c1alpha must be positive numbers.")
}
if(pia <= 0 | pia >= 1){
stop("The mixing proportion pia must be a number in (0,1).")
}
if(q0a <= 0 | q1a <= 0){
stop("q0a and q1a must be positive numbers.")
}
if(a < 0 | a >= 1){
stop("'a' must be a number in [0,1).")
}
if(q0b <= 0 | q1b <= 0){
stop("q0b and q1b must be positive numbers.")
}
if(b <= -a){
stop("'b' must be greater than '-a'.")
}
data <- scaleandperiods(data,scale)
mydata <- as.matrix(data$mydata) # Matrix with the scaled data.
periods <- data$periods # Array with the data periods.
cts <- data$cts # Variable that indicates if any time series
# were removed from the original data set because they were constant.
##### CONSTRUCTION OF THE DESIGN MATRICES #####
T <- nrow(mydata) # Number of periods of the time series
n <- ncol(mydata) # Number of time series present in the data
DM <- designmatrices(level,trend,seasonality,deg,T,n,fun="tseriescq")
p <- DM$p
d <- DM$d
if((level+trend+seasonality) == 0){
Z <- DM$Z
}else if((level+trend+seasonality) == 3){
X <- DM$X
}else{
Z <- DM$Z
X <- DM$X
}
##### INITIAL VALUES FOR THE PARAMETERS THAT WILL BE PART OF THE GIBBS SAMPLING #####
sig2eps <- matrix(1,n,1) # Vector that has the diagonal entries of the variance-covariance matrix for every epsilon_i.
sig2the <- 1 # Initial value for sig2the.
rho <- 0 # Initial value for rho.
P <- matrix(0,T,T) # Initial matrix P.
for (j in seq(T)){
for (k in seq(T)){
P[j,k] <- rho^(abs(j-k))
}
}
R <- sig2the*P # Initial matrix R.
if((level+trend+seasonality) == 0){
sig2alpha <- matrix(1,p,1) # Vector that has the diagonal entries of the variance-covariance matrix for alpha.
sigmaalpha <- diag(c(sig2alpha),p,p) # Variance-covariance matrix for alpha.
invsigmaalpha <- diag(1/c(sig2alpha),p,p) # Inverse variance-covariance matrix for alpha.
alpha <- matrix(mvrnorm(n,matrix(0,p,1),sigmaalpha),p,n) # alpha is a matrix with a vector value of alpha for every time series in its columns.
theta <- matrix(mvrnorm(n,matrix(0,T,1),R),T,n) # theta is a matrix with a vector value of theta for every time series in its columns.
gamma <- theta # With no beta component, gamma reduces to theta.
}else if((level+trend+seasonality) == 3){
sig2beta <- matrix(1,d,1) # Vector that has the diagonal entries of the variance-covariance matrix for beta.
sigmabeta <- diag(c(sig2beta),d,d) # Variance-covariance matrix for beta.
invsigmabeta <- diag(1/c(sig2beta),d,d) # Inverse variance-covariance matrix for beta.
beta <- matrix(mvrnorm(n,matrix(0,d,1),sigmabeta),d,n) # beta is a matrix with a vector value of beta for every time series in its columns.
theta <- matrix(mvrnorm(n,matrix(0,T,1),R),T,n) # theta is a matrix with a vector value of theta for every time series in its columns.
gamma <- rbind(beta,theta) # gamma is the union by rows of the beta and theta matrices.
}else{
sig2beta <- matrix(1,d,1) # Vector that has the diagonal entries of the variance-covariance matrix for beta.
sigmabeta <- diag(c(sig2beta),d,d) # Variance-covariance matrix for beta.
invsigmabeta <- diag(1/c(sig2beta),d,d) # Inverse variance-covariance matrix for beta.
sig2alpha <- matrix(1,p,1) # Vector that has the diagonal entries of the variance-covariance matrix for alpha.
sigmaalpha <- diag(c(sig2alpha),p,p) # Variance-covariance matrix for alpha.
invsigmaalpha <- diag(1/c(sig2alpha),p,p) # Inverse variance-covariance matrix for alpha.
alpha <- matrix(mvrnorm(n,matrix(0,p,1),sigmaalpha),p,n) # alpha is a matrix with a vector value of alpha for every time series in its columns.
beta <- matrix(mvrnorm(n,matrix(0,d,1),sigmabeta),d,n) # beta is a matrix with a vector value of beta for every time series in its columns.
theta <- matrix(mvrnorm(n,matrix(0,T,1),R),T,n) # theta is a matrix with a vector value of theta for every time series in its columns.
gamma <- rbind(beta,theta) # gamma is the union by rows of the beta and theta matrices.
}
iter <- 0 # Counter for each Gibbs sampling iteration.
iter1 <- 0 # Counter for the number of iterations saved during the Gibbs sampling.
arrho <- 0 # Variable that will contain the acceptance rate of rho in the Metropolis-Hastings step.
ara <- 0 # Variable that will contain the acceptance rate of a in the Metropolis-Hastings step.
arb <- 0 # Variable that will contain the acceptance rate of b in the Metropolis-Hastings step.
sim <- matrix(0,n,n) # Initialization of the similarities matrix.
if(thinning == 0){
CL <- floor(maxiter-burnin)
thinning <- 1 # thinning = 0 means "keep every iteration"; avoids NaN in iter %% thinning
}else{
CL <- floor((maxiter-burnin)/thinning)
}
memory <- matrix(0,CL*n,n) # Matrix that will contain the cluster configuration of every iteration that is saved during the Gibbs sampling.
memorygn <- matrix(0,CL,n) # Matrix that will save the group number to which each time series belongs in every iteration saved.
sig2epssample <- matrix(0,CL,n) # Matrix that in its columns will contain the sample of each sig2eps_i's posterior distribution after Gibbs sampling.
sig2thesample <- matrix(0,CL,1) # Vector that will contain the sample of sig2the's posterior distribution after Gibbs sampling.
rhosample <- matrix(0,CL,1) # Vector that will contain the sample of rho's posterior distribution after Gibbs sampling.
asample <- matrix(0,CL,1) # Vector that will contain the sample of a's posterior distribution after Gibbs sampling.
bsample <- matrix(0,CL,1) # Vector that will contain the sample of b's posterior distribution after Gibbs sampling.
msample <- matrix(0,CL,1) # Vector that will contain the sample of the number of groups at each Gibbs sampling iteration.
if((level+trend+seasonality) == 0){
sig2alphasample <- matrix(0,CL,p) # Matrix that in its columns will contain the sample of each sig2alpha_i's posterior distribution after Gibbs sampling.
}else if((level+trend+seasonality) == 3){
sig2betasample <- matrix(0,CL,d) # Matrix that in its columns will contain the sample of each sig2beta_i's posterior distribution after Gibbs sampling.
}else{
sig2alphasample <- matrix(0,CL,p) # Matrix that in its columns will contain the sample of each sig2alpha_i's posterior distribution after Gibbs sampling.
sig2betasample <- matrix(0,CL,d) # Matrix that in its columns will contain the sample of each sig2beta_i's posterior distribution after Gibbs sampling.
}
if(indlpml != 0){
iter2 <- 0
auxlpml <- matrix(0,floor((maxiter-burnin)/10),n)
}
##### BEGINNING OF GIBBS SAMPLING #####
while(iter < maxiter){
##### 1) SIMULATION OF ALPHA'S POSTERIOR DISTRIBUTION #####
if((level+trend+seasonality) != 3){
if((level+trend+seasonality) == 0){
for(i in 1:n){
sigmaeps <- diag(c(sig2eps[i]),T)
Q <- sigmaeps + R
Qinv <- chol2inv(chol(Q))
Winv <- Qinv
W <- Q
Valphainv <- (t(Z) %*% Winv %*% Z) + invsigmaalpha
Valpha <- chol2inv(chol(Valphainv))
mualpha <- Valpha %*% t(Z) %*% Winv %*% mydata[,i]
alpha[,i] <- mvrnorm(1,mualpha,Valpha)
}
}else{
for(i in 1:n){
sigmaeps <- diag(c(sig2eps[i]),T)
Q <- sigmaeps + R
Qinv <- chol2inv(chol(Q))
Vinv <- t(X) %*% Qinv %*% X + invsigmabeta
V <- chol2inv(chol(Vinv))
Winv <- Qinv + (Qinv %*% X %*% V %*% t(X) %*% Qinv)
W <- chol2inv(chol(Winv))
Valphainv <- (t(Z) %*% Winv %*% Z) + invsigmaalpha
Valpha <- chol2inv(chol(Valphainv))
mualpha <- Valpha %*% t(Z) %*% Winv %*% mydata[,i]
alpha[,i] <- mvrnorm(1,mualpha,Valpha)
}
}
}
##### 2) SIMULATION OF GAMMA'S = (BETA,THETA) POSTERIOR DISTRIBUTION #####
for(i in 1:n){
gr <- comp11(gamma[1,-i]) # Only the first entries of gamma[,-i] are compared to determine the cluster configuration
jstar <- gr$jstar # Object that contains the positions of the unique vectors in gamma[,-i]
gmi <- gamma[,-i] # Matrix with all the elements of gamma, except for the i-th element
gammastar <- as.matrix(gmi[,jstar]) # Matrix with the unique vectors in gamma(-i)
mi <- gr$rstar # Number of unique vectors in gamma(-i) (Number of groups)
nstar <- gr$nstar # Frequency of each unique vector in gamma(-i)
if((level+trend+seasonality) == 0){
thetastar <- as.matrix(gammastar[(d+1):(T+d),])
}else{
if(d == 1){
betastar <- t(as.matrix(gammastar[1:d,]))
thetastar <- as.matrix(gammastar[(d+1):(T+d),])
}else{
betastar <- as.matrix(gammastar[1:d,]) # Separation of unique vectors between betastar and thetastar
thetastar <- as.matrix(gammastar[(d+1):(T+d),])
}
}
# Matrices necessary for the following steps
sigmaeps <- sig2eps[i]*diag(1,T)
invsigmaeps <- (1/sig2eps[i])*diag(1,T)
Q <- sigmaeps + R
Qinv <- chol2inv(chol(Q))
if((level+trend+seasonality) == 0){
Winv <- Qinv
W <- Q
}else{
Vinv <- t(X) %*% Qinv %*% X + invsigmabeta
V <- chol2inv(chol(Vinv))
Winv <- Qinv + (Qinv %*% X %*% V %*% t(X) %*% Qinv)
W <- chol2inv(chol(Winv))
}
# Computing weights for gamma(i)'s posterior distribution
if((level+trend+seasonality) == 0){
dj <- matrix(0,mi,1)
d0 <- (b + a*mi)*dmvnorm(mydata[,i],(Z %*% alpha[,i]),W)
den <- 0
for(j in 1:mi){
dj[j] <- (nstar[j] - a)*dmvnorm(mydata[,i],(Z %*% alpha[,i] + thetastar[,j]),sigmaeps)
}
den <- d0 + sum(dj)
if(den == 0){
d0 <- log(b + a*mi) + dmvnorm(mydata[,i],(Z %*% alpha[,i]),W,log=TRUE)
for(j in 1:mi){
dj[j] <- log(nstar[j] - a) + dmvnorm(mydata[,i],(Z %*% alpha[,i] + thetastar[,j]),sigmaeps,log=TRUE)
}
dj <- rbind(dj,d0)
aa <- min(dj)
q <- (1+(dj-aa)+(dj-aa)^2/2)/sum(1+(dj-aa)+(dj-aa)^2/2)
}else{
q <- dj/den
q <- rbind(q,d0/den)
}
}else if((level+trend+seasonality) == 3){
dj <- matrix(0,mi,1)
d0 <- (b + a*mi)*dmvnorm(mydata[,i],matrix(0,T,1),W)
den <- 0
for(j in 1:mi){
dj[j] <- (nstar[j] - a)*dmvnorm(mydata[,i],(X %*% betastar[,j] + thetastar[,j]),sigmaeps)
}
den <- d0 + sum(dj)
if(den == 0){
d0 <- log(b + a*mi) + dmvnorm(mydata[,i],matrix(0,T,1),W,log=TRUE)
for(j in 1:mi){
dj[j] <- log(nstar[j] - a) + dmvnorm(mydata[,i],(X %*% betastar[,j] + thetastar[,j]),sigmaeps,log=TRUE)
}
dj <- rbind(dj,d0)
aa <- min(dj)
q <- (1+(dj-aa)+(dj-aa)^2/2)/sum(1+(dj-aa)+(dj-aa)^2/2)
}else{
q <- dj/den
q <- rbind(q,d0/den)
}
}else{
dj <- matrix(0,mi,1)
d0 <- (b + a*mi)*dmvnorm(mydata[,i],(Z %*% alpha[,i]),W)
den <- 0
for(j in 1:mi){
dj[j] <- (nstar[j] - a)*dmvnorm(mydata[,i],(Z %*% alpha[,i] + X %*% betastar[,j] + thetastar[,j]),sigmaeps)
}
den <- d0 + sum(dj)
if(den == 0){
d0 <- log(b + a*mi) + dmvnorm(mydata[,i],(Z %*% alpha[,i]),W,log=TRUE)
for(j in 1:mi){
dj[j] <- log(nstar[j] - a) + dmvnorm(mydata[,i],(Z %*% alpha[,i] + X %*% betastar[,j] + thetastar[,j]),sigmaeps,log=TRUE)
}
dj <- rbind(dj,d0)
aa <- min(dj)
q <- (1+(dj-aa)+(dj-aa)^2/2)/sum(1+(dj-aa)+(dj-aa)^2/2)
}else{
q <- dj/den
q <- rbind(q,d0/den)
}
}
# Sampling a number between 1 and (mi+1) to determine what will be the simulated value for gamma(i)
# The probabilities of the sample are based on the weights previously computed
y <- sample((1:(mi+1)), size=1, prob = q)
# If sample returns the value (mi+1), a new vector from g0 will be simulated and assigned to gamma(i)
if (y == mi+1){
if((level+trend+seasonality) == 0){
Sthetai <- chol2inv(chol(invsigmaeps + chol2inv(chol(R))))
muthetai <- Sthetai %*% invsigmaeps %*% (mydata[,i] - (Z %*% alpha[,i]))
theta0 <- matrix(mvrnorm(1,muthetai,Sthetai),T,1)
gamma[,i] <- theta0
}else if((level+trend+seasonality) == 3){
Sthetai <- chol2inv(chol(invsigmaeps + chol2inv(chol(R))))
muthetai <- Sthetai %*% invsigmaeps %*% (mydata[,i] - (X %*% beta[,i]))
mubetai <- V %*% t(X) %*% Qinv %*% mydata[,i]
beta0 <- matrix(mvrnorm(1,mubetai,V),d,1)
theta0 <- matrix(mvrnorm(1,muthetai,Sthetai),T,1)
gamma[,i] <- rbind(beta0,theta0)
}else{
Sthetai <- chol2inv(chol(invsigmaeps + chol2inv(chol(R))))
muthetai <- Sthetai %*% invsigmaeps %*% (mydata[,i] - (Z %*% alpha[,i]) - (X %*% beta[,i]))
mubetai <- V %*% t(X) %*% Qinv %*% (mydata[,i] - (Z %*% alpha[,i]))
beta0 <- matrix(mvrnorm(1,mubetai,V),d,1)
theta0 <- matrix(mvrnorm(1,muthetai,Sthetai),T,1)
gamma[,i] <- rbind(beta0,theta0)
}
} else{
gamma[,i] = gammastar[,y] # Otherwise, column y from gammastar will be assigned to gamma(i)
}
}
##### 2.1) ACCELERATION STEP AND CONSTRUCTION OF SIMILARITIES MATRIX #####
gr <- comp11(gamma[1,]) # Computation of all latent classes of the gamma vectors after the simulation of their posterior distribution.
jstar <- gr$jstar
gammastar <- as.matrix(gamma[,jstar]) # Unique values of the gamma vectors.
m <- gr$rstar # Total number of latent classes (groups).
nstar <- gr$nstar # Frequency of each latent class (group).
gn <- gr$gn # Identifier of the group to which each time series belongs.
if((level+trend+seasonality) == 0){
theta <- as.matrix(gamma[((d+1):(T+d)),])
thetastar <- as.matrix(gammastar[(d+1):(T+d),])
}else{
if(d == 1){
beta <- t(as.matrix(gamma[(1:d),])) # Splitting the gamma vectors into beta and theta.
theta <- as.matrix(gamma[((d+1):(T+d)),])
betastar <- t(as.matrix(gammastar[(1:d),]))
thetastar <- as.matrix(gammastar[((d+1):(T+d)),])
}else{
beta <- as.matrix(gamma[(1:d),]) # Splitting the gamma vectors into beta and theta.
theta <- as.matrix(gamma[((d+1):(T+d)),])
betastar <- as.matrix(gammastar[(1:d),])
thetastar <- as.matrix(gammastar[((d+1):(T+d)),])
}
}
for(j in 1:m){
if((level+trend+seasonality) == 0){
cc <- which(gn == j) # Identifying the cluster configuration of each group.
aux <- matrix(0,T,T) # Calculating the necessary matrices for the simulation of the distributions for the acceleration step.
aux1 <- matrix(0,T,1)
aux2 <- matrix(0,T,1)
for(i in 1:nstar[j]){
aux <- aux + diag((1/sig2eps[cc[i]]),T)
aux1 <- aux1 + (diag((1/sig2eps[cc[i]]),T) %*% (mydata[,cc[i]] - Z %*% alpha[,cc[i]]))
}
Sthetastar <- chol2inv(chol(aux + chol2inv(chol(R))))
muthetastar <- Sthetastar %*% aux1
theta[,cc] <- mvrnorm(1,muthetastar,Sthetastar)
}else if((level+trend+seasonality) == 3){
cc <- which(gn == j) # Identifying the cluster configuration of each group.
aux <- matrix(0,T,T) # Calculating the necessary matrices for the simulation of the distributions for the acceleration step.
aux1 <- matrix(0,T,1)
aux2 <- matrix(0,T,1)
for(i in 1:nstar[j]){
aux <- aux + diag((1/sig2eps[cc[i]]),T)
aux1 <- aux1 + (diag((1/sig2eps[cc[i]]),T) %*% (mydata[,cc[i]] - X %*% betastar[,j]))
aux2 <- aux2 + (diag((1/sig2eps[cc[i]]),T) %*% (mydata[,cc[i]] - thetastar[,j]))
}
Sthetastar <- chol2inv(chol(aux + chol2inv(chol(R))))
muthetastar <- Sthetastar %*% aux1
Sbetastar <- chol2inv(chol((t(X) %*% aux %*% X) + invsigmabeta))
mubetastar <- Sbetastar %*% t(X) %*% aux2
beta[,cc] <- mvrnorm(1,mubetastar,Sbetastar)
theta[,cc] <- mvrnorm(1,muthetastar,Sthetastar)
}else{
cc <- which(gn == j) # Identifying the cluster configuration of each group.
aux <- matrix(0,T,T) # Calculating the necessary matrices for the simulation of the distributions for the acceleration step.
aux1 <- matrix(0,T,1)
aux2 <- matrix(0,T,1)
for(i in 1:nstar[j]){
aux <- aux + diag((1/sig2eps[cc[i]]),T)
aux1 <- aux1 + (diag((1/sig2eps[cc[i]]),T) %*% (mydata[,cc[i]] - Z %*% alpha[,cc[i]] - X %*% betastar[,j]))
aux2 <- aux2 + (diag((1/sig2eps[cc[i]]),T) %*% (mydata[,cc[i]] - Z %*% alpha[,cc[i]] - thetastar[,j]))
}
Sthetastar <- chol2inv(chol(aux + chol2inv(chol(R))))
muthetastar <- Sthetastar %*% aux1
Sbetastar <- chol2inv(chol((t(X) %*% aux %*% X) + invsigmabeta))
mubetastar <- Sbetastar %*% t(X) %*% aux2
beta[,cc] <- mvrnorm(1,mubetastar,Sbetastar)
theta[,cc] <- mvrnorm(1,muthetastar,Sthetastar)
}
# Computation of similarities matrix and saving the cluster configuration of the current iteration.
if((iter %% thinning) == 0 & iter >= burnin){
for(i1 in 1:nstar[j]){
for(i2 in i1:nstar[j]){
sim[cc[i1],cc[i2]] <- sim[cc[i1],cc[i2]] + 1
sim[cc[i2],cc[i1]] <- sim[cc[i2],cc[i1]] + 1
memory[(cc[i1] + (n*iter1)),cc[i2]] <- memory[(cc[i1] + (n*iter1)),cc[i2]] + 1
memory[(cc[i2] + (n*iter1)),cc[i1]] <- memory[(cc[i2] + (n*iter1)),cc[i1]] + 1
}
}
}
}
if((level+trend+seasonality) == 0){
gamma <- theta # Obtaining all gamma vectors after the acceleration step.
}else{
gamma <- rbind(beta,theta) # Obtaining all gamma vectors after the acceleration step.
}
gr <- comp11(gamma[1,]) # Obtaining all the latent classes in the gamma vectors.
jstar <- gr$jstar
gammastar <- as.matrix(gamma[,jstar]) # Unique values of the gamma vectors.
m <- gr$rstar # Number of groups after acceleration step.
nstar <- gr$nstar # Frequency of each group.
gn <- gr$gn # Identifier of the group to which each time series belongs.
if((level+trend+seasonality) == 0){
theta <- as.matrix(gamma[((d+1):(T+d)),])
thetastar <- as.matrix(gammastar[((d+1):(T+d)),])
}else{
if(d == 1){
beta <- t(as.matrix(gamma[(1:d),])) # Splitting the gamma vectors in beta and theta.
theta <- as.matrix(gamma[((d+1):(T+d)),])
betastar <- t(as.matrix(gammastar[(1:d),]))
thetastar <- as.matrix(gammastar[((d+1):(T+d)),])
}else{
beta <- as.matrix(gamma[(1:d),]) # Splitting the gamma vectors in beta and theta.
theta <- as.matrix(gamma[((d+1):(T+d)),])
betastar <- as.matrix(gammastar[(1:d),])
thetastar <- as.matrix(gammastar[((d+1):(T+d)),])
}
}
##### 6) SIMULATION OF SIG2THE'S POSTERIOR DISTRIBUTION #####
cholP <- chol(P) # Calculation of the Cholesky factorization of P.
Pinv <- chol2inv(cholP) # Obtaining the inverse of P.
s1 <- 0
# Calculating the sum necessary for the rate parameter of the posterior distribution.
for(j in 1:m){
s1 <- s1 + t(as.matrix(thetastar[,j])) %*% Pinv %*% as.matrix(thetastar[,j])
}
sig2the <- 1/rgamma(1,(m*T/2),(s1/2))
##### 7) SIMULATION OF RHO'S POSTERIOR DISTRIBUTION (Metropolis-Hastings step) #####
rhomh <- runif(1,-1,1) # Sampling from the proposal distribution.
Pmh <- matrix(0,T,T)
# Calculating the matrix P for the proposed value rhomh.
for (j in 1:T){
for (k in 1:T){
Pmh[j,k] <- rhomh^(abs(j-k))
}
}
cholPmh <- chol(Pmh) # Calculating the Cholesky factor of Pmh.
Pmhinv <- chol2inv(cholPmh) # Obtaining the inverse from Pmh
s <- 0
# Calculating the sum necessary for the computation of the acceptance probability.
for(j in 1:m){
s <- s + t(as.matrix(thetastar[,j])) %*% (Pmhinv-Pinv) %*% as.matrix(thetastar[,j])
}
# Computation of the acceptance probability.
q <- (-m/2)*(log(prod(diag(cholPmh)))- log(prod(diag(cholP)))) - ((1/(2*sig2the))*s) + (1/2)*(log(1 + rhomh*rhomh) - log(1 + rho*rho)) - log(1 - rhomh*rhomh) + log(1 - rho*rho)
# Definition of the acceptance probability.
quot <- min(0,q)
# Sampling a uniform random variable in [0,1] to determine if the proposal is accepted or not.
unif1 <- runif(1,0,1)
# Acceptance step.
if(log(unif1) <= quot){
rho <- rhomh
arrho <- arrho + 1
for (j in seq(T)){
for (k in seq(T)){
P[j,k] <- rho^(abs(j-k))
}
}
}
R <- sig2the*P
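# The double loops that rebuild P above can be vectorized without changing the
# result; a minimal equivalent sketch of the AR(1)-type correlation matrix:
#   P <- rho^abs(outer(seq_len(T), seq_len(T), "-"))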
##### 3) SIMULATION OF SIG2EPS' POSTERIOR DISTRIBUTION #####
if((level+trend+seasonality) == 0){
M <- t(mydata - Z%*%alpha - theta) %*% (mydata - Z%*%alpha - theta)
}else if((level+trend+seasonality) == 3){
M <- t(mydata - X%*%beta - theta) %*% (mydata - X%*%beta - theta)
}else{
M <- t(mydata - Z%*%alpha - X%*%beta - theta) %*% (mydata - Z%*%alpha - X%*%beta - theta)
}
sig2eps <- 1/rgamma(n,(c0eps + T/2),(c1eps + diag(M)/2))
##### 4) SIMULATION OF SIGMAALPHA'S POSTERIOR DISTRIBUTION #####
if((level+trend+seasonality) != 3){
sig2alpha <- 1/rgamma(p,(c0alpha + n/2),(c1alpha + rowSums(alpha^2)/2))
sigmaalpha <- diag(c(sig2alpha),p,p)
invsigmaalpha <- diag(1/c(sig2alpha),p,p)
}
##### 5) SIMULATION OF SIGMABETA'S POSTERIOR DISTRIBUTION #####
if((level+trend+seasonality) != 0){
sig2beta <- 1/rgamma(d,(c0beta + m/2),(c1beta + colSums(betastar^2)/2))
sigmabeta <- diag(c(sig2beta),d,d)
invsigmabeta <- diag(1/c(sig2beta),d,d)
}
##### 8) SIMULATION OF A'S POSTERIOR DISTRIBUTION (METROPOLIS-HASTINGS WITH UNIFORM PROPOSALS) #####
if(priora == 1){
if (b < 0){
amh <- runif(1,-b,1)
} else{
unif2 <- runif(1,0,1)
if (unif2 <= 0.5){
amh <- 0
} else{
amh <- runif(1,0,1)
}
}
# If b is not greater than -a, then accept the proposal directly.
if ((a+b) <= 0){
a <- amh
} else{
quot1 <- 0
if(m > 1){
for (j in 1:(m-1)){
quot1 <- quot1 + log(b + j*amh) + lgamma(nstar[j] - amh) - lgamma(1 - amh) - log(b + j*a) - lgamma(nstar[j] - a) + lgamma(1 - a) # lgamma() avoids overflow of gamma() for large group sizes.
}
}
quot1 <- quot1 + lgamma(nstar[m] - amh) - lgamma(1 - amh) - lgamma(nstar[m] - a) + lgamma(1 - a)
if (a == 0){
fa <- 0.5
} else{
fa <- 0.5*dbeta(a,q0a,q1a)
}
if (amh == 0){
famh <- 0.5
} else{
famh <- 0.5*dbeta(amh,q0a,q1a)
}
# Quotient to evaluate the Metropolis-Hastings step in logs
quot1 <- quot1 + log(famh) - log(fa)
# Determination of the probability for the Metropolis-Hastings step
alphamh1 <- min(quot1,0)
unif3 <- runif(1,0,1)
# Acceptance step
if (log(unif3) <= alphamh1){
a <- amh
ara <- ara + 1
}
}
}
##### 9) SIMULATION OF B'S POSTERIOR DISTRIBUTION (METROPOLIS-HASTINGS WITH GAMMA PROPOSALS) #####
if(priorb == 1){
y1 <- rgamma(1,1,0.1)
bmh <- y1 - a
# If b is not greater than -a, then accept the proposal directly.
if ((a+b) <= 0){
b <- bmh
} else{
quot2 <- 0
if(m > 1){
for (j in 1:(m-1)){
quot2 <- quot2 + log(bmh + j*a) - log(b + j*a)
}
}
fb <- dgamma(a+b,q0b,q1b)
fbmh <- dgamma(y1,q0b,q1b)
# Quotient to evaluate the Metropolis-Hastings step in logs
quot2 <- quot2 + (lgamma(bmh+1) - lgamma(bmh+n) - lgamma(b+1) + lgamma(b+n)) + (log(fbmh) - log(fb)) - 0.1*(b - bmh)
# Determination of the probability for the Metropolis-Hastings step
alphamh2 <- min(quot2,0)
unif4 <- runif(1,0,1)
# Acceptance step
if (log(unif4) <= alphamh2){
b <- bmh
arb <- arb + 1
}
}
}
if((iter %% thinning) == 0 & iter >= burnin){
iter1 <- iter1 + 1
sig2epssample[iter1,] <- sig2eps
sig2thesample[iter1] <- sig2the
rhosample[iter1] <- rho
asample[iter1] <- a
bsample[iter1] <- b
msample[iter1] <- m
memorygn[iter1,] <- gn
if((level+trend+seasonality) == 0){
sig2alphasample[iter1,] <- sig2alpha
}else if((level+trend+seasonality) == 3){
sig2betasample[iter1,] <- sig2beta
}else{
sig2alphasample[iter1,] <- sig2alpha
sig2betasample[iter1,] <- sig2beta
}
}
if(indlpml != 0){
if((iter %% 10) == 0 & iter >= burnin){
iter2 <- iter2 + 1
for(i in 1:n){
if((level+trend+seasonality) == 0){
for(j in 1:m){
auxlpml[iter2,i] <- auxlpml[iter2,i] + ((nstar[j]-a)/(b+n))*dmvnorm(mydata[,i],(Z %*% alpha[,i] + thetastar[,j]),diag(sig2eps[i],T))
}
sigmaeps <- diag(sig2eps[i],T)
invsigmaeps <- diag((1/sig2eps[i]),T)
Q <- sigmaeps + R
Qinv <- solve(Q)
Winv <- Qinv
W <- Q
auxlpml[iter2,i] <- auxlpml[iter2,i] + ((b+(a*m))/(b+n))*dmvnorm(mydata[,i],(Z %*% alpha[,i]),W)
}else if((level+trend+seasonality) == 3){
for(j in 1:m){
auxlpml[iter2,i] <- auxlpml[iter2,i] + ((nstar[j]-a)/(b+n))*dmvnorm(mydata[,i],(X %*% betastar[,j] + thetastar[,j]),diag(sig2eps[i],T))
}
sigmaeps <- diag(sig2eps[i],T)
invsigmaeps <- diag((1/sig2eps[i]),T)
Q <- sigmaeps + R
Qinv <- solve(Q)
Vinv <- t(X) %*% Qinv %*% X + invsigmabeta
V <- solve(Vinv)
Winv <- Qinv + (Qinv %*% X %*% V %*% t(X) %*% Qinv)
W <- solve(Winv)
auxlpml[iter2,i] <- auxlpml[iter2,i] + ((b+(a*m))/(b+n))*dmvnorm(mydata[,i],(matrix(0,T,1)),W)
}else{
for(j in 1:m){
auxlpml[iter2,i] <- auxlpml[iter2,i] + ((nstar[j]-a)/(b+n))*dmvnorm(mydata[,i],(Z %*% alpha[,i] + X %*% betastar[,j] + thetastar[,j]),diag(sig2eps[i],T))
}
sigmaeps <- diag(sig2eps[i],T)
invsigmaeps <- diag((1/sig2eps[i]),T)
Q <- sigmaeps + R
Qinv <- solve(Q)
Vinv <- t(X) %*% Qinv %*% X + invsigmabeta
V <- solve(Vinv)
Winv <- Qinv + (Qinv %*% X %*% V %*% t(X) %*% Qinv)
W <- solve(Winv)
auxlpml[iter2,i] <- auxlpml[iter2,i] + ((b+(a*m))/(b+n))*dmvnorm(mydata[,i],(Z %*% alpha[,i]),W)
}
}
}
}
iter <- iter + 1
if(iter %% 50 == 0){
cat("Iteration Number: ",iter,". Progress: ",(iter/maxiter)*100,"%","\n")
}
}
##### END OF GIBBS SAMPLING #####
# Calculation of acceptance rates and similarities matrix
arrho <- arrho/iter
ara <- ara/iter
arb <- arb/iter
sim <- sim/iter1
dist <- matrix(0,CL,1)
# Calculating the distance between each cluster configuration to the similarities matrix
for (i in 1:CL){
aux4 <- memory[(((i-1)*n)+1):(i*n),] - sim
dist[i] <- norm(aux4,"F")
}
# Determining which cluster configuration minimizes the distance to the similarities matrix
mstar <- msample[which.min(dist)]
gnstar <- memorygn[which.min(dist),]
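# This implements a least-squares clustering choice in the spirit of Dahl (2006):
# among the sampled configurations, keep the one whose co-clustering matrix is
# closest (in Frobenius norm) to the posterior similarity matrix.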
##### HM MEASURE CALCULATION #####
HM <- 0
for(j in 1:mstar){
cc <- as.matrix(which(gnstar == j))
HM1 <- 0
if(length(cc) > 1){
for(i1 in 1:length(cc)){
for(i2 in 1:i1){
HM1 <- HM1 + sum((mydata[,cc[i2]] - mydata[,cc[i1]])^2)
}
}
HM <- HM + (2/(length(cc)-1))*HM1
}
}
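# HM is a within-group heterogeneity measure: for each group it accumulates the
# squared distances between member series, scaled by 2/(group size - 1), so
# smaller values indicate more homogeneous groups.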
names <- colnames(mydata)
cat("Number of groups of the chosen cluster configuration: ",mstar,"\n")
for(i in 1:mstar){
cat("Time series in group",i,":",names[which(gnstar == i)],"\n")
}
cat("HM Measure: ",HM,"\n")
if(indlpml != 0){
auxlpml <- 1/auxlpml
cpo <- colMeans(auxlpml)
cpo <- 1/cpo
lpml <- sum(log(cpo))
}
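# The block above is the standard Monte Carlo estimator of the conditional
# predictive ordinate, CPO_i = 1 / mean(1 / f(y_i | parameters)), with
# LPML = sum(log(CPO_i)); larger LPML indicates better predictive fit.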
if(indlpml !=0){
if((level+trend+seasonality) == 0){
return(list(mstar = mstar,gnstar = gnstar,HM = HM,arrho = arrho,ara = ara,arb = arb,lpml = lpml,
sig2epssample = sig2epssample,sig2alphasample = sig2alphasample,
sig2thesample = sig2thesample,rhosample = rhosample,asample = asample,
bsample = bsample,msample = msample,periods = periods,scale=scale))
}else if((level+trend+seasonality) == 3){
return(list(mstar = mstar,gnstar = gnstar,HM = HM,arrho = arrho,ara = ara,arb = arb,lpml = lpml,
sig2epssample = sig2epssample,sig2betasample = sig2betasample,
sig2thesample = sig2thesample,rhosample = rhosample,asample = asample,
bsample = bsample,msample = msample,periods = periods,scale=scale))
}else{
return(list(mstar = mstar,gnstar = gnstar,HM = HM,arrho = arrho,ara = ara,arb = arb,lpml = lpml,
sig2epssample = sig2epssample,sig2alphasample = sig2alphasample,
sig2betasample = sig2betasample,sig2thesample = sig2thesample,rhosample = rhosample,
asample = asample,bsample = bsample,msample = msample,periods = periods,scale=scale))
}
}else{
if((level+trend+seasonality) == 0){
return(list(mstar = mstar,gnstar = gnstar,HM = HM,arrho = arrho,ara = ara,arb = arb,
sig2epssample = sig2epssample,sig2alphasample = sig2alphasample,
sig2thesample = sig2thesample,rhosample = rhosample,asample = asample,
bsample = bsample,msample = msample,periods = periods,scale=scale))
}else if((level+trend+seasonality) == 3){
return(list(mstar = mstar,gnstar = gnstar,HM = HM,arrho = arrho,ara = ara,arb = arb,
sig2epssample = sig2epssample,sig2betasample = sig2betasample,
sig2thesample = sig2thesample,rhosample = rhosample,asample = asample,
bsample = bsample,msample = msample,periods = periods,scale=scale))
}else{
return(list(mstar = mstar,gnstar = gnstar,HM = HM,arrho = arrho,ara = ara,arb = arb,
sig2epssample = sig2epssample,sig2alphasample = sig2alphasample,
sig2betasample = sig2betasample,sig2thesample = sig2thesample,rhosample = rhosample,
asample = asample,bsample = bsample,msample = msample,periods = periods,scale=scale))
}
}
}
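# A hedged post-processing sketch, assuming `out` holds the list returned
# above and `mydata` is the input series matrix (only returned fields are used):
#   table(out$msample)                  # posterior frequencies of the number of groups
#   split(colnames(mydata), out$gnstar) # members of the chosen cluster configuration
#   hist(out$rhosample)                 # posterior draws of the correlation parameter rho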
# File: /scratch/gouwar.j/cran-all/cranData/BNPTSclust/R/tseriescq.R
#' Acidity Index Dataset
#'
#' Concerns an acidity index measured in a sample of 155 lakes in north-central
#' Wisconsin.
#'
#'
#' @name acidity
#' @docType data
#' @format A real vector with 155 observations.
#' @references Crawford, S. L., DeGroot, M. H., Kadane, J. B. and Small, M. J.
#' (1992). Modeling lake chemistry distributions: approximate Bayesian methods
#' for estimating a finite mixture model. Technometrics, 34, 441-453.
#' @keywords datasets
#' @examples
#'
#' data(acidity)
#' hist(acidity)
NULL
#' Bayesian nonparametric density estimation
#'
#' This package performs Bayesian nonparametric density estimation for exact
#' and censored data via a normalized random measure mixture model. The package
#' allows the user to specify the mixture kernel, the mixing normalized measure
#' and the choice of performing fully nonparametric mixtures on locations and
#' scales, or semiparametric mixtures on locations only with common scale
#' parameter. Options for the kernels are: two kernels with support in the real
#' line (gaussian and double exponential), two more kernels in the positive
#' line (gamma and lognormal) and one with bounded support (beta). The options
#' for the normalized random measures are members of the class of normalized
#' generalized gamma, which include the Dirichlet process, the normalized
#' inverse gaussian process and the normalized stable process. The type of
#' censored data handled by the package is right, left and interval.
#'
#' \tabular{ll}{ Package: \tab BNPdensity\cr Type: \tab Package\cr Version:
#' \tab 2016.10\cr Date: \tab 2016-10-14\cr License: \tab GPL version 2 or
#' later\cr LazyLoad: \tab yes\cr } The package includes four main functions:
#' MixNRMI1, MixNRMI2, MixNRMI1cens and MixNRMI2cens which implement
#' semiparametric and fully nonparametric mixtures for exact data, and
#' semiparametric and fully nonparametric mixtures for censored data
#' respectively. Additionally, the package includes several other functions
#' required for sampling from conditional distributions in the MCMC
#' implementation. These functions are intended for internal use only.
#'
#' @name BNPdensity-package
#' @aliases BNPdensity-package BNPdensity
#' @docType package
#' @author Barrios, E., Lijoi, A., Nieto-Barajas, L. E. and Prünster, I.;
#' Contributor: Guillaume Kon Kam King.; Maintainer: Ernesto Barrios <ebarrios
#' at itam.mx>
#' @seealso \code{\link{MixNRMI1}}, \code{\link{MixNRMI2}},
#' \code{\link{MixNRMI1cens}}, \code{\link{MixNRMI2cens}}
#' @references Barrios, E., Lijoi, A., Nieto-Barajas, L. E. and Prünster, I.
#' (2013). Modeling with Normalized Random Measure Mixture Models. Statistical
#' Science. Vol. 28, No. 3, 313-334.
#'
#' Kon Kam King, G., Arbel, J. and Prünster, I. (2016). Species Sensitivity
#' Distribution revisited: a Bayesian nonparametric approach. In preparation.
#' @keywords package
#' @examples
#'
#' example(MixNRMI1)
#' example(MixNRMI2)
#' example(MixNRMI1cens)
#' example(MixNRMI2cens)
NULL
#' Fit of MixNRMI1 function to the enzyme dataset
#'
#' This object contains the output when setting set.seed(150520) and running
#' the function Enzyme1.out <- MixNRMI1(enzyme, Alpha = 1, Kappa = 0.007, Gama = 0.5, distr.k = "gamma", distr.p0 = "gamma", asigma = 1, bsigma = 1, Meps = 0.005, Nit = 5000, Pbi = 0.2)
#'
#' See function MixNRMI1
#'
#' @name Enzyme1.out
#' @docType data
#' @keywords datasets
#' @examples
#'
#' data(Enzyme1.out)
NULL
#' Fit of MixNRMI2 function to the enzyme dataset
#'
#' This object contains the output when setting set.seed(150520) and running
#' the function Enzyme2.out <- MixNRMI2(enzyme, Alpha = 1, Kappa = 0.007, Gama = 0.5, distr.k = "gamma", distr.py0 = "gamma", distr.pz0 = "gamma", mu.pz0 = 1, sigma.pz0 = 1, Meps = 0.005, Nit = 5000, Pbi = 0.2)
#' See function MixNRMI2
#'
#' @name Enzyme2.out
#' @docType data
#' @keywords datasets
#' @examples
#'
#' data(Enzyme2.out)
NULL
#' Enzyme Dataset
#'
#' Concerns the distribution of enzymatic activity in the blood, for an enzyme
#' involved in the metabolism of carcinogenetic substances, among a group of
#' 245 unrelated individuals.
#'
#'
#' @name enzyme
#' @docType data
#' @format A data frame with 244 observations on the following variable:
#' \describe{ \item{list("enzyme")}{A numeric vector.} }
#' @references Bechtel, Y. C., Bonaiti-Pellie, C., Poisson, N., Magnette, J.
#' and Bechtel, P.R. (1993). A population and family study of
#' N-acetyltransferase using caffeine urinary metabolites. Clin. Pharm. Therp.,
#' 54, 134-141.
#' @keywords datasets
#' @examples
#'
#' data(enzyme)
#' hist(enzyme)
NULL
#' Fit of MixNRMI1 function to the galaxy dataset
#'
#' This object contains the output when setting set.seed(150520) and running
#' the function MixNRMI1(galaxy, Alpha = 1, Kappa = 0.015, Gama = 0.5, distr.k = "normal", distr.p0 = "gamma", asigma = 1, bsigma = 1, delta = 7, Meps = 0.005, Nit = 5000, Pbi = 0.2)
#'
#' See function MixNRMI1.
#'
#' @name Galaxy1.out
#' @docType data
#' @keywords datasets
#' @examples
#'
#' data(Galaxy1.out)
NULL
#' Fit of MixNRMI2 function to the galaxy dataset
#'
#' This object contains the output when setting set.seed(150520) and running
#' the function Galaxy2.out <- MixNRMI2(x, Alpha = 1, Kappa = 0.007, Gama = 0.5, distr.k = "gamma", distr.py0 = "gamma", distr.pz0 = "gamma", mu.pz0 = 1, sigma.pz0 = 1, Meps = 0.005, Nit = 5000, Pbi = 0.2)
#'
#' See function MixNRMI2.
#'
#' @name Galaxy2.out
#' @docType data
#' @keywords datasets
#' @examples
#'
#' data(Galaxy2.out)
NULL
#' Galaxy Data Set
#'
#' Velocities of 82 galaxies diverging from our own galaxy.
#'
#'
#' @name galaxy
#' @docType data
#' @format A data frame with 82 observations on the following variable:
#' \describe{ \item{list("velocity")}{A numeric vector.} }
#' @references Roeder, K. (1990) "Density estimation with confidence sets
#' exemplified by superclusters and voids in the galaxies". Journal of the
#' American Statistical Association. 85, 617-624.
#' @keywords datasets
#' @examples
#'
#' data(galaxy)
#' hist(galaxy)
NULL
#' Salinity tolerance
#'
#' 72-hour acute salinity tolerance (LC50 values) of riverine
#' macro-invertebrates.
#'
#'
#' @name salinity
#' @docType data
#' @format A data frame with 108 observations on the following two variables:
#' \describe{
#' \item{left}{A numeric vector.}
#' \item{right}{A
#' numeric vector.} }
#' @references Kefford, B.J., Nugegoda, D., Metzeling, L., Fields, E. 2006.
#' Validating species sensitivity distributions using salinity tolerance of
#' riverine macroinvertebrates in the southern Murray-Darling Basin (Victoria,
#' Australia). Canadian Journal of Fisheries and Aquatic Sciences, 63,
#' 1865-1877.
#' @source \code{fitdistrplus} R-package
#' @keywords datasets
#' @examples
#'
#' data(salinity)
#' hist(salinity$left)
NULL
# File: /scratch/gouwar.j/cran-all/cranData/BNPdensity/R/BNPdensity-package.R
#' @import stats
#' @import ggplot2
#' @import compiler
get_CDF_full_BNPdensity <- function(fit, xs = seq(-5, 5, length.out = 100)) {
pmix_vec_loop(qs = xs, locations_list = fit$means, scales_list = fit$sigmas, weights_list = fit$weights, distr.k = fit$distr.k)
}
get_PDF_full_BNPdensity <- function(fit, xs = seq(-5, 5, length.out = 100)) {
dmix_vec_loop(xs = xs, locations_list = fit$means, scales_list = fit$sigmas, weights_list = fit$weights, distr.k = fit$distr.k)
}
get_quantiles_full_BNPdensity <- function(fit, ps = seq(0.01, 0.99, length.out = 100), thinning_to = 500) {
it_retained <- compute_thinning_grid(length(fit$means), thinning_to = thinning_to)
qmix_vec_loop(
ps = ps,
locations_list = fit$means[it_retained],
scales_list = fit$sigmas[it_retained],
weights_list = fit$weights[it_retained],
distr.k = fit$distr.k
)
}
get_CDF_semi_BNPdensity <- function(fit, xs = seq(-5, 5, length.out = 100)) {
fit$sigmas_filled <- fill_sigmas(fit)
pmix_vec_loop(qs = xs, locations_list = fit$means, scales_list = fit$sigmas_filled, weights_list = fit$weights, distr.k = fit$distr.k)
}
get_PDF_semi_BNPdensity <- function(fit, xs = seq(-5, 5, length.out = 100)) {
fit$sigmas_filled <- fill_sigmas(fit)
dmix_vec_loop(xs = xs, locations_list = fit$means, scales_list = fit$sigmas_filled, weights_list = fit$weights, distr.k = fit$distr.k)
}
get_quantiles_semi_BNPdensity <- function(fit, ps = seq(0.01, 0.99, length.out = 100), thinning_to = 500) {
fit$sigmas_filled <- fill_sigmas(fit)
it_retained <- compute_thinning_grid(length(fit$means), thinning_to = thinning_to)
qmix_vec_loop(
ps = ps,
locations_list = fit$means[it_retained],
scales_list = fit$sigmas_filled[it_retained],
weights_list = fit$weights[it_retained],
distr.k = fit$distr.k
)
}
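# A minimal usage sketch for these internal helpers, assuming a fit produced
# with extras = TRUE (so that fit$means, fit$sigmas and fit$weights exist):
#   data(acidity)
#   fit <- MixNRMI1(acidity, Nit = 100, extras = TRUE)     # semiparametric fit
#   xs <- seq(min(acidity), max(acidity), length.out = 50)
#   cdf <- get_CDF_semi_BNPdensity(fit, xs = xs)
#   pdf <- get_PDF_semi_BNPdensity(fit, xs = xs)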
#' Plot the empirical and fitted CDF for non censored data.
#'
#'
#' @param fit The result of the fit, obtained through the function MixNRMI1 or
#' MixNRMI2.
#' @return Plot of the empirical and fitted CDF for non censored data.
#' @examples
#'
#' set.seed(150520)
#' data(acidity)
#' out <- MixNRMI1(acidity, extras = TRUE, Nit = 10)
#' BNPdensity:::plotCDF_noncensored(out)
plotCDF_noncensored <- function(fit) {
data <- fit$data
grid <- grid_from_data(data)
if (is_semiparametric(fit)) {
cdf <- get_CDF_semi_BNPdensity(fit = fit, xs = grid)
}
else {
cdf <- get_CDF_full_BNPdensity(fit = fit, xs = grid)
}
ggplot2::ggplot(data = data.frame(data = grid, CDF = cdf), aes_string(x = "data", y = "CDF")) +
geom_line(color = "red") +
theme_classic() +
stat_ecdf(data = data.frame(data), aes(y = NULL), geom = "step") +
xlab("Data")
}
#' Plot the Turnbull CDF and fitted CDF for censored data.
#'
#'
#' @param fit The result of the fit, obtained through the function MixNRMI1cens
#' or MixNRMI2cens.
#' @return Plot of the Turnbull and fitted CDF for censored data.
#' @examples
#'
#' set.seed(150520)
#' data(salinity)
#' out <- MixNRMI1cens(salinity$left, salinity$right, extras = TRUE, Nit = 100)
#' BNPdensity:::plotCDF_censored(out)
plotCDF_censored <- function(fit) {
data <- fit$data
grid <- grid_from_data(data)
Survival_object <- survival::survfit(formula = survival::Surv(data$left, data$right, type = "interval2") ~ 1)
if (is_semiparametric(fit)) {
cdf <- get_CDF_semi_BNPdensity(fit = fit, xs = grid)
}
else {
cdf <- get_CDF_full_BNPdensity(fit = fit, xs = grid)
}
ggplot2::ggplot(data = data.frame(data = grid, CDF = cdf), aes_string(x = "data", y = "CDF")) +
geom_line(color = "red") +
theme_classic() +
geom_step(data = data.frame(x = c(Survival_object$time, max(grid)), y = c(1 - Survival_object$surv, 1)), aes_string(x = "x", y = "y")) +
xlab("Data")
}
#' Plot the density and a histogram for non censored data.
#'
#' @param fit The result of the fit, obtained through the function MixNRMI1 or
#' MixNRMI2.
#' @return Plot of the density and a histogram for non censored data.
#' @examples
#'
#' set.seed(150520)
#' data(acidity)
#' out <- MixNRMI1(acidity, extras = TRUE, Nit = 100)
#' BNPdensity:::plotPDF_noncensored(out)
plotPDF_noncensored <- function(fit) {
p <- plotPDF_censored(fit)
p$layers <- c(geom_histogram(data = data.frame(data = fit$data), aes_string(y = "..density..")), p$layers)
return(p)
}
#' Plot the density for censored data.
#'
#'
#' @param fit The result of the fit, obtained through the function MixNRMI1cens
#' or MixNRMI2cens.
#' @return Plot of the density for censored data.
#' @examples
#'
#' set.seed(150520)
#' data(salinity)
#' out <- MixNRMI1cens(xleft = salinity$left, xright = salinity$right, extras = TRUE, Nit = 100)
#' BNPdensity:::plotPDF_censored(out)
plotPDF_censored <- function(fit) {
grid <- grid_from_data(fit$data)
if (is_semiparametric(fit)) {
pdf <- get_PDF_semi_BNPdensity(fit = fit, xs = grid)
}
else {
pdf <- get_PDF_full_BNPdensity(fit = fit, xs = grid)
}
ggplot2::ggplot(data = data.frame(data = grid, PDF = pdf), aes_string(x = "data", y = "PDF")) +
geom_line(color = "red") +
theme_classic() +
xlab("Data")
}
#' Plot the percentile-percentile graph for non censored data.
#'
#'
#' @param fit The result of the fit, obtained through the function MixNRMI1 or
#' MixNRMI2.
#' @return Percentile-percentile plot for non censored data.
#' @examples
#'
#' set.seed(150520)
#' data(acidity)
#' out <- MixNRMI1(acidity, extras = TRUE, Nit = 100)
#' BNPdensity:::pp_plot_noncensored(out)
pp_plot_noncensored <- function(fit) {
data <- fit$data
if (is_semiparametric(fit)) {
cdf <- get_CDF_semi_BNPdensity(fit = fit, xs = data)
}
else {
cdf <- get_CDF_full_BNPdensity(fit = fit, xs = data)
}
ggplot2::ggplot(data = data.frame(x = cdf, y = ecdf(data)(data)), aes_string(x = "x", y = "y")) +
geom_point() +
geom_abline(slope = 1, intercept = 0, color = "red") +
theme_classic() +
xlab("Theoretical percentiles") +
ylab("Empirical percentiles")
}
#' Plot the quantile-quantile graph for non censored data.
#'
#' This function may be rather slow for many iterations/many data because it
#' relies on numerical inversion of the mixture Cumulative Distribution
#' Function.
#'
#' @param fit The result of the fit, obtained through the function MixNRMI1,
#' MixNRMI2, MixNRMI1cens or MixNRMI2cens
#' @param thinning_to How many iterations to compute the mean posterior
#' quantiles
#' @return quantile-quantile plot for non censored data.
#' @examples
#'
#'
#' ### Not run
#' # set.seed(150520)
#' # data(acidity)
#' # out <- MixNRMI1(acidity, extras = TRUE, Nit = 100)
#' # BNPdensity:::qq_plot_noncensored(out)
qq_plot_noncensored <- function(fit, thinning_to = 500) {
data <- sort(fit$data)
ndat <- length(data)
percentiles_to_compute <- 1:ndat / (ndat + 1)
if (is_semiparametric(fit)) {
theoretical_quantiles <- get_quantiles_semi_BNPdensity(fit = fit, ps = percentiles_to_compute, thinning_to = thinning_to)
}
else {
theoretical_quantiles <- get_quantiles_full_BNPdensity(fit = fit, ps = percentiles_to_compute, thinning_to = thinning_to)
}
ggplot2::ggplot(data = data.frame(x = theoretical_quantiles, y = data), aes_string(x = "x", y = "y")) +
geom_point() +
geom_abline(slope = 1, intercept = 0, color = "red") +
theme_classic() +
xlab("Theoretical quantiles") +
ylab("Empirical quantiles")
}
#' Plot the percentile-percentile graph for censored data, using the
#' Turnbull estimator for the position of the percentiles.
#'
#'
#' @param fit The result of the fit, obtained through the function MixNRMI1cens
#' or MixNRMI2cens.
#' @return Percentile-percentile graph using the Turnbull estimator
#' @examples
#'
#' set.seed(150520)
#' data(salinity)
#' out <- MixNRMI1cens(xleft = salinity$left, xright = salinity$right, extras = TRUE, Nit = 100)
#' BNPdensity:::pp_plot_censored(out)
pp_plot_censored <- function(fit) {
Survival_object <- survival::survfit(formula = survival::Surv(fit$data$left, fit$data$right, type = "interval2") ~ 1)
estimated_data <- Survival_object$time
if (is_semiparametric(fit)) {
cdf <- get_CDF_semi_BNPdensity(fit = fit, xs = estimated_data)
}
else {
cdf <- get_CDF_full_BNPdensity(fit = fit, xs = estimated_data)
}
ggplot2::ggplot(data = data.frame(x = cdf, y = 1 - Survival_object$surv), aes_string(x = "x", y = "y")) +
geom_point() +
geom_abline(slope = 1, intercept = 0, color = "red") +
theme_classic() +
xlab("Theoretical percentiles") +
ylab("Empirical percentiles (Turnbull)")
}
# min_greater_than_0 = function(x) min(x[x>0])
which_min_greater_than_0 <- function(x) which.min(ifelse(test = x < 0, yes = Inf, no = x))
compute_quantiles_from_Turnbull_estimate <- function(Survival_object) {
cdf <- 1 - Survival_object$surv
grid <- Survival_object$time
ndat <- length(grid)
percentiles_to_compute <- 1:ndat / (ndat + 1)
return(sapply(percentiles_to_compute, function(p) grid[which_min_greater_than_0(cdf - p)]))
}
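# Toy illustration of the step-CDF inversion above (hypothetical values):
#   cdf <- c(0.2, 0.5, 1.0); grid <- c(1, 2, 3)
#   which_min_greater_than_0(cdf - 0.4) # returns 2, the first index where the CDF reaches 0.4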
#' Plot the quantile-quantile graph for censored data.
#'
#' This function may be rather slow for many iterations/many data because it
#' relies on numerical inversion of the mixture Cumulative Distribution
#' Function.
#'
#' @param fit The result of the fit, obtained through the function MixNRMI1,
#' MixNRMI2, MixNRMI1cens or MixNRMI2cens
#' @param thinning_to How many iterations to compute the mean posterior
#' quantiles
#' @return quantile-quantile plot for censored data.
#' @examples
#'
#' ### Not run
#' # set.seed(150520)
#' # data(salinity)
#' # out <- MixNRMI1cens(xleft = salinity$left, xright = salinity$right, extras = TRUE, Nit = 100)
#' # BNPdensity:::qq_plot_censored(out)
qq_plot_censored <- function(fit, thinning_to = 500) {
# Survival_object <- survival::survfit(formula = survival::Surv(fit$data$left, fit$data$right, type = "interval2") ~ 1)
# estimated_data <- sort(Survival_object$time)
Turnbull_quantiles <- compute_quantiles_from_Turnbull_estimate(survival::survfit(formula = survival::Surv(fit$data$left, fit$data$right, type = "interval2") ~ 1))
ndat <- length(Turnbull_quantiles)
percentiles_to_compute <- 1:ndat / (ndat + 1)
if (is_semiparametric(fit)) {
theoretical_quantiles <- get_quantiles_semi_BNPdensity(fit = fit, ps = percentiles_to_compute, thinning_to = thinning_to)
}
else {
theoretical_quantiles <- get_quantiles_full_BNPdensity(fit = fit, ps = percentiles_to_compute, thinning_to = thinning_to)
}
ggplot2::ggplot(data = data.frame(x = theoretical_quantiles, y = Turnbull_quantiles), aes_string(x = "x", y = "y")) +
geom_point() +
geom_abline(slope = 1, intercept = 0, color = "red") +
theme_classic() +
xlab("Theoretical quantiles") +
ylab("Empirical quantiles (Turnbull)")
}
#' Plot goodness-of-fit graphical checks for non censored data
#'
#'
#' @param fit The result of the fit, obtained through the function MixNRMI1,
#' MixNRMI2, MixNRMI1cens or MixNRMI2cens
#' @param qq_plot Whether to compute the QQ-plot
#' @param thinning_to How many iterations to compute the mean posterior
#' quantiles
#' @return A density plot with histogram, a cumulative density plot with the
#' empirical cumulative distribution, and a percentile-percentile plot.
#' @examples
#'
#' set.seed(150520)
#' data(acidity)
#' out <- MixNRMI1(acidity, extras = TRUE, Nit = 100)
#' BNPdensity:::GOFplots_noncensored(out)
GOFplots_noncensored <- function(fit, qq_plot = FALSE, thinning_to = 500) {
CDFplot <- plotCDF_noncensored(fit)
PDFplot <- plotPDF_noncensored(fit)
pplot <- pp_plot_noncensored(fit)
if (qq_plot) {
qqplot <- qq_plot_noncensored(fit, thinning_to = thinning_to)
gridExtra::grid.arrange(PDFplot, CDFplot, pplot, qqplot)
}
else {
gridExtra::grid.arrange(PDFplot, CDFplot, pplot)
}
}
#' Plot goodness-of-fit graphical checks for censored data
#'
#'
#' @param fit The result of the fit, obtained through the function MixNRMI1,
#' MixNRMI2, MixNRMI1cens or MixNRMI2cens
#' @param qq_plot Whether to compute the QQ-plot
#' @param thinning_to How many iterations to compute the mean posterior
#' quantiles
#' @return A density plot, a cumulative density plot with the Turnbull
#' cumulative distribution, and a percentile-percentile plot.
#' @examples
#'
#' set.seed(150520)
#' data(salinity)
#' out <- MixNRMI1cens(salinity$left, salinity$right, extras = TRUE, Nit = 100)
#' BNPdensity:::GOFplots_censored(out)
GOFplots_censored <- function(fit, qq_plot = FALSE, thinning_to = 500) {
CDFplot <- plotCDF_censored(fit)
PDFplot <- plotPDF_censored(fit)
pplot <- pp_plot_censored(fit)
if (qq_plot) {
qqplot <- qq_plot_censored(fit, thinning_to = thinning_to)
gridExtra::grid.arrange(PDFplot, CDFplot, pplot, qqplot)
}
else {
gridExtra::grid.arrange(PDFplot, CDFplot, pplot)
}
}
#' Plot goodness-of-fit graphical checks for censored or non censored data
#'
#'
#' @param fit The result of the fit, obtained through the function MixNRMI1,
#' MixNRMI2, MixNRMI1cens or MixNRMI2cens
#' @param qq_plot Whether to compute the QQ-plot
#' @param thinning_to How many iterations to compute the mean posterior
#' quantiles
#' @return A density plot, a cumulative density plot with the Turnbull
#' cumulative distribution, a percentile-percentile plot, and potentially a
#' quantile-quantile plot.
#' @examples
#'
#' set.seed(150520)
#' data(salinity)
#' out <- MixNRMI1cens(salinity$left, salinity$right, extras = TRUE, Nit = 100)
#' GOFplots(out)
#' @export GOFplots
GOFplots <- function(fit, qq_plot = FALSE, thinning_to = 500) {
if (is_censored(fit$data)) {
GOFplots_censored(fit, qq_plot = qq_plot, thinning_to = thinning_to)
}
else {
GOFplots_noncensored(fit, qq_plot = qq_plot, thinning_to = thinning_to)
}
}
# File: /scratch/gouwar.j/cran-all/cranData/BNPdensity/R/GOFplots.R
#' Normalized Random Measures Mixture of Type I
#'
#' Bayesian nonparametric estimation based on normalized measures driven
#' mixtures for locations.
#'
#' This generic function fits a normalized random measure (NRMI) mixture model
#' for density estimation (James et al. 2009). Specifically, the model assumes
#' a normalized generalized gamma (NGG) prior for the locations (means) of the
#' mixture kernel and a parametric prior for the common smoothing parameter
#' sigma, leading to a semiparametric mixture model.
#'
#' The details of the model are: \deqn{X_i|Y_i,\sigma \sim k(\cdot
#' |Y_i,\sigma)}{X_i|Y_i,sigma ~ k(.|Y_i,sigma)} \deqn{Y_i|P \sim P,\quad
#' i=1,\dots,n}{Y_i|P ~ P, i=1,...,n} \deqn{P \sim \textrm{NGG(\texttt{Alpha,
#' Kappa, Gama; P\_0})}}{P ~ NGG(Alpha, Kappa, Gama; P_0)} \deqn{\sigma \sim
#' \textrm{Gamma(asigma, bsigma)}}{sigma ~ Gamma(asigma, bsigma)} where
#' \eqn{X_i}'s are the observed data, \eqn{Y_i}'s are latent (location)
#' variables, \code{sigma} is the smoothing parameter, \code{k} is a parametric
#' kernel parameterized in terms of mean and standard deviation, \code{(Alpha,
#' Kappa, Gama; P_0)} are the parameters of the NGG prior with \code{P_0} being
#' the centering measure whose parameters are assigned vague hyper prior
#' distributions, and \code{(asigma,bsigma)} are the hyper-parameters of the
#' gamma prior on the smoothing parameter \code{sigma}. In particular:
#' \code{NGG(Alpha, 1, 0; P_0)} defines a Dirichlet process; \code{NGG(1,
#' Kappa, 1/2; P_0)} defines a Normalized inverse Gaussian process; and
#' \code{NGG(1, 0, Gama; P_0)} defines a normalized stable process.
#'
#' The evaluation grid ranges from \code{min(x) - epsilon} to \code{max(x) +
#' epsilon}. By default \code{epsilon=sd(x)/4}.
#'
#' @param x Numeric vector. Data set to which the density is fitted.
#' @param probs Numeric vector. Desired quantiles of the density estimates.
#' @param Alpha Numeric constant. Total mass of the centering measure. See
#' details.
#' @param Kappa Numeric positive constant. See details.
#' @param Gama Numeric constant. \eqn{0\leq \texttt{Gama} \leq 1}{0 <= Gama <=
#' 1}. See details.
#' @param distr.k The distribution name for the kernel. Allowed names are "normal", "gamma", "beta", "double exponential", "lognormal" or their common abbreviations "norm", "exp", or an integer number identifying the mixture kernel: 1 = Normal; 2 = Gamma; 3 = Beta; 4 = Double Exponential; 5 = Lognormal.
#' @param distr.p0 The distribution name for the centering measure. Allowed names are "normal", "gamma", "beta", or their common abbreviations "norm", "exp", or an integer number identifying the centering measure: 1 = Normal; 2 = Gamma; 3 = Beta.
#' @param asigma Numeric positive constant. Shape parameter of the gamma prior
#' on the standard deviation of the mixture kernel \code{distr.k}.
#' @param bsigma Numeric positive constant. Rate parameter of the gamma prior
#' on the standard deviation of the mixture kernel \code{distr.k}.
#' @param delta_S Numeric positive constant. Metropolis-Hastings proposal
#' variation coefficient for sampling sigma.
#' @param delta_U Numeric positive constant. Metropolis-Hastings proposal
#' variation coefficient for sampling the latent U.
#' @param Meps Numeric constant. Relative error of the jump sizes in the
#' continuous component of the process. Smaller values imply larger number of
#' jumps.
#' @param Nx Integer constant. Number of grid points for the evaluation of the
#' density estimate.
#' @param Nit Integer constant. Number of MCMC iterations.
#' @param Pbi Numeric constant. Burn-in period proportion of Nit.
#' @param epsilon Numeric constant. Extension to the evaluation grid range.
#' See details.
#' @param printtime Logical. If TRUE, prints out the execution time.
#' @param extras Logical. If TRUE, gives additional objects: means, weights and
#' Js.
#' @param adaptive Logical. If TRUE, uses an adaptive MCMC strategy to sample the latent U (adaptive delta_U).
#'
#' @return The function returns a MixNRMI1 object. It is based on a list with the following components:
#' \item{xx}{Numeric vector. Evaluation grid.}
#' \item{qx}{Numeric array. Matrix
#' of dimension \eqn{\texttt{Nx} \times (\texttt{length(probs)} + 1)}{Nx x
#' (length(probs)+1)} with the posterior mean and the desired quantiles input
#' in \code{probs}.}
#' \item{cpo}{Numeric vector of \code{length(x)} with
#' conditional predictive ordinates.}
#' \item{R}{Numeric vector of
#' \code{length(Nit*(1-Pbi))} with the number of mixtures components
#' (clusters).}
#' \item{S}{Numeric vector of \code{length(Nit*(1-Pbi))} with the
#' values of common standard deviation sigma.}
#' \item{U}{Numeric vector of
#' \code{length(Nit*(1-Pbi))} with the values of the latent variable U.}
#' \item{Allocs}{List of \code{length(Nit*(1-Pbi))} with the clustering
#' allocations.}
#' \item{means}{List of \code{length(Nit*(1-Pbi))} with the
#' cluster means (locations). Only if extras = TRUE.}
#' \item{weights}{List of
#' \code{length(Nit*(1-Pbi))} with the mixture weights. Only if extras = TRUE.}
#' \item{Js}{List of \code{length(Nit*(1-Pbi))} with the unnormalized weights
#' (jump sizes). Only if extras = TRUE.}
#' \item{Nm}{Integer constant. Number of
#' jumps of the continuous component of the unnormalized process.}
#' \item{Nx}{Integer constant. Number of grid points for the evaluation of the
#' density estimate.}
#' \item{Nit}{Integer constant. Number of MCMC iterations.}
#' \item{Pbi}{Numeric constant. Burn-in period proportion of \code{Nit}.}
#' \item{procTime}{Numeric vector with execution time provided by
#' \code{proc.time} function.}
#' \item{distr.k}{Integer corresponding to the kernel chosen for the mixture}
#' \item{data}{Data used for the fit}
#' \item{NRMI_params}{A named list with the parameters of the NRMI process}
#' @section Warning : The function is computing intensive. Be patient.
#' @author Barrios, E., Kon Kam King, G., Lijoi, A., Nieto-Barajas, L.E. and Prünster, I.
#' @seealso \code{\link{MixNRMI2}}, \code{\link{MixNRMI1cens}},
#' \code{\link{MixNRMI2cens}}, \code{\link{multMixNRMI1}}
#' @references 1.- Barrios, E., Lijoi, A., Nieto-Barajas, L. E. and Prünster,
#' I. (2013). Modeling with Normalized Random Measure Mixture Models.
#' Statistical Science. Vol. 28, No. 3, 313-334.
#'
#' 2.- James, L.F., Lijoi, A. and Prünster, I. (2009). Posterior analysis for
#' normalized random measure with independent increments. Scand. J. Statist 36,
#' 76-97.
#' @keywords distribution models nonparametrics
#' @examples
#'
#' ### Example 1
#' \dontrun{
#' # Data
#' data(acidity)
#' x <- acidity
#' # Fitting the model under default specifications
#' out <- MixNRMI1(x)
#' # Plotting density estimate + 95% credible interval
#' plot(out)
#' ### Example 2
#' set.seed(150520)
#' data(enzyme)
#' x <- enzyme
#' Enzyme1.out <- MixNRMI1(x, Alpha = 1, Kappa = 0.007, Gama = 0.5,
#' distr.k = "gamma", distr.p0 = "gamma",
#' asigma = 1, bsigma = 1, Meps=0.005,
#' Nit = 5000, Pbi = 0.2)
#' attach(Enzyme1.out)
#' # Plotting density estimate + 95% credible interval
#' plot(Enzyme1.out)
#' # Plotting number of clusters
#' par(mfrow = c(2, 1))
#' plot(R, type = "l", main = "Trace of R")
#' hist(R, breaks = min(R - 0.5):max(R + 0.5), probability = TRUE)
#' # Plotting sigma
#' par(mfrow = c(2, 1))
#' plot(S, type = "l", main = "Trace of sigma")
#' hist(S, nclass = 20, probability = TRUE, main = "Histogram of sigma")
#' # Plotting u
#' par(mfrow = c(2, 1))
#' plot(U, type = "l", main = "Trace of U")
#' hist(U, nclass = 20, probability = TRUE, main = "Histogram of U")
#' # Plotting cpo
#' par(mfrow = c(2, 1))
#' plot(cpo, main = "Scatter plot of CPO's")
#' boxplot(cpo, horizontal = TRUE, main = "Boxplot of CPO's")
#' print(paste("Average log(CPO)=", round(mean(log(cpo)), 4)))
#' print(paste("Median log(CPO)=", round(median(log(cpo)), 4)))
#' detach()
#' }
#'
#' ### Example 3
#' ## Do not run
#' # set.seed(150520)
#' # data(galaxy)
#' # x <- galaxy
#' # Galaxy1.out <- MixNRMI1(x, Alpha = 1, Kappa = 0.015, Gama = 0.5,
#' # distr.k = "normal", distr.p0 = "gamma",
#' # asigma = 1, bsigma = 1, delta = 7, Meps=0.005,
#' # Nit = 5000, Pbi = 0.2)
#'
#' # The output of this run is already loaded in the package
#' # To show results run the following
#' # Data
#' data(galaxy)
#' x <- galaxy
#' data(Galaxy1.out)
#' attach(Galaxy1.out)
#' # Plotting density estimate + 95% credible interval
#' plot(Galaxy1.out)
#' # Plotting number of clusters
#' par(mfrow = c(2, 1))
#' plot(R, type = "l", main = "Trace of R")
#' hist(R, breaks = min(R - 0.5):max(R + 0.5), probability = TRUE)
#' # Plotting sigma
#' par(mfrow = c(2, 1))
#' plot(S, type = "l", main = "Trace of sigma")
#' hist(S, nclass = 20, probability = TRUE, main = "Histogram of sigma")
#' # Plotting u
#' par(mfrow = c(2, 1))
#' plot(U, type = "l", main = "Trace of U")
#' hist(U, nclass = 20, probability = TRUE, main = "Histogram of U")
#' # Plotting cpo
#' par(mfrow = c(2, 1))
#' plot(cpo, main = "Scatter plot of CPO's")
#' boxplot(cpo, horizontal = TRUE, main = "Boxplot of CPO's")
#' print(paste("Average log(CPO)=", round(mean(log(cpo)), 4)))
#' print(paste("Median log(CPO)=", round(median(log(cpo)), 4)))
#' detach()
#' @export MixNRMI1
MixNRMI1 <-
function(x, probs = c(0.025, 0.5, 0.975), Alpha = 1, Kappa = 0,
Gama = 0.4, distr.k = "normal", distr.p0 = 1, asigma = 0.5, bsigma = 0.5,
delta_S = 3, delta_U = 2, Meps = 0.01, Nx = 150, Nit = 1500,
Pbi = 0.1, epsilon = NULL, printtime = TRUE, extras = TRUE, adaptive = FALSE) {
if (is.null(distr.k)) {
stop("Argument distr.k is NULL. Should be provided. See help for details.")
}
if (is.null(distr.p0)) {
stop("Argument distr.p0 is NULL. Should be provided. See help for details.")
}
distr.k <- process_dist_name(distr.k)
distr.p0 <- process_dist_name(distr.p0)
tInit <- proc.time()
n <- length(x)
y <- x
xsort <- sort(x)
y[seq(n / 2)] <- mean(xsort[seq(n / 2)])
y[-seq(n / 2)] <- mean(xsort[-seq(n / 2)])
u <- 1
sigma <- 1
if (is.null(epsilon)) {
epsilon <- sd(x) / 4
}
xx <- seq(min(x) - epsilon, max(x) + epsilon, length = Nx)
Fxx <- matrix(NA, nrow = Nx, ncol = Nit)
fx <- matrix(NA, nrow = n, ncol = Nit)
R <- seq(Nit)
S <- seq(Nit)
U <- seq(Nit)
Nmt <- seq(Nit)
Allocs <- vector(mode = "list", length = Nit)
if (adaptive) {
optimal_delta <- rep(NA, n)
}
if (extras) {
means <- vector(mode = "list", length = Nit)
weights <- vector(mode = "list", length = Nit)
Js <- vector(mode = "list", length = Nit)
if (adaptive) {
delta_Us <- seq(Nit)
}
}
mu.p0 <- mean(x)
sigma.p0 <- sd(x)
for (j in seq(Nit)) {
if (floor(j / 500) == ceiling(j / 500)) {
cat("MCMC iteration", j, "of", Nit, "\n")
}
tt <- comp1(y)
ystar <- tt$ystar
nstar <- tt$nstar
r <- tt$r
# if (is.na(optimal_delta[r])) {
# optimal_delta[r] <- compute_optimal_delta_given_r(r = r, gamma = Gama, kappa = Kappa, a = Alpha, n = n)
# }
idx <- tt$idx
Allocs[[max(1, j - 1)]] <- idx
if (Gama != 0) {
if (adaptive) {
tmp <- gs3_adaptive3(u, n = n, r = r, alpha = Alpha, kappa = Kappa, gama = Gama, delta = delta_U, U = U, iter = j, adapt = adaptive)
u <- tmp$u_prime
delta_U <- tmp$delta
}
else {
u <- gs3(u,
n = n, r = r, alpha = Alpha, kappa = Kappa,
gama = Gama, delta = delta_U
)
}
}
JiC <- MvInv(
eps = Meps, u = u, alpha = Alpha, kappa = Kappa,
gama = Gama, N = 50001
)
Nm <- length(JiC)
TauiC <- rk(Nm, distr = distr.p0, mu = mu.p0, sigma = sigma.p0)
ystar <- gs4(ystar, x, idx,
distr.k = distr.k, sigma.k = sigma,
distr.p0 = distr.p0, mu.p0 = mu.p0, sigma.p0 = sigma.p0
)
Jstar <- rgamma(r, nstar - Gama, Kappa + u)
Tau <- c(TauiC, ystar)
J <- c(JiC, Jstar)
tt <- gsHP(ystar, r, distr.p0)
mu.p0 <- tt$mu.py0
sigma.p0 <- tt$sigma.py0
y <- fcondYXA(x, distr = distr.k, Tau = Tau, J = J, sigma = sigma)
sigma <- gs5(sigma, x, y,
distr = distr.k, asigma = asigma,
bsigma = bsigma, delta = delta_S
)
Fxx[, j] <- fcondXA(xx,
distr = distr.k, Tau = Tau, J = J,
sigma = sigma
)
fx[, j] <- fcondXA(x,
distr = distr.k, Tau = Tau, J = J,
sigma = sigma
)
R[j] <- r
S[j] <- sigma
U[j] <- u
Nmt[j] <- Nm
if (extras) {
means[[j]] <- Tau
weights[[j]] <- J / sum(J)
Js[[j]] <- J
if (adaptive) {
delta_Us[j] <- delta_U
}
}
}
tt <- comp1(y)
Allocs[[Nit]] <- tt$idx
biseq <- seq(floor(Pbi * Nit))
Fxx <- Fxx[, -biseq]
qx <- as.data.frame(t(apply(Fxx, 1, quantile, probs = probs)))
names(qx) <- paste("q", probs, sep = "")
qx <- cbind(mean = apply(Fxx, 1, mean), qx)
R <- R[-biseq]
S <- S[-biseq]
U <- U[-biseq]
Allocs <- Allocs[-biseq]
if (extras) {
means <- means[-biseq]
weights <- weights[-biseq]
Js <- Js[-biseq]
if (adaptive) {
delta_Us <- delta_Us[-biseq]
}
}
cpo <- 1 / apply(1 / fx[, -biseq], 1, mean)
if (printtime) {
cat(" >>> Total processing time (sec.):\n")
print(procTime <- proc.time() - tInit)
}
res <- list(
xx = xx, qx = qx, cpo = cpo, R = R, S = S,
U = U, Allocs = Allocs, Nm = Nmt, Nx = Nx, Nit = Nit,
Pbi = Pbi, procTime = procTime, distr.k = distr.k, data = x,
NRMI_params = list("Alpha" = Alpha, "Kappa" = Kappa, "Gamma" = Gama)
)
if (extras) {
res$means <- means
res$weights <- weights
res$Js <- Js
if (adaptive) {
res$delta_Us <- delta_Us
}
}
return(structure(res, class = "NRMI1"))
}
#' Plot the density estimate and the 95\% credible interval
#'
#' The density estimate is the mean posterior density computed on the data
#' points.
#'
#'
#' @param x A fitted object of class NRMI1
#' @param ... Further arguments to be passed to generic function, ignored at the moment
#' @return A graph with the density estimate, the 95\% credible interval and a
#' histogram of the data
#' @export
#' @examples
#'
#' ## Example for non censored data
#'
#' data(acidity)
#' out <- MixNRMI1(acidity, Nit = 50)
#' plot(out)
#'
#' ## Example for censored data
#'
#' data(salinity)
#' out <- MixNRMI1cens(salinity$left, salinity$right, Nit = 50)
#' plot(out)
plot.NRMI1 <- function(x, ...) {
if (is_censored(x$data)) {
plotfit_censored(x)
}
else {
plotfit_noncensored(x)
}
}
#' S3 method for class 'NRMI1'
#'
#' @param x A fitted object of class NRMI1
#' @param ... Further arguments to be passed to generic function, ignored at the moment
#'
#' @return A visualization of the important information about the object
#' @export
#'
#' @examples
#'
#' ## Example for non censored data
#'
#' data(acidity)
#' out <- MixNRMI1(acidity, Nit = 50)
#' print(out)
#'
#' ## Example for censored data
#'
#' data(salinity)
#' out <- MixNRMI1cens(salinity$left, salinity$right, Nit = 50)
#' print(out)
print.NRMI1 <- function(x, ...) {
kernel_name <- tolower(give_kernel_name(x$distr.k))
writeLines(paste("Fit of a semiparametric", kernel_name, "mixture model on", length(x$data), "data points.\nThe MCMC algorithm was run for", x$Nit, "iterations with", 100 * x$Pbi, "% discarded for burn-in."))
}
#' S3 method for class 'NRMI1'
#'
#' @param object A fitted object of class NRMI1
#' @param number_of_clusters Whether to compute the optimal number of clusters, which can be a time-consuming operation (see \code{\link{compute_optimal_clustering}})
#' @param ... Further arguments to be passed to generic function, ignored at the moment
#'
#' @return Prints out the text for the summary S3 methods
#' @export
#'
#' @examples
#'
#' ## Example for non censored data
#'
#' data(acidity)
#' out <- MixNRMI1(acidity, Nit = 50)
#' summary(out)
summary.NRMI1 <- function(object, number_of_clusters = FALSE, ...) {
kernel_name <- tolower(give_kernel_name(object$distr.k))
kernel_comment <- paste("A semiparametric", kernel_name, "mixture model was used.")
NRMI_comment <- paste("Density estimation using a", comment_on_NRMI_type(object$NRMI_params))
summarytext(object, kernel_comment, NRMI_comment, number_of_clusters = number_of_clusters)
}
#' Extract the Conditional Predictive Ordinates (CPOs) from a fitted object
#'
#' @param object A fit obtained through from the functions MixNRMI1/MixNRMI1cens
#' @param ... Further arguments to be passed to generic function, ignored at the moment
#'
#' @return A vector of Conditional Predictive Ordinates (CPOs)
#' @export
#'
#' @examples
#' data(acidity)
#' out <- MixNRMI1(acidity, Nit = 50)
#' cpo(out)
cpo.NRMI1 <- function(object, ...) {
return(object$cpo)
}
# File: /scratch/gouwar.j/cran-all/cranData/BNPdensity/R/MixNRMI1.R
#' Normalized Random Measures Mixture of Type I for censored data
#'
#' Bayesian nonparametric estimation based on normalized measures driven
#' mixtures for locations.
#'
#' This generic function fits a normalized random measure (NRMI) mixture model
#' for density estimation (James et al. 2009) with censored data. Specifically,
#' the model assumes a normalized generalized gamma (NGG) prior for the
#' locations (means) of the mixture kernel and a parametric prior for the
#' common smoothing parameter sigma, leading to a semiparametric mixture model.
#'
#' This function coincides with \code{\link{MixNRMI1}} when the lower (xleft)
#' and upper (xright) censoring limits correspond to the same exact value.
#'
#' The details of the model are: \deqn{X_i|Y_i,\sigma \sim k(\cdot
#' |Y_i,\sigma)}{X_i|Y_i,sigma ~ k(.|Y_i,sigma)} \deqn{Y_i|P \sim P,\quad
#' i=1,\dots,n}{Y_i|P ~ P, i=1,...,n} \deqn{P \sim \textrm{NGG(\texttt{Alpha,
#' Kappa, Gama; P\_0})}}{P ~ NGG(Alpha, Kappa, Gama; P_0)} \deqn{\sigma \sim
#' \textrm{Gamma(asigma, bsigma)}}{sigma ~ Gamma(asigma, bsigma)} where
#' \eqn{X_i}'s are the observed data, \eqn{Y_i}'s are latent (location)
#' variables, \code{sigma} is the smoothing parameter, \code{k} is a parametric
#' kernel parameterized in terms of mean and standard deviation, \code{(Alpha,
#' Kappa, Gama; P_0)} are the parameters of the NGG prior with \code{P_0} being
#' the centering measure whose parameters are assigned vague hyper prior
#' distributions, and \code{(asigma,bsigma)} are the hyper-parameters of the
#' gamma prior on the smoothing parameter \code{sigma}. In particular:
#' \code{NGG(Alpha, 1, 0; P_0)} defines a Dirichlet process; \code{NGG(1,
#' Kappa, 1/2; P_0)} defines a Normalized inverse Gaussian process; and
#' \code{NGG(1, 0, Gama; P_0)} defines a normalized stable process.
#'
#' The evaluation grid ranges from \code{min(x) - epsilon} to \code{max(x) +
#' epsilon}. By default \code{epsilon=sd(x)/4}.
#'
#' @param xleft Numeric vector. Lower limit of interval censoring. For exact
#' data the same as xright
#' @param xright Numeric vector. Upper limit of interval censoring. For exact
#' data the same as xleft.
#' @param probs Numeric vector. Desired quantiles of the density estimates.
#' @param Alpha Numeric constant. Total mass of the centering measure. See
#' details.
#' @param Kappa Numeric positive constant. See details.
#' @param Gama Numeric constant. \eqn{0\leq \texttt{Gama} \leq 1}{0 <= Gama <=
#' 1}. See details.
#' @param distr.k The distribution name for the kernel. Allowed names are "normal", "gamma", "beta", "double exponential", "lognormal" or their common abbreviations "norm", "exp", or an integer number identifying the mixture kernel: 1 = Normal; 2 = Gamma; 3 = Beta; 4 = Double Exponential; 5 = Lognormal.
#' @param distr.p0 The distribution name for the centering measure. Allowed names are "normal", "gamma", "beta", or their common abbreviations "norm", "exp", or an integer number identifying the centering measure: 1 = Normal; 2 = Gamma; 3 = Beta.
#' @param asigma Numeric positive constant. Shape parameter of the gamma prior
#' on the standard deviation of the mixture kernel \code{distr.k}.
#' @param bsigma Numeric positive constant. Rate parameter of the gamma prior
#' on the standard deviation of the mixture kernel \code{distr.k}.
#' @param delta_S Numeric positive constant. Metropolis-Hastings proposal
#' variation coefficient for sampling sigma.
#' @param delta_U Numeric positive constant. Metropolis-Hastings proposal
#' variation coefficient for sampling the latent U.
#' @param Meps Numeric constant. Relative error of the jump sizes in the
#' continuous component of the process. Smaller values imply larger number of
#' jumps.
#' @param Nx Integer constant. Number of grid points for the evaluation of the
#' density estimate.
#' @param Nit Integer constant. Number of MCMC iterations.
#' @param Pbi Numeric constant. Burn-in period proportion of Nit.
#' @param epsilon Numeric constant. Extension to the evaluation grid range.
#' See details.
#' @param printtime Logical. If TRUE, prints out the execution time.
#' @param extras Logical. If TRUE, gives additional objects: means, weights and
#' Js.
#' @param adaptive Logical. If TRUE, uses an adaptive MCMC strategy to sample the latent U (adaptive delta_U).
#'
#' @return The function returns a list with the following components:
#' \item{xx}{Numeric vector. Evaluation grid.} \item{qx}{Numeric array. Matrix
#' of dimension \eqn{\texttt{Nx} \times (\texttt{length(probs)} + 1)}{Nx x
#' (length(probs)+1)} with the posterior mean and the desired quantiles input
#' in \code{probs}.} \item{cpo}{Numeric vector of \code{length(x)} with
#' conditional predictive ordinates.} \item{R}{Numeric vector of
#' \code{length(Nit*(1-Pbi))} with the number of mixtures components
#' (clusters).} \item{S}{Numeric vector of \code{length(Nit*(1-Pbi))} with the
#' values of common standard deviation sigma.} \item{U}{Numeric vector of
#' \code{length(Nit*(1-Pbi))} with the values of the latent variable U.}
#' \item{Allocs}{List of \code{length(Nit*(1-Pbi))} with the clustering
#' allocations.} \item{means}{List of \code{length(Nit*(1-Pbi))} with the
#' cluster means (locations). Only if extras = TRUE.} \item{weights}{List of
#' \code{length(Nit*(1-Pbi))} with the mixture weights. Only if extras = TRUE.}
#' \item{Js}{List of \code{length(Nit*(1-Pbi))} with the unnormalized weights
#' (jump sizes). Only if extras = TRUE.} \item{Nm}{Integer constant. Number of
#' jumps of the continuous component of the unnormalized process.}
#' \item{Nx}{Integer constant. Number of grid points for the evaluation of the
#' density estimate.} \item{Nit}{Integer constant. Number of MCMC iterations.}
#' \item{Pbi}{Numeric constant. Burn-in period proportion of \code{Nit}.}
#' \item{procTime}{Numeric vector with execution time provided by
#' \code{proc.time} function.}
#' \item{distr.k}{Integer corresponding to the kernel chosen for the mixture}
#' \item{data}{Data used for the fit}
#' \item{NRMI_params}{A named list with the parameters of the NRMI process}
#'
#' @section Warning : The function is computing intensive. Be patient.
#' @author Barrios, E., Kon Kam King, G. and Nieto-Barajas, L.E.
#' @seealso \code{\link{MixNRMI2}}, \code{\link{MixNRMI1cens}},
#' \code{\link{MixNRMI2cens}}, \code{\link{multMixNRMI1}}
#' @references 1.- Barrios, E., Lijoi, A., Nieto-Barajas, L. E. and Prünster,
#' I. (2013). Modeling with Normalized Random Measure Mixture Models.
#' Statistical Science. Vol. 28, No. 3, 313-334.
#'
#' 2.- James, L.F., Lijoi, A. and Prünster, I. (2009). Posterior analysis for
#' normalized random measure with independent increments. Scand. J. Statist 36,
#' 76-97.
#'
#' 3.- Kon Kam King, G., Arbel, J. and Prünster, I. (2016). Species
#' Sensitivity Distribution revisited: a Bayesian nonparametric approach. In
#' preparation.
#' @keywords distribution models nonparametrics
#' @examples
#'
#' ### Example 1
#' \dontrun{
#' # Data
#' data(acidity)
#' x <- acidity
#' # Fitting the model under default specifications
#' out <- MixNRMI1cens(x, x)
#' # Plotting density estimate + 95% credible interval
#' plot(out)
#' }
#'
#' \dontrun{
#' ### Example 2
#' # Data
#' data(salinity)
#' # Fitting the model under default specifications
#' out <- MixNRMI1cens(xleft = salinity$left, xright = salinity$right, Nit = 5000)
#' # Plotting density estimate + 95% credible interval
#' attach(out)
#' plot(out)
#' # Plotting number of clusters
#' par(mfrow = c(2, 1))
#' plot(R, type = "l", main = "Trace of R")
#' hist(R, breaks = min(R - 0.5):max(R + 0.5), probability = TRUE)
#' detach()
#' }
#'
#' @export MixNRMI1cens
MixNRMI1cens <-
function(xleft, xright, probs = c(0.025, 0.5, 0.975), Alpha = 1,
Kappa = 0, Gama = 0.4, distr.k = "normal", distr.p0 = "normal", asigma = 0.5,
bsigma = 0.5, delta_S = 3, delta_U = 2, Meps = 0.01, Nx = 150,
Nit = 1500, Pbi = 0.1, epsilon = NULL, printtime = TRUE,
extras = TRUE, adaptive = FALSE) {
if (is.null(distr.k)) {
stop("Argument distr.k is NULL. Should be provided. See help for details.")
}
if (is.null(distr.p0)) {
stop("Argument distr.p0 is NULL. Should be provided. See help for details.")
}
distr.k <- process_dist_name(distr.k)
distr.p0 <- process_dist_name(distr.p0)
tInit <- proc.time()
cens_data_check(xleft, xright)
xpoint <- as.numeric(na.omit(0.5 * (xleft + xright)))
npoint <- length(xpoint)
censor_code <- censor_code_rl(xleft, xright)
censor_code_filters <- lapply(0:3, FUN = function(x) {
censor_code ==
x
})
names(censor_code_filters) <- 0:3
n <- length(xleft)
y <- seq(n)
xsort <- sort(xpoint)
y[seq(n / 2)] <- mean(xsort[seq(npoint / 2)])
y[-seq(n / 2)] <- mean(xsort[-seq(npoint / 2)])
u <- 1
sigma <- sd(xpoint)
if (is.null(epsilon)) {
epsilon <- sd(xpoint) / 4
}
xx <- seq(min(xpoint) - epsilon, max(xpoint) + epsilon, length = Nx)
Fxx <- matrix(NA, nrow = Nx, ncol = Nit)
fx <- matrix(NA, nrow = n, ncol = Nit)
R <- seq(Nit)
S <- seq(Nit)
U <- seq(Nit)
Nmt <- seq(Nit)
Allocs <- vector(mode = "list", length = Nit)
if (adaptive) {
optimal_delta <- rep(NA, n)
}
if (extras) {
means <- vector(mode = "list", length = Nit)
weights <- vector(mode = "list", length = Nit)
Js <- vector(mode = "list", length = Nit)
if (adaptive) {
delta_Us <- seq(Nit)
}
}
mu.p0 <- mean(xpoint)
sigma.p0 <- sd(xpoint)
for (j in seq(Nit)) {
if (floor(j / 500) == ceiling(j / 500)) {
cat("MCMC iteration", j, "of", Nit, "\n")
}
tt <- comp1(y)
ystar <- tt$ystar
nstar <- tt$nstar
r <- tt$r
# if (is.na(optimal_delta[r])) {
# optimal_delta[r] <- compute_optimal_delta_given_r(r = r, gamma = Gama, kappa = Kappa, a = Alpha, n = n)
# }
idx <- tt$idx
Allocs[[max(1, j - 1)]] <- idx
if (Gama != 0) {
if (adaptive) {
tmp <- gs3_adaptive3(u, n = n, r = r, alpha = Alpha, kappa = Kappa, gama = Gama, delta = delta_U, U = U, iter = j, adapt = adaptive)
u <- tmp$u_prime
delta_U <- tmp$delta
}
else {
u <- gs3(u,
n = n, r = r, alpha = Alpha, kappa = Kappa,
gama = Gama, delta = delta_U
)
}
}
JiC <- MvInv(
eps = Meps, u = u, alpha = Alpha, kappa = Kappa,
gama = Gama, N = 50001
)
Nm <- length(JiC)
TauiC <- rk(Nm, distr = distr.p0, mu = mu.p0, sigma = sigma.p0)
ystar <- gs4cens2(
ystar = ystar, xleft = xleft, xright = xright,
censor_code = censor_code, idx = idx, distr.k = distr.k,
sigma.k = sigma, distr.p0 = distr.p0, mu.p0 = mu.p0,
sigma.p0 = sigma.p0
)
Jstar <- rgamma(r, nstar - Gama, Kappa + u)
Tau <- c(TauiC, ystar)
J <- c(JiC, Jstar)
tt <- gsHP(ystar, r, distr.p0)
mu.p0 <- tt$mu.py0
sigma.p0 <- tt$sigma.py0
y <- fcondYXAcens2(
xleft = xleft, xright = xright, censor_code_filters = censor_code_filters,
distr = distr.k, Tau = Tau, J = J, sigma = sigma
)
sigma <- gs5cens2(
sigma = sigma, xleft = xleft, xright = xright,
censor_code = censor_code, y = y, distr = distr.k,
asigma = asigma, bsigma = bsigma, delta = delta_S
)
Fxx[, j] <- fcondXA(xx,
distr = distr.k, Tau = Tau, J = J,
sigma = sigma
)
fx[, j] <- fcondYXAcens2(
xleft = xleft, xright = xright,
censor_code_filters = censor_code_filters, distr = distr.k,
Tau = Tau, J = J, sigma = sigma
)
R[j] <- r
S[j] <- sigma
U[j] <- u
Nmt[j] <- Nm
if (extras) {
means[[j]] <- Tau
weights[[j]] <- J / sum(J)
Js[[j]] <- J
if (adaptive) {
delta_Us[j] <- delta_U
}
}
}
tt <- comp1(y)
Allocs[[Nit]] <- tt$idx
biseq <- seq(floor(Pbi * Nit))
Fxx <- Fxx[, -biseq]
qx <- as.data.frame(t(apply(Fxx, 1, quantile, probs = probs)))
names(qx) <- paste("q", probs, sep = "")
qx <- cbind(mean = apply(Fxx, 1, mean), qx)
R <- R[-biseq]
S <- S[-biseq]
U <- U[-biseq]
Allocs <- Allocs[-biseq]
if (extras) {
means <- means[-biseq]
weights <- weights[-biseq]
Js <- Js[-biseq]
if (adaptive) {
delta_Us <- delta_Us[-biseq]
}
}
cpo <- 1 / apply(1 / fx[, -biseq], 1, mean)
if (printtime) {
cat(" >>> Total processing time (sec.):\n")
print(procTime <- proc.time() - tInit)
}
res <- list(
xx = xx, qx = qx, cpo = cpo, R = R, S = S,
U = U, Allocs = Allocs, Nm = Nmt, Nx = Nx, Nit = Nit,
Pbi = Pbi, procTime = procTime, distr.k = distr.k, data = data.frame(left = xleft, right = xright),
NRMI_params = list("Alpha" = Alpha, "Kappa" = Kappa, "Gamma" = Gama)
)
if (extras) {
res$means <- means
res$weights <- weights
res$Js <- Js
if (adaptive) {
res$delta_Us <- delta_Us
}
}
return(structure(res, class = "NRMI1"))
}
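# Usage sketch (not run): encoding conventions for censored observations, as
# implied by censor_code_rl(). Exact data have xleft == xright, right-censored
# observations have xright = NA, left-censored ones have xleft = NA, and
# interval-censored ones have xleft < xright. The values below are illustrative.
# xleft <- c(1.2, 0.8, NA, 2.0)
# xright <- c(1.2, NA, 2.5, 3.0) # exact, right-, left-, interval-censored
# fit <- MixNRMI1cens(xleft, xright, Nit = 500)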
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/MixNRMI1cens.R
|
#' Normalized Random Measures Mixture of Type II
#'
#' Bayesian nonparametric estimation based on normalized measures driven
#' mixtures for locations and scales.
#'
#' This generic function fits a normalized random measure (NRMI) mixture model
#' for density estimation (James et al. 2009). Specifically, the model assumes
#' a normalized generalized gamma (NGG) prior for both, locations (means) and
#' standard deviations, of the mixture kernel, leading to a fully nonparametric
#' mixture model.
#'
#' The details of the model are: \deqn{X_i|Y_i,Z_i \sim
#' k(\cdot|Y_i,Z_i)}{X_i|Y_i,Z_i ~ k(.|Y_i,Z_i)} \deqn{(Y_i,Z_i)|P \sim P,
#' i=1,\dots,n}{(Y_i,Z_i)|P ~ P, i=1,...,n} \deqn{P \sim
#' \textrm{NGG}(\texttt{Alpha, Kappa, Gama; P\_0})}{P ~ NGG(Alpha, Kappa, Gama;
#' P_0)} where, \eqn{X_i}'s are the observed data, \eqn{(Y_i,Z_i)}'s are
#' bivariate latent (location and scale) vectors, \code{k} is a parametric
#' kernel parameterized in terms of mean and standard deviation, \code{(Alpha,
#' Kappa, Gama; P_0)} are the parameters of the NGG prior with a bivariate
#' \code{P_0} being the centering measure with independent components, that is,
#' \eqn{P_0(Y,Z) = P_0(Y)*P_0(Z)}. The parameters of \code{P_0(Y)} are assigned
#' vague hyper prior distributions and \code{(mu.pz0,sigma.pz0)} are the
#' hyper-parameters of \code{P_0(Z)}. In particular, \code{NGG(Alpha, 1, 0;
#' P_0)} defines a Dirichlet process; \code{NGG(1, Kappa, 1/2;P_0)} defines a
#' Normalized inverse Gaussian process; and \code{NGG(1, 0, Gama; P_0)} defines
#' a normalized stable process. The evaluation grid ranges from \code{min(x) -
#' epsilon} to \code{max(x) + epsilon}. By default \code{epsilon=sd(x)/4}.
#'
#' @param x Numeric vector. Data set to which the density is fitted.
#' @param probs Numeric vector. Desired quantiles of the density estimates.
#' @param Alpha Numeric constant. Total mass of the centering measure. See
#' details.
#' @param Kappa Numeric positive constant. See details.
#' @param Gama Numeric constant. \eqn{0 \leq Gama \leq 1}{0 <= Gama <=1}. See
#' details.
#' @param distr.k The distribution name for the kernel. Allowed names are "normal", "gamma", "beta", "double exponential", "lognormal" or their common abbreviations "norm", "exp", or an integer number identifying the mixture kernel: 1 = Normal; 2 = Gamma; 3 = Beta; 4 = Double Exponential; 5 = Lognormal.
#' @param distr.py0 The distribution name for the centering measure for locations. Allowed names are "normal", "gamma", "beta", or their common abbreviations "norm", "exp", or an integer number identifying the centering measure for locations: 1 = Normal; 2 = Gamma; 3 = Beta.
#' @param distr.pz0 The distribution name for the centering measure for scales. Allowed names are "gamma", or an integer number identifying the centering measure for
#' scales: 2 = Gamma. For more options use \code{\link{MixNRMI2cens}}.
#' @param mu.pz0 Numeric constant. Prior mean of the centering measure for
#' scales.
#' @param sigma.pz0 Numeric constant. Prior standard deviation of the centering
#' measure for scales.
#' @param delta_S Numeric positive constant. Metropolis-Hastings proposal
#' variation coefficient for sampling the scales.
#' @param kappa Numeric positive constant. Metropolis-Hastings proposal
#' variation coefficient for sampling the location parameters.
#' @param delta_U Numeric positive constant. Metropolis-Hastings proposal
#' variation coefficient for sampling the latent U. If `adaptive=TRUE`, `delta_U` is the starting value for the adaptation.
#' @param Meps Numeric constant. Relative error of the jump sizes in the
#' continuous component of the process. Smaller values imply larger number of
#' jumps.
#' @param Nx Integer constant. Number of grid points for the evaluation of the
#' density estimate.
#' @param Nit Integer constant. Number of MCMC iterations.
#' @param Pbi Numeric constant. Burn-in period proportion of \code{Nit}.
#' @param epsilon Numeric constant. Extension to the evaluation grid range.
#' See details.
#' @param printtime Logical. If TRUE, prints out the execution time.
#' @param extras Logical. If TRUE, gives additional objects: means, sigmas,
#' weights and Js.
#' @param adaptive Logical. If TRUE, uses an adaptive MCMC strategy to sample the latent U (adaptive delta_U).
#'
#' @return The function returns a list with the following components:
#' \item{xx}{Numeric vector. Evaluation grid.}
#' \item{qx}{Numeric array. Matrix
#' of dimension \eqn{\texttt{Nx} \times (\texttt{length(probs)} + 1)}{Nx x
#' (length(probs)+1)} with the posterior mean and the desired quantiles input
#' in \code{probs}.}
#' \item{cpo}{Numeric vector of \code{length(x)} with
#' conditional predictive ordinates.}
#' \item{R}{Numeric vector of
#' \code{length(Nit*(1-Pbi))} with the number of mixtures components
#' (clusters).}
#' \item{U}{Numeric vector of \code{length(Nit*(1-Pbi))} with the
#' values of the latent variable U.}
#' \item{Allocs}{List of
#' \code{length(Nit*(1-Pbi))} with the clustering allocations.}
#' \item{means}{List of \code{length(Nit*(1-Pbi))} with the cluster means
#' (locations). Only if extras = TRUE.}
#' \item{sigmas}{Numeric vector of
#' \code{length(Nit*(1-Pbi))} with the cluster standard deviations. Only if
#' extras = TRUE.}
#' \item{weights}{List of \code{length(Nit*(1-Pbi))} with the
#' mixture weights. Only if extras = TRUE.}
#' \item{Js}{List of
#' \code{length(Nit*(1-Pbi))} with the unnormalized weights (jump sizes). Only
#' if extras = TRUE.}
#' \item{Nm}{Integer constant. Number of jumps of the
#' continuous component of the unnormalized process.}
#' \item{delta_Us}{List of
#' \code{length(Nit*(1-Pbi))} with the sequence of adapted delta_U used in the MH step for the latent variable U. Only if extras = TRUE and adaptive = TRUE.}
#' \item{Nx}{Integer
#' constant. Number of grid points for the evaluation of the density estimate.}
#' \item{Nit}{Integer constant. Number of MCMC iterations.}
#' \item{Pbi}{Numeric constant. Burn-in period proportion of \code{Nit}.}
#' \item{procTime}{Numeric vector with execution time provided by \code{proc.time} function.}
#' \item{distr.k}{Integer corresponding to the kernel chosen for the mixture}
#' \item{data}{Data used for the fit}
#' \item{NRMI_params}{A named list with the parameters of the NRMI process}
#' @section Warning: The function is computationally intensive. Be patient.
#' @author Barrios, E., Kon Kam King, G., Lijoi, A., Nieto-Barajas, L.E. and Prünster, I.
#' @seealso \code{\link{MixNRMI2}}, \code{\link{MixNRMI1cens}},
#' \code{\link{MixNRMI2cens}}, \code{\link{multMixNRMI1}}
#' @references 1.- Barrios, E., Lijoi, A., Nieto-Barajas, L. E. and Prünster,
#' I. (2013). Modeling with Normalized Random Measure Mixture Models.
#' Statistical Science. Vol. 28, No. 3, 313-334.
#'
#' 2.- James, L.F., Lijoi, A. and Prünster, I. (2009). Posterior analysis for
#' normalized random measure with independent increments. Scand. J. Statist 36,
#' 76-97.
#'
#' 3.- Arbel, J., Kon Kam King, G., Lijoi, A., Nieto-Barajas, L.E. and Prünster, I. (2021). BNPdensity: a package for Bayesian Nonparametric density estimation using Normalised Random Measures with Independent Increments. Australian and New Zealand Journal of Statistics, to appear.
#' @keywords distribution models nonparametrics
#' @examples
#' \dontrun{
#' ### Example 1
#' # Data
#' data(acidity)
#' x <- acidity
#' # Fitting the model under default specifications
#' out <- MixNRMI2(x)
#' # Plotting density estimate + 95% credible interval
#' plot(out)
#' }
#'
#' ### Example 2
#' ## Do not run
#' # set.seed(150520)
#' # data(enzyme)
#' # x <- enzyme
#' # Enzyme2.out <- MixNRMI2(x, Alpha = 1, Kappa = 0.007, Gama = 0.5,
#' # distr.k = "gamma", distr.py0 = "gamma",
#' # distr.pz0 = "gamma", mu.pz0 = 1, sigma.pz0 = 1, Meps=0.005,
#' # Nit = 5000, Pbi = 0.2)
#' # The output of this run is already loaded in the package
#' # To show results run the following
#' # Data
#' data(enzyme)
#' x <- enzyme
#' data(Enzyme2.out)
#' attach(Enzyme2.out)
#' # Plotting density estimate + 95% credible interval
#' plot(Enzyme2.out)
#' # Plotting number of clusters
#' par(mfrow = c(2, 1))
#' plot(R, type = "l", main = "Trace of R")
#' hist(R, breaks = min(R - 0.5):max(R + 0.5), probability = TRUE)
#' # Plotting u
#' par(mfrow = c(2, 1))
#' plot(U, type = "l", main = "Trace of U")
#' hist(U, nclass = 20, probability = TRUE, main = "Histogram of U")
#' # Plotting cpo
#' par(mfrow = c(2, 1))
#' plot(cpo, main = "Scatter plot of CPO's")
#' boxplot(cpo, horizontal = TRUE, main = "Boxplot of CPO's")
#' print(paste("Average log(CPO)=", round(mean(log(cpo)), 4)))
#' print(paste("Median log(CPO)=", round(median(log(cpo)), 4)))
#' detach()
#'
#' ### Example 3
#' ## Do not run
#' # set.seed(150520)
#' # data(galaxy)
#' # x <- galaxy
#' # Galaxy2.out <- MixNRMI2(x, Alpha = 1, Kappa = 0.015, Gama = 0.5,
#' # distr.k = "normal", distr.py0 = "gamma",
#' # distr.pz0 = "gamma", mu.pz0 = 1, sigma.pz0 = 1, Meps=0.005,
#' # Nit = 5000, Pbi = 0.2)
#' # The output of this run is already loaded in the package
#' # To show results run the following
#' # Data
#' data(galaxy)
#' x <- galaxy
#' data(Galaxy2.out)
#' attach(Galaxy2.out)
#' # Plotting density estimate + 95% credible interval
#' plot(Galaxy2.out)
#' # Plotting number of clusters
#' par(mfrow = c(2, 1))
#' plot(R, type = "l", main = "Trace of R")
#' hist(R, breaks = min(R - 0.5):max(R + 0.5), probability = TRUE)
#' # Plotting u
#' par(mfrow = c(2, 1))
#' plot(U, type = "l", main = "Trace of U")
#' hist(U, nclass = 20, probability = TRUE, main = "Histogram of U")
#' # Plotting cpo
#' par(mfrow = c(2, 1))
#' plot(cpo, main = "Scatter plot of CPO's")
#' boxplot(cpo, horizontal = TRUE, main = "Boxplot of CPO's")
#' print(paste("Average log(CPO)=", round(mean(log(cpo)), 4)))
#' print(paste("Median log(CPO)=", round(median(log(cpo)), 4)))
#' detach()
#' @export MixNRMI2
MixNRMI2 <-
function(x, probs = c(0.025, 0.5, 0.975), Alpha = 1, Kappa = 0,
Gama = 0.4, distr.k = "normal", distr.py0 = "normal", distr.pz0 = "gamma", mu.pz0 = 3,
sigma.pz0 = sqrt(10), delta_S = 4, kappa = 2, delta_U = 2, Meps = 0.01,
Nx = 150, Nit = 1500, Pbi = 0.1, epsilon = NULL, printtime = TRUE,
extras = TRUE, adaptive = FALSE) {
if (is.null(distr.k)) {
stop("Argument distr.k is NULL. Should be provided. See help for details.")
}
if (is.null(distr.py0)) {
stop("Argument distr.py0 is NULL. Should be provided. See help for details.")
}
distr.k <- process_dist_name(distr.k)
distr.py0 <- process_dist_name(distr.py0)
distr.pz0 <- process_dist_name(distr.pz0)
tInit <- proc.time()
n <- length(x)
y <- x
xsort <- sort(x)
y[seq(n / 2)] <- mean(xsort[seq(n / 2)])
y[-seq(n / 2)] <- mean(xsort[-seq(n / 2)])
z <- rep(1, n)
u <- 1
if (is.null(epsilon)) {
epsilon <- sd(x) / 4
}
xx <- seq(min(x) - epsilon, max(x) + epsilon, length = Nx)
Fxx <- matrix(NA, nrow = Nx, ncol = Nit)
fx <- matrix(NA, nrow = n, ncol = Nit)
R <- seq(Nit)
U <- seq(Nit)
Nmt <- seq(Nit)
Allocs <- vector(mode = "list", length = Nit)
if (adaptive) {
optimal_delta <- rep(NA, n)
}
if (extras) {
means <- vector(mode = "list", length = Nit)
sigmas <- vector(mode = "list", length = Nit)
weights <- vector(mode = "list", length = Nit)
Js <- vector(mode = "list", length = Nit)
if (adaptive) {
delta_Us <- seq(Nit)
}
}
mu.py0 <- mean(x)
sigma.py0 <- sd(x)
for (j in seq(Nit)) {
if (j %% 500 == 0) {
cat("MCMC iteration", j, "of", Nit, "\n")
}
tt <- comp2(y, z)
ystar <- tt$ystar
zstar <- tt$zstar
nstar <- tt$nstar
rstar <- tt$rstar
idx <- tt$idx
Allocs[[max(1, j - 1)]] <- idx
# if (is.na(optimal_delta[rstar])) {
# optimal_delta[rstar] <- compute_optimal_delta_given_r(r = rstar, gamma = Gama, kappa = Kappa, a = Alpha, n = n)
# }
if (Gama != 0) {
if (adaptive) {
tmp <- gs3_adaptive3(u, n = n, r = rstar, alpha = Alpha, kappa = Kappa, gama = Gama, delta = delta_U, U = U, iter = j, adapt = adaptive)
u <- tmp$u_prime
delta_U <- tmp$delta
}
else {
u <- gs3(u,
n = n, r = rstar, alpha = Alpha, kappa = Kappa,
gama = Gama, delta = delta_U
)
}
}
JiC <- MvInv(
eps = Meps, u = u, alpha = Alpha, kappa = Kappa,
gama = Gama, N = 50001
)
Nm <- length(JiC)
TauyC <- rk(Nm, distr = distr.py0, mu = mu.py0, sigma = sigma.py0)
TauzC <- rk(Nm, distr = distr.pz0, mu = mu.pz0, sigma = sigma.pz0)
tt <- gsYZstar(ystar, zstar, nstar, rstar, idx, x, delta_S,
kappa,
distr.k = distr.k, distr.py0 = distr.py0,
mu.py0 = mu.py0, sigma.py0 = sigma.py0, distr.pz0 = distr.pz0,
mu.pz0 = mu.pz0, sigma.pz0 = sigma.pz0
)
ystar <- tt$ystar
zstar <- tt$zstar
tt <- gsHP(ystar, rstar, distr.py0)
mu.py0 <- tt$mu.py0
sigma.py0 <- tt$sigma.py0
Jstar <- rgamma(rstar, nstar - Gama, Kappa + u)
Tauy <- c(TauyC, ystar)
Tauz <- c(TauzC, zstar)
J <- c(JiC, Jstar)
tt <- fcondYZXA(x, distr = distr.k, Tauy, Tauz, J)
y <- tt[, 1]
z <- tt[, 2]
Fxx[, j] <- fcondXA2(xx,
distr = distr.k, Tauy, Tauz,
J
)
fx[, j] <- fcondXA2(x, distr = distr.k, Tauy, Tauz, J)
R[j] <- rstar
U[j] <- u
Nmt[j] <- Nm
if (extras) {
means[[j]] <- Tauy
sigmas[[j]] <- Tauz
weights[[j]] <- J / sum(J)
Js[[j]] <- J
if (adaptive) {
delta_Us[j] <- delta_U
}
}
}
tt <- comp2(y, z)
Allocs[[Nit]] <- tt$idx
biseq <- seq(floor(Pbi * Nit))
Fxx <- Fxx[, -biseq]
qx <- as.data.frame(t(apply(Fxx, 1, quantile, probs = probs)))
names(qx) <- paste("q", probs, sep = "")
qx <- cbind(mean = apply(Fxx, 1, mean), qx)
R <- R[-biseq]
U <- U[-biseq]
Allocs <- Allocs[-biseq]
if (extras) {
means <- means[-biseq]
sigmas <- sigmas[-biseq]
weights <- weights[-biseq]
Js <- Js[-biseq]
if (adaptive) {
delta_Us <- delta_Us[-biseq]
}
}
cpo <- 1 / apply(1 / fx[, -biseq], 1, mean)
if (printtime) {
cat(" >>> Total processing time (sec.):\n")
print(procTime <- proc.time() - tInit)
}
res <- list(
xx = xx, qx = qx, cpo = cpo, R = R, U = U,
Allocs = Allocs, Nm = Nmt, Nx = Nx, Nit = Nit, Pbi = Pbi,
procTime = procTime, distr.k = distr.k, data = x,
NRMI_params = list("Alpha" = Alpha, "Kappa" = Kappa, "Gamma" = Gama)
)
if (extras) {
res$means <- means
res$sigmas <- sigmas
res$weights <- weights
res$Js <- Js
if (adaptive) {
res$delta_Us <- delta_Us
}
}
return(structure(res, class = "NRMI2"))
}
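# Usage sketch (not run): the NGG special cases described in the documentation
# above can be selected through (Alpha, Kappa, Gama); the run lengths below are
# illustrative, not tuned analyses.
# data(acidity)
# out_dp <- MixNRMI2(acidity, Alpha = 1, Kappa = 1, Gama = 0, Nit = 500) # Dirichlet process
# out_nig <- MixNRMI2(acidity, Alpha = 1, Kappa = 0.5, Gama = 0.5, Nit = 500) # normalized inverse Gaussian
# out_ns <- MixNRMI2(acidity, Alpha = 1, Kappa = 0, Gama = 0.5, Nit = 500) # normalized stable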
#' Plot the density estimate and the 95\% credible interval
#'
#' The density estimate is the posterior mean density, computed on a grid
#' spanning the range of the data.
#'
#'
#' @param x A fitted object of class NRMI2
#' @param ... Further arguments to be passed to generic function, ignored at the moment
#' @return A graph with the density estimate, the 95\% credible interval and a
#' histogram of the data
#' @export
#' @examples
#' ## Example for non censored data
#'
#' data(acidity)
#' out <- MixNRMI2(acidity, Nit = 20)
#' plot(out)
#'
#' ## Example for censored data
#'
#' data(salinity)
#' out <- MixNRMI2cens(salinity$left, salinity$right, Nit = 20)
#' plot(out)
plot.NRMI2 <- function(x, ...) {
if (is_censored(x$data)) {
plotfit_censored(x)
}
else {
plotfit_noncensored(x)
}
}
#' S3 method for class 'NRMI2'
#'
#' @param x A fitted object of class NRMI2
#' @param ... Further arguments to be passed to generic function, ignored at the moment
#'
#' @return A visualization of the important information about the object
#' @export
#'
#' @examples
#' ## Example for non censored data
#' data(acidity)
#' out <- MixNRMI2(acidity, Nit = 20)
#' print(out)
#'
#' ## Example for censored data
#' data(salinity)
#' out <- MixNRMI2cens(salinity$left, salinity$right, Nit = 20)
#' print(out)
print.NRMI2 <- function(x, ...) {
kernel_name <- tolower(give_kernel_name(x$distr.k))
writeLines(paste("Fit of a nonparametric", kernel_name, "mixture model on", length(x$data), "data points.\nThe MCMC algorithm was run for", x$Nit, "iterations with", 100 * x$Pbi, "% discarded for burn-in."))
}
#' S3 method for class 'NRMI2'
#'
#' @param object A fitted object of class NRMI2
#' @param number_of_clusters Whether to compute the optimal number of clusters, which can be a time-consuming operation (see \code{\link{compute_optimal_clustering}})
#' @param ... Further arguments to be passed to generic function, ignored at the moment
#'
#' @return Prints out the text for the summary S3 methods
#' @export
#'
#' @examples
#' data(acidity)
#' out <- MixNRMI2(acidity, Nit = 20)
#' summary(out)
#'
#' data(salinity)
#' out <- MixNRMI2cens(salinity$left, salinity$right, Nit = 20)
#' summary(out)
summary.NRMI2 <- function(object, number_of_clusters = FALSE, ...) {
kernel_name <- tolower(give_kernel_name(object$distr.k))
kernel_comment <- paste("A nonparametric", kernel_name, "mixture model was used.")
NRMI_comment <- paste("Density estimation using a", comment_on_NRMI_type(object$NRMI_params))
summarytext(object, kernel_comment, NRMI_comment, number_of_clusters = number_of_clusters)
}
#' Extract the Conditional Predictive Ordinates (CPOs) from a fitted object
#'
#' @param object A fit obtained from the function MixNRMI2/MixNRMI2cens
#' @param ... Further arguments to be passed to generic function, ignored at the moment
#'
#' @return A vector of Conditional Predictive Ordinates (CPOs)
#' @export
#'
#' @examples
#' data(acidity)
#' out <- MixNRMI2(acidity, Nit = 50)
#' cpo(out)
cpo.NRMI2 <- function(object, ...) {
return(object$cpo)
}
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/MixNRMI2.R
|
#' Normalized Random Measures Mixture of Type II for censored data
#'
#' Bayesian nonparametric estimation based on normalized measures driven
#' mixtures for locations and scales.
#'
#' This generic function fits a normalized random measure (NRMI) mixture model
#' for density estimation (James et al. 2009). Specifically, the model assumes
#' a normalized generalized gamma (NGG) prior for both, locations (means) and
#' standard deviations, of the mixture kernel, leading to a fully nonparametric
#' mixture model.
#'
#' The details of the model are: \deqn{X_i|Y_i,Z_i \sim
#' k(\cdot|Y_i,Z_i)}{X_i|Y_i,Z_i ~ k(.|Y_i,Z_i)} \deqn{(Y_i,Z_i)|P \sim P,
#' i=1,\dots,n}{(Y_i,Z_i)|P ~ P, i=1,...,n} \deqn{P \sim
#' \textrm{NGG}(\texttt{Alpha, Kappa, Gama; P\_0})}{P ~ NGG(Alpha, Kappa, Gama;
#' P_0)} where, \eqn{X_i}'s are the observed data, \eqn{(Y_i,Z_i)}'s are
#' bivariate latent (location and scale) vectors, \code{k} is a parametric
#' kernel parameterized in terms of mean and standard deviation, \code{(Alpha,
#' Kappa, Gama; P_0)} are the parameters of the NGG prior with a bivariate
#' \code{P_0} being the centering measure with independent components, that is,
#' \eqn{P_0(Y,Z) = P_0(Y)*P_0(Z)}. The parameters of \code{P_0(Y)} are assigned
#' vague hyper prior distributions and \code{(mu.pz0,sigma.pz0)} are the
#' hyper-parameters of \code{P_0(Z)}. In particular, \code{NGG(Alpha, 1, 0;
#' P_0)} defines a Dirichlet process; \code{NGG(1, Kappa, 1/2;P_0)} defines a
#' Normalized inverse Gaussian process; and \code{NGG(1, 0, Gama; P_0)} defines
#' a normalized stable process. The evaluation grid ranges from \code{min(x) -
#' epsilon} to \code{max(x) + epsilon}. By default \code{epsilon=sd(x)/4}.
#'
#' @param xleft Numeric vector. Lower limit of interval censoring. For exact
#' data, the same as xright.
#' @param xright Numeric vector. Upper limit of interval censoring. For exact
#' data, the same as xleft.
#' @param probs Numeric vector. Desired quantiles of the density estimates.
#' @param Alpha Numeric constant. Total mass of the centering measure. See
#' details.
#' @param Kappa Numeric positive constant. See details.
#' @param Gama Numeric constant. \eqn{0 \leq Gama \leq 1}{0 <= Gama <=1}. See
#' details.
#' @param distr.k The distribution name for the kernel. Allowed names are "normal", "gamma", "beta", "double exponential", "lognormal" or their common abbreviations "norm", "exp", or an integer number identifying the mixture kernel: 1 = Normal; 2 = Gamma; 3 = Beta; 4 = Double Exponential; 5 = Lognormal.
#' @param distr.py0 The distribution name for the centering measure for locations. Allowed names are "normal", "gamma", "beta", or their common abbreviations "norm", "exp", or an integer number identifying the centering measure for locations: 1 = Normal; 2 = Gamma; 3 = Beta.
#' @param distr.pz0 The distribution name for the centering measure for scales. Allowed names are "gamma", "lognormal", "half-Cauchy", "half-normal", "half-student", "uniform" and "truncated normal", or their common abbreviations "norm", "exp", "lnorm", "halfcauchy", "halfnorm", "halft" and "unif", or an integer number identifying the centering measure for scales: 2 = Gamma, 5 = Lognormal, 6 = Half Cauchy, 7 = Half Normal, 8 = Half Student-t, 9 = Uniform, 10 = Truncated Normal.
#' @param mu.pz0 Numeric constant. Prior mean of the centering measure for
#' scales.
#' @param sigma.pz0 Numeric constant. Prior standard deviation of the centering
#' measure for scales.
#' @param delta_S Numeric positive constant. Metropolis-Hastings proposal
#' variation coefficient for sampling the scales.
#' @param kappa Numeric positive constant. Metropolis-Hastings proposal
#' variation coefficient for sampling the location parameters.
#' @param delta_U Numeric positive constant. Metropolis-Hastings proposal
#' variation coefficient for sampling the latent U. If `adaptive=TRUE`, `delta_U` is the starting value for the adaptation.
#' @param Meps Numeric constant. Relative error of the jump sizes in the
#' continuous component of the process. Smaller values imply larger number of
#' jumps.
#' @param Nx Integer constant. Number of grid points for the evaluation of the
#' density estimate.
#' @param Nit Integer constant. Number of MCMC iterations.
#' @param Pbi Numeric constant. Burn-in period proportion of \code{Nit}.
#' @param epsilon Numeric constant. Extension to the evaluation grid range.
#' See details.
#' @param printtime Logical. If TRUE, prints out the execution time.
#' @param extras Logical. If TRUE, gives additional objects: means, sigmas,
#' weights and Js.
#' @param adaptive Logical. If TRUE, uses an adaptive MCMC strategy to sample the latent U (adaptive delta_U).
#'
#' @return The function returns a list with the following components:
#' \item{xx}{Numeric vector. Evaluation grid.}
#' \item{qx}{Numeric array. Matrix
#' of dimension \eqn{\texttt{Nx} \times (\texttt{length(probs)} + 1)}{Nx x
#' (length(probs)+1)} with the posterior mean and the desired quantiles input
#' in \code{probs}.}
#' \item{cpo}{Numeric vector of \code{length(x)} with
#' conditional predictive ordinates.}
#' \item{R}{Numeric vector of
#' \code{length(Nit*(1-Pbi))} with the number of mixtures components
#' (clusters).}
#' \item{U}{Numeric vector of \code{length(Nit*(1-Pbi))} with the
#' values of the latent variable U.}
#' \item{Allocs}{List of
#' \code{length(Nit*(1-Pbi))} with the clustering allocations.}
#' \item{means}{List of \code{length(Nit*(1-Pbi))} with the cluster means
#' (locations). Only if extras = TRUE.}
#' \item{sigmas}{Numeric vector of
#' \code{length(Nit*(1-Pbi))} with the cluster standard deviations. Only if
#' extras = TRUE.}
#' \item{weights}{List of \code{length(Nit*(1-Pbi))} with the
#' mixture weights. Only if extras = TRUE.}
#' \item{Js}{List of
#' \code{length(Nit*(1-Pbi))} with the unnormalized weights (jump sizes). Only
#' if extras = TRUE.}
#' \item{Nm}{Integer constant. Number of jumps of the
#' continuous component of the unnormalized process.}
#' \item{delta_Us}{List of
#' \code{length(Nit*(1-Pbi))} with the sequence of adapted delta_U used in the MH step for the latent variable U. Only if extras = TRUE and adaptive = TRUE.}
#' \item{Nx}{Integer
#' constant. Number of grid points for the evaluation of the density estimate.}
#' \item{Nit}{Integer constant. Number of MCMC iterations.}
#' \item{Pbi}{Numeric
#' constant. Burn-in period proportion of \code{Nit}.}
#' \item{procTime}{Numeric
#' vector with execution time provided by \code{proc.time} function.}
#' \item{distr.k}{Integer corresponding to the kernel chosen for the mixture}
#' \item{data}{Data used for the fit}
#' \item{NRMI_params}{A named list with the parameters of the NRMI process}
#' @section Warning: The function is computationally intensive. Be patient.
#' @author Barrios, E., Kon Kam King, G. and Nieto-Barajas, L.E.
#' @seealso \code{\link{MixNRMI2}}, \code{\link{MixNRMI1cens}},
#' \code{\link{MixNRMI2cens}}, \code{\link{multMixNRMI1}}
#' @references 1.- Barrios, E., Lijoi, A., Nieto-Barajas, L. E. and Prünster,
#' I. (2013). Modeling with Normalized Random Measure Mixture Models.
#' Statistical Science. Vol. 28, No. 3, 313-334.
#'
#' 2.- James, L.F., Lijoi, A. and Prünster, I. (2009). Posterior analysis for
#' normalized random measure with independent increments. Scand. J. Statist 36,
#' 76-97.
#'
#' 3.- Kon Kam King, G., Arbel, J. and Prünster, I. (2016). Species
#' Sensitivity Distribution revisited: a Bayesian nonparametric approach. In
#' preparation.
#' @keywords distribution models nonparametrics
#' @examples
#' \dontrun{
#' ### Example 1
#' # Data
#' data(acidity)
#' x <- acidity
#' # Fitting the model under default specifications
#' out <- MixNRMI2cens(x, x)
#' # Plotting density estimate + 95% credible interval
#' plot(out)
#' }
#'
#' \dontrun{
#' ### Example 2
#' # Data
#' data(salinity)
#' # Fitting the model under special specifications
#' out <- MixNRMI2cens(
#' xleft = salinity$left, xright = salinity$right, Nit = 5000, distr.pz0 = 10,
#' mu.pz0 = 1, sigma.pz0 = 2
#' )
#' # Plotting density estimate + 95% credible interval
#' attach(out)
#' plot(out)
#' # Plotting number of clusters
#' par(mfrow = c(2, 1))
#' plot(R, type = "l", main = "Trace of R")
#' hist(R, breaks = min(R - 0.5):max(R + 0.5), probability = TRUE)
#' detach()
#' }
#'
#' @export MixNRMI2cens
MixNRMI2cens <-
function(xleft, xright, probs = c(0.025, 0.5, 0.975), Alpha = 1,
Kappa = 0, Gama = 0.4, distr.k = "normal", distr.py0 = "normal", distr.pz0 = "gamma",
mu.pz0 = 3, sigma.pz0 = sqrt(10), delta_S = 4, kappa = 2, delta_U = 2,
Meps = 0.01, Nx = 150, Nit = 1500, Pbi = 0.1, epsilon = NULL,
printtime = TRUE, extras = TRUE, adaptive = FALSE) {
if (is.null(distr.k)) {
stop("Argument distr.k is NULL. Should be provided. See help for details.")
}
if (is.null(distr.py0)) {
stop("Argument distr.py0 is NULL. Should be provided. See help for details.")
}
distr.k <- process_dist_name(distr.k)
distr.py0 <- process_dist_name(distr.py0)
distr.pz0 <- process_dist_name(distr.pz0)
tInit <- proc.time()
cens_data_check(xleft, xright)
xpoint <- as.numeric(na.omit(0.5 * (xleft + xright)))
npoint <- length(xpoint)
censor_code <- censor_code_rl(xleft, xright)
censor_code_filters <- lapply(0:3, FUN = function(x) {
censor_code ==
x
})
names(censor_code_filters) <- 0:3
n <- length(xleft)
y <- seq(n)
xsort <- sort(xpoint)
y[seq(n / 2)] <- mean(xsort[seq(npoint / 2)])
y[-seq(n / 2)] <- mean(xsort[-seq(npoint / 2)])
z <- rep(1, n)
u <- 1
if (is.null(epsilon)) {
epsilon <- sd(xpoint) / 4
}
xx <- seq(min(xpoint) - epsilon, max(xpoint) + epsilon, length = Nx)
Fxx <- matrix(NA, nrow = Nx, ncol = Nit)
fx <- matrix(NA, nrow = n, ncol = Nit)
R <- seq(Nit)
U <- seq(Nit)
Nmt <- seq(Nit)
Allocs <- vector(mode = "list", length = Nit)
if (adaptive) {
optimal_delta <- rep(NA, n)
}
if (extras) {
means <- vector(mode = "list", length = Nit)
sigmas <- vector(mode = "list", length = Nit)
weights <- vector(mode = "list", length = Nit)
Js <- vector(mode = "list", length = Nit)
if (adaptive) {
delta_Us <- seq(Nit)
}
}
mu.py0 <- mean(xpoint)
sigma.py0 <- sd(xpoint)
for (j in seq(Nit)) {
if (j %% 500 == 0) {
cat("MCMC iteration", j, "of", Nit, "\n")
}
tt <- comp2(y, z)
ystar <- tt$ystar
zstar <- tt$zstar
nstar <- tt$nstar
rstar <- tt$rstar
idx <- tt$idx
Allocs[[max(1, j - 1)]] <- idx
if (Gama != 0) {
if (adaptive) {
tmp <- gs3_adaptive3(u, n = n, r = rstar, alpha = Alpha, kappa = Kappa, gama = Gama, delta = delta_U, U = U, iter = j, adapt = adaptive)
u <- tmp$u_prime
delta_U <- tmp$delta
}
else {
u <- gs3(u,
n = n, r = rstar, alpha = Alpha, kappa = Kappa,
gama = Gama, delta = delta_U
)
}
}
JiC <- MvInv(
eps = Meps, u = u, alpha = Alpha, kappa = Kappa,
gama = Gama, N = 50001
)
Nm <- length(JiC)
TauyC <- rk(Nm, distr = distr.py0, mu = mu.py0, sigma = sigma.py0)
TauzC <- rk(Nm, distr = distr.pz0, mu = mu.pz0, sigma = sigma.pz0)
if (distr.pz0 == 2) {
tt <- gsYZstarcens2(
ystar = ystar, zstar = zstar,
nstar = nstar, rstar = rstar, idx = idx, xleft = xleft,
xright = xright, censor_code = censor_code, delta = delta_S,
kappa = kappa, distr.k = distr.k, distr.py0 = distr.py0,
mu.py0 = mu.py0, sigma.py0 = sigma.py0, distr.pz0 = distr.pz0,
mu.pz0 = mu.pz0, sigma.pz0 = sigma.pz0
)
ystar <- tt$ystar
zstar <- tt$zstar
}
tt <- gsHP(ystar, rstar, distr.py0)
mu.py0 <- tt$mu.py0
sigma.py0 <- tt$sigma.py0
Jstar <- rgamma(rstar, nstar - Gama, Kappa + u)
Tauy <- c(TauyC, ystar)
Tauz <- c(TauzC, zstar)
J <- c(JiC, Jstar)
tt <- fcondYZXAcens2(
xleft = xleft, xright = xright,
censor_code_filters = censor_code_filters, distr = distr.k,
Tauy = Tauy, Tauz = Tauz, J = J
)
y <- tt[, 1]
z <- tt[, 2]
Fxx[, j] <- fcondXA2(xx,
distr = distr.k, Tauy, Tauz,
J
)
fx[, j] <- fcondXA2cens2(
xleft = xleft, xright = xright,
censor_code_filters = censor_code_filters, distr = distr.k,
Tauy, Tauz, J
)
R[j] <- rstar
U[j] <- u
Nmt[j] <- Nm
if (extras) {
means[[j]] <- Tauy
sigmas[[j]] <- Tauz
weights[[j]] <- J / sum(J)
Js[[j]] <- J
if (adaptive) {
delta_Us[j] <- delta_U
}
}
}
tt <- comp2(y, z)
Allocs[[Nit]] <- tt$idx
biseq <- seq(floor(Pbi * Nit))
Fxx <- Fxx[, -biseq]
qx <- as.data.frame(t(apply(Fxx, 1, quantile, probs = probs)))
names(qx) <- paste("q", probs, sep = "")
qx <- cbind(mean = apply(Fxx, 1, mean), qx)
R <- R[-biseq]
U <- U[-biseq]
Allocs <- Allocs[-biseq]
if (extras) {
means <- means[-biseq]
sigmas <- sigmas[-biseq]
weights <- weights[-biseq]
Js <- Js[-biseq]
if (adaptive) {
delta_Us <- delta_Us[-biseq]
}
}
cpo <- 1 / apply(1 / fx[, -biseq], 1, mean)
if (printtime) {
cat(" >>> Total processing time (sec.):\n")
print(procTime <- proc.time() - tInit)
}
res <- list(
xx = xx, qx = qx, cpo = cpo, R = R, U = U,
Allocs = Allocs, Nm = Nmt, Nx = Nx, Nit = Nit, Pbi = Pbi,
procTime = procTime, distr.k = distr.k, data = data.frame(left = xleft, right = xright),
NRMI_params = list("Alpha" = Alpha, "Kappa" = Kappa, "Gamma" = Gama)
)
if (extras) {
res$means <- means
res$sigmas <- sigmas
res$weights <- weights
res$Js <- Js
if (adaptive) {
res$delta_Us <- delta_Us
}
}
return(structure(res, class = "NRMI2"))
}
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/MixNRMI2cens.R
|
#' Pitman-Yor process mixture of Type I
#'
#' This function calls the PYdensity function from package BNPmix, to allow fitting a Pitman-Yor process mixture to the data.
#'
#' @param x Numeric vector. Data set to which the density is fitted.
#' @param probs Numeric vector. Desired quantiles of the density estimates.
#' @param Alpha Numeric constant. Total mass of the centering measure. See details.
#' @param Gama Numeric constant. \eqn{0\leq \texttt{Gama} \leq 1}{0 <= Gama <=
#' 1}. See details.
#' @param asigma Numeric positive constant. Shape parameter of the gamma prior
#' on the standard deviation of the mixture kernel. Default value suggested by package BNPmix.
#' @param bsigma Numeric positive constant. Rate parameter of the gamma prior
#' on the standard deviation of the mixture kernel. Default value suggested by package BNPmix.
#' @param Nx Integer constant. Number of grid points for the evaluation of the
#' density estimate.
#' @param Nit Integer constant. Number of MCMC iterations.
#' @param Pbi Numeric constant. Burn-in period proportion of Nit.
#' @param epsilon Numeric constant. Extension to the evaluation grid range.
#' See details.
#' @param printtime Logical. If TRUE, prints out the execution time.
#' @param extras Logical. If TRUE, gives additional objects: means and weights
#' @return The function returns a PY1 object. It is based on a list with the following components:
#' \item{xx}{Numeric vector. Evaluation grid.}
#' \item{qx}{Numeric array. Matrix
#' of dimension \eqn{\texttt{Nx} \times (\texttt{length(probs)} + 1)}{Nx x
#' (length(probs)+1)} with the posterior mean and the desired quantiles input
#' in \code{probs}.}
#' \item{R}{Numeric vector of
#' \code{length(Nit*(1-Pbi))} with the number of mixtures components
#' (clusters).}
#' \item{S}{Numeric vector of \code{length(Nit*(1-Pbi))} with the
#' values of common standard deviation sigma.}
#' \item{Allocs}{List of \code{length(Nit*(1-Pbi))} with the clustering
#' allocations.}
#' \item{means}{List of \code{length(Nit*(1-Pbi))} with the
#' cluster means (locations). Only if extras = TRUE.}
#' \item{weights}{List of
#' \code{length(Nit*(1-Pbi))} with the mixture weights. Only if extras = TRUE.}
#' \item{Nit}{Integer constant. Number of MCMC iterations.}
#' \item{Pbi}{Numeric constant. Burn-in period proportion of \code{Nit}.}
#' \item{distr.k}{Integer corresponding to the kernel chosen for the mixture. Always 1, since the Pitman-Yor process is only written to work with Gaussian kernels.}
#' \item{data}{Data used for the fit}
#' \item{PY_params}{A named list with the parameters of the Pitman-Yor process}
#'
#' @export
#'
#' @examples
#' # Data
#' data(acidity)
#' x <- acidity
#' # Fitting the model under default specifications
#' out <- MixPY1(x)
#' # Plotting density estimate + 95% credible interval
#' plot(out)
MixPY1 <- function(x, probs = c(0.025, 0.5, 0.975), Alpha = 1, Gama = 0.4, asigma = 2, bsigma = 1 / var(x), Nx = 100, Nit = 1500, Pbi = 0.5, epsilon = NULL, printtime = TRUE, extras = TRUE) {
if (!requireNamespace("BNPmix", quietly = TRUE)) {
stop("Package \"BNPmix\" is needed for this function to work. Please install it.",
call. = FALSE
)
}
nburn <- floor(Pbi * Nit)
if (is.null(epsilon)) {
epsilon <- sd(x) / 4
}
xx <- seq(min(x) - epsilon, max(x) + epsilon, length = Nx)
restmp <- BNPmix::PYdensity(
y = x,
mcmc = list(niter = Nit, nburn = nburn, model = "L", print_message = printtime),
prior = list(strength = Alpha, discount = Gama, a0 = asigma, b0 = 1 / bsigma),
output = list(grid = xx, out_param = TRUE, out_type = "FULL")
)
clust <- restmp$clust + 1 # 1-based indexing
Allocs <- lapply(X = 1:nrow(restmp$clust), FUN = function(irow) clust[irow, ])
probs <- sort(probs)
qx <- as.data.frame(t(apply(X = restmp$density, MARGIN = 2, FUN = quantile, probs = probs)))
names(qx) <- paste("q", probs, sep = "")
qx <- cbind(mean = apply(restmp$density, 2, mean), qx)
res <- list(
xx = xx, qx = qx, R = unlist(lapply(X = Allocs, FUN = function(x) length(unique(x)))), S = sqrt(restmp$sigma2),
distr.k = 1, Allocs = Allocs, data = x, Nit = Nit, Pbi = Pbi,
PY_params = list("Alpha" = Alpha, "Gamma" = Gama)
)
if (extras) {
res$means <- lapply(restmp$mean, FUN = function(x) x[, 1])
res$weights <- lapply(restmp$probs, FUN = function(x) x[, 1])
}
return(structure(res, class = "PY1"))
}
#' Plot the density estimate and the 95\% credible interval
#'
#' @param x A fitted object of class PY1
#' @param ... Further arguments to be passed to generic function, ignored at the moment
#'
#' @return A graph with the density estimate, the 95\% credible interval and a
#' histogram of the data
#' @export
#'
#' @examples
#' data(acidity)
#' out <- MixPY1(acidity, Nit = 50)
#' plot(out)
plot.PY1 <- function(x, ...) {
plotfit_noncensored(x)
}
#' S3 method for class 'PY1'
#'
#' @param x A fitted object of class PY1
#' @param ... Further arguments to be passed to generic function, ignored at the moment
#'
#' @return A visualization of the important information about the object
#' @export
#'
#' @examples
#'
#' ## Example for non censored data
#'
#' data(acidity)
#' out <- MixPY1(acidity, Nit = 50)
#' print(out)
print.PY1 <- function(x, ...) {
kernel_name <- tolower(give_kernel_name(x$distr.k))
writeLines(paste("Fit of a semiparametric", kernel_name, "mixture model on", length(x$data), "data points.\nThe MCMC algorithm was run for", x$Nit, "iterations with", 100 * x$Pbi, "% discarded for burn-in."))
}
#' S3 method for class 'PY1'
#'
#' @param object A fitted object of class PY1
#' @param number_of_clusters Whether to compute the optimal number of clusters, which can be a time-consuming operation (see \code{\link{compute_optimal_clustering}})
#' @param ... Further arguments to be passed to generic function, ignored at the moment
#'
#' @return Prints out the text for the summary S3 methods
#' @export
#'
#' @examples
#'
#' ## Example for non censored data
#'
#' data(acidity)
#' out <- MixPY1(acidity, Nit = 50)
#' summary(out)
summary.PY1 <- function(object, number_of_clusters = FALSE, ...) {
kernel_name <- tolower(give_kernel_name(object$distr.k))
kernel_comment <- paste("A semiparametric", kernel_name, "mixture model was used.")
PY_comment <- paste("Density estimation using a Pitman-Yor process, \nwith total mass parameter Alpha =", object$PY_params$Alpha, "and discount parameter Gamma =", object$PY_params$Gamma)
summarytext(fit = object, kernel_comment = kernel_comment, BNP_process_comment = PY_comment, number_of_clusters = number_of_clusters)
}
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/MixPY1.R
|
#' Pitman-Yor process mixture of Type II
#'
#' This function calls the PYdensity function from package BNPmix, to allow fitting a Pitman-Yor process mixture to the data.
#'
#' @param x Numeric vector. Data set to which the density is fitted.
#' @param probs Numeric vector. Desired quantiles of the density estimates.
#' @param Alpha Numeric constant. Total mass of the centering measure. See details.
#' @param Gama Numeric constant. \eqn{0\leq \texttt{Gama} \leq 1}{0 <= Gama <=
#' 1}. See details.
#' @param asigma Numeric positive constant. Shape parameter of the gamma prior
#' on the standard deviation of the mixture kernel. Default value suggested by package BNPmix.
#' @param bsigma Numeric positive constant. Rate parameter of the gamma prior
#' on the standard deviation of the mixture kernel. Default value suggested by package BNPmix.
#' @param Nx Integer constant. Number of grid points for the evaluation of the
#' density estimate.
#' @param Nit Integer constant. Number of MCMC iterations.
#' @param Pbi Numeric constant. Burn-in period proportion of Nit.
#' @param epsilon Numeric constant. Extension to the evaluation grid range.
#' See details.
#' @param printtime Logical. If TRUE, prints out the execution time.
#' @param extras Logical. If TRUE, gives additional objects: means and weights
#' @return The function returns a PY2 object. It is based on a list with the following components:
#' \item{xx}{Numeric vector. Evaluation grid.}
#' \item{qx}{Numeric array. Matrix
#' of dimension \eqn{\texttt{Nx} \times (\texttt{length(probs)} + 1)}{Nx x
#' (length(probs)+1)} with the posterior mean and the desired quantiles input
#' in \code{probs}.}
#' \item{R}{Numeric vector of
#' \code{length(Nit*(1-Pbi))} with the number of mixtures components
#' (clusters).}
#' \item{Allocs}{List of \code{length(Nit*(1-Pbi))} with the clustering
#' allocations.}
#' \item{means}{List of \code{length(Nit*(1-Pbi))} with the
#' cluster means (locations). Only if extras = TRUE.}
#' \item{sigmas}{List of \code{length(Nit*(1-Pbi))} with the
#' cluster standard deviations (scales). Only if extras = TRUE.}
#' \item{weights}{List of
#' \code{length(Nit*(1-Pbi))} with the mixture weights. Only if extras = TRUE.}
#' \item{Nit}{Integer constant. Number of MCMC iterations.}
#' \item{Pbi}{Numeric constant. Burn-in period proportion of \code{Nit}.}
#' \item{distr.k}{Integer corresponding to the kernel chosen for the mixture. Always 1, since the Pitman-Yor process is only written to work with Gaussian kernels.}
#' \item{data}{Data used for the fit}
#' \item{PY_params}{A named list with the parameters of the Pitman-Yor process}
#'
#'
#' @export
#'
#' @examples
#' # Data
#' data(acidity)
#' x <- acidity
#' # Fitting the model under default specifications
#' out <- MixPY2(x)
#' # Plotting density estimate + 95% credible interval
#' plot(out)
MixPY2 <- function(x, probs = c(0.025, 0.5, 0.975), Alpha = 1, Gama = 0.4, asigma = 2, bsigma = 1 / var(x), Nx = 100, Nit = 1500, Pbi = 0.5, epsilon = NULL, printtime = TRUE, extras = TRUE) {
if (!requireNamespace("BNPmix", quietly = TRUE)) {
stop("Package \"BNPmix\" is needed for this function to work. Please install it.",
call. = FALSE
)
}
nburn <- floor(Pbi * Nit)
# Niter <- Nit - nburn
if (is.null(epsilon)) {
epsilon <- sd(x) / 4
}
xx <- seq(min(x) - epsilon, max(x) + epsilon, length = Nx)
restmp <- BNPmix::PYdensity(
y = x,
mcmc = list(niter = Nit, nburn = nburn, model = "LS", print_message = printtime),
prior = list(strength = Alpha, discount = Gama, a0 = asigma, b0 = 1 / bsigma),
output = list(grid = xx, out_param = TRUE, out_type = "FULL")
)
clust <- restmp$clust + 1 # 1-based indexing
Allocs <- lapply(X = 1:nrow(restmp$clust), FUN = function(irow) clust[irow, ])
probs <- sort(probs)
qx <- as.data.frame(t(apply(X = restmp$density, MARGIN = 2, FUN = quantile, probs = probs)))
names(qx) <- paste("q", probs, sep = "")
qx <- cbind(mean = apply(restmp$density, 2, mean), qx)
res <- list(
xx = xx, qx = qx, R = unlist(lapply(X = Allocs, FUN = function(x) length(unique(x)))),
distr.k = 1, Allocs = Allocs, data = x, Nit = Nit, Pbi = Pbi,
PY_params = list("Alpha" = Alpha, "Gamma" = Gama)
)
if (extras) {
res$means <- lapply(restmp$mean, FUN = function(x) x[, 1])
res$weights <- lapply(restmp$probs, FUN = function(x) x[, 1])
res$sigmas <- lapply(restmp$sigma2, FUN = function(x) sqrt(x[, 1]))
}
return(structure(res, class = "PY2"))
}
#' Plot the density estimate and the 95\% credible interval
#'
#' @param x A fitted object of class PY2
#' @param ... Further arguments to be passed to generic function, ignored at the moment
#'
#' @return A graph with the density estimate, the 95\% credible interval and a
#' histogram of the data
#' @export
#'
#' @examples
#' data(acidity)
#' out <- MixPY2(acidity, Nit = 50)
#' plot(out)
plot.PY2 <- function(x, ...) {
plotfit_noncensored(x)
}
#' S3 method for class 'PY2'
#'
#' @param x A fitted object of class PY2
#' @param ... Further arguments to be passed to generic function, ignored at the moment
#'
#' @return A visualization of the important information about the object
#' @export
#'
#' @examples
#'
#' ## Example for non censored data
#'
#' data(acidity)
#' out <- MixPY2(acidity, Nit = 50)
#' print(out)
print.PY2 <- function(x, ...) {
kernel_name <- tolower(give_kernel_name(x$distr.k))
writeLines(paste("Fit of a nonparametric", kernel_name, "mixture model on", length(x$data), "data points.\nThe MCMC algorithm was run for", x$Nit, "iterations with", 100 * x$Pbi, "% discarded for burn-in."))
}
#' S3 method for class 'PY2'
#'
#' @param object A fitted object of class PY2
#' @param number_of_clusters Whether to compute the optimal number of clusters, which can be a time-consuming operation (see \code{\link{compute_optimal_clustering}})
#' @param ... Further arguments to be passed to generic function, ignored at the moment
#'
#' @return Prints out the text for the summary S3 methods
#' @export
#'
#' @examples
#'
#' ## Example for non censored data
#'
#' data(acidity)
#' out <- MixPY2(acidity, Nit = 50)
#' summary(out)
summary.PY2 <- function(object, number_of_clusters = FALSE, ...) {
kernel_name <- tolower(give_kernel_name(object$distr.k))
kernel_comment <- paste("A nonparametric", kernel_name, "mixture model was used.")
PY_comment <- paste("Density estimation using a Pitman-Yor process, \nwith total mass parameter Alpha =", object$PY_params$Alpha, "and discount parameter Gamma =", object$PY_params$Gamma)
summarytext(fit = object, kernel_comment = kernel_comment, BNP_process_comment = PY_comment, number_of_clusters = number_of_clusters)
}
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/MixPY2.R
|
#' Continuous Jump heights function
#'
#' This function evaluates the M(v) function that determines the jump heights
#' in the "continuous" part of an increasing additive process.
#'
#' For internal use.
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(u = 0.5, alpha = 1, beta = 1, gama = 1 / 2, low = 1e-04,
#' upp = 10, N = 5001) {
#' x <- -log(seq(from = exp(-low), to = exp(-upp), length = N))
#' f <- alpha / gamma(1 - gama) * x^(-(1 + gama)) * exp(-(u +
#' beta) * x)
#' dx <- diff(x)
#' h <- (f[-1] + f[-N]) / 2
#' Mv <- rep(0, N)
#' for (i in seq(N - 1, 1)) Mv[i] <- Mv[i + 1] + dx[i] * h[i]
#' return(list(v = x, Mv = Mv))
#' }
Mv <-
function(u, alpha, beta, gama, low, upp, N) {
x <- -log(seq(from = exp(-low), to = exp(-upp), length = N))
f <- alpha / gamma(1 - gama) * x^(-(1 + gama)) * exp(-(u +
beta) * x)
dx <- diff(x)
h <- (f[-1] + f[-N]) / 2
Mv <- rep(0, N)
for (i in seq(N - 1, 1)) Mv[i] <- Mv[i + 1] + dx[i] * h[i]
return(list(v = x, Mv = Mv))
}
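# Usage sketch (not run): Mv() returns the grid v together with the tail mass
# M(v), interpretable as the expected number of jumps larger than v.
# Interpolating gives the expected number of jumps above any height of interest.
# mv <- Mv(u = 0.5, alpha = 1, beta = 1, gama = 1 / 2, low = 1e-4, upp = 10, N = 5001)
# approx(x = mv$v, y = mv$Mv, xout = 0.01)$y # expected number of jumps > 0.01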
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/Mv.R
|
#' Invert jump heights function
#'
#' Determines the jump heights of an increasing additive process by inverting
#' the M(v) function. Uses a truncation level based on the expected moments of the NGG process (\code{\link{thresholdGG}}).
#' For internal use.
#'
#' @param eps Dummy argument kept for consistency with past versions of the function
#' @param u Real number. The value of the latent variable at the current step.
#' @param alpha Numeric constant. Total mass of the centering measure.
#' @param kappa Numeric positive constant.
#' @param gama Numeric constant. Discount parameter of the NRMI process.
#' @param N Number of steps in the discretization scheme for the grid inversion.
#'
#' @keywords internal
#' @examples
#'
#' ## The function has been optimised, but it is morally defined as:
#' function(w, u = 0.5, alpha = 1, kappa = 1, gama = 1 / 2, N = 3001) {
#' # w: increasing vector of levels at which to invert M(v)
#' n <- length(w)
#' v <- rep(NA, n)
#' x <- -log(seq(from = exp(-1e-05), to = exp(-10), length = N))
#' f <- alpha / gamma(1 - gama) * x^(-(1 + gama)) * exp(-(u +
#' kappa) * x)
#' dx <- diff(x)
#' h <- (f[-1] + f[-N]) / 2
#' Mv <- rep(0, N)
#' for (i in seq(N - 1, 1)) Mv[i] <- Mv[i + 1] + dx[i] * h[i]
#' for (j in seq(n)) v[j] <- x[which.min(Mv > w[j])]
#' return(v)
#' }
MvInv <-
function(eps, u = 0.5, alpha = 1, kappa = 1, gama = 1 / 2, N = 3001) # eps no longer required
{
x <- -log(seq(from = exp(-1e-05), to = exp(-10), length = N))
f <- alpha / gamma(1 - gama) * x^(-(1 + gama)) * exp(-(u +
kappa) * x)
dx <- diff(x)
h <- (f[-1] + f[-N]) / 2
Mv <- c(rev(cumsum(rev(dx[-N] * h[-N]))), 0)
M <- ceiling(thresholdGG(
alpha = alpha,
kappa = kappa + u,
gama = gama
)) # upper bound defined via the grid
M <- max(10, M) # We wish to make sure we at least use a few jumps
W <- rexp(n = M)
W <- cumsum(W)
# x_which_min = function(w){ # I guess that this function could be defined outside of MvInV
# x[which.min(Mv > w)]
# }
# x_which_min = Vectorize(x_which_min, vectorize.args = "w")
# v <- x_which_min(W)
if (M < 25) {
## Faster for small M, even though each which.min() call makes a full pass over Mv (N elements)
return(fill_v1(M, Mv, W, x))
}
else {
return(fill_v2(M, Mv, W, N, x)) # Faster for large values of N
}
}
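# Usage sketch (not run): draw the jump heights of an NGG(1, 1, 1/2) process
# given latent u = 0.5. The number of jumps returned is random, with an upper
# bound set via thresholdGG(); eps is a dummy argument and may be NULL.
# set.seed(1)
# J <- MvInv(eps = NULL, u = 0.5, alpha = 1, kappa = 1, gama = 1 / 2, N = 3001)
# plot(sort(J, decreasing = TRUE), type = "h", ylab = "jump size")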
fill_v1 <- function(M, Mv, W, x) {
v <- rep(NA, M)
for (j in seq(M)) v[j] <- x[which.min(Mv > W[j])]
return(v)
}
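# fill_v2 exploits that W = cumsum(rexp(M)) is increasing while Mv is
# non-increasing along the grid: a single backward sweep with a persistent
# pointer (iMv) finds all crossing points in O(N + M), instead of the
# O(N * M) cost of the repeated which.min() scans in fill_v1.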
fill_v2 <- function(M, Mv, W, N, x) {
v <- rep(NA, M)
iMv <- N
for (i in seq(M)) {
while (iMv > 0 && Mv[iMv] < W[i]) {
iMv <- iMv - 1
# print(paste(iMv, Mv[iMv], W[i]))
}
v[i] <- x[iMv + 1] # This index shift is to keep consistency with previous version of the function, not necessary.
}
# for (j in seq(M)) v[j] <- x[which.min(Mv > W[j])]
return(v)
}
fill_v3 <- function(M, Mv, W, N, x) {
v <- rep(NA, M)
i <- 1
for (iMv in N:1) {
if (i > M) break()
if (Mv[iMv] > W[i]) {
v[i] <- x[iMv + 1] # This index shift is to keep consistency with previous version of the function, not necessary.
i <- i + 1
}
}
if (i < M) v[i:M] <- x[1]
# for (j in seq(M)) v[j] <- x[which.min(Mv > W[j])]
return(v)
}
# fill_v4 = function(M, Mv, W, N, x){
# v <- rep(NA, M)
# iMv = N
# for (i in seq(M)){
# while (Mv[iMv] < W[i]){
# iMv = iMv-1
# }
# print(paste("now index", i))
# print(which.min(Mv > W[i]))
# print(iMv+1)
# v[i] = x[iMv+1]
# }
# # for (j in seq(M)) v[j] <- x[which.min(Mv > W[j])]
# return(v)
# }
#
# library(microbenchmark)
#
# microbenchmark(fill_v1(M, Mv, W, x), fill_v2(M, Mv, W, N, x), fill_v3(M, Mv, W, N, x))
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/MvInv.R
|
MvInv_old <-
function(eps, u = 0.5, alpha = 1, beta = 1, gama = 1 / 2, N = 3001) {
x <- -log(seq(from = exp(-1e-05), to = exp(-10), length = N))
f <- alpha / gamma(1 - gama) * x^(-(1 + gama)) * exp(-(u +
beta) * x)
dx <- diff(x)
h <- (f[-1] + f[-N]) / 2
Mv <- c(rev(cumsum(rev(dx[-N] * h[-N]))), 0)
err <- 1
w <- 0
v <- NULL
while (err > eps) {
w <- w + rgamma(1, 1, 1)
v <- c(v, x[which.min(Mv > w)])
err <- min(v) / sum(v)
}
return(v)
}
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/MvInv_old.R
|
#' Censoring data check
#'
#' Checks that a censored dataset is valid. This performs two checks: that no observation has both bounds missing (NA, NA), and that, for interval censored data, the left bound is never larger than the right bound.
#'
#' For internal use
#'
#' @param xleft left bounds for the censored dataset. Can be a real number or NA
#' @param xright right bounds for the censored dataset
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(xleft, xright) {
#' if (any(xright < xleft, na.rm = TRUE)) {
#' stop("in censored data, left bound not always smaller than right bound")
#' }
#' if (any(mapply(FUN = function(xileft, xiright) {
#' is.na(xileft) & is.na(xiright)
#' }, xleft, xright))) {
#' stop("in censored data, there is an NA NA")
#' }
#' }
cens_data_check <-
function(xleft, xright) {
if (any(xright < xleft, na.rm = TRUE)) {
stop("in censored data, left bound not always smaller than right bound")
}
if (any(mapply(FUN = function(xileft, xiright) {
is.na(xileft) & is.na(xiright)
}, xleft, xright))) {
stop("in censored data, there is an NA NA")
}
}
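# Usage sketch (not run): the check passes silently for valid encodings and
# stops otherwise.
# cens_data_check(xleft = c(1, NA, 2), xright = c(1, 3, 4)) # OK
# cens_data_check(xleft = 2, xright = 1) # error: bounds reversed
# cens_data_check(xleft = NA, xright = NA) # error: (NA, NA) observation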
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/cens_data_check.R
|
#' Censor code right-left
#'
#' Creates censoring code 0:3.
#'
#' For internal use
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(left, right) {
#' test_ <- function(k) {
#' if (is.na(left[[k]]) & is.na(right[[k]])) {
#' NA
#' } else if (is.na(left[[k]])) {
#' 2
#' } else if (is.na(right[[k]])) {
#' 0
#' } else if (left[[k]] == right[[k]]) {
#' 1
#' } else {
#' 3
#' }
#' }
#' sapply(seq_along(left), FUN = test_)
#' }
censor_code_rl <-
function(left, right) {
test_ <- function(k) {
if (is.na(left[[k]]) & is.na(right[[k]])) {
NA
} else if (is.na(left[[k]])) {
2
} else if (is.na(right[[k]])) {
0
} else if (left[[k]] == right[[k]]) {
1
} else {
3
}
}
sapply(seq_along(left), FUN = test_)
}
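# Usage sketch (not run): the codes are 0 = right-censored (right bound
# missing), 1 = exact, 2 = left-censored (left bound missing),
# 3 = interval-censored.
# censor_code_rl(left = c(1, NA, 2, 3), right = c(1, 4, NA, 5))
# # returns c(1, 2, 0, 3)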
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/censor_code_rl.R
|
#' Comment on the NRMI process depending on the value of the parameters
#'
#' @param NRMI_param A named list of the form list("Alpha" = 1, "Kappa" = 0, "Gamma" = 0.4)
#'
#' @return A string containing a comment on the NRMI process
#'
#' @examples
#' BNPdensity:::comment_on_NRMI_type(list("Alpha" = 1, "Kappa" = 0, "Gamma" = 0.4))
#' BNPdensity:::comment_on_NRMI_type(list("Alpha" = 1, "Kappa" = 0.1, "Gamma" = 0.4))
#' BNPdensity:::comment_on_NRMI_type(list("Alpha" = 1, "Kappa" = 0.1, "Gamma" = 0.5))
comment_on_NRMI_type <- function(NRMI_param = list("Alpha" = 1, "Kappa" = 0, "Gamma" = 0.4)) {
if (NRMI_param$Gamma == 0) {
return(paste("Dirichlet process,\nwith concentration parameter Alpha =", NRMI_param$Alpha))
}
else {
if (NRMI_param$Alpha == 1) {
if (NRMI_param$Kappa == 0) {
return(paste("Normalized stable process,\nwith stability parameter Gamma =", NRMI_param$Gamma))
}
else if (NRMI_param$Gamma == 0.5) {
return(paste("Normalized inverse Gaussian process,\nwith parameter Kappa =", NRMI_param$Kappa))
}
else {
return(paste("Normalized generalised gamma process,\nwith parameter Alpha =", NRMI_param$Alpha, "Kappa =", NRMI_param$Kappa, "Gamma =", NRMI_param$Gamma))
}
}
else {
return(paste("Normalized generalised gamma process,\nwith parameter Alpha =", NRMI_param$Alpha, "Kappa =", NRMI_param$Kappa, "Gamma =", NRMI_param$Gamma))
}
}
}
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/comment_on_NRMI_type.R
|
#' Ties function: univariate
#'
#' This function computes the distinct observations and their frequencies in a
#' numeric vector.
#'
#' For internal use.
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(y) {
#' n <- length(y)
#' mat <- outer(y, y, "==")
#' jstar <- led <- rep(FALSE, n)
#' for (j in seq(n)) {
#' if (!led[j]) {
#' jstar[j] <- TRUE
#' if (j == n) {
#' break
#' }
#' ji <- seq(j + 1, n)
#' tt <- mat[ji, j] %in% TRUE
#' led[ji] <- led[ji] | tt
#' }
#' if (all(led[-seq(j)])) {
#' break
#' }
#' }
#' ystar <- y[jstar]
#' nstar <- apply(as.matrix(mat[, jstar]), 2, sum)
#' r <- length(nstar)
#' idx <- match(y, ystar)
#' return(list(ystar = ystar, nstar = nstar, r = r, idx = idx))
#' }
comp1 <-
function(y) {
n <- length(y)
mat <- outer(y, y, "==")
jstar <- led <- rep(FALSE, n)
for (j in seq(n)) {
if (!led[j]) {
jstar[j] <- TRUE
if (j == n) {
break
}
ji <- seq(j + 1, n)
tt <- mat[ji, j] %in% TRUE
led[ji] <- led[ji] | tt
}
if (all(led[-seq(j)])) {
break
}
}
ystar <- y[jstar]
nstar <- apply(as.matrix(mat[, jstar]), 2, sum)
r <- length(nstar)
idx <- match(y, ystar)
return(list(ystar = ystar, nstar = nstar, r = r, idx = idx))
}
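# Worked example (not run): distinct values, their multiplicities, and the
# allocation of each observation to a distinct value.
# comp1(c(5, 2, 5, 7))
# # $ystar = c(5, 2, 7), $nstar = c(2, 1, 1), $r = 3, $idx = c(1, 2, 1, 3)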
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/comp1.R
|
#' Ties function: bivariate
#'
#' This function computes the distinct observations (couples) and their
#' frequencies in a bivariate numeric vector.
#'
#' For internal use.
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(y, z) {
#' if (length(y) != length(z)) {
#' stop("Vectors y and z should have equal length!")
#' }
#' n <- length(y)
#' matY <- outer(y, y, "==")
#' matZ <- outer(z, z, "==")
#' mat <- matY & matZ
#' jstar <- led <- rep(FALSE, n)
#' for (j in seq(n)) {
#' if (!led[j]) {
#' jstar[j] <- TRUE
#' if (j == n) {
#' break
#' }
#' ji <- seq(j + 1, n)
#' tt <- mat[ji, j] %in% TRUE
#' led[ji] <- led[ji] | tt
#' }
#' if (all(led[-seq(j)])) {
#' break
#' }
#' }
#' ystar <- y[jstar]
#' zstar <- z[jstar]
#' nstar <- apply(as.matrix(mat[, jstar]), 2, sum)
#' rstar <- length(nstar)
#' idx <- match(y, ystar)
#' return(list(
#' ystar = ystar, zstar = zstar, nstar = nstar,
#' rstar = rstar, idx = idx
#' ))
#' }
comp2 <-
function(y, z) {
if (length(y) != length(z)) {
stop("Vectors y and z should have equal length!")
}
n <- length(y)
matY <- outer(y, y, "==")
matZ <- outer(z, z, "==")
mat <- matY & matZ
jstar <- led <- rep(FALSE, n)
for (j in seq(n)) {
if (!led[j]) {
jstar[j] <- TRUE
if (j == n) {
break
}
ji <- seq(j + 1, n)
tt <- mat[ji, j] %in% TRUE
led[ji] <- led[ji] | tt
}
if (all(led[-seq(j)])) {
break
}
}
ystar <- y[jstar]
zstar <- z[jstar]
nstar <- apply(as.matrix(mat[, jstar]), 2, sum)
rstar <- length(nstar)
idx <- match(y, ystar)
return(list(
ystar = ystar, zstar = zstar, nstar = nstar,
rstar = rstar, idx = idx
))
}
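# A small illustration (not part of the original sources): the pair
# (1.5, 0.3) appears twice, the pair (2.5, 0.7) once.
# comp2(y = c(1.5, 2.5, 1.5), z = c(0.3, 0.7, 0.3))
# # $ystar = c(1.5, 2.5); $zstar = c(0.3, 0.7); $nstar = c(2, 1); $rstar = 2; $idx = c(1, 2, 1)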
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/comp2.R
|
Compute_log_likelihood_given_params <- function(fit_, it_retained, parallel, ncores) {
if (is_censored(fit_$data)) {
censor_code <- censor_code_rl(fit_$data$left, fit_$data$right)
censor_code_filters <- lapply(0:3, FUN = function(x) censor_code == x)
names(censor_code_filters) <- 0:3
dpred <- function(iter) {
log(dmixcens(
xlefts = fit_$data$left,
xrights = fit_$data$right,
c_code_filters = censor_code_filters,
locations = fit_$means[[iter]],
scales = fit_$sigmas[[iter]],
weights = fit_$weights[[iter]],
distr.k = fit_$distr.k
))
}
}
else {
dpred <- function(iter) {
log(dmix(fit_$data,
locations = fit_$means[[iter]],
scales = fit_$sigmas[[iter]],
weights = fit_$weights[[iter]],
distr.k = fit_$distr.k
))
}
}
unlist(parallel::mclapply(
X = it_retained,
FUN = function(it) sum(dpred(it)),
mc.cores = ifelse(test = parallel, yes = ncores, no = 1)
))
}
Convert_to_matrix_list <- function(fitlist, thinning_to = 1000, parallel = TRUE, ncores = parallel::detectCores()) {
# number of iterations * number of parameters
if (Sys.info()[["sysname"]] == "Windows") parallel <- FALSE
if (is_semiparametric(fitlist[[1]])) {
fitlist <- lapply(fitlist, function(fit) {
fit$sigmas <- fill_sigmas(fit)
fit
})
}
Nit <- length(fitlist[[1]]$means)
it_retained <- compute_thinning_grid(Nit, thinning_to = thinning_to)
if (is_semiparametric(fitlist[[1]])) {
lapply(X = fitlist, function(fit_i) {
cbind(
ncomp = fit_i$R[it_retained],
Sigma = fit_i$S[it_retained],
Latent_variable = fit_i$U[it_retained],
log_likelihood = Compute_log_likelihood_given_params(fit_i, it_retained, parallel, ncores)
)
})
}
else {
lapply(X = fitlist, function(fit_i) {
cbind(
ncomp = fit_i$R[it_retained],
Latent_variable = fit_i$U[it_retained],
log_likelihood = Compute_log_likelihood_given_params(fit_i, it_retained, parallel, ncores)
)
})
}
}
#' Convert the output of multMixNRMI into a coda mcmc object
#'
#' @param fitlist Output of multMixNRMI.
#' @param thinning_to Final length of the chain after thinning.
#' @param ncores Specify the number of cores to use in the conversion
#' @return a coda::mcmc object
convert_to_mcmc <- function(fitlist, thinning_to = 1000, ncores = parallel::detectCores()) {
coda::as.mcmc.list(coda::as.mcmc(lapply(Convert_to_matrix_list(fitlist, thinning_to = thinning_to, ncores = ncores), coda::mcmc)))
}
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/convert_fit_to_MCMC_chain.R
|
#' Conditional predictive ordinate function
#'
#' This function computes conditional predictive ordinates for each data point.
#'
#' For internal use.
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(obj) {
#' fx <- obj$fx
#' cpo <- 1 / apply(1 / fx, 1, mean)
#' return(cpo)
#' }
cpo <-
function(obj) {
fx <- obj$fx
cpo <- 1 / apply(1 / fx, 1, mean)
return(cpo)
}
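# Illustrative computation (not part of the original sources), with fx a
# matrix of density values, rows = data points, columns = MCMC iterations:
# the CPO of observation i is the harmonic mean of f(x_i) across iterations.
# fx <- matrix(c(0.2, 0.4, 0.2, 0.4), nrow = 2)
# 1 / apply(1 / fx, 1, mean) # c(0.2, 0.4)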
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/cpo.R
|
#' Density half Cauchy
#'
#' Computes the density.
#'
#' For internal use
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(x, location = 0, scale = 1) {
#' ifelse(x < 0, 0, 1) * dcauchy(x, location, scale) / (1 - pcauchy(
#' 0,
#' location, scale
#' ))
#' }
dhalfcauchy <-
function(x, location = 0, scale = 1) {
ifelse(x < 0, 0, 1) * dcauchy(x, location, scale) / (1 - pcauchy(
0,
location, scale
))
}
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/dhalfcauchy.R
|
#' Density half normal
#'
#' Computes the density.
#'
#' For internal use
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(x, mean = 0, sd = 1) {
#' ifelse(x < 0, 0, 1) * dnorm(x, mean, sd) / (1 - pnorm(
#' 0, mean,
#' sd
#' ))
#' }
dhalfnorm <-
function(x, mean = 0, sd = 1) {
ifelse(x < 0, 0, 1) * dnorm(x, mean, sd) / (1 - pnorm(
0, mean,
sd
))
}
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/dhalfnorm.R
|
#' Density half Student-t
#'
#' Computes the density.
#'
#' For internal use
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(x, df = 1, mean = 0, sd = 1) {
#' ifelse(x < 0, 0, 1) * dt_(x, df, mean, sd) / (1 - pt_(
#' 0, df,
#' mean, sd
#' ))
#' }
dhalft <-
function(x, df = 1, mean = 0, sd = 1) {
ifelse(x < 0, 0, 1) * dt_(x, df, mean, sd) / (1 - pt_(
0, df,
mean, sd
))
}
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/dhalft.R
|
dist_name_k_index_dict <- setNames(object = 1:10, nm = c("normal", "gamma", "beta", "exponential", "lognormal", "half-Cauchy", "half-normal", "half-student", "uniform", "truncated normal"))
dist_name_k_index_dict["norm"] <- 1
dist_name_k_index_dict["exp"] <- 4
dist_name_k_index_dict["double exponential"] <- 4
dist_name_k_index_dict["lnorm"] <- 5
dist_name_k_index_dict["halfcauchy"] <- 6
dist_name_k_index_dict["halfnorm"] <- 7
dist_name_k_index_dict["halft"] <- 8
dist_name_k_index_dict["unif"] <- 9
#' Convert distribution names to indices
#'
#' @param distname a character representing the distribution name. Allowed names are "normal", "gamma", "beta", "exponential", "double exponential", "lognormal", "half-Cauchy", "half-normal", "half-student", "uniform" and "truncated normal", or their common abbreviations "norm", "exp", "lnorm", "halfcauchy", "halfnorm", "halft" and "unif".
#'
#' @return an index describing the distribution. 1 = Normal; 2
#' = Gamma; 3 = Beta; 4 = Double Exponential; 5 = Lognormal, 6 = Half-Cauchy, 7 = Half-normal, 8 = Half-Student, 9 = Uniform, 10 = Truncated normal
#'
dist_name_k_index_converter <- function(distname) {
if (length(distname) > 1) stop("Please provide a single distribution name.")
if (!is.character(distname)) {
stop("distname is not a character, please provide a proper distribution name as a character.")
} else {
if (!(distname %in% names(dist_name_k_index_dict))) {
stop("The distribution name was not recognised as one of the available distributions.")
} else {
return(dist_name_k_index_dict[distname])
}
}
}
#' Process the distribution name argument into a distribution index
#'
#' This function is intended to help with compatibility with the previous versions of the package.
#'
#' @param distname Can be an integer or a distribution name. Allowed names are "normal", "gamma", "beta", "exponential", "double exponential", "lognormal", "half-Cauchy", "half-normal", "half-student", "uniform" and "truncated normal", or their common abbreviations "norm", "exp", "lnorm", "halfcauchy", "halfnorm", "halft" and "unif".
#'
#' @return an integer both if distname is an integer or a character
process_dist_name <- function(distname) {
if (distname %in% 1:10) {
return(distname)
} else {
return(dist_name_k_index_converter(distname = distname))
}
}
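# Illustrative calls (not part of the original sources):
# process_dist_name("normal") # 1
# process_dist_name("lnorm")  # 5
# process_dist_name(4)        # 4, integer codes pass through unchanged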
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/dist_name_k_converter.R
|
#' Kernel density function
#'
#' This function evaluates a density at a certain data point. There are 10
#' kernel options: 1 (normal), 2 (gamma), 3 (beta), 4 (double exponential),
#' 5 (lognormal), 6 (half-Cauchy), 7 (half-normal), 8 (half-student),
#' 9 (uniform) and 10 (truncated normal). All densities are parameterized in
#' terms of mean and standard deviation.
#'
#' For internal use.
#'
#' @keywords internal
#'
dk <-
function(x, distr = NULL, mu = NULL, sigma = NULL) {
msg <- "Argument \"distr\" should be defined numeric with possible values 1 (normal), 2 (gamma), 3 (beta), 4 (exponential), 5 (lognormal), 6 (half-Cauchy), 7 (half-normal), 8 (half-student), 9 (uniform) and 10 (truncated normal)"
if (is.null(distr)) {
stop(msg)
}
else if (distr == 1) {
a <- ifelse(is.null(mu), 0, mu)
b <- ifelse(is.null(sigma), 1, sigma)
dk <- dnorm(x, mean = a, sd = b)
}
else if (distr == 2) {
a <- ifelse(is.null(mu), 1, mu^2 / sigma^2)
b <- ifelse(is.null(sigma), 1, mu / sigma^2)
dk <- dgamma(x, shape = a, rate = b)
}
else if (distr == 3) {
a <- ifelse(is.null(mu), 0.5, (1 - mu) * (mu / sigma)^2 -
mu)
b <- ifelse(is.null(sigma), 1 / sqrt(12), (mu * (1 - mu) / sigma^2 -
1) * (1 - mu))
if (any(c(a, b) <= 0)) {
stop(paste(
"\nNegative Beta parameters:\n a =", a,
";\t b =", b
))
}
dk <- dbeta(x, shape1 = a, shape2 = b)
}
else if (distr == 4) {
a <- ifelse(is.null(mu), 0, mu)
b <- ifelse(is.null(sigma), 1 / sqrt(2), sigma / sqrt(2))
dk <- exp(-abs(x - a) / b) / (2 * b)
}
else if (distr == 5) {
a <- ifelse(is.null(mu), exp(1 / 2), log(mu / sqrt(1 + (sigma / mu)^2)))
b <- ifelse(is.null(sigma), exp(1) * (exp(1) - 1), sqrt(log(1 +
(sigma / mu)^2)))
dk <- dlnorm(x, meanlog = a, sdlog = b)
}
else if (distr == 6) {
dk <- dhalfcauchy(x, location = ifelse(is.null(mu), 0,
mu
), scale = ifelse(is.null(sigma), 1, sigma))
}
else if (distr == 7) {
dk <- dhalfnorm(x,
mean = ifelse(is.null(mu), 0, mu),
sd = ifelse(is.null(sigma), 1, sigma)
)
}
else if (distr == 8) {
dk <- dhalft(x, df = 10, mean = ifelse(is.null(mu), 0,
mu
), sd = ifelse(is.null(sigma), 1, sigma))
}
else if (distr == 9) {
dk <- dunif(x, min = ifelse(is.null(mu), 0, mu), max = ifelse(is.null(sigma),
1, sigma
))
}
else if (distr == 10) {
dk <- dtnorm(x, mean = ifelse(is.null(mu), 0, mu), sd = ifelse(is.null(sigma),
1, sigma
), lower = 0.1)
}
else {
stop(msg)
}
return(dk)
}
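# Illustrative calls (not part of the original sources); all kernels are
# parameterized by their mean mu and standard deviation sigma.
# dk(0, distr = 1) # standard normal density at 0, ~0.399
# dk(1, distr = 2, mu = 2, sigma = 1) # gamma kernel with mean 2 and sd 1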
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/dk.R
|
#' Density of the chosen kernel
#'
#' Computes likelihood contribution for censored data.
#'
#' For internal use
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(xleft, xright, c_code_filters, distr = NULL, mu = NULL,
#' sigma = NULL) {
#' res <- seq_along(xleft)
#' res[c_code_filters[["1"]]] <- dk(
#' x = xleft[c_code_filters[["1"]]],
#' distr, mu, sigma
#' )
#' res[c_code_filters[["2"]]] <- pk(
#' xright[c_code_filters[["2"]]],
#' distr, mu, sigma
#' )
#' res[c_code_filters[["0"]]] <- 1 - pk(
#' xleft[c_code_filters[["0"]]],
#' distr, mu, sigma
#' )
#' res[c_code_filters[["3"]]] <- pk(
#' xright[c_code_filters[["3"]]],
#' distr, mu, sigma
#' ) - pk(
#' xleft[c_code_filters[["3"]]],
#' distr, mu, sigma
#' )
#' return(res)
#' }
dkcens2 <-
function(xleft, xright, c_code_filters, distr = NULL, mu = NULL,
sigma = NULL) {
res <- seq_along(xleft)
res[c_code_filters[["1"]]] <- dk(
x = xleft[c_code_filters[["1"]]],
distr, mu, sigma
)
res[c_code_filters[["2"]]] <- pk(
xright[c_code_filters[["2"]]],
distr, mu, sigma
)
res[c_code_filters[["0"]]] <- 1 - pk(
xleft[c_code_filters[["0"]]],
distr, mu, sigma
)
res[c_code_filters[["3"]]] <- pk(
xright[c_code_filters[["3"]]],
distr, mu, sigma
) - pk(
xleft[c_code_filters[["3"]]],
distr, mu, sigma
)
return(res)
}
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/dkcens2.R
|
#' Density evaluation once
#'
#' Computes the likelihood contribution for one data point in the case of
#' censoring.
#'
#' For internal use
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(xleft, xright, c_code, distr = NULL, mu = NULL, sigma = NULL) {
#' if (c_code == 1) {
#' dk(x = xleft, distr, mu, sigma)
#' } else if (c_code == 2) {
#' pk(xright, distr, mu, sigma)
#' } else if (c_code == 0) {
#' 1 - pk(xleft, distr, mu, sigma)
#' } else if (c_code == 3) {
#' pk(xright, distr, mu, sigma) - pk(xleft, distr, mu, sigma)
#' } else {
#' stop("Wrong integer code for censored data")
#' }
#' }
dkcens2_1val <-
function(xleft, xright, c_code, distr = NULL, mu = NULL, sigma = NULL) {
if (c_code == 1) {
dk(x = xleft, distr, mu, sigma)
} else if (c_code == 2) {
pk(xright, distr, mu, sigma)
} else if (c_code == 0) {
1 - pk(xleft, distr, mu, sigma)
} else if (c_code == 3) {
pk(xright, distr, mu, sigma) - pk(xleft, distr, mu, sigma)
} else {
stop("Wrong integer code for censored data")
}
}
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/dkcens2_1val.R
|
#' Non-standard student-t density
#'
#' Computes the density.
#'
#' For internal use
#'
#' @param x Numeric vector. Data set to which the density is evaluated.
#' @param df Numeric constant. Degrees of freedom (> 0, maybe non-integer)
#' @param mean Numeric constant. Location parameter.
#' @param sd Positive numeric constant. Scale parameter.
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(x, df, mean, sd) {
#' dt((x - mean) / sd, df, ncp = 0) / sd
#' }
dt_ <-
function(x, df, mean, sd) {
dt((x - mean) / sd, df, ncp = 0) / sd
}
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/dt_.R
|
#' Density truncated normal
#'
#' Computes the density.
#'
#' For internal use
#'
#' @note Taken from \code{msm} R-package.
#' @author C. H. Jackson
#' @references Taken from the \code{msm} R-package.
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(x, mean = 0, sd = 1, lower = -Inf, upper = Inf, log = FALSE) {
#' ret <- numeric(length(x))
#' ret[x < lower | x > upper] <- if (log) {
#' -Inf
#' } else {
#' 0
#' }
#' ret[upper < lower] <- NaN
#' ind <- x >= lower & x <= upper
#' if (any(ind)) {
#' denom <- pnorm(upper, mean, sd) - pnorm(
#' lower, mean,
#' sd
#' )
#' xtmp <- dnorm(x, mean, sd, log)
#' if (log) {
#' xtmp <- xtmp - log(denom)
#' } else {
#' xtmp <- xtmp / denom
#' }
#' ret[x >= lower & x <= upper] <- xtmp[ind]
#' }
#' ret
#' }
dtnorm <-
function(x, mean = 0, sd = 1, lower = -Inf, upper = Inf, log = FALSE) {
ret <- numeric(length(x))
ret[x < lower | x > upper] <- if (log) {
-Inf
} else {
0
}
ret[upper < lower] <- NaN
ind <- x >= lower & x <= upper
if (any(ind)) {
denom <- pnorm(upper, mean, sd) - pnorm(
lower, mean,
sd
)
xtmp <- dnorm(x, mean, sd, log)
if (log) {
xtmp <- xtmp - log(denom)
} else {
xtmp <- xtmp / denom
}
ret[x >= lower & x <= upper] <- xtmp[ind]
}
ret
}
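# Illustrative check (not part of the original sources): a standard normal
# truncated to [0, Inf) doubles the density on the positive half-line.
# dtnorm(0.5, mean = 0, sd = 1, lower = 0) # = dnorm(0.5) / (1 - pnorm(0)), ~0.704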
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/dtnorm.R
|
log_Vnk_PY <- function(n, k, Alpha, Gama) {
if (k == 1) {
lognum <- 0
}
else {
lognum <- sum(log(Alpha + Gama * 1:(k - 1)))
}
return(lognum - sum(log(Alpha + 1 + 0:(n - 2))))
}
# log_Vnk_PY(7, 6, 0.5, 0.01)
# log_Vnk_PY(6, 5, 0.5, 0.001)
# Pochhammer = function(x, n){
# x_mpfr = as.bigq(x)
# gamma(x_mpfr+n)/gamma(x_mpfr)
# }
Cnk <- function(n, k, Gama) {
factor_k <- gmp::factorialZ(k)
# Using more precise sums (package PreciseSums did not afford any improvement)
# This function still risks underflow/overflow in spite of the arbitrary precision packages
sum((-1)^(1:k) * gmp::chooseZ(n = k, k = 1:k) * Rmpfr::pochMpfr(-(1:k) * Gama, n) / factor_k)
# (-1)^(n - k) * noncentral_generalized_factorial_coefficient(n = n, k = k, s = Gama, r = 0) #This tends to hang for the moment, need to find a solution to memoisation
}
log_Cnk <- function(n, k, Gama) {
log(Cnk(n, k, Gama))
}
# log_Cnk(100, 64, 0.4)
#
#
# log_Cnk(6, 5, 0.5)
#
# library(Rmpfr)
# library(gmp)
Pkn_PY <- function(k, n, Alpha, Gama, silence = TRUE) {
if (!silence) print(k)
# exp(log_Vnk_PY(n = n, k = k, Alpha = Alpha, Gama = Gama) - k * log(Gama) + log_Cnk(n = n, k = k, Gama = Gama))
# Using this form, the inaccuracies in Cnk (i.e. getting a negative number) do not give NaN.
# This error might be cancelled when computing the expected number of components.
exp(log_Vnk_PY(n = n, k = k, Alpha = Alpha, Gama = Gama) - k * log(Gama)) * Cnk(n = n, k = k, Gama = Gama)
}
# Pkn_PY(3, 5, 0.2, 0.4)
Pkn_Dirichlet <- function(k, n, Alpha) {
exp(k * log(Alpha) + log(abs(gmp::Stirling1(n, k))) - sum(log(Alpha + 0:(n - 1))))
}
# Pkn_Dirichlet(3, 5, 0.2)
expected_number_of_components_PY <- function(n, Alpha, Gama, ntrunc = NULL, silence = TRUE) {
if (is.null(ntrunc)) {
ntrunc <- n
} else if (ntrunc > n) ntrunc <- n
res <- 0
for (k in 1:ntrunc) {
if (!silence) print(k)
# print(k*Pkn_PY(k, n, Alpha, Gama))
res <- res + k * Pkn_PY(k, n, Alpha, Gama)
}
return(res)
}
#' Computes the expected number of components for a Dirichlet process.
#'
#'
#' @param n Number of data points
#' @param Alpha Numeric constant. Total mass of the centering measure.
#' @param ntrunc Level of truncation when computing the expectation. Defaults
#' to n. If greater than n, it is fixed to n.
#' @param silence Boolean. Whether to print the current calculation step, as
#' the function can be slow.
#' @return A real value which approximates the expected number of components
#' @references P. De Blasi, S. Favaro, A. Lijoi, R. H. Mena, I. Prünster, and M.
#' Ruggiero, “Are Gibbs-type priors the most natural generalization of the
#' Dirichlet process?,” IEEE Trans. Pattern Anal. Mach. Intell., vol. 37, no.
#' 2, pp. 212–229, 2015.
#' @examples
#'
#' expected_number_of_components_Dirichlet(100, 1.2)
#' @export expected_number_of_components_Dirichlet
expected_number_of_components_Dirichlet <- function(n, Alpha, ntrunc = NULL, silence = TRUE) {
if (is.null(ntrunc)) {
ntrunc <- n
} else if (ntrunc > n) ntrunc <- n
res <- 0
for (k in 1:ntrunc) {
if (!silence) print(k)
# print(k*Pkn_PY(k, n, Alpha, Gama))
res <- res + k * Pkn_Dirichlet(k, n, Alpha)
}
return(res)
}
# expected_number_of_components_PY(10, 0., 0.4)
#' Computes the expected number of components for a stable process.
#'
#'
#' @param n Number of data points
#' @param Gama Numeric constant. 0 <= Gama <=1.
#' @param ntrunc Level of truncation when computing the expectation. Defaults
#' to n. If greater than n, it is fixed to n.
#' @return A real value of type mpfr1 which approximates the expected number of
#' components
#' @note In spite of the high precision arithmetic packages used in this
#' function, it can be numerically unstable for small values of Gama. This is
#' because evaluating a sum with alternating signs, in the generalized
#' factorial coefficients, is tricky.
#' @references P. De Blasi, S. Favaro, A. Lijoi, R. H. Mena, I. Prünster, and
#' M. Ruggiero, “Are Gibbs-type priors the most natural generalization of the
#' Dirichlet process?,” IEEE Trans. Pattern Anal. Mach. Intell., vol. 37, no.
#' 2, pp. 212–229, 2015.
#' @examples
#'
#' expected_number_of_components_stable(100, 0.8)
#' @export expected_number_of_components_stable
expected_number_of_components_stable <- function(n, Gama, ntrunc = NULL) {
if (!requireNamespace("Rmpfr", quietly = TRUE) && !requireNamespace("gmp", quietly = TRUE)) {
stop("Packages Rmpfr and gmp are needed for this function to work. Please install them.",
call. = FALSE
)
}
expected_number_of_components_PY(n, 0, Gama, ntrunc = ntrunc)
}
#' This plots the prior distribution on the number of components for the stable
#' process. The Dirichlet process is provided for comparison.
#'
#'
#' @param n Number of data points
#' @param Gama Numeric constant. 0 <= Gama <=1.
#' @param Alpha Numeric constant. Total mass of the centering measure for the
#' Dirichlet process.
#' @param grid Integer vector. Grid of numbers of clusters at which to
#' evaluate the prior probabilities. Defaults to 1:n.
#' @param silence Boolean. Whether to print the current calculation step for the Stable process, as the function can be long
#'
#' @return A plot with the prior distribution on the number of components.
#' @examples
#'
#' plot_prior_number_of_components(50, 0.4)
#' @export plot_prior_number_of_components
plot_prior_number_of_components <- function(n, Gama, Alpha = 1, grid = NULL, silence = TRUE) {
if (!requireNamespace("Rmpfr", quietly = TRUE) && !requireNamespace("gmp", quietly = TRUE)) {
stop("Packages Rmpfr and gmp are needed for this function to work. Please install them.",
call. = FALSE
)
}
if (is.null(grid)) grid <- 1:n
grid <- unique(round(grid)) # Make sure it is a grid of integers
writeLines("Computing the prior probability on the number of clusters for the Dirichlet process")
Pk_Dirichlet <- data.frame(K = grid, Pk = Vectorize(Pkn_Dirichlet, vectorize.args = "k")(grid, n, Alpha), Process = "Dirichlet")
writeLines("Computing the prior probability on the number of clusters for the Stable process")
Pk_Stable <- data.frame(K = grid, Pk = unlist(lapply(Vectorize(Pkn_PY, vectorize.args = "k")(grid, n, 0, Gama, silence), asNumeric_no_warning)), Process = "Stable")
Pk_Stable$Pk <- convert_nan_to_0(Pk_Stable$Pk) # Correct when the numbers are 0 up to machine precision.
to_plot <- rbind(
Pk_Dirichlet,
Pk_Stable
)
ggplot(data = to_plot, aes_string(x = "K", y = "Pk", colour = "factor(Process)", group = "Process")) +
geom_point() +
geom_line() +
theme_classic() +
viridis::scale_colour_viridis(discrete = T, name = "Process") +
ylab(expression(P[K]))
}
#' If the function Rmpfr::asNumeric returns a warning about inefficiency, silence it.
#'
#' The function Rmpfr::asNumeric prints the following warning: In asMethod(object) : coercing "mpfr1" via "mpfr" (inefficient). It is not clear how to avoid it nor how to silence it, hence this function.
#' A cleaner solution may be available at: https://stackoverflow.com/questions/4948361/how-do-i-save-warnings-and-errors-as-output-from-a-function/4952908#4952908
#'
#' @param x An object of class Rmpfr::mpfr1
#'
#' @return a "numeric" number
asNumeric_no_warning <- function(x) {
tryCatch(
{
Rmpfr::asNumeric(x)
},
warning = function(w) {
if (grepl(pattern = "inefficient", x = as.character(w))) {
suppressWarnings(Rmpfr::asNumeric(x))
}
else {
w
}
},
error = function(e) {
print(paste("error:", e))
}
)
}
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/expected_number_of_clusters_stable_process.R
|
#' Conditional density evaluation in the semiparametric model
#'
#' This function evaluates a density path conditionally on a posterior
#' realization of the normalized measure.
#'
#' For internal use.
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(x, distr = 1, Tau, J, sigma) {
#' pJ <- J / sum(J)
#' K <- matrix(NA, nrow = length(Tau), ncol = length(x))
#' for (i in seq(Tau)) {
#' K[i, ] <- dk(x, distr = distr, mu = Tau[i], sigma = sigma)
#' }
#' fcondXA <- apply(K, 2, function(x) sum(x * pJ))
#' return(fcondXA)
#' }
fcondXA <-
function(x, distr, Tau, J, sigma) {
pJ <- J / sum(J)
K <- matrix(NA, nrow = length(Tau), ncol = length(x))
    for (i in seq_along(Tau)) {
K[i, ] <- dk(x, distr = distr, mu = Tau[i], sigma = sigma)
}
fcondXA <- apply(K, 2, function(x) sum(x * pJ))
return(fcondXA)
}
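# Illustrative call (not part of the original sources): a two-component
# mixture evaluated at 0, with unnormalized jump sizes J.
# fcondXA(0, distr = 1, Tau = c(-1, 1), J = c(1, 3), sigma = 1)
# # = 0.25 * dnorm(0, -1, 1) + 0.75 * dnorm(0, 1, 1), ~0.242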
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/fcondXA.R
|
#' Conditional density evaluation in the fully nonparametric model
#'
#' This function evaluates a density path conditionally on a posterior
#' realization of the normalized measure.
#'
#' For internal use.
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(x, distr = 1, Tauy, Tauz, J) {
#' pJ <- J / sum(J)
#' K <- matrix(NA, nrow = length(Tauy), ncol = length(x))
#' for (i in seq(Tauy)) {
#' K[i, ] <- dk(x, distr = distr, mu = Tauy[i], sigma = Tauz[i])
#' }
#' fcondXA2 <- apply(K, 2, function(x) sum(x * pJ))
#' return(fcondXA2)
#' }
fcondXA2 <-
function(x, distr, Tauy, Tauz, J) {
pJ <- J / sum(J)
K <- matrix(NA, nrow = length(Tauy), ncol = length(x))
    for (i in seq_along(Tauy)) {
K[i, ] <- dk(x, distr = distr, mu = Tauy[i], sigma = Tauz[i])
}
fcondXA2 <- apply(K, 2, function(x) sum(x * pJ))
return(fcondXA2)
}
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/fcondXA2.R
|
#' Conditional density evaluation in the fully nonparametric model for censored
#' data
#'
#' This function evaluates a density path conditionally on a posterior
#' realization of the normalized measure.
#'
#' For internal use
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(xleft, xright, censor_code_filters, distr, Tauy, Tauz,
#' J) {
#' pJ <- J / sum(J)
#' K <- matrix(NA, nrow = length(Tauy), ncol = length(xleft))
#' for (i in seq(Tauy)) {
#' K[i, ] <- dkcens2(
#' xleft = xleft, xright = xright, c_code_filters = censor_code_filters,
#' distr = distr, mu = Tauy[i], sigma = Tauz[i]
#' )
#' }
#' fcondXA2cens <- apply(K, 2, function(x) sum(x * pJ))
#' return(fcondXA2cens)
#' }
fcondXA2cens2 <-
function(xleft, xright, censor_code_filters, distr, Tauy, Tauz,
J) {
pJ <- J / sum(J)
K <- matrix(NA, nrow = length(Tauy), ncol = length(xleft))
    for (i in seq_along(Tauy)) {
K[i, ] <- dkcens2(
xleft = xleft, xright = xright, c_code_filters = censor_code_filters,
distr = distr, mu = Tauy[i], sigma = Tauz[i]
)
}
fcondXA2cens <- apply(K, 2, function(x) sum(x * pJ))
return(fcondXA2cens)
}
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/fcondXA2cens2.R
|
#' Conditional posterior distribution of the latents Y
#'
#' This function simulates from the conditional posterior distribution of the
#' latents Y.
#'
#' For internal use.
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(x, distr = 1, Tau, J, sigma) {
#' K <- matrix(NA, nrow = length(Tau), ncol = length(x))
#' for (i in seq(Tau)) {
#' K[i, ] <- dk(x, distr = distr, mu = Tau[i], sigma = sigma) *
#' J[i]
#' }
#' pK <- prop.table(K, margin = 2)
#' y <- apply(pK, 2, function(x) sample(Tau, size = 1, prob = x))
#' return(y)
#' }
fcondYXA <-
function(x, distr, Tau, J, sigma) {
K <- matrix(NA, nrow = length(Tau), ncol = length(x))
    for (i in seq_along(Tau)) {
K[i, ] <- dk(x, distr = distr, mu = Tau[i], sigma = sigma) *
J[i]
}
pK <- prop.table(K, margin = 2)
y <- apply(pK, 2, function(x) sample(Tau, size = 1, prob = x))
return(y)
}
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/fcondYXA.R
|
#' Conditional posterior distribution of the latents Y in the censoring case
#'
#' This function simulates from the conditional posterior distribution of the
#' latents Y.
#'
#' For internal use
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(xleft, xright, censor_code_filters, distr, Tau, J,
#' sigma) {
#' K <- matrix(NA, nrow = length(Tau), ncol = length(xleft))
#' for (i in seq(Tau)) {
#' K[i, ] <- dkcens2(
#' xleft = xleft, xright = xright, c_code_filters = censor_code_filters,
#' distr = distr, mu = Tau[i], sigma = sigma
#' ) * J[i]
#' }
#' pK <- prop.table(K, margin = 2)
#' y <- apply(pK, 2, function(x) sample(Tau, size = 1, prob = x))
#' return(y)
#' }
fcondYXAcens2 <-
function(xleft, xright, censor_code_filters, distr, Tau, J,
sigma) {
K <- matrix(NA, nrow = length(Tau), ncol = length(xleft))
    for (i in seq_along(Tau)) {
K[i, ] <- dkcens2(
xleft = xleft, xright = xright, c_code_filters = censor_code_filters,
distr = distr, mu = Tau[i], sigma = sigma
) * J[i]
}
pK <- prop.table(K, margin = 2)
y <- apply(pK, 2, function(x) sample(Tau, size = 1, prob = x))
return(y)
}
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/fcondYXAcens2.R
|
#' Conditional posterior distribution of the bivariate latents (Y,Z)
#'
#' This function simulates from the conditional posterior distribution of the
#' latents (Y,Z).
#'
#' For internal use.
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(x, distr = 1, Tauy, Tauz, J) {
#' K <- matrix(NA, nrow = length(Tauy), ncol = length(x))
#' for (i in seq(Tauy)) {
#' K[i, ] <- dk(x, distr = distr, mu = Tauy[i], sigma = Tauz[i]) *
#' J[i]
#' }
#' if (any(is.na(K))) {
#'     print(list(K = K, Tauy = Tauy, Tauz = Tauz, J = J))
#' }
#' pK <- prop.table(K, margin = 2)
#' j <- apply(pK, 2, function(x) {
#' sample(length(Tauy),
#' size = 1,
#' prob = x
#' )
#' })
#' return(matrix(c(y = Tauy[j], z = Tauz[j]),
#' nrow = length(x),
#' ncol = 2
#' ))
#' }
fcondYZXA <-
function(x, distr, Tauy, Tauz, J) {
K <- matrix(NA, nrow = length(Tauy), ncol = length(x))
    for (i in seq_along(Tauy)) {
K[i, ] <- dk(x, distr = distr, mu = Tauy[i], sigma = Tauz[i]) *
J[i]
}
if (any(is.na(K))) {
      print(list(K = K, Tauy = Tauy, Tauz = Tauz, J = J))
}
pK <- prop.table(K, margin = 2)
j <- apply(pK, 2, function(x) {
sample(length(Tauy),
size = 1,
prob = x
)
})
return(matrix(c(y = Tauy[j], z = Tauz[j]),
nrow = length(x),
ncol = 2
))
}
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/fcondYZXA.R
|
#' Conditional posterior distribution of the bivariate latents (Y,Z) in the
#' case of censoring
#'
#' This function simulates from the conditional posterior distribution of the
#' latents (Y,Z).
#'
#' For internal use
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(xleft, xright, censor_code_filters, distr, Tauy, Tauz,
#' J) {
#' K <- matrix(NA, nrow = length(Tauy), ncol = length(xleft))
#' for (i in seq(Tauy)) {
#' K[i, ] <- dkcens2(xleft, xright,
#' c_code_filters = censor_code_filters,
#' distr = distr, mu = Tauy[i], sigma = Tauz[i]
#' ) * J[i]
#' }
#' if (any(is.na(K))) {
#'     print(list(K = K, Tauy = Tauy, Tauz = Tauz, J = J))
#' }
#' pK <- prop.table(K, margin = 2)
#' j <- apply(pK, 2, function(x) {
#' sample(length(Tauy),
#' size = 1,
#' prob = x
#' )
#' })
#' return(matrix(c(y = Tauy[j], z = Tauz[j]),
#' nrow = length(xleft),
#' ncol = 2
#' ))
#' }
fcondYZXAcens2 <-
function(xleft, xright, censor_code_filters, distr, Tauy, Tauz,
J) {
K <- matrix(NA, nrow = length(Tauy), ncol = length(xleft))
    for (i in seq_along(Tauy)) {
K[i, ] <- dkcens2(xleft, xright,
c_code_filters = censor_code_filters,
distr = distr, mu = Tauy[i], sigma = Tauz[i]
) * J[i]
}
if (any(is.na(K))) {
      print(list(K = K, Tauy = Tauy, Tauz = Tauz, J = J))
}
pK <- prop.table(K, margin = 2)
j <- apply(pK, 2, function(x) {
sample(length(Tauy),
size = 1,
prob = x
)
})
return(matrix(c(y = Tauy[j], z = Tauz[j]),
nrow = length(xleft),
ncol = 2
))
}
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/fcondYZXAcens2.R
|
kern_idx <- setNames(object = c("Normal", "Gamma", "Beta", "Double Exponential", "Lognormal"), nm = as.character(seq_along(c("Normal", "Gamma", "Beta", "Double Exponential", "Lognormal"))))
#' Gives the kernel name from the integer code
#'
#' This function is used in the print methods for MixNRMI1, MixNRMI2, MixNRMI1cens, MixNRMI2cens, and all the multMixNRMIx versions
#'
#' @inheritParams MixNRMI1
#'
#' @return A character with the name of the distribution used as the kernel
#'
#' @examples
#' BNPdensity:::give_kernel_name(4)
give_kernel_name <- function(distr.k) {
kern_idx[as.character(distr.k)]
}
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/give_kernel_name.R
|
#' Conditional posterior distribution of latent U
#'
#' This function simulates from the conditional posterior distribution of the
#' latent U.
#'
#' For internal use.
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(ut, n = 200, r = 20, alpha = 1, kappa = 1, gama = 1 / 2,
#' delta = 2) {
#' w <- ut
#' ratio <- NaN
#' while (is.nan(ratio)) {
#' v <- ustar <- rgamma(1, shape = delta, rate = delta / ut)
#' vw <- v / w
#' vb <- v + kappa
#' wb <- w + kappa
#' A <- vw^(n - 2 * delta)
#' B <- (vb / wb)^(r * gama - n)
#' D <- vb^gama - wb^gama
#' E <- 1 / vw - vw
#' ratio <- A * B * exp(-alpha / gama * D - delta * E)
#' }
#' p <- min(1, ratio)
#' u <- ifelse(runif(1) <= p, ustar, ut)
#' return(u)
#' }
gs3 <-
function(ut, n, r, alpha, kappa, gama, delta) {
w <- ut
ratio <- NaN
while (is.nan(ratio)) {
v <- ustar <- rgamma(1, shape = delta, rate = delta / ut)
vw <- v / w
vb <- v + kappa
wb <- w + kappa
A <- vw^(n - 2 * delta)
B <- (vb / wb)^(r * gama - n)
D <- vb^gama - wb^gama
E <- 1 / vw - vw
ratio <- A * B * exp(-alpha / gama * D - delta * E)
}
p <- min(1, ratio)
u <- ifelse(runif(1) <= p, ustar, ut)
return(u)
}
#' Target logdensity of U given the data
#'
#' @keywords internal
#'
logf_u_cond_y <- function(u, n, r, gamma, kappa, a) {
(n - 1) * log(u) + (r * gamma - n) * log(u + kappa) - a / gamma * (u + kappa)^gamma
}
#' Contribution of the target logdensity of logU to the Metropolis-Hastings ratio
#'
#' @keywords internal
#'
logf_logu_cond_y <- function(logu, n, r, gamma, kappa, a) {
logu + logf_u_cond_y(u = exp(logu), n = n, r = r, gamma = gamma, kappa = kappa, a = a)
}
#' Contribution of the proposal kernel logdensity to the Metropolis-Hastings ratio
#'
#' @keywords internal
#'
logdprop_logu <- function(logu_prime, logu, delta) {
dnorm(x = logu_prime, mean = logu, sd = delta, log = T)
}
#' Proposal distribution for logU
#'
#' This function makes a proposal for a new value of logU
#'
#' @inheritParams logacceptance_ratio_logu
#' @keywords internal
#'
rprop_logu <- function(logu, delta) {
rnorm(n = 1, mean = logu, sd = delta)
}
#' Metropolis-Hastings ratio for the conditional of logU
#'
#' This function computes the Metropolis-Hastings ratio to decide whether to accept or reject a new value for logU.
#'
#' @param logu Real, log of the latent variable U at the current iteration.
#' @param logu_prime Real, log of the new proposed latent variable U.
#' @param a Positive real. Total mass of the centering measure.
#' @inheritParams gs3_log
#'
#' @keywords internal
#'
logacceptance_ratio_logu <- function(logu, logu_prime, n, r, gamma, kappa, a, delta) {
log_ratio <- logf_logu_cond_y(logu_prime, n, r, gamma, kappa, a) - logf_logu_cond_y(logu, n, r, gamma, kappa, a) + logdprop_logu(logu, logu_prime, delta) - logdprop_logu(logu_prime, logu, delta)
return(min(0, log_ratio))
}
#' Conditional posterior distribution of latent logU
#'
#' This function simulates from the conditional posterior distribution of a log transformation of the
#' latent U.
#'
#' @param logut Real, log of the latent variable U at the current iteration.
#' @param n Integer, number of data points.
#' @param r Integer, number of clusters.
#' @param alpha Positive real. Total mass of the centering measure.
#' @param kappa Positive real. A parameter of the NRMI process.
#' @param gama Real. \eqn{0\leq \texttt{gama} \leq 1}{0 <= gama <=
#' 1}. See details.
#'
#' @param delta Scale of the Metropolis-Hastings proposal distribution
#'
#' @keywords internal
#'
gs3_log <-
function(logut, n, r, alpha, kappa, gama, delta) {
logu_prime <- rprop_logu(logu = logut, delta = delta)
logq1 <- logacceptance_ratio_logu(logu = logut, logu_prime = logu_prime, n = n, r = r, gamma = gama, kappa = kappa, a = alpha, delta = delta)
if (log(runif(n = 1)) < logq1) {
return(logu_prime)
}
else {
return(logut)
}
}
#' Conditional posterior distribution of latent U
#'
#' This function simulates from the conditional posterior distribution of the
#' latent U, with an adaptive proposal
#'
#' @keywords internal
#'
gs3_adaptive3 <- function(ut, n, r, alpha, kappa, gama, delta, U, iter, adapt = FALSE) {
target_acc_rate <- 0.44
batch_size <- 100
if (adapt && (iter %% batch_size == 0)) {
acc_rate <- length(unique(U[(iter - batch_size + 1):iter])) / batch_size
logincrement <- 2 * min(0.25, 1 / sqrt(iter))
# increment = min(0.5, 5 / sqrt(iter))
if (acc_rate < 0.44) {
delta_i <- delta * exp(-logincrement)
}
else {
delta_i <- delta * exp(+logincrement)
}
}
else {
delta_i <- delta
}
logu_prime <- gs3_log(logut = log(ut), n = n, r = r, alpha = alpha, kappa = kappa, gama = gama, delta = delta_i)
return(list(u_prime = exp(logu_prime), delta = delta_i))
}
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/gs3.R
|
#' Resampling Ystar function
#'
#' This function resamples the distinct Ystar in the semiparametric model.
#'
#' For internal use.
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(ystar, x, idx, distr.k, sigma.k, distr.p0, mu.p0, sigma.p0) {
#' r <- length(ystar)
#' nstar <- as.numeric(table(idx))
#' for (j in seq(r)) {
#' id <- which(!is.na(match(idx, j)))
#' xj <- x[id]
#' xbar <- sum(xj) / nstar[j]
#' y2star <- rk(1, distr = distr.k, mu = xbar, sigma = sigma.k / sqrt(nstar[j]))
#' f.ratio <- rfyzstar(y2star, ystar[j], xj,
#' distr = distr.k, sigma = sigma.k,
#' distr.p0 = distr.p0, mu.p0 = mu.p0, sigma.p0 = sigma.p0
#' )
#' k.ratio <- dk(ystar[j],
#' distr = distr.k,
#' mu = xbar, sigma = sigma.k / sqrt(nstar[j])
#' ) / dk(y2star,
#' distr = distr.k, mu = xbar, sigma = sigma.k / sqrt(nstar[j])
#' )
#' q2 <- min(1, f.ratio * k.ratio)
#' ystar[j] <- ifelse(runif(1) <= q2, y2star, ystar[j])
#' }
#' return(ystar)
#' }
gs4 <-
function(ystar, x, idx, distr.k, sigma.k, distr.p0, mu.p0, sigma.p0) {
r <- length(ystar)
nstar <- as.numeric(table(idx))
for (j in seq(r)) {
id <- which(!is.na(match(idx, j)))
xj <- x[id]
xbar <- sum(xj) / nstar[j]
y2star <- rk(1, distr = distr.k, mu = xbar, sigma = sigma.k / sqrt(nstar[j]))
f.ratio <- rfystar(y2star, ystar[j], xj,
distr.k = distr.k,
sigma.k = sigma.k, distr.p0 = distr.p0, mu.p0 = mu.p0,
sigma.p0 = sigma.p0
)
k.ratio <- dk(ystar[j], distr = distr.k, mu = xbar, sigma = sigma.k / sqrt(nstar[j])) / dk(y2star,
distr = distr.k, mu = xbar, sigma = sigma.k / sqrt(nstar[j])
)
q2 <- min(1, f.ratio * k.ratio)
ystar[j] <- ifelse(runif(1) <= q2, y2star, ystar[j])
}
return(ystar)
}
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/gs4.R
|
#' Resampling Ystar function in the case of censoring
#'
#' This function resamples the distinct Ystar in the semiparametric model.
#'
#' For internal use
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(ystar, xleft, xright, censor_code, idx, distr.k, sigma.k,
#' distr.p0, mu.p0, sigma.p0) {
#' r <- length(ystar)
#' nstar <- as.numeric(table(idx))
#' for (j in seq(r)) {
#' id <- which(!is.na(match(idx, j)))
#' xjleft <- xleft[id]
#' xjright <- xright[id]
#' xbar <- 0.5 * sum(xjleft + xjright, na.rm = T) / nstar[j]
#' y2star <- rk(1, distr = distr.k, mu = xbar, sigma = sigma.k / sqrt(nstar[j]))
#' f.ratio <- rfystarcens2(
#' v = y2star, v2 = ystar[j], xleft = xjleft,
#' xright = xjright, censor_code = censor_code[id],
#' distr.k = distr.k, sigma.k = sigma.k, distr.p0 = distr.p0,
#' mu.p0 = mu.p0, sigma.p0 = sigma.p0
#' )
#' k.ratio <- dk(ystar[j], distr = distr.k, mu = xbar, sigma = sigma.k / sqrt(nstar[j])) /
#' dk(y2star,
#' distr = distr.k, mu = xbar, sigma = sigma.k / sqrt(nstar[j])
#' )
#' if (!is.nan(f.ratio * k.ratio)) {
#' q2 <- min(1, f.ratio * k.ratio)
#' ystar[j] <- ifelse(runif(1) <= q2, y2star, ystar[j])
#' }
#' }
#' return(ystar)
#' }
gs4cens2 <-
function(ystar, xleft, xright, censor_code, idx, distr.k, sigma.k,
distr.p0, mu.p0, sigma.p0) {
r <- length(ystar)
nstar <- as.numeric(table(idx))
for (j in seq(r)) {
id <- which(!is.na(match(idx, j)))
xjleft <- xleft[id]
xjright <- xright[id]
xbar <- 0.5 * sum(xjleft + xjright, na.rm = T) / nstar[j]
y2star <- rk(1, distr = distr.k, mu = xbar, sigma = sigma.k / sqrt(nstar[j]))
f.ratio <- rfystarcens2(
v = y2star, v2 = ystar[j], xleft = xjleft,
xright = xjright, censor_code = censor_code[id],
distr.k = distr.k, sigma.k = sigma.k, distr.p0 = distr.p0,
mu.p0 = mu.p0, sigma.p0 = sigma.p0
)
k.ratio <- dk(ystar[j], distr = distr.k, mu = xbar, sigma = sigma.k / sqrt(nstar[j])) / dk(y2star,
distr = distr.k, mu = xbar, sigma = sigma.k / sqrt(nstar[j])
)
if (!is.nan(f.ratio * k.ratio)) {
q2 <- min(1, f.ratio * k.ratio)
ystar[j] <- ifelse(runif(1) <= q2, y2star, ystar[j])
}
}
return(ystar)
}
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/gs4cens2.R
|
#' Conditional posterior distribution of sigma
#'
#' This function simulates from the conditional posterior distribution of
#' sigma.
#'
#' For internal use.
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(sigma, x, y, distr = 1, asigma = 1, bsigma = 2, delta = 4) {
#' sigmaStar <- rgamma(1, shape = delta, rate = delta / sigma)
#' sigmaT <- sigma
#' qgammas <- sigmaT / sigmaStar
#' Qgammas <- sigmaStar / sigmaT
#' Term2 <- qgammas^(2 * delta - 1) * exp(-delta * (qgammas -
#' Qgammas))
#' Kgamma <- Qgammas^(asigma - 1) * exp(-bsigma * (sigmaStar -
#' sigmaT))
#' Prod <- 1
#' for (i in seq(length(x))) {
#' Prod <- Prod * (dk(x[i], distr = distr, mu = y[i], sigma = sigmaStar) / dk(x[i],
#' distr = distr, mu = y[i], sigma = sigmaT
#' ))
#' }
#' q3 <- min(1, Kgamma * Prod * Term2)
#' sigma <- ifelse(runif(1) <= q3, sigmaStar, sigmaT)
#' return(sigma)
#' }
gs5 <-
function(sigma, x, y, distr, asigma, bsigma, delta) {
sigmaStar <- rgamma(1, shape = delta, rate = delta / sigma)
sigmaT <- sigma
qgammas <- sigmaT / sigmaStar
Qgammas <- sigmaStar / sigmaT
Term2 <- qgammas^(2 * delta - 1) * exp(-delta * (qgammas -
Qgammas))
Kgamma <- Qgammas^(asigma - 1) * exp(-bsigma * (sigmaStar -
sigmaT))
Prod <- 1
for (i in seq(length(x))) {
Prod <- Prod * (dk(x[i], distr = distr, mu = y[i], sigma = sigmaStar) / dk(x[i],
distr = distr, mu = y[i], sigma = sigmaT
))
}
q3 <- min(1, Kgamma * Prod * Term2)
sigma <- ifelse(runif(1) <= q3, sigmaStar, sigmaT)
return(sigma)
}
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/gs5.R
|
#' Conditional posterior distribution of sigma in the case of censoring
#'
#' This function simulates from the conditional posterior distribution of
#' sigma.
#'
#' For internal use
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(sigma, xleft, xright, censor_code, y, distr = 1, asigma = 1,
#' bsigma = 2, delta = 4) {
#' sigmaStar <- rgamma(1, shape = delta, rate = delta / sigma)
#' sigmaT <- sigma
#' qgammas <- sigmaT / sigmaStar
#' Qgammas <- sigmaStar / sigmaT
#' Term2 <- qgammas^(2 * delta - 1) * exp(-delta * (qgammas -
#' Qgammas))
#' Kgamma <- Qgammas^(asigma - 1) * exp(-bsigma * (sigmaStar -
#' sigmaT))
#' Prod <- 1
#' for (i in seq_along(xleft)) {
#' Prod <- Prod * dkcens2_1val(
#' xleft = xleft[i], xright = xright[i],
#' c_code = censor_code[i], distr = distr, mu = y[i],
#' sigma = sigmaStar
#' ) / dkcens2_1val(
#' xleft = xleft[i],
#' xright = xright[i], c_code = censor_code[i], distr = distr,
#' mu = y[i], sigma = sigmaT
#' )
#' }
#' q3 <- min(1, Kgamma * Prod * Term2)
#' sigma <- ifelse(runif(1) <= q3, sigmaStar, sigmaT)
#' return(sigma)
#' }
gs5cens2 <-
function(sigma, xleft, xright, censor_code, y, distr = 1, asigma = 1,
bsigma = 2, delta = 4) {
sigmaStar <- rgamma(1, shape = delta, rate = delta / sigma)
sigmaT <- sigma
qgammas <- sigmaT / sigmaStar
Qgammas <- sigmaStar / sigmaT
Term2 <- qgammas^(2 * delta - 1) * exp(-delta * (qgammas -
Qgammas))
Kgamma <- Qgammas^(asigma - 1) * exp(-bsigma * (sigmaStar -
sigmaT))
Prod <- 1
for (i in seq_along(xleft)) {
Prod <- Prod * dkcens2_1val(
xleft = xleft[i], xright = xright[i],
c_code = censor_code[i], distr = distr, mu = y[i],
sigma = sigmaStar
) / dkcens2_1val(
xleft = xleft[i],
xright = xright[i], c_code = censor_code[i], distr = distr,
mu = y[i], sigma = sigmaT
)
}
q3 <- min(1, Kgamma * Prod * Term2)
sigma <- ifelse(runif(1) <= q3, sigmaStar, sigmaT)
return(sigma)
}
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/gs5cens2.R
|
#' Updates the hyper-parameters of py0
#'
#' This function updates the hyper-parameters of the centering distribution
#' py0.
#'
#' For internal use.
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(ystar, rstar, distr) {
#' if (distr == 1) {
#' mu0 <- 0
#' s0 <- 0.01
#' q1 <- 0.1
#' q2 <- 0.1
#' a <- q1 + rstar / 2
#' b <- q2 + (rstar - 1) * var(ystar) / 2 + s0 * rstar * (mean(ystar) - mu0)^2 / 2 / (s0 + rstar)
#' t2 <- rgamma(1, shape = a, rate = b)
#' a <- (s0 * mu0 + sum(ystar)) / (s0 + rstar)
#' b <- (s0 + rstar) * t2
#' t1 <- rnorm(1, mean = a, sd = 1 / sqrt(b))
#' mu.py0 <- t1
#' sigma.py0 <- 1 / sqrt(t2)
#' }
#' else if (distr == 2) {
#' q1 <- 0.01
#' q2 <- 0.01
#' t1 <- rgamma(1, shape = q1 + rstar, rate = q2 + sum(ystar))
#' mu.py0 <- sigma.py0 <- 1 / t1
#' }
#' else if (distr == 3) {
#' q1 <- 0.01
#' q2 <- 0.01
#' t1 <- rgamma(1, shape = q1 + rstar, rate = q2 - sum(log(ystar)))
#' mu.py0 <- t1 / (t1 + 1)
#' sigma.py0 <- sqrt(t1 / (t1 + 1)^2 / (t1 + 2))
#' }
#' else {
#' stop("Argument \"distr\" should be defined numeric with possible values 1,2 or 3")
#' }
#' return(list(mu.py0 = mu.py0, sigma.py0 = sigma.py0))
#' }
gsHP <-
function(ystar, rstar, distr) {
if (distr == 1) {
mu0 <- 0
s0 <- 0.01
q1 <- 0.1
q2 <- 0.1
a <- q1 + rstar / 2
if (rstar > 1) {
b <- q2 + (rstar - 1) * var(ystar) / 2 + s0 * rstar *
(mean(ystar) - mu0)^2 / 2 / (s0 + rstar)
}
else {
b <- q2 + s0 * rstar * (mean(ystar) - mu0)^2 / 2 / (s0 +
rstar)
}
t2 <- rgamma(1, shape = a, rate = b)
a <- (s0 * mu0 + sum(ystar)) / (s0 + rstar)
b <- (s0 + rstar) * t2
t1 <- rnorm(1, mean = a, sd = 1 / sqrt(b))
mu.py0 <- t1
sigma.py0 <- 1 / sqrt(t2)
}
else if (distr == 2) {
q1 <- 0.01
q2 <- 0.01
t1 <- rgamma(1, shape = q1 + rstar, rate = q2 + sum(ystar))
mu.py0 <- sigma.py0 <- 1 / t1
}
else if (distr == 3) {
q1 <- 0.01
q2 <- 0.01
t1 <- rgamma(1, shape = q1 + rstar, rate = q2 - sum(log(ystar)))
mu.py0 <- t1 / (t1 + 1)
sigma.py0 <- sqrt(t1 / (t1 + 1)^2 / (t1 + 2))
}
else {
stop("Argument \"distr\" should be defined numeric with possible values 1,2 or 3")
}
return(list(mu.py0 = mu.py0, sigma.py0 = sigma.py0))
}
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/gsHP.R
|
#' Jointly resampling Ystar and Zstar function
#'
#' This function resamples jointly the distinct pairs (Ystar,Zstar) in the
#' fully nonparametric model.
#'
#' For internal use.
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(ystar, zstar, nstar, rstar, idx, x, delta, kappa, distr.k,
#' distr.py0, mu.py0, sigma.py0, distr.pz0, mu.pz0, sigma.pz0) {
#' for (j in seq(rstar)) {
#' flag <- 1
#' while (flag == 1) {
#' id <- which(!is.na(match(idx, j)))
#' xj <- x[id]
#' xbar <- sum(xj) / nstar[j]
#' z2star <- rk(1, distr = distr.pz0, mu = zstar[j], sigma = zstar[j] / sqrt(delta))
#' y2star <- rk(1, distr = distr.py0, mu = xbar, sigma = kappa * z2star / sqrt(nstar[j]))
#' f.ratio <- rfyzstar(y2star, ystar[j], z2star, zstar[j], xj,
#' distr.k = distr.k,
#' distr.py0 = distr.py0, mu.py0 = mu.py0, sigma.py0 = sigma.py0,
#' distr.pz0 = distr.pz0, mu.pz0 = mu.pz0, sigma.pz0 = sigma.pz0
#' )
#' k.ratioNum <- dk(zstar[j],
#' distr = distr.pz0, mu = z2star,
#' sigma = z2star / sqrt(delta)
#' )
#' k.ratioDen <- dk(z2star,
#' distr = distr.pz0, mu = zstar[j],
#' sigma = zstar[j] / sqrt(delta)
#' )
#' k.ratio <- k.ratioNum / k.ratioDen
#' k.ratioNum <- dk(ystar[j],
#' distr = distr.py0, mu = xbar,
#' sigma = kappa * zstar[j] / sqrt(nstar[j])
#' )
#' k.ratioDen <- dk(y2star,
#' distr = distr.py0, mu = xbar,
#' sigma = kappa * z2star / sqrt(nstar[j])
#' )
#' k.ratio <- k.ratio * k.ratioNum / k.ratioDen
#' q2 <- min(1, f.ratio * k.ratio)
#' if (is.na(q2)) {
#' flag <- 1
#' } else {
#' if (runif(1) <= q2) {
#' ystar[j] <- y2star
#' zstar[j] <- z2star
#' flag <- 0
#' }
#' }
#' }
#' }
#' return(list(ystar = ystar, zstar = zstar))
#' }
gsYZstar <-
function(ystar, zstar, nstar, rstar, idx, x, delta, kappa, distr.k,
distr.py0, mu.py0, sigma.py0, distr.pz0, mu.pz0, sigma.pz0) {
for (j in seq(rstar)) {
flag <- 1
while (flag == 1) {
id <- which(!is.na(match(idx, j)))
xj <- x[id]
xbar <- sum(xj) / nstar[j]
z2star <- rk(1,
distr = distr.pz0, mu = zstar[j],
sigma = zstar[j] / sqrt(delta)
)
y2star <- rk(1, distr = distr.py0, mu = xbar, sigma = kappa *
z2star / sqrt(nstar[j]))
f.ratio <- rfyzstar(y2star, ystar[j], z2star, zstar[j],
xj,
distr.k = distr.k, distr.py0 = distr.py0,
mu.py0 = mu.py0, sigma.py0 = sigma.py0, distr.pz0 = distr.pz0,
mu.pz0 = mu.pz0, sigma.pz0 = sigma.pz0
)
k.ratioNum <- dk(zstar[j],
distr = distr.pz0, mu = z2star,
sigma = z2star / sqrt(delta)
)
k.ratioDen <- dk(z2star,
distr = distr.pz0, mu = zstar[j],
sigma = zstar[j] / sqrt(delta)
)
k.ratio <- k.ratioNum / k.ratioDen
k.ratioNum <- dk(ystar[j],
distr = distr.py0, mu = xbar,
sigma = kappa * zstar[j] / sqrt(nstar[j])
)
k.ratioDen <- dk(y2star,
distr = distr.py0, mu = xbar,
sigma = kappa * z2star / sqrt(nstar[j])
)
k.ratio <- k.ratio * k.ratioNum / k.ratioDen
q2 <- min(1, f.ratio * k.ratio)
if (is.na(q2)) {
flag <- 1
}
else {
flag <- 0
if (runif(1) <= q2) {
ystar[j] <- y2star
zstar[j] <- z2star
}
}
}
}
return(list(ystar = ystar, zstar = zstar))
}
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/gsYZstar.R
|
#' Jointly resampling Ystar and Zstar function in the case of censoring
#'
#' This function resamples jointly the distinct pairs (Ystar,Zstar) in the
#' fully nonparametric model.
#'
#' For internal use
#'
#' @keywords internal
#' @examples
#'
#' ## The function is currently defined as
#' function(ystar, zstar, nstar, rstar, idx, xleft, xright, censor_code,
#' delta, kappa, distr.k, distr.py0, mu.py0, sigma.py0, distr.pz0,
#' mu.pz0, sigma.pz0) {
#' for (j in seq(rstar)) {
#' flag <- 1
#' while (flag == 1) {
#' id <- which(!is.na(match(idx, j)))
#' xjleft <- xleft[id]
#' xjright <- xright[id]
#' xbar <- 0.5 * sum(xjleft + xjright, na.rm = T) / nstar[j]
#' z2star <- rk(1,
#' distr = distr.pz0, mu = zstar[j],
#' sigma = zstar[j] / sqrt(delta)
#' )
#' y2star <- rk(1, distr = distr.py0, mu = xbar, sigma = kappa *
#' z2star / sqrt(nstar[j]))
#' f.ratio <- rfyzstarcens2(
#' v = y2star, v2 = ystar[j],
#' z = z2star, z2 = zstar[j], xleft = xjleft, xright = xjright,
#' censor_code = censor_code[id], distr.k = distr.k,
#' distr.py0 = distr.py0, mu.py0 = mu.py0, sigma.py0 = sigma.py0,
#' distr.pz0 = distr.pz0, mu.pz0 = mu.pz0, sigma.pz0 = sigma.pz0
#' )
#' k.ratioNum <- dk(zstar[j],
#' distr = distr.pz0, mu = z2star,
#' sigma = z2star / sqrt(delta)
#' )
#' k.ratioDen <- dk(z2star,
#' distr = distr.pz0, mu = zstar[j],
#' sigma = zstar[j] / sqrt(delta)
#' )
#' k.ratio <- k.ratioNum / k.ratioDen
#' k.ratioNum <- dk(ystar[j],
#' distr = distr.py0, mu = xbar,
#' sigma = kappa * zstar[j] / sqrt(nstar[j])
#' )
#' k.ratioDen <- dk(y2star,
#' distr = distr.py0, mu = xbar,
#' sigma = kappa * z2star / sqrt(nstar[j])
#' )
#' k.ratio <- k.ratio * k.ratioNum / k.ratioDen
#' q2 <- min(1, f.ratio * k.ratio)
#' if (is.na(q2)) {
#' flag <- 1
#' }
#' else {
#' flag <- 0
#' if (runif(1) <= q2) {
#' ystar[j] <- y2star
#' zstar[j] <- z2star
#' }
#' }
#' }
#' }
#' return(list(ystar = ystar, zstar = zstar))
#' }
gsYZstarcens2 <-
function(ystar, zstar, nstar, rstar, idx, xleft, xright, censor_code,
delta, kappa, distr.k, distr.py0, mu.py0, sigma.py0, distr.pz0,
mu.pz0, sigma.pz0) {
for (j in seq(rstar)) {
flag <- 1
while (flag == 1) {
id <- which(!is.na(match(idx, j)))
xjleft <- xleft[id]
xjright <- xright[id]
xbar <- 0.5 * sum(xjleft + xjright, na.rm = T) / nstar[j]
z2star <- rk(1,
distr = distr.pz0, mu = zstar[j],
sigma = zstar[j] / sqrt(delta)
)
y2star <- rk(1, distr = distr.py0, mu = xbar, sigma = kappa *
z2star / sqrt(nstar[j]))
f.ratio <- rfyzstarcens2(
v = y2star, v2 = ystar[j],
z = z2star, z2 = zstar[j], xleft = xjleft, xright = xjright,
censor_code = censor_code[id], distr.k = distr.k,
distr.py0 = distr.py0, mu.py0 = mu.py0, sigma.py0 = sigma.py0,
distr.pz0 = distr.pz0, mu.pz0 = mu.pz0, sigma.pz0 = sigma.pz0
)
k.ratioNum <- dk(zstar[j],
distr = distr.pz0, mu = z2star,
sigma = z2star / sqrt(delta)
)
k.ratioDen <- dk(z2star,
distr = distr.pz0, mu = zstar[j],
sigma = zstar[j] / sqrt(delta)
)
k.ratio <- k.ratioNum / k.ratioDen
k.ratioNum <- dk(ystar[j],
distr = distr.py0, mu = xbar,
sigma = kappa * zstar[j] / sqrt(nstar[j])
)
k.ratioDen <- dk(y2star,
distr = distr.py0, mu = xbar,
sigma = kappa * z2star / sqrt(nstar[j])
)
k.ratio <- k.ratio * k.ratioNum / k.ratioDen
q2 <- min(1, f.ratio * k.ratio)
if (is.na(q2)) {
flag <- 1
}
else {
flag <- 0
if (runif(1) <= q2) {
ystar[j] <- y2star
zstar[j] <- z2star
}
}
}
}
return(list(ystar = ystar, zstar = zstar))
}
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/gsYZstarcens2.R
|
#' Tests if a fit is a semiparametric or nonparametric model.
#'
#'
#' @param fit The result of the fit, obtained through the function MixNRMI1 or
#' MixNRMI2.
#' @return TRUE if the fit is a semiparametric model
#' @examples
#'
#' set.seed(150520)
#' data(acidity)
#' x <- enzyme
#' out <- MixNRMI1(enzyme, extras = TRUE, Nit = 10)
#' BNPdensity:::is_semiparametric(out)
is_semiparametric <- function(fit) {
return(!is.null(fit$S))
}
convert_nan_to_0 <- function(vec) {
ifelse(is.nan(vec), yes = 0, no = vec)
}
#' Repeat the common scale parameter of a semiparametric model to match the
#' dimension of the location parameters.
#'
#'
#' @param semiparametric_fit The result of the fit, obtained through the
#' function MixNRMI1.
#' @return an adequate list of vectors of sigmas
fill_sigmas <- function(semiparametric_fit) {
mapply(FUN = function(means, sigma) {
rep(sigma, length(means))
}, semiparametric_fit$means, semiparametric_fit$S)
}
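# Illustrative call (not part of the original sources), with a mock
# semiparametric fit containing two iterations:
# fill_sigmas(list(means = list(c(0, 1), 0.5), S = c(2, 3)))
# # list(c(2, 2), 3)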
#' Create a plotting grid from non-censored data.
#'
#'
#' @param data Non-censored input data from which to compute the grid.
#' @param npoints Number of points on the grid.
#' @return a vector containing the plotting grid
grid_from_data_noncensored <- function(data, npoints = 100) {
data_range <- max(data) - min(data)
  return(seq(min(data) - 0.1 * data_range, max(data) + 0.1 * data_range, length.out = npoints))
}
#' Create a plotting grid from censored data.
#'
#'
#' @param data Censored input data from which to compute the grid.
#' @param npoints Number of points on the grid.
#' @return a vector containing the plotting grid
grid_from_data_censored <- function(data, npoints = 100) {
max_ <- max(max(data$left, na.rm = T), max(data$right, na.rm = T))
min_ <- min(min(data$left, na.rm = T), min(data$right, na.rm = T))
data_range_adapted <- (max_ - min_) / sqrt(nrow(data))
  return(seq(min_ - 0.1 * data_range_adapted, max_ + 0.1 * data_range_adapted, length.out = npoints))
}
#' Create a plotting grid from censored or non-censored data.
#'
#'
#' @param data Input data from which to compute the grid.
#' @param npoints Number of points on the grid.
#' @return a vector containing the plotting grid
grid_from_data <- function(data, npoints = 100) {
if (is_censored(data)) {
grid_from_data_censored(data, npoints = npoints)
}
else {
grid_from_data_noncensored(data, npoints = npoints)
}
}
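# Illustrative call (not part of the original sources): for non-censored data
# the grid extends the data range by 10% on each side.
# grid_from_data(c(0, 1, 2), npoints = 5)
# # c(-0.2, 0.4, 1.0, 1.6, 2.2)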
#' Test if the data is censored
#'
#'
#' @param dat The dataset to be tested
#' @return TRUE if the data is censored
#' @examples
#'
#' data(salinity)
#' BNPdensity:::is_censored(salinity)
is_censored <- function(dat) {
if (is.null(ncol(dat))) {
FALSE
} else {
TRUE
}
}
#' Compute the grid for thinning the MCMC chain
#'
#' This function creates a real-valued grid and then rounds it. If the grid is
#' too fine, there is a risk of rounding ties, i.e. iterations which are kept
#' twice. To avoid this, if the total number of iterations is smaller than
#' twice the number of iterations desired after thinning, the chain is not
#' thinned.
#'
#' @param Nit Length of the MCMC chain
#' @param thinning_to Desired number of iterations after thinning.
#' @return an integer vector of the MCMC iterations retained.
compute_thinning_grid <- function(Nit, thinning_to = 10) {
if (Nit <= 2 * thinning_to) { # Factor 2 to reduce the probability of having the same iterations selected twice
it_retained <- 1:Nit
}
else {
it_retained <- round(seq(1, Nit, length.out = thinning_to))
}
return(it_retained)
}
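# Illustrative calls (not part of the original sources):
# compute_thinning_grid(15, thinning_to = 10)   # 1:15, chain too short to thin
# compute_thinning_grid(1000, thinning_to = 10) # 10 roughly equally spaced iterations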
#' Add x and y
#'
#' This is a helper function for use in Reduce() over a list of vectors
#'
#' @param x first argument of the sum
#' @param y second argument of the sum
#'
#' @return x + y
#'
add <- function(x, y) {
x + y
}
#' Extract the Conditional Predictive Ordinates (CPOs) from a fitted object
#'
#' @param object A fit obtained through one of the NRMI functions
#' @param ... Further arguments passed to or from other methods.
#'
#' @return A vector of Conditional Predictive Ordinates (CPOs)
#' @export
cpo <- function(object, ...) {
UseMethod("cpo")
}
cpo.default <- function(object, ...) "Unknown class"
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/helper_functions.R
|
pmix_vec_loop <-
function(qs,
locations_list,
scales_list,
weights_list,
distr.k) {
mean_over_list(qs, locations_list, scales_list, weights_list, distr.k, pmix)
}
dmix_vec_loop <-
function(xs,
locations_list,
scales_list,
weights_list,
distr.k) {
mean_over_list(xs, locations_list, scales_list, weights_list, distr.k, dmix)
}
qmix_vec_loop <-
function(ps,
locations_list,
scales_list,
weights_list,
distr.k) {
mean_over_list(ps, locations_list, scales_list, weights_list, distr.k, qmix)
}
mean_over_list <-
function(xs,
locations_list,
scales_list,
weights_list,
distr.k,
mixdistfun) {
res <- 0.0 * xs
nit <- length(locations_list)
for (it in 1:nit) {
res <- res + mixdistfun(
xs,
locations_list[[it]],
scales_list[[it]],
weights_list[[it]],
distr.k
)
}
return(res / nit)
}
mixdistfun <-
function(xs,
locations,
scales,
weights,
distr.k,
distfun) {
res <- 0.0 * xs
for (cmp in seq_along(locations)) {
res <-
res + weights[cmp] * distfun(xs,
distr = distr.k,
mu = locations[cmp],
sigma = scales[cmp]
)
}
return(res)
}
dmix <- function(xs, locations, scales, weights, distr.k) {
mixdistfun(xs, locations, scales, weights, distr.k, dk)
}
pmix <- function(qs, locations, scales, weights, distr.k) {
mixdistfun(qs, locations, scales, weights, distr.k, pk)
}
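# Illustrative call (not part of the original sources): density of an equally
# weighted two-component normal mixture at 0.
# dmix(0, locations = c(-1, 1), scales = c(1, 1), weights = c(0.5, 0.5), distr.k = 1)
# # = dnorm(0, 1, 1), ~0.242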
mixdistfun_cens <-
function(xlefts,
xrights,
c_code_filters,
locations,
scales,
weights,
distr.k,
distfun) {
res <- 0.0 * xlefts
for (cmp in seq_along(locations)) {
res <-
res + weights[cmp] * distfun(xlefts,
xrights,
c_code_filters,
distr = distr.k,
mu = locations[cmp],
sigma = scales[cmp]
)
}
return(res)
}
dmixcens <- function(xlefts,
xrights,
c_code_filters,
locations,
scales,
weights,
distr.k) {
mixdistfun_cens(xlefts, xrights, c_code_filters, locations, scales, weights, distr.k, dkcens2)
}
qmix_one_val_with_scales <- function(p, locations, scales, weights, distr.k, max_scale, min_loc, max_loc) {
f_help_vec <- function(qs) {
p - pmix(qs, locations, scales, weights, distr.k)
}
  if (distr.k == 2 || distr.k == 5) {
    # kernels with positive support (presumably the gamma and lognormal codes)
    lowerbound <- 0
    upperbound <- max_loc + 100 * max_scale
  } else if (distr.k == 3) {
    # kernel with support on the unit interval (presumably the beta code)
    lowerbound <- 0
    upperbound <- 1
  } else {
    # kernels supported on the whole real line
    lowerbound <- min_loc - 100 * max_scale
    upperbound <- max_loc + 100 * max_scale
  }
uniroot(f = f_help_vec, lower = lowerbound, upper = upperbound)$root
}
qmix_one_val <- function(p, locations, scales, weights, distr.k) {
max_scale <- max(scales)
max_loc <- max(locations)
min_loc <- min(locations)
qmix_one_val_with_scales(p, locations, scales, weights, distr.k, max_scale, min_loc, max_loc)
}
qmix <- function(ps, locations, scales, weights, distr.k, parallel = TRUE) {
if (Sys.info()[["sysname"]] == "Windows") parallel <- FALSE
max_scale <- max(scales)
max_loc <- max(locations)
min_loc <- min(locations)
unlist(parallel::mclapply(ps,
FUN = function(p) qmix_one_val_with_scales(p, locations, scales, weights, distr.k, max_scale, min_loc, max_loc),
mc.cores = ifelse(test = parallel, yes = parallel::detectCores(), no = 1)
))
}
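# Illustrative usage (a sketch, with the same assumption about numeric kernel
# codes): the median of an equal mixture of N(-1, 1) and N(1, 1) is 0 by
# symmetry, so the following should return approximately 0, up to uniroot
# tolerance:
# qmix(0.5, locations = c(-1, 1), scales = c(1, 1), weights = c(0.5, 0.5),
#      distr.k = 1, parallel = FALSE)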
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/mixture_density_functions.R
|
#' Multiple chains of MixNRMI1
#'
#' @inheritParams MixNRMI1
#' @param nchains The number of chains to run.
#' @param parallel Whether to run the chains in parallel. Only works on UNIX-like systems as it relies on fork-based parallelism.
#' @param ncores Number of cores for the parallel run. Defaults to parallel::detectCores(), i.e. the maximum number of cores detected by R on your system.
#'
#' @return A list containing the multiple fits.
#' @seealso \code{\link{MixNRMI2}}, \code{\link{MixNRMI1cens}},
#' \code{\link{MixNRMI2cens}}
#' @examples
#'
#' data(acidity)
#' multMixNRMI1(acidity, parallel = TRUE, Nit = 10, ncores = 2)
#' @export multMixNRMI1
multMixNRMI1 <- function(x, probs = c(0.025, 0.5, 0.975), Alpha = 1, Kappa = 0,
Gama = 0.4, distr.k = "normal", distr.p0 = "normal", asigma = 0.5, bsigma = 0.5,
delta_S = 3, delta_U = 2, Meps = 0.01, Nx = 150, Nit = 1500,
Pbi = 0.1, epsilon = NULL, printtime = TRUE, extras = TRUE, adaptive = FALSE,
nchains = 4, parallel = TRUE, ncores = parallel::detectCores()) {
if (Sys.info()[["sysname"]] == "Windows") parallel <- FALSE
res <- parallel::mclapply(
X = 1:nchains,
FUN = function(chainID) {
MixNRMI1(
x, probs, Alpha, Kappa,
Gama, distr.k, distr.p0, asigma, bsigma,
delta_S, delta_U, Meps, Nx, Nit, Pbi,
epsilon, printtime, extras, adaptive
)
},
mc.cores = ifelse(test = parallel, yes = ncores, no = 1),
mc.set.seed = TRUE
)
return(structure(res, class = c("multNRMI")))
}
#' Multiple chains of MixNRMI2
#'
#' @inheritParams MixNRMI2
#' @param nchains The number of chains to run.
#' @param parallel Whether to run the chains in parallel. Only works on UNIX-like systems as it relies on fork-based parallelism.
#' @param ncores Number of cores for the parallel run. Defaults to parallel::detectCores(), i.e. the maximum number of cores detected by R on your system.
#'
#' @return A list containing the multiple fits.
#' @seealso \code{\link{MixNRMI2}}, \code{\link{MixNRMI1cens}},
#' \code{\link{MixNRMI2cens}}, \code{\link{multMixNRMI1}}
#' @examples
#'
#' data(acidity)
#' multMixNRMI2(acidity, parallel = TRUE, Nit = 10, ncores = 2)
#' @export multMixNRMI2
multMixNRMI2 <- function(x, probs = c(0.025, 0.5, 0.975), Alpha = 1, Kappa = 0,
Gama = 0.4, distr.k = "normal", distr.py0 = "normal", distr.pz0 = "gamma",
mu.pz0 = 3, sigma.pz0 = sqrt(10), delta_S = 4, kappa = 2, delta_U = 2,
Meps = 0.01, Nx = 150, Nit = 1500, Pbi = 0.1, epsilon = NULL,
printtime = TRUE, extras = TRUE, adaptive = FALSE,
nchains = 4, parallel = FALSE, ncores = parallel::detectCores()) {
if (Sys.info()[["sysname"]] == "Windows") parallel <- FALSE
res <- parallel::mclapply(
X = 1:nchains,
FUN = function(chainID) {
MixNRMI2(
x, probs, Alpha, Kappa,
Gama, distr.k, distr.py0, distr.pz0, mu.pz0,
sigma.pz0, delta_S, kappa, delta_U, Meps,
Nx, Nit, Pbi, epsilon, printtime, extras,
adaptive
)
},
mc.cores = ifelse(test = parallel, yes = ncores, no = 1),
mc.set.seed = TRUE
)
return(structure(res, class = c("multNRMI")))
}
#' Multiple chains of MixNRMI1cens
#'
#' @inheritParams MixNRMI1cens
#' @param nchains The number of chains to run.
#' @param parallel Whether to run the chains in parallel. Only works on
#' UNIX-like systems as it relies on fork-based parallelism.
#' @param ncores Number of cores for the parallel run. Defaults to
#' parallel::detectCores(), i.e. the maximum number of cores detected by R on
#' your system.
#' @return A list containing the multiple fits.
#' @seealso \code{\link{MixNRMI2}}, \code{\link{MixNRMI1cens}},
#' \code{\link{MixNRMI2cens}}, \code{\link{multMixNRMI1}}
#' @examples
#'
#' data(salinity)
#' multMixNRMI1cens(salinity$left, salinity$right, parallel = TRUE, Nit = 10, ncores = 2)
#' @export multMixNRMI1cens
multMixNRMI1cens <- function(xleft, xright, probs = c(0.025, 0.5, 0.975), Alpha = 1, Kappa = 0,
Gama = 0.4, distr.k = "normal", distr.p0 = "normal", asigma = 0.5, bsigma = 0.5,
delta_S = 3, delta_U = 2, Meps = 0.01, Nx = 150, Nit = 1500,
Pbi = 0.1, epsilon = NULL, printtime = TRUE, extras = TRUE, adaptive = FALSE,
nchains = 4, parallel = TRUE, ncores = parallel::detectCores()) {
if (Sys.info()[["sysname"]] == "Windows") parallel <- FALSE
res <- parallel::mclapply(
X = 1:nchains,
FUN = function(chainID) {
MixNRMI1cens(
xleft, xright, probs, Alpha, Kappa,
Gama, distr.k, distr.p0, asigma, bsigma,
delta_S, delta_U, Meps, Nx, Nit, Pbi,
epsilon, printtime, extras, adaptive
)
},
mc.cores = ifelse(test = parallel, yes = ncores, no = 1),
mc.set.seed = TRUE
)
return(structure(res, class = c("multNRMI")))
}
#' Multiple chains of MixNRMI2cens
#'
#' @inheritParams MixNRMI2cens
#' @param nchains The number of chains to run.
#' @param parallel Whether to run the chains in parallel. Only works on
#' UNIX-like systems as it relies on fork-based parallelism.
#' @param ncores Number of cores for the parallel run. Defaults to
#' parallel::detectCores(), i.e. the maximum number of cores detected by R on
#' your system.
#' @return A list containing the multiple fits.
#' @seealso \code{\link{MixNRMI2}}, \code{\link{MixNRMI1cens}},
#' \code{\link{MixNRMI2cens}}, \code{\link{multMixNRMI1}}
#' @examples
#'
#' data(salinity)
#' \dontrun{
#' multMixNRMI2cens(salinity$left, salinity$right, parallel = TRUE, Nit = 20, ncores = 2)
#' }
#'
#' @export multMixNRMI2cens
multMixNRMI2cens <- function(xleft, xright, probs = c(0.025, 0.5, 0.975), Alpha = 1,
Kappa = 0, Gama = 0.4, distr.k = "normal", distr.py0 = "normal", distr.pz0 = "gamma",
mu.pz0 = 3, sigma.pz0 = sqrt(10), delta_S = 4, kappa = 2, delta_U = 2,
Meps = 0.01, Nx = 150, Nit = 1500, Pbi = 0.1, epsilon = NULL,
printtime = TRUE, extras = TRUE, adaptive = FALSE,
nchains = 4, parallel = TRUE, ncores = parallel::detectCores()) {
if (Sys.info()[["sysname"]] == "Windows") parallel <- FALSE
res <- parallel::mclapply(
X = 1:nchains,
FUN = function(chainID) {
MixNRMI2cens(
xleft, xright, probs, Alpha, Kappa,
Gama, distr.k, distr.py0, distr.pz0, mu.pz0,
sigma.pz0, delta_S, kappa, delta_U, Meps,
Nx, Nit, Pbi, epsilon, printtime, extras, adaptive
)
},
mc.cores = ifelse(test = parallel, yes = ncores, no = 1),
mc.set.seed = TRUE
)
return(structure(res, class = c("multNRMI")))
}
#' Convert the output of multMixNRMI into a coda mcmc object
#'
#' @importFrom coda as.mcmc
#' @param x Output of multMixNRMI.
#' @param ... Further arguments to be passed to specific methods
#' @param thinning_to Final length of the chain after thinning.
#' @param ncores Specify the number of cores to use in the conversion
#' @return a coda::mcmc object
#' @method as.mcmc multNRMI
#' @export
#' @examples
#' data(acidity)
#' out <- multMixNRMI1(acidity, parallel = TRUE, Nit = 10, ncores = 2)
#' coda::as.mcmc(out, ncores = 2)
as.mcmc.multNRMI <- function(x, ..., thinning_to = 1000, ncores = parallel::detectCores()) {
res <- coda::as.mcmc(lapply(Convert_to_matrix_list(x, thinning_to = thinning_to, ncores = ncores), coda::mcmc))
class(res) <- c("multNRMI", class(res))
return(res)
}
#' Plot the density estimate and the 95\% credible interval
#'
#' The density estimate is the mean posterior density computed on the data
#' points.
#'
#'
#' @param x An object of class multNRMI
#' @param ... Further arguments to be passed to generic functions, ignored at the moment
#' @return A graph with the density estimate and the 95\% credible interval.
#' Includes a histogram if the data is not censored.
#' @export
#' @examples
#' \donttest{
#' data(salinity)
#' fit <- multMixNRMI2cens(salinity$left, salinity$right, parallel = TRUE, Nit = 10, ncores = 2)
#' plot(fit)
#' }
plot.multNRMI <- function(x, ...) {
  # This assumes that all chains have the same length and can be given equal
  # weight when combining. Only the first, second and last columns of qx are
  # pooled across chains, which correspond to the quantities displayed by the
  # plot (the density estimate and the credible interval bounds).
res <- x[[1]]
nchains <- length(x)
m <- ncol(res$qx)
res$qx[, 1] <- 1 / nchains * Reduce(f = add, lapply(X = x, FUN = function(x) x$qx[, 1]))
res$qx[, 2] <- 1 / nchains * Reduce(f = add, lapply(X = x, FUN = function(x) x$qx[, 2]))
res$qx[, m] <- 1 / nchains * Reduce(f = add, lapply(X = x, FUN = function(x) x$qx[, m]))
plot(res)
}
#' Print method for class 'multNRMI'
#'
#' @param x An object of class multNRMI
#' @param ... Further arguments to be passed to generic functions, ignored at the moment
#'
#' @return Prints a description of the fit and the number of chains that were run
#' @export
#'
#' @examples
#' \donttest{
#' data(salinity)
#' out <- multMixNRMI2cens(salinity$left, salinity$right, parallel = TRUE, Nit = 10, ncores = 2)
#' print(out)
#' }
print.multNRMI <- function(x, ...) {
print(x[[1]])
writeLines(paste(length(x), "independent MCMC chains were run in parallel"))
}
#' Summary method for class 'multNRMI'
#'
#' @param object A fitted object of class multNRMI
#' @param number_of_clusters Whether to compute the optimal number of clusters, which can be a time-consuming operation (see \code{\link{compute_optimal_clustering}})
#' @param ... Further arguments to be passed to generic function, ignored at the moment
#'
#' @return Prints out a summary of the fitted object
#' @export
#'
#' @examples
#' \donttest{
#' data(salinity)
#' out <- multMixNRMI2cens(salinity$left, salinity$right, parallel = TRUE, Nit = 10, ncores = 2)
#' summary(out)
#' }
summary.multNRMI <- function(object, number_of_clusters = FALSE, ...) {
kernel_name <- tolower(give_kernel_name(object[[1]]$distr.k))
NRMI_comment <- paste("Density estimation using a", comment_on_NRMI_type(object[[1]]$NRMI_params))
kernel_comment <- paste("A nonparametric", kernel_name, "mixture model was used.")
ndata <- ifelse(is_censored(object[[1]]$data), nrow(object[[1]]$data), length(object[[1]]$data))
data_comment <- paste("There were", ndata, "data points.")
n_chains <- length(object)
MCMC_comment <- paste(n_chains, " MCMC chains were run for ", object[[1]]$Nit, " iterations with ", 100 * object[[1]]$Pbi, "% discarded for burn-in.", sep = "")
  if (number_of_clusters) {
    collected_allocs <- list("Allocs" = Reduce(c, lapply(object, function(x) x$Allocs)))
    estimated_clustering <- compute_optimal_clustering(collected_allocs)
    clustering_comment <- paste("The estimated number of clusters in the data is ", length(unique(estimated_clustering)), ".", sep = "")
  } else {
    clustering_comment <- "To obtain information on the estimated number of clusters, please use summary(object, number_of_clusters = TRUE)."
  }
writeLines(paste(NRMI_comment, "\n", kernel_comment, "\n", data_comment, "\n", MCMC_comment, "\n", clustering_comment, sep = ""))
}
#' Extract the Conditional Predictive Ordinates (CPOs) from a list of fitted objects
#'
#' This function assumes that all chains have the same size; supporting chains
#' of different lengths would require weighting each chain accordingly.
#'
#' @param object An object of class multNRMI, i.e. a list of fits obtained from the multMixNRMI functions
#' @param ... Further arguments to be passed to generic function, ignored at the moment
#'
#' @return A vector of Conditional Predictive Ordinates (CPOs)
#' @export
#'
#' @examples
#' data(acidity)
#' out <- multMixNRMI1(acidity, parallel = TRUE, Nit = 10, ncores = 2)
#' cpo(out)
cpo.multNRMI <- function(object, ...) {
nchains <- length(object)
inv_cpos_by_chain <- lapply(object, function(x) 1 / x$cpo)
inv_cpos <- 1 / nchains * Reduce(add, inv_cpos_by_chain)
return(1 / inv_cpos)
}
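# Pooling rationale (a sketch of the reasoning, not package documentation): the
# CPO of observation i is the harmonic mean of its predictive density over
# posterior draws, CPO_i = 1 / mean(1 / f(y_i | theta)). Averaging the inverse
# CPOs across equally long chains is therefore equivalent to computing the CPO
# over the combined draws. For instance, per-chain CPOs of 0.2 and 0.5 for one
# observation pool to 1 / ((1 / 0.2 + 1 / 0.5) / 2) = 0.2857..., not to their
# arithmetic mean 0.35.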
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/multMixNRMI.R
|
# library(memoise)
# # library(gmp)
# library(Rmpfr)
# noncentral_generalised_factorial_coefficient <- memoise::memoise(function(n, k, s, r) {
# # print(paste('n =', n, 'k =', k))
# stopifnot(n >= 0, k >= 0)
# if (k == 0) {
# if (n == 0) {
# 1
# } else {
# Rmpfr::pochMpfr(r, n)
# }
# }
# else {
# if (k > n) {
# 0
# } else {
# (s * k + r - n + 1) * noncentral_generalised_factorial_coefficient(n - 1, k, s, r) + s * noncentral_generalised_factorial_coefficient(n - 1, k - 1, s, r)
# }
# }
# })
# noncentral_generalised_factorial_coefficient(0,0,1,2)
# noncentral_generalised_factorial_coefficient(3,0,1,2)
# noncentral_generalised_factorial_coefficient(3,4,1,2)
# noncentral_generalised_factorial_coefficient(4,3,1,2)
# noncentral_generalised_factorial_coefficient(20,3,1,2)
# noncentral_generalised_factorial_coefficient(1,1,0.5,0)
# noncentral_generalised_factorial_coefficient(10, 4, 0.5, 0)
# noncentral_generalised_factorial_coefficient(100, 1, 0.5, 0)
# noncentral_generalised_factorial_coefficient(100, 4, 0.5, 0)
# noncentral_generalised_factorial_coefficient(100, 50, 0.5, 0)
# library(parallel)
# res = mclapply(1:100, FUN = function(x) noncentral_generalised_factorial_coefficient(100, x, 0.5, 0))
#
# noncentral_generalised_factorial_coefficient(6, 5, 0.4, 0)
# noncentral_generalised_factorial_coefficient(6, 5, 0.5, 0)
# noncentral_generalised_factorial_coefficient(6, 5, 0.9, 0)
# noncentral_generalised_factorial_coefficient(100, 1, 0.4, 0)
|
/scratch/gouwar.j/cran-all/cranData/BNPdensity/R/noncentral_generalised_factorial_coefficient.R
|