#' Create prior default method
#'
#' @param x a vector of x values at which the prior is to be specified (the
#'   support of the prior). This should contain unique values in ascending
#'   order. The function will sort the values, with a warning, if x is
#'   unsorted, and will halt if x contains any duplicates or negative lag 1
#'   differences.
#' @param wt a vector of weights corresponding to the weight of the prior at
#'   the given x values.
#' @param \dots optional extra arguments. Not currently used.
#' @return a linear interpolation function where the weights have been scaled
#'   so that the function (numerically) integrates to 1.
#' @export
createPrior.default = function(x, wt, ...){
  if(length(x) != length(wt))
    stop("x and wt must be of equal length")

  if(any(wt < 0)){
    stop("All weights must be >= 0")
  }

  if(is.unsorted(x)){
    warning("x is not in ascending order. We have sorted it for you, but this might have unintended consequences")
    o = order(x)
    x = x[o]
    wt = wt[o]
  }

  if(any(diff(x) <= 0)){
    stop("x has duplicated values or negative differences. x should contain unique values in ascending order.")
  }

  fx = approxfun(x, wt, yleft = 0, yright = 0, rule = 2)
  A = integrate(fx, x[1], x[length(x)])$value

  ## compare to 1 with a tolerance rather than exact floating point equality
  if(!isTRUE(all.equal(A, 1))){
    if(!Bolstad.control(...)$quiet){
      cat("Normalizing prior. Normalizing constant: ")
      cat(A, "\n")
    }
    fx = approxfun(x, wt / A, yleft = 0, yright = 0, rule = 2)
  }

  return(fx)
}
/scratch/gouwar.j/cran-all/cranData/Bolstad/R/createPrior.default.R
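A minimal usage sketch (assuming the exported generic createPrior, defined elsewhere in the package, dispatches to the default method above for numeric x): build an unnormalized triangular prior on [0, 10] and check that the returned interpolation function integrates to 1.

x = seq(0, 10, by = 0.1)
wt = pmin(x, 10 - x)           # unnormalized triangular weights, peak at 5
prior = createPrior(x, wt)     # prints the normalizing constant (here 25)
integrate(prior, 0, 10)$value  # should be (numerically) 1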
## Step cumulative distribution function for a discrete distribution with
## support x and probability mass px, evaluated at the points in X.
cumDistFun = function(X, x, px){
  Px = cumsum(px)
  r = rep(0, length(X))
  r[X >= max(x)] = 1
  i = X >= min(x) & X < max(x)
  ## index of the largest support point less than or equal to each X[i]
  j = sapply(X[i], function(y) max(which(x <= y)))
  r[i] = Px[j]
  return(r)
}
/scratch/gouwar.j/cran-all/cranData/Bolstad/R/cumDistFun.R
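A hypothetical illustration of this internal helper: the step CDF of a discrete distribution with support 1:4, evaluated below, inside, and above the support.

x = 1:4
px = c(0.1, 0.2, 0.3, 0.4)
cumDistFun(c(0.5, 1, 2.5, 4, 7), x, px)
# 0.0 0.1 0.3 1.0 1.0 -- points below min(x) get 0, at or above max(x) get 1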
#' Plot the prior, likelihood, and posterior on the same plot.
#'
#' This function takes any object of class \code{Bolstad} and plots the prior,
#' likelihood and posterior on the same plot. The aim is to show the influence
#' of the prior, and of the likelihood, on the posterior.
#'
#' @param x an object of class \code{Bolstad}.
#' @param \dots any other arguments to be passed to the \code{plot} function.
#' @note Note that \code{xlab}, \code{ylab}, \code{main}, \code{axes},
#'   \code{xlim}, \code{ylim} and \code{type} are all used in the function so
#'   specifying them is unlikely to have any effect.
#' @author James Curran
#' @keywords plots
#' @examples
#'
#' # an example with a binomial sampling situation
#' results = binobp(4, 12, 3, 3, plot = FALSE)
#' decomp(results)
#'
#' # an example with normal data
#' y = c(2.99, 5.56, 2.83, 3.47)
#' results = normnp(y, 3, 2, 1, plot = FALSE)
#' decomp(results)
#'
#' @export decomp
decomp = function(x, ...){
  if(!inherits(x, "Bolstad"))
    stop("This function only works for objects of class Bolstad")

  oPar = par(mfrow = c(3, 1), mar = c(1, 1, 1, 1))

  with(x, {
    ## use the members of x directly rather than a non-existent 'results' object
    yLims = c(0, 1.1 * max(posterior, prior))

    plot(prior ~ param.x, ylim = yLims, type = "l", xlim = range(param.x),
         xlab = "", ylab = "", main = "", axes = FALSE, ...)
    polygon(param.x, prior, col = "red")
    box()
    r = legend("topleft", legend = "Prior", lty = 1, bty = "n", plot = FALSE)$text
    text(r$x, r$y, "Prior", adj = 0)

    plot(likelihood ~ param.x, type = "l", xlab = "", ylab = "", main = "",
         axes = FALSE, ...)
    polygon(param.x, likelihood, col = "green")
    box()
    r = legend("topleft", legend = "Likelihood", lty = 1, bty = "n", plot = FALSE)$text
    text(r$x, r$y, "Likelihood", adj = 0)

    plot(posterior ~ param.x, ylim = yLims, type = "l", xlab = "", ylab = "",
         main = "", axes = FALSE, ...)
    polygon(param.x, posterior, col = "blue")
    box()
    r = legend("topleft", legend = "Posterior", lty = 1, bty = "n", plot = FALSE)$text
    text(r$x, r$y, "Posterior", adj = 0)
  })

  par(oPar)
}
/scratch/gouwar.j/cran-all/cranData/Bolstad/R/decomp.R
#' Lines method for Bolstad objects
#'
#' Allows simple addition of posterior distributions from other results to an
#' existing plot
#'
#' @param x an object of class \code{Bolstad}.
#' @param \dots any additional parameters to be passed to \code{graphics::lines}.
#'
#' @method lines Bolstad
#'
#' @export
lines.Bolstad = function(x, ...)
  lines(x$param.x, x$posterior, ...)
/scratch/gouwar.j/cran-all/cranData/Bolstad/R/lines.Bolstad.R
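A usage sketch (data simulated here): overlay the posterior arising from a second prior on the plot drawn by normnp.

y = rnorm(20, -0.5, 1)
fit1 = normnp(y, 0, 1, 1)                  # N(0, 1) prior; draws a plot
fit2 = normnp(y, 0.5, 3, 1, plot = FALSE)  # N(0.5, 3) prior
lines(fit2, lty = 3, col = "darkgreen")    # add its posterior to the plot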
#' Calculate the posterior mean
#'
#' Calculate the posterior mean of an object of class \code{Bolstad}. If the
#' object has a member \code{mean} then it will return this value, otherwise it
#' will calculate \eqn{\int_{-\infty}^{+\infty}\theta f(\theta|x)\,d\theta}
#' using linear interpolation to approximate the density function and numerical
#' integration, where \eqn{\theta} is the variable for which we want to do
#' Bayesian inference, and \eqn{x} is the data.
#'
#' @param x An object of class \code{Bolstad}
#' @param \dots Any other arguments. This parameter is currently ignored but it
#'   could be useful in the future to deal with problematic data.
#' @return The posterior mean of the variable of inference given the data.
#' @examples
#' # The usefulness of this method is really highlighted when we have a general
#' # continuous prior. In this example we are interested in the posterior mean
#' # of a normal mean. Our prior is triangular over [-3, 3].
#' set.seed(123)
#' x = rnorm(20, -0.5, 1)
#' mu = seq(-3, 3, by = 0.001)
#' mu.prior = rep(0, length(mu))
#' mu.prior[mu <= 0] = 1 / 3 + mu[mu <= 0] / 9
#' mu.prior[mu > 0] = 1 / 3 - mu[mu > 0] / 9
#' results = normgcp(x, 1, density = "user", mu = mu, mu.prior = mu.prior)
#' mean(results)
#' @method mean Bolstad
#' @export
mean.Bolstad = function(x, ...){
  ## use the stored posterior mean if the object has one; exact name matching
  ## avoids accidentally matching members such as "post.mean"
  if("mean" %in% names(x))
    return(x$mean)

  xVals = x$param.x
  fx = approxfun(xVals, xVals * x$posterior)
  return(integrate(fx, min(xVals), max(xVals))$value)
}
/scratch/gouwar.j/cran-all/cranData/Bolstad/R/mean.Bolstad.R
#' @title Median generic
#'
#' Compute the median of the posterior distribution
#'
#' @param x an object.
#' @param na.rm Ideally if \code{TRUE} then missing values will be removed, but
#'   this is not currently used.
#' @param ... [>= R 3.4.0 only] Not currently used.
#' @details If \code{x} is an object of class \code{Bolstad} then the posterior
#'   median of the parameter of interest will be calculated.
#' @author James Curran
#' @method median Bolstad
## The median generic gained a ... argument in R 3.4.0, so the method
## signature is chosen to match the formals of the installed version of R.
if(is.na(match("...", names(formals(median))))) {
  median.Bolstad = function(x, na.rm = FALSE) {
    return(quantile(x, probs = 0.5))
  }
}else{
  median.Bolstad = function(x, na.rm = FALSE, ...) {
    return(quantile(x, probs = 0.5))
  }
}
/scratch/gouwar.j/cran-all/cranData/Bolstad/R/median.Bolstad.R
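A usage sketch (assuming the package's quantile method for Bolstad objects, which the method above delegates to): the posterior median is just the 0.5 quantile.

y = c(2.99, 5.56, 2.83, 3.47)
fit = normnp(y, 3, 2, 1, plot = FALSE)
median(fit)  # same value as quantile(fit, probs = 0.5)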
#' Bayesian inference on a multivariate normal (MVN) mean with a multivariate
#' normal (MVN) prior
#'
#' Evaluates the posterior density for \eqn{\mu}{mu}, the mean of a
#' MVN distribution, with a MVN prior on \eqn{\mu}{mu}
#'
#' @param y a vector of observations from a MVN distribution with unknown
#'   mean and known variance-covariance.
#' @param m0 the mean vector of the MVN prior, or a scalar constant so that the
#'   prior mean is a vector of length \eqn{k}{k} with the same element repeated
#'   k times, e.g. \code{m0 = 0}
#' @param V0 the variance-covariance matrix of the MVN prior, or the diagonal
#'   of the variance-covariance matrix of the MVN prior, or a scalar constant,
#'   say \eqn{n_0}{n0}, so the prior is \eqn{n_0\times \mathbf{I}_k}{n0 * I}
#'   where \eqn{\mathbf{I}_k}{I} is the \eqn{k}{k} by \eqn{k}{k} identity
#'   matrix.
#' @param Sigma the known variance-covariance matrix of the data. If
#'   this value is NULL, which it is by default, then the sample covariance is
#'   used. NOTE: if this is the case then the cdf and quantile functions should
#'   really be multivariate t, but they are not - in which case the results are
#'   only (approximately) valid for large samples.
#' @param \dots any other values to be passed to Bolstad.control
#' @return A list will be returned with the following components:
#'   \item{mean}{the posterior mean of the MVN posterior distribution}
#'   \item{var}{the posterior variance-covariance matrix of the MVN posterior
#'   distribution} \item{cdf}{a function that will evaluate the posterior cdf
#'   at a given point. This function calls \code{mvtnorm::pmvnorm}.}
#'   \item{quantileFun}{a function that will find quantiles from the posterior
#'   given input probabilities. This function calls \code{mvtnorm::qmvnorm}.}
#' @keywords misc
#' @export
mvnmvnp = function(y, m0 = 0, V0 = 1, Sigma = NULL, ...){
  yBar = matrix(colMeans(y), ncol = 1)
  k = ncol(y)

  if(length(m0) == 1){
    m0 = rep(m0, k)
  }
  m0 = matrix(m0, ncol = 1)

  if(!is.matrix(V0)){
    if(length(V0) == 1){
      V0 = diag(V0, k, k)
    }else{
      if(length(V0) != k){
        stop("V0 must either be a scalar, a vector of length k, or a k x k symmetric matrix, where k = ncol(y)")
      }else{
        V0 = diag(V0, k, k)
      }
    }
  }else{
    if(!isSymmetric(V0)){
      stop("The prior variance-covariance V0 must be a k x k symmetric matrix, where k = ncol(y)")
    }
  }

  quiet = Bolstad.control(...)$quiet

  if(!quiet){
    cat("The prior mean is:\n\n")
    cat(paste0(m0, collapse = " "))
    cat("\n\n")
    cat("The prior variance is:\n\n")
    print(V0)
    cat("\n\n")
  }

  n = nrow(y)

  if(is.null(Sigma)){
    Sigma = cov(y)
    if(!quiet){
      cat("Using the sample variance for Sigma\n")
    }
  }

  Sigma.inv = solve(Sigma)
  prior.precision = solve(V0)

  post.precision = prior.precision + n * Sigma.inv
  post.var = solve(post.precision)
  post.mean = post.var %*% prior.precision %*% m0 +
    n * post.var %*% Sigma.inv %*% yBar

  if(!quiet){
    cat("The posterior mean is:\n\n")
    cat(paste0(post.mean, collapse = " "))
    cat("\n\n")
    cat("The posterior variance is:\n\n")
    print(post.var)
  }

  results = list(parameter = 'mu',
                 mean = post.mean,
                 var = post.var,
                 ## note the named arguments: pmvnorm takes lower and upper
                 ## integration limits, and qmvnorm a search interval, before
                 ## the mean and sigma, so positional matching would be wrong
                 cdf = function(x, ...)
                   mvtnorm::pmvnorm(upper = x, mean = as.vector(post.mean),
                                    sigma = post.var, ...),
                 quantileFun = function(probs, ...)
                   mvtnorm::qmvnorm(probs, mean = as.vector(post.mean),
                                    sigma = post.var, ...)$quantile)
  class(results) = 'Bolstad'
  invisible(results)
}
/scratch/gouwar.j/cran-all/cranData/Bolstad/R/mvnmvnp.R
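A minimal usage sketch, since mvnmvnp has no roxygen examples above (the data are simulated here): bivariate observations with a vague MVN prior and known Sigma.

library(mvtnorm)
set.seed(100)
y = rmvnorm(25, mean = c(1, -1), sigma = diag(2))
fit = mvnmvnp(y, m0 = 0, V0 = 100, Sigma = diag(2))
fit$mean  # posterior mean vector, close to colMeans(y) under this vague prior
fit$var   # posterior variance-covariance matrix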
#' Bayesian inference on a normal mean with a discrete prior
#'
#' Evaluates and plots the posterior density for \eqn{\mu}{mu}, the mean of a
#' normal distribution, with a discrete prior on \eqn{\mu}{mu}
#'
#' @param x a vector of observations from a normal distribution with unknown
#'   mean and known std. deviation.
#' @param sigma.x the population std. deviation of the normal distribution
#' @param mu a vector of possible values for the mean. If mu is NULL then a
#'   uniform prior is used.
#' @param mu.prior the associated prior probability mass.
#' @param n.mu the number of possible \eqn{\mu}{mu} values in the prior
#' @param \dots additional arguments that are passed to \code{Bolstad.control}
#' @return A list will be returned with the following components: \item{mu}{the
#'   vector of possible \eqn{\mu}{mu} values used in the prior}
#'   \item{mu.prior}{the associated probability mass for the values in
#'   \eqn{\mu}{mu}} \item{likelihood}{the scaled likelihood function for
#'   \eqn{\mu}{mu} given \eqn{x} and \eqn{\sigma_x}{sigma.x}}
#'   \item{posterior}{the posterior probability of \eqn{\mu}{mu} given \eqn{x}
#'   and \eqn{\sigma_x}{sigma.x}}
#' @seealso \code{\link{normnp}} \code{\link{normgcp}}
#' @keywords misc
#' @examples
#'
#' ## generate a sample of 20 observations from a N(-0.5,1) population
#' x = rnorm(20,-0.5,1)
#'
#' ## find the posterior density with a uniform prior on mu
#' normdp(x,1)
#'
#' ## find the posterior density with a non-uniform prior on mu
#' mu = seq(-3,3,by=0.1)
#' mu.prior = runif(length(mu))
#' mu.prior = sort(mu.prior/sum(mu.prior))
#' normdp(x,1,mu,mu.prior)
#'
#' ## Let mu have the discrete distribution with 5 possible
#' ## values, 2, 2.5, 3, 3.5 and 4, and associated prior probabilities of
#' ## 0.1, 0.2, 0.4, 0.2 and 0.1 respectively. Find the posterior
#' ## distribution after drawing a random sample of n = 5 observations
#' ## from a N(mu,1) distribution, y = [1.52, 0.02, 3.35, 3.49, 1.82]
#' mu = seq(2,4,by=0.5)
#' mu.prior = c(0.1,0.2,0.4,0.2,0.1)
#' y = c(1.52,0.02,3.35,3.49,1.82)
#' normdp(y,1,mu,mu.prior)
#'
#' @export normdp
normdp = function(x, sigma.x = NULL, mu = NULL, mu.prior = NULL, n.mu = 50, ...){
  ## x        - the vector of observations
  ## sigma.x  - the population standard deviation
  ## mu       - vector of possible values of the population mean
  ## mu.prior - the associated prior probability mass
  ## n.mu     - if mu is NULL then a uniform prior with n.mu points is used
  ## the prior, likelihood and posterior are returned (invisibly) as a list

  if(n.mu < 3)
    stop("Number of prior values of theta must be greater than 2")

  mx = mean(x)
  quiet = Bolstad.control(...)$quiet

  ## resolve sigma.x first: it is needed to construct the default grid for mu
  if(is.null(sigma.x)){
    sigma.x = sd(x - mx)
    if(!quiet){
      cat(paste("Standard deviation of the residuals :", signif(sigma.x, 4), "\n", sep = ""))
    }
  }else{
    if(sigma.x > 0){
      if(!quiet){
        cat(paste("Known standard deviation :", signif(sigma.x, 4), "\n", sep = ""))
      }
    }else{
      stop("The standard deviation must be greater than zero")
    }
  }

  if(is.null(mu)){
    mu = seq(min(x) - sigma.x, max(x) + sigma.x, length = n.mu)
    mu.prior = rep(1/n.mu, n.mu)
  }

  if(any(mu.prior < 0) | any(mu.prior > 1))
    stop("Prior probabilities must be between 0 and 1 inclusive")

  if(round(sum(mu.prior), 7) != 1){
    warning("The prior probabilities did not sum to 1, therefore the prior has been normalized")
    mu.prior = mu.prior/sum(mu.prior)
  }

  n.mu = length(mu)
  nx = length(x)
  snx = sigma.x^2/nx
  likelihood = exp(-0.5*(mx - mu)^2/snx)

  posterior = likelihood*mu.prior/sum(likelihood*mu.prior)

  if(Bolstad.control(...)$plot){
    plot(mu, posterior, ylim = c(0, 1.1*max(posterior, mu.prior)), pch = 20,
         col = "blue",
         xlab = expression(mu), ylab = expression(Probability(mu)))
    points(mu, mu.prior, pch = 20, col = "red")

    legend("topleft", bty = "n", fill = c("blue", "red"),
           legend = c("Posterior", "Prior"), cex = 0.7)
  }

  mx = sum(mu * posterior)
  vx = sum((mu - mx)^2 * posterior)

  results = list(name = 'mu', param.x = mu, prior = mu.prior,
                 likelihood = likelihood, posterior = posterior,
                 mean = mx, var = vx,
                 cdf = function(x, ...)cumDistFun(x, mu, posterior),
                 quantileFun = function(probs, ...)qFun(probs, mu, posterior))
  class(results) = 'Bolstad'
  invisible(results)
}
/scratch/gouwar.j/cran-all/cranData/Bolstad/R/normdp.r
#' Bayesian inference on a normal mean with a general continuous prior
#'
#' Evaluates and plots the posterior density for \eqn{\mu}{mu}, the mean of a
#' normal distribution, with a general continuous prior on \eqn{\mu}{mu}
#'
#' @param x a vector of observations from a normal distribution with unknown
#'   mean and known std. deviation.
#' @param sigma.x the population std. deviation of the normal distribution. If
#'   this value is NULL then the std. deviation of the residuals is used.
#' @param density distributional form of the prior density. Can be one of:
#'   "uniform", "normal", "flat", or "user".
#' @param params if density = "normal" then params must contain at least a mean
#'   and possibly a std. deviation. If a std. deviation is not specified then
#'   sigma.x will be used as the std. deviation of the prior. If density =
#'   "uniform" then params must contain a minimum and a maximum value for the
#'   uniform prior. If a maximum and minimum are not specified then a
#'   \eqn{U[0,1]} prior is used
#' @param n.mu the number of possible \eqn{\mu}{mu} values in the prior
#' @param mu a vector of possible values for the mean. Must be set if
#'   density = "user"
#' @param mu.prior the associated prior density. Must be set if density = "user"
#' @param \dots additional arguments that are passed to \code{Bolstad.control}
#' @return A list will be returned with the following components:
#'   \item{likelihood}{the scaled likelihood function for \eqn{\mu}{mu} given
#'   \eqn{x} and \eqn{\sigma_x}{sigma.x}} \item{posterior}{the posterior
#'   probability of \eqn{\mu}{mu} given \eqn{x} and \eqn{\sigma_x}{sigma.x}}
#'   \item{mu}{the vector of possible \eqn{\mu}{mu} values used in the prior}
#'   \item{mu.prior}{the associated probability mass for the values in
#'   \eqn{\mu}{mu}}
#' @seealso \code{\link{normdp}} \code{\link{normnp}}
#' @keywords misc
#' @examples
#'
#' ## generate a sample of 20 observations from a N(-0.5,1) population
#' x = rnorm(20,-0.5,1)
#'
#' ## find the posterior density with a uniform U[-3,3] prior on mu
#' normgcp(x, 1, params = c(-3, 3))
#'
#' ## find the posterior density with a non-uniform prior on mu
#' mu = seq(-3, 3, by = 0.1)
#' mu.prior = rep(0, length(mu))
#' mu.prior[mu <= 0] = 1 / 3 + mu[mu <= 0] / 9
#' mu.prior[mu > 0] = 1 / 3 - mu[mu > 0] / 9
#' normgcp(x, 1, density = "user", mu = mu, mu.prior = mu.prior)
#'
#' ## find the CDF for the previous example and plot it
#' ## Note the syntax for sintegral has changed
#' results = normgcp(x,1,density="user",mu=mu,mu.prior=mu.prior)
#' cdf = sintegral(mu,results$posterior,n.pts=length(mu))$cdf
#' plot(cdf,type="l",xlab=expression(mu[0])
#'      ,ylab=expression(Pr(mu<=mu[0])))
#'
#' ## use the CDF for the previous example to find a 95%
#' ## credible interval for mu. Thanks to John Wilkinson for this simplified code
#'
#' lcb = cdf$x[with(cdf,which.max(x[y<=0.025]))]
#' ucb = cdf$x[with(cdf,which.max(x[y<=0.975]))]
#' cat(paste("Approximate 95% credible interval : ["
#'           ,round(lcb,4)," ",round(ucb,4),"]\n",sep=""))
#'
#' ## use the CDF from the previous example to find the posterior mean
#' ## and std. deviation
#' dens = mu*results$posterior
#' post.mean = sintegral(mu,dens)$value
#'
#' dens = (mu-post.mean)^2*results$posterior
#' post.var = sintegral(mu,dens)$value
#' post.sd = sqrt(post.var)
#'
#' ## use the mean and std. deviation from the previous example to find
#' ## an approximate 95% credible interval
#' lb = post.mean-qnorm(0.975)*post.sd
#' ub = post.mean+qnorm(0.975)*post.sd
#'
#' cat(paste("Approximate 95% credible interval : ["
#'           ,round(lb,4)," ",round(ub,4),"]\n",sep=""))
#'
#' ## repeat the last example but use the new summary functions for the posterior
#' results = normgcp(x, 1, density = "user", mu = mu, mu.prior = mu.prior)
#'
#' ## use the cdf function to get the cdf and plot it
#' postCDF = cdf(results) ## note this is a function
#' plot(results$mu, postCDF(results$mu), type="l", xlab = expression(mu[0]),
#'      ylab = expression(Pr(mu <= mu[0])))
#'
#' ## use the quantile function to get a 95% credible interval
#' ci = quantile(results, c(0.025, 0.975))
#' ci
#'
#' ## use the mean and sd functions to get the posterior mean and standard deviation
#' postMean = mean(results)
#' postSD = sd(results)
#' postMean
#' postSD
#'
#' ## use the mean and std. deviation from the previous example to find
#' ## an approximate 95% credible interval
#' ciApprox = postMean + c(-1,1) * qnorm(0.975) * postSD
#' ciApprox
#'
#' @export normgcp
normgcp = function(x, sigma.x = NULL,
                   density = c("uniform", "normal", "flat", "user"),
                   params = NULL, n.mu = 50, mu = NULL, mu.prior = NULL, ...){
  ## x        - the vector of observations
  ## sigma.x  - the population standard deviation
  ## density  - distributional form of the prior density;
  ##            can be one of: flat, normal, uniform, or user.
  ##            By default a continuous uniform prior is used.
  ## mu       - vector of possible values of the population mean
  ## mu.prior - the associated prior density
  ## the prior, likelihood and posterior are returned (invisibly) as a list

  mean.x = mean(x)

  if(n.mu < 3)
    stop("Number of prior values of mu must be greater than 2")

  if(is.null(sigma.x)){
    sigma.x = sd(x - mean.x)
    if(!Bolstad.control(...)$quiet){
      cat(paste("Standard deviation of the residuals :",
                signif(sigma.x, 4), "\n", sep = ""))
    }
  }else{
    if(!Bolstad.control(...)$quiet){
      cat(paste("Known standard deviation :", signif(sigma.x, 4), "\n", sep = ""))
    }
  }

  density = match.arg(density)

  if(density == 'flat'){
    if(is.null(mu)){
      ## note the argument order: the probabilities come first in qnorm
      bds = qnorm(c(0.005, 0.995), mean.x, sigma.x)
      mu = seq(from = bds[1], to = bds[2], length = n.mu)
    }
    height = dnorm(qnorm(0.975, mean.x, sigma.x), mean.x, sigma.x)
    mu.prior = rep(height, length(mu))
    likelihood = posterior = dnorm(mu, mean.x, sigma.x)

    ## flat prior has posterior mean equal to sample mean,
    ## and posterior variance equal to the observation variance

    if(Bolstad.control(...)$plot){
      plot(mu, posterior, ylim = c(0, 1.1 * max(posterior, mu.prior)),
           type = "l", lty = 1, col = "blue",
           xlab = expression(mu), ylab = expression(Probability(mu)))
      abline(h = height, lty = 2, col = "red")
      legend("topleft", bty = "n", cex = 0.7, lty = 1:2,
             col = c("blue", "red"), legend = c("Posterior", "Prior"))
    }
  }else{
    if(density == 'normal'){
      if(is.null(params) | length(params) < 1)
        stop("You must supply a mean for a normal prior")

      mx = params[1]

      if(length(params) == 2)  ## user has supplied sd as well
        s.x = params[2]
      else
        s.x = sigma.x

      mu = seq(mx - 3.5 * s.x, mx + 3.5 * s.x, length = n.mu)
      mu.prior = dnorm(mu, mx, s.x)
    }else if(density == 'uniform'){
      if(is.null(params)){
        ## set params to mean +/- 3.5 sd by default
        params = c(mean.x - 3.5 * sigma.x, mean.x + 3.5 * sigma.x)
      }

      if(length(params) < 2)
        stop("You must supply a minimum and a maximum to use a uniform prior")

      minx = params[1]
      maxx = params[2]

      if(maxx <= minx)
        stop("The maximum must be greater than the minimum for a uniform prior")

      mu = seq(minx, maxx, length = n.mu)
      mu.prior = dunif(mu, minx, maxx)
    }else{
      ## user specified prior
      if(is.null(mu) | is.null(mu.prior))
        stop("If you wish to use a non-uniform continuous prior then you must supply a mean vector, mu, and an associated density vector, mu.prior")
      if(is.function(mu.prior))
        mu.prior = mu.prior(mu)
    }

    if(any(mu.prior < 0))
      stop("Prior densities must be >=0")

    crude.int = sum(diff(mu) * mu.prior[-1])
    if(round(crude.int, 3) != 1){
      warning("The prior did not integrate to 1, therefore the prior has been normalized")
      mu.prior = mu.prior / crude.int
      print(crude.int)
    }

    n.mu = length(mu)
    mx = mean(x)
    nx = length(x)
    snx = sigma.x^2 / nx
    likelihood = exp(-0.5 * (mx - mu)^2 / snx)

    ## Numerically integrate the denominator.
    ## First calculate the height of the function to be integrated,
    f.x.mu = likelihood * mu.prior

    ## then get a linear approximation so that we don't have to worry about
    ## the number of points specified by the user, and apply Simpson's rule
    ## over 513 equally spaced points.
    ap = approx(mu, f.x.mu, n = 513)
    integral = sum(ap$y[2 * (1:256) - 1] + 4 * ap$y[2 * (1:256)] + ap$y[2 * (1:256) + 1])
    integral = (ap$x[2] - ap$x[1]) * integral / 3

    posterior = likelihood * mu.prior / integral
  }

  if(Bolstad.control(...)$plot){
    plot(mu, posterior, ylim = c(0, 1.1 * max(posterior, mu.prior)),
         type = "l", lty = 1, col = "blue",
         xlab = expression(mu), ylab = expression(Probability(mu)))
    lines(mu, mu.prior, lty = 2, col = "red")
    legend("topleft", bty = "n", cex = 0.7, lty = 1:2,
           col = c("blue", "red"), legend = c("Posterior", "Prior"))
  }

  results = list(name = 'mu', param.x = mu, prior = mu.prior,
                 likelihood = likelihood, posterior = posterior,
                 mu = mu, mu.prior = mu.prior # for backwards compat. only
                 )
  class(results) = 'Bolstad'
  invisible(results)
}
/scratch/gouwar.j/cran-all/cranData/Bolstad/R/normgcp.r
#' Bayesian inference on a normal mean with a mixture of normal priors
#'
#' Evaluates and plots the posterior density for \eqn{\mu}{mu}, the mean of a
#' normal distribution, with a mixture of normal priors on \eqn{\mu}{mu}
#'
#' @param x a vector of observations from a normal distribution with unknown
#'   mean and known std. deviation.
#' @param sigma.x the population std. deviation of the observations.
#' @param prior0 a vector of length 2 which contains the mean and standard
#'   deviation of your precise prior.
#' @param prior1 a vector of length 2 which contains the mean and standard
#'   deviation of your vague prior.
#' @param mu a vector of prior possibilities for the mean. If it is \code{NULL},
#'   then a vector spanning the central mass of the precise prior is created.
#' @param n.mu the number of possible \eqn{\mu}{mu} values in the prior.
#' @param p the mixing proportion for the two component normal priors.
#' @param \dots additional arguments that are passed to \code{Bolstad.control}
#' @return A list will be returned with the following components: \item{mu}{the
#'   vector of possible \eqn{\mu}{mu} values used in the prior} \item{prior}{the
#'   associated prior density for the values in \eqn{\mu}{mu}}
#'   \item{likelihood}{the scaled likelihood function for \eqn{\mu}{mu} given
#'   \eqn{x} and \eqn{\sigma_x}{sigma.x}} \item{posterior}{the posterior
#'   probability of \eqn{\mu}{mu} given \eqn{x} and \eqn{\sigma_x}{sigma.x}}
#' @seealso \code{\link{binomixp}} \code{\link{normdp}} \code{\link{normgcp}}
#' @keywords misc
#' @examples
#'
#' ## generate a sample of 20 observations from a N(-0.5, 1) population
#' x = rnorm(20, -0.5, 1)
#'
#' ## find the posterior density with a N(0, 1) prior on mu - a 50:50 mix of
#' ## two N(0, 1) densities
#' normmixp(x, 1, c(0, 1), c(0, 1))
#'
#' ## find the posterior density with 50:50 mix of a N(0.5, 3) prior and a
#' ## N(0, 1) prior on mu
#' normmixp(x, 1, c(0.5, 3), c(0, 1))
#'
#' ## Find the posterior density for mu, given a random sample of 4
#' ## observations from N(mu, 1), y = [2.99, 5.56, 2.83, 3.47],
#' ## and an 80:20 mix of a N(3, 2) prior and a N(0, 100) prior for mu
#' x = c(2.99, 5.56, 2.83, 3.47)
#' normmixp(x, 1, c(3, 2), c(0, 100), 0.8)
#'
#' @export normmixp
normmixp = function(x, sigma.x, prior0, prior1, p = 0.5, mu = NULL,
                    n.mu = max(100, length(mu)), ...){

  if(length(x) == 0)
    stop("Error: x must contain at least one observation")

  if(sigma.x <= 0)
    stop("Error: sigma.x must be greater than zero")

  if(length(prior0) != 2 || length(prior1) != 2)
    stop("Error: there must be 2 parameters for each prior, 2 means and 2 standard deviations")

  prior.means = c(prior0[1], prior1[1])
  prior.sds = c(prior0[2], prior1[2])

  if(any(prior.sds <= 0))
    stop("Error: the prior standard deviations should be greater than zero")

  if(p <= 0 || p >= 1)
    stop("Error: p should be between 0 and 1 exclusive")

  n = length(x)
  x.bar = mean(x)

  q0 = p
  q1 = 1 - p

  ## conjugate normal update for each component
  post.prec0 = (1 / prior.sds[1]^2) + (n / sigma.x^2)
  post.var0 = 1 / post.prec0
  post.sd0 = sqrt(post.var0)
  post.mean0 = (prior.means[1] / (prior.sds[1]^2 * post.prec0)) +
    (n * x.bar / (sigma.x^2 * post.prec0))

  post.prec1 = (1 / prior.sds[2]^2) + (n / sigma.x^2)
  post.var1 = 1 / post.prec1
  post.sd1 = sqrt(post.var1)
  post.mean1 = (prior.means[2] / (prior.sds[2]^2 * post.prec1)) +
    (n * x.bar / (sigma.x^2 * post.prec1))

  quiet = Bolstad.control(...)$quiet
  if(!quiet){
    cat("Posterior summary statistics of component 0\n")
    cat("--------------------------------------------\n")
    cat(paste("Mean:\t\t", signif(post.mean0, 3), "\n"))
    cat(paste("Std. Dev.:\t", signif(post.sd0, 4), "\n"))
    cat(paste("Variance:\t", signif(post.var0, 4), "\n"))
    cat(paste("Precision:\t", signif(post.prec0, 4), "\n\n"))

    cat("Posterior summary statistics of component 1\n")
    cat("--------------------------------------------\n")
    cat(paste("Mean:\t\t", signif(post.mean1, 3), "\n"))
    cat(paste("Std. Dev.:\t", signif(post.sd1, 4), "\n"))
    cat(paste("Variance:\t", signif(post.var1, 4), "\n"))
    cat(paste("Precision:\t", signif(post.prec1, 4), "\n\n"))
  }

  ## predictive density of the sample mean under each component
  sd.x = sqrt(sigma.x^2 / n + prior.sds[1]^2)
  f0 = dnorm(x.bar, prior.means[1], sd.x)

  if(!quiet){
    cat("Predictive density of the sample mean under component 0\n")
    cat("--------------------------------------------------------\n")
    cat(paste("Sample mean:\t", signif(x.bar, 3), "\n"))
    cat(paste("Pred. mean:\t", signif(prior.means[1], 3), "\n"))
    cat(paste("Pred. SD:\t", signif(sd.x, 4), "\n"))
    cat(paste("Density:\t", signif(f0, 4), "\n\n"))
  }

  sd.x = sqrt(sigma.x^2 / n + prior.sds[2]^2)
  f1 = dnorm(x.bar, prior.means[2], sd.x)

  if(!quiet){
    cat("Predictive density of the sample mean under component 1\n")
    cat("--------------------------------------------------------\n")
    cat(paste("Sample mean:\t", signif(x.bar, 3), "\n"))
    cat(paste("Pred. mean:\t", signif(prior.means[2], 3), "\n"))
    cat(paste("Pred. SD:\t", signif(sd.x, 4), "\n"))
    cat(paste("Density:\t", signif(f1, 4), "\n\n"))
  }

  ## posterior mixing proportions
  qp0 = q0 * f0 / (q0 * f0 + q1 * f1)
  qp1 = 1 - qp0

  if(!quiet){
    cat(paste("Post. mixing proportion for component 0:\t", signif(qp0, 3), "\n"))
    cat(paste("Post. mixing proportion for component 1:\t", signif(qp1, 3), "\n"))
  }

  step.size = k1 = k2 = 0

  if(is.null(mu)){
    k1 = qnorm(0.0001, prior0[1], prior0[2])
    k2 = qnorm(0.9999, prior0[1], prior0[2])
    step.size = (k2 - k1) / 1000
    mu = seq(k1, k2, by = step.size)
  }

  if(length(mu) < n.mu){
    k1 = min(mu)
    k2 = max(mu)
    mu = seq(k1, k2, length = n.mu)
    step.size = diff(mu)[1]
  }else{
    k1 = min(mu)
    k2 = max(mu)
    step.size = diff(mu)[1]
  }

  prior.0 = dnorm(mu, prior.means[1], prior.sds[1])
  prior.1 = dnorm(mu, prior.means[2], prior.sds[2])
  prior = q0 * prior.0 + q1 * prior.1

  posterior.0 = dnorm(mu, post.mean0, post.sd0)
  posterior.1 = dnorm(mu, post.mean1, post.sd1)
  posterior = qp0 * posterior.0 + qp1 * posterior.1

  loglik = -(mu - x.bar)^2 / (2 * sigma.x^2 / n)
  loglik = loglik - max(loglik)
  likelihood = exp(loglik)

  normalizing.factor = sum(likelihood) * step.size
  likelihood = likelihood / normalizing.factor

  f.mu = approxfun(mu, likelihood)

  if(!quiet){
    ## this diagnostic was previously printed unconditionally
    cat(paste("\nIntegral of likelihood over mu: ",
              round(integrate(f.mu, k1, k2)$value, 5), "\n"))
  }

  if(Bolstad.control(...)$plot){
    o.par = par(mfrow = c(2, 2))

    ## plot the priors and the mixture prior
    y.max = max(prior.0, prior.1, prior)
    plot(mu, prior.0, ylim = c(0, y.max * 1.1),
         xlab = expression(mu), ylab = "Density",
         main = "Mixture prior and its components",
         type = "l", lty = 2, col = "black")
    lines(mu, prior.1, lty = 3, col = "red")
    lines(mu, prior, lty = 1, col = "blue")
    legend("topleft", bty = "n", cex = 0.7,
           legend = c(expression(prior[0]), expression(prior[1]),
                      expression(prior[mix])),
           lty = c(2, 3, 1), col = c("black", "red", "blue"))

    ## plot the posteriors and the mixture posterior
    y.max = max(posterior.0, posterior.1, posterior)
    plot(mu, posterior.0, ylim = c(0, y.max * 1.1),
         xlab = expression(mu), ylab = "Density",
         main = "Mixture posterior and its components",
         type = "l", lty = 2, col = "black")
    lines(mu, posterior.1, lty = 3, col = "red")
    lines(mu, posterior, lty = 1, col = "blue")
    legend("topleft", bty = "n", cex = 0.7,
           legend = c(expression(posterior[0]), expression(posterior[1]),
                      expression(posterior[mix])),
           lty = c(2, 3, 1), col = c("black", "red", "blue"))

    ## plot the mixture prior, likelihood and mixture posterior
    y.max = max(prior, posterior, likelihood)
    plot(mu, prior, ylim = c(0, y.max * 1.1),
         xlab = expression(mu), ylab = "Density",
         main = "Mixture prior, likelihood and mixture posterior",
         type = "l", lty = 2, col = "black")
    lines(mu, likelihood, lty = 3, col = "red")
    lines(mu, posterior, lty = 1, col = "blue")
    legend("topleft", bty = "n", cex = 0.7,
           legend = c(expression(prior[mix]), expression(likelihood),
                      expression(posterior[mix])),
           lty = c(2, 3, 1), col = c("black", "red", "blue"))

    par(o.par)
  }

  comp1 = list(name = 'mu', param.x = mu, prior = prior.0,
               likelihood = likelihood, posterior = posterior.0,
               mean = post.mean0, var = post.var0,
               cdf = function(q, ...){pnorm(q, mean = post.mean0, sd = post.sd0, ...)},
               quantileFun = function(probs, ...){qnorm(probs, mean = post.mean0, sd = post.sd0, ...)})
  class(comp1) = "Bolstad"

  comp2 = list(name = 'mu', param.x = mu, prior = prior.1,
               likelihood = likelihood, posterior = posterior.1,
               mean = post.mean1, var = post.var1,
               cdf = function(q, ...){pnorm(q, mean = post.mean1, sd = post.sd1, ...)},
               quantileFun = function(probs, ...){qnorm(probs, mean = post.mean1, sd = post.sd1, ...)})
  class(comp2) = "Bolstad"

  mix = list(name = 'mu', param.x = mu, prior = prior,
             likelihood = likelihood, posterior = posterior,
             mu = mu # for backwards compatibility only
             )
  class(mix) = "Bolstad"

  invisible(list(comp1 = comp1, comp2 = comp2, mix = mix))
}
/scratch/gouwar.j/cran-all/cranData/Bolstad/R/normmixp.r
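A quick numerical check of the mixture machinery implemented above (the data are made up): the mixture posterior returned in the mix component is a density on the evaluation grid, so a simple Riemann sum over it should be approximately 1.

x = c(2.99, 5.56, 2.83, 3.47)
fit = normmixp(x, 1, prior0 = c(3, 2), prior1 = c(0, 100), p = 0.8, plot = FALSE)
sum(fit$mix$posterior) * diff(fit$mix$param.x)[1]  # ~1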
#' Bayesian inference on a normal mean with a normal prior
#'
#' Evaluates and plots the posterior density for \eqn{\mu}{mu}, the mean of a
#' normal distribution, with a normal prior on \eqn{\mu}{mu}
#'
#' @param x a vector of observations from a normal distribution with unknown
#'   mean and known std. deviation.
#' @param m.x the mean of the normal prior
#' @param s.x the standard deviation of the normal prior. If s.x <= 0 then a
#'   flat prior is used and m.x is ignored
#' @param sigma.x the population std. deviation of the normal distribution. If
#'   this value is NULL, which it is by default, then the std. deviation of
#'   the residuals is used in its place
#' @param mu a vector of prior possibilities for the true mean. If this is
#'   \code{NULL}, then a set of values centered on the prior (or sample) mean
#'   is used.
#' @param n.mu the number of possible \eqn{\mu}{mu} values in the prior
#' @param \dots optional control arguments. See \code{\link{Bolstad.control}}
#' @return A list will be returned with the following components: \item{mu}{the
#'   vector of possible \eqn{\mu}{mu} values used in the prior}
#'   \item{mu.prior}{the associated probability mass for the values in
#'   \eqn{\mu}{mu}} \item{likelihood}{the scaled likelihood function for
#'   \eqn{\mu}{mu} given \eqn{x} and \eqn{\sigma_x}{sigma.x}}
#'   \item{posterior}{the posterior probability of \eqn{\mu}{mu} given \eqn{x}
#'   and \eqn{\sigma_x}{sigma.x}} \item{mean}{the posterior mean} \item{sd}{the
#'   posterior standard deviation} \item{quantiles}{a selection of quantiles
#'   from the posterior density}
#' @seealso \code{\link{normdp}} \code{\link{normgcp}}
#' @keywords misc
#' @examples
#'
#' ## generate a sample of 20 observations from a N(-0.5,1) population
#' x = rnorm(20,-0.5,1)
#'
#' ## find the posterior density with a N(0,1) prior on mu
#' normnp(x,sigma=1)
#'
#' ## find the posterior density with N(0.5,3) prior on mu
#' normnp(x,0.5,3,1)
#'
#' ## Find the posterior density for mu, given a random sample of 4
#' ## observations from N(mu,sigma^2=1), y = [2.99, 5.56, 2.83, 3.47],
#' ## and a N(3, sd=2) prior for mu
#' y = c(2.99,5.56,2.83,3.47)
#' normnp(y,3,2,1)
#'
#' @export normnp
normnp = function(x, m.x = 0, s.x = 1, sigma.x = NULL, mu = NULL,
                  n.mu = max(100, length(mu)), ...){
  mean.x = mean(x)
  n.x = length(x)

  quiet = Bolstad.control(...)$quiet

  if(is.null(sigma.x)){
    sigma.x = sd(x - mean.x)
    if(!quiet){
      cat(paste("Standard deviation of the residuals :", signif(sigma.x, 4), "\n", sep = ""))
    }
  }else{
    if(sigma.x > 0){
      if(!quiet){
        cat(paste("Known standard deviation :", signif(sigma.x, 4), "\n", sep = ""))
      }
    }else{
      stop("Standard deviation sigma.x must be greater than zero")
    }
  }

  if(is.null(mu)){
    lb = ub = 0
    if(s.x <= 0){
      lb = mean.x - 3.5 * sigma.x / sqrt(n.x)
      ub = mean.x + 3.5 * sigma.x / sqrt(n.x)
    }else{
      lb = m.x - 3.5 * s.x
      ub = m.x + 3.5 * s.x
    }
    mu = seq(lb, ub, length = n.mu)
  }else{
    if(length(mu) < n.mu)
      mu = seq(min(mu), max(mu), length = n.mu)
  }

  if(s.x <= 0){
    ## a flat (uniform) prior over the range of mu
    prior.precision = 0
    m.x = 0
    lb = min(mu)
    ub = max(mu)
    mu.prior = rep(1 / (ub - lb), n.mu)
  }else{
    mu.prior = dnorm(mu, m.x, s.x)
    prior.precision = 1/s.x^2
  }

  likelihood = exp(-n.x/(2*sigma.x^2)*(mean.x - mu)^2)

  post.precision = prior.precision + (n.x/sigma.x^2)
  post.sd = sqrt(1/post.precision)
  post.mean = (prior.precision/post.precision*m.x) +
    ((n.x/sigma.x^2)/post.precision*mean.x)

  if(!quiet){
    cat(paste("Posterior mean           : ", round(post.mean, 7), "\n", sep = ""))
    cat(paste("Posterior std. deviation : ", round(post.sd, 7), "\n", sep = ""))
  }

  posterior = dnorm(mu, post.mean, post.sd)

  if(Bolstad.control(...)$plot){
    plot(mu, posterior, ylim = c(0, 1.1*max(posterior, mu.prior)), type = "l",
         lty = 1, col = "blue",
         xlab = expression(mu), ylab = expression(Probability(mu)),
         main = "Shape of prior and posterior")
    lines(mu, mu.prior, lty = 2, col = "red")

    left = min(mu) + diff(range(mu))*0.05
    legend("topleft", bty = "n", lty = 1:2, col = c("blue", "red"),
           legend = c("Posterior", "Prior"), cex = 0.7)
  }

  probs = c(0.005, 0.01, 0.025, 0.05, 0.5, 0.95, 0.975, 0.99, 0.995)
  qtls = qnorm(probs, post.mean, post.sd)
  names(qtls) = probs

  if(!quiet){
    cat("\nProb.\tQuantile \n")
    cat("------\t----------\n")
    for(i in 1:length(probs)){
      cat(sprintf("%5.3f\t%10.7f\n", round(probs[i], 3), round(qtls[i], 7)))
    }
  }

  results = list(name = 'mu', param.x = mu, prior = mu.prior,
                 likelihood = likelihood, posterior = posterior,
                 mean = post.mean, var = post.sd^2, sd = post.sd,
                 quantiles = qtls,
                 cdf = function(x, ...)pnorm(x, post.mean, post.sd, ...),
                 quantileFun = function(probs, ...)qnorm(probs, post.mean, post.sd, ...))
  class(results) = 'Bolstad'
  invisible(results)
}
/scratch/gouwar.j/cran-all/cranData/Bolstad/R/normnp.r
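A quick check of the conjugate update implemented above: the posterior precision stored in the result (1/var) equals the prior precision plus n / sigma.x^2.

y = c(2.99, 5.56, 2.83, 3.47)
fit = normnp(y, m.x = 3, s.x = 2, sigma.x = 1, plot = FALSE)
all.equal(1 / fit$var, 1 / 2^2 + length(y) / 1^2)  # TRUE: 0.25 + 4 = 4.25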
#' Bayesian inference for a normal standard deviation with a scaled inverse
#' chi-squared distribution
#'
#' Evaluates and plots the posterior density for \eqn{\sigma}{sigma}, the
#' standard deviation of a Normal distribution where the mean \eqn{\mu}{mu} is
#' known
#'
#' @param y a random sample from a
#'   \eqn{normal(\mu,\sigma^2)}{normal(mu,sigma^2)} distribution.
#' @param mu the known population mean of the random sample.
#' @param S0 the prior scaling factor.
#' @param kappa the degrees of freedom of the prior.
#' @param \dots additional arguments that are passed to \code{Bolstad.control}
#' @return A list will be returned with the following components:
#'
#' \item{sigma}{the values of \eqn{\sigma}{sigma} for which the prior,
#' likelihood and posterior have been calculated} \item{prior}{the prior
#' density for \eqn{\sigma}{sigma}} \item{likelihood}{the likelihood function
#' for \eqn{\sigma}{sigma} given \eqn{y}{y}} \item{posterior}{the posterior
#' density of \eqn{\sigma}{sigma} given \eqn{y}{y}} \item{S1}{the posterior
#' scaling constant} \item{kappa1}{the posterior degrees of freedom}
#' @keywords misc
#' @examples
#'
#' ## Suppose we have five observations from a normal(mu, sigma^2)
#' ## distribution where mu = 200, which are 206.4, 197.4, 212.7, 208.5, 203.4.
#' y = c(206.4, 197.4, 212.7, 208.5, 203.4)
#'
#' ## We wish to choose a prior that has a median of 8. This happens when
#' ## S0 = 29.11 and kappa = 1
#' nvaricp(y,200,29.11,1)
#'
#' ## Same as the previous example, but calculate a 95% credible
#' ## interval for sigma. NOTE: this method has changed
#' results = nvaricp(y,200,29.11,1)
#' quantile(results, probs = c(0.025, 0.975))
#' @export nvaricp
nvaricp = function(y, mu, S0, kappa, ...){

  # dots = list(...)
  # cred.int = dots[[pmatch("cred.int", names(dots))]]
  # alpha = dots[[pmatch("alpha", names(dots))]]
  #
  # if(is.null(cred.int))
  #   cred.int = FALSE
  #
  # if(is.null(alpha))
  #   alpha = 0.05

  n = length(y)
  SST = sum((y - mu)^2)

  if(kappa > 0){
    S1 = S0 + SST
    kappa1 = kappa + n

    k1 = qchisq(0.01, kappa)
    k2 = S0/k1
    k3 = sqrt(k2)
    sigma = seq(0, k3, length = 1002)[-1]
    k4 = diff(sigma)[1]
    sigma.sq = sigma^2

    log.prior = -((kappa - 1)/2 + 1)*log(sigma.sq) - S0/(2*sigma.sq)
    prior = exp(log.prior)

    log.like = -(n/2)*log(sigma.sq) - SST/(2*sigma.sq)
    likelihood = exp(log.like)

    ## the grid covers 99% of the prior mass, so divide the trapezoidal
    ## integral by 0.99 (matching the convention in the branches below)
    kint = ((2*sum(prior)) - prior[1] - prior[1001])*k4/(2*0.99)
    prior = prior/kint

    posterior = prior*likelihood
    kint = ((2*sum(posterior)) - posterior[1] - posterior[1001])*k4/2
    posterior = posterior/kint

    y.max = max(c(prior, posterior))

    k1 = qchisq(0.01, kappa1)
    k2 = S1/k1
    k3 = sqrt(k2)

    if(Bolstad.control(...)$plot){
      plot(sigma, prior, type = "l", col = "blue",
           ylim = c(0, 1.1 * y.max), xlim = c(0, k3),
           main = expression(paste("Shape of Inverse ", chi^2,
                                   " prior and posterior for ", sigma, sep = "")),
           xlab = expression(sigma), ylab = "Density")
      lines(sigma, posterior, lty = 1, col = "red")
      legend("topleft", lty = 1, lwd = 2, col = c("blue", "red"),
             legend = c("Prior", "Posterior"), bty = "n")
    }
  }else if(kappa == 0){ ## Jeffreys' prior
    S0 = 0 ## the prior scale is zero for Jeffreys' prior (as for kappa < 0)
    S1 = S0 + SST
    kappa1 = kappa + n

    k1 = qchisq(0.001, kappa1)
    k2 = S1/k1
    k3 = sqrt(k2)
    k4 = k3/1000
    sigma = seq(0, k3, length = 1002)[-1]
    sigma.sq = sigma^2
    likelihood = NULL

    log.posterior = -((kappa1 - 1)/2 + 1)*log(sigma.sq) - S1/(2*sigma.sq)
    posterior = exp(log.posterior)
    kint = ((2*sum(posterior)) - posterior[1] - posterior[1001])*k4/(2*.999)
    posterior = posterior/kint

    log.prior = -((kappa - 1)/2 + 1)*log(sigma.sq) - S0/(2*sigma.sq)
    prior = exp(log.prior)
    kint = ((2*sum(prior)) - prior[1] - prior[1001])*k4/2
    prior = prior/kint

    k1 = qchisq(0.01, kappa1)
    k2 = S1/k1
    k3 = sqrt(k2)
    k4 = 1.2*max(posterior)

    if(Bolstad.control(...)$plot){
      plot(sigma, prior, type = "l", col = "blue", ylim = c(0, k4),
           main = expression(paste("Shape of prior and posterior for ", sigma, sep = "")),
           xlab = expression(sigma), ylab = "Density")
      lines(sigma, posterior, col = "red")
      legend("topleft", lty = 1, lwd = 2, col = c("blue", "red"),
             legend = c("Prior", "Posterior"), bty = "n")
    }
  }else if(kappa < 0){
    S0 = 0
    S1 = S0 + SST
    kappa1 = kappa + n

    k1 = qchisq(0.001, kappa1)
    k2 = S1/k1
    k3 = sqrt(k2)
    k4 = k3/1000
    sigma = seq(0, k3, length = 1002)[-1]
    sigma.sq = sigma^2

    log.posterior = -((kappa1 - 1)/2 + 1)*log(sigma.sq) - S1/(2*sigma.sq)
    posterior = exp(log.posterior)
    kint = ((2*sum(posterior)) - posterior[1] - posterior[1001])*k4/(2*.999)
    posterior = posterior/kint

    log.prior = -((kappa - 1)/2 + 1)*log(sigma.sq) - S0/(2*sigma.sq)
    prior = exp(log.prior)
    kint = ((2*sum(prior)) - prior[1] - prior[1001])*k4/2
    prior = prior/kint

    likelihood = NULL

    k1 = qchisq(0.01, kappa1)
    k2 = S1/k1
    k3 = sqrt(k2)
    k4 = 1.2*max(posterior)

    if(Bolstad.control(...)$plot){
      plot(sigma, prior, type = "l", col = "blue",
           xlim = c(0, k3), ylim = c(0, k4),
           main = expression(paste("Shape of prior and posterior for ", sigma, sep = "")),
           xlab = expression(sigma), ylab = "Density")
      lines(sigma, posterior, col = "red")
      legend("topleft", lty = 1, lwd = 2, col = c("blue", "red"),
             legend = c("Prior", "Posterior"), bty = "n")
    }
  }

  if(!Bolstad.control(...)$quiet){
    cat(paste("S1: ", signif(S1, 4), " kappa1 :", signif(kappa1, 3), "\n", sep = ""))
  }

  # if(cred.int){
  #   msg = paste0("This argument is deprecated and will not be supported in future releases.",
  #                "\nPlease use the quantile function instead.\n")
  #   warning(msg)
  #
  #   if(kappa1 < 2)
  #     cat("Unable to calculate credible interval for sigma if kappa1 <= 2\n")
  #   else{
  #     sigmahat.post.mean = sqrt(S1/(kappa1 - 2))
  #     cat(paste("Estimate of sigma using posterior mean: ",
  #               signif(sigmahat.post.mean, 4), "\n", sep = ""))
  #   }
  #
  #   q50 = qchisq(p = 0.5, df = kappa1)
  #   sigmahat.post.median = sqrt(S1/q50)
  #   cat(paste("Estimate of sigma using posterior median: ",
  #             signif(sigmahat.post.median, 4), "\n", sep = ""))
  #
  #   ci = sqrt(S1 / qchisq(p = 1 - c(alpha * 0.5, 1 - alpha * 0.5), df = kappa1))
  #   ciStr = sprintf("%d%% credible interval for sigma: [%4g, %4g]\n",
  #                   round(100 * (1 - alpha)), signif(ci[1], 4), signif(ci[2], 4))
  #   cat(ciStr)
  #
  #   if(Bolstad.control(...)$plot)
  #     abline(v = ci, col = "blue", lty = 3)
  # }

  results = list(param.x = sigma, prior = prior, likelihood = likelihood,
                 posterior = posterior,
                 sigma = sigma, # for backwards compat. only
                 S1 = S1,
                 kappa1 = kappa1,
                 mean = ifelse(kappa1 > 2, sqrt(S1 / (kappa1 - 2)), NA),
                 median = sqrt(S1 / qchisq(0.5, kappa1)),
                 var = ifelse(kappa1 > 4, 2 * S1^2 / ((kappa1 - 2)^2 * (kappa1 - 4)), NA),
                 sd = sqrt(ifelse(kappa1 > 4, 2 * S1^2 / ((kappa1 - 2)^2 * (kappa1 - 4)), NA)),
                 ## S1 / sigma^2 is chi-squared with kappa1 df, so
                 ## Pr(sigma <= y) is an upper tail probability; lower.tail =
                 ## FALSE makes the cdf consistent with quantileFun below
                 cdf = function(y, ...){
                   pchisq(S1 / y^2, df = kappa1, lower.tail = FALSE, ...)
                 },
                 quantileFun = function(probs, ...){
                   sqrt(S1 / qchisq(p = 1 - probs, df = kappa1, ...))
                 })
  class(results) = 'Bolstad'
  invisible(results)
}
/scratch/gouwar.j/cran-all/cranData/Bolstad/R/nvaricp.r
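A round-trip sketch for the cdf/quantile pair returned above (this relies on the lower.tail = FALSE correction in the cdf member, which makes the two functions mutually consistent):

y = c(206.4, 197.4, 212.7, 208.5, 203.4)
fit = nvaricp(y, 200, 29.11, 1, plot = FALSE)
q90 = fit$quantileFun(0.9)
fit$cdf(q90)  # should be (numerically) 0.9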
#' Plot method for objects of type Bolstad
#'
#' A unified plotting method for plotting the prior, likelihood and posterior
#' from any of the analyses in the book
#'
#' The function provides a unified way of plotting the prior, likelihood and
#' posterior from any of the functions in the library that return these
#' quantities. It will produce an overlay of the lines by default, or separate
#' panels if \code{overlay = FALSE}.
#'
#' @param x An S3 object of class Bolstad
#' @param overlay if \code{FALSE} then up to three plots will be drawn
#'   side-by-side
#' @param which Control which of the prior = 1, likelihood = 2, and posterior =
#'   3, are plotted. This is set to prior and posterior by default to retain
#'   compatibility with the book
#' @param densCols The colors of the lines for each of the prior, likelihood
#'   and posterior
#' @param legendLoc The location of the legend, usually either
#'   \code{"topright"} or \code{"topleft"}
#' @param scaleLike If \code{TRUE}, then the likelihood will be scaled to have
#'   approximately the same maximum value as the posterior
#' @param xlab Label for x axis
#' @param ylab Label for y axis
#' @param main Title of plot
#' @param ylim Vector giving y coordinate range
#' @param cex Character expansion multiplier
#' @param \dots Any remaining arguments are fed to the \code{plot} command
#' @author James Curran
#' @keywords plot
#' @examples
#'
#' x = rnorm(20,-0.5,1)
#' ## find the posterior density with a N(0,1) prior on mu
#' b = normnp(x,sigma=1)
#' plot(b)
#' plot(b, which = 1:3)
#' plot(b, overlay = FALSE, which = 1:3)
#' @export
plot.Bolstad = function(x, overlay = TRUE, which = c(1, 3),
                        densCols = c("red", "green", "blue")[which],
                        legendLoc = "topleft", scaleLike = FALSE,
                        xlab = eval(expression(x$name)), ylab = "",
                        main = "Shape of prior and posterior",
                        ylim = c(0, max(cbind(x$prior, x$likelihood, x$posterior)[, which]) * 1.1),
                        cex = 0.7, ...){

  which = sort(which)
  if(is.null(which) || length(which) <= 0 || length(which) > 3 ||
     any(!grepl('^[1-3]+$', which))){
    stop("parameter which can only take vectors of length 1 to 3 containing the values 1, 2 and 3")
  }

  if(scaleLike){
    sf = max(x$posterior) / max(x$likelihood)
    x$likelihood = x$likelihood * sf
  }

  bLegend = !grepl("none", tolower(legendLoc))

  if(overlay){
    with(x, {
      Y = as.matrix(cbind(prior, likelihood, posterior)[, which])

      plot(param.x, Y[, 1], ylim = ylim, type = "l",
           lty = (3:1)[which[1]], col = densCols[1],
           xlab = xlab, ylab = "", main = main, ...)

      i = 2
      while(i <= ncol(Y)){
        lines(param.x, Y[, i], lty = (3:1)[which[i]], col = densCols[i])
        i = i + 1
      }

      if(bLegend){
        legend(legendLoc, lty = (3:1)[which], col = densCols,
               legend = c("Prior", "Likelihood", "Posterior")[which],
               bty = 'n', cex = cex)
      }
    })
  }else{
    oldpar = par(mfrow = c(1, length(which)),
                 mai = c(0.7, 0.1, 0.2, 0.1), yaxs = 'i', xaxs = 'i')
    with(x, {
      ## drop = FALSE keeps Y as a matrix even when only one curve is requested
      Y = cbind(prior, likelihood, posterior)[, which, drop = FALSE]
      legend = c("Prior", "Likelihood", "Posterior")[which]

      plot(param.x, Y[, 1], ylim = ylim, type = "l", col = densCols[1],
           xlab = eval(expression(name)), ylab = "", main = legend[1],
           axes = FALSE, ...)
      axis(1)
      box()

      ## seq_len avoids looping (and indexing out of bounds) when ncol(Y) == 1
      for(i in seq_len(ncol(Y))[-1]){
        plot(param.x, Y[, i], ylim = ylim, col = densCols[i], type = 'l',
             xlab = "", main = legend[i], axes = FALSE, ...)
        box()
      }
      par(oldpar)
    })
  }
}
/scratch/gouwar.j/cran-all/cranData/Bolstad/R/plot.Bolstad.r
#' Poisson sampling with a discrete prior
#'
#' Evaluates and plots the posterior density for \eqn{\mu}{mu}, the mean rate
#' of occurrence in a Poisson process, with a discrete prior on \eqn{\mu}{mu}
#'
#' @param y.obs a random sample from a Poisson distribution.
#' @param mu a vector of possibilities for the mean rate of occurrence of an
#'   event over a finite period of space or time.
#' @param mu.prior the associated prior probability mass.
#' @param \dots additional arguments that are passed to \code{Bolstad.control}
#' @return A list will be returned with the following components:
#'
#' \item{likelihood}{the scaled likelihood function for \eqn{\mu}{mu} given
#' \eqn{y_{obs}}{y.obs}} \item{posterior}{the posterior probability of
#' \eqn{\mu}{mu} given \eqn{y_{obs}}{y.obs}} \item{mu}{the vector of possible
#' \eqn{\mu}{mu} values used in the prior} \item{mu.prior}{the associated
#' probability mass for the values in \eqn{\mu}{mu}}
#' @seealso \code{\link{poisgamp}} \code{\link{poisgcp}}
#' @keywords misc
#' @examples
#'
#' ## simplest call with an observation of 4 and a uniform prior on the
#' ## values mu = 1,2,3
#' poisdp(4,1:3,c(1,1,1)/3)
#'
#' ## Same as the previous example but with a non-uniform discrete prior
#' mu = 1:3
#' mu.prior = c(0.3,0.4,0.3)
#' poisdp(4,mu=mu,mu.prior=mu.prior)
#'
#' ## Same as the previous example but with a finer grid and a non-uniform
#' ## discrete prior
#' mu = seq(0.5,9.5,by=0.05)
#' mu.prior = runif(length(mu))
#' mu.prior = sort(mu.prior/sum(mu.prior))
#' poisdp(4,mu=mu,mu.prior=mu.prior)
#'
#' ## A random sample of 50 observations from a Poisson distribution with
#' ## parameter mu = 3 and non-uniform prior
#' y.obs = rpois(50,3)
#' mu = c(1:5)
#' mu.prior = c(0.1,0.1,0.05,0.25,0.5)
#' results = poisdp(y.obs, mu, mu.prior)
#'
#' ## Same as the previous example but with a finer grid and a non-uniform
#' ## discrete prior
#' mu = seq(0.5,5.5,by=0.05)
#' mu.prior = runif(length(mu))
#' mu.prior = sort(mu.prior/sum(mu.prior))
#' y.obs = rpois(50,3)
#' poisdp(y.obs,mu=mu,mu.prior=mu.prior)
#'
#' @export poisdp
poisdp = function(y.obs, mu, mu.prior, ...){
  if(length(y.obs) == 0 || is.null(y.obs))
    stop("Error: y.obs must contain at least one value")

  if(any(y.obs < 0))
    stop("Error: y.obs cannot contain negative values")

  if(length(mu) != length(mu.prior))
    stop("Error: the lengths of mu and mu.prior are unequal.\nThere must be a corresponding probability for each value of mu")

  if(sum(mu <= 0) > 0)
    stop("Error: the values of the rate parameter mu must be greater than zero")

  if(sum(mu.prior < 0) > 0 || sum(mu.prior > 1) > 0)
    stop("Error: prior probabilities must be between zero and one")

  ## compare to 1 with rounding rather than exact floating point equality
  if(round(sum(mu.prior), 7) != 1){
    warning("Warning: the prior does not sum to 1. The prior has been rescaled")
    mu.prior = mu.prior/sum(mu.prior)
  }

  n = length(y.obs)

  if(n == 1){
    k = y.obs
    m = length(mu)

    cat("Prior\n")
    cat("-----\n")
    prior.matrix = cbind(mu, mu.prior)
    colnames(prior.matrix) = c("mu", "Pr(mu)")
    print(prior.matrix)

    k1 = 0.9995
    k2 = mu[m]
    cat(paste("\nk1:\t", k1, "\nk2:\t", k2, "\n\n"))
    n = qpois(k1, k2)

    y1 = 0:n
    k1 = mu
    k1[k1 == 0] = 1e-9

    f.cond = matrix(0, nrow = m, ncol = n + 1)
    for(i in 1:m)
      f.cond[i, ] = dpois(y1, k1[i])
    rownames(f.cond) = mu
    colnames(f.cond) = y1

    cat("Conditional probability of y1 given mu\n")
    cat("---------------------------------------\n")
    print(f.cond)
    cat("\n\n")

    matrix.prior = diag(mu.prior)
    f.joint = matrix.prior %*% f.cond
    cat("Joint probability of y1 and mu\n")
    cat("------------------------------\n")
    print(f.joint)
    cat("\n\n")

    f.marg = apply(f.joint, 2, sum)
    cat("Marginal probability of y1\n")
    cat("--------------------------\n")
    print(f.marg)
    cat("\n\n")

    ## extract the column of f.cond corresponding to y.obs
    likelihood = f.cond[, y.obs + 1]

    posterior = likelihood*mu.prior
    posterior = posterior/sum(posterior)
    results = cbind(mu, mu.prior, likelihood, posterior)
    colnames(results) = c("Mu", "Prior", "Likelihood", "Posterior")
    print(results)
  }else{
    m = length(mu)
    likelihood = rep(0, m)
    for(i in 1:m)
      likelihood[i] = exp(sum(dpois(y.obs, max(mu[i], 1e-9), log = TRUE)))

    posterior = likelihood*mu.prior
    posterior = posterior/sum(posterior)
    results = cbind(mu, mu.prior, likelihood, posterior)
    colnames(results) = c("Mu", "Prior", "Likelihood", "Posterior")
    print(results)
  }

  if(Bolstad.control(...)$plot){
    plot.data = rbind(mu.prior, posterior)
    if(length(mu.prior) <= 10){
      colnames(plot.data) = mu
      y.max = max(mu.prior, posterior)

      midpoints = barplot(plot.data, beside = TRUE, col = c("red", "blue"),
                          xlab = expression(mu),
                          ylab = expression(paste("Pr(", mu, "|", "y)")),
                          ylim = c(0, y.max*1.1),
                          main = expression(paste("Prior and posterior probability for ",
                                                  mu, " given the data y")))
      legend("topleft", cex = 0.7, bty = "n",
             legend = c("Prior", "Posterior"), fill = c("red", "blue"))
      box()
    }else{
      y.max = max(mu.prior, posterior)
      plot(mu, mu.prior, type = "l", lty = 2, col = "red",
           xlab = expression(mu),
           ylab = expression(paste("Pr(", mu, "|", "y)")),
           ylim = c(0, y.max*1.1),
           main = expression(paste("Prior and posterior probability for ",
                                   mu, " given the data y")))
      lines(mu, posterior, lty = 1, col = "blue")
      legend("topleft", cex = 0.7, bty = "n", lty = c(2, 1),
             col = c("red", "blue"), legend = c("Prior", "Posterior"))
    }
  }

  mx = sum(mu * posterior)
  vx = sum((mu - mx)^2 * posterior)

  results = list(name = 'mu', param.x = mu, prior = mu.prior,
                 likelihood = likelihood, posterior = posterior,
                 mean = mx, var = vx,
                 cdf = function(m){cumDistFun(m, mu, posterior)},
                 quantileFun = function(probs, ...){qFun(probs, mu, posterior)},
                 mu = mu, mu.prior = mu.prior # for backwards compat. only
                 )
  class(results) = 'Bolstad'
  invisible(results)
}
/scratch/gouwar.j/cran-all/cranData/Bolstad/R/poisdp.r
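A sanity sketch for the discrete posterior returned above: the posterior mass sums to one, and the cdf member gives cumulative posterior probability via cumDistFun.

mu = 1:3
fit = poisdp(4, mu, c(0.3, 0.4, 0.3), plot = FALSE)
sum(fit$posterior)  # 1
fit$cdf(2)          # Pr(mu <= 2 | y) under the posterior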
#' Poisson sampling with a gamma prior
#'
#' Evaluates and plots the posterior density for \eqn{\mu}{mu}, the mean rate
#' of occurrence in a Poisson process, with a \eqn{gamma} prior on \eqn{\mu}{mu}
#'
#' @param y a random sample from a Poisson distribution.
#' @param shape the shape parameter of the \eqn{gamma} prior.
#' @param rate the rate parameter of the \eqn{gamma} prior. Note that the scale
#'   is \eqn{1 / rate}
#' @param scale the scale parameter of the \eqn{gamma} prior
#' @param alpha the width of the credible interval is controlled by the
#'   parameter alpha.
#' @param \dots additional arguments that are passed to \code{Bolstad.control}
#' @return An object of class 'Bolstad' is returned. This is a list with the
#'   following components:
#'
#' \item{prior}{the prior density assigned to \eqn{\mu}{mu}}
#' \item{likelihood}{the scaled likelihood function for \eqn{\mu}{mu} given
#' \eqn{y}} \item{posterior}{the posterior probability of \eqn{\mu}{mu} given
#' \eqn{y}} \item{shape}{the shape parameter for the \eqn{gamma} posterior}
#' \item{rate}{the rate parameter for the \eqn{gamma} posterior}
#' @seealso \code{\link{poisdp}} \code{\link{poisgcp}}
#' @keywords misc
#' @examples
#'
#' ## simplest call with an observation of 4 and a gamma(1, 1), i.e. an
#' ## exponential prior on mu
#' poisgamp(4, 1, 1)
#'
#' ## Same as the previous example but with a gamma(10, 1) prior
#' poisgamp(4, 10, 1)
#'
#' ## Same as the previous example but with an improper gamma(1, 0) prior
#' poisgamp(4, 1, 0)
#'
#' ## A random sample of 50 observations from a Poisson distribution with
#' ## parameter mu = 3 and gamma(6,3) prior
#' set.seed(123)
#' y = rpois(50,3)
#' poisgamp(y,6,3)
#'
#' ## In this example we have a random sample from a Poisson distribution
#' ## with an unknown mean. We will use a gamma(6,3) prior to obtain the
#' ## posterior gamma distribution, and use the R function qgamma to get a
#' ## 95% credible interval for mu
#' y = c(3,4,4,3,3,4,2,3,1,7)
#' results = poisgamp(y,6,3)
#' ci = qgamma(c(0.025,0.975),results$shape, results$rate)
#' cat(paste("95% credible interval for mu: [",round(ci[1],3), ",", round(ci[2],3)),"]\n")
#'
#' ## In this example we have a random sample from a Poisson distribution
#' ## with an unknown mean. We will use a gamma(6,3) prior to obtain the
#' ## posterior gamma distribution, and use the quantile function to get a
#' ## 95% credible interval for mu
#' y = c(3,4,4,3,3,4,2,3,1,7)
#' results = poisgamp(y, 6, 3)
#' ci = quantile(results, c(0.025, 0.975))
#' cat(paste("95% credible interval for mu: [",round(ci[1],3), ",", round(ci[2],3)),"]\n")
#'
#' @export poisgamp
poisgamp = function(y, shape, rate = 1, scale = 1 / rate, alpha = 0.05, ...){
  n = length(y)
  y.sum = sum(y)

  if(is.null(y) || length(y) == 0)
    stop("Error: y has no data")

  if(any(y < 0))
    stop("Error: y contains negative values")

  ## allow the prior to be specified via scale instead of rate
  if(scale != 1 & rate == 1){
    rate = 1 / scale
  }

  if(shape < 0 || rate < 0)
    stop("Shape parameter and rate parameter must be greater than or equal to zero")

  quiet = Bolstad.control(...)$quiet
  if(!quiet){
    cat("Summary statistics for data\n")
    cat("---------------------------\n")
    cat(paste("Number of observations:\t", n, "\n"))
    cat(paste("Sum of observations:\t", y.sum, "\n\n"))
  }

  if(rate > 0){ ## proper gamma prior
    upperBnd = qgamma(0.9999, shape, rate)
    stepSize = upperBnd / 1000
    mu = seq(0, upperBnd, by = stepSize)
    shapePost = shape + y.sum
    ratePost = rate + n
    prior = dgamma(mu, shape, rate)
    posterior = dgamma(mu, shapePost, ratePost)
  }else if(rate == 0){ ## improper prior proportional to mu^(shape - 1)
    shapePost = shape + y.sum
    ratePost = rate + n
    upperBnd = qgamma(0.9999, shapePost, ratePost)
    stepSize = upperBnd / 1000
    mu = seq(0, upperBnd, by = stepSize)
    ##mu[1] = mu[2] ## fixes infinite upper bound problem
    prior = mu^(shape - 1)
    priorInt = integrate(function(x)x^(shape - 1), mu[1], mu[1001])$value
    prior = prior / priorInt
  }else{
    stop("Error: rate must be greater or equal to zero")
  }

  likelihood = sapply(mu, dpois, x = y, simplify = "array")

  if(is.matrix(likelihood)){
    likelihood = apply(likelihood, 2, prod)
  }

  posterior = dgamma(mu, shapePost, ratePost)
  credInt = qgamma(c(alpha * 0.5, 1 - alpha * 0.5), shapePost, ratePost)

  if(!quiet){
    cat("Summary statistics for posterior\n")
    cat("--------------------------------\n")
    cat(paste("Shape parameter (r):\t", shapePost, "\n"))
    cat(paste("Rate parameter (v):\t", ratePost, "\n"))
    cat(sprintf("%d%% credible interval for mu:\t[%.2f, %.2f]\n",
                round(100 * (1 - alpha)), credInt[1], credInt[2]))
  }

  if(Bolstad.control(...)$plot){
    y.max = max(prior[is.finite(prior)], posterior)

    plot(mu[is.finite(prior)], prior[is.finite(prior)],
         ylim = c(0, 1.1 * y.max),
         xlab = expression(mu), ylab = "Density",
         main = "Shape of gamma prior and posterior\n for Poisson mean",
         type = "l", lty = 2, col = "red")
    lines(mu, posterior, lty = 3, col = "blue")
    legend("topleft", bty = "n", lty = 2:3, col = c("red", "blue"),
           legend = c("Prior", "Posterior"), cex = 0.7)
  }

  results = list(name = 'mu', param.x = mu, prior = prior,
                 likelihood = likelihood, posterior = posterior,
                 mean = shapePost / ratePost,
                 var = shapePost / ratePost^2,
                 cdf = function(m, ...){pgamma(m, shape = shapePost, rate = ratePost, ...)},
                 quantileFun = function(probs, ...){qgamma(probs, shape = shapePost, rate = ratePost, ...)},
                 mu = mu, # for backwards compatibility only
                 shape = shapePost,
                 rate = ratePost)
  class(results) = 'Bolstad'
  invisible(results)
}
/scratch/gouwar.j/cran-all/cranData/Bolstad/R/poisgamp.R
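A quick check of the conjugate update implemented above: with a gamma(6, 3) prior the posterior is gamma(6 + sum(y), 3 + n).

y = c(3, 4, 4, 3, 3, 4, 2, 3, 1, 7)
fit = poisgamp(y, 6, 3, plot = FALSE)
c(fit$shape, fit$rate)  # 40 13, since sum(y) = 34 and n = 10
fit$mean                # 40 / 13, the posterior mean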
#' Poisson sampling with a general continuous prior #' #' Evaluates and plots the posterior density for \eqn{\mu}{mu}, the mean rate #' of occurance of an event or objects, with Poisson sampling and a general #' continuous prior on \eqn{\mu}{mu} #' #' #' @param y A random sample of one or more observations from a Poisson #' distribution #' @param density may be one of \code{"gamma"}, \code{"normal"}, or \code{"user"} #' @param params if density is one of the parameteric forms then then a vector #' of parameters must be supplied. gamma: a0,b0 normal: mean,sd #' @param n.mu the number of possible \eqn{\mu}{mu} values in the prior. This #' number must be greater than or equal to 100. It is ignored when #' density="user". #' @param mu either a vector of possibilities for the mean of a Poisson distribution, or a range (a vector of length 2) of values. #' This must be set if \code{density = "user"}. If \code{mu} is a range, then \code{n.mu} will be used to decide how many points to #' discretise this range over. #' @param mu.prior either a vector containing y values correspoding to the values in \code{mu}, or a function. #' This is used to specifiy the prior \eqn{f(\mu)}{f(mu)}. So \code{mu.prior} can be a vector containing \eqn{f(\mu_i)}{f(mu[i])} #' for every \eqn{\mu_i}{mu[i]}, or a funtion. This must be set if \code{density == "user"}. #' @param print.sum.stat if set to TRUE then the posterior mean, posterior #' variance, and a credible interval for the mean are printed. The width of the #' credible interval is controlled by the parameter alpha. #' @param alpha The width of the credible interval is controlled by the #' parameter alpha. #' @param \dots additional arguments that are passed to \code{Bolstad.control} #' @return A list will be returned with the following components: \item{mu}{the #' vector of possible \eqn{\mu}{mu} values used in the prior} #' \item{mu.prior}{the associated probability mass for the values in #' \eqn{\mu}{mu}} \item{likelihood}{the scaled likelihood function for #' \eqn{\mu}{mu} given \eqn{y}} \item{posterior}{the posterior probability of #' \eqn{\mu}{mu} given \eqn{y}} #' @seealso \code{\link{poisdp}} \code{\link{poisgamp}} #' @keywords misc #' @examples #' #' ## Our data is random sample is 3, 4, 3, 0, 1. We will try a normal #' ## prior with a mean of 2 and a standard deviation of 0.5. #' y = c(3,4,3,0,1) #' poisgcp(y, density = "normal", params = c(2,0.5)) #' #' ## The same data as above, but with a gamma(6,8) prior #' y = c(3,4,3,0,1) #' poisgcp(y, density = "gamma", params = c(6,8)) #' #' ## The same data as above, but a user specified continuous prior. #' ## We will use print.sum.stat to get a 99% credible interval for mu. #' y = c(3,4,3,0,1) #' mu = seq(0,8,by=0.001) #' mu.prior = c(seq(0,2,by=0.001),rep(2,1999),seq(2,0,by=-0.0005))/10 #' poisgcp(y,"user",mu=mu,mu.prior=mu.prior,print.sum.stat=TRUE,alpha=0.01) #' #' ## find the posterior CDF using the results from the previous example #' ## and Simpson's rule. Note that the syntax of sintegral has changed. #' results = poisgcp(y,"user",mu=mu,mu.prior=mu.prior) #' cdf = sintegral(mu,results$posterior,n.pts=length(mu))$cdf #' plot(cdf,type="l",xlab=expression(mu[0]) #' ,ylab=expression(Pr(mu<=mu[0]))) #' #' ## use the cdf to find the 95% credible region. #' lcb = cdf$x[with(cdf,which.max(x[y<=0.025]))] #' ucb = cdf$x[with(cdf,which.max(x[y<=0.975]))] #' cat(paste("Approximate 95% credible interval : [" #' ,round(lcb,4)," ",round(ucb,4),"]\n",sep="")) #' #' ## find the posterior mean, variance and std. 
deviation #' ## using Simpson's rule and the output from the previous example #' dens = mu*results$posterior # calculate mu*f(mu | x, n) #' post.mean = sintegral(mu,dens)$value #' #' dens = (mu-post.mean)^2*results$posterior #' post.var = sintegral(mu,dens)$value #' post.sd = sqrt(post.var) #' #' # calculate an approximate 95% credible region using the posterior mean and #' # std. deviation #' lb = post.mean-qnorm(0.975)*post.sd #' ub = post.mean+qnorm(0.975)*post.sd #' #' cat(paste("Approximate 95% credible interval : [" #' ,round(lb,4)," ",round(ub,4),"]\n",sep="")) #' #' # NOTE: All the examples given above can now be done trivially in this package #' #' ## find the posterior CDF using the results from the previous example #' results = poisgcp(y,"user",mu=mu,mu.prior=mu.prior) #' cdf = cdf(results) #' curve(cdf,type="l",xlab=expression(mu[0]) #' ,ylab=expression(Pr(mu<=mu[0]))) #' #' ## use the quantile function to find the 95% credible region. #' ci = quantile(results, c(0.025, 0.975)) #' cat(paste0("Approximate 95% credible interval : [" #' ,round(ci[1],4)," ",round(ci[2],4),"]\n")) #' #' ## find the posterior mean, variance and std. deviation #' ## using the output from the previous example #' post.mean = mean(results) #' #' post.var = var(results) #' post.sd = sd(results) #' #' # calculate an approximate 95% credible region using the posterior mean and #' # std. deviation #' ci = post.mean + c(-1, 1) * qnorm(0.975) * post.sd #' #' cat(paste("Approximate 95% credible interval : [" #' ,round(ci[1],4)," ",round(ci[2],4),"]\n",sep="")) #' #' ## Example 10.1 Diana's prior #' # Firstly we need to write a function that replicates Diana's prior #' f = function(mu){ #' result = rep(0, length(mu)) #' result[mu >=0 & mu <=2] = mu[mu >=0 & mu <=2] #' result[mu >=2 & mu <=4] = 2 #' result[mu >=4 & mu <=8] = 4 - 0.5 * mu[mu >=4 & mu <=8] #' #' ## we don't need to scale the prior so that it integrates to one, #' ## but it makes the results nicer to see #' #' A = 2 + 4 + 4 #' result = result / A #' #' return(result) #' } #' #' results = poisgcp(y, mu = c(0, 10), mu.prior = f) #' #' @export poisgcp poisgcp = function(y, density = c("normal", "gamma", "user"), params = c(0, 1), n.mu = 100, mu = NULL, mu.prior = NULL, print.sum.stat = FALSE, alpha = 0.05, ...)
{ if (is.null(y)) stop("Error: y has no data") if (any(y < 0)) stop("Error: data contains negative values") n = length(y) y.sum = sum(y) y.bar = mean(y) if (n.mu < 100) stop("Error: there must be at least 100 points in the prior") density = match.arg(density) if (density == "user" || is.function(mu.prior) || (length(mu) > 1 && length(mu.prior) > 1)){ if(density != "user"){ errMessage = paste("A user density is being used because mu and mu.prior have been provided.", "If this was not your intention then remove these vectors from the", "function call", sep = "\n") warning(errMessage) } if(is.function(mu.prior)){ if(length(mu) != 2){ errMessage = paste("mu.prior is a function, therefore, mu must be a vector of length 2", "giving the range over which mu.prior is evaluated", sep = "\n") stop(errMessage) } mu = seq(mu[1], mu[2], length = n.mu) mu.prior = mu.prior(mu) if(any(mu.prior < 0)){ stop("mu.prior must be greater than, or equal to zero for all values of mu") } } if (is.null(mu) || is.null(mu.prior)) stop( "Error: a vector of possibilities (mu) and associated densities must be specified for a user prior" ) if (length(mu) != length(mu.prior)) stop("Error: There must be an equal number of values in mu and mu.prior") } else if (density == "normal" & (is.null(mu) || is.null(mu.prior))) { ## shouldn't need the second clause as it should get trapped by the above if (length(params) != 2) stop("Error: A mean and a std. deviation must be specified for a normal prior") mx = params[1] sx = params[2] if(mx <= 0){ stop("Error: the prior mean must be greater than zero") } if (sx <= 0) stop("Error: the std. deviation of a normal prior must be greater than zero") lb = min(qnorm(1 / n.mu, mx, sx), mx - 3.5 * sx) ub = max(qnorm(1 - 1 / n.mu, mx, sx), mx + 3.5 * sx) if (lb < 0) { msg = paste0("Error: ", paste0("The normal prior has negative values. ", "Whilst this is true for all normal distributions, you can sneak ", "around it by using a large positive mean relative to the ", "std. deviation.")) stop(msg) } mu = seq(lb, ub, length = n.mu) mu.prior = dnorm(mu, mx, sx) / pnorm(0, mx, sx, lower.tail = FALSE) ## note this changed in 0.2-36 to rescale for the probability mass below 0 } else if (density == "gamma" & (is.null(mu) || is.null(mu.prior))) { if (length(params) != 2) stop("Error: there must be two parameters, a0 and b0 for a gamma prior") if (sum(params < 0) > 0) stop("Error: the parameters of a gamma prior must be positive") a0 = params[1] b0 = params[2] gamma.bds = qgamma(c(0.005, 0.995), a0, b0) mu = seq(gamma.bds[1], gamma.bds[2], length = n.mu) mu.prior = dgamma(mu, a0, b0) } else{ stop( paste( "Error: unrecognized density: ", density, ". The options are normal, gamma or user."
) ) } if (sum(mu < 0) > 0) stop("Error: mu cannot contain negative values") quiet = Bolstad.control(...)$quiet if (!quiet) { cat("Summary statistics for data\n") cat("---------------------------\n") cat(paste("Number of observations:\t", n, "\n")) cat(paste("Sum of observations:\t", y.sum, "\n")) } log.lik = y.sum * log(mu) - n * mu likelihood = exp(log.lik) fx.joint = approxfun(mu, mu.prior * likelihood) normalizing.constant = integrate(fx.joint, min(mu), max(mu))$value posterior = likelihood * mu.prior / normalizing.constant if (print.sum.stat) { fx.posterior = approxfun(mu, posterior) x.fx = approxfun(mu, posterior * mu) posterior.mean = integrate(x.fx, min(mu), max(mu))$value xmusq.fx = approxfun(mu, (mu - posterior.mean) ^ 2 * posterior) posterior.var = integrate(xmusq.fx, min(mu), max(mu))$value cat("\nPosterior distribution summary statistics\n") cat("-----------------------------------------\n") cat(paste("Post. mean:\t", round(posterior.mean, 3), "\n")) cat(paste("Post. var.:\t", round(posterior.var, 4), "\n")) mu.int = seq(min(mu), max(mu), length = 256) f.mu = fx.posterior(mu.int) suppressMessages({ cdf = sintegral(mu.int, f.mu)$cdf fx.posterior.invcdf = approxfun(cdf$y, cdf$x) }) lb = fx.posterior.invcdf(alpha / 2) ub = fx.posterior.invcdf(1 - alpha / 2) cat(paste( round(100 * (1 - alpha)), "% cred. int.: [" , round(lb, 3), ",", round(ub, 3), "]\n\n" )) } if (Bolstad.control(...)$plot) { y.max = max(mu.prior, posterior) plot( mu, mu.prior, ylim = c(0, 1.1 * y.max), xlab = expression(mu), ylab = "Density", main = "Shape of continuous prior and posterior for Poisson mean", type = "l", lty = 2, col = "red" ) lines(mu, posterior, lty = 3, col = "blue") legend( "topleft", bty = "n", lty = 2:3, col = c("red", "blue"), legend = c("Prior", "Posterior"), cex = 0.7 ) } results = list( name = 'mu', param.x = mu, prior = mu.prior, likelihood = likelihood, posterior = posterior, mu = mu, mu.prior = mu.prior # for backwards compatibility only ) class(results) = 'Bolstad' invisible(results) }
/scratch/gouwar.j/cran-all/cranData/Bolstad/R/poisgcp.r
#' Print method for objects of class \code{Bolstad} #' #' This function provides a print summary method for the output of #' \code{bayes.lm}. #' #' #' #' @param x an object of class \code{Bolstad} #' @param digits number of digits to print #' @param \dots any other arguments that are to be passed to \code{print.default} #' @details if x has both class \code{Bolstad} and \code{lm} then a print method #' similar to \code{print.lm} is called, otherwise \code{print.default} is called #' @author James Curran #' @seealso \code{\link{bayes.lm}} #' @export print.Bolstad = function(x, digits = max(3L, getOption("digits") - 3L), ...) { if(length(class(x)) == 2 && all(grepl("Bolstad|lm", class(x)))){ getTermLabels = function(x) attr(x$terms, "term.labels") cat("\nCall:", paste0(deparse(x$call)), sep = "\n") if (length(coef(x))) { cat("Coefficients:\n") cf = coef(x) names(cf) = c("(Intercept)", getTermLabels(x)) print(format(cf, digits = digits), print.gap = 2L, quote = FALSE) } cat("\n") }else{ print.default(x, ...) } }
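## A minimal usage sketch (added for illustration; not part of the original
## source). The data frame below is simulated, and we assume bayes.lm()
## accepts a formula/data pair in the same way as lm().
demo.df = data.frame(y = rnorm(20), x = rnorm(20))
demo.fit = bayes.lm(y ~ x, data = demo.df)
print(demo.fit)  # class c("Bolstad", "lm"), so the coefficient-style branch runs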
/scratch/gouwar.j/cran-all/cranData/Bolstad/R/print.Bolstad.R
print.sintegral = function(x, ...){ cat(paste0("Value: ", x$value, "\n"), ...) }
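## Usage sketch (added for illustration): integrate the standard normal
## density over [-3, 3] and print the result via the method above; the
## value should be approximately 0.9973.
x.demo = seq(-3, 3, length = 100)
print(sintegral(x.demo, dnorm))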
/scratch/gouwar.j/cran-all/cranData/Bolstad/R/print.sintegral.R
#' Print method for objects of class \code{sscsample} #' #' This function provides a print summary method for the output of #' \code{sscsample}. The \code{sscsample} function produces a large number of samples #' from a fixed population using either simple random, stratified, or cluster #' sampling. This function provides the means of each sample plus the number of #' observations from each ethnicity stratum in the sample. #' #' #' @param x an object of class \code{sscsamp} produced by \code{sscsample} #' @param \dots any other arguments that are to be passed to \code{cat} #' @author James Curran #' @seealso \code{\link{sscsample}} #' @export print.sscsamp = function(x, ...){ cat("Sample Mean Stratum 1 Stratum 2 Stratum 3\n", ...) cat("------ ------- --------- --------- ---------\n", ...) n.samples = length(x$means) fmt = '%6d %7.4f %9d %9d %9d\n' for (r in 1:n.samples) { s = sprintf(fmt, r, round(x$means[r], 4), x$s.strata[r,1], x$s.strata[r, 2], x$s.strata[r, 3]) cat(s, ...) } }
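## Usage sketch (added for illustration): draw five stratified samples of
## size 20 from the built-in sscsample.data population and print the
## per-sample means and stratum counts with the method above.
print(sscsample(20, 5, sample.type = "stratified"))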
/scratch/gouwar.j/cran-all/cranData/Bolstad/R/print.sscamp.R
#' @export print.summary.Bolstad = function(x, digits = max(3L, getOption("digits") - 3L), ...) { cat("\nCall:\n") cat(paste(deparse(x$call), collapse = "\n"), "\n") cat("Residuals:", "\n") resid = x$residuals if (x$res.df > 5L) { nam = c("Min", "1Q", "Median", "3Q", "Max") rq = if (length(dim(resid)) == 2L) structure(apply(t(resid), 1L, quantile), dimnames = list(nam, dimnames(resid)[[2L]])) else { zz = zapsmall(quantile(resid), digits + 1L) structure(zz, names = nam) } print(rq, digits = digits, ...) } else { print(resid, digits = digits, ...) } cat("\nCoefficients:\n") coef.mat = matrix(nrow = x$rank, ncol = 3) colnames(coef.mat) = c("Posterior Mean", "Std. Error", "t value") rownames(coef.mat) = c(x$terms) coef.mat[, "Posterior Mean"] = x$coef coef.mat[, "Std. Error"] = x$std.err coef.mat[, "t value"] = x$coef / x$std.err print(format(coef.mat, digits = digits), print.gap = 2L, quote = FALSE, ...) cat("---\n") if (!is.null(x$prior)) { prior.coef = x$prior$b0 cat("Prior Coefficients:\n") names(prior.coef) = x$terms print(format(prior.coef, digits = digits), print.gap = 2L, quote = FALSE, ...) prior.cov = x$prior$V0 cat("\nPrior Covariance Matrix:\n") dimnames(prior.cov) = list(x$terms, x$terms) print(format(prior.cov, digits = digits), print.gap = 2L, quote = FALSE, ...) } else { cat("\nNote: No prior given (Using flat prior).\n") } }
/scratch/gouwar.j/cran-all/cranData/Bolstad/R/print.summary.Bolstad.R
qFun = function(probs, x, px){ Px = cumsum(px) r = rep(0, length(probs)) r[probs < 0 | probs > 1] = NA r[probs == 0] = min(x) r[probs == 1] = max(x) qp = function(p){ d = which(Px - p >= 0)[1] x[d] } i = probs > 0 & probs < 1 r[i] = sapply(probs[i], qp) return(r) }
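## A quick sanity check of qFun on a simple discrete distribution (added for
## illustration). The first x value whose cumulative probability reaches p is
## returned, so the calls below give 2, 3, and 6 respectively.
x.demo = 1:6
px.demo = c(0.1, 0.2, 0.3, 0.2, 0.1, 0.1)
qFun(c(0.25, 0.5, 0.95), x.demo, px.demo)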
/scratch/gouwar.j/cran-all/cranData/Bolstad/R/qFun.R
#' Posterior quantiles #' #' @param x an object of class \code{Bolstad} #' @param probs numeric vector of probabilities with values in \eqn{[0,1]}. #' @param \dots any extra arguments needed. #' @details If \code{x} is of class \code{Bolstad} then this will find the #' quantiles of the posterior distribution using numerical integration and #' linear interpolation if necessary. #' @method quantile Bolstad #' @export quantile.Bolstad = function(x, probs = seq(0, 1, 0.25), ...){ if(any(grepl("quantileFun", names(x)))) return(x$quantileFun(probs, ...)) res = sintegral(x$param.x, x$posterior)$cdf qFn = approxfun(res$y, res$x) return(qFn(probs)) }
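## Usage sketch (added for illustration; the data are simulated). We assume
## the same positional arguments to normnp() as in the package examples:
## data, prior mean, prior sd, known sigma.
set.seed(1)
demo.fit = normnp(rnorm(15, 2, 1), 0, 3, 1, plot = FALSE)
quantile(demo.fit, probs = c(0.025, 0.5, 0.975))  # posterior 95% bounds and median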
/scratch/gouwar.j/cran-all/cranData/Bolstad/R/quantile.Bolstad.R
#' Standard deviation generic #' #' @param x an object. #' @param \dots Any additional arguments to be passed to \code{sd}. #' @export sd = function(x, ...){ UseMethod("sd") } #' @export sd.default = function(x, ...){ stats::sd(x, ...) } #' Posterior standard deviation #' #' @param x an object of class \code{Bolstad} for which we want to compute the standard deviation. #' @param \dots Any additional arguments to be passed to \code{sd}. #' #' Calculate the posterior standard deviation of an object of class \code{Bolstad}. This is #' the square root of the posterior variance. If the variance is not stored on the object, #' then the posterior variance \eqn{Var[\theta|x]} is calculated using #' linear interpolation to approximate the density function and numerical #' integration, where \eqn{\theta} is the variable for which we want to do #' Bayesian inference, and \eqn{x} is the data. #' #' @examples #' ## The usefulness of this method is really highlighted when we have a general #' ## continuous prior. In this example we are interested in the posterior #' ## standard deviation of a normal mean. Our prior is triangular over [-3, 3] #' set.seed(123) #' x = rnorm(20, -0.5, 1) #' #' mu = seq(-3, 3, by = 0.001) #' #' mu.prior = rep(0, length(mu)) #' mu.prior[mu <= 0] = 1 / 3 + mu[mu <= 0] / 9 #' mu.prior[mu > 0] = 1 / 3 - mu[mu > 0] / 9 #' #' results = normgcp(x, 1, density = "user", mu = mu, mu.prior = mu.prior, plot = FALSE) #' sd(results) #' @author James M. Curran #' @export sd.Bolstad = function(x, ...){ return(sqrt(var(x, ...))) }
/scratch/gouwar.j/cran-all/cranData/Bolstad/R/sd.R
#' Numerical integration using Simpson's Rule #' #' Takes a vector of \eqn{x} values and a corresponding set of positive #' \eqn{f(x)=y} values, or a function, and evaluates the area under the curve: #' \deqn{ \int{f(x)dx} }. #' #' #' @param x a sequence of \eqn{x} values. #' @param fx the value of the function to be integrated at \eqn{x} or a #' function #' @param n.pts the number of points to be used in the integration. If \code{x} #' contains more than \code{n.pts} values then \code{n.pts} will be set to \code{length(x)} #' @return A list containing two elements, \code{value} - the value of the #' integral, and \code{cdf} - a list containing elements x and y which give a #' numeric specification of the cdf. #' @keywords misc #' @examples #' #' ## integrate the normal density from -3 to 3 #' x = seq(-3, 3, length = 100) #' fx = dnorm(x) #' estimate = sintegral(x,fx)$value #' true.val = diff(pnorm(c(-3,3))) #' abs.error = abs(estimate-true.val) #' rel.pct.error = 100*abs(estimate-true.val)/true.val #' cat(paste("Absolute error :",round(abs.error,7),"\n")) #' cat(paste("Relative percentage error :",round(rel.pct.error,6),"percent\n")) #' #' ## repeat the example above using dnorm as function #' x = seq(-3, 3, length = 100) #' estimate = sintegral(x,dnorm)$value #' true.val = diff(pnorm(c(-3,3))) #' abs.error = abs(estimate-true.val) #' rel.pct.error = 100*abs(estimate-true.val)/true.val #' cat(paste("Absolute error :",round(abs.error,7),"\n")) #' cat(paste("Relative percentage error :",round(rel.pct.error,6)," percent\n")) #' #' ## use the cdf #' #' cdf = sintegral(x,dnorm)$cdf #' plot(cdf, type = 'l', col = "black") #' lines(x, pnorm(x), col = "red", lty = 2) #' #' ## integrate the function x^2-1 over the range 1-2 #' x = seq(1,2,length = 100) #' sintegral(x,function(x){x^2-1})$value #' #' ## compare to integrate #' integrate(function(x){x^2-1},1,2) #' #' #' @export sintegral sintegral = function(x, fx, n.pts = max(256, length(x))){ ## numerically integrates fx over x using Simpson's rule ## x - a sequence of x values ## fx - the value of the function to be integrated at x ## - or a function ## n.pts - the number of points to be used in the integration if(is.function(fx)) fx = fx(x) n.x = length(x) if(n.x != length(fx)) stop("Unequal input vector lengths") ## use linear approximation to get equally spaced x values ap = approx(x, fx, n = 2 * n.pts + 1) h = diff(ap$x)[1] integral = h*(ap$y[2 * (1:n.pts) - 1] + 4 * ap$y[2 * (1:n.pts)] + ap$y[2 * (1:n.pts) + 1]) / 3 results = list(value = sum(integral), cdf = list(x = ap$x[2*(1:n.pts)], y = cumsum(integral))) class(results) = "sintegral" return(results) }
/scratch/gouwar.j/cran-all/cranData/Bolstad/R/sintegral.r
#' Simple, Stratified and Cluster Sampling #' #' Samples from a fixed population using either simple random sampling, #' stratified sampling or cluster sampling. #' #' #' @param size the desired size of the sample #' @param n.samples the number of repeat samples to take #' @param sample.type the sampling method. Can be one of "simple", #' "stratified", "cluster" or 1, 2, 3 where 1 corresponds to "simple", 2 to #' "stratified" and 3 to "cluster" #' @param x a vector of measurements for each unit in the population. By #' default x is not used, and the builtin data set sscsample.data is used #' @param strata a corresponding vector for each unit in the population #' indicating membership to a stratum #' @param cluster a corresponding vector for each unit in the population #' indicating membership to a cluster #' @return A list will be returned with the following components: #' \item{samples}{a matrix with the number of rows equal to size and the number #' of columns equal to n.samples. Each column corresponds to a sample drawn #' from the population} \item{s.strata}{a matrix showing how many units from #' each stratum were included in the sample} \item{means}{a vector containing #' the mean of each sample drawn} #' @author James M. Curran, Dept. of Statistics, University of Auckland. Janko #' Dietzsch, Proteomics Algorithm and Simulation, Zentrum f. Bioinformatik #' Tuebingen Fakultaet f. Informations- und Kognitionswissenschaften, #' Universitaet Tuebingen #' @keywords misc #' @examples #' #' ## Draw 200 samples of size 20 using simple random sampling #' sscsample(20,200) #' #' ## Draw 200 samples of size 20 using simple random sampling and store the #' ## results. Extract the means of all 200 samples, and the 50th sample #' res = sscsample(20,200) #' res$means #' res$samples[,50] #' #' @export sscsample sscsample = function (size, n.samples, sample.type = c("simple", "cluster", "stratified"), x = NULL, strata = NULL, cluster = NULL){ ## Written initially by: ## James M. Curran, ## Dept. of Statistics, University of Auckland ## Auckland, New Zealand ## ## Modified, corrected and improved by: ## Janko Dietzsch ## Proteomics Algorithm and Simulation ## Zentrum f. Bioinformatik Tuebingen ## Fakultaet f. Informations- und Kognitionswissenschaften ## Universitaet Tuebingen ## R.
Mark Sharp ## Southwest National Primate Center ## Southwest Foundation for Biomedical Research group.idx.by.name = function(name,names.vec,idx){ return(idx[names.vec == name]) } draw.stratum = function(thresholds) { r = runif(1) for (i in 1:length(thresholds)) if (r < thresholds[i]){ stratum = i; break; } return(stratum) } if (is.null(x)) x = sscsample.data$income nx = length(x) if (size > nx) stop("Sample size must be less than population size") if (is.null(strata)) strata = sscsample.data$ethnicity strata.names = unique(strata) n.strata = length(strata.names) if (nx != length(strata)) stop("The length of the strata and data vectors must be equal") if (is.null(cluster)) cluster = sscsample.data$neighborhood n.clusters = length(unique(cluster)) if (nx != length(cluster)) stop("The length of the cluster and data vectors must be equal") samples = matrix(0, nrow = size, ncol = n.samples) sample.type = match.arg(sample.type, c("simple", "cluster", "stratified")) if(sample.type == "stratified" | sample.type == 2){ idx.vec = 1:nx stratified.data = lapply(strata.names,group.idx.by.name, names.vec=strata,idx=idx.vec) names(stratified.data) = strata.names sample.strata.size = size * sapply(stratified.data, length) / nx sample.strata.units = floor(sample.strata.size) ## integer part of units sample.strata.fractions = sample.strata.size - sample.strata.units ## determine the fractional unit parts for every stratum sample.unit.residuals = sum(sample.strata.fractions) ## how many remaining units are determined by fractions ## prepare the random draw of the residual units if (sample.unit.residuals > 0){ for (i in 2:length(sample.strata.fractions)) sample.strata.fractions[i] = sample.strata.fractions[i] + sample.strata.fractions[i-1] sample.strata.thresholds = sample.strata.fractions / sample.unit.residuals } }else if (sample.type == "cluster" | sample.type == 3) { cluster.names = unique(cluster) cluster.names = sort(cluster.names) ## clustered.data should be ordered to be useful inside the 'sampling-loop' idx.vec = 1:nx clustered.data = lapply(cluster.names,group.idx.by.name,names.vec=cluster,idx=idx.vec) names(clustered.data) = cluster.names } for (r in 1:n.samples) { if (sample.type == "simple" | sample.type == 1){ sample.idx = sample(1:nx, size) }else if (sample.type == "stratified" | sample.type == 2){ for (stratum in 1:n.strata) { ## Sample the whole units from all strata if (stratum == 1) sample.idx = sample(stratified.data[[stratum]], sample.strata.units[stratum]) else sample.idx = c(sample.idx, sample(stratified.data[[stratum]], sample.strata.units[stratum])) } if (sample.unit.residuals > 0) { ## Are there fractional parts? for (i in 1:sample.unit.residuals) { ## sample the residual units randomly but according to the fractions selected.stratum = draw.stratum(sample.strata.thresholds) repeat { ## draw a unit that was not already selected draw.idx = sample(stratified.data[[selected.stratum]],1) if (! draw.idx %in% sample.idx) break ## Have we already sampled this unit? } sample.idx = c(sample.idx, draw.idx) } } } else if (sample.type == "cluster" | sample.type == 3) { ## This part samples as many clusters as necessary to reach the specified ## sampling size.
sample.idx = vector(mode="numeric") temp.cluster.names = cluster.names while (size > length(sample.idx)) { sampled.cluster.name = sample(temp.cluster.names,1) rest = size - length(sample.idx) if (length(clustered.data[[sampled.cluster.name]]) <= rest) { sample.idx = c(sample.idx, clustered.data[[sampled.cluster.name]]) } else { sample.idx = c(sample.idx, sample(clustered.data[[sampled.cluster.name]], rest)) } temp.cluster.names = temp.cluster.names[temp.cluster.names != sampled.cluster.name] } } else stop(paste("Unknown sampling sample.type :", sample.type)) samples[, r] = sample.idx } means = rep(0, n.samples) s.strata = matrix(0, nrow = n.samples, ncol = n.strata) sample.out = matrix(0, nrow = size, ncol = n.samples) for (r in 1:n.samples) { idx = samples[, r] means[r] = mean(x[idx]) for (j in 1:n.strata) s.strata[r, j] = sum(strata[idx] == strata.names[j]) sample.out[, r] = x[idx] } results = list(samples = samples, s.strata = s.strata, means = means) class(results) = "sscsamp" return(results) }
/scratch/gouwar.j/cran-all/cranData/Bolstad/R/sscsample.r
#' Summarizing Bayesian Multiple Linear Regression #' #' \code{summary} method for output of \code{\link{bayes.lm}}. #' #' #' @param object an object of class \code{Bolstad} that is the result of a call to \code{\link{bayes.lm}} #' @param \dots any further arguments to be passed to \code{print} #' #' #' @seealso The function to fit the model \code{\link{bayes.lm}} #' #' The function \code{\link{coef}} to extract the matrix of posterior means along with standard errors and t-statistics. #' #' @export summary.Bolstad = function(object, ...) { if(length(class(object)) == 2 && all(grepl("Bolstad|lm", class(object)))){ getTermLabels = function(x){ attr(x$terms, "term.labels") } z = object ans = list(rank = z$rank, call = z$call, terms = c("(Intercept)", getTermLabels(z)), coef = z$coefficients, std.err = sqrt(diag(z$post.var)), prior = z$prior, residuals = as.vector(z$residuals), res.df = z$df.residual, ...) class(ans) = "summary.Bolstad" ans }else{ summary.default(object, ...) } }
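## Usage sketch (added for illustration; not from the original source). The
## data are simulated, and we assume bayes.lm() takes a formula/data pair in
## the same way as lm().
demo.df = data.frame(y = rnorm(20), x1 = rnorm(20), x2 = rnorm(20))
demo.fit = bayes.lm(y ~ x1 + x2, data = demo.df)
summary(demo.fit)  # returns a "summary.Bolstad" object, displayed by print.summary.Bolstad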
/scratch/gouwar.j/cran-all/cranData/Bolstad/R/summary.Bolstad.R
#' Variance generic #' #' @param x an object for which we want to compute the variance #' @param \dots Any additional arguments to be passed to \code{var}. #' @export var = function(x, ...){ UseMethod("var") } #' @export var.default = function(x, ...){ stats::var(x, ...) } #' @export var.Bolstad = function(x, ...){ if(any(grepl("var", names(x)))) return(x$var) xVals = x$param.x mx = mean(x, ...) fx = approxfun(xVals, (xVals - mx)^2 * x$posterior) return(integrate(fx, min(xVals), max(xVals))$value) }
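## Usage sketch (added for illustration): the generic falls through to
## stats::var for ordinary numeric data, while a Bolstad object gets its
## posterior variance, either stored on the object or computed by numerical
## integration of (theta - E[theta|x])^2 against the posterior density.
var(rnorm(100))                          # var.default -> stats::var
demo.fit = binobp(6, 15, 1, 1, plot = FALSE)
var(demo.fit)                            # posterior variance of the beta posterior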
/scratch/gouwar.j/cran-all/cranData/Bolstad/R/var.R
#' Monte Carlo study of randomized and blocked designs #' #' Simulates completely randomized designs and randomized block designs from a #' population of experimental units with underlying response values \eqn{y} and #' underlying other variable values \eqn{x} (possibly lurking) #' #' #' @param x a set of lurking values which are correlated with the response #' @param y a set of response values #' @param corr the correlation between the response and lurking variable #' @param size the size of the treatment groups #' @param n.treatments the number of treatments #' @param n.rep the number of Monte Carlo replicates #' @param \dots additional parameters which are passed to \code{Bolstad.control} #' @return If the output of xdesign is assigned to a variable, then a list is #' returned with the following components: \item{block.means}{a vector of the #' means of the lurking variable from each replicate of the simulation stored #' by treatment number within replicate number} \item{treat.means}{a vector of #' the means of the response variable from each replicate of the simulation #' stored by treatment number within replicate number} \item{ind}{a vector #' containing the treatment group numbers. Note that there will be twice as #' many group numbers as there are treatments corresponding to the simulations #' done using a completely randomized design and the simulations done using a #' randomized block design} #' @keywords misc #' @examples #' #' # Carry out simulations using the default parameters #' #' xdesign() #' #' # Carry out simulations using a simulated response with 5 treatments, #' # groups of size 25, and a correlation of -0.6 between the response #' # and lurking variable #' #' xdesign(corr = -0.6, size = 25, n.treatments = 5) #' #' @export xdesign xdesign = function(x = NULL, y = NULL, corr = 0.8, size = 20, n.treatments = 4, n.rep = 500, ...){ if(is.null(x)){ ## simulate some data nx = size*n.treatments x = rnorm(nx) y = rnorm(nx) y = sqrt(1 - corr^2) * y + corr * x } nx = size * n.treatments if(length(x) != length(y)) stop("x and y must be of equal length") if(length(x) != size * n.treatments) stop("The lengths of x and y must be equal to size times the number of treatments") if(corr < (-1) | corr > 1) stop("Correlation coefficient must be between -1 and 1") if(n.rep < 10) stop("Must have at least 10 Monte Carlo replicates") quiet = Bolstad.control(...)$quiet if(!quiet){ cat("Variable\tN\tMean\tMedian\tTrMean\tStDev\tSE Mean\n") cat(paste("X\t",length(x), round(mean(x),3), round(median(x),3), round(mean(x,trim=0.1),3), round(sd(x),3), round(sd(x)/sqrt(length(x)),3),sep="\t")) cat("\n") cat(paste("Y\t",length(y), round(mean(y),3), round(median(y),3), round(mean(y,trim=0.1),3), round(sd(y),3), round(sd(y)/sqrt(length(y)),3),sep="\t")) cat("\n\n") qx = quantile(x,c(0.25,0.75)) qy = quantile(y,c(0.25,0.75)) cat("Variable\tMinimum\tMaximum\tQ1\tQ3\n") cat(paste("X\t",round(min(x),3) ,round(max(x),3) ,round(qx[1],3) ,round(qx[2],3),sep="\t")) cat("\n") cat(paste("Y\t",round(min(y),3) ,round(max(y),3) ,round(qy[1],3) ,round(qy[2],3),sep="\t")) cat("\n\n") cat("The Pearson correlation between X and Y is: "); cat(paste(round(cor(x,y),3),"\n\n")) } if(Bolstad.control(...)$plot) plot(x, y) ssx = rep(0,n.rep) ssy = rep(0,n.rep) treat.groupmean = matrix(0,ncol=n.treatments,nrow=n.rep) block.groupmean = matrix(0,ncol=n.treatments,nrow=n.rep) for(block in c(FALSE,TRUE)){ ## block is indicator for blocking ## FALSE = completely randomized design, ## TRUE = randomized block design for(i in 1:n.rep){
if(!block){ group = rep(1:n.treatments,size) z = rnorm(nx) o = order(z) z = z[o] group = group[o] x2 = x y2 = y }else{ o = order(x) x2 = x[o] y2 = y[o] group = NULL for(j in 1:size){ gp = 1:n.treatments z = rnorm(n.treatments) gp = gp[order(z)] group = c(group,gp) } } split.x = split(x2,group) split.y = split(y2,group) x.bar = sapply(split.x,mean) y.bar = sapply(split.y,mean) x.mean = mean(x.bar) y.mean = mean(y.bar) ssx[i] = sum((x.bar-x.mean)^2) ssy[i] = sum((y.bar-y.mean)^2) treat.groupmean[i,] = y.bar block.groupmean[i,] = x.bar } if(!block){ treat.var0 = as.vector(treat.groupmean) block.var0 = as.vector(block.groupmean) index0 = rep(1:n.treatments,rep(n.rep,n.treatments)) }else{ treat.var1 = as.vector(treat.groupmean) block.var1 = as.vector(block.groupmean) index1 = rep(1:n.treatments,rep(n.rep,n.treatments)) } } treat.var = c(treat.var0,treat.var1) block.var = c(block.var0,block.var1) index = c(index0,index1) ind = rep(1:2,c(length(treat.var0),length(treat.var1))) ind = n.treatments*(ind-1)+index if(Bolstad.control(...)$plot){ par(ask=interactive()) rng = range(block.var) y.lims = max(abs(c(rng[1]-0.1*diff(rng),rng[2]+0.1*diff(rng)))) y.lims = c(-y.lims,y.lims) boxplot(block.var~ind, main="Boxplots of Lurking/Blocking variable group means", sub="Lurking variable in completely randomized design\nBlocking variable in randomized block design", col=rep(c("blue","red"), rep(n.treatments,2)), ylim = y.lims) legend("topright", bty = "n", cex = 0.7, legend = c("Completely randomized design", "Randomized block design"), fill = c("blue","red")) rng = range(treat.var) y.lims = max(abs(c(rng[1]-0.1*diff(rng),rng[2]+0.1*diff(rng)))) y.lims = c(-y.lims,y.lims) boxplot(treat.var~ind ,main="Boxplots of treatment group means" ,col=rep(c("blue","red"),rep(n.treatments,2)) ,ylim=y.lims) legend("topright", cex = 0.7, bty = "n", legend=c("Completely randomized design", "Randomized block design"), fill = c("blue","red")) if(!quiet){ x = treat.var[ind<=n.treatments] y = treat.var[ind>n.treatments] cat("Variable\tN\tMean\tMedian\tTrMean\tStDev\tSE Mean\n") cat(paste("Randomized",length(x), round(mean(x),3), round(median(x),3), round(mean(x,trim=0.1),3), round(sd(x),3), round(sd(x)/sqrt(length(x)),3),sep="\t")) cat("\n") cat(paste("Blocked\t",length(y), round(mean(y),3), round(median(y),3), round(mean(y,trim=0.1),3), round(sd(y),3), round(sd(y)/sqrt(length(y)),3),sep="\t")) cat("\n\n") qx = quantile(x,c(0.25,0.75)) qy = quantile(y,c(0.25,0.75)) cat("Variable\tMinimum\tMaximum\tQ1\tQ3\n") cat(paste("Randomized",round(min(x),3) ,round(max(x),3) ,round(qx[1],3) ,round(qx[2],3),sep="\t")) cat("\n") cat(paste("Blocked\t",round(min(y),3) ,round(max(y),3) ,round(qy[1],3) ,round(qy[2],3),sep="\t")) cat("\n\n") } } invisible(list(block.means=block.var,treat.means=treat.var,ind=ind)) }
/scratch/gouwar.j/cran-all/cranData/Bolstad/R/xdesign.r
#' Bayesian Cox Proportional Hazards Modelling #' #' Uses a Metropolis Hastings scheme on the proportional hazards model to draw #' a sample from the posterior. Uses a matched curvature Student's t candidate #' generating distribution with 4 degrees of freedom to give heavy tails. #' #' #' @param y the Poisson censored response vector. It has value 0 when the #' variable is censored and 1 when it is not censored. #' @param t time #' @param x matrix of covariates #' @param steps the number of steps to use in the Metropolis-Hastings updating #' @param priorMean the mean of the prior #' @param priorVar the variance of the prior #' @param mleMean the mean of the matched curvature likelihood #' @param mleVar the covariance matrix of the matched curvature likelihood #' @param startValue a vector of starting values for all of the regression #' coefficients including the intercept #' @param randomSeed a random seed to use for different chains #' @param plots Plot the time series and auto correlation functions for each of #' the model coefficients #' @return A list containing the following components: #' #' \item{beta}{a data frame containing the sample of the model coefficients #' from the posterior distribution} \item{mleMean}{the mean of the matched #' curvature likelihood. This is useful if you've used a training set to #' estimate the value and wish to use it with another data set} #' \item{mleVar}{the covariance matrix of the matched curvature likelihood. See #' mleMean for why you'd want this} #' @export BayesCPH BayesCPH = function(y, t, x, steps = 1000, priorMean = NULL, priorVar = NULL, mleMean = NULL, mleVar, startValue = NULL, randomSeed = NULL, plots = FALSE) { if (!is.null(randomSeed)) set.seed(randomSeed) nObs = length(y) if (is.vector(x)) x = as.matrix(x, ncol = 1) nParameters = ncol(x) + 1 ## number of covariates + intercept if (!is.null(startValue)) { if (length(startValue) < nParameters) { stop("You must have as many starting values as you have model parameters") } } ## initial mean of the matched curvature likelihood if (is.null(mleMean)) mleMean = c(log(mean(y)), rep(0, nParameters - 1)) X = cbind(rep(1, nObs), x) Xt = t(X) calcMatchedCurvatureNormLike = function() { betaX = X %*% mleMean Mu = t * exp(betaX) Vdiag = Mu Y = betaX + (y - Mu)/Mu ## I have no idea why the diag command doesn't work as it should: e.g.
Vyinv = ## diag(Vdiag, nrow = length(Vdiag)) therefore this two-step procedure is needed Vyinv = matrix(0, nrow = nObs, ncol = nObs) diag(Vyinv) = Vdiag XtV = Xt %*% Vyinv VLinv = XtV %*% X VL = solve(VLinv) w1 = VL %*% XtV mleMean = w1 %*% Y ## Loop iterations to converge to MLE for (k in 1:20) { betaX = X %*% mleMean Mu = t * exp(betaX) Vdiag = Mu Y = betaX + (y - Mu)/Mu Vyinv = matrix(0, nrow = nObs, ncol = nObs) diag(Vyinv) = Vdiag XtV = Xt %*% Vyinv VLinv = XtV %*% X VL = solve(VLinv) w1 = VL %*% XtV mleMean = w1 %*% Y } return(list(mleMean = mleMean, mleVar = VL)) } ## calcMatchedCurvatureNormLike normApproxPosterior = function() { result = list(postMean = rep(0, nParameters), postVar = matrix(0, ncol = nParameters, nrow = nParameters)) ## if the prior mean and variance aren't specified then set them equal to the mle mean and ## variance if (is.null(priorMean) & is.null(priorVar)) { result$postMean = mleMean result$postVar = mleVar } else { mleVarInv = solve(mleVar) priorVarInv = solve(priorVar) postPrec = mleVarInv + priorVarInv result$postVar = solve(postPrec) w2 = result$postVar %*% priorVarInv w4 = w2 %*% priorMean w3 = result$postVar %*% mleVarInv w5 = w3 %*% mleMean result$postMean = w4 + w5 } return(result) } mleParams = calcMatchedCurvatureNormLike() mleMean = mleParams$mleMean mleVar = mleParams$mleVar posterior = normApproxPosterior() postMean = posterior$postMean postVar = posterior$postVar U = chol(postVar) candBeta = matrix(rt(steps * nParameters, df = 4), ncol = nParameters) if (!is.null(startValue)) candBeta[1, ] = startValue WM2 = candBeta %*% U WM3 = matrix(rep(postMean, rep(steps, nParameters)), ncol = nParameters) WM4 = WM2 + WM3 V2 = cov(WM4) ft0 = apply(dt(candBeta, df = 4), 1, prod) ftn = apply(dnorm(candBeta), 1, prod) q1 = ft0/1 ## Metropolis-Hastings BetaXt = WM4 %*% Xt BetaXt = exp(BetaXt) for (j in 1:nObs) BetaXt[, j] = -t[j] * BetaXt[, j] + y[j] * log(t[j] * BetaXt[, j]) logg1 = rowSums(BetaXt) logg1 = logg1 - max(logg1) # g1 = exp(logg1) logq1 = log(q1) u = runif(steps) i1 = 1 betaSample = WM4 for (n in 2:steps) { alpha = exp(logq1[i1] + logg1[n] - logq1[n] - logg1[i1]) alpha = ifelse(alpha > 1, 1, alpha) if (u[n] >= alpha) { ## reject betaSample[n, ] = WM4[i1, ] } else { betaSample[n, ] = WM4[n, ] i1 = n } } beta.df = data.frame(betaSample) names(beta.df) = paste("b", 0:(ncol(beta.df) - 1), sep = "") describe(beta.df) Mean.beta = sapply(beta.df, mean) StdDev.beta = sapply(beta.df, sd) Z.beta = Mean.beta/StdDev.beta print(data.frame(Mean.beta, StdDev.beta, Z.beta)) if (plots) { ## nRows = ceiling(sqrt(nParameters)) nRows = nParameters ## nCols = floor(sqrt(nParameters)) nCols = 2 oldPar = par(mfrow = c(nRows, nCols)) nms = names(beta.df) for (i in 1:nParameters) { plot(ts(beta.df[, i]), main = paste("Time series plot of", nms[i]), ylab = nms[i]) plot(acf(beta.df[, i], plot = FALSE), main = paste("Autocorrelation plot of", nms[i])) } par(oldPar) } invisible(list(beta = beta.df, mleMean = mleMean, mleVar = mleVar)) }
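## Usage sketch for BayesCPH (added for illustration; the simulated data,
## seed, and chain length are assumptions, not part of the package examples,
## which exist only for BayesLogistic and BayesPois).
set.seed(123)
n.demo = 50
x.demo = rnorm(n.demo)
t.demo = rexp(n.demo, rate = exp(-0.5 + 0.7 * x.demo))  # survival times
y.demo = rbinom(n.demo, 1, 0.8)                         # 1 = event observed, 0 = censored
fit.demo = BayesCPH(y.demo, t.demo, x.demo, steps = 500, plots = FALSE)
head(fit.demo$beta)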
/scratch/gouwar.j/cran-all/cranData/Bolstad2/R/BayesCPH.R
#' Bayesian Logistic Regression #' #' Performs Metropolis Hastings on the logistic regression model to draw #' a sample from the posterior. Uses a matched curvature Student's t candidate #' generating distribution with 4 degrees of freedom to give heavy tails. #' #' #' @param y the binary response vector #' @param x matrix of covariates #' @param steps the number of steps to use in the Metropolis-Hastings updating #' @param priorMean the mean of the prior #' @param priorVar the variance of the prior #' @param mleMean the mean of the matched curvature likelihood #' @param mleVar the covariance matrix of the matched curvature likelihood #' @param startValue a vector of starting values for all of the regression #' coefficients including the intercept #' @param randomSeed a random seed to use for different chains #' @param plots Plot the time series and auto correlation functions for each of #' the model coefficients #' @return A list containing the following components: #' #' \item{beta}{a data frame containing the sample of the model coefficients #' from the posterior distribution} \item{mleMean}{the mean of the matched #' curvature likelihood. This is useful if you've used a training set to #' estimate the value and wish to use it with another data set} #' \item{mleVar}{the covariance matrix of the matched curvature likelihood. See #' mleMean for why you'd want this} #' @examples #' #' data(logisticTest.df) #' BayesLogistic(logisticTest.df$y, logisticTest.df$x) #' #' @export BayesLogistic BayesLogistic = function(y, x, steps = 1000, priorMean = NULL, priorVar = NULL, mleMean = NULL, mleVar, startValue = NULL, randomSeed = NULL, plots = FALSE) { if (!is.null(randomSeed)) set.seed(randomSeed) nObs = length(y) if (is.vector(x)) x = as.matrix(x, ncol = 1) nParameters = ncol(x) + 1 ## number of covariates + intercept if (!is.null(startValue)) { if (length(startValue) < nParameters) { stop("You must have as many starting values as you have model parameters") } } ## initial mean of the matched curvature likelihood if (is.null(mleMean)) mleMean = c(log(mean(y)/(1 - mean(y))), rep(0, nParameters - 1)) X = cbind(rep(1, nObs), x) Xt = t(X) calcMatchedCurvatureNormLike = function() { betaX = X %*% mleMean Pi = exp(betaX)/(1 + exp(betaX)) Vdiag = Pi * (1 - Pi) Y = betaX + (y - Pi)/Vdiag ## I have no idea why the diag command doesn't work as it should: e.g.
Vyinv = ## diag(Vdiag, nrow = length(Vdiag)) therefore this two-step procedure is needed Vyinv = matrix(0, nrow = nObs, ncol = nObs) diag(Vyinv) = Vdiag XtV = Xt %*% Vyinv VLinv = XtV %*% X VL = solve(VLinv) w1 = VL %*% XtV mleMean = w1 %*% Y ## Loop iterations to converge to MLE for (k in 1:5) { betaX = X %*% mleMean Pi = exp(betaX)/(1 + exp(betaX)) Vdiag = Pi * (1 - Pi) Y = betaX + (y - Pi)/Vdiag Vyinv = matrix(0, nrow = nObs, ncol = nObs) diag(Vyinv) = Vdiag XtV = Xt %*% Vyinv VLinv = XtV %*% X VL = solve(VLinv) w1 = VL %*% XtV mleMean = w1 %*% Y } return(list(mleMean = mleMean, mleVar = VL)) } ## calcMatchedCurvatureNormLike normApproxPosterior = function() { result = list(postMean = rep(0, nParameters), postVar = matrix(0, ncol = nParameters, nrow = nParameters)) ## if the prior mean and variance aren't specified then set them equal to the mle mean and ## variance if (is.null(priorMean)) { priorMean = result$postMean = mleMean } if (is.null(priorVar)) { priorVar = result$postVar = mleVar } mleVarInv = solve(mleVar) priorVarInv = solve(priorVar) postPrec = mleVarInv + priorVarInv result$postVar = solve(postPrec) result$postMean = result$postVar %*% priorVarInv %*% priorMean + result$postVar %*% mleVarInv %*% mleMean return(result) } mleParams = calcMatchedCurvatureNormLike() mleMean = mleParams$mleMean mleVar = mleParams$mleVar posterior = normApproxPosterior() postMean = posterior$postMean postVar = posterior$postVar U = chol(postVar) L = t(U) candBeta = matrix(rt(steps * nParameters, df = 4), ncol = nParameters) if (!is.null(startValue)) candBeta[1, ] = startValue WM2 = candBeta %*% U WM3 = matrix(rep(postMean, rep(steps, nParameters)), ncol = nParameters) WM4 = WM2 + WM3 V2 = cov(WM4) ft0 = apply(dt(candBeta, df = 4), 1, prod) fn0 = apply(dnorm(candBeta), 1, prod) q1 = ft0/1 ## Metropolis-Hastings Sum1 = WM4 %*% Xt Pi1 = exp(Sum1)/(1 + exp(Sum1)) for (j in 1:nObs) Pi1[, j] = log(Pi1[, j]^y[j] * (1 - Pi1[, j])^(1 - y[j])) g0 = exp(rowSums(Pi1)) g1 = g0 if (!is.null(priorMean)) g1 = g0 * fn0 g1 = g1/max(g1) q1 = q1/max(q1) if (plots) plot(q1, g1) ## diagnostic plot of candidate vs. target weights u = runif(steps) i1 = 1 betaSample = WM4 for (n in 2:steps) { alpha = q1[i1] * g1[n]/(q1[n] * g1[i1]) alpha = ifelse(alpha > 1, 1, alpha) if (u[n] >= alpha) { ## reject betaSample[n, ] = WM4[i1, ] } else { betaSample[n, ] = WM4[n, ] i1 = n } } beta.df = data.frame(betaSample) names(beta.df) = paste("b", 0:(ncol(beta.df) - 1), sep = "") describe(beta.df) Mean.beta = sapply(beta.df, mean) StdDev.beta = sapply(beta.df, sd) Z.beta = Mean.beta/StdDev.beta print(data.frame(Mean.beta, StdDev.beta, Z.beta)) if (plots) { ## nRows = ceiling(sqrt(nParameters)) nRows = nParameters ## nCols = floor(sqrt(nParameters)) nCols = 2 oldPar = par(mfrow = c(nRows, nCols)) nms = names(beta.df) for (i in 1:nParameters) { plot(ts(beta.df[, i]), main = paste("Time series plot of", nms[i]), ylab = nms[i]) plot(acf(beta.df[, i], plot = FALSE), main = paste("Autocorrelation plot of", nms[i])) } par(oldPar) } invisible(list(beta = beta.df, mleMean = mleMean, mleVar = mleVar)) }
/scratch/gouwar.j/cran-all/cranData/Bolstad2/R/BayesLogistic.R
#' Bayesian Poisson Regression #' #' Performs Metropolis Hastings on the Poisson regression model to draw a sample #' from the posterior. Uses a matched curvature Student's t candidate generating #' distribution with 4 degrees of freedom to give heavy tails. #' #' #' @param y the Poisson response vector #' @param x matrix of covariates #' @param steps the number of steps to use in the Metropolis-Hastings updating #' @param priorMean the mean of the prior #' @param priorVar the variance of the prior #' @param mleMean the mean of the matched curvature likelihood #' @param mleVar the covariance matrix of the matched curvature likelihood #' @param startValue a vector of starting values for all of the regression #' coefficients including the intercept #' @param randomSeed a random seed to use for different chains #' @param plots Plot the time series and auto correlation functions for each of #' the model coefficients #' @return A list containing the following components: #' #' \item{beta}{a data frame containing the sample of the model coefficients #' from the posterior distribution} \item{mleMean}{the mean of the matched #' curvature likelihood. This is useful if you've used a training set to #' estimate the value and wish to use it with another data set} #' \item{mleVar}{the covariance matrix of the matched curvature likelihood. See #' mleMean for why you'd want this} #' @examples #' #' data(poissonTest.df) #' results = BayesPois(poissonTest.df$y, poissonTest.df$x) #' #' @export BayesPois BayesPois = function(y, x, steps = 1000, priorMean = NULL, priorVar = NULL, mleMean = NULL, mleVar, startValue = NULL, randomSeed = NULL, plots = FALSE) { if (!is.null(randomSeed)) set.seed(randomSeed) nObs = length(y) if (is.vector(x)) x = as.matrix(x, ncol = 1) nParameters = ncol(x) + 1 ## number of covariates + intercept if (!is.null(startValue)) { if (length(startValue) < nParameters) { stop("You must have as many starting values as you have model parameters") } } ## initial mean of the matched curvature likelihood if (is.null(mleMean)) mleMean = c(log(mean(y)), rep(0, nParameters - 1)) X = cbind(rep(1, nObs), x) Xt = t(X) calcMatchedCurvatureNormLike = function() { betaX = X %*% mleMean Mu = exp(betaX) Vdiag = Mu Y = betaX + (y - Mu)/Mu ## I have no idea why the diag command doesn't work as it should: e.g.
Vyinv = ## diag(Vdiag, nrow = length(Vdiag)) therefore this two-step procedure is needed Vyinv = matrix(0, nrow = nObs, ncol = nObs) diag(Vyinv) = Vdiag XtV = Xt %*% Vyinv VLinv = XtV %*% X VL = solve(VLinv) w1 = VL %*% XtV mleMean = w1 %*% Y ## Loop iterations to converge to MLE for (k in 1:20) { betaX = X %*% mleMean Mu = exp(betaX) Vdiag = Mu Y = betaX + (y - Mu)/Mu Vyinv = matrix(0, nrow = nObs, ncol = nObs) diag(Vyinv) = Vdiag XtV = Xt %*% Vyinv VLinv = XtV %*% X VL = solve(VLinv) w1 = VL %*% XtV mleMean = w1 %*% Y } return(list(mleMean = mleMean, mleVar = VL)) } ## calcMatchedCurvatureNormLike normApproxPosterior = function() { result = list(postMean = rep(0, nParameters), postVar = matrix(0, ncol = nParameters, nrow = nParameters)) ## if the prior mean and variance aren't specified then set them equal to the mle mean and ## variance if (is.null(priorMean) & is.null(priorVar)) { result$postMean = mleMean result$postVar = mleVar } else { mleVarInv = solve(mleVar) priorVarInv = solve(priorVar) postPrec = mleVarInv + priorVarInv result$postVar = solve(postPrec) w2 = result$postVar %*% priorVarInv w4 = w2 %*% priorMean w3 = result$postVar %*% mleVarInv w5 = w3 %*% mleMean result$postMean = w4 + w5 } return(result) } mleParams = calcMatchedCurvatureNormLike() mleMean = mleParams$mleMean mleVar = mleParams$mleVar posterior = normApproxPosterior() postMean = posterior$postMean postVar = posterior$postVar U = chol(postVar) candBeta = matrix(rt(steps * nParameters, df = 4), ncol = nParameters) if (!is.null(startValue)) candBeta[1, ] = startValue WM2 = candBeta %*% U WM3 = matrix(rep(postMean, rep(steps, nParameters)), ncol = nParameters) WM4 = WM2 + WM3 V2 = cov(WM4) ft0 = apply(dt(candBeta, df = 4), 1, prod) ftn = apply(dnorm(candBeta), 1, prod) q1 = ft0/1 ## Metropolis-Hastings BetaXt = WM4 %*% Xt BetaXt = exp(BetaXt) for (j in 1:nObs) BetaXt[, j] = -BetaXt[, j] + y[j] * log(BetaXt[, j]) logg1 = rowSums(BetaXt) logg1 = logg1 - max(logg1) # g1 = exp(logg1) logq1 = log(q1) u = runif(steps) i1 = 1 betaSample = WM4 for (n in 2:steps) { alpha = exp(logq1[i1] + logg1[n] - logq1[n] - logg1[i1]) alpha = ifelse(alpha > 1, 1, alpha) if (u[n] >= alpha) { ## reject betaSample[n, ] = WM4[i1, ] } else { betaSample[n, ] = WM4[n, ] i1 = n } } beta.df = data.frame(betaSample) names(beta.df) = paste("b", 0:(ncol(beta.df) - 1), sep = "") describe(beta.df) Mean.beta = sapply(beta.df, mean) StdDev.beta = sapply(beta.df, sd) Z.beta = Mean.beta/StdDev.beta print(data.frame(Mean.beta, StdDev.beta, Z.beta)) if (plots) { ## nRows = ceiling(sqrt(nParameters)) nRows = nParameters ## nCols = floor(sqrt(nParameters)) nCols = 2 oldPar = par(mfrow = c(nRows, nCols)) nms = names(beta.df) for (i in 1:nParameters) { plot(ts(beta.df[, i]), main = paste("Time series plot of", nms[i]), ylab = nms[i]) plot(acf(beta.df[, i], plot = FALSE), main = paste("Autocorrelation plot of", nms[i])) } par(oldPar) } invisible(list(beta = beta.df, mleMean = mleMean, mleVar = mleVar)) }
/scratch/gouwar.j/cran-all/cranData/Bolstad2/R/BayesPois.R
#' HIV Survival data #' #' Data from a hypothetical HMO-HIV+ study shown in Table 1.1 of Hosmer, D.W. #' and Lemeshow, S. (1998) Applied Survival Analysis: Regression Modeling of #' Time to Event Data, John Wiley and Sons Inc., New York, NY #' #' #' @name AidsSurvival.df #' @docType data #' @format A data frame with 100 observations on 7 variables. \tabular{rlll}{ #' [,1] \tab id \tab numeric \tab Subject ID code \cr [,2] \tab entdate \tab #' date \tab Entry date (ddmmyr) \cr [,3] \tab enddate \tab date \tab End #' date (ddmmyr) \cr [,4] \tab time \tab numeric \tab Survival Time = days #' between Entry date and End date \cr [,5] \tab age \tab numeric \tab Age in #' years \cr [,6] \tab drug \tab factor \tab History of IV drug use (0 = No, 1 #' = Yes) \cr [,7] \tab censor \tab factor \tab Follow-Up Status (1 = Death due #' to AIDS or AIDS-related factors, \cr \tab \tab \tab 0 = Alive at study end #' or lost to follow-up)\cr } #' @keywords datasets NULL #' Chapter 10 Example 16 data #' #' A random sample of size 10 from a \eqn{N(\mu, \sigma^{2})} distribution #' where both \eqn{\mu}{mu} and \eqn{\sigma}{sigma} are unknown parameters. #' #' #' @name c10ex16.df #' @aliases c10ex16.df ex16.df #' @docType data #' @format A data frame with 10 observations in a single variable called y #' @keywords datasets NULL #' Coronary Heart Disease Chapter 8 Example 11 #' #' The age and coronary heart disease status of 100 individuals taken from #' Hosmer and Lemeshow (1989). #' #' #' @name chd.df #' @docType data #' @format A data frame with 100 observations in two columns \tabular{lrrr}{ #' [,1] \tab age \tab numeric \tab age in years \cr [,2] \tab chd \tab factor #' \tab coronary heart disease status. Levels (1 = Yes), (0 = No) \cr } #' @keywords datasets NULL #' Test data for hiermeanReg #' #' Data for testing hiermeanReg which uses Gibbs sampling on a hierarchical #' normal mean model with regression on covariates #' #' #' @name hiermeanRegTest.df #' @docType data #' @format A data frame with 30 observations on 4 variables. \tabular{rlll}{ #' [1,] \tab y \tab numeric \tab the response vector \cr [2,] \tab group \tab #' factor \tab the grouping factor levels 1-3 \cr [3,] \tab x1 \tab numeric #' \tab the first covariate \cr [4,] \tab x2 \tab numeric \tab the second #' covariate \cr } #' @seealso hiermeanReg #' @keywords datasets NULL #' Test data for bayesLogistic #' #' A test data set for bayesLogisticReg #' #' #' @name logisticTest.df #' @docType data #' @format A data frame with 100 observations on 6 variables. \tabular{rlll}{ #' [1,] \tab x \tab numeric \tab the covariate \cr [2,] \tab eps \tab numeric #' \tab the error in the response \cr [3,] \tab logit.p \tab numeric \tab the #' logit of the probability of success given x: logit.p = 2 + 3*x + eps \cr [4,] \tab p #' \tab numeric \tab the probability of success given x \cr [5,] \tab u \tab #' numeric \tab a U[0,1] random variable \cr [6,] \tab y \tab binary \tab y = 1 if #' u[i] < p[i], otherwise 0 } #' @seealso bayesLogistic #' @keywords datasets NULL #' A test data set for bayesPois #' #' A test data set for bayesPois. The data come from the equation #' \eqn{\log(\lambda_{i}) = 1 + 5x_{i} + \epsilon_{i}} where \eqn{\epsilon_{i}} #' comes from N(0,0.01). #' #' #' @name poissonTest.df #' @docType data #' @format A data frame with 100 observations on 5 variables.
\tabular{rlll}{ #' [1,] \tab x \tab numeric \tab the covariate \cr [2,] \tab eps \tab numeric #' \tab the error in the log response \cr [3,] \tab log.lam \tab numeric \tab #' \eqn{\log(\lambda_{i}) = 1 + 5x_{i} + \epsilon_{i}} where \eqn{\epsilon_{i} \sim N(0, 0.01)} #' \cr [4,] \tab lam \tab numeric \tab \eqn{\exp(\log(\lambda))} \cr [5,] \tab #' y \tab numeric \tab a Poisson random variate with mean \eqn{\lambda_{i}} \cr #' } #' @seealso bayesPois #' @keywords datasets NULL ## usethis namespace: start #' @importFrom graphics box #' @importFrom graphics hist #' @importFrom graphics lines #' @importFrom graphics par #' @importFrom stats acf #' @importFrom stats approx #' @importFrom stats approxfun #' @importFrom stats coef #' @importFrom stats cov #' @importFrom stats dnorm #' @importFrom stats dt #' @importFrom stats ecdf #' @importFrom stats lm #' @importFrom stats median #' @importFrom stats quantile #' @importFrom stats rchisq #' @importFrom stats rnorm #' @importFrom stats rt #' @importFrom stats runif #' @importFrom stats sd #' @importFrom stats ts #' @importFrom stats var ## usethis namespace: end NULL
/scratch/gouwar.j/cran-all/cranData/Bolstad2/R/Bolstad2-package.R
#' Calculate the Gelman Rubin statistic #' #' Calculate the Gelman Rubin statistic #' #' #' @aliases GelmanRubin GR #' @param theta A matrix containing samples from at least two chains on a #' parameter theta. Each chain should have 2n iterations. The last n iterations will #' be used to calculate the statistic #' @return A data frame containing n, the between chain variance B, the within chain #' variance W, the estimated variance of the parameter vHat, and the Gelman #' Rubin statistic \eqn{R = \sqrt{vHat/W}} #' @references Gelman, A. and Rubin, D.B. (1992) 'Inference from iterative #' simulation using multiple sequences (with discussion).' Statistical Science #' 7, pp. 457-511 #' @examples #' #' ## take four chains sampling from a normal mixture density #' theta0 = c(0,1) #' theta1 = c(3,2) #' p = 0.6 #' candidate = c(0, 3) #' #' v1 = normMixMH(theta0, theta1, p, candidate, steps = 200) #' v2 = normMixMH(theta0, theta1, p, candidate, steps = 200) #' v3 = normMixMH(theta0, theta1, p, candidate, steps = 200) #' v4 = normMixMH(theta0, theta1, p, candidate, steps = 200) #' #' theta=cbind(v1,v2,v3,v4) #' GelmanRubin(theta) #' #' @export GelmanRubin GelmanRubin = function(theta) { ## theta is a matrix of outputs from various chains if (!is.matrix(theta)) { stop("theta must be a matrix") } nObs = nrow(theta) nCols = ncol(theta) n1 = floor(nObs * 0.5) n2 = nObs - n1 if (nObs < 100) stop("There must be at least 100 observations from each chain") if (nCols < 2) stop("There must be at least two chains") theta = theta[-(1:n1), ] # take only the second half of the data vars = apply(theta, 2, var) means = apply(theta, 2, mean) mBar = mean(means) B = n2 * sum((means - mBar)^2)/(nCols - 1) W = sum(vars)/nCols sigmaSq = ((n2 - 1) * W + B)/(n2) vHat = sigmaSq + B/(n2 * nCols) df = n2 R = sqrt(vHat/W * (df/(df - 2))) results.df = data.frame(n = n2, B, W, vHat, R) cat(paste(R, "\n")) invisible(results.df) } GR = function(theta) { return(GelmanRubin(theta)) }
/scratch/gouwar.j/cran-all/cranData/Bolstad2/R/GelmanRubin.R
#' Metropolis Hastings sampling from a Bivariate Normal distribution #' #' This function uses the Metropolis-Hastings algorithm to draw a sample from a #' correlated bivariate normal target density using a random walk candidate and #' an independent candidate density respectively where we are drawing both #' parameters in a single draw. It can also use the blockwise Metropolis #' Hastings algorithm and Gibbs sampling respectively to draw a sample from the #' correlated bivariate normal target. #' #' #' @param rho the correlation coefficient for the bivariate normal #' @param rho1 the correlation of the candidate distribution. Only used when #' type = 'ind' #' @param sigma the standard deviations of the marginal distributions of the #' independent candidate density. Only used when type = 'ind' #' @param steps the number of Metropolis Hastings steps #' @param type the type of candidate generation to use. Can be one of 'rw' = #' random walk, 'ind' = independent normals, 'gibbs' = Gibbs sampling or #' 'block' = blockwise. It is sufficient to use 'r','i','g', or 'b' #' @return returns a list which contains a data frame called targetSample with #' members x and y. These are the samples from the target density. #' @examples #' #' ## independent chain #' chain1.df=bivnormMH(0.9)$targetSample #' #' ## random walk chain #' chain2.df=bivnormMH(0.9, type = 'r')$targetSample #' #' #' ## blockwise MH chain #' chain3.df=bivnormMH(0.9, type = 'b')$targetSample #' #' ## Gibbs sampling chain #' chain4.df=bivnormMH(0.9, type = 'g')$targetSample #' #' oldPar = par(mfrow=c(2,2)) #' plot(y ~ x, type = 'l', chain1.df, main = 'Independent') #' plot(y ~ x, type = 'l', chain2.df, main = 'Random Walk') #' plot(y ~ x, type = 'l', chain3.df, main = 'Blockwise') #' plot(y ~ x, type = 'l', chain4.df, main = 'Gibbs') #' par(oldPar) #' #' @export bivnormMH bivnormMH = function(rho, rho1 = 0.9, sigma = c(1.2, 1.2), steps = 1000, type = "ind") { if (rho < -1 | rho > 1) { stop("rho must be between -1 and 1") } if (steps < 100) warning("You should really do more than 100 steps") target = candidate = matrix(0, ncol = 2, nrow = steps) if (length(grep("^[Rr]", type)) > 0) { type = "rw" } else if (length(grep("^[Ii]", type)) > 0) { type = "ind" } else if (length(grep("^[Bb]", type)) > 0) { type = "block" } else if (length(grep("^[Gg]", type)) > 0) { type = "gibbs" } else { stop("Type must be one of rw, ind, block or gibbs") } x0 = c(0, 0) x1 = c(0, 0) mu = c(0, 0) if (type == "rw") { startValue = c(2, 2) sigma1 = 0.5 var1 = sigma1^2 sigma2 = 1 var2 = sigma2^2 k = 2 * pi/1000 u = runif(steps) z1 = rnorm(steps, 0, sigma1) z2 = rnorm(steps, 0, sigma1) w = 1 - rho^2 target[1, ] = startValue x1 = target[1, ] mu = target[1, ] x0 = c(mu[1] + z1[1], mu[2] + z2[1]) candidate[2, ] = x0 for (n in 2:steps) { n1 = n - 1 x1 = target[n1, ] x0 = candidate[n, ] canDens = exp(-1/(2 * var2 * w) * (x0[1]^2 - 2 * rho * x0[1] * x0[2] + x0[2]^2)) curDens = exp(-1/(2 * var2 * w) * (x1[1]^2 - 2 * rho * x1[1] * x1[2] + x1[2]^2)) if (u[n] < canDens/curDens) { ## candidate accepted target[n, ] = x0 } else { ## candidate rejected target[n, ] = target[n1, ] } mu = target[n, ] x0 = c(mu[1] + z1[n], mu[2] + z2[n]) if (n < steps) candidate[n + 1, ] = x0 } } else if (type == "ind") { if (rho1 < -1 | rho1 > 1) stop("rho1 must be between -1 and 1") if (any(sigma <= 0)) stop("The elements of sigma must be strictly positive non-zero") u = runif(steps) startValue = c(2, 1.5) x1 = startValue x0 = matrix(rnorm(2 * steps, rep(0, 2 * steps), rep(sigma, each = steps)), ncol = 2)
x0[, 2] = rho1 * x0[, 1] + sqrt(1 - rho1^2) * x0[, 2] gDensInd = function(x, rho) { y = x[2] x = x[1] return(exp(-0.5/(1 - rho^2) * (x^2 - 2 * rho * x * y + y^2))) } qDens = function(x, rho, rho1, sigma) { zy = x[2]/sigma[2] zx = x[1]/sigma[1] return(exp(-0.5/(1 - rho1^2) * (zx^2 - 2 * rho * zx * zy + zy^2))) } for (n in 1:steps) { target[n, ] = x1 candidate[n, ] = x0[n, ] cand = gDensInd(x0[n, ], rho) cur = gDensInd(x1, rho) qcand = qDens(x0[n, ], rho, rho1, sigma) qcur = qDens(x1, rho, rho1, sigma) ratio = (cand/cur) * (qcur/qcand) if (u[n] < ratio) x1 = x0[n, ] } } else if (type == "block") { twosteps = 2 * steps target = candidate = matrix(0, ncol = 2, nrow = twosteps) u = runif(twosteps) startValue = c(2, 1.5) x1 = startValue sx = sqrt(1 - rho^2) vx = sx^2 sigma1 = 0.75 var1 = sigma1^2 gDensBlock = function(x, mx, vx) { return(exp(-0.5 * (x - mx)^2/vx)) } for (n in 1:steps) { ## draw from Block 1 mx = rho * x1[2] ## draw candidate x0 = c(rnorm(1, mx, sigma1), x1[2]) n1 = 2 * n - 1 target[n1, ] = x1 candidate[n1, ] = x0 cand = gDensBlock(x0[1], mx, vx) cur = gDensBlock(x1[1], mx, vx) qcand = gDensBlock(x0[1], mx, var1) qcur = gDensBlock(x1[1], mx, var1) ratio = (cand/cur) * (qcur/qcand) if (u[n1] < ratio) x1 = x0 ## draw from block 2 my = rho * x1[1] ## draw candidate x0 = c(x1[1], rnorm(1, my, sigma1)) n2 = 2 * n target[n2, ] = x1 candidate[n2, ] = x0 cand = gDensBlock(x0[2], my, vx) cur = gDensBlock(x1[2], my, vx) qcand = gDensBlock(x0[2], my, var1) qcur = gDensBlock(x1[2], my, var1) ratio = (cand/cur) * (qcur/qcand) if (u[n2] < ratio) x1 = x0 } } else if (type == "gibbs") { twosteps = 2 * steps target = candidate = matrix(0, ncol = 2, nrow = twosteps) u = runif(twosteps) startValue = c(2, 1.5) x1 = startValue sx = sqrt(1 - rho^2) mx = rho * x1[1] vx = sx^2 sigma1 = sx var1 = sigma1^2 gDensGibbs = function(x, mx, vx) { return(exp(-0.5 * (x - mx)^2/vx)) } for (n in 1:steps) { mx = rho * x1[1] ## draw a candidate x0 = c(rnorm(1, mx, sigma1), x1[2]) n1 = 2 * n - 1 target[n1, ] = x1 candidate[n1, ] = x0 cand = gDensGibbs(x0[1], mx, vx) cur = gDensGibbs(x1[1], mx, vx) qcand = gDensGibbs(x0[1], mx, var1) qcur = gDensGibbs(x1[1], mx, var1) ratio = (cand/cur) * (qcur/qcand) if (u[n1] < ratio) x1 = x0 ## draw a candidate mx = rho * x1[1] x0 = c(x1[1], rnorm(1, mx, sigma1)) n2 = 2 * n target[n2, ] = x1 candidate[n2, ] = x0 cand = gDensGibbs(x0[2], mx, vx) cur = gDensGibbs(x1[2], mx, vx) qcand = gDensGibbs(x0[2], mx, var1) qcur = gDensGibbs(x1[2], mx, var1) ratio = (cand/cur) * (qcur/qcand) if (u[n2] < ratio) x1 = x0 } } target = data.frame(x = target[, 1], y = target[, 2]) plot(y ~ x, data = target, type = "l") invisible(list(targetSample = target)) }
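
## A quick check (values are illustrative): a correctly mixing Gibbs chain
## should recover the target correlation once a short burn-in is discarded.
chain.df = bivnormMH(0.9, type = 'gibbs', steps = 2000)$targetSample
burnt = chain.df[-(1:100), ]
cor(burnt$x, burnt$y)   # should be close to 0.9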
/scratch/gouwar.j/cran-all/cranData/Bolstad2/R/bivnormMH.R
#' Calculate a credible interval from a numerically specified posterior CDF or #' from a sample from the posterior #' #' Calculates a lower, upper, or two-sided credible interval from the numerical #' posterior CDF or from a sample from the posterior. #' #' This function uses linear interpolation to calculate bounds for points that #' may not be specified by CDF #' #' @param theta either a sample from the posterior density or the values over #' which the the posterior CDF is specified #' @param cdf the values of the CDF, \eqn{F(\theta) = #' \int_{-\infty}^{\theta}f(t).df} where \eqn{f(t)} is the PDF. This only needs #' to be specified if a numerically specified posterior is being used #' @param conf the desired 'confidence' level #' @param type the type of interval to return, 'lower' = one sided lower bound, #' 'two-sided' = two - sided, or 'upper' = one sided upper bound. It is #' sufficient to use 'l','t' or 'u' #' @return a list containing the elements lower.bound, uppper.bound or both #' depending on type #' @examples #' #' ## commands for calculating a numerical posterior CDF. #' ## In this example, the likelihood is proportional to #' ## \eqn{\theta^{3/2}\times \exp(-\theta/4)} and a N(6, 9) prior is used. #' theta = seq(from = 0.001, to = 40, by = 0.001) #' prior = dnorm(theta,6,3) #' ppnLike = theta^1.5*exp(-theta/4) #' ppnPost = prior*ppnLike #' scaleFactor = sintegral(theta, ppnPost)$int #' posterior = ppnPost/scaleFactor #' cdf = sintegral(theta, posterior)$y #' ci = credInt(theta, cdf) #' par(mfrow=c(2,2)) #' plot(prior ~ theta, type = 'l', main = 'Prior N(6, 9)') #' plot(ppnLike ~ theta, type = 'l', main = 'Proportional likelihood') #' plot(posterior ~ theta, type = 'l', main = 'Posterior') #' abline(v=c(unlist(ci))) #' #' ## Use an inverse method to take a random sample of size 1000 #' ## from the posterior #' suppressWarnings({Finv = approxfun(cdf,theta)}) #' thetaSample = Finv(runif(1000)) #' ci = credInt(thetaSample) #' #' @export credInt credInt = function(theta, cdf = NULL, conf = 0.95, type = "twosided") { if (conf <= 0 | conf >= 1) stop("conf must be between 0 and 1") if (length(grep("^[Ll]", type)) > 0) { type = "lower" } else if (length(grep("^[Tt]", type)) > 0) { type = "twosided" } else if (length(grep("^[Uu]", type)) > 0) { type = "upper" } else { stop("type must be one of lower, upper or twosided") } alpha = 1 - conf n = length(theta) if (n < 10) stop("theta must have at least ten values") if (!is.null(cdf)) { suppressWarnings({Finv = approxfun(x = cdf, y = theta)}) if (type == "lower") { lower.bound = Finv(alpha) cat(paste("Lower credible bound is : ", lower.bound, "\n", sep = "")) invisible(list(lower.bound = lower.bound)) } else if (type == "upper") { upper.bound = Finv(1 - alpha) cat(paste("Upper credible bound is : ", upper.bound, "\n", sep = "")) invisible(list(upper.bound = upper.bound)) } else { lower.bound = Finv(alpha/2) upper.bound = Finv(1 - alpha/2) cat(paste("Credible interval is : (", lower.bound, ",", upper.bound, ")\n", sep = "")) invisible(list(lower.bound = lower.bound, upper.bound = upper.bound)) } } else { if (type == "lower") { lower.bound = quantile(theta, alpha) cat(paste("Lower credible bound is ", lower.bound, "\n", sep = "")) invisible(list(lower.bound = lower.bound)) } else if (type == "upper") { upper.bound = quantile(theta, 1 - alpha) cat(paste("Upper credible bound is ", upper.bound, "\n", sep = "")) invisible(list(upper.bound = upper.bound)) } else { bounds = quantile(theta, c(alpha/2, 1 - alpha/2)) lower.bound = bounds[1] 
upper.bound = bounds[2] cat(paste("Credible interval is (", lower.bound, ",", upper.bound, ")\n", sep = "")) invisible(list(lower.bound = lower.bound, upper.bound = upper.bound)) } } }
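
## When the posterior has a closed form, the numerical interval can be checked
## against exact quantiles. Here a Beta(5, 15) posterior is used purely for
## illustration; the equal-tail interval from credInt should agree with qbeta.
theta = seq(from = 0.001, to = 0.999, by = 0.001)
posterior = dbeta(theta, 5, 15)
S = sintegral(theta, posterior)
ci = credInt(S$x, S$y)
qbeta(c(0.025, 0.975), 5, 15)   # exact bounds for comparison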
/scratch/gouwar.j/cran-all/cranData/Bolstad2/R/credInt.R
#' Calculate a credible interval from a numerically specified posterior CDF #' #' Calculates a lower, upper, or two-sided credible interval from the numerical #' posterior CDF. #' #' This function uses linear interpolation to calculate bounds for points that #' may not be specified by CDF #' #' @param theta the values over which the the posterior CDF is specified #' @param cdf the values of the CDF, \eqn{F(\theta) = #' \int_{-\infty}^{\theta}f(t).df} where \eqn{f(t)} is the PDF. #' @param conf the desired 'confidence' level #' @param type the type of interval to return, 'lower' = one sided lower bound, #' 'two-sided' = two - sided, or 'upper' = one sided upper bound. It is #' sufficient to use 'l','t' or 'u' #' @return a list containing the elements lower.bound, uppper.bound or both #' depending on type #' @examples #' #' ## commands for calculating a numerical posterior CDF. #' ## In this example, the likelihood is proportional to #' ## \eqn{\theta^{3/2}\times \exp(-\theta/4)} and a N(6, 9) prior is used. #' theta = seq(from = 0.001, to = 40, by = 0.001) #' prior = dnorm(theta,6,3) #' ppnLike = theta^1.5*exp(-theta/4) #' ppnPost = prior*ppnLike #' scaleFactor = sintegral(theta, ppnPost)$int #' posterior = ppnPost/scaleFactor #' cdf = sintegral(theta, posterior)$y #' ci=credIntNum(theta, cdf) #' par(mfrow=c(2,2)) #' plot(prior ~ theta, type = 'l', main = 'Prior N(6, 9)') #' plot(ppnLike ~ theta, type = 'l', main = 'Proportional likelihood') #' plot(posterior ~ theta, type = 'l', main = 'Posterior') #' abline(v=c(unlist(ci))) #' #' @export credIntNum credIntNum = function(theta, cdf, conf = 0.95, type = "twosided") { if (conf <= 0 | conf >= 1) stop("conf must be between 0 and 1") if (length(grep("^[Ll]", type)) > 0) { type = "lower" } else if (length(grep("^[Tt]", type)) > 0) { type = "twosided" } else if (length(grep("^[Uu]", type)) > 0) { type = "upper" } else { stop("type must be one of lower, upper or twosided") } alpha = 1 - conf n = length(theta) if (n < 10) stop("theta must have at least ten values") Finv = approxfun(cdf, theta) if (type == "lower") { lower.bound = Finv(alpha) cat(paste("Lower credible bound is : ", lower.bound, "\n", sep = "")) invisible(list(lower.bound = lower.bound)) } else if (type == "upper") { upper.bound = Finv(1 - alpha) cat(paste("Upper credible bound is : ", upper.bound, "\n", sep = "")) invisible(list(upper.bound = upper.bound)) } else { lower.bound = Finv(alpha/2) upper.bound = Finv(1 - alpha/2) cat(paste("Credible interval is : (", lower.bound, ",", upper.bound, ")\n", sep = "")) invisible(list(lower.bound = lower.bound, upper.bound = upper.bound)) } }
/scratch/gouwar.j/cran-all/cranData/Bolstad2/R/credIntNum.R
#' Calculate a credible interval from a numerically specified posterior CDF #' #' Calculates a lower, upper, or two-sided credible interval from the numerical #' posterior CDF. #' #' This function uses linear interpolation to calculate bounds for points that #' may not be specified by CDF #' #' @param theta a sample from the posterior density #' @param conf the desired 'confidence' level #' @param type the type of interval to return, 'lower' = one sided lower bound, #' 'two-sided' = two - sided, or 'upper' = one sided upper bound. It is #' sufficient to use 'l','t' or 'u' #' @return a list containing the elements lower.bound, uppper.bound or both #' depending on type #' @examples #' #' ## posterior is N(0,1) #' theta = rnorm(1000) #' ci=credIntSamp(theta) #' plot(density(theta)) #' abline(v=c(unlist(ci))) #' #' @export credIntSamp credIntSamp = function(theta, conf = 0.95, type = "twosided") { if (length(theta) < 10) warning("theta is a very small sample, therefore the results may not be accurate") if (conf <= 0 | conf >= 1) stop("conf must be between 0 and 1") if (length(grep("^[Ll]", type)) > 0) { type = "lower" } else if (length(grep("^[Uu]", type)) > 0) { type = "upper" } else if (length(grep("^[Tt]", type)) > 0) { type = "twosided" } else { stop("Type must be one of lower, upper or twosided") } alpha = 1 - conf if (type == "lower") { lower.bound = quantile(theta, alpha) cat(paste("Lower credible bound is ", lower.bound, "\n", sep = "")) invisible(list(lower.bound = lower.bound)) } else if (type == "upper") { upper.bound = quantile(theta, 1 - alpha) cat(paste("Upper credible bound is ", upper.bound, "\n", sep = "")) invisible(list(upper.bound = upper.bound)) } else { bounds = quantile(theta, c(alpha/2, 1 - alpha/2)) lower.bound = bounds[1] upper.bound = bounds[2] cat(paste("Credible interval is (", lower.bound, ",", upper.bound, ")\n", sep = "")) invisible(list(lower.bound = lower.bound, upper.bound = upper.bound)) } }
/scratch/gouwar.j/cran-all/cranData/Bolstad2/R/credIntSamp.R
#' Give simple descriptive statistics for a matrix or a data frame #' #' This function is designed to emulate the Minitab function DESCRIBE. It gives #' simple descriptive statistics for a data frame #' #' #' @param x A matrix or data.frame with numeric entries. Different variables #' are represented by columns. #' @param varNames A vector of variable names for each of the columns #' @return A data.frame containing the following elements: \item{N}{The number #' of observations for each variable} \item{mean}{The sample mean for each #' variable} \item{stdev}{The sample standard deviation} \item{sterr}{The #' standard error of the mean} \item{min}{The minimum} \item{q1}{The lower #' quartile} \item{med}{The median} \item{q3}{The upper quartile} #' \item{max}{The maximum} #' @examples #' #' data(poissonTest.df) #' describe(poissonTest.df) #' #' @export describe describe = function(x, varNames = NULL) { ## Mimics Minitab's desc function nameX = deparse(substitute(x)) if (is.matrix(x)) { x = data.frame(x) if (is.null(varNames)) varNames = paste(nameX, 0:(ncol(x) - 1), sep = "") names(x) = varNames } nx = sapply(x, length) mx = sapply(x, mean) sx = sapply(x, sd) SEx = sapply(x, sd)/sqrt(nx) minx = sapply(x, min) maxx = sapply(x, max) q1x = sapply(x, quantile, prob = 0.25) medx = sapply(x, median) q3x = sapply(x, quantile, prob = 0.75) stats.df = data.frame(N = nx, mean = mx, stdev = sx, sterr = SEx, min = minx, q1 = q1x, med = medx, q3 = q3x, max = maxx) nVars = ncol(x) print(stats.df) invisible(stats.df) }
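
## describe() also accepts a plain matrix, with varNames supplying the column
## labels used in the output (values here are illustrative only).
X = matrix(rnorm(300), ncol = 3)
describe(X, varNames = c("x1", "x2", "x3"))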
/scratch/gouwar.j/cran-all/cranData/Bolstad2/R/describe.R
#' Hierarchical Normal Means Regression Model #' #' fits a hierarchical normal model of the form \eqn{E[y_{ij}] = \mu_{j} + #' \beta_{1}x_{i1}+\dots+\beta_{p}x_{ip}} #' #' #' @param design a list with elements y = response vector, group = grouping #' vector, x = matrix of covariates or NULL if there are no covariates #' @param priorTau a list with elements tau0 and v0 #' @param priorPsi a list with elements psi0 and eta0 #' @param priorVar a list with elements s0 and kappa0 #' @param priorBeta a list with elements b0 and bMat or NULL if x is NULL #' @param steps the number of Gibbs sampling steps to take #' @param startValue a list with possible elements tau, psi, mu, sigmasq and #' beta. tau, psi and sigmasq must all be scalars. mu and beta must be vectors #' with as many elements as there are groups and covariates respectively #' @param randomSeed a random seed for the random number generator #' @return A data frame with variables: \item{tau}{Samples from the posterior #' distribution of tau} \item{psi}{Samples from the posterior distribution of #' psi} \item{mu}{Samples from the posterior distribution of mu} #' \item{beta}{Samples from the posterior distribution of beta if there are any #' covariates} \item{sigmaSq}{Samples from the posterior distribution of #' \eqn{\sigma^2}} \item{sigma}{Samples from the posterior distribution of #' sigma} #' @examples #' #' priorTau = list(tau0 = 0, v0 = 1000) #' priorPsi = list(psi0 = 500, eta0 = 1) #' priorVar = list(s0 = 500, kappa0 = 1) #' priorBeta = list(b0 = c(0,0), bMat = matrix(c(1000,100,100,1000), ncol = 2)) #' #' data(hiermeanRegTest.df) #' data.df = hiermeanRegTest.df #' design = list(y = data.df$y, group = data.df$group, #' x = as.matrix(data.df[,3:4])) #' r=hierMeanReg(design, priorTau, priorPsi, priorVar, priorBeta) #' #' oldPar = par(mfrow = c(3,3)) #' plot(density(r$tau)) #' plot(density(r$psi)) #' plot(density(r$mu.1)) #' plot(density(r$mu.2)) #' plot(density(r$mu.3)) #' plot(density(r$beta.1)) #' plot(density(r$beta.2)) #' plot(density(r$sigmaSq)) #' par(oldPar) #' #' ## example with no covariates #' priorTau = list(tau0 = 0, v0 = 1000) #' priorPsi = list(psi0 = 500, eta0 = 1) #' priorVar = list(s0 = 500, kappa0 = 1) #' #' data(hiermeanRegTest.df) #' data.df = hiermeanRegTest.df #' design = list(y = data.df$y, group = data.df$group, x = NULL) #' r=hierMeanReg(design, priorTau, priorPsi, priorVar) #' #' oldPar = par(mfrow = c(3,2)) #' plot(density(r$tau)) #' plot(density(r$psi)) #' plot(density(r$mu.1)) #' plot(density(r$mu.2)) #' plot(density(r$mu.3)) #' plot(density(r$sigmaSq)) #' par(oldPar) #' #' #' @export hierMeanReg hierMeanReg = function(design, priorTau, priorPsi, priorVar, priorBeta = NULL, steps = 1000, startValue = NULL, randomSeed = NULL) { ## design is expected to be a list with elements, y = response vector, group = grouping ## vector, x = matrix of covariates or null ## priorTau is a list with elements tau0 and v0 priorPsi is a list with elements psi0 and ## eta0 priorVar is a list with elements s0 and kappa0 priorBeta is a list with elements b0 ## and bMat or null if x is null startValue is a list with possible elements tau, psi, mu, ## sigma and beta if (!is.null(randomSeed)) set.seed(randomSeed) names(design) = tolower(names(design)) if (any(!(names(design) %in% c("y", "group", "x")))) { stop("design must have elements y, group and x") } names(priorTau) = tolower(names(priorTau)) if (any(!(names(priorTau) %in% c("tau0", "v0")))) { stop("priorTau must have elements tau0 and v0") } names(priorPsi) = 
tolower(names(priorPsi)) if (any(!(names(priorPsi) %in% c("psi0", "eta0")))) { stop("priorPsi must have elements psi0 and eta0") } names(priorVar) = tolower(names(priorVar)) if (any(!(names(priorVar) %in% c("s0", "kappa0")))) { stop("priorVar must have elements s0 and kappa0") } if (!is.null(startValue)) { names(startValue) = tolower(names(startValue)) if (any(!(names(startValue) %in% c("tau", "psi", "mu", "sigmasq", "beta")))) { stop("startValue can only have elements tau, psi, mu, sigmasq, beta") } } bReg = TRUE if (is.null(design$x)) { nCovariates = 0 bReg = FALSE } else { if (any(!(names(priorBeta) %in% c("b0", "bMat")))) { stop("priorBeta must have elements b0 and bMat") } nCovariates = ncol(design$x) if (length(priorBeta$b0) != nCovariates) stop("b0 must have as many elements as there are covariates") if (ncol(priorBeta$bMat) != nrow(priorBeta$bMat)) stop("bMat must be a square matrix") if (ncol(priorBeta$bMat) != nCovariates) stop("bMat must have as many rows(columns) as there are covariates") } nObs = length(design$y) design$group = factor(design$group) nGroups = length(levels(design$group)) kappa1 = priorVar$kappa0 + nObs eta1 = priorPsi$eta0 + nGroups y = design$y group = design$group X = Xt = XtX = bMat2 = prec0 = precObs = prec1 = NULL betaSample = zBeta = NULL if (bReg) { X = design$x Xt = t(X) XtX = Xt %*% X prec0 = solve(priorBeta$bMat) bMat2 = prec0 %*% priorBeta$b0 betaSample = matrix(0, ncol = nCovariates, nrow = steps) L = t(chol(priorBeta$bMat)) w4 = rnorm(nCovariates) if (!is.null(startValue) & ("beta" %in% names(startValue))) { betaSample[1, ] = startValue$beta } else { betaSample[1, ] = L %*% w4 } zBeta = matrix(rnorm(nCovariates * steps), ncol = nCovariates) } tauSample = rep(0, steps) psiSample = rep(0, steps) muSample = matrix(0, ncol = nGroups, nrow = steps) sigmaSqSample = rep(0, steps) if (!is.null(startValue) & ("tau" %in% names(startValue))) { tauSample[1] = startValue$tau } else { tauSample[1] = priorTau$tau0 + rnorm(1) * sqrt(priorTau$v0) } if (!is.null(startValue) & ("psi" %in% names(startValue))) { psiSample[1] = startValue$psi } else { psiSample[1] = priorPsi$eta0/rchisq(1, df = priorPsi$eta0) } if (!is.null(startValue) & ("mu" %in% names(startValue))) { muSample[1, ] = startValue$mu } else { muSample[1, ] = tauSample[1] + rnorm(3) * sqrt(psiSample[1]) } if (!is.null(startValue) & ("sigmasq" %in% names(startValue))) { sigmaSqSample[1] = startValue$sigmasq } else { sigmaSqSample[1] = priorVar$kappa0/rchisq(1, df = priorVar$kappa0) } nj = sapply(split(y, group), length) ## pre-generate arrays of r.v's zTau = rnorm(steps) chiPsi = rchisq(steps, df = eta1) zMu = matrix(rnorm(nGroups * steps), ncol = nGroups) chiSigmaSq = rchisq(steps, df = kappa1) mu1 = rep(0, nGroups) for (n in 2:steps) { muSum = sum(muSample[n - 1, ]) muBar = muSum/nGroups v1 = priorTau$v0 * psiSample[n - 1]/(psiSample[n - 1] + nGroups * priorTau$v0) tau1 = v1 * (priorTau$tau0/priorTau$v0 + nGroups * muBar/psiSample[n - 1]) tauSample[n] = tau1 + zTau[n] * sqrt(v1) SSq = sum((muSample[n - 1, ] - tauSample[n])^2) psi1 = priorPsi$psi0 + SSq psiSample[n] = psi1/chiPsi[n] for (j in 1:nGroups) { varMu = (sigmaSqSample[n - 1] * psiSample[n - 1])/(sigmaSqSample[n - 1] + nj[j] * psiSample[n - 1]) xBeta = 0 if (bReg) xBeta = X[group == j, ] %*% betaSample[n - 1, ] zBar = mean(y[group == j] - xBeta) muBar = varMu * (tauSample[n]/psiSample[n - 1] + nj[j] * zBar/sigmaSqSample[n - 1]) muSample[n, j] = muBar + zMu[n, j] * sqrt(varMu) mu1[j] = muSample[n, j] } yMu = y - mu1[group] xBeta = 0 if (bReg) { 
precObs = XtX/sigmaSqSample[n - 1] prec1 = prec0 + precObs bMat1 = solve(prec1) bLS = coef(lm(yMu ~ -1 + X)) b1 = bMat1 %*% bMat2 + bMat1 %*% (precObs %*% bLS) L = t(chol(bMat1)) betaSample[n, ] = L %*% zBeta[n, ] + b1 xBeta = X %*% betaSample[n, ] } SSw = sum((yMu - xBeta)^2) s1 = priorVar$s0 + SSw sigmaSqSample[n] = s1/chiSigmaSq[n] } sigmaSample = sqrt(sigmaSqSample) results.df = NULL if (bReg) { results.df = data.frame(tau = tauSample, psi = psiSample, mu = muSample, beta = betaSample, sigmaSq = sigmaSqSample, sigma = sigmaSample) } else { results.df = data.frame(tau = tauSample, psi = psiSample, mu = muSample, sigmaSq = sigmaSqSample, sigma = sigmaSample) } describe(results.df) invisible(results.df) }
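
## The columns of the returned data frame are posterior draws, so the other
## tools in this package apply directly. This sketch reuses the prior settings
## from the documented example (no covariates) and then computes a 95%
## credible interval for the first group mean with credIntSamp.
priorTau = list(tau0 = 0, v0 = 1000)
priorPsi = list(psi0 = 500, eta0 = 1)
priorVar = list(s0 = 500, kappa0 = 1)
data(hiermeanRegTest.df)
design = list(y = hiermeanRegTest.df$y, group = hiermeanRegTest.df$group, x = NULL)
r = hierMeanReg(design, priorTau, priorPsi, priorVar)
credIntSamp(r$mu.1)   # equal-tail interval for the first group mean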
/scratch/gouwar.j/cran-all/cranData/Bolstad2/R/hierMeanReg.R
#' Draw a sample from a posterior distribution of data with an unknown mean and #' variance using Gibbs sampling #' #' normGibbs draws a Gibbs sample from the posterior distribution of the #' parameters given the data fron normal distribution with unknown mean and #' variance. The prior for \eqn{\mu} given \eqn{var} is prior mean \eqn{m0} #' and prior variance \eqn{var/n0} . That means \eqn{n0} is the 'equivalent #' sample size.' The prior distribution of the variance is \eqn{s0} times an #' inverse chi-squared with \eqn{kappa0} degrees of freedom. The joint prior is #' the product \eqn{g(var)g(mu|var)}. #' #' #' @param y A vector containing the data #' @param steps The number of iterations of Gibbs sampling to carry out #' @param type Either 'ind' for sampling from an independent conjugate prior or #' 'joint' for sampling from a joint conjugate prior. 'i' and 'j' can be used #' as compact notation #' @param \dots If type = 'ind' then the user can specify the prior for #' \eqn{\mu} with a parameter priorMu which can either be a single number m0, #' or m0 and n0. if m0 and n0 are not specified then m0 and n0 are 0 by #' default. The user can also specify priorVar, which if given, must be a #' vector with two elements s0 and kappa0. If s0 and kappa0 are not given then #' they are zero by default. If type = 'joint' then priorMu must be a vector of #' length two with elements m0 and sd0. The user can also specify priorVar, #' which if given, must be a vector with two elements s0 and kappa0. If s0 and #' kappa0 are not given then they are zero by default. #' @return A data frame containing three variables \tabular{rll}{ [1,] \tab mu #' \tab a sample from the posterior distribution of the mean \cr [2,] \tab sig #' \tab a sample from the posterior distribution of the standard deviation \cr #' [3,] \tab mu \tab a sample from the posterior distribution of the variance = #' sig^2 \cr } #' @author James M. Curran #' @examples #' #' ## firstly generate some random data #' mu = rnorm(1) #' sigma = rgamma(1,5,1) #' y = rnorm(100, mu, sigma) #' #' ## A \eqn{N(10,3^2)} prior for \eqn{\mu} and a 25 times inverse chi-squared #' ## with one degree of freedom prior for \eqn{\sigma^2} #' MCMCSampleInd = normGibbs(y, steps = 5000, priorMu = c(10,3), #' priorVar = c(25,1)) #' #' #' ## We can also use a joint conjugate prior for \eqn{\mu} and \eqn{\sigma^2}. #' ## This will be a \emph{normal}\eqn{(m,\sigma^2/n_0)} prior for \eqn{\mu} given #' ## the variance \eqn{\sigma^2}, and an \eqn{s0} times an \emph{inverse #' ## chi-squared} prior for \eqn{\sigma^2}. #' MCMCSampleJoint = normGibbs(y, steps = 5000, type = 'joint', #' priorMu = c(10,3), priorVar = c(25,1)) #' #' ## Now plot the results #' oldPar = par(mfrow=c(2,2)) #' #' plot(density(MCMCSampleInd$mu),xlab=expression(mu), main = #' 'Independent') #' abline(v=mu) #' plot(density(MCMCSampleInd$sig),xlab=expression(sig), main = #' 'Independent') #' abline(v=sigma) #' #' plot(density(MCMCSampleJoint$mu),xlab=expression(mu), main = #' 'Joint') #' abline(v=mu) #' plot(density(MCMCSampleJoint$sig),xlab=expression(sig), main = #' 'Joint') #' abline(v=sigma) #' #' #' @export normGibbs normGibbs = function(y, steps = 1000, type = "ind", ...) { if (length(grep("[Ii]", type)) > 0) { type = "ind" } else if (length(grep("[Jj]", type)) > 0) { type = "joint" } else { stop("Type must be ind or joint") } dots = list(...) 
if (type == "ind") { ## The dots can carry priorMu which, if specified can be a single number m0 or a vector ## m0, n0 m0 = 0 n0 = 0 muSpecified = FALSE if ("priorMu" %in% names(dots)) { if (length(dots$priorMu) == 2) { m0 = dots$priorMu[1] n0 = dots$priorMu[2] muSpecified = TRUE } else { m0 = dots$priorMu muSpecified = TRUE } } ## The dots can carry priorVar which, if specified must be a vector of length 2 s0 = 0 kappa0 = 0 nObs = length(y) yBar = mean(y) SSy = sum((y - yBar)^2) varSpecified = FALSE if ("priorVar" %in% names(dots)) { if (length(dots$priorVar) != 2) stop("priorVar must have two elements, s0 and kappa0") else { s0 = dots$priorVar[1] kappa0 = dots$priorVar[2] varSpecified = TRUE } } v0 = ifelse(varSpecified, s0/rchisq(1, df = kappa0), SSy/(nObs - 1)) kappa1 = kappa0 + nObs prec0 = 0 mu0 = yBar if (muSpecified) { prec0 = n0/v0 mu0 = m0 + rnorm(1) * sqrt(v0) } else { prec0 = 0 m0 = 0 mu0 = yBar } SSt = sum((y - mu0)^2) s1 = s0 + SSt Chi = rchisq(steps, df = kappa1) z = rnorm(steps) varSample = c(s1/Chi[1], rep(0, steps - 1)) prec0 = ifelse(muSpecified, n0/varSample[1], 0) precData = nObs/varSample[1] prec1 = prec0 + precData v1 = 1/prec1 m1 = m0 * prec0/prec1 + yBar * precData/prec1 muSample = rep(0, steps) muSample[1] = z[1] * sqrt(v1) + m1 for (i in 2:steps) { SSt = sum((y - muSample[i - 1])^2) s1 = s0 + SSt varSample[i] = s1/Chi[i] prec0 = ifelse(muSpecified, n0/varSample[i], 0) precData = nObs/varSample[i] prec1 = prec0 + precData v1 = 1/prec1 std1 = sqrt(v1) m1 = m0 * prec0/prec1 + yBar * precData/prec1 muSample[i] = z[i] * std1 + m1 } sigmaSample = sqrt(varSample) oldPar = par(mfrow = c(2, 2)) plot(ts(muSample), main = "Time series plot of mu", ylab = expression(mu)) plot(ts(varSample), main = "Time series plot of var", ylab = expression(sigma^2)) plot(ts(sigmaSample), main = "Time series plot of sigma", ylab = expression(sigma)) hist(muSample, main = "Histogram of mu", prob = TRUE) mx = mean(muSample) sx = sd(muSample) bds = mx + c(-5, 5) * sx xValues = seq(bds[1], bds[2], length = 200) yValues = dnorm(xValues, mx, sx) lines(xValues, yValues) par(oldPar) results.df = data.frame(mu = muSample, sig = sigmaSample, var = varSample) describe(results.df) invisible(results.df) } else { ## type = 'joint' ## The dots can carry priorMu which, if specified can be a single number m0 or a vector ## m0, n0 m0 = 0 n0 = 0 v0 = 0 muSpecified = FALSE if ("priorMu" %in% names(dots)) { if (length(dots$priorMu) == 2) { m0 = dots$priorMu[1] n0 = dots$priorMu[2] muSpecified = TRUE } else { stop("priorMu must contain a mean and an effective sample size") } } ## The dots can carry priorVar which, if specified must be a vector of length 2 s0 = 0 kappa0 = 0 varSpecified = FALSE v0 = 0 if ("priorVar" %in% names(dots)) { if (length(dots$priorVar) != 2) stop("priorVar must have two elements, s0 and kappa0") else { s0 = dots$priorVar[1] kappa0 = dots$priorVar[2] varSpecified = TRUE } } nObs = length(y) yBar = mean(y) SSy = sum((y - yBar)^2) v0 = ifelse(varSpecified, s0/rchisq(1, df = kappa0), SSy/(nObs - 1)) kappa1 = kappa0 + nObs prec0 = 0 mu0 = yBar muSample = rep(0, steps) if (muSpecified) { prec0 = n0/v0 mu0 = m0 + rnorm(1) * sqrt(v0) } else { m0 = 0 } SSt = sum((y - mu0)^2) s1 = s0 + SSt Chi = rchisq(steps, df = kappa1) varSample = c(s1/Chi[1], rep(0, steps - 1)) Z = rnorm(steps) prec0 = ifelse(muSpecified, n0/varSample[1], 0) precData = nObs/varSample[1] prec1 = prec0 + precData v1 = 1/prec1 m1 = prec0/prec1 * m0 + precData/prec1 * yBar muSample[1] = Z[1] * sqrt(v1) + m1 for (i in 2:steps) { 
SSt = sum((y - muSample[i - 1])^2) s1 = s0 + SSt varSample[i] = s1/Chi[i] prec0 = ifelse(muSpecified, n0/varSample[i], 0) precData = nObs/varSample[i] prec1 = prec0 + precData v1 = 1/prec1 m1 = prec0/prec1 * m0 + precData/prec1 * yBar muSample[i] = Z[i] * sqrt(v1) + m1 } sigmaSample = sqrt(varSample) oldPar = par(mfrow = c(2, 2)) plot(ts(muSample), main = "Time series plot of mu", ylab = expression(mu)) plot(ts(varSample), main = "Time series plot of var", ylab = expression(sigma^2)) plot(ts(sigmaSample), main = "Time series plot of sigma", ylab = expression(sigma)) hist(muSample, main = "Histogram of mu", prob = TRUE) mx = mean(muSample) sx = sd(muSample) bds = mx + c(-5, 5) * sx xValues = seq(bds[1], bds[2], length = 200) yValues = dnorm(xValues, mx, sx) lines(xValues, yValues) par(oldPar) results.df = data.frame(mu = muSample, sig = sigmaSample, var = varSample) describe(results.df) invisible(results.df) } }
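
## The Gibbs draws are serially correlated, so a common follow-up is to thin
## the chain before summarising it. Data and prior values are illustrative.
y = rnorm(50, mean = 10, sd = 2)
sample.df = normGibbs(y, steps = 5000, priorMu = c(10, 1), priorVar = c(4, 1))
thinned = thin(sample.df, 10)
credIntSamp(thinned$mu)   # interval estimate for the mean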
/scratch/gouwar.j/cran-all/cranData/Bolstad2/R/normGibbs.R
#' Sample from a normal mixture model using Metropolis-Hastings
#'
#' normMixMH uses the Metropolis-Hastings algorithm to draw a sample from a
#' univariate target distribution that is a mixture of two normal distributions
#' using an independent normal candidate density or a random walk normal
#' candidate density.
#'
#'
#' @param theta0 A vector of length two containing the mean and standard
#' deviation of the first component of the normal mixture
#' @param theta1 A vector of length two containing the mean and standard
#' deviation of the second component of the normal mixture
#' @param p A value between 0 and 1 representing the mixture proportion, so
#' that the true density is \eqn{p\times f(\mu_1,\sigma_1) + (1-p)\times
#' f(\mu_2,\sigma_2)}
#' @param candidate A vector of length two containing the mean and standard
#' deviation of the candidate density
#' @param steps The number of steps to be used in the Metropolis-Hastings
#' algorithm. steps must be greater than 100
#' @param type Either 'ind' or 'rw' depending on whether an independent
#' candidate density or random walk candidate density is to be used. 'i' and
#' 'r' may be used as alternative compact notation
#' @param startValue A starting value for the chain
#' @param randomSeed A seed for the random number generator. Only used when you
#' want the same sequence of random numbers in the chain
#' @return A vector containing a sample from the normal mixture distribution.
#' @examples
#'
#' ## Set up the normal mixture
#' theta0 = c(0,1)
#' theta1 = c(3,2)
#' p = 0.8
#'
#' ## Sample from an independent N(0,3^2) candidate density
#' candidate = c(0, 3)
#' MCMCsampleInd = normMixMH(theta0, theta1, p, candidate)
#'
#' ## If we wish to use the alternative random walk N(0, 0.5^2)
#' ## candidate density
#' candidate = c(0, 0.5)
#' MCMCsampleRW = normMixMH(theta0, theta1, p, candidate, type = 'rw')
#'
#' @export normMixMH
normMixMH = function(theta0, theta1, p, candidate, steps = 1000, type = "ind",
                     randomSeed = NULL, startValue = NULL) {

  if (steps < 100) {
    warning("Function should take at least 100 steps")
  }

  if (p <= 0 | p >= 1)
    stop("Mixture proportion p must be between 0 and 1")

  mu0 = theta0[1]
  sigma0 = theta0[2]
  mu1 = theta1[1]
  sigma1 = theta1[2]
  mu = candidate[1]
  sigma = candidate[2]

  ## all three standard deviations must be positive (the original tested
  ## sigma0 twice and omitted sigma1)
  if (any(c(sigma0, sigma1, sigma) <= 0))
    stop("All standard deviations must be strictly non-zero and positive")

  if (length(grep("[Ii]", type)) > 0) {
    type = "ind"
  } else if (length(grep("[Rr]", type)) > 0) {
    type = "rw"
  } else {
    stop("Type must be ind or rw")
  }

  theta = seq(from = min(mu0 - 3 * sigma0, mu1 - 3 * sigma1),
              to = max(mu0 + 3 * sigma0, mu1 + 3 * sigma1), by = 0.001)
  fx = p * dnorm(theta, mu0, sigma0) + (1 - p) * dnorm(theta, mu1, sigma1)

  targetSample = rep(startValue, steps)

  if (type == "rw") {
    if (!is.null(randomSeed))
      set.seed(randomSeed)

    z = rnorm(steps, mu, sigma)
    u = runif(steps)

    if (is.null(startValue))
      startValue = z[1]

    targetSample[1] = startValue

    g = rep(0, steps)
    proposal = rep(0, steps)
    alpha = rep(0, steps)

    k1 = p/sigma0 * exp(-0.5 * ((targetSample[1] - mu0)/sigma0)^2)
    k2 = (1 - p)/sigma1 * exp(-0.5 * ((targetSample[1] - mu1)/sigma1)^2)
    g[1] = k1 + k2
    i1 = 1

    for (n in 2:steps) {
      proposal[n] = targetSample[i1] + z[n]
      k1 = p/sigma0 * exp(-0.5 * ((proposal[n] - mu0)/sigma0)^2)
      k2 = (1 - p)/sigma1 * exp(-0.5 * ((proposal[n] - mu1)/sigma1)^2)
      g[n] = k1 + k2
      k3 = g[n]
      k4 = g[i1]
      alpha[n] = ifelse(k3/k4 > 1, 1, k3/k4)

      ## Metropolis-Hastings step
      if (u[n] >= alpha[n]) {
        ## reject
        targetSample[n] = targetSample[i1]
      } else {
        ## accept
        targetSample[n] = proposal[n]
        i1 = n
      }
    }
  } else {
    if (!is.null(randomSeed))
      set.seed(randomSeed)

    z = rnorm(steps, mu, sigma)
    u = runif(steps)

    if (is.null(startValue))
      startValue = z[1]

    density0 = dnorm(z, mu, sigma)
    density1 = dnorm(z, mu0, sigma0)
    density2 = dnorm(z, mu1, sigma1)
    densityMix = p * density1 + (1 - p) * density2

    alpha = rep(0, steps)
    targetSample[1] = startValue
    i1 = 1

    for (n in 2:steps) {
      alpha[n] = density0[i1] * densityMix[n]/(density0[n] * densityMix[i1])
      alpha[n] = ifelse(alpha[n] > 1, 1, alpha[n])

      ## Metropolis-Hastings step
      if (u[n] >= alpha[n]) {
        targetSample[n] = targetSample[i1]
      } else {
        targetSample[n] = z[n]
        i1 = n
      }
    }
  }

  oldPar = par(mfrow = c(1, 2), pty = "s")
  h = hist(targetSample, plot = FALSE)
  ymax = max(c(h$density, fx)) * 1.05

  hist(targetSample, prob = TRUE, col = "light blue", xlim = range(theta),
       ylim = c(0, ymax), main = "Sample from target density", xlab = "x",
       ylab = "Density")
  lines(theta, fx)
  box()

  plot(targetSample, type = "l", main = "", ylab = "Target Sample")
  par(oldPar)

  invisible(targetSample)
}
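
## A quick check (values are illustrative): the sample mean should be near the
## true mixture mean p * mu0 + (1 - p) * mu1 = 0.8 * 0 + 0.2 * 3 = 0.6.
theta0 = c(0, 1)
theta1 = c(3, 2)
MCMCsample = normMixMH(theta0, theta1, p = 0.8, candidate = c(0, 3), steps = 5000)
mean(MCMCsample)   # roughly 0.6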
/scratch/gouwar.j/cran-all/cranData/Bolstad2/R/normMixMH.R
#' Test a one sided hypothesis from a numerically specified posterior CDF or #' from a sample from the posterior #' #' Calculates the probability of a one sided null hypothesis from a numerically #' calculated posterior CDF or from a sample from the posterior. #' #' This function uses linear interpolation to calculate bounds for points that #' may not be specified by CDF #' #' @param theta0 the hypothesized value, i.e. H0: theta <= theta0 #' @param theta a sample of values from the posterior density, or, if cdf is #' not NULL then the values over which the the posterior CDF is specified #' @param cdf the values of the CDF, \eqn{F(\theta) = #' \int_{-\infty}^{\theta}f(t).df} where \eqn{f(t)} is the PDF. #' @param type the type of probability to return, 'lower' = Pr(theta <= theta0) #' or 'upper' = Pr(theta >= theta0). It is sufficient to use 'l' or 'u' #' @return a list containing the element prob which will be the upper or lower #' tail probability depending on type #' @examples #' #' ## commands for calculating a numerical posterior CDF. #' ## In this example, the likelihood is proportional to #' ## \eqn{\theta^{3/2}\times \exp(-\theta/4)} and a N(6, 9) prior is used. #' theta = seq(from = 0.001, to = 40, by = 0.001) #' prior = dnorm(theta,6,3) #' ppnLike = theta^1.5*exp(-theta/4) #' ppnPost = prior*ppnLike #' scaleFactor = sintegral(theta, ppnPost)$int #' posterior = ppnPost/scaleFactor #' cdf = sintegral(theta, posterior)$y #' pNull(15, theta, cdf) #' #' ## Use an inverse method to take a random sample of size 1000 #' ## from the posterior #' suppressWarnings({Finv = approxfun(cdf, theta)}) #' thetaSample = Finv(runif(1000)) #' pNull(15, thetaSample) #' #' @export pNull pNull = function(theta0, theta, cdf = NULL, type = "upper") { if (length(theta) < 10) stop("theta must have at least ten values") if (length(grep("^[lL]", type)) > 0) { type = "lower" } else if (length(grep("^[Uu]", type)) > 0) { type = "upper" } else { stop("type must be one of lower or upper") } Fx = ecdf(theta) if (!is.null(cdf)) { o = order(theta) if (any(theta[o] != theta)) { warning("theta is not in ascending order. This may cause problems") } suppressWarnings({Fx = approxfun(x = theta, y = cdf)}) } if (type == "lower") { prob = Fx(theta0) cat(paste("Posterior Pr(theta<=theta0) is ", prob, "\n", sep = "")) invisible(list(prob = prob)) } else { prob = 1 - Fx(theta0) cat(paste("Posterior Pr(theta>=theta0) is ", prob, "\n", sep = "")) invisible(list(prob = prob)) } }
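
## A quick check (values are illustrative): for a sample from a N(3,1)
## posterior, the lower-tail probability at 0 should be close to the exact
## value pnorm(-3).
thetaSample = rnorm(10000, mean = 3)
pNull(0, thetaSample, type = 'lower')
pnorm(-3)   # exact value, approximately 0.00135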
/scratch/gouwar.j/cran-all/cranData/Bolstad2/R/pNull.R
#' Test a one sided hypothesis from a numerically specified posterior CDF
#'
#' Calculates the probability of a one sided null hypothesis from a numerically
#' calculated posterior CDF.
#'
#' This function uses linear interpolation to calculate bounds for points that
#' may not be specified by CDF
#'
#' @param theta0 the hypothesized value, i.e. H0: theta <= theta0
#' @param theta the values over which the posterior CDF is specified
#' @param cdf the values of the CDF, \eqn{F(\theta) =
#' \int_{-\infty}^{\theta}f(t)\,dt} where \eqn{f(t)} is the PDF.
#' @param type the type of probability to return, 'lower' = Pr(theta <= theta0)
#' or 'upper' = Pr(theta >= theta0). It is sufficient to use 'l' or 'u'
#' @return a list containing the element prob which will be the upper or lower
#' tail probability depending on type
#' @examples
#'
#' ## commands for calculating a numerical posterior CDF.
#' ## In this example, the likelihood is proportional to
#' ## \eqn{\theta^{3/2}\times \exp(-\theta/4)} and a N(6, 9) prior is used.
#' theta = seq(from = 0.001, to = 40, by = 0.001)
#' prior = dnorm(theta,6,3)
#' ppnLike = theta^1.5*exp(-theta/4)
#' ppnPost = prior*ppnLike
#' scaleFactor = sintegral(theta, ppnPost)$int
#' posterior = ppnPost/scaleFactor
#' cdf = sintegral(theta, posterior)$y
#' pnullNum(1, theta, cdf)
#'
#' @export pnullNum
pnullNum = function(theta0, theta, cdf, type = "upper") {
  if (length(theta) < 10)
    stop("theta must have at least ten values")

  if (length(grep("^[lL]", type)) > 0) {
    type = "lower"
  } else if (length(grep("^[Uu]", type)) > 0) {
    type = "upper"
  } else {
    stop("type must be one of lower or upper")
  }

  Fx = approxfun(theta, cdf)

  if (type == "lower") {
    ## Pr(theta <= theta0) = F(theta0); the original computed the upper tail
    ## here, which contradicted the message printed below
    prob = Fx(theta0)
    cat(paste("Posterior Pr(theta<=theta0) is ", prob, "\n", sep = ""))
    invisible(list(prob = prob))
  } else {
    prob = 1 - Fx(theta0)
    cat(paste("Posterior Pr(theta>=theta0) is ", prob, "\n", sep = ""))
    invisible(list(prob = prob))
  }
}
/scratch/gouwar.j/cran-all/cranData/Bolstad2/R/pnullNum.R
#' Test a one sided hypothesis using a sample from a posterior density
#'
#' Calculates the probability of a one sided null hypothesis from a sample from
#' a posterior density.
#'
#' This function uses the empirical CDF of the sample to calculate the tail
#' probability
#'
#' @param theta a sample of values from a posterior density
#' @param theta0 the hypothesized value, i.e. H0: theta <= theta0
#' @param type the type of probability to return, 'lower' = Pr(theta <= theta0)
#' or 'upper' = Pr(theta >= theta0). It is sufficient to use 'l' or 'u'
#' @return a list containing the element prob which will be the upper or lower
#' tail probability depending on type
#' @examples
#'
#' ## The posterior density is N(3,1)
#' theta = rnorm(1000,3)
#'
#' ## test whether the true mean is greater than 0 (it is obviously!)
#' pnullSamp(theta)
#'
#' @export pnullSamp
pnullSamp = function(theta, theta0 = 0, type = "upper") {
  if (length(theta) < 10)
    stop("theta must have at least ten values")

  if (length(grep("^[lL]", type)) > 0) {
    type = "lower"
  } else if (length(grep("^[Uu]", type)) > 0) {
    type = "upper"
  } else {
    stop("type must be one of lower or upper")
  }

  Fx = ecdf(theta)

  if (type == "lower") {
    ## Pr(theta <= theta0) is the empirical CDF evaluated at theta0; the
    ## original computed the upper tail here, contradicting the printed message
    prob = Fx(theta0)
    cat(paste("Posterior Pr(theta<=theta0) is ", prob, "\n", sep = ""))
    invisible(list(prob = prob))
  } else {
    prob = 1 - Fx(theta0)
    cat(paste("Posterior Pr(theta>=theta0) is ", prob, "\n", sep = ""))
    invisible(list(prob = prob))
  }
}
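
## A quick check (values are illustrative): for draws from a N(3,1) posterior,
## the lower-tail probability at 0 should be near the exact value pnorm(-3).
theta = rnorm(10000, mean = 3)
pnullSamp(theta, theta0 = 0, type = 'lower')
pnorm(-3)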
/scratch/gouwar.j/cran-all/cranData/Bolstad2/R/pnullSamp.R
#' Numerical integration using Simpson's Rule
#'
#' Takes a vector of \eqn{x} values and a corresponding set of positive
#' \eqn{f(x)=y} values and evaluates the area under the curve:
#' \deqn{\int f(x) dx}
#'
#'
#' @param x a sequence of \eqn{x} values.
#' @param fx the value of the function to be integrated at \eqn{x}.
#' @param n.pts the number of points to be used in the integration.
#' @return returns a list with the following elements \item{x}{the x-values at
#' which the integral has been evaluated} \item{y}{the cumulative integral}
#' \item{int}{the value of the integral over the whole range}
#' @keywords misc
#' @examples
#'
#' ## integrate the normal density from -3 to 3
#' x = seq(-3, 3, length = 100)
#' fx = dnorm(x)
#' estimate = sintegral(x, fx)$int
#' true.val = diff(pnorm(c(-3, 3)))
#' cat(paste('Absolute error :', round(abs(estimate - true.val), 7), '\n'))
#' cat(paste('Relative percentage error :',
#'           100 * round((abs(estimate - true.val)/true.val), 6), '%\n'))
#'
#' @export sintegral
sintegral = function(x, fx, n.pts = 256) {
  ## numerically integrates fx over x using Simpson's rule
  ## x - a sequence of x values
  ## fx - the value of the function to be integrated at x
  ## n.pts - the number of points to be used in the integration

  n.x = length(x)

  if (n.x != length(fx))
    stop("Unequal input vector lengths")

  if (n.pts < 64)
    n.pts = 64

  if (length(x) > n.pts)
    n.pts = length(x)

  ## use linear approximation to get equally spaced x values
  ap = approx(x, fx, n = 2 * n.pts + 1)
  h = diff(ap$x)[1]

  integral = h * (ap$y[2 * (1:n.pts) - 1] + 4 * ap$y[2 * (1:n.pts)] +
                    ap$y[2 * (1:n.pts) + 1])/3

  list(x = ap$x[2 * (1:n.pts)], y = cumsum(integral), int = sum(integral))
}
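
## The $x and $y components form a numerical CDF, which can be compared
## against the exact normal CDF (an illustrative check):
x = seq(-3, 3, length = 1001)
S = sintegral(x, dnorm(x))
max(abs(S$y - (pnorm(S$x) - pnorm(-3))))   # approximation error is tiny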
/scratch/gouwar.j/cran-all/cranData/Bolstad2/R/sintegral.R
#' Thin an MCMC sample
#'
#' Thins the output from an MCMC process
#'
#' Note this function does not check to see if k is sensible.
#'
#' @param x A vector, matrix or data.frame containing output from an MCMC
#' sampling scheme
#' @param k An integer. This function takes every kth element from x
#' @return A thinned vector, matrix or data frame containing every kth element
#' of x.
#' @examples
#'
#' ## A blockwise Metropolis-Hastings chain of 1000 elements, thinned to
#' ## every 5th element
#'
#' MCMCSampleBW = bivnormMH(0.9, type = 'block')$targetSample
#' MCMCSampleBW = thin(MCMCSampleBW, 5)
#'
#' @export thin
thin = function(x, k) {
  ## returns every kth element of a vector, matrix, or data.frame
  if (is.vector(x)) {
    n = length(x)
    idx = which((1:n) %% k == 0)
    return(x[idx])
  } else if (is.matrix(x) | is.data.frame(x)) {
    nRow = nrow(x)
    idx = which((1:nRow) %% k == 0)
    return(x[idx, ])
  } else {
    stop("x must be a vector, a matrix or a data.frame")
  }
}
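
## thin() keeps every kth element, so thinning 1:20 with k = 5 returns
## c(5, 10, 15, 20):
thin(1:20, 5)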
/scratch/gouwar.j/cran-all/cranData/Bolstad2/R/thin.R
data(AidsSurvival.df)
y = AidsSurvival.df$censor
t = AidsSurvival.df$time
X = cbind(AidsSurvival.df$age, AidsSurvival.df$drug)
BayesCPH(y, t, X, plots = TRUE)
/scratch/gouwar.j/cran-all/cranData/Bolstad2/demo/BayesCPH.R
data(logisticTest.df)
BayesLogistic(logisticTest.df$y, logisticTest.df$x, plots = TRUE)
/scratch/gouwar.j/cran-all/cranData/Bolstad2/demo/BayesLogisticReg.R
data(poissonTest.df)
BayesPois(poissonTest.df$y, poissonTest.df$x, plots = TRUE)
/scratch/gouwar.j/cran-all/cranData/Bolstad2/demo/BayesPoissonReg.R
priorTau = list(tau0 = 0, v0 = 1000)
priorPsi = list(psi0 = 500, eta0 = 1)
priorVar = list(s0 = 500, kappa0 = 1)
priorBeta = list(b0 = c(0, 0), bMat = matrix(c(1000, 100, 100, 1000), ncol = 2))

data(hiermeanRegTest.df)
data.df = hiermeanRegTest.df
design = list(y = data.df$y, group = data.df$group, x = as.matrix(data.df[, 3:4]))
r = hierMeanReg(design, priorTau, priorPsi, priorVar, priorBeta)
/scratch/gouwar.j/cran-all/cranData/Bolstad2/demo/hiermeanReg.R
source("normGibbs.r") y = scan("Example15.csv") # debug(normGibbs) normGibbs(y, priorMu = c(20, 1), priorVar = c(11.37, 1))
/scratch/gouwar.j/cran-all/cranData/Bolstad2/demo/normGibbs.R
#'BoltzMM: A package for probability computation, data generation, and model estimation of fully-visible Boltzmann machines. #' #'The BoltzMM package allows for computation of probability mass functions of fully-visible Boltzmann machines via \code{pfvbm} and \code{allpfvbm}. #'Random data can be generated using \code{rfvbm}. Maximum pseudolikelihood estimation of parameters via the MM algorithm can be conducted using \code{fitfvbm}. #'Computation of partial derivatives and Hessians can be performed via \code{fvbmpartiald} and \code{fvbmHessian}. #'Covariance estimation and normal standard errors can be computed using \code{fvbmcov} and \code{fvbmstderr}. #' #'@author Andrew T. Jones and Hien D. Nguyen #'@references H.D. Nguyen and I.A. Wood (2016), Asymptotic normality of the maximum pseudolikelihood estimator for fully-visible Boltzmann machines, IEEE Transactions on Neural Networks and Learning Systems, vol. 27, pp. 897-902. #' #' H.D. Nguyen and I.A. Wood (2016), A block successive lower-bound maximization algorithm for the maximum pseudolikelihood estimation of fully visible Boltzmann machines, Neural Computation, vol 28, pp. 485-492. #' #'@docType package #'@name BoltzMM NULL #'@importFrom stats pnorm NULL #'Standard errors for the parameter elements of a fitted fully-visible Boltzmann machine. #'@description Computes the normal approximation standard errors from the sandwich estimator of the covariance matrix for a maximum pseudolikelihood estimated fully-visible Boltzmann machine. #'@param data An N by n matrix, where each of the N rows contains a length n string of spin variables (i.e. each element is -1 or 1). #'@param covarmat A covariance matrix generated from \code{fvbmcov}. #'@return A list containing 2 objects: a vector containing the standard errors corresponding to the bias parameters \code{bvec_se}, and a matrix containing the standard errors corresponding to the interaction parameters \code{Mmat_se}. #'@references H.D. Nguyen and I.A. Wood (2016), Asymptotic normality of the maximum pseudolikelihood estimator for fully-visible Boltzmann machines, IEEE Transactions on Neural Networks and Learning Systems, vol. 27, pp. 897-902. #'@author Andrew T. Jones and Hien D. Nguyen #'@examples #'# Generate num=1000 random strings of n=3 binary spin variables under bvec and Mmat. #'num <- 1000 #'bvec <- c(0,0.5,0.25) #'Mmat <- matrix(0.1,3,3) - diag(0.1,3,3) #'data <- rfvbm(num,bvec,Mmat) #'# Fit a fully visible Boltzmann machine to data, starting from parameters bvec and Mmat. #'model <- fitfvbm(data,bvec,Mmat) #'# Compute the sandwich covariance matrix using the data and the model. #'covarmat <- fvbmcov(data,model,fvbmHess) #'# Compute the standard errors of the parameter elements according to a normal approximation. #'fvbmstderr(data,covarmat) #'@export fvbmstderr <- function(data,covarmat) { N <- dim(data)[1] D <- dim(data)[2] stderr <- sqrt(diag(covarmat))/sqrt(N) bvec <- stderr[c(1:D)] Mmat <- matrix(0,D,D) Mmat[lower.tri(Mmat)] <- stderr[-c(1:D)] Mmat <- Mmat + t(Mmat) return(list(bvec_se = bvec, Mmat_se = Mmat)) } #'Hessian of the log-pseudolikelihood function for a fitted fully-visible Boltzmann machine. #'@description Computes the Hessian with respect to all unique parameter elements of the bias vector and interaction matrix of a fully-visible Boltzmann machine, for some random length n string of spin variables (i.e. each element is -1 or 1) and some fitted parameter values. 
#'@param data An N by n matrix, where each of the N rows contains a length n string of spin variables (i.e. each element is -1 or 1). #'@param model List generated from \code{fitfvbm}. #'@return The n+choose(n,2) by n+choose(n,2) Hessian matrix, summed over the N rows of \code{data} and evaluated at the fitted parameter values provided in \code{model}. Each row (column) is a unique element of the bias vector and interaction matrix. The rows are arranged in lexicographical order with the bias elements first, followed by the interaction elements. For example, if n=3, the order would be bias[1], bias[2] bias[3], interaction[1,2], interaction[1,3], and interaction[2,3]. #'@references H.D. Nguyen and I.A. Wood (2016), Asymptotic normality of the maximum pseudolikelihood estimator for fully-visible Boltzmann machines, IEEE Transactions on Neural Networks and Learning Systems, vol. 27, pp. 897-902. #'@author Andrew T. Jones and Hien D. Nguyen #'@examples # Generate num=1000 random strings of n=3 binary spin variables under bvec and Mmat. #'num <- 1000 #'bvec <- c(0,0.5,0.25) #'Mmat <- matrix(0.1,3,3) - diag(0.1,3,3) #'data <- rfvbm(num,bvec,Mmat) #'# Fit a fully visible Boltzmann machine to data, starting from parameters bvec and Mmat. #'model <- fitfvbm(data,bvec,Mmat) #'# Compute the Hessian matrix summed over all num rows of data. #'fvbmHess(data,model) #'@export fvbmHess <- function(data, model) { bvec <- model[[2]] Mmat <- model[[3]] N <- dim(data)[1] D <- length(bvec) HessComps <- list() HessComps[[1]] <- matrix(0,D+1,D+1) for (jj in 1:D) { HessComps[[jj]] <- matrix(0,D+1,D+1) for (ii in 1:N) { x_bar <- as.matrix(c(1,data[ii,]),D+1,1) HessComps[[jj]] <- HessComps[[jj]] - x_bar%*%t(x_bar)/ cosh(sum(Mmat[jj,]*data[ii,])+bvec[jj])^2 } } Index <- matrix(0,D,D) Index[lower.tri(Index)] <- 1:(D*(D-1)/2) Index <- Index + t(Index) BigHess <- matrix(0,D+D*(D-1)/2,D+D*(D-1)/2) for (jj in 1:D) { WHICH <- which(Index[lower.tri(Index)]%in%Index[jj,]) #Index[lower.tri(Index)] is 1:(D*(D-1)/2) #Index[jj,] is row j of Index NonZero <- HessComps[[jj]][-c(jj+1),] NonZero <- NonZero[,-c(jj+1)] BigHess[c(jj,D+WHICH),c(jj,D+WHICH)] <- BigHess[c(jj,D+WHICH),c(jj,D+WHICH)] + NonZero } return(BigHess) } #'Hypothesis testing for a fully-visible Boltzmann machine. #'@description Tests the hypothesis that the true bias and interaction parameter values are those in \code{nullmodel}, given \code{data} and \code{model}. #'@param data An N by n matrix, where each of the N rows contains a length n string of spin variables (i.e. each element is -1 or 1). #'@param model List generated from \code{fitfvbm}. #'@param nullmodel A list containing two elements: a vector of length n \code{bvec}, and an n by n matrix \code{Mmat}. A list generated by \code{fitfvbm} is also sufficient. #'@return A list containing 4 objects: a vector containing the z-scores corresponding to the bias parameters \code{bvec_z},a vector containing the p-values corresponding to the bias parameters \code{bvec_p},a matrix containing the z-scores corresponding to the interaction parameters \code{Mmat_z}, and a matrix containing the standard errors corresponding to the interaction parameters \code{Mmat_p}. #'@references H.D. Nguyen and I.A. Wood (2016), Asymptotic normality of the maximum pseudolikelihood estimator for fully-visible Boltzmann machines, IEEE Transactions on Neural Networks and Learning Systems, vol. 27, pp. 897-902. #'@author Andrew T. Jones and Hien D. 
Nguyen #'@examples #'# Generate num=1000 random strings of n=3 binary spin variables under bvec and Mmat. #'num <- 1000; bvec <- c(0,0.5,0.25); Mmat <- matrix(0.1,3,3) - diag(0.1,3,3); #'data <- rfvbm(num,bvec,Mmat) #'# Fit a fully visible Boltzmann machine to data, starting from parameters bvec and Mmat. #'model <- fitfvbm(data,bvec,Mmat) #' #'#Propose a null hypothesis model #'nullmodel <- list(bvec = c(0,0,0), Mmat = matrix(0,3,3)) #' #'# Compute z-scores #'fvbmtests(data,model,nullmodel) #'@export fvbmtests <- function(data,model,nullmodel) { #Compute the z-scores #get Hessian Hess <- fvbmHess(data,model) #get Covaraince matrix covmat <- fvbmcov(data,model,fvbmHess) #get standard errors stderr <- fvbmstderr(data,covmat) #z-scores for bias parameters zb <- (model$bvec-nullmodel$bvec)/stderr$bvec_se #z-scores for interaction parameters zM <- (model$Mmat-nullmodel$Mmat)/stderr$Mmat_se diag(zM)<-NA #p-values from z-scores pvalb <- 2*pnorm(-abs(zb)) pvalM <- 2*pnorm(-abs(zM)) diag(pvalM)<-NA #return list return(list(bvec_z = zb, bvec_p = pvalb, Mmat_z = zM, Mmat_p = pvalM)) } #'Marginal probability function for a fully-visible Boltzmann machine. #'@description Computes the marginal probabilities (for values = +1 in each coordinate) under under some specified bias vector and interaction matrix, specified by \code{bvec} and \code{Mmat}, respectively. #'@param bvec Vector of length n containing real valued bias parameters. #'@param Mmat Symmetric n by n matrix, with zeros along the diagonal, containing the interaction parameters. #'@return Vector of length n containing the marginal probabilities of +1 in each coordinate. #'@references H.D. Nguyen and I.A. Wood (2016), Asymptotic normality of the maximum pseudolikelihood estimator for fully-visible Boltzmann machines, IEEE Transactions on Neural Networks and Learning Systems, vol. 27, pp. 897-902. #'@author Andrew T. Jones and Hien D. Nguyen #'@examples #'#Compute the marginal probabilities under bvec and Mmat. #'# Set the parameter values #'bvec <- c(0,0.5,0.25) #'Mmat <- matrix(0.1,3,3) - diag(0.1,3,3) # Compute the marginal probabilities #'marginpfvbm(bvec,Mmat) #'@export marginpfvbm <- function(bvec, Mmat) { # Get dimension of vector n <- length(bvec) ## Get all strings of length n and probabilities strings <- expand.grid(rep(list(0:1),n)) allprob <-allpfvbm(bvec,Mmat) #sum over allprob margins <- array(NA,n) for (i in seq_len(n)){ margins[i] <- sum(allprob[which(strings[,i]==1)]) } #return marginal probability vector return(margins) } #'@title Senate voting data from the 45th Australian Parliament. #' #'@description A dataset he data from the first sitting of the Senate of the 45th #' Australian Parliament, until the final sitting of the year 2016. The first division during #'this period was conducted on the 31st of August 2016, and the last division was performed #'on the 1st of December 2016. In total, 147 divisions were performed during this period. #' #'Each row represents a division(vote), each column is a party or independent. #'Data is either "Yes" or "No" depending on the vote. Absences and abstentions are left as NA. #'See \url{https://hal.archives-ouvertes.fr/hal-01927188v1} for details of data preparation. #' #'@source \url{www.aph.gov.au/Parliamentary_Business/Statistics/Senate_StatsNet/General/divisions} #'@docType data #'@keywords datasets #'@name senate #'@usage data(senate) #'@format A data frame with 147 rows (votes) and 9 variables (parties). #'@author Jessica J. Bagnall #'@examples #'dim(senate) NULL
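
## A quick consistency check on the probability utilities (parameter values
## are illustrative): the full pmf from allpfvbm must sum to one, pfvbm gives
## the probability of a single string, and marginpfvbm returns P(spin = +1)
## for each coordinate.
bvec <- c(0, 0.5)
Mmat <- matrix(c(0, 0.1, 0.1, 0), 2, 2)
sum(allpfvbm(bvec, Mmat))     # should be exactly 1
pfvbm(c(-1, -1), bvec, Mmat)  # probability of the single string (-1,-1)
marginpfvbm(bvec, Mmat)       # marginal probabilities of +1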
/scratch/gouwar.j/cran-all/cranData/BoltzMM/R/BoltzMM.R
# Generated by using Rcpp::compileAttributes() -> do not edit by hand # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 #' @importFrom Rcpp sourceCpp #' @useDynLib BoltzMM NULL #'Probability mass function of a fully-visible Boltzmann machine evaluated for an individual vector. #'@description Compute the probability of a string of n>1 binary spin variables (i.e. each element is -1 or 1) arising from a fully-visible Boltzmann machine with some specified bias vector and interaction matrix. #'@param xval Vector of length n containing binary spin variables. #'@param bvec Vector of length n containing real valued bias parameters. #'@param Mmat Symmetric n by n matrix, with zeros along the diagonal, containing the interaction parameters. #'@return The probability of the random string \code{xval} under a fully-visible Boltzmann machine with bias vector \code{bvec} and interaction matrix \code{Mmat}. #'@references H.D. Nguyen and I.A. Wood (2016), Asymptotic normality of the maximum pseudolikelihood estimator for fully-visible Boltzmann machines, IEEE Transactions on Neural Networks and Learning Systems, vol. 27, pp. 897-902. #'@author Andrew T. Jones and Hien D. Nguyen #'@examples # Compute the probability of the vector xval=(-1,1,-1), under bvec and Mmat. #'xval <- c(-1,1,-1) #'bvec <- c(0,0.5,0.25) #'Mmat <- matrix(0.1,3,3) - diag(0.1,3,3) #'pfvbm(xval,bvec,Mmat) #'@export pfvbm <- function(xval, bvec, Mmat) { .Call('_BoltzMM_pfvbm', PACKAGE = 'BoltzMM', xval, bvec, Mmat) } #'Probability mass function of a fully-visible Boltzmann machine evaluated for all possible vectors. #'@description Compute the probability of all 2^n strings of n>1 binary spin variables (i.e. each element is -1 or 1) arising from a fully-visible Boltzmann machine with some specified bias vector and interaction matrix. #'@param bvec Vector of length n containing real valued bias parameters. #'@param Mmat Symmetric n by n matrix, with zeros along the diagonal, containing the interaction parameters. #'@return A vector of the probabilities of all 2^n binary spin vectors under a fully-visible Boltzmann machine with bias vector \code{bvec} and interaction matrix \code{Mmat}. Probabilities are reported in ascending order of the binary strings; i.e for n=2 the reporting order is (-1,1), (-1,1), (1,-1), and (1,1). #'@references H.D. Nguyen and I.A. Wood (2016), Asymptotic normality of the maximum pseudolikelihood estimator for fully-visible Boltzmann machines, IEEE Transactions on Neural Networks and Learning Systems, vol. 27, pp. 897-902. #'@author Andrew T. Jones and Hien D. Nguyen #'@examples # Compute the probability of every length n=3 binary spin vector under bvec and Mmat. #'bvec <- c(0,0.5,0.25) #'Mmat <- matrix(0.1,3,3) - diag(0.1,3,3) #'allpfvbm(bvec,Mmat) #'@export allpfvbm <- function(bvec, Mmat) { .Call('_BoltzMM_allpfvbm', PACKAGE = 'BoltzMM', bvec, Mmat) } #'Random data generation from a fully-visible Boltzmann machine. #'@description Generate N random strings of n>1 binary spin variables (i.e. each element is -1 or 1) arising from a fully-visible Boltzmann machine with some specified bias vector and interaction matrix. #'@param num Number N of random strings to be generated. #'@param bvec Vector of length n containing real valued bias parameters. #'@param Mmat Symmetric n by n matrix, with zeros along the diagonal, containing the interaction parameters. 
#'@return An N by n matrix, where each row contains a random spin variable string from a fully-visible Boltzmann machine with bias vector \code{bvec} and interaction matrix \code{Mmat}.
#'@note The function \code{allpfvbm} must be called each time this function is run. Thus, it is much more efficient to generate N strings all at once than to generate strings one at a time.
#'@references H.D. Nguyen and I.A. Wood (2016), Asymptotic normality of the maximum pseudolikelihood estimator for fully-visible Boltzmann machines, IEEE Transactions on Neural Networks and Learning Systems, vol. 27, pp. 897-902.
#'@author Andrew T. Jones and Hien D. Nguyen
#'@examples # Generate num=10 random strings of n=3 binary spin variables under bvec and Mmat.
#'num <- 10
#'bvec <- c(0,0.5,0.25)
#'Mmat <- matrix(0.1,3,3) - diag(0.1,3,3)
#'rfvbm(num,bvec,Mmat)
#'@export
rfvbm <- function(num, bvec, Mmat) {
    .Call('_BoltzMM_rfvbm', PACKAGE = 'BoltzMM', num, bvec, Mmat)
}

#'Maximum pseudolikelihood estimation of a fully-visible Boltzmann machine.
#'@description Estimates the bias vector and interaction matrix of a fully-visible Boltzmann machine via maximum pseudolikelihood estimation using an MM algorithm.
#'@param data An N by n matrix, where each of the N rows contains a length n string of spin variables (i.e. each element is -1 or 1).
#'@param bvec Initial estimate for a vector of length n containing real valued bias parameters.
#'@param Mmat Initial estimate for a symmetric n by n matrix, with zeros along the diagonal, containing the interaction parameters.
#'@param delta_crit Real threshold value for the convergence criterion, based on the relative change in the Euclidean distance of parameter estimates from consecutive iterations.
#'@param max_it Integer value indicating the maximum number of iterations that the algorithm is to run for.
#'@return A list containing 4 objects: the final log-pseudolikelihood value \code{pll}, a vector containing the estimate of the bias parameters \code{bvec}, a matrix containing the estimate of the interaction parameters \code{Mmat}, and the number of algorithm iterations \code{itt}.
#'@references H.D. Nguyen and I.A. Wood (2016), A block successive lower-bound maximization algorithm for the maximum pseudolikelihood estimation of fully visible Boltzmann machines, Neural Computation, vol. 28, pp. 485-492.
#'@author Andrew T. Jones and Hien D. Nguyen
#'@examples # Generate num=1000 random strings of n=3 binary spin variables under bvec and Mmat.
#'num <- 1000
#'bvec <- c(0,0.5,0.25)
#'Mmat <- matrix(0.1,3,3) - diag(0.1,3,3)
#'data <- rfvbm(num,bvec,Mmat)
#'# Fit a fully visible Boltzmann machine to data, starting from parameters bvec and Mmat.
#'fitfvbm(data,bvec,Mmat)
#'@export
fitfvbm <- function(data, bvec, Mmat, delta_crit = 0.001, max_it = 1000L) {
    .Call('_BoltzMM_fitfvbm', PACKAGE = 'BoltzMM', data, bvec, Mmat, delta_crit, max_it)
}

#'Partial derivatives of the log-pseudolikelihood function for a fitted fully-visible Boltzmann machine.
#'@description Computes the partial derivatives for all unique parameter elements of the bias vector and interaction matrix of a fully-visible Boltzmann machine, for some random length n string of spin variables (i.e. each element is -1 or 1) and some fitted parameter values.
#'@param data Vector of length n containing binary spin variables.
#'@param model List generated from \code{fitfvbm}.
#'@return A list containing 2 objects: a vector containing the partial derivatives corresponding to the bias parameters \code{bvec}, and a matrix containing the partial derivatives corresponding to the interaction parameters \code{Mmat}.
#'@references H.D. Nguyen and I.A. Wood (2016), Asymptotic normality of the maximum pseudolikelihood estimator for fully-visible Boltzmann machines, IEEE Transactions on Neural Networks and Learning Systems, vol. 27, pp. 897-902.
#'@author Andrew T. Jones and Hien D. Nguyen
#'@examples # Generate num=1000 random strings of n=3 binary spin variables under bvec and Mmat.
#'num <- 1000
#'bvec <- c(0,0.5,0.25)
#'Mmat <- matrix(0.1,3,3) - diag(0.1,3,3)
#'data <- rfvbm(num,bvec,Mmat)
#'# Fit a fully visible Boltzmann machine to data, starting from parameters bvec and Mmat.
#'model <- fitfvbm(data,bvec,Mmat)
#'# Compute the partial derivatives evaluated at the first observation of data.
#'fvbmpartiald(data,model)
#'@export
fvbmpartiald <- function(data, model) {
    .Call('_BoltzMM_fvbmpartiald', PACKAGE = 'BoltzMM', data, model)
}

#'Sandwich estimator of the covariance matrix for a fitted fully-visible Boltzmann machine.
#'@description Computes the sandwich estimator of the covariance matrix for a maximum pseudolikelihood estimated fully-visible Boltzmann machine.
#'@param data An N by n matrix, where each of the N rows contains a length n string of spin variables (i.e. each element is -1 or 1).
#'@param model List generated from \code{fitfvbm}.
#'@param fvbmHess A function that computes the Hessian of the parameter elements. Currently, the only implemented method is the default \code{fvbmHess} function.
#'@return The n+choose(n,2) by n+choose(n,2) sandwich covariance matrix, estimated using \code{data} and evaluated at the fitted parameter values provided in \code{model}. Each row (column) is a unique element of the bias vector and interaction matrix. The rows are arranged in lexicographical order with the bias elements first, followed by the interaction elements. For example, if n=3, the order would be bias[1], bias[2], bias[3], interaction[1,2], interaction[1,3], and interaction[2,3].
#'@references H.D. Nguyen and I.A. Wood (2016), Asymptotic normality of the maximum pseudolikelihood estimator for fully-visible Boltzmann machines, IEEE Transactions on Neural Networks and Learning Systems, vol. 27, pp. 897-902.
#'@author Andrew T. Jones and Hien D. Nguyen
#'@examples # Generate num=1000 random strings of n=3 binary spin variables under bvec and Mmat.
#'num <- 1000
#'bvec <- c(0,0.5,0.25)
#'Mmat <- matrix(0.1,3,3) - diag(0.1,3,3)
#'data <- rfvbm(num,bvec,Mmat)
#'# Fit a fully visible Boltzmann machine to data, starting from parameters bvec and Mmat.
#'model <- fitfvbm(data,bvec,Mmat)
#'# Compute the sandwich covariance matrix using the data and the model.
#'fvbmcov(data,model,fvbmHess)
#'@export
fvbmcov <- function(data, model, fvbmHess) {
    .Call('_BoltzMM_fvbmcov', PACKAGE = 'BoltzMM', data, model, fvbmHess)
}
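# Illustrative consistency check (added for exposition; not generated by
# Rcpp::compileAttributes()): the 2^n probabilities returned by allpfvbm()
# should sum to one, and the empirical string frequencies from rfvbm() should
# approximate them. The string-to-index mapping below assumes the documented
# ascending ordering of strings with the first coordinate most significant.
bvec <- c(0, 0.5, 0.25)
Mmat <- matrix(0.1, 3, 3) - diag(0.1, 3, 3)
probs <- as.vector(allpfvbm(bvec, Mmat))
sum(probs)                                  # should be (numerically) 1
set.seed(1)
draws <- rfvbm(10000, bvec, Mmat)
bits <- (draws + 1) / 2                     # map spins {-1,1} to bits {0,1}
idx <- as.vector(1 + bits %*% 2^((ncol(draws) - 1):0))
cbind(theoretical = probs,
      empirical = tabulate(idx, nbins = length(probs)) / nrow(draws))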
/scratch/gouwar.j/cran-all/cranData/BoltzMM/R/RcppExports.R
# Note: relies on qvalue() and pi0est() from the 'qvalue' package (Bioconductor),
# which must be available when Bon_EV() is called.
Bon_EV <- function(pvalue, alpha) {
  # Benjamini-Hochberg adjusted p-values
  BH <- p.adjust(pvalue, "BH")
  # Storey's q-values
  qobj <- qvalue(p = pvalue)
  qvalues <- qobj$qvalues
  ngene <- length(pvalue)
  # Bon-EV adjustment: scale each raw p-value by the estimated number of true
  # nulls (ngene * pi0) over the number of BH rejections at level alpha
  adjpv <- ngene * (pi0est(pvalue)$pi0) / sum(BH <= alpha, na.rm = TRUE) * pvalue
  # Cap the adjusted p-values at 1
  new_MTP_adjp <- pmin(adjpv, 1)
  mylist <- list(raw_P_value = pvalue,
                 BH_adjp = BH,
                 Storey_adjp = qvalues,
                 Bon_EV_adjp = new_MTP_adjp)
  return(mylist)
}
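# Illustrative usage sketch (added for exposition): simulated p-values with
# 200 null tests (uniform) and 50 signal tests drawn from Beta(0.2, 1). The
# seed and the distributional choices are arbitrary assumptions for this demo.
library(qvalue)   # Bon_EV() relies on qvalue() and pi0est() from this package
set.seed(123)
pv <- c(runif(200), rbeta(50, 0.2, 1))
res <- Bon_EV(pv, alpha = 0.05)
sapply(res, function(p) sum(p <= 0.05))   # rejections under each method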
/scratch/gouwar.j/cran-all/cranData/BonEV/R/Bon_EV.R
### R code from vignette source 'BonEV.Rnw'

###################################################
### code chunk number 1: BonEV.Rnw:40-41 (eval = FALSE)
###################################################
## install.packages("BonEV")


###################################################
### code chunk number 2: BonEV.Rnw:47-48 (eval = FALSE)
###################################################
## library(BonEV)


###################################################
### code chunk number 3: BonEV.Rnw:59-75
###################################################
library(qvalue)
data(hedenfalk)
library(BonEV)
pvalues <- hedenfalk$p
adjp <- Bon_EV(pvalues, 0.05)
summary(adjp)
results <- cbind(adjp$raw_P_value, adjp$BH_adjp, adjp$Storey_adjp, adjp$Bon_EV_adjp)
colnames(results) <- c("raw_P_value", "BH_adjp", "Storey_adjp", "Bon_EV_adjp")
results[1:20,]
summary(results)

## Compare with the Benjamini-Hochberg and Storey's q-value procedures
sum(adjp$raw_P_value <= 0.05)
sum(adjp$BH_adjp <= 0.05)
sum(adjp$Storey_adjp <= 0.05)
sum(adjp$Bon_EV_adjp <= 0.05)
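## The Bon_EV adjustment depends on the alpha passed to Bon_EV(), since alpha
## enters through the count of BH rejections. A quick sensitivity sketch
## (an illustrative addition, not part of the original vignette code):
alphas <- c(0.01, 0.05, 0.10)
sapply(alphas, function(a) sum(Bon_EV(pvalues, a)$Bon_EV_adjp <= a))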
/scratch/gouwar.j/cran-all/cranData/BonEV/inst/doc/BonEV.R
#' AccrInt (calculation of accrued interest) #' #' \bold{AccrInt} returns the amount of interest accrued from some starting date #' up to some end date and the number of days of interest on the end date. #' #' \tabular{cl}{ #' \code{DCC} \tab required input \cr #' _____________________ \tab _____________________________________________ \cr #' 1,3,5,6,8,10,11,12,15,16 \tab \code{StartDate}, \code{EndDate}, #' \code{Coup}, \code{DCC}, \code{RV} \cr #' 2,14 \tab \code{StartDate}, \code{EndDate}, #' \code{Coup}, \code{DCC}, \code{RV}, #' \code{CpY}, \code{EOM} \cr #' 4 \tab \code{StartDate}, \code{EndDate}, #' \code{Coup}, \code{DCC}, \code{RV}, #' \code{CpY}, \code{EOM}, #' \code{YearNCP} \cr #' 7 \tab \code{StartDate}, \code{EndDate}, #' \code{Coup}, \code{DCC}, \code{RV}, #' \code{Mat} \cr #' 9,13 \tab \code{StartDate}, \code{EndDate}, #' \code{Coup}, \code{DCC}, \code{RV}, #' \code{EOM} \cr #' =================== \tab ======================================== \cr #' } #' #' Assuming that there is no accrued interest on \code{StartDate} the function #' \bold{AccrInt} computes the amount of interest accrued up to \code{EndDate} #' under the terms of the specified day count convention \code{DCC}. The function #' returns a list of two numerics \code{AccrInt}, and \code{DaysAccrued}. #' If \code{InputCheck = 1} the input variables are checked for the correct #' format. The core feature of this function is the proper handling of the #' \emph{\bold{day count conventions}} presented below. The type of the day #' count convention determines the amount of the accrued interest that has #' to be paid by the buyer in the secondary market if the settlement #' takes place between two coupon payment dates. #' #' \itemize{ #' \item Many different day count conventions are used in the market. #' Since there is no central authority that develops these conventions #' there is no standardized nomenclature. The tables below provide #' alternative names that often are used for the respective conventions. #' Type \code{View(List.DCC)} for a list of the day count methods #' currently implemented. #' \item Detailed descriptions of the conventions and their application may #' be found in Djatschenko (2018), and the other provided references. 
#' } #' #' \bold{Day Count Conventions} #' #' \describe{ #' \item{-}{ #' \tabular{cccl}{ #' \tab \tab \tab \bold{Actual/Actual (ISDA)} \cr #' ___________\tab | \tab ___ \tab ________________________________________________ \cr #' DCC \tab | \tab = \tab 1 \cr #' ___________\tab | \tab ___ \tab ________________________________________________ \cr #' other names \tab | \tab \tab Actual/Actual, Act/Act, Act/Act (ISDA) \cr #' ___________\tab | \tab ___ \tab ________________________________________________ \cr #' references \tab | \tab \tab ISDA (1998); ISDA (2006) section 4.16 (b) \cr #' ==========\tab | \tab === \tab =========================================== \cr #' } #' } #' #' \item{-}{ #' \tabular{cccl}{ #' \tab \tab \tab \bold{Actual/Actual (ICMA)} \cr #' ___________\tab | \tab ___ \tab ________________________________________________ \cr #' DCC \tab | \tab = \tab 2 \cr #' ___________\tab | \tab ___ \tab ________________________________________________ \cr #' other names \tab | \tab \tab Actual/Actual (ISMA), Act/Act (ISMA), \cr #' \tab | \tab \tab Act/Act (ICMA), ISMA-99 \cr #' ___________\tab | \tab ___ \tab ________________________________________________ \cr #' references \tab | \tab \tab ICMA Rule 251; ISDA (2006) section 4.16 (c); \cr #' \tab | \tab \tab SWX (2003) \cr #' ==========\tab | \tab === \tab =========================================== \cr #' } #' } #' #' \item{-}{ #' \tabular{cccl}{ #' \tab \tab \tab \bold{Actual/Actual (AFB)} \cr #' ___________\tab | \tab ___ \tab ________________________________________________ \cr #' DCC \tab | \tab = \tab 3 \cr #' ___________\tab | \tab ___ \tab ________________________________________________ \cr #' other names \tab | \tab \tab AFB Method, Actual/Actual (Euro), \cr #' \tab | \tab \tab Actual/Actual AFB FBF, ACT/365-366 (leap day) \cr #' ___________\tab | \tab ___ \tab ________________________________________________ \cr #' references \tab | \tab \tab ISDA (1998); EBF (2004) \cr #' ==========\tab | \tab === \tab =========================================== \cr #' } #' } #' #' \item{-}{ #' \tabular{cccl}{ #' \tab \tab \tab \bold{Actual/365L} \cr #' ___________\tab | \tab ___ \tab ________________________________________________ \cr #' DCC \tab | \tab = \tab 4 \cr #' ___________\tab | \tab ___ \tab ________________________________________________ \cr #' other names \tab | \tab \tab Act/365-366, ISMA-Year \cr #' ___________\tab | \tab ___ \tab ________________________________________________ \cr #' references \tab | \tab \tab ICMA Rule 251; SWX (2003) \cr #' ==========\tab | \tab === \tab =========================================== \cr #' } #' } #' #' \item{-}{ #' \tabular{cccl}{ #' \tab \tab \tab \bold{30/360} \cr #' ___________\tab | \tab ___ \tab ________________________________________________ \cr #' DCC \tab | \tab = \tab 5 \cr #' ___________\tab | \tab ___ \tab ________________________________________________ \cr #' other names \tab | \tab \tab 360/360, Bond Basis, 30/360 ISDA \cr #' ___________\tab | \tab ___ \tab ________________________________________________ \cr #' references \tab | \tab \tab ISDA (2006) section 4.16 (f); \cr #' \tab | \tab \tab MSRB (2017) Rule G-33 \cr #' ==========\tab | \tab === \tab =========================================== \cr #' } #' } #' #' \item{-}{ #' \tabular{cccl}{ #' \tab \tab \tab \bold{30E/360} \cr #' ___________\tab | \tab ___ \tab ________________________________________________ \cr #' DCC \tab | \tab = \tab 6 \cr #' ___________\tab | \tab ___ \tab 
________________________________________________ \cr #' other names \tab | \tab \tab Eurobond Basis, Special German (30S/360), \cr #' \tab | \tab \tab ISMA-30/360 \cr #' ___________\tab | \tab ___ \tab ________________________________________________ \cr #' references \tab | \tab \tab ICMA Rule 251; ISDA (2006) section 4.16 (g); \cr #' \tab | \tab \tab SWX (2003) \cr #' ==========\tab | \tab === \tab =========================================== \cr #' } #' } #' #' \item{-}{ #' \tabular{cccl}{ #' \tab \tab \tab \bold{30E/360 (ISDA)} \cr #' ___________\tab | \tab ___ \tab ________________________________________________ \cr #' DCC \tab | \tab = \tab 7 \cr #' ___________\tab | \tab ___ \tab ________________________________________________ \cr #' other names \tab | \tab \tab none \cr #' ___________\tab | \tab ___ \tab ________________________________________________ \cr #' references \tab | \tab \tab ISDA (2006) section 4.16 (h) \cr #' ==========\tab | \tab === \tab =========================================== \cr #' } #' } #' #' \item{-}{ #' \tabular{cccl}{ #' \tab \tab \tab \bold{30/360 (German)} \cr #' ___________\tab | \tab ___ \tab ________________________________________________ \cr #' DCC \tab | \tab = \tab 8 \cr #' ___________\tab | \tab ___ \tab ________________________________________________ \cr #' other names \tab | \tab \tab 360/360 (German Master); German (30/360) \cr #' ___________\tab | \tab ___ \tab ________________________________________________ \cr #' references \tab | \tab \tab EBF (2004); SWX (2003) \cr #' ==========\tab | \tab === \tab =========================================== \cr #' } #' } #' #' \item{-}{ #' \tabular{cccl}{ #' \tab \tab \tab \bold{30/360 US} \cr #' ___________\tab | \tab ___ \tab ________________________________________________ \cr #' DCC \tab | \tab = \tab 9 \cr #' ___________\tab | \tab ___ \tab ________________________________________________ \cr #' other names \tab | \tab \tab 30/360, US (30U/360), 30/360 (SIA) \cr #' ___________\tab | \tab ___ \tab ________________________________________________ \cr #' references \tab | \tab \tab Mayle (1993); SWX (2003) \cr #' ==========\tab | \tab === \tab =========================================== \cr #' } #' } #' #' \item{-}{ #' \tabular{cccl}{ #' \tab \tab \tab \bold{Actual/365 (Fixed)} \cr #' ___________\tab | \tab ___ \tab ________________________________________________ \cr #' DCC \tab | \tab = \tab 10 \cr #' ___________\tab | \tab ___ \tab ________________________________________________ \cr #' other names \tab | \tab \tab Act/365 (Fixed), A/365 (Fixed), A/365F, English \cr #' ___________\tab | \tab ___ \tab ________________________________________________ \cr #' references \tab | \tab \tab ISDA (2006) section 4.16 (d); SWX (2003) \cr #' ==========\tab | \tab === \tab =========================================== \cr #' } #' } #' #' \item{-}{ #' \tabular{cccl}{ #' \tab \tab \tab \bold{Actual(NL)/365} \cr #' ___________\tab | \tab ___ \tab ________________________________________________ \cr #' DCC \tab | \tab = \tab 11 \cr #' ___________\tab | \tab ___ \tab ________________________________________________ \cr #' other names \tab | \tab \tab Act(No Leap Year)/365 \cr #' ___________\tab | \tab ___ \tab ________________________________________________ \cr #' references \tab | \tab \tab Krgin (2002); Thomson Reuters EIKON \cr #' ==========\tab | \tab === \tab =========================================== \cr #' \tab \tab \tab \cr #' \tab \tab \tab \cr #' } #' } #' #' \item{-}{ #' \tabular{cccl}{ #' \tab \tab 
\tab \bold{Actual/360} \cr
#' ___________\tab | \tab ___ \tab ________________________________________________ \cr
#' DCC \tab | \tab = \tab 12 \cr
#' ___________\tab | \tab ___ \tab ________________________________________________ \cr
#' other names \tab | \tab \tab Act/360, A/360, French \cr
#' ___________\tab | \tab ___ \tab ________________________________________________ \cr
#' references \tab | \tab \tab ISDA (2006) section 4.16 (e); SWX (2003) \cr
#' ==========\tab | \tab === \tab =========================================== \cr
#' }
#' }
#'
#' \item{-}{
#' \tabular{cccl}{
#' \tab \tab \tab \bold{30/365} \cr
#' ___________\tab | \tab ___ \tab ________________________________________________ \cr
#' DCC \tab | \tab = \tab 13 \cr
#' ___________\tab | \tab ___ \tab ________________________________________________ \cr
#' references \tab | \tab \tab Krgin (2002); Thomson Reuters EIKON \cr
#' ==========\tab | \tab === \tab =========================================== \cr
#' }
#' }
#'
#' \item{-}{
#' \tabular{cccl}{
#' \tab \tab \tab \bold{Act/365 (Canadian Bond)} \cr
#' ___________\tab | \tab ___ \tab ________________________________________________ \cr
#' DCC \tab | \tab = \tab 14 \cr
#' ___________\tab | \tab ___ \tab ________________________________________________ \cr
#' references \tab | \tab \tab IIAC (2018); Thomson Reuters EIKON \cr
#' ==========\tab | \tab === \tab =========================================== \cr
#' }
#' }
#'
#' \item{-}{
#' \tabular{cccl}{
#' \tab \tab \tab \bold{Act/364} \cr
#' ___________\tab | \tab ___ \tab ________________________________________________ \cr
#' DCC \tab | \tab = \tab 15 \cr
#' ___________\tab | \tab ___ \tab ________________________________________________ \cr
#' references \tab | \tab \tab Thomson Reuters EIKON \cr
#' ==========\tab | \tab === \tab =========================================== \cr
#' }
#' }
#'
#' \item{-}{
#' \tabular{cccl}{
#' \tab \tab \tab \bold{BusDay/252 (Brazilian)} \cr
#' ___________\tab | \tab ___ \tab ________________________________________________ \cr
#' DCC \tab | \tab = \tab 16 \cr
#' ___________\tab | \tab ___ \tab ________________________________________________ \cr
#' other names \tab | \tab \tab BUS/252, BD/252 \cr
#' ___________\tab | \tab ___ \tab ________________________________________________ \cr
#' references \tab | \tab \tab Caputo Silva et al. (2010), \cr
#' \tab | \tab \tab Itau Unibanco S.A. (2017) \cr
#' ==========\tab | \tab === \tab =========================================== \cr
#' }
#' }
#'
#' }
#'
#' @param StartDate Calendar date on which interest accrual starts. Date class object with format "\%Y-\%m-\%d". (required)
#' @param EndDate Calendar date up to which interest accrues. Date class object with format "\%Y-\%m-\%d". (required)
#' @param Coup Nominal interest rate per year in percent. (required)
#' @param DCC The day count convention for interest accrual. (required)
#' @param RV The redemption value of the bond. Default: 100.
#' @param CpY Number of interest payments per year (non-negative integer; element of the set
#' \{1,2,3,4,6,12\}). Default: 2.
#' @param Mat So-called "maturity date", i.e. date on which the redemption value and the final interest
#' are paid. Date class object with format "\%Y-\%m-\%d".
#' @param YearNCP Year figure of the next coupon payment date after \code{EndDate}.
#' @param EOM Boolean indicating whether the bond follows the End-of-Month rule.
#' @param DateOrigin Determines the starting point for the daycount in "Date" objects.
#' Default: "1970-01-01".
#' @param InputCheck If 1, the input variables are checked for the correct format. Default: 1.
#'
#' @return
#' \describe{
#' \item{AccrInt}{
#' Accrued interest on \code{EndDate}, given the other characteristics.
#' }
#' \item{DaysAccrued}{
#' The number of days of interest from \code{StartDate} to \code{EndDate}.
#' }
#' }
#'
#' @references
#' \enumerate{
#' \item{Banking Federation of the European Union (EBF), 2004, Master Agreement for Financial Transactions - Supplement to the Derivatives Annex - Interest Rate Transactions.}
#' \item{Caputo Silva, Anderson, Lena Oliveira de Carvalho, and Octavio Ladeira de Medeiros, 2010, \emph{Public Debt: The Brazilian Experience} (National Treasury Secretariat and World Bank, Brasilia, BR).}
#' \item{Djatschenko, Wadim, The Nitty Gritty of Bond Valuation: A Generalized Methodology for Fixed Coupon Bond Analysis Allowing for Irregular Periods and Various Day Count Conventions (November 5, 2018). Available at SSRN: https://ssrn.com/abstract=3205167.}
#' \item{International Capital Market Association (ICMA), 2010, Rule 251 Accrued Interest Calculation - Excerpt from ICMA's Rules and Recommendations.}
#' \item{Investment Industry Association of Canada (IIAC), 2018, Canadian Conventions in Fixed Income Markets - A Reference Document of Fixed Income Securities Formulas and Practices; Release: 1.3.}
#' \item{International Swaps and Derivatives Association (ISDA), Inc., 1998, "EMU and Market Conventions: Recent Developments".}
#' \item{International Swaps and Derivatives Association (ISDA), Inc., 2006, \emph{2006 ISDA Definitions}, New York.}
#' \item{Itau Unibanco S.A., 2017, Brazilian Sovereign Fixed Income and Foreign Exchange Markets - Handbook (First Edition).}
#' \item{Krgin, Dragomir, 2002, The Handbook of Global Fixed Income Calculations. (Wiley, New York).}
#' \item{Mayle, Jan, 1993, Standard Securities Calculation Methods: Fixed Income Securities Formulas for Price, Yield, and Accrued Interest, volume 1, New York: Securities Industry Association, third edition.}
#' \item{Municipal Securities Rulemaking Board (MSRB), 2017, MSRB Rule Book, Washington, DC: Municipal Securities Rulemaking Board.}
#' \item{SWX Swiss Exchange and D. Christie, 2003, "Accrued Interest & Yield Calculations and Determination of Holiday Calendars".}
#' }
#'
#' @examples
#' StartDate<-rep(as.Date("2011-08-31"),16)
#' EndDate<-rep(as.Date("2012-02-29"),16)
#' Coup<-rep(5.25,16)
#' DCC<-seq(1,16)
#' RV<-rep(10000,16)
#' CpY<-rep(2,16)
#' Mat<-rep(as.Date("2021-08-31"),16)
#' YearNCP<-rep(2012,16)
#' EOM<-rep(1,16)
#'
#' DCC_Comparison<-data.frame(StartDate,EndDate,Coup,DCC,RV,CpY,Mat,YearNCP,EOM)
#'
#' AccrIntOutput<-apply(DCC_Comparison[,c('StartDate','EndDate','Coup','DCC',
#' 'RV','CpY','Mat','YearNCP','EOM')],1,function(y) AccrInt(y[1],y[2],y[3],
#' y[4],y[5],y[6],y[7],y[8],y[9]))
#' # warnings are due to apply's conversion of the variables' classes in
#' # DCC_Comparison to class "character"
#' Accrued_Interest<-do.call(rbind,lapply(AccrIntOutput, function(x) x[[1]]))
#' Days_Accrued<-do.call(rbind,lapply(AccrIntOutput, function(x) x[[2]]))
#' DCC_Comparison<-cbind(DCC_Comparison,Accrued_Interest,Days_Accrued)
#' DCC_Comparison
#'
#'
#' @export
AccrInt<-function(StartDate=as.Date(NA),EndDate=as.Date(NA),Coup=as.numeric(NA),DCC=as.numeric(NA),RV=as.numeric(NA),CpY=as.numeric(NA),Mat=as.Date(NA),YearNCP=as.Date(NA),EOM=as.numeric(NA),DateOrigin=as.Date("1970-01-01"),InputCheck=1) {
  NAccr<-as.numeric(NA)
  AccrInt<-as.numeric(NA)
  if (InputCheck==1) {
    CheckedInput<-InputFormatCheck(StartDate=StartDate,EndDate=EndDate,Coup=Coup,DCC=DCC,RV=RV,CpY=CpY,Mat=Mat,YearNCP=YearNCP,EOM=EOM,DateOrigin=DateOrigin)
    StartDate<-CheckedInput$StartDate
    EndDate<-CheckedInput$EndDate
    Coup<-CheckedInput$Coup
    DCC<-CheckedInput$DCC
    RV<-CheckedInput$RV
    CpY<-CheckedInput$CpY
    Mat<-CheckedInput$Mat
    YearNCP<-CheckedInput$YearNCP
    EOM<-CheckedInput$EOM
    DateOrigin<-CheckedInput$DateOrigin
  }
  if ((missing(StartDate))|(is.na(StartDate))) {
    warning("The supplied StartDate is NA or cannot be processed. NA created!")
  } else {
    if ((missing(EndDate))|(is.na(EndDate))) {
      warning("The supplied EndDate is NA or cannot be processed. NA created!")
    } else {
      if (EndDate<StartDate) {
        warning("The supplied EndDate is prior to the supplied StartDate. NA created!")
      } else {
        if ((missing(Coup))|(is.na(Coup))) {
          warning("The supplied interest rate p.a. (Coup) is NA or cannot be processed. NA created!")
        } else {
          # If DCC is not provided or NA or not element of {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}, the following code sets it 2 (Act/Act (ICMA)).
          if ((missing(DCC))|(is.na(DCC))) {
            DCC<-2
            warning("The day count identifier (DCC) is missing or NA. DCC is set 2 (Act/Act (ICMA))!")
          } else {
            if (!(is.element(DCC,c(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16)))) {
              DCC<-2
              warning("The day count identifier (DCC) is not element of {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}! DCC is set 2 (Act/Act (ICMA))!")
            }
          }
          if ((DCC==7)&((missing(Mat))|(is.na(Mat)))) {
            warning("Maturity date (Mat) is missing or NA. Accrued interest computation for the specified day count convention 30E/360 (ISDA) requires a valid Mat value. NA created!")
          } else {
            if ((DCC==9)&((missing(EOM))|(is.na(EOM)))) {
              warning("End-of-Month-Rule identifier (EOM) is missing or NA. Accrued interest computation for the specified day count convention 30/360 US requires a valid EOM value. NA created!")
            } else {
              if ((DCC==13)&((missing(EOM))|(is.na(EOM)))) {
                warning("End-of-Month-Rule identifier (EOM) is missing or NA. Accrued interest computation for the specified day count convention 30/365 requires a valid EOM value. NA created!")
              } else {
                if ((DCC==2)&((missing(EOM))|(is.na(EOM)))) {
                  warning("End-of-Month-Rule identifier (EOM) is missing or NA. Accrued interest computation for the specified day count convention Act/Act (ICMA) requires a valid EOM value. NA created!")
                } else {
                  if ((DCC==14)&((missing(EOM))|(is.na(EOM)))) {
                    warning("End-of-Month-Rule identifier (EOM) is missing or NA. Accrued interest computation for the specified day count convention Act/365 (Canadian Bond) requires a valid EOM value. NA created!")
                  } else {
                    if ((DCC==4)&(CpY!=1)&((missing(YearNCP))|(is.na(YearNCP)))) {
                      warning(paste0("Year figure of the next coupon payment date after EndDate (YearNCP) is missing or NA. Accrued interest computation for the specified day count convention Act/365L with CpY = ",CpY," requires a valid YearNCP value. NA created!"))
                    } else {
                      if ((DCC==2)&((missing(CpY))|(is.na(CpY)))) {
                        CpY<-2
                        warning("Number of interest payments per year (CpY) is missing or NA. Accrued interest computation for the specified day count convention Act/Act (ICMA) requires a valid CpY value. CpY is set 2!")
                      } else {
                        if ((DCC==2)&(!(is.element(CpY,c(1,2,3,4,6,12))))) {
                          CpY<-2
                          warning("Number of interest payments per year (CpY) is not element of {1,2,3,4,6,12}. Accrued interest computation for the specified day count convention Act/Act (ICMA) requires a valid CpY value. CpY is set 2!")
                        }
                      }
                      if ((DCC==4)&((missing(CpY))|(is.na(CpY)))) {
                        CpY<-2
                        warning("Number of interest payments per year (CpY) is missing or NA. Accrued interest computation for the specified day count convention Act/365L requires a valid CpY value. CpY is set 2!")
                      } else {
                        if ((DCC==4)&(!(is.element(CpY,c(1,2,3,4,6,12))))) {
                          CpY<-2
                          warning("Number of interest payments per year (CpY) is not element of {1,2,3,4,6,12}. Accrued interest computation for the specified day count convention Act/365L requires a valid CpY value. CpY is set 2!")
                        }
                      }
                      if ((DCC==14)&((missing(CpY))|(is.na(CpY)))) {
                        CpY<-2
                        warning("Number of interest payments per year (CpY) is missing or NA. Accrued interest computation for the specified day count convention Act/365 (Canadian Bond) requires a valid CpY value. CpY is set 2!")
                      } else {
                        if ((DCC==14)&(!(is.element(CpY,c(1,2,3,4,6,12))))) {
                          CpY<-2
                          warning("Number of interest payments per year (CpY) is not element of {1,2,3,4,6,12}. Accrued interest computation for the specified day count convention Act/365 (Canadian Bond) requires a valid CpY value. CpY is set 2!")
                        }
                      }
                      if (DCC==16) {
                        if ((missing(CpY))|(is.na(CpY))) {
                          CpY<-2
                          warning("Number of interest payments per year (CpY) is missing or NA. Accrued interest computation for the specified day count convention BusDay/252 (Brazilian) requires a valid CpY value. CpY is set 2!")
                        } else {
                          if (!(is.element(CpY,c(1,2,3,4,6,12)))) {
                            CpY<-2
                            warning("Number of interest payments per year (CpY) is not element of {1,2,3,4,6,12}. Accrued interest computation for the specified day count convention BusDay/252 (Brazilian) requires a valid CpY value. CpY is set 2!")
                          }
                        }
                      }
                      if ((missing(RV))|(is.na(RV))) {
                        RV<-100
                        warning("Redemption value (RV) is missing or NA. RV is set 100!")
                      }
                      if (is.element(DCC,c(1,3,5,6,8,10,11,12,15))) {
                        # c(RV,Coup,DCC,Y1,M1,D1,Y2,M2,D2)
                        Atoms_StartDate<-as.numeric(unlist(strsplit(as.character(StartDate),split = "-")))
                        Atoms_EndDate<-as.numeric(unlist(strsplit(as.character(EndDate),split = "-")))
                        DIST_Output<-DIST(c(DCC,Atoms_StartDate,Atoms_EndDate))
                        NAccr<-DIST_Output[1]
                        AccrInt<-RV*(Coup/100)*DIST_Output[2]
                      } else {
                        if (DCC==16) {
                          # c(RV,Coup,DCC,Y1,M1,D1,Y2,M2,D2,NonBus)
                          NonBus.Start.End<-length(which((NonBusDays.Brazil$Date>=StartDate)&(NonBusDays.Brazil$Date<EndDate)))
                          Atoms_StartDate<-as.numeric(unlist(strsplit(as.character(StartDate),split = "-")))
                          Atoms_EndDate<-as.numeric(unlist(strsplit(as.character(EndDate),split = "-")))
                          DIST_Output<-DIST(c(DCC,Atoms_StartDate,Atoms_EndDate,NonBus.Start.End))
                          NAccr<-DIST_Output[1]
                          AccrInt<-RV*(((1+Coup/100)^(DIST_Output[2]))-1)
                        } else {
                          if (DCC==4) {
                            # c(RV,Coup,DCC,Y1,M1,D1,Y2,M2,D2,YearNCP,CpY)
                            Atoms_StartDate<-as.numeric(unlist(strsplit(as.character(StartDate),split = "-")))
                            Atoms_EndDate<-as.numeric(unlist(strsplit(as.character(EndDate),split = "-")))
                            DIST_Output<-DIST(c(DCC,Atoms_StartDate,Atoms_EndDate,YearNCP,CpY))
                            NAccr<-DIST_Output[1]
                            AccrInt<-RV*(Coup/100)*DIST_Output[2]
                          } else {
                            if (DCC==7) {
                              # c(RV,Coup,DCC,Y1,M1,D1,Y2,M2,D2,YMat,MMat,DMat)
                              Atoms_StartDate<-as.numeric(unlist(strsplit(as.character(StartDate),split = "-")))
                              Atoms_EndDate<-as.numeric(unlist(strsplit(as.character(EndDate),split = "-")))
                              Atoms_Mat<-as.numeric(unlist(strsplit(as.character(Mat),split = "-")))
                              DIST_Output<-DIST(c(DCC,Atoms_StartDate,Atoms_EndDate,Atoms_Mat))
                              NAccr<-DIST_Output[1]
                              AccrInt<-RV*(Coup/100)*DIST_Output[2]
                            } else {
                              if (is.element(DCC,c(9,13))) {
                                # c(RV,Coup,DCC,Y1,M1,D1,Y2,M2,D2,EOM)
                                Atoms_StartDate<-as.numeric(unlist(strsplit(as.character(StartDate),split = "-")))
                                Atoms_EndDate<-as.numeric(unlist(strsplit(as.character(EndDate),split = "-")))
                                DIST_Output<-DIST(c(DCC,Atoms_StartDate,Atoms_EndDate,EOM))
                                NAccr<-DIST_Output[1]
                                AccrInt<-RV*(Coup/100)*DIST_Output[2]
                              } else {
                                if (DCC==2|DCC==14) {
                                  if (CpY==1) {N_months<-12}
                                  if (CpY==2) {N_months<-6}
                                  if (CpY==3) {N_months<-4}
                                  if (CpY==4) {N_months<-3}
                                  if (CpY==6) {N_months<-2}
                                  if (CpY==12) {N_months<-1}
                                  AtomVector_StartDate<-as.numeric(unlist(strsplit(as.character(StartDate),split = "-")))
                                  Atom1StartDate<-AtomVector_StartDate[1]
                                  Atom2StartDate<-AtomVector_StartDate[2]
                                  Atom3StartDate<-AtomVector_StartDate[3]
                                  StartDateHelp<-as.Date(paste(Atom1StartDate,Atom2StartDate,15,sep="-"))
                                  AtomVector_EndDate<-as.numeric(unlist(strsplit(as.character(EndDate),split = "-")))
                                  Atom1EndDate<-AtomVector_EndDate[1]
                                  Atom2EndDate<-AtomVector_EndDate[2]
                                  Atom3EndDate<-AtomVector_EndDate[3]
                                  LDM_EndDate<-as.numeric(Date_LDM(c(Atom1EndDate,Atom2EndDate,Atom3EndDate)))
                                  LDM_EndDate<-as.Date(paste(LDM_EndDate[1],LDM_EndDate[2],LDM_EndDate[3],sep="-"))
                                  AnnivDates<-seq(StartDateHelp,LDM_EndDate,by=paste(N_months," months",sep=""))
                                  # assigning the reference date that determines the day figures of all AnnivDates
                                  Atom1Refer<-Atom1StartDate
                                  Atom2Refer<-Atom2StartDate
                                  Atom3Refer<-Atom3StartDate
                                  if (EOM==1) {
                                    AnnivDates<-as.Date(timeLastDayInMonth(AnnivDates))
                                  } else {
                                    AnnivDates_A<-as.Date(ISOdatetime(t(atoms(as.timeDate(AnnivDates))[1]),t(atoms(as.timeDate(AnnivDates))[2]),Atom3Refer,12,0,0))
                                    if (length(which(is.na(AnnivDates_A)))!=0) {
                                      # Assuming %d of LIPD as the %d of all AnnivDates produced NAs. NAs are substituted by the respective last day in month.
                                      nas<-which(is.na(AnnivDates_A))
                                      AnnivDates_B<-as.Date(timeLastDayInMonth(AnnivDates[nas]))
                                      AnnivDates<-sort(na.omit(append(AnnivDates_A,AnnivDates_B)))
                                    } else {
                                      AnnivDates<-AnnivDates_A
                                    }
                                  }
                                  AnnivDates<-sort(AnnivDates[!duplicated(AnnivDates)])
                                  # creating the anniversary date preceding AD1
                                  AtomVector_AD1<-as.numeric(unlist(strsplit(as.character(AnnivDates[1]),split = "-")))
                                  Atom1AD1<-AtomVector_AD1[1]
                                  Atom2AD1<-AtomVector_AD1[2]
                                  Atom3AD1<-AtomVector_AD1[3]
                                  PrevDate<-as.numeric(CppPrevDate(c(Atom1AD1,Atom2AD1,Atom3AD1,Atom1AD1,Atom2AD1,Atom3AD1,Atom1Refer,Atom2Refer,Atom3Refer,CpY,EOM)))
                                  PrevDate<-as.Date(paste(PrevDate[1],PrevDate[2],PrevDate[3],sep="-"))
                                  AnnivDates<-c(PrevDate,AnnivDates)
                                  AnnivDates<-sort(na.omit(AnnivDates[!duplicated(AnnivDates)]))
                                  # creating the anniversary date succeeding ADfin
                                  AtomVector_ADfin<-as.numeric(unlist(strsplit(as.character(AnnivDates[length(AnnivDates)]),split = "-")))
                                  Atom1ADfin<-AtomVector_ADfin[1]
                                  Atom2ADfin<-AtomVector_ADfin[2]
                                  Atom3ADfin<-AtomVector_ADfin[3]
                                  SuccDate<-as.numeric(CppSuccDate(c(Atom1ADfin,Atom2ADfin,Atom3ADfin,Atom1ADfin,Atom2ADfin,Atom3ADfin,Atom1Refer,Atom2Refer,Atom3Refer,CpY,EOM)))
                                  SuccDate<-as.Date(paste(SuccDate[1],SuccDate[2],SuccDate[3],sep="-"))
                                  AnnivDates<-c(AnnivDates,SuccDate)
                                  AnnivDates<-sort(na.omit(AnnivDates[!duplicated(AnnivDates)]))
                                  # DIST: for DCC = 2 x is a vector of 22 integers:
                                  # c(DCC,Y1,M1,D1,Y2,M2,D2,Y3,M3,D3,Y4,M4,D4,Y5,M5,D5,Y6,M6,D6,P,N,CpY) with
                                  # Y1-M1-D1 = PCD(t_a,AD) ; Y2-M2-D2 = t_a ; Y3-M3-D3 = NCD(t_a,AD)
                                  # Y4-M4-D4 = PCD(t_b,AD) ; Y5-M5-D5 = t_b ; Y6-M6-D6 = NCD(t_b,AD)
                                  # P = P(t_b,AD) ; N = N(t_a,AD)
                                  PCD_StartDate<-PCD(StartDate,AnnivDates)
                                  Atoms_PCD_StartDate<-as.numeric(unlist(strsplit(as.character(PCD_StartDate),split = "-")))
                                  # Atom1_PCD_StartDate<-Atoms_PCD_StartDate[1]
                                  # Atom2_PCD_StartDate<-Atoms_PCD_StartDate[2]
                                  # Atom3_PCD_StartDate<-Atoms_PCD_StartDate[3]
                                  NCD_StartDate<-NCD(StartDate,AnnivDates)
                                  Atoms_NCD_StartDate<-as.numeric(unlist(strsplit(as.character(NCD_StartDate),split = "-")))
                                  # Atom1_NCD_StartDate<-Atoms_NCD_StartDate[1]
                                  # Atom2_NCD_StartDate<-Atoms_NCD_StartDate[2]
                                  # Atom3_NCD_StartDate<-Atoms_NCD_StartDate[3]
                                  PCD_EndDate<-PCD(EndDate,AnnivDates)
                                  Atoms_PCD_EndDate<-as.numeric(unlist(strsplit(as.character(PCD_EndDate),split = "-")))
                                  # Atom1_PCD_EndDate<-Atoms_PCD_EndDate[1]
                                  # Atom2_PCD_EndDate<-Atoms_PCD_EndDate[2]
                                  # Atom3_PCD_EndDate<-Atoms_PCD_EndDate[3]
                                  NCD_EndDate<-NCD(EndDate,AnnivDates)
                                  Atoms_NCD_EndDate<-as.numeric(unlist(strsplit(as.character(NCD_EndDate),split = "-")))
                                  # Atom1_NCD_EndDate<-Atoms_NCD_EndDate[1]
                                  # Atom2_NCD_EndDate<-Atoms_NCD_EndDate[2]
                                  # Atom3_NCD_EndDate<-Atoms_NCD_EndDate[3]
                                  AD_indexes<-c(1:length(AnnivDates))-1
                                  AD_List<-list(AnnivDates,AD_indexes)
                                  N_StartDate<-AD_List[[2]][which(AD_List[[1]]==NCD_StartDate)]
                                  P_EndDate<-AD_List[[2]][which(AD_List[[1]]==PCD_EndDate)]
                                  DIST_Output<-DIST(c(DCC,Atoms_PCD_StartDate,AtomVector_StartDate,Atoms_NCD_StartDate,
                                                      Atoms_PCD_EndDate,AtomVector_EndDate,Atoms_NCD_EndDate,P_EndDate,N_StartDate,CpY))
                                  NAccr<-DIST_Output[1]
                                  AccrInt<-RV*(Coup/100)*DIST_Output[2]
                                }
                              }
                            }
                          }
                        }
                      }
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
  }
  Output<-list(AccrInt=AccrInt,DaysAccrued=NAccr)
  return(Output)
}
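# A hand-check sketch of the 30/360 method (DCC = 5), added for exposition
# only: the arithmetic below restates the 30/360 rule (a start day-of-month
# of 31 is treated as 30) for the dates used in the examples above, and
# should reproduce the AccrInt() result.
Y1 <- 2011; M1 <- 8; D1 <- 30   # start 2011-08-31; day 31 is capped at 30
Y2 <- 2012; M2 <- 2; D2 <- 29   # end 2012-02-29; unchanged, since D2 < 31
N30360 <- 360*(Y2 - Y1) + 30*(M2 - M1) + (D2 - D1)   # = 179 days
N30360/360 * 5.25/100 * 10000                        # manual accrued interest
AccrInt(StartDate = as.Date("2011-08-31"), EndDate = as.Date("2012-02-29"),
        Coup = 5.25, DCC = 5, RV = 10000)            # should match: 179 days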
/scratch/gouwar.j/cran-all/cranData/BondValuation/R/AccrInt.R
#' AnnivDates (time-invariant properties and temporal structure)
#'
#' \bold{AnnivDates} returns a bond's time-invariant characteristics and temporal structure as a list of
#' three or four named data frames.
#'
#' \bold{AnnivDates} generates a list of the three data frames \code{Warnings}, \code{Traits}
#' and \code{DateVectors}. If the variable \code{Coup} is passed to the function,
#' the output additionally contains the data frame \code{PaySched}. \bold{AnnivDates} is meant to analyze
#' large data frames. Therefore some features are implemented to evaluate the quality of the data. The
#' output of these features is stored in the data frame \code{Warnings}. Please see section \bold{Value}
#' for a detailed description of the tests run and the meaning of the variables in \code{Warnings}. The
#' data frame \code{Traits} contains all time-invariant bond characteristics that were either provided by
#' the user or calculated by the function. The data frame \code{DateVectors} contains three vectors
#' of Date-Objects named \code{RealDates}, \code{CoupDates} and \code{AnnivDates} and three vectors of
#' numerics named \code{RD_indexes}, \code{CD_indexes} and \code{AD_indexes}. These vectors are
#' used in the other functions of this package according to the methodology presented in Djatschenko (2018).
#' The data frame \code{PaySched} matches \code{CoupDates}
#' to the actual amount of interest that the bond pays on the respective interest payment date. Section
#' \bold{Value} provides further information on the output of the function \bold{AnnivDates}. Information
#' on the proper input format is provided below, followed by information on the operating principle of
#' the function \bold{AnnivDates} and on the assumptions that are made to estimate the points in time
#' needed to evaluate a bond.
#'
#' \itemize{
#'   \item The dates \code{Em}, \code{Mat}, \code{FIPD}, \code{LIPD} and \code{FIAD} can be provided as
#'   \enumerate{
#'   \item "Date" with format \code{"\%Y-\%m-\%d"}, or
#'   \item "numeric" with the appropriate \code{DateOrigin}, or
#'   \item number of class "character" with the appropriate \code{DateOrigin}, or
#'   \item string of class "character" in the format \code{"yyyy-mm-dd"}.
#'   }
#'   \code{CpY}, \code{RV} and \code{Coup} can be provided either as class "numeric" or as a number of
#'   class "character".
#'
#'   \item The provided issue date (\code{Em}) is immediately replaced by the first interest accrual
#'   date (\code{FIAD}) if \code{FIAD} is available and different from \code{Em}.
#'
#'   \item Before the determination of the bond's date characteristics begins, the code evaluates
#'   the provided calendar dates for plausibility. In this process implausible dates are dropped.
#'   The type of implausibility is identified and stored in a warning flag. (See
#'   section \bold{Value} for details.)
#'
#'   \item The remaining valid calendar dates are used to gauge whether the bond follows the
#'   End-of-Month-Rule. The resulting parameter \code{est_EOM} can take on the following values:
#'   \describe{
#'     \item{-}{
#'       \tabular{cl}{
#'         \bold{\emph{Case 1:}} \tab \bold{\code{FIPD} and \code{LIPD} are both \code{NA}} \cr
#'         ___________ \tab ____________________________________ \cr
#'         \code{est_EOM = 1} \tab , if \code{Mat} is the last day of a month. \cr
#'         \code{est_EOM = 0} \tab , else. \cr
#'         ========== \tab ================================ \cr
#'       }
#'     }
#'     \item{-}{
#'       \tabular{cl}{
#'         \bold{\emph{Case 2:}} \tab \bold{\code{FIPD} is \code{NA} and \code{LIPD} is a valid calendar date} \cr
#'         ___________ \tab ____________________________________ \cr
#'         \code{est_EOM = 1} \tab , if \code{LIPD} is the last day of a month. \cr
#'         \code{est_EOM = 0} \tab , else. \cr
#'         ========== \tab ================================ \cr
#'       }
#'     }
#'     \item{-}{
#'       \tabular{cl}{
#'         \bold{\emph{Case 3:}} \tab \bold{\code{FIPD} is a valid calendar date and \code{LIPD} is \code{NA}} \cr
#'         ___________ \tab ____________________________________ \cr
#'         \code{est_EOM = 1} \tab , if \code{FIPD} is the last day of a month. \cr
#'         \code{est_EOM = 0} \tab , else. \cr
#'         ========== \tab ================================ \cr
#'       }
#'     }
#'     \item{-}{
#'       \tabular{cl}{
#'         \bold{\emph{Case 4:}} \tab \bold{\code{FIPD} and \code{LIPD} are valid calendar dates} \cr
#'         ___________ \tab ____________________________________ \cr
#'         \code{est_EOM = 1} \tab , if \code{LIPD} is the last day of a month. \cr
#'         \code{est_EOM = 0} \tab , else. \cr
#'         ========== \tab ================================ \cr
#'       }
#'     }
#'   }
#'   \item If \code{EOM} is initially missing or \code{NA} or not element of \code{\{0,1\}}, \code{EOM}
#'   is set \code{est_EOM} with a warning.
#'   \item If the initially provided value of \code{EOM} deviates from \code{est_EOM}, the following two
#'   cases apply:
#'   \tabular{cl}{
#'     ________ \tab _________________________________________ \cr
#'     Case 1: \tab If \code{EOM = 0} and \code{est_EOM = 1}: \cr
#'     \tab \code{EOM} is not overridden and remains \code{EOM = 0} \cr
#'     ________ \tab _________________________________________ \cr
#'     Case 2: \tab If \code{EOM = 1} and \code{est_EOM = 0}: \cr
#'     \tab \code{EOM} is overridden and set \code{EOM = 0} with a warning. \cr
#'     \tab Keeping \code{EOM = 1} in this case would conflict with \cr
#'     \tab the provided \code{Mat}, \code{FIPD} or \code{LIPD}. \cr
#'     ________ \tab _________________________________________ \cr
#'     Note: \tab Set the option \code{FindEOM=TRUE} to always use \cr
#'     \tab \code{est_EOM} found by the code. \cr
#'     ======= \tab ==================================== \cr
#'   }
#'
#'   \item If \code{FIPD} and \code{LIPD} are both available, the lengths of the first and final coupon
#'   periods are determinate and can be "regular", "long" or "short". To find the interest payment dates
#'   between \code{FIPD} and \code{LIPD} the following assumptions are met:
#'   \enumerate{
#'   \item \preformatted{The interest payment dates between FIPD and LIPD are
#'   evenly distributed.}
#'   \item \preformatted{The value of EOM determines the location of
#'   all interest payment dates.}
#'   }
#'   If assumption 1 is violated, the exact locations of the interest payment dates between
#'   \code{FIPD} and \code{LIPD} are ambiguous. The assumption is violated in particular if
#'   \enumerate{
#'   \item \code{FIPD} and \code{LIPD} are in the same month of the same year but not on the same day, or
#'   \item the month difference between \code{FIPD} and \code{LIPD} is not a multiple of the number
#'   of months implied by \code{CpY}, or
#'   \item \code{FIPD} and \code{LIPD} are not both last day in month,
#'   their day figures differ and the day figure difference between \code{FIPD}
#'   and \code{LIPD} is not due to different month lengths.
#'   }
#'   In each of the three cases, \code{FIPD} and \code{LIPD} are dropped
#'   with the flag \code{IPD_CpY_Corrupt = 1}.
#'
#'   \item If neither \code{FIPD} nor \code{LIPD} is available, the code
#'   evaluates the bond based only upon the required variables \code{Em} and
#'   \code{Mat} (and \code{CpY}, which is \code{2} by default). Since \code{FIPD} is
#'   not given, it is impossible to distinguish between a "short" and a "long" odd
#'   first coupon period without an assumption on the number of interest
#'   payment dates. Consequently, the first coupon period is assumed to be either
#'   "regular" or "short". The locations of \code{FIPD} and \code{LIPD} are
#'   estimated under the following assumptions:
#'   \enumerate{
#'   \item \preformatted{The final coupon period is "regular".}
#'   \item \preformatted{The interest payment dates between the estimated
#'   FIPD and Mat are evenly distributed.}
#'   \item \preformatted{The value of EOM determines the location of
#'   all interest payment dates.}
#'   }
#'
#'   \item If \code{LIPD} is available but \code{FIPD} is not, the length
#'   of the final coupon payment period is determined by \code{LIPD} and
#'   \code{Mat} and can be "regular", "long" or "short". The locations of
#'   the interest payment dates are estimated under the following assumptions:
#'   \enumerate{
#'   \item \preformatted{The first coupon period is either "regular" or "short".}
#'   \item \preformatted{The interest payment dates between the estimated
#'   FIPD and LIPD are evenly distributed.}
#'   \item \preformatted{The value of EOM determines the location of
#'   all interest payment dates.}
#'   }
#'
#'   \item If \code{FIPD} is available but \code{LIPD} is not, the length
#'   of the first coupon payment period is determined by \code{Em} and
#'   \code{FIPD} and can be "regular", "long" or "short". The locations of
#'   the interest payment dates are estimated under the following assumptions:
#'   \enumerate{
#'   \item \preformatted{The final coupon period is either "regular" or "short".}
#'   \item \preformatted{The interest payment dates between FIPD and
#'   the estimated LIPD are evenly distributed.}
#'   \item \preformatted{The value of EOM determines the location of
#'   all interest payment dates.}
#'   }
#' }
#'
#'
#' @param Em The bond's issue date. (required)
#' @param Mat Maturity date, i.e. date on which the redemption value and the final interest
#' are paid. (required)
#' @param CpY Number of interest payments per year (non-negative integer; element of the set
#' \{0,1,2,3,4,6,12\}). Default: 2.
#' @param FIPD First interest payment date after \code{Em}.
#' @param LIPD Last interest payment date prior to \code{Mat}.
#' @param FIAD Date on which the interest accrual starts (so-called "dated date").
#' @param RV The redemption value of the bond. Default: 100.
#' @param Coup Nominal interest rate per year in percent. Default: \code{NA}.
#' @param DCC The day count convention the bond follows. Default: \code{NA}.
#' For a list of day count conventions currently implemented type \code{View(List.DCC)}.
#' @param EOM Boolean indicating whether the bond follows the End-of-Month rule. Default: \code{NA}.
#' @param DateOrigin Determines the starting point for the daycount in "Date" objects.
#' Default: "1970-01-01".
#' @param InputCheck If 1, the input variables are checked for the correct format. Default: 1.
#' @param FindEOM If \code{TRUE}, \code{EOM} is overridden by the value inferred from the data.
#' Default: \code{FALSE}.
#' @param RegCF.equal If 0, the amounts of regular cash flows are calculated according to the
#' stipulated \code{DCC}. Any other value forces all regular cash flows to be equal sized.
#' Default: 0.
#' #' @return All dates are returned irrespective of whether they are on a business day or not. #' \describe{ #' \item{\emph{\bold{DateVectors}} (data frame)}{ #' \describe{ #' \item{-}{} #' \item{\emph{RealDates}}{A vector of Date class objects with format "\%Y-\%m-\%d" in ascending order, #' that contains the issue date, all actual coupon payment dates and the maturity date.} #' \item{\emph{RD_indexes}}{A vector of numerics capturing the temporal structure of the bond.} #' \item{\emph{CoupDates}}{A vector of Date class objects with format "\%Y-\%m-\%d" in ascending order, #' that contains all actual coupon payment dates and the maturity date.} #' \item{\emph{CD_indexes}}{A vector of numerics capturing the temporal structure of the bond.} #' \item{\emph{AnnivDates}}{A vector of Date class objects with format "\%Y-\%m-\%d" in ascending order, #' that contains all theoretical coupon anniversary dates. The first value of \emph{AnnivDates} is the #' anniversary date immediately preceding the issue date, if the bond has an irregular first coupon #' period; otherwise it is the issue date. The final value of \emph{AnnivDates} is the anniversary #' date immediately succeeding the maturity date, if the bond has an irregular final coupon period; #' otherwise it is the maturity date.} #' \item{\emph{AD_indexes}}{A vector of numerics capturing the temporal structure of the bond.} #' \item{-}{} #' } #' } #' \item{\emph{\bold{PaySched}} (data frame)}{ #' \describe{ #' \item{-}{} #' \item{\emph{CoupDates}}{A vector of Date class objects with format "\%Y-\%m-\%d" in ascending order, #' that contains all actual coupon payment dates and the maturity date.} #' \item{\emph{CoupPayments}}{A vector of class "numeric" objects, that contains the actual amounts of #' interest that the bond pays on the respective coupon payment dates. The unit of these payments is the #' same as that of \code{RV} that was passed to the function. \code{RV} is not included in the final #' interest payment.} #' \item{\bold{NOTE:}}{\code{PaySched} is created only if the variable \code{Coup} is provided.} #' \item{-}{} #' } #' } #' \item{\emph{\bold{Traits}} (data frame)}{ #' \describe{ #' \item{-}{} #' \item{\emph{DateOrigin}}{The starting point for the daycount in "Date" objects.} #' \item{\emph{CpY}}{Number of interest payments per year.} #' \item{\emph{FIAD}}{Date on which the interest accrual starts (so-called "dated date").} #' \item{\emph{Em}}{The bond's issue date that was used for calculations.} #' \item{\emph{Em_Orig}}{The bond's issue date that was entered.} #' \item{\emph{FIPD}}{The first interest payment date after \code{Em} that was used for calculations. #' If the entered \code{FIPD} was dropped during the calculation process, #' the value is \code{NA}.} #' \item{\emph{FIPD_Orig}}{The first interest payment date after \code{Em} that was entered.} #' \item{\emph{est_FIPD}}{The estimated first interest payment date after \code{Em}. \code{NA}, if #' a valid \code{FIPD} was entered.} #' \item{\emph{LIPD}}{The last interest payment date prior to \code{Mat} that was used for #' calculations. If the entered \code{LIPD} was dropped during the calculation #' process, the value is \code{NA}.} #' \item{\emph{LIPD_Orig}}{The last interest payment date prior to \code{Mat} that was entered.} #' \item{\emph{est_LIPD}}{The estimated last interest payment date prior to \code{Mat}. 
\code{NA}, #' if a valid \code{LIPD} was entered.} #' \item{\emph{Mat}}{The maturity date that was entered.} #' \item{\emph{Refer}}{Reference date that determines the day figures of all AnnivDates.} #' \item{\emph{FCPType}}{A character string indicating the type of the first coupon period. #' Values: "long", "regular", "short".} #' \item{\emph{FCPLength}}{Length of the first coupon period as a fraction of a regular coupon period.} #' \item{\emph{LCPType}}{A character string indicating the type of the last coupon period. #' Values: "long", "regular", "short".} #' \item{\emph{LCPLength}}{Length of the final coupon period as a fraction of a regular coupon period.} #' \item{\emph{Par}}{The redemption value of the bond.} #' \item{\emph{CouponInPercent.p.a}}{Nominal interest rate per year in percent.} #' \item{\emph{DayCountConvention}}{The day count convention the bond follows.} #' \item{\emph{EOM_Orig}}{The value of \code{EOM} that was entered.} #' \item{\emph{est_EOM}}{The estimated value of \code{EOM}.} #' \item{\emph{EOM_used}}{The value of \code{EOM} that was used in the calculations.} #' \item{-}{} #' } #' } #' \item{\emph{\bold{Warnings}} (data frame)}{ #' \describe{ #' \item{-}{A set of flags that indicate the occurrence of warnings during the execution. #' Below they are listed according to the hierarchical structure within the function \bold{AnnivDates}.} #' \item{-}{ #' \tabular{rcl}{ #' \tab \tab \cr #' \tab \tab \cr #' \tab \tab \cr #' \bold{\emph{Em_FIAD_differ}} = \tab \tab \cr #' 1 \tab \tab , if the provided issue date (\code{Em}) was substituted by the first \cr #' \tab \tab interest accrual date (\code{FIAD}). \cr #' \tab \tab This happens, if \code{FIAD} is available and different from \code{Em}. \cr #' \tab \tab ________________________________________________ \cr #' \tab \tab \emph{Note:} No warning is displayed. \cr #' ___________________ \tab ___ \tab ________________________________________________ \cr #' 0 \tab \tab , else. \cr #' ================= \tab === \tab =========================================== \cr #' } #' } #' \item{-}{ #' \tabular{rcl}{ #' \bold{\emph{EmMatMissing}} = \tab \tab \cr #' 1 \tab \tab , if either issue date (\code{Em}) or maturity date (\code{Mat}) or both \cr #' \tab \tab are missing or \code{NA}. \cr #' \tab \tab ________________________________________________ \cr #' \tab \tab \emph{Output:} \emph{RealDates} \code{= NA}, \emph{CoupDates} \code{= NA}, \cr #' \tab \tab \emph{AnnivDates} \code{= NA}, \emph{FCPType} \code{= NA}, \emph{LCPType} \code{= NA}. \cr #' ___________________ \tab ___ \tab ________________________________________________ \cr #' 0 \tab \tab , else. \cr #' ================= \tab === \tab =========================================== \cr #' } #' } #' \item{-}{ #' \tabular{rcl}{ #' \bold{\emph{CpYOverride}} = \tab \tab \cr #' 1 \tab \tab , if number of interest periods per year (\code{CpY}) is missing or \cr #' \tab \tab \code{NA}, or if the provided \code{CpY} is not element of \{0,1,2,3,4,6,12\}. \cr #' \tab \tab ________________________________________________ \cr #' \tab \tab \emph{Note:} \code{CpY} is set 2, and the execution continues. \cr #' \tab \tab ________________________________________________ \cr #' \tab \tab \emph{Output:} as if \code{CpY} = 2 was provided initially. \cr #' ___________________ \tab ___ \tab ________________________________________________ \cr #' 0 \tab \tab , else. 
\cr #' ================= \tab === \tab =========================================== \cr #' } #' } #' \item{-}{ #' \tabular{rcl}{ #' \bold{\emph{RV_set100percent}} = \tab \tab \cr #' 1 \tab \tab , if the redemption value (\code{RV}) is missing or \code{NA}. \cr #' \tab \tab ________________________________________________ \cr #' \tab \tab \emph{Note:} \code{RV} is set 100, and the execution continues. \cr #' \tab \tab ________________________________________________ \cr #' \tab \tab \emph{Output:} as if \code{RV} = 100 was provided initially. \cr #' ___________________ \tab ___ \tab ________________________________________________ \cr #' 0 \tab \tab , else. \cr #' ================= \tab === \tab =========================================== \cr #' } #' } #' \item{-}{ #' \tabular{rcl}{ #' \bold{\emph{NegLifeFlag}} = \tab \tab \cr #' 1 \tab \tab , if the provided maturity date (\code{Mat}) is before or on the \cr #' \tab \tab provided issue date (\code{Em}). \cr #' \tab \tab ________________________________________________ \cr #' \tab \tab \emph{Output:} \emph{RealDates} \code{= NA}, \emph{CoupDates} \code{= NA}, \cr #' \tab \tab \emph{AnnivDates} \code{= NA}, \emph{FCPType} \code{= NA}, \emph{LCPType} \code{= NA}. \cr #' ___________________ \tab ___ \tab ________________________________________________ \cr #' 0 \tab \tab , else. \cr #' ================= \tab === \tab =========================================== \cr #' } #' } #' \item{-}{ #' \tabular{rcl}{ #' \bold{\emph{ZeroFlag}} = \tab \tab \cr #' 1 \tab \tab , if number of interest payments per year (\code{CpY}) is \code{0}. \cr #' \tab \tab ________________________________________________ \cr #' \tab \tab \emph{Output:} \emph{RealDates} \code{= (Em,Mat)}, \emph{CoupDates} \code{= Mat}, \cr #' \tab \tab \emph{AnnivDates} \code{= (Em,Mat)}, \emph{FCPType} \code{= NA}, \emph{LCPType} \code{= NA}. \cr #' ___________________ \tab ___ \tab ________________________________________________ \cr #' 0 \tab \tab , else. \cr #' ================= \tab === \tab =========================================== \cr #' } #' } #' \item{-}{ #' \tabular{rcl}{ #' \bold{\emph{Em_Mat_SameMY}} = \tab \tab \cr #' 1 \tab \tab , if the issue date (\code{Em}) and the maturity date (\code{Mat}) are in the \cr #' \tab \tab same month of the same year but not on the same day, while \cr #' \tab \tab \code{CpY} is an element of \{1,2,3,4,6,12\}. \cr #' \tab \tab ________________________________________________ \cr #' \tab \tab \emph{Output:} \emph{RealDates} \code{= (Em,Mat)}, \emph{CoupDates} \code{= Mat}, \cr #' \tab \tab \emph{FCPType} \code{= short}, \emph{LCPType} \code{= short}. \cr #' ___________________ \tab ___ \tab ________________________________________________ \cr #' 0 \tab \tab , else. \cr #' ================= \tab === \tab =========================================== \cr #' } #' } #' \item{-}{ #' \tabular{rcl}{ #' \bold{\emph{ChronErrorFlag}} = \tab \tab \cr #' 1 \tab \tab , if the provided dates are in a wrong chronological order. \cr #' \tab \tab ________________________________________________ \cr #' \tab \tab \emph{Note:} \cr #' \tab \tab The correct ascending chronological order is: \cr #' \tab \tab issue date (\code{Em}), first interest payment date (\code{FIPD}), \cr #' \tab \tab last interest payment date (\code{LIPD}), maturity date (\code{Mat}). \cr #' \tab \tab \code{FIPD} and \code{LIPD} are set \code{as.Date(NA)}. 
\cr #' \tab \tab ________________________________________________ \cr #' \tab \tab \emph{Output:} as if \code{FIPD} and \code{LIPD} were not provided initially. \cr #' ___________________ \tab ___ \tab ________________________________________________ \cr #' 0 \tab \tab , else. \cr #' ================= \tab === \tab =========================================== \cr #' } #' } #' \item{-}{ #' \tabular{rcl}{ #' \bold{\emph{FIPD_LIPD_equal}} = \tab \tab \cr #' 1 \tab \tab if \code{Em} < \code{FIPD} = \code{LIPD} < \code{Mat}. \cr #' \tab \tab ________________________________________________ \cr #' \tab \tab \emph{Output:} \emph{AnnivDates} contains \code{FIPD} and has at least \code{3} elements. \cr #' \tab \tab \emph{RealDates} \code{= (Em,FIPD,Mat)}, \emph{CoupDates} \code{= (FIPD,Mat)}. \cr #' \tab \tab \emph{FCPType} and \emph{LCPType} can be "short", "regular" or "long". \cr #' ___________________ \tab ___ \tab ________________________________________________ \cr #' 0 \tab \tab , else. \cr #' ================= \tab === \tab =========================================== \cr #' } #' } #' \item{-}{ #' \tabular{rcl}{ #' \bold{\emph{IPD_CpY_Corrupt}} = \tab \tab \cr #' 1 \tab \tab , if the provided first interest payment date (\code{FIPD}) and last \cr #' \tab \tab interest payment date (\code{LIPD}) are inconsistent with the \cr #' \tab \tab provided number of interest payments per year (\code{CpY}). \cr #' \tab \tab ________________________________________________ \cr #' \tab \tab \emph{Note:} \cr #' \tab \tab Inconsistency occurs if \cr #' \tab \tab 1. \code{FIPD} and \code{LIPD} are in the same month of the same year \cr #' \tab \tab but not on the same day, or \cr #' \tab \tab 2. the number of months between \code{FIPD} and \code{LIPD} is not a \cr #' \tab \tab multiple of the number of months implied by \code{CpY}, or \cr #' \tab \tab 3. \code{FIPD} and \code{LIPD} are not both last day in month, their \cr #' \tab \tab day figures differ and the day figure difference between \cr #' \tab \tab \code{FIPD} and \code{LIPD} is not due to different month lengths. \cr #' \tab \tab \cr #' \tab \tab In each of the three cases keeping the provided values of \cr #' \tab \tab \code{FIPD} and \code{LIPD} would violate the assumption, that the \cr #' \tab \tab anniversary dates between \code{FIPD} and \code{LIPD} are evenly \cr #' \tab \tab distributed. \cr #' \tab \tab ________________________________________________ \cr #' \tab \tab \code{FIPD} and \code{LIPD} are set \code{as.Date(NA)} \cr #' \tab \tab and the execution continues. \cr #' \tab \tab ________________________________________________ \cr #' \tab \tab \emph{Output:} \cr #' \tab \tab as if \code{FIPD} and \code{LIPD} were not provided initially. \cr #' ___________________ \tab ___ \tab ________________________________________________ \cr #' 0 \tab \tab , else. \cr #' ================= \tab === \tab =========================================== \cr #' } #' } #' \item{-}{ #' \tabular{rcl}{ #' \bold{\emph{EOM_Deviation}} = \tab \tab \cr #' 1 \tab \tab , if the provided value of \code{EOM} deviates from the value that \cr #' \tab \tab is inferred from the provided calendar dates. \cr #' \tab \tab ________________________________________________ \cr #' \tab \tab \emph{Note:} \cr #' \tab \tab The program analyses the valid values of \code{Em}, \code{Mat}, \code{FIPD} and \cr #' \tab \tab \code{LIPD} to determine the appropriate value of \code{EOM}. 
\cr #' \tab \tab \cr #' \tab \tab If the initially provided value of \code{EOM} deviates from the value \cr #' \tab \tab determined by the program, there might be an inconsistency \cr #' \tab \tab in the provided data. \cr #' ___________________ \tab ___ \tab ________________________________________________ \cr #' 0 \tab \tab , else. \cr #' ================= \tab === \tab =========================================== \cr #' \tab \tab \cr #' } #' } #' \item{-}{ #' \tabular{rcl}{ #' \bold{\emph{EOMOverride}} = \tab \tab \cr #' 1 \tab \tab , if the provided value of \code{EOM} is overridden by a value that \cr #' \tab \tab is inferred from the provided calendar dates. \cr #' \tab \tab ________________________________________________ \cr #' \tab \tab \emph{Note:} \cr #' \tab \tab This happens automatically if \code{EOM} is initially missing or \code{NA} \cr #' \tab \tab or not element of \code{\{0,1\}}, or if the provided value of \code{EOM} \cr #' \tab \tab conflicts with the provided values of \code{FIPD}, \code{LIPD} or \code{Mat}, \cr #' \tab \tab e.g. if \code{est_EOM = 0} but \code{EOM = 1}. \cr #' \tab \tab If \code{EOM_Deviation = 1} and the option \code{FindEOM} is set \code{TRUE}, \cr #' \tab \tab the initially provided value of \code{EOM} is also overridden by the \cr #' \tab \tab value that is inferred from the provided calendar dates if \cr #' \tab \tab \code{est_EOM = 1} but \code{EOM = 0}. \cr #' \tab \tab ________________________________________________ \cr #' \tab \tab \emph{Output:} \cr #' \tab \tab as if the value of \code{EOM} that is found by the program was \cr #' \tab \tab provided initially. \cr #' ___________________ \tab ___ \tab ________________________________________________ \cr #' 0 \tab \tab , else. \cr #' ================= \tab === \tab =========================================== \cr #' } #' } #' \item{-}{ #' \tabular{rcl}{ #' \bold{\emph{DCCOverride}} = \tab \tab \cr #' 1 \tab \tab , if \code{DCC} is missing or \code{NA} or not element of \code{\{1,2,...,16\}}. \cr #' \tab \tab ________________________________________________ \cr #' \tab \tab \emph{Note:} \cr #' \tab \tab If the program cannot process the provided day count \cr #' \tab \tab identifier \code{DCC}, it overrides it with \code{DCC} = 2. \cr #' \tab \tab ________________________________________________ \cr #' \tab \tab \emph{Output:} \cr #' \tab \tab as if \code{DCC} = 2 was provided initially. \cr #' ___________________ \tab ___ \tab ________________________________________________ \cr #' 0 \tab \tab , else. \cr #' ================= \tab === \tab =========================================== \cr #' } #' } #' \item{-}{ #' \tabular{rcl}{ #' \bold{\emph{NoCoups}} = \tab \tab \cr #' 1 \tab \tab , if there are no coupon payments between the provided \cr #' \tab \tab issue date (\code{Em}) and the maturity date (\code{Mat}), but the \cr #' \tab \tab provided \code{CpY} is not zero. \cr #' \tab \tab ________________________________________________ \cr #' \tab \tab \emph{Output:} \cr #' \tab \tab \emph{RealDates} \code{= (Em,Mat)}, \emph{CoupDates} \code{= (Mat)}, \cr #' \tab \tab \emph{AnnivDates} contains \code{Mat} and has either \cr #' \tab \tab \code{2} or \code{3} elements, \emph{FCPType = LCPType} and \cr #' \tab \tab can be \code{"short"}, \code{"regular"} or \code{"long"}. \cr #' ___________________ \tab ___ \tab ________________________________________________ \cr #' 0 \tab \tab , else. 
\cr #' ================= \tab === \tab =========================================== \cr #' } #' } #' } #' } #' } #' #' @references #' \enumerate{ #' \item{Djatschenko, Wadim, The Nitty Gritty of Bond Valuation: A Generalized Methodology for Fixed Coupon Bond Analysis Allowing for Irregular Periods and Various Day Count Conventions (November 5, 2018). Available at SSRN: https://ssrn.com/abstract=3205167.} #' } #' #' @examples #' data(SomeBonds2016) #' #' # Applying the function AnnivDates to the data frame SomeBonds2016. #' system.time( #' FullAnalysis<-apply(SomeBonds2016[,c('Issue.Date','Mat.Date','CpY.Input','FIPD.Input', #' 'LIPD.Input','FIAD.Input','RV.Input','Coup.Input','DCC.Input','EOM.Input')],1,function(y) #' AnnivDates(y[1],y[2],y[3],y[4],y[5],y[6],y[7],y[8],y[9],y[10],RegCF.equal=1)), #' gcFirst = TRUE) #' # warnings are due to apply's conversion of the variables' classes in #' # SomeBonds2016 to class "character" #' #' # The output stored in FullAnalysis is a nested list. #' # Let's look at what is stored in FullAnalysis for a random bond: #' randombond<-sample(c(1:nrow(SomeBonds2016)),1) #' FullAnalysis[[randombond]] #' #' # Extracting the data frame Warnings: #' AllWarnings<-do.call(rbind,lapply(FullAnalysis, `[[`, 1)) #' summary(AllWarnings) #' # binding the Warnings to the bonds #' BondsWithWarnings<-cbind(SomeBonds2016,AllWarnings) #' #' # Extracting the data frame Traits: #' AllTraits<-do.call(rbind,lapply(FullAnalysis, `[[`, 2)) #' summary(AllTraits) #' # binding the Traits to the bonds #' BondsWithTraits<-cbind(SomeBonds2016,AllTraits) #' #' # Extracting the data frame AnnivDates: #' AnnivDates<-lapply(lapply(FullAnalysis, `[[`, 3), `[[`, 5) #' AnnivDates<-lapply(AnnivDates, `length<-`, max(lengths(AnnivDates))) #' AnnivDates<-as.data.frame(do.call(rbind, AnnivDates)) #' AnnivDates<-as.data.frame(lapply(AnnivDates, as.Date, as.Date(AllTraits$DateOrigin[1]))) #' # binding the AnnivDates to the bonds: #' BondsWithAnnivDates<-cbind(SomeBonds2016,AnnivDates) #' #' # Extracting the data frames PaySched for each bond and creating a panel: #' CoupSched<-lapply(FullAnalysis, `[[`, 4) #' CoupSchedPanel<-SomeBonds2016[rep(row.names(SomeBonds2016),sapply(CoupSched, nrow)),] #' CoupSched<-as.data.frame(do.call(rbind, CoupSched)) #' CoupSchedPanel<-cbind(CoupSchedPanel,CoupSched) #' #' #' @import Rcpp #' @importFrom stats na.omit #' @import timeDate #' @export #' @useDynLib BondValuation, .registration = TRUE #' AnnivDates<-function(Em=as.Date(NA),Mat=as.Date(NA),CpY=as.numeric(NA),FIPD=as.Date(NA),LIPD=as.Date(NA),FIAD=as.Date(NA),RV=as.numeric(NA),Coup=as.numeric(NA),DCC=as.numeric(NA),EOM=as.numeric(NA),DateOrigin=as.Date("1970-01-01"),InputCheck=1,FindEOM=FALSE,RegCF.equal=0) { if (length(Em)>1) { arglist<-Em argnames<-c("Em","Mat","CpY","FIPD","LIPD","FIAD","RV","Coup","DCC","EOM","DateOrigin","InputCheck","FindEOM","RegCF.equal") for (i in c(1:length(arglist))) { assign(argnames[i],arglist[i]) } } # Checking whether the arguments are provided as the desired classes. # This is necessary to ensure that the function can be run with apply(), which transforms all input to # class "character" before it starts calculations. # The code can deal with arguments provided as classes character, numeric or Date with format "%Y-%m-%d". # Otherwise the execution is cancelled with the appropriate error message. 
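# For illustration only (a hedged sketch, not executed; the dates below are chosen
# by the editor, not taken from the package data): assuming the default DateOrigin
# "1970-01-01", the following calls should be equivalent ways of passing the same
# bond, since character, Date and numeric inputs are all coerced internally:
#   AnnivDates(Em = "2013-11-30", Mat = "2021-04-21", CpY = 2)
#   AnnivDates(Em = as.Date("2013-11-30"), Mat = as.Date("2021-04-21"), CpY = 2)
#   AnnivDates(Em = 16039, Mat = 18738, CpY = 2)  # days since DateOrigin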
# if (InputCheck==1) { CheckedInput<-InputFormatCheck(Em=Em,Mat=Mat,CpY=CpY,FIPD=FIPD,LIPD=LIPD,FIAD=FIAD,RV=RV,Coup=Coup,DCC=DCC,EOM=EOM,DateOrigin=DateOrigin) Em<-CheckedInput$Em Mat<-CheckedInput$Mat CpY<-CheckedInput$CpY FIPD<-CheckedInput$FIPD LIPD<-CheckedInput$LIPD FIAD<-CheckedInput$FIAD RV<-CheckedInput$RV Coup<-CheckedInput$Coup DCC<-CheckedInput$DCC EOM<-CheckedInput$EOM DateOrigin<-CheckedInput$DateOrigin } Em_Orig<-Em FIPD_Orig<-FIPD LIPD_Orig<-LIPD EOM_Orig<-EOM est_EOM<-as.numeric(NA) EOM_used<-as.numeric(NA) Em_FIAD_differ<-0 EmMatMissing<-0 CpYOverride<-0 RV_set100percent<-0 NegLifeFlag<-0 ZeroFlag<-0 Em_Mat_SameMY<-0 ChronErrorFlag<-0 FIPD_LIPD_equal<-0 # if this is 1, it holds Em < FIPD = LIPD < Mat; # Em = FIPD = LIPD and FIPD = LIPD = Mat are not considered here IPD_CpY_Corrupt<-0 EOMOverride<-0 EOM_Deviation<-0 DCCOverride<-0 NoCoups<-0 est_FIPD<-as.Date(NA) est_LIPD<-as.Date(NA) Refer<-as.Date(NA) PaySched<-as.numeric(NA) AnnivDates<-as.Date(NA) CoupDates<-as.Date(NA) RealDates<-as.Date(NA) AD_indexes<-as.numeric(NA) RD_indexes<-as.numeric(NA) CD_indexes<-as.numeric(NA) index_FIPD<-as.numeric(NA) index_LIPD<-as.numeric(NA) FCPType<-as.character(NA) LCPType<-as.character(NA) f<-as.numeric(NA) l<-as.numeric(NA) if ((!(is.na(FIAD)))&(!(FIAD==Em))) { Em<-FIAD Em_FIAD_differ<-1 } if ((missing(CpY))|(is.na(CpY))) { CpY<-2 CpYOverride<-1 warning("Number of interest payments per year (CpY) is missing or NA. CpY is set 2!") } else { if (!(is.element(CpY,c(0,1,2,3,4,6,12)))) { CpY<-2 CpYOverride<-1 warning("Number of interest payments per year (CpY) is not element of {0,1,2,3,4,6,12}! CpY is set 2!") } } if ((missing(RV))|(is.na(RV))) { RV<-100 RV_set100percent<-1 warning("Redemption value (RV) is missing or NA. RV is set 100!") } if (CpY==0) { Coup<-as.numeric(0) CpY<-as.numeric(1) # FIPD<-as.Date(NA) # LIPD<-as.Date(NA) # AnnivDates<-c(Em,Mat) # AnnivDates<-AnnivDates[!duplicated(AnnivDates)] # CoupDates<-c(Mat) # CoupDates<-CoupDates[!duplicated(CoupDates)] # RealDates<-c(Em,Mat) # RealDates<-RealDates[!duplicated(RealDates)] ZeroFlag<-1 warning("This is a Zero Coupon bond! # CoupDates are (Mat), RealDates are (Em,Mat), AnnivDates are (Em,Mat). # The types of the first and last coupon periods are assigned NA!" ) } if ((missing(Em))|(is.na(Em))) { EmMatMissing<-1 warning("Issue date (Em) is missing or NA. NA created!") } else { if ((missing(Mat))|(is.na(Mat))) { EmMatMissing<-1 warning("Maturity date (Mat) is missing or NA. NA created!") } else { if ((!(Mat>Em))) { NegLifeFlag<-1 warning("Issue date (Em) is not before maturity date (Mat)! NA created!") } else { AtomVector_Em<-as.numeric(unlist(strsplit(as.character(Em),split = "-"))) Atom1Em<-AtomVector_Em[1] Atom2Em<-AtomVector_Em[2] Atom3Em<-AtomVector_Em[3] EmHelp<-as.Date(paste(Atom1Em,Atom2Em,1,sep="-")) LDM_Em<-as.numeric(Date_LDM(c(Atom1Em,Atom2Em,Atom3Em))) LDM_Em<-as.Date(paste(LDM_Em[1],LDM_Em[2],LDM_Em[3],sep="-")) AtomVector_Mat<-as.numeric(unlist(strsplit(as.character(Mat),split = "-"))) Atom1Mat<-AtomVector_Mat[1] Atom2Mat<-AtomVector_Mat[2] Atom3Mat<-AtomVector_Mat[3] MatHelp<-as.Date(paste(Atom1Mat,Atom2Mat,15,sep="-")) LDM_Mat<-as.numeric(Date_LDM(c(Atom1Mat,Atom2Mat,Atom3Mat))) LDM_Mat<-as.Date(paste(LDM_Mat[1],LDM_Mat[2],LDM_Mat[3],sep="-")) if ((Atom1Em==Atom1Mat)&(Atom2Em==Atom2Mat)) { Em_Mat_SameMY<-1 warning("The issue date and the maturity date are in the same month of the same year! CoupDates are (Mat), RealDates are (Em,Mat). 
The types of the first and last coupon periods are assigned \"short\"!") } # N_months is essential for further calculations if (CpY==1) {N_months<-12} if (CpY==2) {N_months<-6} if (CpY==3) {N_months<-4} if (CpY==4) {N_months<-3} if (CpY==6) {N_months<-2} if (CpY==12) {N_months<-1} # the following code tests for the correct chronological order of the provided dates ChronoVec_input<-c(Em,FIPD,LIPD,Mat) ChronoVec_input<-na.omit(ChronoVec_input) ChronoVec_test<-sort(ChronoVec_input) if (any(ChronoVec_input!=ChronoVec_test)) { FIPD<-as.Date(NA) LIPD<-as.Date(NA) ChronErrorFlag<-1 warning("The date inputs are in a wrong chronological order! FIPD and LIPD dropped. Note: The correct ascending chronological order is issue date (Em), first interest payment date (FIPD), last interest payment date (LIPD), maturity date (Mat). (Please note, that issue date (Em) is substituted by the first interest accrual date (FIAD) if FIAD is available and different from Em.)") } # if FIPD and/or LIPD are not dropped, it now holds Em <= FIPD <= LIPD <= Mat and Em < Mat # the following code deals with special cases of the location of the dates if (is.na(FIPD)) { if (!(is.na(LIPD))) { # Case 1: LIPD is available but FIPD is not if (!(Em<LIPD)) { LIPD<-as.Date(NA) # Em is not prior to LIPD --> LIPD is assigned NA } else { if (!(LIPD<Mat)) { LIPD<-as.Date(NA) # LIPD is not prior to Mat --> LIPD is assigned NA } } } } if (!(is.na(FIPD))) { if (is.na(LIPD)) { # Case 2: FIPD is available but LIPD is not if (!(Em<FIPD)) { FIPD<-as.Date(NA) # Em is not prior to FIPD --> FIPD is assigned NA } else { if (!(FIPD<Mat)) { FIPD<-as.Date(NA) # FIPD is not prior to Mat --> FIPD is assigned NA } } } } if ((!is.na(FIPD))&(!is.na(LIPD))) { # Case 3: FIPD and LIPD are both available if (Em<FIPD) { if (FIPD<LIPD) { if (LIPD<Mat) { # it holds Em < FIPD < LIPD < Mat --> do nothing } else { # i.e. if (LIPD==Mat) # it holds Em < FIPD < LIPD = Mat --> LIPD is redundant and is dropped LIPD<-as.Date(NA) } } else { # i.e. if (FIPD==LIPD) if (LIPD<Mat) { # it holds Em < FIPD = LIPD < Mat --> this case is considered further below, # here only the flag is changed FIPD_LIPD_equal<-1 } else { # i.e. if (LIPD==Mat) # it holds Em < FIPD = LIPD = Mat --> FIPD and LIPD are redundant and dropped FIPD<-as.Date(NA) LIPD<-as.Date(NA) } } } else { # i.e. if (Em==FIPD) if (FIPD<LIPD) { if (LIPD<Mat) { # it holds Em = FIPD < LIPD < Mat --> LIPD is redundant and is dropped FIPD<-as.Date(NA) } else { # i.e. if (LIPD==Mat) # it holds Em = FIPD < LIPD = Mat --> FIPD and LIPD are redundant and dropped FIPD<-as.Date(NA) LIPD<-as.Date(NA) } } else { # i.e. if (FIPD==LIPD) # it holds Em = FIPD = LIPD < Mat --> FIPD and LIPD are redundant and dropped FIPD<-as.Date(NA) LIPD<-as.Date(NA) } } } # If FIPD and LIPD are both dropped, it now holds: # Em < Mat and FIPD_LIPD_equal = 0 # If either FIPD or LIPD is dropped, it now holds: # Em < FIPD (or LIPD) < Mat and FIPD_LIPD_equal = 0 # If FIPD and LIPD are not both dropped, two cases can occur: # 1. Em < FIPD = LIPD < Mat and FIPD_LIPD_equal = 1 # 2. Em < FIPD < LIPD < Mat and FIPD_LIPD_equal = 0 # # But still it can happen, that FIPD and LIPD are inconsistent with CpY, such that the assumption, # that the anniversary dates between FIPD and LIPD are evenly distributed, is violated. # This happens, # 1. if FIPD and LIPD are in the same month of the same year but not on the same day, or # 2. if the month difference between FIPD and LIPD is not a multiple of the number of months implied by CpY (N_months), or # 3. 
if FIPD and LIPD are not both last day in month but their %d differ and the day figure difference between FIPD and LIPD is not due to different month lengths. # if (!is.na(FIPD)) { AtomVector_FIPD<-as.numeric(unlist(strsplit(as.character(FIPD),split = "-"))) Atom1FIPD<-AtomVector_FIPD[1] Atom2FIPD<-AtomVector_FIPD[2] Atom3FIPD<-AtomVector_FIPD[3] FIPDHelp<-as.Date(paste(Atom1FIPD,Atom2FIPD,15,sep="-")) LDM_FIPD<-as.numeric(Date_LDM(c(Atom1FIPD,Atom2FIPD,Atom3FIPD))) LDM_FIPD<-as.Date(paste(LDM_FIPD[1],LDM_FIPD[2],LDM_FIPD[3],sep="-")) } if (!is.na(LIPD)) { AtomVector_LIPD<-as.numeric(unlist(strsplit(as.character(LIPD),split = "-"))) Atom1LIPD<-AtomVector_LIPD[1] Atom2LIPD<-AtomVector_LIPD[2] Atom3LIPD<-AtomVector_LIPD[3] LIPDHelp<-as.Date(paste(Atom1LIPD,Atom2LIPD,15,sep="-")) LDM_LIPD<-as.numeric(Date_LDM(c(Atom1LIPD,Atom2LIPD,Atom3LIPD))) LDM_LIPD<-as.Date(paste(LDM_LIPD[1],LDM_LIPD[2],LDM_LIPD[3],sep="-")) } if ((!is.na(FIPD))&(!is.na(LIPD))) { if ((Atom1FIPD==Atom1LIPD)&(Atom2FIPD==Atom2LIPD)&(Atom3FIPD!=Atom3LIPD)) { FIPD<-as.Date(NA) LIPD<-as.Date(NA) IPD_CpY_Corrupt<-1 warning("FIPD and LIPD are inconsistent with CpY! FIPD and LIPD are dropped. Note: The assumption, that the anniversary dates between FIPD and LIPD are evenly distributed, is violated! Cause: FIPD and LIPD are in the same month of the same year but not on the same day.") } else { if (abs((as.POSIXlt(LIPD)$year*12+as.POSIXlt(LIPD)$mon+1)-(as.POSIXlt(FIPD)$year*12+as.POSIXlt(FIPD)$mon+1))%%N_months!=0) { FIPD<-as.Date(NA) LIPD<-as.Date(NA) IPD_CpY_Corrupt<-1 warning("FIPD and LIPD are inconsistent with CpY! FIPD and LIPD are dropped. Note: The assumption, that the anniversary dates between FIPD and LIPD are evenly distributed, is violated! Cause: The month difference between FIPD and LIPD is not a multiple of the number of months implied by CpY.") } else { if (Atom3FIPD==Atom3LIPD) { # i.e. FIPD and LIPD have the same %d # do nothing } else { # i.e. FIPD and LIPD have different %d if (LDM_FIPD==FIPD) { # i.e. FIPD is last day in month if (LDM_LIPD==LIPD) { # i.e. LIPD is last day in month # do nothing } else { # i.e. LIPD is not last day in month if ((Atom3FIPD+1)==Atom3LIPD) { # i.e. Increasing the %d of FIPD by 1 results in the %d of LIPD. # do nothing } else { # i.e. Increasing the %d of FIPD by 1 does not result in the %d of LIPD. if ((Atom3FIPD+2)==Atom3LIPD) { # i.e. Increasing the %d of FIPD by 2 results in the %d of LIPD. # do nothing } else { # i.e. Increasing the %d of FIPD by 2 does not result in the %d of LIPD. FIPD<-as.Date(NA) LIPD<-as.Date(NA) IPD_CpY_Corrupt<-1 warning("FIPD and LIPD are inconsistent with CpY! FIPD and LIPD are dropped. Note: The assumption, that the anniversary dates between FIPD and LIPD are evenly distributed, is violated! Cause: FIPD and LIPD have different %d. FIPD is last day in month. LIPD is not last day in month. The day figure difference between FIPD and LIPD is not due to different month lengths.") } } } } else { # i.e. FIPD is not last day in month if (LDM_LIPD==LIPD) { # i.e. LIPD is last day in month if ((Atom3LIPD+1)==Atom3FIPD) { # i.e. Increasing the %d of LIPD by 1 results in the %d of FIPD. # do nothing } else { # i.e. Increasing the %d of LIPD by 1 does not result in the %d of FIPD. if ((Atom3LIPD+2)==Atom3FIPD) { # i.e. Increasing the %d of LIPD by 2 results in the %d of FIPD. # do nothing } else { # i.e. Increasing the %d of LIPD by 2 does not result in the %d of FIPD. 
FIPD<-as.Date(NA) LIPD<-as.Date(NA) IPD_CpY_Corrupt<-1 warning("FIPD and LIPD are inconsistent with CpY! FIPD and LIPD are dropped. Note: The assumption, that the anniversary dates between FIPD and LIPD are evenly distributed, is violated! Cause: FIPD and LIPD have different %d. FIPD is not last day in month. LIPD is last day in month. The day figure difference between FIPD and LIPD is not due to different month lengths.") } } } else { # i.e. LIPD is not last day in month FIPD<-as.Date(NA) LIPD<-as.Date(NA) IPD_CpY_Corrupt<-1 warning("FIPD and LIPD are inconsistent with CpY! FIPD and LIPD are dropped. Note: The assumption, that the anniversary dates between FIPD and LIPD are evenly distributed, is violated! Cause: FIPD and LIPD have different %d. FIPD is not last day in month. LIPD is not last day in month.") } } } } } } # The following code determines est_EOM based on the available calendar dates. if (is.na(FIPD)) { if (is.na(LIPD)) { # Case 1: FIPD and LIPD are both NA if (LDM_Mat==Mat) { est_EOM<-1 } else { est_EOM<-0 } } else { # Case 2: FIPD is NA and LIPD is available if (LDM_LIPD==LIPD) { est_EOM<-1 } else { est_EOM<-0 } } } else { if (is.na(LIPD)) { # Case 3: FIPD is available and LIPD is NA if (LDM_FIPD==FIPD) { est_EOM<-1 } else { est_EOM<-0 } } else { # Case 4: FIPD and LIPD are both available if ((LDM_FIPD==FIPD)&(LDM_LIPD==LIPD)) { est_EOM<-1 } else { est_EOM<-0 } } } if (is.na(EOM_Orig)) { EOMOverride<-1 EOM<-est_EOM warning(paste("EOM was not provided or NA! EOM is set",EOM,". Note: The available calendar dates suggest that EOM =",est_EOM,".")) } else { if (EOM_Orig!=est_EOM) { EOM_Deviation<-1 if (FindEOM==TRUE) { EOM<-est_EOM EOMOverride<-1 warning(paste("The available calendar dates suggest that EOM =",est_EOM,". Option FindEOM = TRUE is active. EOM is set",est_EOM,".")) } else { if ((est_EOM==0)&(EOM==1)) { EOM<-est_EOM EOMOverride<-1 warning(paste("The provided EOM =",EOM_Orig,"conflicts with one or several provided calendar dates. EOM is set",est_EOM,".")) } if ((est_EOM==1)&(EOM==0)) { warning(paste("The available calendar dates suggest that EOM =",est_EOM,". Option FindEOM = FALSE is active. Provided EOM is not overridden and remains EOM =",EOM,".")) } } } } # If DCC is not provided or NA or not element of {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}, the following code sets it 2 (Act/Act (ICMA)). if ((missing(DCC))|(is.na(DCC))) { DCC<-2 DCCOverride<-1 warning("The day count identifier (DCC) is missing or NA. DCC is set 2 (Act/Act (ICMA))!") } else { if (!(is.element(DCC,c(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16)))) { DCC<-2 DCCOverride<-1 warning("The day count identifier (DCC) is not element of {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}! DCC is set 2 (Act/Act (ICMA))!") } } ######>>>>> ######>>>>> here the computation of the date vectors begins ######>>>>> if (is.na(FIPD)) { # i.e. if FIPD is not given if (is.na(LIPD)) { # i.e. if LIPD is not given # If neither FIPD nor LIPD is available, the following code evaluates the bond based only upon the "essential" variables Em, Mat, CpY. # It creates: # estimated FIPD and estimated LIPD # Vector RealDates: (Em,estimated FIPD,regular coupon payment dates,estimated LIPD,Mat) # Vector CoupDates: (estimated FIPD,regular coupon payment dates,estimated LIPD,Mat) # Vector AnnivDates: (notional Em, coupon payment dates incl. estimated FIPD and estimated LIPD, Mat) # Em is only element of AnnivDates, if the bond has a regular first coupon period. 
# numeric E: the issue date's position in time relative to the bond's notional issue date # numeric M: the maturity date's position in time relative to the bond's notional issue date # numeric f: length of the first coupon period as a fraction of a regular coupon period # numeric l: length of the final coupon period as a fraction of a regular coupon period # # Assumptions: # >>> All coupon periods but the first are "regular". <<< # >>> The value of EOM determines the location of all coupon payment dates. <<< # >>> The interest payment dates between the estimated FIPD and Mat are evenly distributed. <<< # >>> Since FIPD is not given, it is impossible to distinguish between a "short" and "long" odd first coupon period, # without an assumption on the number of interest payment dates. # Consequently the first coupon period is either "regular" or "short". <<< # ### ### Creating AnnivDates ### AnnivDates<-rev(seq(MatHelp,EmHelp,by=paste("-",N_months," months",sep=""))) AnnivDates<-sort(na.omit(AnnivDates[!duplicated(AnnivDates)])) # assigning the reference date that determines the day figures of all AnnivDates Atom1Refer<-Atom1Mat Atom2Refer<-Atom2Mat Atom3Refer<-Atom3Mat if (EOM==1) { AnnivDates<-as.Date(timeLastDayInMonth(AnnivDates)) } else { AnnivDates_A<-as.Date(ISOdatetime(t(atoms(as.timeDate(AnnivDates))[1]),t(atoms(as.timeDate(AnnivDates))[2]),Atom3Refer,12,0,0)) if (length(which(is.na(AnnivDates_A)))!=0) { # Assuming %d of Mat as the %d of all AnnivDates produced NAs. NAs are substituted by the respective last day in month. nas<-which(is.na(AnnivDates_A)) AnnivDates_B<-as.Date(timeLastDayInMonth(AnnivDates[nas])) AnnivDates<-sort(na.omit(append(AnnivDates_A,AnnivDates_B))) } else { AnnivDates<-AnnivDates_A } } AnnivDates<-sort(na.omit(AnnivDates[!duplicated(AnnivDates)])) # creating the anniversary date preceding Em if (AnnivDates[1]!=Em) { AtomVector_AD1<-as.numeric(unlist(strsplit(as.character(AnnivDates[1]),split = "-"))) Atom1AD1<-AtomVector_AD1[1] Atom2AD1<-AtomVector_AD1[2] Atom3AD1<-AtomVector_AD1[3] PrevDate<-as.numeric(CppPrevDate(c(Atom1AD1,Atom2AD1,Atom3AD1,Atom1Em,Atom2Em,Atom3Em,Atom1Refer,Atom2Refer,Atom3Refer,CpY,EOM))) if (any(is.na(PrevDate))) { PrevDate<-as.Date(NA) } else { PrevDate<-as.Date(paste(PrevDate[1],PrevDate[2],PrevDate[3],sep="-")) } AnnivDates<-c(PrevDate,AnnivDates) AnnivDates<-sort(na.omit(AnnivDates[!duplicated(AnnivDates)])) # final AnnivDates created! } AD_indexes<-c(1:length(AnnivDates))-1 AD_List<-list(AnnivDates,AD_indexes) index_FIPD<-1 index_LIPD<-AD_indexes[length(AD_indexes)-1] ### ### now estimating FIPD and LIPD and creating the vectors RealDates and CoupDates ### if (length(AnnivDates)==2) { # this is the case when there are no interest payments between Em and Mat NoCoups<-1 est_FIPD<-as.Date(NA) est_LIPD<-as.Date(NA) warning(paste("There are no interest payments between the issue date ( Em =", Em,") and the maturity date ( Mat =",Mat,"), but the number of interest payments per year is CpY =", CpY,". The estimated FIPD and LIPD are assigned NA.")) } else { est_FIPD<-AnnivDates[2] est_LIPD<-AnnivDates[length(AnnivDates)-1] } AD_in_RD<-as.numeric(which((!(AnnivDates<est_FIPD))&(!(AnnivDates>est_LIPD)))) if (length(AD_in_RD)==0) { RealDates<-c(Em,Mat) } else { RealDates<-c(Em,AnnivDates[AD_in_RD],Mat) } RealDates<-RealDates[!duplicated(RealDates)] CoupDates<-RealDates[-1] } else { # i.e. 
if LIPD is given # If LIPD is available but FIPD is not, the following code creates: # estimated FIPD # Vector RealDates: (Em,estimated FIPD,regular coupon payment dates,LIPD,Mat) # Vector CoupDates: (estimated FIPD,regular coupon payment dates,LIPD,Mat) # Vector AnnivDates: (notional Em, coupon payment dates incl. estimated FIPD and LIPD, # possibly notional coupon payment dates between LIPD and Mat, # notional Mat) # Em is only element of AnnivDates, if the bond has a regular first coupon period. # If Em is element of AnnivDates, it is the first element. # Mat is element of AnnivDates, if the bond has a regular final coupon period or # if the bond has a long final coupon period whose length is a multiple of a # regular coupon period. # If Mat is element of AnnivDates, it is the last element. # numeric E: the issue date's position in time relative to the bond's notional issue date # numeric M: the maturity date's position in time relative to the bond's notional issue date # numeric f: length of the first coupon period as a fraction of a regular coupon period # numeric l: length of the final coupon period as a fraction of a regular coupon period # # Since LIPD is available, the length of the last coupon period is determined by LIPD and Mat, # so it can be "long", "regular" or "short". # # To find the interest payment dates between the estimated FIPD and LIPD the following assumptions are met: # >>> The interest payment dates between the estimated FIPD and LIPD are evenly distributed. <<< # >>> The value of EOM determines the location of all coupon payment dates. <<< # >>> Since FIPD is not given, it is impossible to distinguish between a "short" and "long" odd first coupon period, # without an assumption on the number of interest payment dates. # Consequently the first coupon period is either "regular" or "short". <<< # ### ### Creating AnnivDates ### AnnivDates_Head<-rev(seq(LIPDHelp,EmHelp,by=paste("-",N_months," months",sep=""))) AnnivDates_Tail<-seq(LIPDHelp,LDM_Mat,by=paste(N_months," months",sep="")) AnnivDates<-c(AnnivDates_Head,LIPD,AnnivDates_Tail) AnnivDates<-sort(na.omit(AnnivDates[!duplicated(AnnivDates)])) # assigning the reference date that determines the day figures of all AnnivDates Atom1Refer<-Atom1LIPD Atom2Refer<-Atom2LIPD Atom3Refer<-Atom3LIPD if (EOM==1) { AnnivDates<-as.Date(timeLastDayInMonth(AnnivDates)) } else { AnnivDates_A<-as.Date(ISOdatetime(t(atoms(as.timeDate(AnnivDates))[1]),t(atoms(as.timeDate(AnnivDates))[2]),Atom3Refer,12,0,0)) if (length(which(is.na(AnnivDates_A)))!=0) { # Assuming %d of LIPD as the %d of all AnnivDates produced NAs. NAs are substituted by the respective last day in month. 
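# (Hedged numeric illustration, added for clarity: with a reference day figure of
# 31, an entry such as ISOdatetime(2016, 2, 31, 12, 0, 0) evaluates to NA; the
# lines below map each such entry to its last day in month via
# timeLastDayInMonth(), e.g. to "2016-02-29".)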
nas<-which(is.na(AnnivDates_A)) AnnivDates_B<-as.Date(timeLastDayInMonth(AnnivDates[nas])) AnnivDates<-sort(na.omit(append(AnnivDates_A,AnnivDates_B))) } else { AnnivDates<-AnnivDates_A } } AnnivDates<-sort(AnnivDates[!duplicated(AnnivDates)]) # creating the anniversary date preceding Em if (AnnivDates[1]!=Em) { AtomVector_AD1<-as.numeric(unlist(strsplit(as.character(AnnivDates[1]),split = "-"))) Atom1AD1<-AtomVector_AD1[1] Atom2AD1<-AtomVector_AD1[2] Atom3AD1<-AtomVector_AD1[3] PrevDate<-as.numeric(CppPrevDate(c(Atom1AD1,Atom2AD1,Atom3AD1,Atom1Em,Atom2Em,Atom3Em,Atom1Refer,Atom2Refer,Atom3Refer,CpY,EOM))) if (any(is.na(PrevDate))) { PrevDate<-as.Date(NA) } else { PrevDate<-as.Date(paste(PrevDate[1],PrevDate[2],PrevDate[3],sep="-")) } AnnivDates<-c(PrevDate,AnnivDates) AnnivDates<-sort(na.omit(AnnivDates[!duplicated(AnnivDates)])) } # creating the anniversary date succeeding Mat if (AnnivDates[length(AnnivDates)]!=Mat) { AtomVector_ADfin<-as.numeric(unlist(strsplit(as.character(AnnivDates[length(AnnivDates)]),split = "-"))) Atom1ADfin<-AtomVector_ADfin[1] Atom2ADfin<-AtomVector_ADfin[2] Atom3ADfin<-AtomVector_ADfin[3] SuccDate<-as.numeric(CppSuccDate(c(Atom1ADfin,Atom2ADfin,Atom3ADfin,Atom1Mat,Atom2Mat,Atom3Mat,Atom1Refer,Atom2Refer,Atom3Refer,CpY,EOM))) if (any(is.na(SuccDate))) { SuccDate<-as.Date(NA) } else { SuccDate<-as.Date(paste(SuccDate[1],SuccDate[2],SuccDate[3],sep="-")) } AnnivDates<-c(AnnivDates,SuccDate) AnnivDates<-sort(na.omit(AnnivDates[!duplicated(AnnivDates)])) # final AnnivDates created! } AD_indexes<-c(1:length(AnnivDates))-1 AD_List<-list(AnnivDates,AD_indexes) index_FIPD<-1 index_LIPD<-AD_List[[2]][which(AD_List[[1]]==LIPD)] ### ### now estimating FIPD and creating the vectors RealDates and CoupDates ### # # AnnivDates has at least 3 elements # AD1 can be Em, AD_Fin can be Mat, AD_PreFin can be LIPD # est_FIPD<-AnnivDates[2] AD_in_RD<-as.numeric(which((!(AnnivDates<est_FIPD))&(!(AnnivDates>LIPD)))) if (length(AD_in_RD)==0) { RealDates<-c(Em,Mat) } else { RealDates<-c(Em,AnnivDates[AD_in_RD],Mat) } RealDates<-RealDates[!duplicated(RealDates)] CoupDates<-RealDates[-1] } else { # i.e. if FIPD is given if (is.na(LIPD)) { # i.e. if LIPD is not given # If FIPD is available but LIPD is not, the following code creates: # estimated LIPD # Vector RealDates: (Em,FIPD,regular coupon payment dates,estimated LIPD,Mat) # Vector CoupDates: (FIPD,regular coupon payment dates,estimated LIPD,Mat) # Vector AnnivDates: (notional Em, possibly notional coupon payment dates between Em and FIPD, # coupon payment dates incl. FIPD and estimated LIPD, notional Mat) # Em is element of AnnivDates, if the bond has a regular first coupon period or # if the bond has a long first coupon period whose length is a multiple of a # regular coupon period. # If Em is element of AnnivDates, it is the first element. # Mat is only element of AnnivDates, if the bond has a regular final coupon period. # If Mat is element of AnnivDates, it is the last element. # numeric E: the issue date's position in time relative to the bond's notional issue date # numeric M: the maturity date's position in time relative to the bond's notional issue date # numeric f: length of the first coupon period as a fraction of a regular coupon period # numeric l: length of the final coupon period as a fraction of a regular coupon period # # Since FIPD is available, the length of the first coupon period is determined by Em and FIPD, # so it can be "long", "regular" or "short". 
# # To find the interest payment dates between FIPD and the estimated LIPD the following assumptions are met: # >>> The interest payment dates between FIPD and the estimated LIPD are evenly distributed. <<< # >>> The value of EOM determines the location of all coupon payment dates. <<< # >>> Since LIPD is not given, it is impossible to distinguish between a "short" and "long" odd final coupon period, # without an assumption on the number of interest payment dates. # Consequently the final coupon period is either "regular" or "short". <<< # ### ### Creating AnnivDates ### AnnivDates_Head<-rev(seq(FIPDHelp,EmHelp,by=paste("-",N_months," months",sep=""))) AnnivDates_Tail<-seq(FIPDHelp,LDM_Mat,by=paste(N_months," months",sep="")) AnnivDates<-c(AnnivDates_Head,FIPD,AnnivDates_Tail) AnnivDates<-sort(na.omit(AnnivDates[!duplicated(AnnivDates)])) # assigning the reference date that determines the day figures of all AnnivDates Atom1Refer<-Atom1FIPD Atom2Refer<-Atom2FIPD Atom3Refer<-Atom3FIPD if (EOM==1) { AnnivDates<-as.Date(timeLastDayInMonth(AnnivDates)) } else { AnnivDates_A<-as.Date(ISOdatetime(t(atoms(as.timeDate(AnnivDates))[1]),t(atoms(as.timeDate(AnnivDates))[2]),Atom3Refer,12,0,0)) if (length(which(is.na(AnnivDates_A)))!=0) { # Assuming %d of LIPD as the %d of all AnnivDates produced NAs. NAs are substituted by the respective last day in month. nas<-which(is.na(AnnivDates_A)) AnnivDates_B<-as.Date(timeLastDayInMonth(AnnivDates[nas])) AnnivDates<-sort(na.omit(append(AnnivDates_A,AnnivDates_B))) } else { AnnivDates<-AnnivDates_A } } AnnivDates<-sort(AnnivDates[!duplicated(AnnivDates)]) # creating the anniversary date preceding Em if (AnnivDates[1]!=Em) { AtomVector_AD1<-as.numeric(unlist(strsplit(as.character(AnnivDates[1]),split = "-"))) Atom1AD1<-AtomVector_AD1[1] Atom2AD1<-AtomVector_AD1[2] Atom3AD1<-AtomVector_AD1[3] PrevDate<-as.numeric(CppPrevDate(c(Atom1AD1,Atom2AD1,Atom3AD1,Atom1Em,Atom2Em,Atom3Em,Atom1Refer,Atom2Refer,Atom3Refer,CpY,EOM))) if (any(is.na(PrevDate))) { PrevDate<-as.Date(NA) } else { PrevDate<-as.Date(paste(PrevDate[1],PrevDate[2],PrevDate[3],sep="-")) } AnnivDates<-c(PrevDate,AnnivDates) AnnivDates<-sort(na.omit(AnnivDates[!duplicated(AnnivDates)])) } # creating the anniversary date succeeding Mat if (AnnivDates[length(AnnivDates)]!=Mat) { AtomVector_ADfin<-as.numeric(unlist(strsplit(as.character(AnnivDates[length(AnnivDates)]),split = "-"))) Atom1ADfin<-AtomVector_ADfin[1] Atom2ADfin<-AtomVector_ADfin[2] Atom3ADfin<-AtomVector_ADfin[3] SuccDate<-as.numeric(CppSuccDate(c(Atom1ADfin,Atom2ADfin,Atom3ADfin,Atom1Mat,Atom2Mat,Atom3Mat,Atom1Refer,Atom2Refer,Atom3Refer,CpY,EOM))) if (any(is.na(SuccDate))) { SuccDate<-as.Date(NA) } else { SuccDate<-as.Date(paste(SuccDate[1],SuccDate[2],SuccDate[3],sep="-")) } AnnivDates<-c(AnnivDates,SuccDate) AnnivDates<-sort(na.omit(AnnivDates[!duplicated(AnnivDates)])) # final AnnivDates created! 
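# (Clarifying note, added by the editor: at this point the anniversary grid reaches
# from a notional anniversary date on or before Em to a notional one on or after
# Mat; the DIST-based computation of E, M, f and l further below relies on having
# these two boundary grid points available.)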
} AD_indexes<-c(1:length(AnnivDates))-(which(AnnivDates==FIPD)-1) AD_List<-list(AnnivDates,AD_indexes) index_FIPD<-AD_List[[2]][which(AD_List[[1]]==FIPD)] index_LIPD<-AD_indexes[length(AD_indexes)-1] ### ### now estimating LIPD and creating the vectors RealDates and CoupDates ### # # AnnivDates has at least 3 elements # AD1 can be Em, AD_Fin can be Mat, AD_2 can be FIPD # est_LIPD<-AnnivDates[length(AnnivDates)-1] AD_in_RD<-as.numeric(which((!(AnnivDates<FIPD))&(!(AnnivDates>est_LIPD)))) if (length(AD_in_RD)==0) { RealDates<-c(Em,Mat) } else { RealDates<-c(Em,AnnivDates[AD_in_RD],Mat) } RealDates<-RealDates[!duplicated(RealDates)] CoupDates<-RealDates[-1] } else { # i.e. if LIPD is given # If FIPD and LIPD are both available the following code creates: # Vector RealDates: (Em,FIPD,regular coupon payment dates,LIPD,Mat) # Vector CoupDates: (FIPD,regular coupon payment dates,LIPD,Mat) # Vector AnnivDates: (notional Em, possibly notional coupon payment dates between Em and FIPD, # coupon payment dates incl. FIPD and LIPD, possibly notional coupon payment # dates between LIPD and Mat, notional Mat) # Em is element of AnnivDates, if the bond has a regular first coupon period or # if the bond has a long first coupon period whose length is a multiple of a # regular coupon period. # If Em is element of AnnivDates, it is the first element. # Mat is element of AnnivDates, if the bond has a regular final coupon period or # if the bond has a long final coupon period whose length is a multiple of a # regular coupon period. # If Mat is element of AnnivDates, it is the last element. # numeric E: the issue date's position in time relative to the bond's notional issue date # numeric M: the maturity date's position in time relative to the bond's notional issue date # numeric f: length of the first coupon period as a fraction of a regular coupon period # numeric l: length of the final coupon period as a fraction of a regular coupon period # # Since FIPD and LIPD are both available, the lengths of the first and last coupon periods are determinate # and can be "long", "regular" or "short". # # To find the interest payment dates between FIPD and LIPD the following assumptions are met: # >>> The interest payment dates between FIPD and LIPD are evenly distributed. <<< # >>> The value of EOM determines the location of all coupon payment dates. <<< # # Notes: # If the %d of both FIPD and LIPD are equal or both last day in month there is no ambiguity on the concrete dates. # If the %d of FIPD and LIPD differ and are not both last day in month, and # the day figure difference between FIPD and LIPD is not due to different month lengths, # then the first assumption is violated and FIPD and LIPD are already dropped with IPD_CpY_Corrupt = 1. # If IPD_CpY_Corrupt = 0, then EOM = 0 and the higher day figure among FIPD and LIPD determines the # day figures of all anniversary dates. 
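# (Hedged example of the reference-date rule implemented below, with dates chosen
# by the editor: with EOM = 0, FIPD = 2020-04-30 and LIPD = 2020-10-31, both dates
# are last day in month, so the higher day figure (31) wins and LIPD serves as the
# reference date for the day figures of all anniversary dates.)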
# # ### ### Creating AnnivDates ### AnnivDates_Head<-rev(seq(FIPDHelp,EmHelp,by=paste("-",N_months," months",sep=""))) AnnivDates_Tail<-seq(FIPDHelp,LDM_Mat,by=paste(N_months," months",sep="")) AnnivDates<-c(AnnivDates_Head,FIPD,LIPD,AnnivDates_Tail) AnnivDates<-sort(na.omit(AnnivDates[!duplicated(AnnivDates)])) # assigning the reference date that determines the day figures of all AnnivDates if (EOM==0) { if ((LDM_FIPD==FIPD)&(LDM_LIPD==LIPD)) { if (Atom3LIPD>Atom3FIPD) { Atom1Refer<-Atom1LIPD Atom2Refer<-Atom2LIPD Atom3Refer<-Atom3LIPD } else { Atom1Refer<-Atom1FIPD Atom2Refer<-Atom2FIPD Atom3Refer<-Atom3FIPD } } else { if (LDM_LIPD==LIPD) { Atom1Refer<-Atom1FIPD Atom2Refer<-Atom2FIPD Atom3Refer<-Atom3FIPD } else { Atom1Refer<-Atom1LIPD Atom2Refer<-Atom2LIPD Atom3Refer<-Atom3LIPD } } } else { Atom1Refer<-Atom1LIPD Atom2Refer<-Atom2LIPD Atom3Refer<-Atom3LIPD } if (EOM==1) { AnnivDates<-as.Date(timeLastDayInMonth(AnnivDates)) } else { AnnivDates_A<-as.Date(ISOdatetime(t(atoms(as.timeDate(AnnivDates))[1]),t(atoms(as.timeDate(AnnivDates))[2]),Atom3Refer,12,0,0)) if (length(which(is.na(AnnivDates_A)))!=0) { # Assuming %d of LIPD as the %d of all AnnivDates produced NAs. NAs are substituted by the respective last day in month. nas<-which(is.na(AnnivDates_A)) AnnivDates_B<-as.Date(timeLastDayInMonth(AnnivDates[nas])) AnnivDates<-sort(na.omit(append(AnnivDates_A,AnnivDates_B))) } else { AnnivDates<-AnnivDates_A } } AnnivDates<-sort(AnnivDates[!duplicated(AnnivDates)]) # creating the anniversary date preceding Em if (AnnivDates[1]!=Em) { AtomVector_AD1<-as.numeric(unlist(strsplit(as.character(AnnivDates[1]),split = "-"))) Atom1AD1<-AtomVector_AD1[1] Atom2AD1<-AtomVector_AD1[2] Atom3AD1<-AtomVector_AD1[3] PrevDate<-as.numeric(CppPrevDate(c(Atom1AD1,Atom2AD1,Atom3AD1,Atom1Em,Atom2Em,Atom3Em,Atom1Refer,Atom2Refer,Atom3Refer,CpY,EOM))) if (any(is.na(PrevDate))) { PrevDate<-as.Date(NA) } else { PrevDate<-as.Date(paste(PrevDate[1],PrevDate[2],PrevDate[3],sep="-")) } AnnivDates<-c(PrevDate,AnnivDates) AnnivDates<-sort(na.omit(AnnivDates[!duplicated(AnnivDates)])) } # creating the anniversary date succeeding Mat if (AnnivDates[length(AnnivDates)]!=Mat) { AtomVector_ADfin<-as.numeric(unlist(strsplit(as.character(AnnivDates[length(AnnivDates)]),split = "-"))) Atom1ADfin<-AtomVector_ADfin[1] Atom2ADfin<-AtomVector_ADfin[2] Atom3ADfin<-AtomVector_ADfin[3] SuccDate<-as.numeric(CppSuccDate(c(Atom1ADfin,Atom2ADfin,Atom3ADfin,Atom1Mat,Atom2Mat,Atom3Mat,Atom1Refer,Atom2Refer,Atom3Refer,CpY,EOM))) if (any(is.na(SuccDate))) { SuccDate<-as.Date(NA) } else { SuccDate<-as.Date(paste(SuccDate[1],SuccDate[2],SuccDate[3],sep="-")) } AnnivDates<-c(AnnivDates,SuccDate) AnnivDates<-sort(na.omit(AnnivDates[!duplicated(AnnivDates)])) # final AnnivDates created! 
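# (Clarifying note, added by the editor: in this branch FIPD and LIPD are both
# genuine grid points of AnnivDates, so index_FIPD and index_LIPD below can be
# read off AD_List directly instead of being estimated.)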
} AD_indexes<-c(1:length(AnnivDates))-(which(AnnivDates==FIPD)-1) AD_List<-list(AnnivDates,AD_indexes) index_FIPD<-AD_List[[2]][which(AD_List[[1]]==FIPD)] index_LIPD<-AD_List[[2]][which(AD_List[[1]]==LIPD)] ### ### now creating the vectors RealDates and CoupDates ### # # AnnivDates has at least 3 elements # AD1 can be Em, AD_Fin can be Mat, AD_2 can be FIPD and LIPD # if (FIPD==LIPD) { # Dealing with the case Em < FIPD = LIPD < Mat CoupDates<-c(FIPD,Mat) RealDates<-c(Em,FIPD,Mat) } else { # Dealing with the case Em < FIPD < LIPD < Mat AD_in_RD<-as.numeric(which((!(AnnivDates<FIPD))&(!(AnnivDates>LIPD)))) if (length(AD_in_RD)==0) { RealDates<-c(Em,Mat) } else { RealDates<-c(Em,AnnivDates[AD_in_RD],Mat) } RealDates<-RealDates[!duplicated(RealDates)] CoupDates<-RealDates[-1] } } } ### ### now using the function DIST to find E, M, f and l ### AtomVector_AD1<-as.numeric(unlist(strsplit(as.character(AnnivDates[1]),split = "-"))) Atom1AD1<-AtomVector_AD1[1] Atom2AD1<-AtomVector_AD1[2] Atom3AD1<-AtomVector_AD1[3] AtomVector_AD2<-as.numeric(unlist(strsplit(as.character(AnnivDates[2]),split = "-"))) Atom1AD2<-AtomVector_AD2[1] Atom2AD2<-AtomVector_AD2[2] Atom3AD2<-AtomVector_AD2[3] AtomVector_AD_PreFin<-as.numeric(unlist(strsplit(as.character(AnnivDates[length(AnnivDates)-1]),split = "-"))) Atom1AD_PreFin<-AtomVector_AD_PreFin[1] Atom2AD_PreFin<-AtomVector_AD_PreFin[2] Atom3AD_PreFin<-AtomVector_AD_PreFin[3] AtomVector_AD_Fin<-as.numeric(unlist(strsplit(as.character(AnnivDates[length(AnnivDates)]),split = "-"))) Atom1AD_Fin<-AtomVector_AD_Fin[1] Atom2AD_Fin<-AtomVector_AD_Fin[2] Atom3AD_Fin<-AtomVector_AD_Fin[3] if (is.element(DCC,c(1,3,5,6,8,10,11,12,15))) { Em_Num<-DIST(c(DCC,Atom1AD1,Atom2AD1,Atom3AD1,Atom1Em,Atom2Em,Atom3Em))[2] Em_Den<-DIST(c(DCC,Atom1AD1,Atom2AD1,Atom3AD1,Atom1AD2,Atom2AD2,Atom3AD2))[2] Mat_Num<-DIST(c(DCC,Atom1AD_PreFin,Atom2AD_PreFin,Atom3AD_PreFin,Atom1Mat,Atom2Mat,Atom3Mat))[2] Mat_Den<-DIST(c(DCC,Atom1AD_PreFin,Atom2AD_PreFin,Atom3AD_PreFin,Atom1AD_Fin,Atom2AD_Fin,Atom3AD_Fin))[2] } if (DCC==2|DCC==14) { OrigDCC<-DCC DCC<-2 Em_Num<-DIST(c(DCC,Atom1AD1,Atom2AD1,Atom3AD1,Atom1AD1,Atom2AD1,Atom3AD1,Atom1AD2,Atom2AD2,Atom3AD2, Atom1AD1,Atom2AD1,Atom3AD1,Atom1Em,Atom2Em,Atom3Em,Atom1AD2,Atom2AD2,Atom3AD2, AD_indexes[1],AD_indexes[2],CpY))[2] Em_Den<-DIST(c(DCC,Atom1AD1,Atom2AD1,Atom3AD1,Atom1AD1,Atom2AD1,Atom3AD1,Atom1AD2,Atom2AD2,Atom3AD2, Atom1AD2,Atom2AD2,Atom3AD2,Atom1AD2,Atom2AD2,Atom3AD2,Atom1AD2,Atom2AD2,(Atom3AD2+1), AD_indexes[2],AD_indexes[2],CpY))[2] # Note: the 2nd line in Em_Den<-DIST(c(... should correctly be: # ...,Atom1AD2,Atom2AD2,Atom3AD2,Atom1AD2,Atom2AD2,Atom3AD2,Atom1AD3,Atom2AD3,Atom3AD3,... 
# but since Em_Den calculates the distance between two coupon dates, the second summand in # DIST_2(PCD(t_E,AD),NCD(t_E,AD)) is 0 anyway Mat_Num<-DIST(c(DCC,Atom1AD_PreFin,Atom2AD_PreFin,Atom3AD_PreFin,Atom1AD_PreFin,Atom2AD_PreFin,Atom3AD_PreFin,Atom1AD_Fin,Atom2AD_Fin,Atom3AD_Fin, Atom1AD_PreFin,Atom2AD_PreFin,Atom3AD_PreFin,Atom1Mat,Atom2Mat,Atom3Mat,Atom1AD_Fin,Atom2AD_Fin,Atom3AD_Fin, AD_indexes[length(AD_indexes)-1],AD_indexes[length(AD_indexes)],CpY))[2] Mat_Den<-DIST(c(DCC,Atom1AD_PreFin,Atom2AD_PreFin,Atom3AD_PreFin,Atom1AD_PreFin,Atom2AD_PreFin,Atom3AD_PreFin,Atom1AD_Fin,Atom2AD_Fin,Atom3AD_Fin, Atom1AD_Fin,Atom2AD_Fin,Atom3AD_Fin,Atom1AD_Fin,Atom2AD_Fin,Atom3AD_Fin,Atom1AD_Fin,Atom2AD_Fin,(Atom3AD_Fin+1), AD_indexes[length(AD_indexes)],AD_indexes[length(AD_indexes)],CpY))[2] # Note: equivalently to the note above here strictly speaking one should consider # the coupon anniversary date after AD_Fin in the 2nd line DCC<-OrigDCC } if (DCC==4) { Em_Num<-DIST(c(DCC,Atom1AD1,Atom2AD1,Atom3AD1,Atom1Em,Atom2Em,Atom3Em,Atom1AD2,CpY))[2] Em_Den<-DIST(c(DCC,Atom1AD1,Atom2AD1,Atom3AD1,Atom1AD2,Atom2AD2,Atom3AD2,Atom1AD2,CpY))[2] Mat_Num<-DIST(c(DCC,Atom1AD_PreFin,Atom2AD_PreFin,Atom3AD_PreFin,Atom1Mat,Atom2Mat,Atom3Mat,Atom1AD_Fin,CpY))[2] Mat_Den<-DIST(c(DCC,Atom1AD_PreFin,Atom2AD_PreFin,Atom3AD_PreFin,Atom1AD_Fin,Atom2AD_Fin,Atom3AD_Fin,Atom1AD_Fin,CpY))[2] } if (DCC==7) { Em_Num<-DIST(c(DCC,Atom1AD1,Atom2AD1,Atom3AD1,Atom1Em,Atom2Em,Atom3Em,Atom1Mat,Atom2Mat,Atom3Mat))[2] Em_Den<-DIST(c(DCC,Atom1AD1,Atom2AD1,Atom3AD1,Atom1AD2,Atom2AD2,Atom3AD2,Atom1Mat,Atom2Mat,Atom3Mat))[2] Mat_Num<-DIST(c(DCC,Atom1AD_PreFin,Atom2AD_PreFin,Atom3AD_PreFin,Atom1Mat,Atom2Mat,Atom3Mat,Atom1Mat,Atom2Mat,Atom3Mat))[2] Mat_Den<-DIST(c(DCC,Atom1AD_PreFin,Atom2AD_PreFin,Atom3AD_PreFin,Atom1AD_Fin,Atom2AD_Fin,Atom3AD_Fin,Atom1Mat,Atom2Mat,Atom3Mat))[2] } if (is.element(DCC,c(9,13))) { Em_Num<-DIST(c(DCC,Atom1AD1,Atom2AD1,Atom3AD1,Atom1Em,Atom2Em,Atom3Em,EOM))[2] Em_Den<-DIST(c(DCC,Atom1AD1,Atom2AD1,Atom3AD1,Atom1AD2,Atom2AD2,Atom3AD2,EOM))[2] Mat_Num<-DIST(c(DCC,Atom1AD_PreFin,Atom2AD_PreFin,Atom3AD_PreFin,Atom1Mat,Atom2Mat,Atom3Mat,EOM))[2] Mat_Den<-DIST(c(DCC,Atom1AD_PreFin,Atom2AD_PreFin,Atom3AD_PreFin,Atom1AD_Fin,Atom2AD_Fin,Atom3AD_Fin,EOM))[2] } if (DCC==16) { NonBus.AD1.Em<-length(which((NonBusDays.Brazil$Date>=AnnivDates[1])&(NonBusDays.Brazil$Date<Em))) NonBus.AD1.AD2<-length(which((NonBusDays.Brazil$Date>=AnnivDates[1])&(NonBusDays.Brazil$Date<AnnivDates[2]))) NonBus.ADPreFin.Mat<-length(which((NonBusDays.Brazil$Date>=AnnivDates[length(AnnivDates)-1])&(NonBusDays.Brazil$Date<Mat))) NonBus.ADPreFin.ADFin<-length(which((NonBusDays.Brazil$Date>=AnnivDates[length(AnnivDates)-1])&(NonBusDays.Brazil$Date<AnnivDates[length(AnnivDates)]))) Em_Num<-DIST(c(DCC,Atom1AD1,Atom2AD1,Atom3AD1,Atom1Em,Atom2Em,Atom3Em,NonBus.AD1.Em))[2] Em_Den<-DIST(c(DCC,Atom1AD1,Atom2AD1,Atom3AD1,Atom1AD2,Atom2AD2,Atom3AD2,NonBus.AD1.AD2))[2] Mat_Num<-DIST(c(DCC,Atom1AD_PreFin,Atom2AD_PreFin,Atom3AD_PreFin,Atom1Mat,Atom2Mat,Atom3Mat,NonBus.ADPreFin.Mat))[2] Mat_Den<-DIST(c(DCC,Atom1AD_PreFin,Atom2AD_PreFin,Atom3AD_PreFin,Atom1AD_Fin,Atom2AD_Fin,Atom3AD_Fin,NonBus.ADPreFin.ADFin))[2] } E<-Em_Num/Em_Den+AD_indexes[1] M<-Mat_Num/Mat_Den+AD_indexes[length(AD_indexes)-1] f<-1-E l<-M-index_LIPD if (index_LIPD>=1) { CD_indexes<-c(c(seq(1,index_LIPD,by=1)),M) } else { CD_indexes<-M } RD_indexes<-c(E,CD_indexes) RD_indexes<-sort(na.omit(RD_indexes[!duplicated(RD_indexes)])) Refer<-as.Date(paste(Atom1Refer,Atom2Refer,Atom3Refer,sep="-")) # # The 
following code evaluates the lengths of the first and the final coupon # periods f and l to determine FCPType and LCPType: # character string FCPType: indicates the type of the first coupon period # character string LCPType: indicates the type of the last coupon period if (round(f,3)>1) { FCPType<-"long" } else { if (round(f,3)<1) { FCPType<-"short" } else { FCPType<-"regular" } } if (round(l,3)>1) { LCPType<-"long" } else { if (round(l,3)<1) { LCPType<-"short" } else { LCPType<-"regular" } } if (Em_Mat_SameMY==1) { LCPType<-FCPType l<-f } # for DCC = {1,3,5,6,8,10,11,12,15} x is a NumericMatrix with 9 columns, # each row containing the vector: # c(RV,Coup,DCC,Y1,M1,D1,Y2,M2,D2) with # Y1-M1-D1 = t_a ; Y2-M2-D2 = t_b # # for DCC = {2,14} x is a NumericMatrix with 24 columns, # each row containing the vector: # c(RV,Coup,DCC,Y1,M1,D1,Y2,M2,D2,Y3,M3,D3,Y4,M4,D4,Y5,M5,D5,Y6,M6,D6,P,N,CpY) with # Y1-M1-D1 = PCD(t_a,AD) ; Y2-M2-D2 = t_a ; Y3-M3-D3 = NCD(t_a,AD) # Y4-M4-D4 = PCD(t_b,AD) ; Y5-M5-D5 = t_b ; Y6-M6-D6 = NCD(t_b,AD) # P = P(t_b,AD) ; N = N(t_a,AD) # # for DCC = 4 x is a NumericMatrix with 11 columns, # each row containing the vector: # c(RV,Coup,DCC,Y1,M1,D1,Y2,M2,D2,Y3,CpY) with # Y1-M1-D1 = t_a ; Y2-M2-D2 = t_b ; Y3-M3-D3 = t_c # # for DCC = 7 x is a NumericMatrix with 12 columns, # each row containing the vector: # c(RV,Coup,DCC,Y1,M1,D1,Y2,M2,D2,Y3,M3,D3) with # Y1-M1-D1 = t_a ; Y2-M2-D2 = t_b ; Y3-M3-D3 = t_M # # for DCC = {9,13} x is a NumericMatrix with 10 columns, # each row containing the vector: # c(RV,Coup,DCC,Y1,M1,D1,Y2,M2,D2,EOM) with # Y1-M1-D1 = t_a ; Y2-M2-D2 = t_b # # for DCC = 16 x is a NumericMatrix with 10 columns, # each row containing the vector: # c(RV,Coup,DCC,Y1,M1,D1,Y2,M2,D2,NonBus) with # Y1-M1-D1 = t_a ; Y2-M2-D2 = t_b # NonBus = # of non-business days between t_a (incl) and t_b (excl) # if (!(is.na(Coup))) { if (is.element(DCC,c(1,3,5,6,8,10,11,12,15))) { RVVec<-rep(RV,length(RealDates)-1) CoupVec<-rep((Coup/100),length(RealDates)-1) DCCVec<-rep(DCC,length(RealDates)-1) D1<-RealDates[-length(RealDates)] D2<-RealDates[-1] D1Atoms<-as.numeric(unlist(strsplit(as.character(D1),split="-"))) D2Atoms<-as.numeric(unlist(strsplit(as.character(D2),split="-"))) D1Matrix<-matrix(t(D1Atoms),nrow=length(RealDates)-1,ncol=3,byrow=TRUE) D2Matrix<-matrix(t(D2Atoms),nrow=length(RealDates)-1,ncol=3,byrow=TRUE) CoupSchedMatrix<-cbind(RVVec,CoupVec,DCCVec,D1Matrix,D2Matrix) CoupPayments<-PayCalc(CoupSchedMatrix) } if (DCC==16) { NonBus.Em.CD1<-length(which((NonBusDays.Brazil$Date>=Em)&(NonBusDays.Brazil$Date<CoupDates[1]))) AtomVector_CD1<-as.numeric(unlist(strsplit(as.character(CoupDates[1]),split = "-"))) Atom1CD1<-AtomVector_CD1[1] Atom2CD1<-AtomVector_CD1[2] Atom3CD1<-AtomVector_CD1[3] Exp.FirstCoup<-DIST(c(DCC,Atom1Em,Atom2Em,Atom3Em,Atom1CD1,Atom2CD1,Atom3CD1,NonBus.Em.CD1))[2] FirstCoup<-RV*(((1+Coup/100)^(Exp.FirstCoup))-1) if (length(RealDates)==2) { CoupPayments<-FirstCoup } else { RegCoup<-RV*(((1+Coup/100)^(1/CpY))-1) NonBus.CD_PreFin.Mat<-length(which((NonBusDays.Brazil$Date>=CoupDates[length(CoupDates)-1])&(NonBusDays.Brazil$Date<Mat))) AtomVector_CD_PreFin<-as.numeric(unlist(strsplit(as.character(CoupDates[length(CoupDates)-1]),split = "-"))) Atom1CD_PreFin<-AtomVector_CD_PreFin[1] Atom2CD_PreFin<-AtomVector_CD_PreFin[2] Atom3CD_PreFin<-AtomVector_CD_PreFin[3] Exp.FinalCoup<-DIST(c(DCC,Atom1CD_PreFin,Atom2CD_PreFin,Atom3CD_PreFin,Atom1Mat,Atom2Mat,Atom3Mat,NonBus.CD_PreFin.Mat))[2] FinalCoup<-RV*(((1+Coup/100)^(Exp.FinalCoup))-1) 
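# (Hedged numeric sketch of the business-day (BU/252-type) accrual above, with
# illustrative values chosen by the editor: assuming DIST returns the day count
# fraction, RV = 100, Coup = 10 and Exp.FinalCoup = 0.5, i.e. 126 of 252 business
# days, the final coupon is 100 * (1.10^0.5 - 1), approximately 4.8809.)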
CoupPayments<-c(FirstCoup,rep(RegCoup,(length(CoupDates)-2)),FinalCoup) } } if (DCC==2|DCC==14) { # creating the anniversary date preceding AD1 AtomVector_AD1<-as.numeric(unlist(strsplit(as.character(AnnivDates[1]),split = "-"))) Atom1AD1<-AtomVector_AD1[1] Atom2AD1<-AtomVector_AD1[2] Atom3AD1<-AtomVector_AD1[3] PrevDate<-as.numeric(CppPrevDate(c(Atom1AD1,Atom2AD1,Atom3AD1,Atom1AD1,Atom2AD1,Atom3AD1,Atom1Refer,Atom2Refer,Atom3Refer,CpY,EOM))) PrevDate<-as.Date(paste(PrevDate[1],PrevDate[2],PrevDate[3],sep="-")) AnnivDates_PaySched<-c(PrevDate,AnnivDates) AnnivDates_PaySched<-sort(na.omit(AnnivDates_PaySched[!duplicated(AnnivDates_PaySched)])) # creating the anniversary date succeeding ADfin AtomVector_ADfin<-as.numeric(unlist(strsplit(as.character(AnnivDates[length(AnnivDates)]),split = "-"))) Atom1ADfin<-AtomVector_ADfin[1] Atom2ADfin<-AtomVector_ADfin[2] Atom3ADfin<-AtomVector_ADfin[3] SuccDate<-as.numeric(CppSuccDate(c(Atom1ADfin,Atom2ADfin,Atom3ADfin,Atom1ADfin,Atom2ADfin,Atom3ADfin,Atom1Refer,Atom2Refer,Atom3Refer,CpY,EOM))) SuccDate<-as.Date(paste(SuccDate[1],SuccDate[2],SuccDate[3],sep="-")) AnnivDates_PaySched<-c(AnnivDates_PaySched,SuccDate) AnnivDates_PaySched<-sort(na.omit(AnnivDates_PaySched[!duplicated(AnnivDates_PaySched)])) # c(RV,Coup,DCC,Y1,M1,D1,Y2,M2,D2,Y3,M3,D3,Y4,M4,D4,Y5,M5,D5,Y6,M6,D6,P,N,CpY) RVVec<-rep(RV,length(RealDates)-1) CoupVec<-rep((Coup/100),length(RealDates)-1) DCCVec<-rep(DCC,length(RealDates)-1) D1<-RealDates[-length(RealDates)] PrevDates_D1<-as.Date(unlist(lapply(D1,PCD,AnnivDates_PaySched)),DateOrigin) SuccDates_D1<-as.Date(unlist(lapply(D1,NCD,AnnivDates_PaySched)),DateOrigin) D2<-RealDates[-1] PrevDates_D2<-as.Date(unlist(lapply(D2,PCD,AnnivDates_PaySched)),DateOrigin) SuccDates_D2<-as.Date(unlist(lapply(D2,NCD,AnnivDates_PaySched)),DateOrigin) P_D2<-rep(AD_List[[2]],length(PrevDates_D2))[which(unlist(lapply(PrevDates_D2, `==`, AD_List[[1]]))==TRUE)] N_D1<-rep(AD_List[[2]],length(SuccDates_D1))[which(unlist(lapply(SuccDates_D1, `==`, AD_List[[1]]))==TRUE)] CpYVec<-rep(CpY,length(RealDates)-1) # extracting atoms from dates PrevDates_D1_Atoms<-as.numeric(unlist(strsplit(as.character(PrevDates_D1),split="-"))) D1Atoms<-as.numeric(unlist(strsplit(as.character(D1),split="-"))) SuccDates_D1_Atoms<-as.numeric(unlist(strsplit(as.character(SuccDates_D1),split="-"))) PrevDates_D2_Atoms<-as.numeric(unlist(strsplit(as.character(PrevDates_D2),split="-"))) D2Atoms<-as.numeric(unlist(strsplit(as.character(D2),split="-"))) SuccDates_D2_Atoms<-as.numeric(unlist(strsplit(as.character(SuccDates_D2),split="-"))) # constructing matrices of the date atoms PrevDates_D1_Matrix<-matrix(t(PrevDates_D1_Atoms),nrow=length(RealDates)-1,ncol=3,byrow=TRUE) D1Matrix<-matrix(t(D1Atoms),nrow=length(RealDates)-1,ncol=3,byrow=TRUE) SuccDates_D1_Matrix<-matrix(t(SuccDates_D1_Atoms),nrow=length(RealDates)-1,ncol=3,byrow=TRUE) PrevDates_D2_Matrix<-matrix(t(PrevDates_D2_Atoms),nrow=length(RealDates)-1,ncol=3,byrow=TRUE) D2Matrix<-matrix(t(D2Atoms),nrow=length(RealDates)-1,ncol=3,byrow=TRUE) SuccDates_D2_Matrix<-matrix(t(SuccDates_D2_Atoms),nrow=length(RealDates)-1,ncol=3,byrow=TRUE) # binding everything into one matrix CoupSchedMatrix<-cbind(RVVec,CoupVec,DCCVec,PrevDates_D1_Matrix,D1Matrix,SuccDates_D1_Matrix, PrevDates_D2_Matrix,D2Matrix,SuccDates_D2_Matrix,P_D2,N_D1,CpYVec) CoupPayments<-PayCalc(CoupSchedMatrix) } if (DCC==4) { # c(RV,Coup,DCC,Y1,M1,D1,Y2,M2,D2,Y3,CpY) RVVec<-rep(RV,length(RealDates)-1) CoupVec<-rep((Coup/100),length(RealDates)-1) DCCVec<-rep(DCC,length(RealDates)-1) 
D1<-RealDates[-length(RealDates)] D2<-RealDates[-1] D1Atoms<-as.numeric(unlist(strsplit(as.character(D1),split="-"))) D2Atoms<-as.numeric(unlist(strsplit(as.character(D2),split="-"))) D1Matrix<-matrix(t(D1Atoms),nrow=length(RealDates)-1,ncol=3,byrow=TRUE) D2Matrix<-matrix(t(D2Atoms),nrow=length(RealDates)-1,ncol=3,byrow=TRUE) Y3Vector<-D2Matrix[,1] CpYVec<-rep(CpY,length(RealDates)-1) CoupSchedMatrix<-cbind(RVVec,CoupVec,DCCVec,D1Matrix,D2Matrix,Y3Vector,CpYVec) CoupPayments<-PayCalc(CoupSchedMatrix) } if (DCC==7) { # c(RV,Coup,DCC,Y1,M1,D1,Y2,M2,D2,Y3,M3,D3) RVVec<-rep(RV,length(RealDates)-1) CoupVec<-rep((Coup/100),length(RealDates)-1) DCCVec<-rep(DCC,length(RealDates)-1) D1<-RealDates[-length(RealDates)] D2<-RealDates[-1] MatVec<-rep(Mat,length(RealDates)-1) D1Atoms<-as.numeric(unlist(strsplit(as.character(D1),split="-"))) D2Atoms<-as.numeric(unlist(strsplit(as.character(D2),split="-"))) MatAtoms<-as.numeric(unlist(strsplit(as.character(MatVec),split="-"))) D1Matrix<-matrix(t(D1Atoms),nrow=length(RealDates)-1,ncol=3,byrow=TRUE) D2Matrix<-matrix(t(D2Atoms),nrow=length(RealDates)-1,ncol=3,byrow=TRUE) MatMatrix<-matrix(t(MatAtoms),nrow=length(RealDates)-1,ncol=3,byrow=TRUE) CoupSchedMatrix<-cbind(RVVec,CoupVec,DCCVec,D1Matrix,D2Matrix,MatMatrix) CoupPayments<-PayCalc(CoupSchedMatrix) } if (is.element(DCC,c(9,13))) { # c(RV,Coup,DCC,Y1,M1,D1,Y2,M2,D2,EOM) with RVVec<-rep(RV,length(RealDates)-1) CoupVec<-rep((Coup/100),length(RealDates)-1) DCCVec<-rep(DCC,length(RealDates)-1) D1<-RealDates[-length(RealDates)] D2<-RealDates[-1] EOMVec<-rep(EOM,length(RealDates)-1) D1Atoms<-as.numeric(unlist(strsplit(as.character(D1),split="-"))) D2Atoms<-as.numeric(unlist(strsplit(as.character(D2),split="-"))) EOMAtoms<-as.numeric(unlist(strsplit(as.character(EOMVec),split="-"))) D1Matrix<-matrix(t(D1Atoms),nrow=length(RealDates)-1,ncol=3,byrow=TRUE) D2Matrix<-matrix(t(D2Atoms),nrow=length(RealDates)-1,ncol=3,byrow=TRUE) EOMMatrix<-matrix(t(EOMAtoms),nrow=length(RealDates)-1,ncol=3,byrow=TRUE) CoupSchedMatrix<-cbind(RVVec,CoupVec,DCCVec,D1Matrix,D2Matrix,EOMMatrix) CoupPayments<-PayCalc(CoupSchedMatrix) } if ((!(RegCF.equal==0))&(!(DCC==16))) { if (length(CoupPayments)>2) { CoupPayments<- c(CoupPayments[1],rep(RV*(Coup/(100*CpY)),(length(CoupPayments)-2)),CoupPayments[length(CoupPayments)]) PaySched<-data.frame(CoupDates,CoupPayments) } else { PaySched<-data.frame(CoupDates,CoupPayments) } } else { PaySched<-data.frame(CoupDates,CoupPayments) } } } } } Warnings<-data.frame(Em_FIAD_differ=Em_FIAD_differ,EmMatMissing=EmMatMissing,CpYOverride=CpYOverride,RV_set100percent=RV_set100percent, NegLifeFlag=NegLifeFlag,ZeroFlag=ZeroFlag,Em_Mat_SameMY=Em_Mat_SameMY,ChronErrorFlag=ChronErrorFlag, FIPD_LIPD_equal=FIPD_LIPD_equal,IPD_CpY_Corrupt=IPD_CpY_Corrupt,EOM_Deviation=EOM_Deviation,EOMOverride=EOMOverride, DCCOverride=DCCOverride,NoCoups=NoCoups) Traits<-data.frame(DateOrigin=DateOrigin,CpY=CpY,FIAD=FIAD,Em=Em,Em_Orig=Em_Orig,FIPD=FIPD,FIPD_Orig=FIPD_Orig,est_FIPD=est_FIPD,LIPD=LIPD, LIPD_Orig=LIPD_Orig,est_LIPD=est_LIPD,Mat=Mat,Refer=Refer,FCPType=FCPType,FCPLength=f,LCPType=LCPType,LCPLength=l,Par=RV, CouponInPercent.p.a=Coup,DayCountConvention=DCC,EOM_Orig=EOM_Orig,est_EOM=est_EOM,EOM_used=EOM) RealDates<-append(RealDates,rep(NA,max(length(RealDates),length(CoupDates),length(AnnivDates))-length(RealDates))) RD_indexes<-append(RD_indexes,rep(NA,max(length(RealDates),length(CoupDates),length(AnnivDates))-length(RD_indexes))) 
  CoupDates<-append(CoupDates,rep(NA,max(length(RealDates),length(CoupDates),length(AnnivDates))-length(CoupDates)))
  CD_indexes<-append(CD_indexes,rep(NA,max(length(RealDates),length(CoupDates),length(AnnivDates))-length(CD_indexes)))
  AnnivDates<-append(AnnivDates,rep(NA,max(length(RealDates),length(CoupDates),length(AnnivDates))-length(AnnivDates)))
  # pad each vector by its own shortfall so that all columns of DateVectors have equal length
  AD_indexes<-append(AD_indexes,rep(NA,max(length(RealDates),length(CoupDates),length(AnnivDates))-length(AD_indexes)))
  DateVectors<-data.frame(RealDates=RealDates,RD_indexes=RD_indexes,CoupDates=CoupDates,CD_indexes=CD_indexes,AnnivDates=AnnivDates,AD_indexes=AD_indexes)
  if (all(is.na(PaySched))) {
    AnnivDates_Out<-list(Warnings=Warnings,Traits=Traits,DateVectors=DateVectors)
  } else {
    AnnivDates_Out<-list(Warnings=Warnings,Traits=Traits,DateVectors=DateVectors,PaySched=PaySched)
  }
  return(AnnivDates_Out)
}
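# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the package): the NA-padding idiom used
# above to bring RealDates, CoupDates, AnnivDates and their index vectors to
# a common length before binding them into the DateVectors data frame.
# `pad_to` is a hypothetical helper name introduced here for clarity only.
pad_to<-function(x,n) {
  # append NAs until x has length n; x is returned unchanged if already that long
  append(x,rep(NA,n-length(x)))
}
# Example: two vectors of unequal length sharing one data frame.
# a<-as.Date(c("2020-01-01","2020-07-01","2021-01-01"))
# b<-as.Date("2021-01-01")
# n<-max(length(a),length(b))
# data.frame(RealDates=pad_to(a,n),CoupDates=pad_to(b,n))
# ---------------------------------------------------------------------------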
#' BondVal.Price (calculation of CP, AccrInt, DP, ModDUR, MacDUR and Conv)
#'
#' \bold{BondVal.Price} computes a bond's clean price given its yield.
#'
#' The function \bold{BondVal.Price} uses the function \bold{AnnivDates} to analyze the bond
#' and computes the clean price, the accrued interest, the dirty price and the sensitivity
#' measures modified duration (ModDUR), Macaulay duration (MacDUR) and convexity according
#' to the methodology presented in Djatschenko (2018).
#'
#' @param YtM The bond's yield to maturity p.a. on \code{SETT}. (required)
#' @param SETT The settlement date. Date class object with format "\%Y-\%m-\%d". (required)
#' @param Em The bond's issue date. Date class object with format "\%Y-\%m-\%d". (required)
#' @param Mat So-called "maturity date", i.e. the date on which the redemption value and the final interest
#' are paid. Date class object with format "\%Y-\%m-\%d". (required)
#' @param CpY Number of interest payments per year (non-negative integer; element of the set
#' \{0,1,2,3,4,6,12\}). Default: 2.
#' @param FIPD First interest payment date after \code{Em}. Date class object with format "\%Y-\%m-\%d". Default: \code{NA}.
#' @param LIPD Last interest payment date before \code{Mat}. Date class object with format "\%Y-\%m-\%d". Default: \code{NA}.
#' @param FIAD Date on which the interest accrual starts (so-called "dated date"). Date class object with format "\%Y-\%m-\%d". Default: \code{NA}.
#' @param RV The redemption value of the bond. Default: \code{100}.
#' @param Coup Nominal interest rate per year in percent. Default: \code{NA}.
#' @param DCC The day count convention the bond follows. Default: \code{NA}.
#' For a list of day count conventions currently implemented type \code{View(List.DCC)}.
#' @param EOM Boolean indicating whether the bond follows the End-of-Month rule. Default: \code{NA}.
#' @param DateOrigin Determines the starting point for the daycount in "Date" objects.
#' Default: "1970-01-01".
#' @param InputCheck If 1, the input variables are checked for the correct format. Default: 1.
#' @param FindEOM If \code{TRUE}, \code{EOM} is overridden by the value inferred from the data.
#' Default: \code{FALSE}.
#' @param RegCF.equal If 0, the amounts of regular cash flows are calculated according to the
#' stipulated \code{DCC}. Any other value forces all regular cash flows to be equal sized.
#' Default: 0.
#' @param SimpleLastPeriod Specifies the interest calculation method in the final coupon period. Default: \code{TRUE}.
#' @param Calc.Method If 1, discount powers are computed with the same DCC as accrued interest.
#' If 0, discount powers are computed with DCC=2. Default: 1.
#' @param AnnivDatesOutput A list containing the output of the function AnnivDates. Default: \code{NA}.
#'
#' @return
#' \describe{
#'   \item{CP}{The bond's clean price.}
#'   \item{AccrInt}{The amount of accrued interest.}
#'   \item{DP}{The bond's dirty price.}
#'   \item{ytm.p.a.}{Annualized yield to maturity.}
#'   \item{ModDUR.inYears}{Modified duration in years.}
#'   \item{MacDUR.inYears}{Macaulay duration in years.}
#'   \item{Conv.inYears}{Convexity in years.}
#'   \item{ModDUR.inPeriods}{Modified duration in periods.}
#'   \item{MacDUR.inPeriods}{Macaulay duration in periods.}
#'   \item{Conv.inPeriods}{Convexity in periods.}
#'   \item{tau}{Relative position of the settlement date in regular periods.}
#' }
#'
#' @references
#' \enumerate{
#'   \item{Djatschenko, Wadim, The Nitty Gritty of Bond Valuation: A Generalized Methodology for Fixed Coupon Bond Analysis Allowing for Irregular Periods and Various Day Count Conventions (November 5, 2018). Available at SSRN: https://ssrn.com/abstract=3205167.}
#' }
#'
#' @examples
#' data(PanelSomeBonds2016)
#' randombond<-sample(c(1:length(which(!(duplicated(PanelSomeBonds2016$ID.No))))),1)
#' df.randombond<-PanelSomeBonds2016[which(PanelSomeBonds2016$ID.No==randombond),]
#'
#' PreAnalysis.randombond<-suppressWarnings(AnnivDates(
#'   unlist(df.randombond[
#'     1,c('Issue.Date','Mat.Date','CpY.Input','FIPD.Input','LIPD.Input',
#'     'FIAD.Input','RV.Input','Coup.Input','DCC.Input','EOM.Input')],
#'   use.names=FALSE)))
#'
#' system.time(
#'   for (i in c(1:nrow(df.randombond))) {
#'     BondVal.Price.Output<-suppressWarnings(BondVal.Price(
#'       unlist(
#'         df.randombond[
#'           i,c('YtM.Input','TradeDate','Issue.Date','Mat.Date','CpY.Input',
#'           'FIPD.Input','LIPD.Input','FIAD.Input','RV.Input','Coup.Input',
#'           'DCC.Input','EOM.Input')],use.names=FALSE),
#'       AnnivDatesOutput=PreAnalysis.randombond))
#'     df.randombond$CP.Out[i]<-BondVal.Price.Output$CP
#'   }
#' )
#' plot(seq(1,nrow(df.randombond),by=1),df.randombond$CP.Out,"l")
#'
#' @export
#'
BondVal.Price<-function(YtM=as.numeric(NA),SETT=as.Date(NA),Em=as.Date(NA),Mat=as.Date(NA),CpY=as.numeric(NA),FIPD=as.Date(NA),LIPD=as.Date(NA),FIAD=as.Date(NA),RV=as.numeric(NA),Coup=as.numeric(NA),DCC=as.numeric(NA),EOM=as.numeric(NA),DateOrigin=as.Date("1970-01-01"),InputCheck=1,FindEOM=FALSE,RegCF.equal=0,SimpleLastPeriod=TRUE,Calc.Method=1,AnnivDatesOutput=as.list(NA)) {
  if (length(YtM)>1) {
    arglist<-YtM
    argnames<-c("YtM","SETT","Em","Mat","CpY","FIPD","LIPD","FIAD","RV","Coup","DCC","EOM","DateOrigin","InputCheck","FindEOM","RegCF.equal","SimpleLastPeriod","Calc.Method")
    for (i in c(1:length(arglist))) {
      assign(argnames[i],arglist[i])
    }
  }
  if (InputCheck==1) {
    CheckedInput<-InputFormatCheck(YtM=YtM,SETT=SETT,Em=Em,Mat=Mat,CpY=CpY,FIPD=FIPD,LIPD=LIPD,FIAD=FIAD,RV=RV,Coup=Coup,DCC=DCC,EOM=EOM,DateOrigin=DateOrigin)
    YtM<-CheckedInput$YtM
    SETT<-CheckedInput$SETT
    Em<-CheckedInput$Em
    Mat<-CheckedInput$Mat
    CpY<-CheckedInput$CpY
    FIPD<-CheckedInput$FIPD
    LIPD<-CheckedInput$LIPD
    FIAD<-CheckedInput$FIAD
    RV<-CheckedInput$RV
    Coup<-CheckedInput$Coup
    DCC<-CheckedInput$DCC
    EOM<-CheckedInput$EOM
    DateOrigin<-CheckedInput$DateOrigin
  }
  DP<-as.numeric(NA)
  CP<-as.numeric(NA)
  n<-as.numeric(NA)
  k<-as.numeric(NA)
  CN_tau<-as.numeric(NA)
  tau<-as.numeric(NA)
  w<-as.numeric(NA)
  eta<-as.numeric(NA)
  z<-as.numeric(NA)
  AccrInt<-as.numeric(NA)
  ModDUR.inYears<-as.numeric(NA)
  MacDUR.inYears<-as.numeric(NA)
  Conv.inYears<-as.numeric(NA)
  ModDUR.inPeriods<-as.numeric(NA)
  MacDUR.inPeriods<-as.numeric(NA)
  Conv.inPeriods<-as.numeric(NA)
  CF_final<-as.numeric(NA)
  CF.remain<-as.numeric(NA)
  DiscPowerVector<-as.numeric(NA)
  PriceEqn<-as.numeric(NA)
  if ((missing(YtM))|(is.na(YtM))) {
YtM<-as.numeric(NA) warning("Yield to maturity (YtM) is missing or NA. NA created!") } else { if ((missing(SETT))|(is.na(SETT))) { SETT<-as.Date(NA) warning("Settlement date (SETT) is missing or NA. NA created!") } else { if ((SETT<Em)|(Mat<=SETT)) { warning("Settlement date (SETT) is outside bond's lifespan. NA created!") } else { if (Calc.Method==0) { RegCF.equal<-1 BondAnalysis<-suppressWarnings(AnnivDates(Em=Em,Mat=Mat,CpY=CpY,FIPD=FIPD,LIPD=LIPD,FIAD=FIAD,RV=RV,Coup=Coup,DCC=DCC,EOM=EOM,DateOrigin=DateOrigin,InputCheck=0,FindEOM=FindEOM,RegCF.equal=RegCF.equal)) } else { if (all(is.na(AnnivDatesOutput))) { BondAnalysis<-suppressWarnings(AnnivDates(Em=Em,Mat=Mat,CpY=CpY,FIPD=FIPD,LIPD=LIPD,FIAD=FIAD,RV=RV,Coup=Coup,DCC=DCC,EOM=EOM,DateOrigin=DateOrigin,InputCheck=0,FindEOM=FindEOM,RegCF.equal=RegCF.equal)) } else { BondAnalysis<-AnnivDatesOutput } } CpY<-BondAnalysis$Traits$CpY RV<-BondAnalysis$Traits$Par RealDates<-na.omit(BondAnalysis$DateVectors$RealDates) if ((SETT<RealDates[1])|(SETT>=RealDates[length(RealDates)])) { warning("Settlement date (SETT) is not between issue date (Em) and maturity date (Mat). NA created!") } else { if (is.na(BondAnalysis$Traits$FIPD)) { FIPD<-BondAnalysis$Traits$est_FIPD } else { FIPD<-BondAnalysis$Traits$FIPD } if (is.na(BondAnalysis$Traits$LIPD)) { LIPD<-BondAnalysis$Traits$est_LIPD } else { LIPD<-BondAnalysis$Traits$LIPD } DP.Output<-DP(RV,SETT,Em,Mat,CpY,FIPD,LIPD,FIAD,RV,Coup,DCC,EOM,DateOrigin,InputCheck=0,FindEOM,AnnivDatesOutput=BondAnalysis) if (BondAnalysis$Warnings$ZeroFlag==1) { Coup<-as.numeric(0) CpY<-as.numeric(1) } AccrInt<-DP.Output[[2]]$Accrued_Interest CF.values<-na.omit(BondAnalysis$PaySched$CoupPayments) CF.dates<-na.omit(BondAnalysis$PaySched$CoupDates) CF_List<-list(CF.values,CF.dates) Use.ClosedForm<-as.numeric(0) if (length(CF.values)>2) { if (length(which(!duplicated(CF.values[-c(1,length(CF.values))])==TRUE))==1) { Use.ClosedForm<-as.numeric(1) } } if (Calc.Method==0) { DCC_Orig<-DCC DCC<-2 BondAnalysis<-suppressWarnings(AnnivDates(Em=Em,Mat=Mat,CpY=CpY,FIPD=FIPD,LIPD=LIPD,FIAD=FIAD,RV=RV,Coup=Coup,DCC=DCC,EOM=EOM,DateOrigin=DateOrigin,InputCheck=0,FindEOM=FindEOM,RegCF.equal=RegCF.equal)) } # loading AnnivDates and expanding by one value to each side AD.set<-na.omit(BondAnalysis$DateVectors$AnnivDates) Refer<-BondAnalysis$Traits$Refer AtomVector_Refer<-as.numeric(unlist(strsplit(as.character(Refer),split = "-"))) Atom1Refer<-AtomVector_Refer[1] Atom2Refer<-AtomVector_Refer[2] Atom3Refer<-AtomVector_Refer[3] # creating the anniversary date preceding AD1 AtomVector_AD1<-as.numeric(unlist(strsplit(as.character(AD.set[1]),split = "-"))) Atom1AD1<-AtomVector_AD1[1] Atom2AD1<-AtomVector_AD1[2] Atom3AD1<-AtomVector_AD1[3] PrevDate<-as.numeric(CppPrevDate(c(Atom1AD1,Atom2AD1,Atom3AD1,Atom1AD1,Atom2AD1,Atom3AD1,Atom1Refer,Atom2Refer,Atom3Refer,CpY,EOM))) PrevDate<-as.Date(paste(PrevDate[1],PrevDate[2],PrevDate[3],sep="-")) AD.set<-c(PrevDate,AD.set) AD.set<-sort(na.omit(AD.set[!duplicated(AD.set)])) # creating the anniversary date succeeding ADfin AtomVector_ADfin<-as.numeric(unlist(strsplit(as.character(AD.set[length(AD.set)]),split = "-"))) Atom1ADfin<-AtomVector_ADfin[1] Atom2ADfin<-AtomVector_ADfin[2] Atom3ADfin<-AtomVector_ADfin[3] SuccDate<-as.numeric(CppSuccDate(c(Atom1ADfin,Atom2ADfin,Atom3ADfin,Atom1ADfin,Atom2ADfin,Atom3ADfin,Atom1Refer,Atom2Refer,Atom3Refer,CpY,EOM))) SuccDate<-as.Date(paste(SuccDate[1],SuccDate[2],SuccDate[3],sep="-")) AD.set<-c(AD.set,SuccDate) AD.set<-sort(na.omit(AD.set[!duplicated(AD.set)])) 
AD.indexes<-na.omit(BondAnalysis$DateVectors$AD_indexes) AD.indexes<-c((AD.indexes[1]-1),AD.indexes,(AD.indexes[length(AD.indexes)]+1)) SD.set<-na.omit(BondAnalysis$DateVectors$RealDates) SD.indexes<-na.omit(BondAnalysis$DateVectors$RD_indexes) AD_List<-list(AD.set,AD.indexes) SD_List<-list(SD.set,SD.indexes) ### calculating tau AtomVector_Mat<-as.numeric(unlist(strsplit(as.character(Mat),split = "-"))) Atom1Mat<-AtomVector_Mat[1] Atom2Mat<-AtomVector_Mat[2] Atom3Mat<-AtomVector_Mat[3] AtomVector_PCD.SETT<-as.numeric(unlist(strsplit(as.character(PCD(SETT,AD.set)),split = "-"))) Atom1_PCD.SETT<-AtomVector_PCD.SETT[1] Atom2_PCD.SETT<-AtomVector_PCD.SETT[2] Atom3_PCD.SETT<-AtomVector_PCD.SETT[3] AtomVector_SETT<-as.numeric(unlist(strsplit(as.character(SETT),split = "-"))) Atom1_SETT<-AtomVector_SETT[1] Atom2_SETT<-AtomVector_SETT[2] Atom3_SETT<-AtomVector_SETT[3] AtomVector_NCD.SETT<-as.numeric(unlist(strsplit(as.character(NCD(SETT,AD.set)),split = "-"))) Atom1_NCD.SETT<-AtomVector_NCD.SETT[1] Atom2_NCD.SETT<-AtomVector_NCD.SETT[2] Atom3_NCD.SETT<-AtomVector_NCD.SETT[3] AtomVector_NCD.NCD.SETT<-as.numeric(unlist(strsplit(as.character(NCD(NCD(SETT,AD.set),AD.set)),split = "-"))) Atom1_NCD.NCD.SETT<-AtomVector_NCD.NCD.SETT[1] Atom2_NCD.NCD.SETT<-AtomVector_NCD.NCD.SETT[2] Atom3_NCD.NCD.SETT<-AtomVector_NCD.NCD.SETT[3] if (is.element(DCC,c(1,3,5,6,8,10,11,12,15))) { tau_Num<-DIST(c(DCC,Atom1_PCD.SETT,Atom2_PCD.SETT,Atom3_PCD.SETT,Atom1_SETT,Atom2_SETT,Atom3_SETT))[2] tau_Den<-DIST(c(DCC,Atom1_PCD.SETT,Atom2_PCD.SETT,Atom3_PCD.SETT,Atom1_NCD.SETT,Atom2_NCD.SETT,Atom3_NCD.SETT))[2] } if (DCC==16) { NonBus.PCD.SETT<-length(which((NonBusDays.Brazil$Date>=PCD(SETT,AD.set))&(NonBusDays.Brazil$Date<SETT))) NonBus.PCD.NCD<-length(which((NonBusDays.Brazil$Date>=PCD(SETT,AD.set))&(NonBusDays.Brazil$Date<NCD(SETT,AD.set)))) tau_Num<-DIST(c(DCC,Atom1_PCD.SETT,Atom2_PCD.SETT,Atom3_PCD.SETT,Atom1_SETT,Atom2_SETT,Atom3_SETT,NonBus.PCD.SETT))[2] tau_Den<-DIST(c(DCC,Atom1_PCD.SETT,Atom2_PCD.SETT,Atom3_PCD.SETT,Atom1_NCD.SETT,Atom2_NCD.SETT,Atom3_NCD.SETT,NonBus.PCD.NCD))[2] } if (DCC==2|DCC==14) { OrigDCC<-DCC DCC<-2 tau_Num<-DIST(c(DCC,Atom1_PCD.SETT,Atom2_PCD.SETT,Atom3_PCD.SETT,Atom1_PCD.SETT,Atom2_PCD.SETT,Atom3_PCD.SETT,Atom1_NCD.SETT,Atom2_NCD.SETT,Atom3_NCD.SETT, Atom1_PCD.SETT,Atom2_PCD.SETT,Atom3_PCD.SETT,Atom1_SETT,Atom2_SETT,Atom3_SETT,Atom1_NCD.SETT,Atom2_NCD.SETT,Atom3_NCD.SETT, AD_List[[2]][which(AD_List[[1]]==PCD(SETT,AD.set))], AD_List[[2]][which(AD_List[[1]]==NCD(PCD(SETT,AD.set),AD.set))], CpY))[2] tau_Den<-DIST(c(DCC,Atom1_PCD.SETT,Atom2_PCD.SETT,Atom3_PCD.SETT,Atom1_PCD.SETT,Atom2_PCD.SETT,Atom3_PCD.SETT,Atom1_NCD.SETT,Atom2_NCD.SETT,Atom3_NCD.SETT, Atom1_NCD.SETT,Atom2_NCD.SETT,Atom3_NCD.SETT,Atom1_NCD.SETT,Atom2_NCD.SETT,Atom3_NCD.SETT,Atom1_NCD.NCD.SETT,Atom2_NCD.NCD.SETT,Atom3_NCD.NCD.SETT, AD_List[[2]][which(AD_List[[1]]==PCD(NCD(SETT,AD.set),AD.set))], AD_List[[2]][which(AD_List[[1]]==NCD(PCD(SETT,AD.set),AD.set))], CpY))[2] DCC<-OrigDCC } if (DCC==4) { tau_Num<-DIST(c(DCC,Atom1_PCD.SETT,Atom2_PCD.SETT,Atom3_PCD.SETT,Atom1_SETT,Atom2_SETT,Atom3_SETT,Atom1_NCD.SETT,CpY))[2] tau_Den<-DIST(c(DCC,Atom1_PCD.SETT,Atom2_PCD.SETT,Atom3_PCD.SETT,Atom1_NCD.SETT,Atom2_NCD.SETT,Atom3_NCD.SETT,Atom1_NCD.NCD.SETT,CpY))[2] } if (DCC==7) { tau_Num<-DIST(c(DCC,Atom1_PCD.SETT,Atom2_PCD.SETT,Atom3_PCD.SETT,Atom1_SETT,Atom2_SETT,Atom3_SETT,Atom1Mat,Atom2Mat,Atom3Mat))[2] 
tau_Den<-DIST(c(DCC,Atom1_PCD.SETT,Atom2_PCD.SETT,Atom3_PCD.SETT,Atom1_NCD.SETT,Atom2_NCD.SETT,Atom3_NCD.SETT,Atom1Mat,Atom2Mat,Atom3Mat))[2] } if (is.element(DCC,c(9,13))) { tau_Num<-DIST(c(DCC,Atom1_PCD.SETT,Atom2_PCD.SETT,Atom3_PCD.SETT,Atom1_SETT,Atom2_SETT,Atom3_SETT,EOM))[2] tau_Den<-DIST(c(DCC,Atom1_PCD.SETT,Atom2_PCD.SETT,Atom3_PCD.SETT,Atom1_NCD.SETT,Atom2_NCD.SETT,Atom3_NCD.SETT,EOM))[2] } tau<-tau_Num/tau_Den+AD_List[[2]][which(AD_List[[1]]==PCD(SETT,AD.set))] n<-SD.indexes[length(SD.indexes)-1] l<-BondAnalysis$Traits$LCPLength if ((!(is.na(LIPD)))&(SETT<LIPD)) { k<-SD_List[[2]][which(SD_List[[1]]==NCD(SETT,SD.set))] w<-k-tau eta<-n-k z<-l CN_tau<-CF_List[[1]][which(CF_List[[2]]==NCD(SETT,SD.set))] } else { k<-n w<-SD_List[[2]][which(SD_List[[1]]==NCD(SETT,SD.set))]-tau eta<-0 z<-0 CN_tau<-0 } ###################>>>>>> <<<<<<<########################################################## ##############>>>>>> calculation of CP, ModDUR, MacDUR and Conv <<<<<<<##################################################### ###################>>>>>> <<<<<<<########################################################## CF_final<-CF.values[length(CF.values)]+RV if ((z==0)&(SimpleLastPeriod==TRUE)) { # simple interest a<-1+YtM*w/(100*CpY) DP<-CF_final/a CP<-DP-AccrInt ModDUR.inYears<-(-1)*(1/DP)*(-1)*CF_final*(a^(-2))*(w/CpY) MacDUR.inYears<-ModDUR.inYears*a Conv.inYears<-0.5*(1/DP)*2*CF_final*(a^(-3))*(w/CpY)^2 ModDUR.inPeriods<-ModDUR.inYears*CpY MacDUR.inPeriods<-MacDUR.inYears*CpY Conv.inPeriods<-Conv.inYears*(CpY^2) } else { a<-1+YtM/(100*CpY) # if (round(a,8)==1) { # CF.remain<-CF_List[[1]][which(CF_List[[2]]>SETT)] # CF.remain<-c(CF.remain[-length(CF.remain)],CF.remain[length(CF.remain)]+RV) # DP<-sum(CF.remain) # CP<-DP-AccrInt # } else { # # } if ((Use.ClosedForm==1)&(!(round(a,8)==1))) { CF<-CF.values[2] DP<-(a^(-w))*(CN_tau+CF*(((a^eta)-1)/((a^eta)*(a-1)))+CF_final/(a^(eta+z))) CP<-DP-AccrInt ModDUR.inYears<-ModDUR(a,c(1,CN_tau,CF,CF_final,w,eta,z,CpY,DP)) # ModDUR.inYears<-(-1)*(1/DP)*dm_MyPriceEqn(a,c(1,CN_tau,CF,CF_final,w,eta,z,CpY)) MacDUR.inYears<-ModDUR.inYears*a Conv.inYears<-CONV(a,c(2,CN_tau,CF,CF_final,w,eta,z,CpY,DP)) # Conv.inYears<-0.5*(1/DP)*dm_MyPriceEqn(a,c(2,CN_tau,CF,CF_final,w,eta,z,CpY)) ModDUR.inPeriods<-ModDUR.inYears*CpY MacDUR.inPeriods<-MacDUR.inYears*CpY Conv.inPeriods<-Conv.inYears*(CpY^2) } else { if (z>0) { DiscPowerVector<-c((seq(0,eta,by=1)+w),(w+eta+z)) } else { DiscPowerVector<-w } CF.remain<-CF_List[[1]][which(CF_List[[2]]>SETT)] CF.remain<-c(CF.remain[-length(CF.remain)],CF.remain[length(CF.remain)]+RV) # creating the price equation summands<-gsub(" ","",paste(CF.remain,"/(a^",DiscPowerVector,")")) PriceEqn<-paste(summands,collapse="+") PriceFunction.Standard<-function(a) { out<-eval(parse(text=PriceEqn)) return(out) } # creating the first derivative of the price function d_PowerVector<-DiscPowerVector+1 d_summands<-gsub(" ","",paste("(-1)*(",CF.remain,"*",DiscPowerVector,"/(a^",d_PowerVector,"))")) d_PriceEqn<-paste(d_summands,collapse="+") d_PriceFunction.Standard<-function(a) { out<-eval(parse(text=d_PriceEqn)) return(out) } DP<-PriceFunction.Standard(a) CP<-DP-AccrInt # calculating the Modified Duration and the MacAulay Duration ModDUR.inPeriods<-(-1)*(1/DP)*d_PriceFunction.Standard(a) MacDUR.inPeriods<-ModDUR.inPeriods*a ModDUR.inYears<-ModDUR.inPeriods/CpY MacDUR.inYears<-MacDUR.inPeriods/CpY # calculating the Convexity d2_PowerVector<-d_PowerVector+1 d2_summands<-gsub(" 
","",paste("(",CF.remain,"*",DiscPowerVector,"*",d_PowerVector,")/(a^",d2_PowerVector,")")) Added.d2_summands<-paste(d2_summands,collapse="+") d2_PriceFunction.Standard <- function(a) { out<-eval(parse(text=Added.d2_summands)) return(out) } Conv.inPeriods<-(1/DP)*0.5*d2_PriceFunction.Standard(a) Conv.inYears<-Conv.inPeriods/(CpY^2) } } } } } } # out<-list(tau=tau,n=n,k=k,w=w,eta=eta,z=z,CN_tau=CN_tau,ytm.p.a.=YtM,DP=DP,CF_final=CF_final, # ModDUR.inPeriods=ModDUR.inPeriods,MacDUR.inPeriods=MacDUR.inPeriods,Conv.inPeriods=Conv.inPeriods, # ModDUR.inYears=ModDUR.inYears,MacDUR.inYears=MacDUR.inYears,Conv.inYears=Conv.inYears,FIPD=FIPD, # LIPD=LIPD) # out<-Added.d2_summands out<-list(CP=CP,AccrInt=AccrInt,DP=DP,ytm.p.a.=YtM,ModDUR.inYears=ModDUR.inYears,MacDUR.inYears=MacDUR.inYears, Conv.inYears=Conv.inYears,ModDUR.inPeriods=ModDUR.inPeriods,MacDUR.inPeriods=MacDUR.inPeriods, Conv.inPeriods=Conv.inPeriods,tau=tau) return(out) }
#' BondVal.Yield (calculation of YtM, AccrInt, DP, ModDUR, MacDUR and Conv)
#'
#' \bold{BondVal.Yield} returns a bond's yield to maturity given its clean price.
#'
#' \bold{BondVal.Yield} uses the function \bold{AnnivDates} to analyze the bond
#' and computes the yield to maturity, the accrued interest, the dirty price and the sensitivity
#' measures modified duration (ModDUR), Macaulay duration (MacDUR) and convexity according
#' to the methodology presented in Djatschenko (2018). The yield to maturity is determined
#' numerically using the Newton-Raphson method.
#'
#' @param CP The bond's clean price on \code{SETT}. (required)
#' @param SETT The settlement date. Date class object with format "\%Y-\%m-\%d". (required)
#' @param Em The bond's issue date. Date class object with format "\%Y-\%m-\%d". (required)
#' @param Mat So-called "maturity date", i.e. the date on which the redemption value and the final interest
#' are paid. Date class object with format "\%Y-\%m-\%d". (required)
#' @param CpY Number of interest payments per year (non-negative integer; element of the set
#' \{0,1,2,3,4,6,12\}). Default: 2.
#' @param FIPD First interest payment date after \code{Em}. Date class object with format "\%Y-\%m-\%d". Default: \code{NA}.
#' @param LIPD Last interest payment date before \code{Mat}. Date class object with format "\%Y-\%m-\%d". Default: \code{NA}.
#' @param FIAD Date on which the interest accrual starts (so-called "dated date"). Date class object with format "\%Y-\%m-\%d". Default: \code{NA}.
#' @param RV The redemption value of the bond. Default: \code{100}.
#' @param Coup Nominal interest rate per year in percent. Default: \code{NA}.
#' @param DCC The day count convention the bond follows. Default: \code{NA}.
#' For a list of day count conventions currently implemented type \code{View(List.DCC)}.
#' @param EOM Boolean indicating whether the bond follows the End-of-Month rule. Default: \code{NA}.
#' @param DateOrigin Determines the starting point for the daycount in "Date" objects.
#' Default: "1970-01-01".
#' @param InputCheck If 1, the input variables are checked for the correct format. Default: 1.
#' @param FindEOM If \code{TRUE}, \code{EOM} is overridden by the value inferred from the data.
#' Default: \code{FALSE}.
#' @param RegCF.equal If 0, the amounts of regular cash flows are calculated according to the
#' stipulated \code{DCC}. Any other value forces all regular cash flows to be equal sized.
#' Default: 0.
#' @param SimpleLastPeriod Specifies the interest calculation method in the final coupon period. Default: \code{TRUE}.
#' @param Precision Desired precision in the YtM calculation. Default: \code{.Machine$double.eps^0.75}.
#' @param Calc.Method If 1, discount powers are computed with the same DCC as accrued interest.
#' If 0, discount powers are computed with DCC=2. Default: 1.
#' @param AnnivDatesOutput A list containing the output of the function AnnivDates. Default: \code{NA}.
#'
#' @return
#' \describe{
#'   \item{CP}{The bond's clean price.}
#'   \item{AccrInt}{The amount of accrued interest.}
#'   \item{DP}{The bond's dirty price.}
#'   \item{ytm.p.a.}{Annualized yield to maturity.}
#'   \item{ModDUR.inYears}{Modified duration in years.}
#'   \item{MacDUR.inYears}{Macaulay duration in years.}
#'   \item{Conv.inYears}{Convexity in years.}
#'   \item{ModDUR.inPeriods}{Modified duration in periods.}
#'   \item{MacDUR.inPeriods}{Macaulay duration in periods.}
#'   \item{Conv.inPeriods}{Convexity in periods.}
#'   \item{tau}{Relative position of the settlement date in regular periods.}
#' }
#'
#' @references
#' \enumerate{
#'   \item{Djatschenko, Wadim, The Nitty Gritty of Bond Valuation: A Generalized Methodology for Fixed Coupon Bond Analysis Allowing for Irregular Periods and Various Day Count Conventions (November 5, 2018). Available at SSRN: https://ssrn.com/abstract=3205167.}
#' }
#'
#' @examples
#' data(PanelSomeBonds2016)
#' randombond<-sample(c(1:length(which(!(duplicated(PanelSomeBonds2016$ID.No))))),1)
#' df.randombond<-PanelSomeBonds2016[which(PanelSomeBonds2016$ID.No==randombond),]
#'
#' PreAnalysis.randombond<-suppressWarnings(AnnivDates(
#'   unlist(df.randombond[
#'     1,c('Issue.Date','Mat.Date','CpY.Input','FIPD.Input','LIPD.Input',
#'     'FIAD.Input','RV.Input','Coup.Input','DCC.Input','EOM.Input')],
#'   use.names=FALSE)))
#'
#' system.time(
#'   for (i in c(1:nrow(df.randombond))) {
#'     BondVal.Yield.Output<-suppressWarnings(BondVal.Yield(
#'       unlist(df.randombond[i,c('CP.Input','TradeDate','Issue.Date','Mat.Date',
#'         'CpY.Input','FIPD.Input','LIPD.Input','FIAD.Input','RV.Input',
#'         'Coup.Input','DCC.Input','EOM.Input')],use.names=FALSE),
#'       AnnivDatesOutput=PreAnalysis.randombond))
#'     df.randombond$YtM.Out[i]<-BondVal.Yield.Output$ytm.p.a.
#' } #' ) #' plot(seq(1,nrow(df.randombond),by=1),df.randombond$YtM.Out,"l") #' #' @export #' BondVal.Yield<-function(CP=as.numeric(NA),SETT=as.Date(NA),Em=as.Date(NA),Mat=as.Date(NA),CpY=as.numeric(NA),FIPD=as.Date(NA),LIPD=as.Date(NA),FIAD=as.Date(NA),RV=as.numeric(NA),Coup=as.numeric(NA),DCC=as.numeric(NA),EOM=as.numeric(NA),DateOrigin=as.Date("1970-01-01"),InputCheck=1,FindEOM=FALSE,RegCF.equal=0,SimpleLastPeriod=TRUE,Precision=.Machine$double.eps^0.75,Calc.Method=1,AnnivDatesOutput=as.list(NA)) { if (length(CP)>1) { arglist<-CP argnames<-c("CP","SETT","Em","Mat","CpY","FIPD","LIPD","FIAD","RV","Coup","DCC","EOM","DateOrigin","InputCheck","FindEOM","RegCF.equal","SimpleLastPeriod","Precision","Calc.Method") for (i in c(1:length(arglist))) { assign(argnames[i],arglist[i]) } } if (InputCheck==1) { CheckedInput<-InputFormatCheck(CP=CP,SETT=SETT,Em=Em,Mat=Mat,CpY=CpY,FIPD=FIPD,LIPD=LIPD,FIAD=FIAD,RV=RV,Coup=Coup,DCC=DCC,EOM=EOM,DateOrigin=DateOrigin) CP<-CheckedInput$CP SETT<-CheckedInput$SETT Em<-CheckedInput$Em Mat<-CheckedInput$Mat CpY<-CheckedInput$CpY FIPD<-CheckedInput$FIPD LIPD<-CheckedInput$LIPD FIAD<-CheckedInput$FIAD RV<-CheckedInput$RV Coup<-CheckedInput$Coup DCC<-CheckedInput$DCC EOM<-CheckedInput$EOM DateOrigin<-CheckedInput$DateOrigin } DP<-as.numeric(NA) n<-as.numeric(NA) k<-as.numeric(NA) CN_tau<-as.numeric(NA) tau<-as.numeric(NA) w<-as.numeric(NA) eta<-as.numeric(NA) z<-as.numeric(NA) AccrInt<-as.numeric(NA) ytm.p.a.<-as.numeric(NA) ModDUR.inYears<-as.numeric(NA) MacDUR.inYears<-as.numeric(NA) Conv.inYears<-as.numeric(NA) ModDUR.inPeriods<-as.numeric(NA) MacDUR.inPeriods<-as.numeric(NA) Conv.inPeriods<-as.numeric(NA) CF_final<-as.numeric(NA) yApprox<-as.numeric(NA) aApprox<-as.numeric(NA) CF.remain<-as.numeric(NA) DiscPowerVector<-as.numeric(NA) N.Iter<-as.numeric(NA) f.value<-as.numeric(NA) path.break<-as.numeric(NA) NewtRaph.Out<-as.numeric(NA) PriceEqn<-as.numeric(NA) if ((missing(CP))|(is.na(CP))|CP==0) { CP<-as.numeric(NA) warning("Clean price (CP) is missing, 0 or NA. NA created!") } else { if ((missing(SETT))|(is.na(SETT))) { SETT<-as.Date(NA) warning("Settlement date (SETT) is missing or NA. NA created!") } else { if ((SETT<Em)|(Mat<=SETT)) { warning("Settlement date (SETT) is outside bond's lifespan. NA created!") } else { if (Calc.Method==0) { RegCF.equal<-1 BondAnalysis<-suppressWarnings(AnnivDates(Em=Em,Mat=Mat,CpY=CpY,FIPD=FIPD,LIPD=LIPD,FIAD=FIAD,RV=RV,Coup=Coup,DCC=DCC,EOM=EOM,DateOrigin=DateOrigin,InputCheck=0,FindEOM=FindEOM,RegCF.equal=RegCF.equal)) } else { if (all(is.na(AnnivDatesOutput))) { BondAnalysis<-suppressWarnings(AnnivDates(Em=Em,Mat=Mat,CpY=CpY,FIPD=FIPD,LIPD=LIPD,FIAD=FIAD,RV=RV,Coup=Coup,DCC=DCC,EOM=EOM,DateOrigin=DateOrigin,InputCheck=0,FindEOM=FindEOM,RegCF.equal=RegCF.equal)) } else { BondAnalysis<-AnnivDatesOutput } } CpY<-BondAnalysis$Traits$CpY RV<-BondAnalysis$Traits$Par RealDates<-na.omit(BondAnalysis$DateVectors$RealDates) if ((SETT<RealDates[1])|(SETT>=RealDates[length(RealDates)])) { warning("Settlement date (SETT) is not between issue date (Em) and maturity date (Mat). 
NA created!") } else { if (is.na(BondAnalysis$Traits$FIPD)) { FIPD<-BondAnalysis$Traits$est_FIPD } else { FIPD<-BondAnalysis$Traits$FIPD } if (is.na(BondAnalysis$Traits$LIPD)) { LIPD<-BondAnalysis$Traits$est_LIPD } else { LIPD<-BondAnalysis$Traits$LIPD } DP.Output<-DP(CP,SETT,Em,Mat,CpY,FIPD,LIPD,FIAD,RV,Coup,DCC,EOM,DateOrigin,InputCheck=0,FindEOM,AnnivDatesOutput=BondAnalysis) if (BondAnalysis$Warnings$ZeroFlag==1) { Coup<-as.numeric(0) CpY<-as.numeric(1) } DP<-DP.Output[[2]]$Dirty_Price AccrInt<-DP.Output[[2]]$Accrued_Interest CF.values<-na.omit(BondAnalysis$PaySched$CoupPayments) CF.dates<-na.omit(BondAnalysis$PaySched$CoupDates) CF_List<-list(CF.values,CF.dates) Use.ClosedForm<-as.numeric(0) if (length(CF.values)>2) { if (length(which(!duplicated(CF.values[-c(1,length(CF.values))])==TRUE))==1) { Use.ClosedForm<-as.numeric(1) } } if (Calc.Method==0) { DCC_Orig<-DCC DCC<-2 BondAnalysis<-suppressWarnings(AnnivDates(Em=Em,Mat=Mat,CpY=CpY,FIPD=FIPD,LIPD=LIPD,FIAD=FIAD,RV=RV,Coup=Coup,DCC=DCC,EOM=EOM,DateOrigin=DateOrigin,InputCheck=0,FindEOM=FindEOM,RegCF.equal=RegCF.equal)) } # loading AnnivDates and expanding by one value to each side AD.set<-na.omit(BondAnalysis$DateVectors$AnnivDates) Refer<-BondAnalysis$Traits$Refer AtomVector_Refer<-as.numeric(unlist(strsplit(as.character(Refer),split = "-"))) Atom1Refer<-AtomVector_Refer[1] Atom2Refer<-AtomVector_Refer[2] Atom3Refer<-AtomVector_Refer[3] # creating the anniversary date preceding AD1 AtomVector_AD1<-as.numeric(unlist(strsplit(as.character(AD.set[1]),split = "-"))) Atom1AD1<-AtomVector_AD1[1] Atom2AD1<-AtomVector_AD1[2] Atom3AD1<-AtomVector_AD1[3] PrevDate<-as.numeric(CppPrevDate(c(Atom1AD1,Atom2AD1,Atom3AD1,Atom1AD1,Atom2AD1,Atom3AD1,Atom1Refer,Atom2Refer,Atom3Refer,CpY,EOM))) PrevDate<-as.Date(paste(PrevDate[1],PrevDate[2],PrevDate[3],sep="-")) AD.set<-c(PrevDate,AD.set) AD.set<-sort(na.omit(AD.set[!duplicated(AD.set)])) # creating the anniversary date succeeding ADfin AtomVector_ADfin<-as.numeric(unlist(strsplit(as.character(AD.set[length(AD.set)]),split = "-"))) Atom1ADfin<-AtomVector_ADfin[1] Atom2ADfin<-AtomVector_ADfin[2] Atom3ADfin<-AtomVector_ADfin[3] SuccDate<-as.numeric(CppSuccDate(c(Atom1ADfin,Atom2ADfin,Atom3ADfin,Atom1ADfin,Atom2ADfin,Atom3ADfin,Atom1Refer,Atom2Refer,Atom3Refer,CpY,EOM))) SuccDate<-as.Date(paste(SuccDate[1],SuccDate[2],SuccDate[3],sep="-")) AD.set<-c(AD.set,SuccDate) AD.set<-sort(na.omit(AD.set[!duplicated(AD.set)])) AD.indexes<-na.omit(BondAnalysis$DateVectors$AD_indexes) AD.indexes<-c((AD.indexes[1]-1),AD.indexes,(AD.indexes[length(AD.indexes)]+1)) SD.set<-na.omit(BondAnalysis$DateVectors$RealDates) SD.indexes<-na.omit(BondAnalysis$DateVectors$RD_indexes) AD_List<-list(AD.set,AD.indexes) SD_List<-list(SD.set,SD.indexes) ### calculating tau AtomVector_Mat<-as.numeric(unlist(strsplit(as.character(Mat),split = "-"))) Atom1Mat<-AtomVector_Mat[1] Atom2Mat<-AtomVector_Mat[2] Atom3Mat<-AtomVector_Mat[3] AtomVector_PCD.SETT<-as.numeric(unlist(strsplit(as.character(PCD(SETT,AD.set)),split = "-"))) Atom1_PCD.SETT<-AtomVector_PCD.SETT[1] Atom2_PCD.SETT<-AtomVector_PCD.SETT[2] Atom3_PCD.SETT<-AtomVector_PCD.SETT[3] AtomVector_SETT<-as.numeric(unlist(strsplit(as.character(SETT),split = "-"))) Atom1_SETT<-AtomVector_SETT[1] Atom2_SETT<-AtomVector_SETT[2] Atom3_SETT<-AtomVector_SETT[3] AtomVector_NCD.SETT<-as.numeric(unlist(strsplit(as.character(NCD(SETT,AD.set)),split = "-"))) Atom1_NCD.SETT<-AtomVector_NCD.SETT[1] Atom2_NCD.SETT<-AtomVector_NCD.SETT[2] Atom3_NCD.SETT<-AtomVector_NCD.SETT[3] 
AtomVector_NCD.NCD.SETT<-as.numeric(unlist(strsplit(as.character(NCD(NCD(SETT,AD.set),AD.set)),split = "-"))) Atom1_NCD.NCD.SETT<-AtomVector_NCD.NCD.SETT[1] Atom2_NCD.NCD.SETT<-AtomVector_NCD.NCD.SETT[2] Atom3_NCD.NCD.SETT<-AtomVector_NCD.NCD.SETT[3] if (is.element(DCC,c(1,3,5,6,8,10,11,12,15))) { tau_Num<-DIST(c(DCC,Atom1_PCD.SETT,Atom2_PCD.SETT,Atom3_PCD.SETT,Atom1_SETT,Atom2_SETT,Atom3_SETT))[2] tau_Den<-DIST(c(DCC,Atom1_PCD.SETT,Atom2_PCD.SETT,Atom3_PCD.SETT,Atom1_NCD.SETT,Atom2_NCD.SETT,Atom3_NCD.SETT))[2] } if (DCC==16) { NonBus.PCD.SETT<-length(which((NonBusDays.Brazil$Date>=PCD(SETT,AD.set))&(NonBusDays.Brazil$Date<SETT))) NonBus.PCD.NCD<-length(which((NonBusDays.Brazil$Date>=PCD(SETT,AD.set))&(NonBusDays.Brazil$Date<NCD(SETT,AD.set)))) tau_Num<-DIST(c(DCC,Atom1_PCD.SETT,Atom2_PCD.SETT,Atom3_PCD.SETT,Atom1_SETT,Atom2_SETT,Atom3_SETT,NonBus.PCD.SETT))[2] tau_Den<-DIST(c(DCC,Atom1_PCD.SETT,Atom2_PCD.SETT,Atom3_PCD.SETT,Atom1_NCD.SETT,Atom2_NCD.SETT,Atom3_NCD.SETT,NonBus.PCD.NCD))[2] } if (DCC==2|DCC==14) { OrigDCC<-DCC DCC<-2 tau_Num<-DIST(c(DCC,Atom1_PCD.SETT,Atom2_PCD.SETT,Atom3_PCD.SETT,Atom1_PCD.SETT,Atom2_PCD.SETT,Atom3_PCD.SETT,Atom1_NCD.SETT,Atom2_NCD.SETT,Atom3_NCD.SETT, Atom1_PCD.SETT,Atom2_PCD.SETT,Atom3_PCD.SETT,Atom1_SETT,Atom2_SETT,Atom3_SETT,Atom1_NCD.SETT,Atom2_NCD.SETT,Atom3_NCD.SETT, AD_List[[2]][which(AD_List[[1]]==PCD(SETT,AD.set))], AD_List[[2]][which(AD_List[[1]]==NCD(PCD(SETT,AD.set),AD.set))], CpY))[2] tau_Den<-DIST(c(DCC,Atom1_PCD.SETT,Atom2_PCD.SETT,Atom3_PCD.SETT,Atom1_PCD.SETT,Atom2_PCD.SETT,Atom3_PCD.SETT,Atom1_NCD.SETT,Atom2_NCD.SETT,Atom3_NCD.SETT, Atom1_NCD.SETT,Atom2_NCD.SETT,Atom3_NCD.SETT,Atom1_NCD.SETT,Atom2_NCD.SETT,Atom3_NCD.SETT,Atom1_NCD.NCD.SETT,Atom2_NCD.NCD.SETT,Atom3_NCD.NCD.SETT, AD_List[[2]][which(AD_List[[1]]==PCD(NCD(SETT,AD.set),AD.set))], AD_List[[2]][which(AD_List[[1]]==NCD(PCD(SETT,AD.set),AD.set))], CpY))[2] DCC<-OrigDCC } if (DCC==4) { tau_Num<-DIST(c(DCC,Atom1_PCD.SETT,Atom2_PCD.SETT,Atom3_PCD.SETT,Atom1_SETT,Atom2_SETT,Atom3_SETT,Atom1_NCD.SETT,CpY))[2] tau_Den<-DIST(c(DCC,Atom1_PCD.SETT,Atom2_PCD.SETT,Atom3_PCD.SETT,Atom1_NCD.SETT,Atom2_NCD.SETT,Atom3_NCD.SETT,Atom1_NCD.NCD.SETT,CpY))[2] } if (DCC==7) { tau_Num<-DIST(c(DCC,Atom1_PCD.SETT,Atom2_PCD.SETT,Atom3_PCD.SETT,Atom1_SETT,Atom2_SETT,Atom3_SETT,Atom1Mat,Atom2Mat,Atom3Mat))[2] tau_Den<-DIST(c(DCC,Atom1_PCD.SETT,Atom2_PCD.SETT,Atom3_PCD.SETT,Atom1_NCD.SETT,Atom2_NCD.SETT,Atom3_NCD.SETT,Atom1Mat,Atom2Mat,Atom3Mat))[2] } if (is.element(DCC,c(9,13))) { tau_Num<-DIST(c(DCC,Atom1_PCD.SETT,Atom2_PCD.SETT,Atom3_PCD.SETT,Atom1_SETT,Atom2_SETT,Atom3_SETT,EOM))[2] tau_Den<-DIST(c(DCC,Atom1_PCD.SETT,Atom2_PCD.SETT,Atom3_PCD.SETT,Atom1_NCD.SETT,Atom2_NCD.SETT,Atom3_NCD.SETT,EOM))[2] } tau<-tau_Num/tau_Den+AD_List[[2]][which(AD_List[[1]]==PCD(SETT,AD.set))] n<-SD.indexes[length(SD.indexes)-1] l<-BondAnalysis$Traits$LCPLength if ((!(is.na(LIPD)))&(SETT<LIPD)) { k<-SD_List[[2]][which(SD_List[[1]]==NCD(SETT,SD.set))] w<-k-tau eta<-n-k z<-l CN_tau<-CF_List[[1]][which(CF_List[[2]]==NCD(SETT,SD.set))] } else { k<-n w<-SD_List[[2]][which(SD_List[[1]]==NCD(SETT,SD.set))]-tau eta<-0 z<-0 CN_tau<-0 } ###################>>>>>> <<<<<<<########################################################## ##############>>>>>> calculation of YtM, ModDUR, MacDUR and Conv <<<<<<<##################################################### ###################>>>>>> <<<<<<<########################################################## CF_final<-CF.values[length(CF.values)]+RV if ((z==0)&(SimpleLastPeriod==TRUE)) { # simple 
interest ytm.p.a.<-(CF_final/DP-1)*(CpY/w)*100 a<-1+ytm.p.a.*w/(100*CpY) ModDUR.inYears<-(-1)*(1/DP)*(-1)*CF_final*(a^(-2))*(w/CpY) MacDUR.inYears<-ModDUR.inYears*a Conv.inYears<-0.5*(1/DP)*2*CF_final*(a^(-3))*(w/CpY)^2 ModDUR.inPeriods<-ModDUR.inYears*CpY MacDUR.inPeriods<-MacDUR.inYears*CpY Conv.inPeriods<-Conv.inYears*(CpY^2) } else { yApprox<-Coup/CP+(RV/CP)^(1/(w+eta+z))-1 aApprox<-1+yApprox/CpY # if ((Use.ClosedForm==1)&(!(1+0.015>aApprox))) { # CF<-CF.values[2] # # # # creating the price equation # # # PriceEqn<-gsub(" ","",paste("-",DP,"+b*",DP,"+",CN_tau,"*b^",w,"-",CN_tau,"*b^(",w,"+1)+",CF,"*b^(",w,"+1)-",CF,"*b^(",w,"+",eta,"+1)+",CF_final,"*b^(",w,"+",eta,"+",z,")-",CF_final,"*b^(",w,"+",eta,"+",z,"+1)")) # PriceFunction<-function(b) { # out<-eval(parse(text=PriceEqn)) # return(out) # } # # # # creating the first derivative of the price function # # # d_PriceEqn<-gsub(" ","",paste(DP,"+",w,"*",CN_tau,"*b^(",w,"-1)+(",w,"+1)*(",CF,"-",CN_tau,")*b^",w,"-(",w,"+",eta,"+1)*",CF,"*b^(",w,"+",eta,")+(",w,"+",eta,"+",z,")*",CF_final,"*b^(",w,"+",eta,"+",z,"-1)-(",w,"+",eta,"+",z,"+1)*",CF_final,"*b^(",w,"+",eta,"+",z,")")) # d_PriceFunction<-function(b) { # out<-eval(parse(text=d_PriceEqn)) # return(out) # } # bApprox<-1/aApprox # NewtRaph.Out<-NewtonRaphson(PriceFunction,d_PriceFunction,bApprox,Precision) # b<-as.numeric(NewtRaph.Out[1]) # a<-1/b # ytm.p.a.<-CpY*(a-1)*100 # ModDUR.inYears<-ModDUR(a,c(1,CN_tau,CF,CF_final,w,eta,z,CpY,DP)) # # ModDUR.inYears<-(-1)*(1/DP)*dm_MyPriceEqn(a,c(1,CN_tau,CF,CF_final,w,eta,z,CpY)) # MacDUR.inYears<-ModDUR.inYears*a # Conv.inYears<-CONV(a,c(2,CN_tau,CF,CF_final,w,eta,z,CpY,DP)) # # Conv.inYears<-0.5*(1/DP)*dm_MyPriceEqn(a,c(2,CN_tau,CF,CF_final,w,eta,z,CpY)) # ModDUR.inPeriods<-ModDUR.inYears*CpY # MacDUR.inPeriods<-MacDUR.inYears*CpY # Conv.inPeriods<-Conv.inYears*(CpY^2) # } else { if (z>0) { DiscPowerVector<-c((seq(0,eta,by=1)+w),(w+eta+z)) } else { DiscPowerVector<-w } CF.remain<-CF_List[[1]][which(CF_List[[2]]>SETT)] CF.remain<-c(CF.remain[-length(CF.remain)],CF.remain[length(CF.remain)]+RV) # # creating the price equation # summands<-gsub(" ","",paste(CF.remain,"/(a^",DiscPowerVector,")")) summands<-c(-DP,summands) PriceEqn<-paste(summands,collapse="+") PriceFunction.Standard<-function(a) { out<-eval(parse(text=PriceEqn)) return(out) } # # creating the first derivative of the price function # d_PowerVector<-DiscPowerVector+1 d_summands<-gsub(" ","",paste("(-1)*(",CF.remain,"*",DiscPowerVector,"/(a^",d_PowerVector,"))")) d_PriceEqn<-paste(d_summands,collapse="+") d_PriceFunction.Standard<-function(a) { out<-eval(parse(text=d_PriceEqn)) return(out) } NewtRaph.Out<-NewtonRaphson(PriceFunction.Standard,d_PriceFunction.Standard,aApprox,Precision) a<-as.numeric(NewtRaph.Out[1]) ytm.p.a.<-CpY*(a-1)*100 # # calculating the Modified Duration and the MacAulay Duration # ModDUR.inPeriods<-(-1)*(1/DP)*d_PriceFunction.Standard(a) MacDUR.inPeriods<-ModDUR.inPeriods*a ModDUR.inYears<-ModDUR.inPeriods/CpY MacDUR.inYears<-MacDUR.inPeriods/CpY # # calculating the Convexity # d2_PowerVector<-d_PowerVector+1 d2_summands<-gsub(" ","",paste("(",CF.remain,"*",DiscPowerVector,"*",d_PowerVector,")/(a^",d2_PowerVector,")")) Added.d2_summands<-paste(d2_summands,collapse="+") d2_PriceFunction.Standard <- function(a) { out<-eval(parse(text=Added.d2_summands)) return(out) } Conv.inPeriods<-(1/DP)*0.5*d2_PriceFunction.Standard(a) Conv.inYears<-Conv.inPeriods/(CpY^2) # } } N.Iter<-NewtRaph.Out[2] f.value<-NewtRaph.Out[3] path.break<-NewtRaph.Out[4] } } } } # 
out<-list(tau=tau,n=n,k=k,w=w,eta=eta,z=z,CN_tau=CN_tau,ytm.p.a.=ytm.p.a.,yApprox=yApprox,DP=DP,CF_final=CF_final, # ModDUR.inPeriods=ModDUR.inPeriods,MacDUR.inPeriods=MacDUR.inPeriods,Conv.inPeriods=Conv.inPeriods, # ModDUR.inYears=ModDUR.inYears,MacDUR.inYears=MacDUR.inYears,Conv.inYears=Conv.inYears, # aApprox=aApprox,N.Iter=N.Iter,f.value=f.value,path.break=path.break) # out<-PriceEqn out<-list(CP=CP,AccrInt=AccrInt,DP=DP,ytm.p.a.=ytm.p.a.,ModDUR.inYears=ModDUR.inYears,MacDUR.inYears=MacDUR.inYears, Conv.inYears=Conv.inYears,ModDUR.inPeriods=ModDUR.inPeriods,MacDUR.inPeriods=MacDUR.inPeriods, Conv.inPeriods=Conv.inPeriods,tau=tau) return(out) }
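# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the package): the Newton-Raphson iteration
# that BondVal.Yield relies on to solve the price equation f(a)=0 for the
# periodic discount base a. The package's internal NewtonRaphson routine is
# not reproduced here; this hypothetical version only shows the method.
newton_raphson_sketch<-function(f,df,x0,tol=.Machine$double.eps^0.75,max_iter=100) {
  x<-x0
  for (i in c(1:max_iter)) {
    fx<-f(x)
    if (abs(fx)<tol) break  # converged: price equation is (numerically) zero
    x<-x-fx/df(x)           # Newton step
  }
  list(root=x,n_iter=i,f_value=f(x))
}
# Example: price 100, annual 5% coupon, two periods left, redemption 100:
# f<-function(a) -100+5/a+105/a^2
# df<-function(a) -5/a^2-210/a^3
# newton_raphson_sketch(f,df,x0=1.04)$root   # ~1.05, i.e. a yield of ~5% p.a.
# ---------------------------------------------------------------------------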
#' DP (dirty price calculation of a fixed-coupon bond)
#'
#' \bold{DP} returns a bond's temporal and pecuniary characteristics on the desired calendar date
#' according to the methodology presented in Djatschenko (2018).
#'
#' The function \bold{DP} generates a list of the two data frames \code{Dates} and \code{Cash},
#' which contain the relevant date-related and pecuniary characteristics that were either provided
#' by the user or calculated by the function. \bold{Value} provides further information on the
#' output.
#'
#' @param CP The bond's clean price.
#' @param SETT The settlement date. Date class object with format "\%Y-\%m-\%d". (required)
#' @param Em The bond's issue date. Date class object with format "\%Y-\%m-\%d". (required)
#' @param Mat So-called "maturity date", i.e. the date on which the redemption value and the final interest
#' are paid. Date class object with format "\%Y-\%m-\%d". (required)
#' @param CpY Number of interest payments per year (non-negative integer; element of the set
#' \{0,1,2,3,4,6,12\}). Default: 2.
#' @param FIPD First interest payment date after \code{Em}. Date class object with format "\%Y-\%m-\%d". Default: \code{NA}.
#' @param LIPD Last interest payment date before \code{Mat}. Date class object with format "\%Y-\%m-\%d". Default: \code{NA}.
#' @param FIAD Date on which the interest accrual starts (so-called "dated date"). Date class object with format "\%Y-\%m-\%d". Default: \code{NA}.
#' @param RV The redemption value of the bond. Default: 100.
#' @param Coup Nominal interest rate per year in percent. Default: \code{NA}.
#' @param DCC The day count convention the bond follows. Default: \code{NA}.
#' For a list of day count conventions currently implemented type \code{View(List.DCC)}.
#' @param EOM Boolean indicating whether the bond follows the End-of-Month rule. Default: \code{NA}.
#' @param DateOrigin Determines the starting point for the daycount in "Date" objects.
#' Default: "1970-01-01".
#' @param InputCheck If 1, the input variables are checked for the correct format. Default: 1.
#' @param FindEOM If \code{TRUE}, \code{EOM} is overridden by the value inferred from the data.
#' Default: \code{FALSE}.
#' @param RegCF.equal If 0, the amounts of regular cash flows are calculated according to the
#' stipulated \code{DCC}. Any other value forces all regular cash flows to be equal sized.
#' Default: 0.
#' @param AnnivDatesOutput A list containing the output of the function AnnivDates. Default: \code{NA}.
#'
#'
#' @return
#' \describe{
#'   \item{\emph{\bold{Dates}} (data frame)}{
#'     \describe{
#'       \item{\emph{Previous_CouponDate}}{}
#'       \item{\emph{SettlementDate}}{}
#'       \item{\emph{Next_CouponDate}}{}
#'       \item{\emph{DaysAccrued}}{The number of days accrued from \emph{Previous_CouponDate} to
#'       \emph{SettlementDate}, incl. the earlier and excl. the later date.}
#'       \item{\emph{DaysInPeriod}}{The number of interest accruing days in the coupon period
#'       from \emph{Previous_CouponDate} to \emph{Next_CouponDate}.}
#'     }
#'   }
#'   \item{\emph{\bold{Cash}} (data frame)}{
#'     \describe{
#'       \item{\emph{Dirty_Price}}{Sum of \emph{Clean_Price} and \emph{Accrued_Interest}.}
#'       \item{\emph{Clean_Price}}{The clean price entered.}
#'       \item{\emph{Accrued_Interest}}{The amount of accrued interest on \emph{SettlementDate}.}
#'       \item{\emph{CouponPayment}}{The interest payment on \emph{Next_CouponDate}.}
#'     }
#'   }
#' }
#'
#' @references
#' \enumerate{
#'   \item{Djatschenko, Wadim, The Nitty Gritty of Bond Valuation: A Generalized Methodology for Fixed Coupon Bond Analysis Allowing for Irregular Periods and Various Day Count Conventions (November 5, 2018). Available at SSRN: https://ssrn.com/abstract=3205167.}
#' }
#'
#' @examples
#' CP<-rep(100,16)
#' SETT<-rep(as.Date("2014-10-15"),16)
#' Em<-rep(as.Date("2013-11-30"),16)
#' Mat<-rep(as.Date("2021-04-21"),16)
#' CpY<-rep(2,16)
#' FIPD<-rep(as.Date("2015-02-28"),16)
#' LIPD<-rep(as.Date("2020-02-29"),16)
#' FIAD<-rep(as.Date("2013-11-30"),16)
#' RV<-rep(100,16)
#' Coup<-rep(5.25,16)
#' DCC<-seq(1,16,by=1)
#' DP.DCC_Comparison<-data.frame(CP,SETT,Em,Mat,CpY,FIPD,LIPD,FIAD,RV,Coup,DCC)
#'
#' # you can pass an array to AnnivDates
#' List<-suppressWarnings(
#'   AnnivDates(unlist(DP.DCC_Comparison[1,c(3:11)],use.names=FALSE))
#' )
#'
#' # and use its output in DP
#' suppressWarnings(
#'   DP(unlist(DP.DCC_Comparison[1,c(1:11)],use.names=FALSE),AnnivDatesOutput=List)
#' )
#'
#' # or just apply DP to the data frame
#' DP.Output<-suppressWarnings(
#'   apply(DP.DCC_Comparison[,c('CP','SETT','Em','Mat','CpY','FIPD',
#'                              'LIPD','FIAD','RV','Coup','DCC')],
#'         1,function(y) DP(y[1],y[2],y[3],y[4],y[5],y[6],y[7],
#'                          y[8],y[9],y[10],y[11])))
#'
#' DirtyPrice<-do.call(rbind,lapply(lapply(DP.Output, `[[`, 2), `[[`, 1))
#' DP.DCC_Comparison<-cbind(DP.DCC_Comparison,DirtyPrice)
#' DP.DCC_Comparison
#'
#' @export
#'
DP<-function(CP=as.numeric(NA),SETT=as.Date(NA),Em=as.Date(NA),Mat=as.Date(NA),CpY=as.numeric(NA),FIPD=as.Date(NA),LIPD=as.Date(NA),FIAD=as.Date(NA),RV=as.numeric(NA),Coup=as.numeric(NA),DCC=as.numeric(NA),EOM=as.numeric(NA),DateOrigin=as.Date("1970-01-01"),InputCheck=1,FindEOM=FALSE,RegCF.equal=0,AnnivDatesOutput=as.list(NA)) {
  if (length(CP)>1) {
    arglist<-CP
    argnames<-c("CP","SETT","Em","Mat","CpY","FIPD","LIPD","FIAD","RV","Coup","DCC","EOM","DateOrigin","InputCheck","FindEOM","RegCF.equal")
    for (i in c(1:length(arglist))) {
      assign(argnames[i],arglist[i])
    }
  }
  if (InputCheck==1) {
    CheckedInput<-InputFormatCheck(CP=CP,SETT=SETT,Em=Em,Mat=Mat,CpY=CpY,FIPD=FIPD,LIPD=LIPD,FIAD=FIAD,RV=RV,Coup=Coup,DCC=DCC,EOM=EOM,DateOrigin=DateOrigin)
    CP<-CheckedInput$CP
    SETT<-CheckedInput$SETT
    Em<-CheckedInput$Em
    Mat<-CheckedInput$Mat
    CpY<-CheckedInput$CpY
    FIPD<-CheckedInput$FIPD
    LIPD<-CheckedInput$LIPD
    FIAD<-CheckedInput$FIAD
    RV<-CheckedInput$RV
    Coup<-CheckedInput$Coup
    DCC<-CheckedInput$DCC
    EOM<-CheckedInput$EOM
    DateOrigin<-CheckedInput$DateOrigin
  }
  DP<-as.numeric(NA)
  if ((missing(CP))|(is.na(CP))) {
    CP<-0
    warning("Clean price (CP) is missing or NA. CP is set 0.")
  }
  AccrInt<-as.numeric(NA)
  CouponPayment<-as.numeric(NA)
  NAccr<-as.numeric(NA)
  NPeriod<-as.numeric(NA)
  PCD_SETT<-as.Date(NA)
  NCD_SETT<-as.Date(NA)
  if ((missing(SETT))|(is.na(SETT))) {
    SETT<-as.Date(NA)
    warning("Settlement date (SETT) is missing or NA.
NA created!") } else { if (all(is.na(AnnivDatesOutput))) { BondAnalysis<-suppressWarnings(AnnivDates(Em=Em,Mat=Mat,CpY=CpY,FIPD=FIPD,LIPD=LIPD,FIAD=FIAD,RV=RV,Coup=Coup,DCC=DCC,EOM=EOM,DateOrigin=DateOrigin,InputCheck=0,FindEOM=FindEOM,RegCF.equal=RegCF.equal)) } else { BondAnalysis<-AnnivDatesOutput } RealDates<-na.omit(BondAnalysis$DateVectors$RealDates) AnnivDates<-na.omit(BondAnalysis$DateVectors$AnnivDates) if (is.na(BondAnalysis$Traits$FIPD)) { FIPD<-BondAnalysis$Traits$est_FIPD } else { FIPD<-BondAnalysis$Traits$FIPD } if (is.na(BondAnalysis$Traits$LIPD)) { LIPD<-BondAnalysis$Traits$est_LIPD } else { LIPD<-BondAnalysis$Traits$LIPD } if (BondAnalysis$Warnings$EmMatMissing==1) { warning("Maturity date (Mat) is missing or NA. NA created!") } else { if (BondAnalysis$Warnings$NegLifeFlag==1) { warning("Issue date (Em) is not before maturity date (Mat)! NA created!") } else { if ((missing(Coup))|(is.na(Coup))) { warning("The supplied interest rate p.a. (Coup) is NA or cannot be processed. NA created!") } else { if ((SETT<RealDates[1])|(SETT>=RealDates[length(RealDates)])) { warning("Settlement date (SETT) is not between issue date (Em) and maturity date (Mat). NA created!") } else { if (BondAnalysis$Warnings$ZeroFlag==1) { DP<-CP AccrInt<-as.numeric(0) NAccr<-as.numeric(NA) NPeriod<-as.numeric(NA) PCD_SETT<-Em NCD_SETT<-Mat CouponPayment<-BondAnalysis$PaySched$CoupPayments[1] warning("This is a Zero Coupon bond! No interest accrues!") } else { EOM<-BondAnalysis$Traits$EOM_used # If DCC is not provided or NA or not element of {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}, the following code sets it 2 (Act/Act (ICMA)). if ((missing(DCC))|(is.na(DCC))) { DCC<-2 warning("The day count indentifier (DCC) is missing or NA. DCC is set 2 (Act/Act (ICMA))!") } else { if (!(is.element(DCC,c(1:16)))) { DCC<-2 warning("The day count indentifier (DCC) is not element of {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}! DCC is set 2 (Act/Act (ICMA))!") } } if ((DCC==7)&((missing(Mat))|(is.na(Mat)))) { warning("Maturity date (Mat) is missing or NA. Accrued interest computation for the specified day count convention 30E/360 (ISDA) requires a valid Mat value. NA created!") } else { if ((DCC==9)&((missing(EOM))|(is.na(EOM)))) { warning("End-of-Month-Rule identifier (EOM) is missing or NA. Accrued interest computation for the specified day count convention 30/360 US requires a valid EOM value. NA created!") } else { if ((DCC==13)&((missing(EOM))|(is.na(EOM)))) { warning("End-of-Month-Rule identifier (EOM) is missing or NA. Accrued interest computation for the specified day count convention 30/365 requires a valid EOM value. NA created!") } else { if ((DCC==2)&((missing(EOM))|(is.na(EOM)))) { warning("End-of-Month-Rule identifier (EOM) is missing or NA. Accrued interest computation for the specified day count convention Act/Act (ICMA) requires a valid EOM value. NA created!") } else { if ((DCC==14)&((missing(EOM))|(is.na(EOM)))) { warning("End-of-Month-Rule identifier (EOM) is missing or NA. Accrued interest computation for the specified day count convention Act/365 (Canadian Bond) requires a valid EOM value. NA created!") } else { if ((DCC==2)&((missing(CpY))|(is.na(CpY)))) { CpY<-2 warning("Number of interest payments per year (CpY) is missing or NA. Accrued interest computation for the specified day count convention Act/Act (ICMA) requires a valid CpY value. 
CpY is set 2!") } else { if ((DCC==2)&(!(is.element(CpY,c(1,2,3,4,6,12))))) { CpY<-2 warning("Number of interest payments per year (CpY) is not element of {1,2,3,4,6,12}. Accrued interest computation for the specified day count convention Act/Act (ICMA) requires a valid CpY value. CpY is set 2!") } } if ((DCC==4)&((missing(CpY))|(is.na(CpY)))) { CpY<-2 warning("Number of interest payments per year (CpY) is missing or NA. Accrued interest computation for the specified day count convention Act/365L requires a valid CpY value. CpY is set 2!") } else { if ((DCC==4)&(!(is.element(CpY,c(1,2,3,4,6,12))))) { CpY<-2 warning("Number of interest payments per year (CpY) is not element of {1,2,3,4,6,12}. Accrued interest computation for the specified day count convention Act/365L requires a valid CpY value. CpY is set 2!") } } if ((DCC==14)&((missing(CpY))|(is.na(CpY)))) { CpY<-2 warning("Number of interest payments per year (CpY) is missing or NA. Accrued interest computation for the specified day count convention Act/365 (Canadian Bond) requires a valid CpY value. CpY is set 2!") } else { if ((DCC==14)&(!(is.element(CpY,c(1,2,3,4,6,12))))) { CpY<-2 warning("Number of interest payments per year (CpY) is not element of {1,2,3,4,6,12}. Accrued interest computation for the specified day count convention Act/365 (Canadian Bond) requires a valid CpY value. CpY is set 2!") } } if (DCC==16) { if ((missing(CpY))|(is.na(CpY))) { CpY<-2 warning("Number of interest payments per year (CpY) is missing or NA. Accrued interest computation for the specified day count convention BusDay/252 (Brazilian) requires a valid CpY value. CpY is set 2!") } else { if (!(is.element(CpY,c(1,2,3,4,6,12)))) { CpY<-2 warning("Number of interest payments per year (CpY) is not element of {1,2,3,4,6,12}. Accrued interest computation for the specified day count convention BusDay/252 (Brazilian) requires a valid CpY value. CpY is set 2!") } } } if ((missing(RV))|(is.na(RV))) { RV<-100 warning("Redemption value (RV) is missing or NA. 
RV is set 100!") } if (is.element(DCC,c(1,3,5,6,8,10,11,12,15))) { # c(RV,Coup,DCC,Y1,M1,D1,Y2,M2,D2) PCD_SETT<-PCD(SETT,RealDates) Atoms_PCD_SETT<-as.numeric(unlist(strsplit(as.character(PCD_SETT),split = "-"))) Atoms_SETT<-as.numeric(unlist(strsplit(as.character(SETT),split = "-"))) NCD_SETT<-NCD(SETT,RealDates) Atoms_NCD_SETT<-as.numeric(unlist(strsplit(as.character(NCD_SETT),split = "-"))) DIST_Output_AccrInt<-DIST(c(DCC,Atoms_PCD_SETT,Atoms_SETT)) DIST_Output_Coup<-DIST(c(DCC,Atoms_PCD_SETT,Atoms_NCD_SETT)) NAccr<-DIST_Output_AccrInt[1] AccrInt<-RV*(Coup/100)*DIST_Output_AccrInt[2] NPeriod<-DIST_Output_Coup[1] CouponPayment<-RV*(Coup/100)*DIST_Output_Coup[2] DP<-CP+AccrInt } else { if (DCC==16) { # c(RV,Coup,DCC,Y1,M1,D1,Y2,M2,D2,NonBus) PCD_SETT<-PCD(SETT,RealDates) Atoms_PCD_SETT<-as.numeric(unlist(strsplit(as.character(PCD_SETT),split = "-"))) Atoms_SETT<-as.numeric(unlist(strsplit(as.character(SETT),split = "-"))) NCD_SETT<-NCD(SETT,RealDates) Atoms_NCD_SETT<-as.numeric(unlist(strsplit(as.character(NCD_SETT),split = "-"))) NonBus.PCD.SETT<-length(which((NonBusDays.Brazil$Date>=PCD_SETT)&(NonBusDays.Brazil$Date<SETT))) NonBus.PCD.NCD<-length(which((NonBusDays.Brazil$Date>=PCD_SETT)&(NonBusDays.Brazil$Date<NCD_SETT))) DIST_Output_AccrInt<-DIST(c(DCC,Atoms_PCD_SETT,Atoms_SETT,NonBus.PCD.SETT)) DIST_Output_Coup<-DIST(c(DCC,Atoms_PCD_SETT,Atoms_NCD_SETT,NonBus.PCD.NCD)) if ((!(SETT<FIPD))&(!(SETT>LIPD))) { CouponPayment<-RV*(((1+Coup/100)^(1/CpY))-1) } else { CouponPayment<-RV*(((1+Coup/100)^(DIST_Output_Coup[2]))-1) } NAccr<-DIST_Output_AccrInt[1] AccrInt<-RV*(((1+Coup/100)^(DIST_Output_AccrInt[2]))-1) if (AccrInt>CouponPayment) { AccrInt<-CouponPayment } NPeriod<-DIST_Output_Coup[1] DP<-CP+AccrInt } else { if (DCC==4) { # c(RV,Coup,DCC,Y1,M1,D1,Y2,M2,D2,YearNCP,CpY) PCD_SETT<-PCD(SETT,RealDates) Atoms_PCD_SETT<-as.numeric(unlist(strsplit(as.character(PCD_SETT),split = "-"))) Atoms_SETT<-as.numeric(unlist(strsplit(as.character(SETT),split = "-"))) NCD_SETT<-NCD(SETT,RealDates) Atoms_NCD_SETT<-as.numeric(unlist(strsplit(as.character(NCD_SETT),split = "-"))) DIST_Output_AccrInt<-DIST(c(DCC,Atoms_PCD_SETT,Atoms_SETT,Atoms_NCD_SETT[1],CpY)) DIST_Output_Coup<-DIST(c(DCC,Atoms_PCD_SETT,Atoms_NCD_SETT,Atoms_NCD_SETT[1],CpY)) NAccr<-DIST_Output_AccrInt[1] AccrInt<-RV*(Coup/100)*DIST_Output_AccrInt[2] NPeriod<-DIST_Output_Coup[1] CouponPayment<-RV*(Coup/100)*DIST_Output_Coup[2] DP<-CP+AccrInt } else { if (DCC==7) { # c(RV,Coup,DCC,Y1,M1,D1,Y2,M2,D2,YMat,MMat,DMat) PCD_SETT<-PCD(SETT,RealDates) Atoms_PCD_SETT<-as.numeric(unlist(strsplit(as.character(PCD_SETT),split = "-"))) Atoms_SETT<-as.numeric(unlist(strsplit(as.character(SETT),split = "-"))) NCD_SETT<-NCD(SETT,RealDates) Atoms_NCD_SETT<-as.numeric(unlist(strsplit(as.character(NCD_SETT),split = "-"))) Atoms_Mat<-as.numeric(unlist(strsplit(as.character(Mat),split = "-"))) DIST_Output_AccrInt<-DIST(c(DCC,Atoms_PCD_SETT,Atoms_SETT,Atoms_Mat)) DIST_Output_Coup<-DIST(c(DCC,Atoms_PCD_SETT,Atoms_NCD_SETT,Atoms_Mat)) NAccr<-DIST_Output_AccrInt[1] AccrInt<-RV*(Coup/100)*DIST_Output_AccrInt[2] NPeriod<-DIST_Output_Coup[1] CouponPayment<-RV*(Coup/100)*DIST_Output_Coup[2] DP<-CP+AccrInt } else { if (is.element(DCC,c(9,13))) { # c(RV,Coup,DCC,Y1,M1,D1,Y2,M2,D2,EOM) PCD_SETT<-PCD(SETT,RealDates) Atoms_PCD_SETT<-as.numeric(unlist(strsplit(as.character(PCD_SETT),split = "-"))) Atoms_SETT<-as.numeric(unlist(strsplit(as.character(SETT),split = "-"))) NCD_SETT<-NCD(SETT,RealDates) Atoms_NCD_SETT<-as.numeric(unlist(strsplit(as.character(NCD_SETT),split = "-"))) 
DIST_Output_AccrInt<-DIST(c(DCC,Atoms_PCD_SETT,Atoms_SETT,EOM)) DIST_Output_Coup<-DIST(c(DCC,Atoms_PCD_SETT,Atoms_NCD_SETT,EOM)) NAccr<-DIST_Output_AccrInt[1] AccrInt<-RV*(Coup/100)*DIST_Output_AccrInt[2] NPeriod<-DIST_Output_Coup[1] CouponPayment<-RV*(Coup/100)*DIST_Output_Coup[2] DP<-CP+AccrInt } else { if (DCC==2|DCC==14) { PCD_SETT<-PCD(SETT,RealDates) NCD_SETT<-NCD(SETT,RealDates) Refer<-BondAnalysis$Traits$Refer AD_indexes<-BondAnalysis$DateVectors$AD_indexes AD_indexes<-c((AD_indexes[1]-1),AD_indexes,(AD_indexes[length(AD_indexes)]+1)) AtomVector_Refer<-as.numeric(unlist(strsplit(as.character(Refer),split = "-"))) Atom1Refer<-AtomVector_Refer[1] Atom2Refer<-AtomVector_Refer[2] Atom3Refer<-AtomVector_Refer[3] # creating the anniversary date preceding AD1 AtomVector_AD1<-as.numeric(unlist(strsplit(as.character(AnnivDates[1]),split = "-"))) Atom1AD1<-AtomVector_AD1[1] Atom2AD1<-AtomVector_AD1[2] Atom3AD1<-AtomVector_AD1[3] PrevDate<-as.numeric(CppPrevDate(c(Atom1AD1,Atom2AD1,Atom3AD1,Atom1AD1,Atom2AD1,Atom3AD1,Atom1Refer,Atom2Refer,Atom3Refer,CpY,EOM))) PrevDate<-as.Date(paste(PrevDate[1],PrevDate[2],PrevDate[3],sep="-")) AnnivDates<-c(PrevDate,AnnivDates) AnnivDates<-sort(na.omit(AnnivDates[!duplicated(AnnivDates)])) # creating the anniversary date succeeding ADfin AtomVector_ADfin<-as.numeric(unlist(strsplit(as.character(AnnivDates[length(AnnivDates)]),split = "-"))) Atom1ADfin<-AtomVector_ADfin[1] Atom2ADfin<-AtomVector_ADfin[2] Atom3ADfin<-AtomVector_ADfin[3] SuccDate<-as.numeric(CppSuccDate(c(Atom1ADfin,Atom2ADfin,Atom3ADfin,Atom1ADfin,Atom2ADfin,Atom3ADfin,Atom1Refer,Atom2Refer,Atom3Refer,CpY,EOM))) SuccDate<-as.Date(paste(SuccDate[1],SuccDate[2],SuccDate[3],sep="-")) AnnivDates<-c(AnnivDates,SuccDate) AnnivDates<-sort(na.omit(AnnivDates[!duplicated(AnnivDates)])) AD_List<-list(AnnivDates,AD_indexes) # DIST: for DCC = {2,14} x is a vector of 22 integers: # c(DCC,Y1,M1,D1,Y2,M2,D2,Y3,M3,D3,Y4,M4,D4,Y5,M5,D5,Y6,M6,D6,P,N,CpY) with # Y1-M1-D1 = PCD(t_a,AD) ; Y2-M2-D2 = t_a ; Y3-M3-D3 = NCD(t_a,AD) # Y4-M4-D4 = PCD(t_b,AD) ; Y5-M5-D5 = t_b ; Y6-M6-D6 = NCD(t_b,AD) # P = P(t_b,AD) ; N = N(t_a,AD) # for AccrInt: t_a = PCD(SETT,SD) and t_b = SETT ta_AccrInt<-PCD(SETT,RealDates) Atoms_ta_AccrInt<-as.numeric(unlist(strsplit(as.character(ta_AccrInt),split = "-"))) tb_AccrInt<-SETT Atoms_tb_AccrInt<-as.numeric(unlist(strsplit(as.character(tb_AccrInt),split = "-"))) PCD_ta_AccrInt<-PCD(ta_AccrInt,AnnivDates) Atoms_PCD_ta_AccrInt<-as.numeric(unlist(strsplit(as.character(PCD_ta_AccrInt),split = "-"))) NCD_ta_AccrInt<-NCD(ta_AccrInt,AnnivDates) Atoms_NCD_ta_AccrInt<-as.numeric(unlist(strsplit(as.character(NCD_ta_AccrInt),split = "-"))) PCD_tb_AccrInt<-PCD(tb_AccrInt,AnnivDates) Atoms_PCD_tb_AccrInt<-as.numeric(unlist(strsplit(as.character(PCD_tb_AccrInt),split = "-"))) NCD_tb_AccrInt<-NCD(tb_AccrInt,AnnivDates) Atoms_NCD_tb_AccrInt<-as.numeric(unlist(strsplit(as.character(NCD_tb_AccrInt),split = "-"))) N_ta_AccrInt<-AD_List[[2]][which(AD_List[[1]]==NCD_ta_AccrInt)] P_tb_AccrInt<-AD_List[[2]][which(AD_List[[1]]==PCD_tb_AccrInt)] DIST_Output_AccrInt<-DIST(c(DCC,Atoms_PCD_ta_AccrInt,Atoms_ta_AccrInt,Atoms_NCD_ta_AccrInt, Atoms_PCD_tb_AccrInt,Atoms_tb_AccrInt,Atoms_NCD_tb_AccrInt,P_tb_AccrInt,N_ta_AccrInt,CpY)) NAccr<-DIST_Output_AccrInt[1] AccrInt<-RV*(Coup/100)*DIST_Output_AccrInt[2] # for Coup: t_a = PCD(SETT,SD) and t_b = NCD(SETT,SD) ta_Coup<-PCD(SETT,RealDates) Atoms_ta_Coup<-as.numeric(unlist(strsplit(as.character(ta_Coup),split = "-"))) tb_Coup<-NCD(SETT,RealDates) 
Atoms_tb_Coup<-as.numeric(unlist(strsplit(as.character(tb_Coup),split = "-"))) PCD_ta_Coup<-PCD(ta_Coup,AnnivDates) Atoms_PCD_ta_Coup<-as.numeric(unlist(strsplit(as.character(PCD_ta_Coup),split = "-"))) NCD_ta_Coup<-NCD(ta_Coup,AnnivDates) Atoms_NCD_ta_Coup<-as.numeric(unlist(strsplit(as.character(NCD_ta_Coup),split = "-"))) PCD_tb_Coup<-PCD(tb_Coup,AnnivDates) Atoms_PCD_tb_Coup<-as.numeric(unlist(strsplit(as.character(PCD_tb_Coup),split = "-"))) NCD_tb_Coup<-NCD(tb_Coup,AnnivDates) Atoms_NCD_tb_Coup<-as.numeric(unlist(strsplit(as.character(NCD_tb_Coup),split = "-"))) N_ta_Coup<-AD_List[[2]][which(AD_List[[1]]==NCD_ta_Coup)] P_tb_Coup<-AD_List[[2]][which(AD_List[[1]]==PCD_tb_Coup)] DIST_Output_Coup<-DIST(c(DCC,Atoms_PCD_ta_Coup,Atoms_ta_Coup,Atoms_NCD_ta_Coup, Atoms_PCD_tb_Coup,Atoms_tb_Coup,Atoms_NCD_tb_Coup,P_tb_Coup,N_ta_Coup,CpY)) NPeriod<-DIST_Output_Coup[1] CouponPayment<-RV*(Coup/100)*DIST_Output_Coup[2] DP<-CP+AccrInt } } } } } } if ((!(RegCF.equal==0))&(!(DCC==16))) { if ((!(SETT<FIPD))&(!(SETT>LIPD))) { CouponPayment<-RV*(Coup/(CpY*100)) if (AccrInt>CouponPayment) { AccrInt<-CouponPayment DP<-CP+AccrInt } } } } } } } } } } } } } } Dates<-data.frame(Previous_CouponDate=PCD_SETT,SettlementDate=SETT,Next_CouponDate=NCD_SETT,DaysAccrued=NAccr,DaysInPeriod=NPeriod) Cash<-data.frame(Dirty_Price=DP,Clean_Price=CP,Accrued_Interest=AccrInt,CouponPayment=CouponPayment) DP_Out<-list(Dates,Cash) return(DP_Out) }
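# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the package): accrued interest under
# Act/Act (ICMA), the convention DP falls back to when DCC is missing. For a
# regular coupon period the package's DIST-based computation reduces to
# RV * (Coup/100) * (1/CpY) * days accrued / days in period; the hypothetical
# helper below assumes a regular period and ignores the irregular cases that
# DIST handles.
accr_int_actact_icma<-function(RV,Coup,CpY,PCD_SETT,SETT,NCD_SETT) {
  DaysAccrued<-as.numeric(SETT-PCD_SETT)       # actual days accrued so far
  DaysInPeriod<-as.numeric(NCD_SETT-PCD_SETT)  # actual days in the period
  RV*(Coup/100)*(1/CpY)*DaysAccrued/DaysInPeriod
}
# Example: semiannual 5.25% bond, period 2014-08-31 to 2015-02-28,
# settlement on 2014-10-15:
# accr_int_actact_icma(100,5.25,2,as.Date("2014-08-31"),
#                      as.Date("2014-10-15"),as.Date("2015-02-28"))
# ---------------------------------------------------------------------------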
# File: BondValuation/R/DP.R
InputFormatCheck<-function(CP,YtM,SETT,Em,Mat,CpY,FIPD,LIPD,FIAD,RV,Coup,DCC,EOM,DateOrigin,StartDate,EndDate,YearNCP) { if (!missing(Em)) { if (!("Date"%in%class(Em))) { if ("numeric"%in%class(Em)) { Em<-as.Date(Em,origin=DateOrigin) warning(paste("The issue date (Em) is supplied as \"numeric\". It is converted to class \"Date\" using DateOrigin ",DateOrigin," and processed as Em =",Em,".")) } else { if (!(is.na(Em))) { if ("character"%in%class(Em)) { Em<-gsub("[ \\s]","",Em) HelpVector<-unlist(strsplit(Em,"")) if (all(is.element(HelpVector,c("0":"9")))) { Em<-as.numeric(Em) Em<-as.Date(Em,origin=DateOrigin) warning(paste("The issue date (Em) is supplied as a number of class \"character\". It is converted to class \"Date\" using DateOrigin ",DateOrigin," and processed as Em =",Em,".")) } else { if (all(is.element(HelpVector[c(1:4,6,7,9,10)],c("0":"9")))) { if (HelpVector[5]=="-") { if (HelpVector[8]=="-") { Em<-as.Date(Em,"%Y-%m-%d") if (is.na(Em)==FALSE) { warning(paste("The issue date (Em) is supplied as a string of class \"character\" in the format \"yyyy-mm-dd\". It is converted to class \"Date\" using the command \"as.Date(Em,\"%Y-%m-%d\")\" and processed as Em =",Em,".")) } else { stop("The issue date (Em) cannot be processed! Please make sure that it fits one of the following: 1. \"Date\" with format \"%Y-%m-%d\" or 2. \"numeric\" with the appropriate DateOrigin or 3. number of class \"character\" with the appropriate DateOrigin or 4. string of class \"character\" in the format \"yyyy-mm-dd\" Note: If this date argument has class \"numeric\" or is a number of class \"character\", origin can be set with the option DateOrigin (default is \"1970-01-01\").") } } else { stop("The issue date (Em) cannot be processed! Please make sure that it fits one of the following: 1. \"Date\" with format \"%Y-%m-%d\" or 2. \"numeric\" with the appropriate DateOrigin or 3. number of class \"character\" with the appropriate DateOrigin or 4. string of class \"character\" in the format \"yyyy-mm-dd\" Note: If this date argument has class \"numeric\" or is a number of class \"character\", origin can be set with the option DateOrigin (default is \"1970-01-01\").") } } else { stop("The issue date (Em) cannot be processed! Please make sure that it fits one of the following: 1. \"Date\" with format \"%Y-%m-%d\" or 2. \"numeric\" with the appropriate DateOrigin or 3. number of class \"character\" with the appropriate DateOrigin or 4. string of class \"character\" in the format \"yyyy-mm-dd\" Note: If this date argument has class \"numeric\" or is a number of class \"character\", origin can be set with the option DateOrigin (default is \"1970-01-01\").") } } else { stop("The issue date (Em) cannot be processed! Please make sure that it fits one of the following: 1. \"Date\" with format \"%Y-%m-%d\" or 2. \"numeric\" with the appropriate DateOrigin or 3. number of class \"character\" with the appropriate DateOrigin or 4. string of class \"character\" in the format \"yyyy-mm-dd\" Note: If this date argument has class \"numeric\" or is a number of class \"character\", origin can be set with the option DateOrigin (default is \"1970-01-01\").") } } rm(HelpVector) } } } } } else { Em=as.Date(NA) } if (!missing(Mat)) { if (!("Date"%in%class(Mat))) { if ("numeric"%in%class(Mat)) { Mat<-as.Date(Mat,origin=DateOrigin) warning(paste("The maturity date (Mat) is supplied as \"numeric\". 
It is converted to class \"Date\" using DateOrigin ",DateOrigin," and processed as Mat =",Mat,".")) } else { if (!(is.na(Mat))) { if ("character"%in%class(Mat)) { Mat<-gsub("[ \\s]","",Mat) HelpVector<-unlist(strsplit(Mat,"")) if (all(is.element(HelpVector,c("0":"9")))) { Mat<-as.numeric(Mat) Mat<-as.Date(Mat,origin=DateOrigin) warning(paste("The maturity date (Mat) is supplied as a number of class \"character\". It is converted to class \"Date\" using DateOrigin ",DateOrigin," and processed as Mat =",Mat,".")) } else { if (all(is.element(HelpVector[c(1:4,6,7,9,10)],c("0":"9")))) { if (HelpVector[5]=="-") { if (HelpVector[8]=="-") { Mat<-as.Date(Mat,"%Y-%m-%d") if (is.na(Mat)==FALSE) { warning(paste("The maturity date (Mat) is supplied as a string of class \"character\" in the format \"yyyy-mm-dd\". It is converted to class \"Date\" using the command \"as.Date(Mat,\"%Y-%m-%d\")\" and processed as Mat =",Mat,".")) } else { stop("The maturity date (Mat) cannot be processed! Please make sure that it fits one of the following: 1. \"Date\" with format \"%Y-%m-%d\" or 2. \"numeric\" with the appropriate DateOrigin or 3. number of class \"character\" with the appropriate DateOrigin or 4. string of class \"character\" in the format \"yyyy-mm-dd\" Note: If this date argument has class \"numeric\" or is a number of class \"character\", origin can be set with the option DateOrigin (default is \"1970-01-01\").") } } else { stop("The maturity date (Mat) cannot be processed! Please make sure that it fits one of the following: 1. \"Date\" with format \"%Y-%m-%d\" or 2. \"numeric\" with the appropriate DateOrigin or 3. number of class \"character\" with the appropriate DateOrigin or 4. string of class \"character\" in the format \"yyyy-mm-dd\" Note: If this date argument has class \"numeric\" or is a number of class \"character\", origin can be set with the option DateOrigin (default is \"1970-01-01\").") } } else { stop("The maturity date (Mat) cannot be processed! Please make sure that it fits one of the following: 1. \"Date\" with format \"%Y-%m-%d\" or 2. \"numeric\" with the appropriate DateOrigin or 3. number of class \"character\" with the appropriate DateOrigin or 4. string of class \"character\" in the format \"yyyy-mm-dd\" Note: If this date argument has class \"numeric\" or is a number of class \"character\", origin can be set with the option DateOrigin (default is \"1970-01-01\").") } } else { stop("The maturity date (Mat) cannot be processed! Please make sure that it fits one of the following: 1. \"Date\" with format \"%Y-%m-%d\" or 2. \"numeric\" with the appropriate DateOrigin or 3. number of class \"character\" with the appropriate DateOrigin or 4. string of class \"character\" in the format \"yyyy-mm-dd\" Note: If this date argument has class \"numeric\" or is a number of class \"character\", origin can be set with the option DateOrigin (default is \"1970-01-01\").") } } rm(HelpVector) } } } } } else { Mat=as.Date(NA) } if (!missing(FIPD)) { if (!("Date"%in%class(FIPD))) { if ("numeric"%in%class(FIPD)) { FIPD<-as.Date(FIPD,origin=DateOrigin) warning(paste("The first interest payment date (FIPD) is supplied as \"numeric\". 
It is converted to class \"Date\" using DateOrigin ",DateOrigin," and processed as FIPD =",FIPD,".")) } else { if (!(is.na(FIPD))) { if ("character"%in%class(FIPD)) { FIPD<-gsub("[ \\s]","",FIPD) HelpVector<-unlist(strsplit(FIPD,"")) if (all(is.element(HelpVector,c("0":"9")))) { FIPD<-as.numeric(FIPD) FIPD<-as.Date(FIPD,origin=DateOrigin) warning(paste("The first interest payment date (FIPD) is supplied as a number of class \"character\". It is converted to class \"Date\" using DateOrigin ",DateOrigin," and processed as FIPD =",FIPD,".")) } else { if (all(is.element(HelpVector[c(1:4,6,7,9,10)],c("0":"9")))) { if (HelpVector[5]=="-") { if (HelpVector[8]=="-") { FIPD<-as.Date(FIPD,"%Y-%m-%d") if (is.na(FIPD)==FALSE) { warning(paste("The first interest payment date (FIPD) is supplied as a string of class \"character\" in the format \"yyyy-mm-dd\". It is converted to class \"Date\" using the command \"as.Date(FIPD,\"%Y-%m-%d\")\" and processed as FIPD =",FIPD,".")) } else { stop("The first interest payment date (FIPD) cannot be processed! Please make sure that it fits one of the following: 1. \"Date\" with format \"%Y-%m-%d\" or 2. \"numeric\" with the appropriate DateOrigin or 3. number of class \"character\" with the appropriate DateOrigin or 4. string of class \"character\" in the format \"yyyy-mm-dd\" Note: If this date argument has class \"numeric\" or is a number of class \"character\", origin can be set with the option DateOrigin (default is \"1970-01-01\").") } } else { stop("The first interest payment date (FIPD) cannot be processed! Please make sure that it fits one of the following: 1. \"Date\" with format \"%Y-%m-%d\" or 2. \"numeric\" with the appropriate DateOrigin or 3. number of class \"character\" with the appropriate DateOrigin or 4. string of class \"character\" in the format \"yyyy-mm-dd\" Note: If this date argument has class \"numeric\" or is a number of class \"character\", origin can be set with the option DateOrigin (default is \"1970-01-01\").") } } else { stop("The first interest payment date (FIPD) cannot be processed! Please make sure that it fits one of the following: 1. \"Date\" with format \"%Y-%m-%d\" or 2. \"numeric\" with the appropriate DateOrigin or 3. number of class \"character\" with the appropriate DateOrigin or 4. string of class \"character\" in the format \"yyyy-mm-dd\" Note: If this date argument has class \"numeric\" or is a number of class \"character\", origin can be set with the option DateOrigin (default is \"1970-01-01\").") } } else { stop("The first interest payment date (FIPD) cannot be processed! Please make sure that it fits one of the following: 1. \"Date\" with format \"%Y-%m-%d\" or 2. \"numeric\" with the appropriate DateOrigin or 3. number of class \"character\" with the appropriate DateOrigin or 4. string of class \"character\" in the format \"yyyy-mm-dd\" Note: If this date argument has class \"numeric\" or is a number of class \"character\", origin can be set with the option DateOrigin (default is \"1970-01-01\").") } } rm(HelpVector) } } } } } else { FIPD=as.Date(NA) } if (!missing(LIPD)) { if (!("Date"%in%class(LIPD))) { if ("numeric"%in%class(LIPD)) { LIPD<-as.Date(LIPD,origin=DateOrigin) warning(paste("The last interest payment date (LIPD) is supplied as \"numeric\". 
It is converted to class \"Date\" using DateOrigin ",DateOrigin," and processed as LIPD =",LIPD,".")) } else { if (!(is.na(LIPD))) { if ("character"%in%class(LIPD)) { LIPD<-gsub("[ \\s]","",LIPD) HelpVector<-unlist(strsplit(LIPD,"")) if (all(is.element(HelpVector,c("0":"9")))) { LIPD<-as.numeric(LIPD) LIPD<-as.Date(LIPD,origin=DateOrigin) warning(paste("The last interest payment date (LIPD) is supplied as a number of class \"character\". It is converted to class \"Date\" using DateOrigin ",DateOrigin," and processed as LIPD =",LIPD,".")) } else { if (all(is.element(HelpVector[c(1:4,6,7,9,10)],c("0":"9")))) { if (HelpVector[5]=="-") { if (HelpVector[8]=="-") { LIPD<-as.Date(LIPD,"%Y-%m-%d") if (is.na(LIPD)==FALSE) { warning(paste("The last interest payment date (LIPD) is supplied as a string of class \"character\" in the format \"yyyy-mm-dd\". It is converted to class \"Date\" using the command \"as.Date(LIPD,\"%Y-%m-%d\")\" and processed as LIPD =",LIPD,".")) } else { stop("The last interest payment date (LIPD) cannot be processed! Please make sure that it fits one of the following: 1. \"Date\" with format \"%Y-%m-%d\" or 2. \"numeric\" with the appropriate DateOrigin or 3. number of class \"character\" with the appropriate DateOrigin or 4. string of class \"character\" in the format \"yyyy-mm-dd\" Note: If this date argument has class \"numeric\" or is a number of class \"character\", origin can be set with the option DateOrigin (default is \"1970-01-01\").") } } else { stop("The last interest payment date (LIPD) cannot be processed! Please make sure that it fits one of the following: 1. \"Date\" with format \"%Y-%m-%d\" or 2. \"numeric\" with the appropriate DateOrigin or 3. number of class \"character\" with the appropriate DateOrigin or 4. string of class \"character\" in the format \"yyyy-mm-dd\" Note: If this date argument has class \"numeric\" or is a number of class \"character\", origin can be set with the option DateOrigin (default is \"1970-01-01\").") } } else { stop("The last interest payment date (LIPD) cannot be processed! Please make sure that it fits one of the following: 1. \"Date\" with format \"%Y-%m-%d\" or 2. \"numeric\" with the appropriate DateOrigin or 3. number of class \"character\" with the appropriate DateOrigin or 4. string of class \"character\" in the format \"yyyy-mm-dd\" Note: If this date argument has class \"numeric\" or is a number of class \"character\", origin can be set with the option DateOrigin (default is \"1970-01-01\").") } } else { stop("The last interest payment date (LIPD) cannot be processed! Please make sure that it fits one of the following: 1. \"Date\" with format \"%Y-%m-%d\" or 2. \"numeric\" with the appropriate DateOrigin or 3. number of class \"character\" with the appropriate DateOrigin or 4. string of class \"character\" in the format \"yyyy-mm-dd\" Note: If this date argument has class \"numeric\" or is a number of class \"character\", origin can be set with the option DateOrigin (default is \"1970-01-01\").") } } rm(HelpVector) } } } } } else { LIPD=as.Date(NA) } if (!missing(FIAD)) { if (!("Date"%in%class(FIAD))) { if ("numeric"%in%class(FIAD)) { FIAD<-as.Date(FIAD,origin=DateOrigin) warning(paste("The first interest accrual date (FIAD) is supplied as \"numeric\". 
It is converted to class \"Date\" using DateOrigin ",DateOrigin," and processed as FIAD =",FIAD,".")) } else { if (!(is.na(FIAD))) { if ("character"%in%class(FIAD)) { FIAD<-gsub("[ \\s]","",FIAD) HelpVector<-unlist(strsplit(FIAD,"")) if (all(is.element(HelpVector,c("0":"9")))) { FIAD<-as.numeric(FIAD) FIAD<-as.Date(FIAD,origin=DateOrigin) warning(paste("The first interest accrual date (FIAD) is supplied as a number of class \"character\". It is converted to class \"Date\" using DateOrigin ",DateOrigin," and processed as FIAD =",FIAD,".")) } else { if (all(is.element(HelpVector[c(1:4,6,7,9,10)],c("0":"9")))) { if (HelpVector[5]=="-") { if (HelpVector[8]=="-") { FIAD<-as.Date(FIAD,"%Y-%m-%d") if (is.na(FIAD)==FALSE) { warning(paste("The first interest accrual date (FIAD) is supplied as a string of class \"character\" in the format \"yyyy-mm-dd\". It is converted to class \"Date\" using the command \"as.Date(FIAD,\"%Y-%m-%d\")\" and processed as FIAD =",FIAD,".")) } else { stop("The first interest accrual date (FIAD) cannot be processed! Please make sure that it fits one of the following: 1. \"Date\" with format \"%Y-%m-%d\" or 2. \"numeric\" with the appropriate DateOrigin or 3. number of class \"character\" with the appropriate DateOrigin or 4. string of class \"character\" in the format \"yyyy-mm-dd\" Note: If this date argument has class \"numeric\" or is a number of class \"character\", origin can be set with the option DateOrigin (default is \"1970-01-01\").") } } else { stop("The first interest accrual date (FIAD) cannot be processed! Please make sure that it fits one of the following: 1. \"Date\" with format \"%Y-%m-%d\" or 2. \"numeric\" with the appropriate DateOrigin or 3. number of class \"character\" with the appropriate DateOrigin or 4. string of class \"character\" in the format \"yyyy-mm-dd\" Note: If this date argument has class \"numeric\" or is a number of class \"character\", origin can be set with the option DateOrigin (default is \"1970-01-01\").") } } else { stop("The first interest accrual date (FIAD) cannot be processed! Please make sure that it fits one of the following: 1. \"Date\" with format \"%Y-%m-%d\" or 2. \"numeric\" with the appropriate DateOrigin or 3. number of class \"character\" with the appropriate DateOrigin or 4. string of class \"character\" in the format \"yyyy-mm-dd\" Note: If this date argument has class \"numeric\" or is a number of class \"character\", origin can be set with the option DateOrigin (default is \"1970-01-01\").") } } else { stop("The first interest accrual date (FIAD) cannot be processed! Please make sure that it fits one of the following: 1. \"Date\" with format \"%Y-%m-%d\" or 2. \"numeric\" with the appropriate DateOrigin or 3. number of class \"character\" with the appropriate DateOrigin or 4. string of class \"character\" in the format \"yyyy-mm-dd\" Note: If this date argument has class \"numeric\" or is a number of class \"character\", origin can be set with the option DateOrigin (default is \"1970-01-01\").") } } rm(HelpVector) } } } } } else { FIAD=as.Date(NA) } if (!missing(CpY)) { if (!("numeric"%in%class(CpY))) { if ("Date"%in%class(CpY)) { CpY<-as.numeric(CpY) warning(paste("The number of interest payments per year (CpY) is supplied as \"Date\". 
Its conversion to class numeric results in CpY =",CpY,".")) } else { if (!(is.na(CpY))) { if ("character"%in%class(CpY)) { CpY<-gsub("[ \\s]","",CpY) HelpVector<-unlist(strsplit(CpY,"")) if (all(is.element(HelpVector,c("0":"9")))) { CpY<-as.numeric(CpY) warning(paste("The number of interest payments per year (CpY) is supplied as a number of class \"character\". Its conversion to class numeric results in CpY =",CpY,".")) } else { if (all(is.element(HelpVector[c(1:4,6,7,9,10)],c("0":"9")))) { if (HelpVector[5]=="-") { if (HelpVector[8]=="-") { CpY<-as.Date(CpY,"%Y-%m-%d") CpY<-as.numeric(CpY) if (!(is.na(CpY))) { warning(paste("The number of interest payments per year (CpY) is supplied as a string of class \"character\" in the format \"yyyy-mm-dd\". It is converted to class \"Date\" using the command \"as.Date(CpY,\"%Y-%m-%d\")\" Its subsequent conversion to class numeric results in CpY =",CpY,".")) } else { stop("The number of interest payments per year (CpY) cannot be processed! Please make sure that its class is either \"numeric\" or \"character\"!") } } else { stop("The number of interest payments per year (CpY) cannot be processed! Please make sure that its class is either \"numeric\" or \"character\"!") } } else { stop("The number of interest payments per year (CpY) cannot be processed! Please make sure that its class is either \"numeric\" or \"character\"!") } } else { stop("The number of interest payments per year (CpY) cannot be processed! Please make sure that its class is either \"numeric\" or \"character\"!") } } rm(HelpVector) } } } } } else { CpY=as.numeric(NA) } if (!missing(RV)) { if (!("numeric"%in%class(RV))) { if ("Date"%in%class(RV)) { RV<-as.numeric(RV) warning(paste("The redemption value (RV) is supplied as \"Date\". Its conversion to class numeric results in RV =",RV,".")) } else { if (!(is.na(RV))) { if ("character"%in%class(RV)) { RV<-gsub("[ \\s]","",RV) HelpVector<-unlist(strsplit(RV,"")) if ((all(is.element(HelpVector[-1],c(".","0":"9"))))&(is.element(HelpVector[1],c("-","0":"9")))) { RV<-as.numeric(RV) warning(paste("The redemption value (RV) is supplied as a number of class \"character\". Its conversion to class numeric results in RV =",RV,".")) } else { if (all(is.element(HelpVector[c(1:4,6,7,9,10)],c(".","0":"9")))) { if (HelpVector[5]=="-") { if (HelpVector[8]=="-") { RV<-as.Date(RV,"%Y-%m-%d") RV<-as.numeric(RV) if (!(is.na(RV))) { warning(paste("The redemption value (RV) is supplied as a string of class \"character\" in the format \"yyyy-mm-dd\". It is converted to class \"Date\" using the command \"as.Date(RV,\"%Y-%m-%d\")\" Its subsequent conversion to class numeric results in RV =",RV,".")) } else { stop("The redemption value (RV) cannot be processed! Please make sure that its class is either \"numeric\" or \"character\"!") } } else { stop("The redemption value (RV) cannot be processed! Please make sure that its class is either \"numeric\" or \"character\"!") } } else { stop("The redemption value (RV) cannot be processed! Please make sure that its class is either \"numeric\" or \"character\"!") } } else { stop("The redemption value (RV) cannot be processed! Please make sure that its class is either \"numeric\" or \"character\"!") } } rm(HelpVector) } } } } } else { RV=as.numeric(NA) } if (!missing(Coup)) { if (!("numeric"%in%class(Coup))) { if ("Date"%in%class(Coup)) { Coup<-as.numeric(Coup) warning(paste("The nominal interest rate p.a. (Coup) is supplied as \"Date\". 
Its conversion to class numeric results in Coup =",Coup,".")) } else { if (!(is.na(Coup))) { if ("character"%in%class(Coup)) { Coup<-gsub("[ \\s]","",Coup) HelpVector<-unlist(strsplit(Coup,"")) if ((all(is.element(HelpVector[-1],c(".","0":"9"))))&(is.element(HelpVector[1],c("-","0":"9")))) { Coup<-as.numeric(Coup) warning(paste("The nominal interest rate p.a. (Coup) is supplied as a number of class \"character\". Its conversion to class numeric results in Coup =",Coup,".")) } else { if (all(is.element(HelpVector[c(1:4,6,7,9,10)],c(".","0":"9")))) { if (HelpVector[5]=="-") { if (HelpVector[8]=="-") { Coup<-as.Date(Coup,"%Y-%m-%d") Coup<-as.numeric(Coup) if (!(is.na(Coup))) { warning(paste("The nominal interest rate p.a. (Coup) is supplied as a string of class \"character\" in the format \"yyyy-mm-dd\". It is converted to class \"Date\" using the command \"as.Date(Coup,\"%Y-%m-%d\")\" Its subsequent conversion to class numeric results in Coup =",Coup,".")) } else { stop("The nominal interest rate p.a. (Coup) cannot be processed! Please make sure that its class is either \"numeric\" or \"character\"!") } } else { stop("The nominal interest rate p.a. (Coup) cannot be processed! Please make sure that its class is either \"numeric\" or \"character\"!") } } else { stop("The nominal interest rate p.a. (Coup) cannot be processed! Please make sure that its class is either \"numeric\" or \"character\"!") } } else { stop("The nominal interest rate p.a. (Coup) cannot be processed! Please make sure that its class is either \"numeric\" or \"character\"!") } } rm(HelpVector) } } } } } else { Coup=as.numeric(NA) } if (!missing(DCC)) { if (!("numeric"%in%class(DCC))) { if ("Date"%in%class(DCC)) { DCC<-as.numeric(DCC) warning(paste("The day count convention identifier (DCC) is supplied as \"Date\". Its conversion to class numeric results in DCC =",DCC,".")) } else { if (!(is.na(DCC))) { if ("character"%in%class(DCC)) { DCC<-gsub("[ \\s]","",DCC) HelpVector<-unlist(strsplit(DCC,"")) if (all(is.element(HelpVector,c("0":"9")))) { DCC<-as.numeric(DCC) warning(paste("The day count convention identifier (DCC) is supplied as a number of class \"character\". Its conversion to class numeric results in DCC =",DCC,".")) } else { if (all(is.element(HelpVector[c(1:4,6,7,9,10)],c("0":"9")))) { if (HelpVector[5]=="-") { if (HelpVector[8]=="-") { DCC<-as.Date(DCC,"%Y-%m-%d") DCC<-as.numeric(DCC) if (!(is.na(DCC))) { warning(paste("The day count convention identifier (DCC) is supplied as a string of class \"character\" in the format \"yyyy-mm-dd\". It is converted to class \"Date\" using the command \"as.Date(DCC,\"%Y-%m-%d\")\" Its subsequent conversion to class numeric results in DCC =",DCC,".")) } else { stop("The day count convention identifier (DCC) cannot be processed! Please make sure that its class is either \"numeric\" or \"character\"!") } } else { stop("The day count convention identifier (DCC) cannot be processed! Please make sure that its class is either \"numeric\" or \"character\"!") } } else { stop("The day count convention identifier (DCC) cannot be processed! Please make sure that its class is either \"numeric\" or \"character\"!") } } else { stop("The day count convention identifier (DCC) cannot be processed! Please make sure that its class is either \"numeric\" or \"character\"!") } } rm(HelpVector) } } } } } else { DCC=as.numeric(NA) } if (!(is.na(DCC))) { if (!(is.element(DCC,c(1:16)))) { stop("The day count convention identifier (DCC) cannot be processed! 
Please make sure that it is an element of the set {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}!") } } if (!missing(EOM)) { if (!("numeric"%in%class(EOM))) { if ("Date"%in%class(EOM)) { EOM<-as.numeric(EOM) warning(paste("The End-of-Month rule boolean (EOM) is supplied as \"Date\". Its conversion to class numeric results in EOM =",EOM,".")) } else { if (!(is.na(EOM))) { if ("character"%in%class(EOM)) { EOM<-gsub("[ \\s]","",EOM) HelpVector<-unlist(strsplit(EOM,"")) if (all(is.element(HelpVector,c("0":"9")))) { EOM<-as.numeric(EOM) warning(paste("The End-of-Month rule boolean (EOM) is supplied as a number of class \"character\". Its conversion to class numeric results in EOM =",EOM,".")) } else { if (all(is.element(HelpVector[c(1:4,6,7,9,10)],c("0":"9")))) { if (HelpVector[5]=="-") { if (HelpVector[8]=="-") { EOM<-as.Date(EOM,"%Y-%m-%d") EOM<-as.numeric(EOM) if (!(is.na(EOM))) { warning(paste("The End-of-Month rule boolean (EOM) is supplied as a string of class \"character\" in the format \"yyyy-mm-dd\". It is converted to class \"Date\" using the command \"as.Date(EOM,\"%Y-%m-%d\")\" Its subsequent conversion to class numeric results in EOM =",EOM,".")) } else { stop("The End-of-Month rule boolean (EOM) cannot be processed! Please make sure that its class is either \"numeric\" or \"character\"!") } } else { stop("The End-of-Month rule boolean (EOM) cannot be processed! Please make sure that its class is either \"numeric\" or \"character\"!") } } else { stop("The End-of-Month rule boolean (EOM) cannot be processed! Please make sure that its class is either \"numeric\" or \"character\"!") } } else { stop("The End-of-Month rule boolean (EOM) cannot be processed! Please make sure that its class is either \"numeric\" or \"character\"!") } } rm(HelpVector) } } } } } else { EOM=as.numeric(NA) } if (!(is.na(EOM))) { if (!(is.element(EOM,c(0,1)))) { stop("The End-of-Month rule boolean (EOM) cannot be processed! Please make sure that it is an element of the set {0,1}!") } } if (!missing(CP)) { if (!("numeric"%in%class(CP))) { if ("Date"%in%class(CP)) { CP<-as.numeric(CP) warning(paste("The clean price (CP) is supplied as \"Date\". Its conversion to class numeric results in CP =",CP,".")) } else { if (!(is.na(CP))) { if ("character"%in%class(CP)) { CP<-gsub("[ \\s]","",CP) HelpVector<-unlist(strsplit(CP,"")) if ((all(is.element(HelpVector[-1],c(".","0":"9"))))&(is.element(HelpVector[1],c("-","0":"9")))) { CP<-as.numeric(CP) warning(paste("The clean price (CP) is supplied as a number of class \"character\". Its conversion to class numeric results in CP =",CP,".")) } else { if (all(is.element(HelpVector[c(1:4,6,7,9,10)],c(".","0":"9")))) { if (HelpVector[5]=="-") { if (HelpVector[8]=="-") { CP<-as.Date(CP,"%Y-%m-%d") CP<-as.numeric(CP) if (!(is.na(CP))) { warning(paste("The clean price (CP) is supplied as a string of class \"character\" in the format \"yyyy-mm-dd\". It is converted to class \"Date\" using the command \"as.Date(CP,\"%Y-%m-%d\")\" Its subsequent conversion to class numeric results in CP =",CP,".")) } else { stop("The clean price (CP) cannot be processed! Please make sure that its class is either \"numeric\" or \"character\"!") } } else { stop("The clean price (CP) cannot be processed! Please make sure that its class is either \"numeric\" or \"character\"!") } } else { stop("The clean price (CP) cannot be processed! Please make sure that its class is either \"numeric\" or \"character\"!") } } else { stop("The clean price (CP) cannot be processed! 
Please make sure that its class is either \"numeric\" or \"character\"!") } } rm(HelpVector) } } } } } else { CP=as.numeric(NA) } if (!missing(YtM)) { if (!("numeric"%in%class(YtM))) { if ("Date"%in%class(YtM)) { YtM<-as.numeric(YtM) warning(paste("The yield to maturity (YtM) is supplied as \"Date\". Its conversion to class numeric results in YtM =",YtM,".")) } else { if (!(is.na(YtM))) { if ("character"%in%class(YtM)) { YtM<-gsub("[ \\s]","",YtM) HelpVector<-unlist(strsplit(YtM,"")) if ((all(is.element(HelpVector[-1],c(".","0":"9"))))&(is.element(HelpVector[1],c("-","0":"9")))) { YtM<-as.numeric(YtM) warning(paste("The yield to maturity (YtM) is supplied as a number of class \"character\". Its conversion to class numeric results in YtM =",YtM,".")) } else { if (all(is.element(HelpVector[c(1:4,6,7,9,10)],c(".","0":"9")))) { if (HelpVector[5]=="-") { if (HelpVector[8]=="-") { YtM<-as.Date(YtM,"%Y-%m-%d") YtM<-as.numeric(YtM) if (!(is.na(YtM))) { warning(paste("The yield to maturity (YtM) is supplied as a string of class \"character\" in the format \"yyyy-mm-dd\". It is converted to class \"Date\" using the command \"as.Date(YtM,\"%Y-%m-%d\")\" Its subsequent conversion to class numeric results in YtM =",YtM,".")) } else { stop("The yield to maturity (YtM) cannot be processed! Please make sure that its class is either \"numeric\" or \"character\"!") } } else { stop("The yield to maturity (YtM) cannot be processed! Please make sure that its class is either \"numeric\" or \"character\"!") } } else { stop("The yield to maturity (YtM) cannot be processed! Please make sure that its class is either \"numeric\" or \"character\"!") } } else { stop("The yield to maturity (YtM) cannot be processed! Please make sure that its class is either \"numeric\" or \"character\"!") } } rm(HelpVector) } } } } } else { YtM=as.numeric(NA) } if (!missing(SETT)) { if (!("Date"%in%class(SETT))) { if ("numeric"%in%class(SETT)) { SETT<-as.Date(SETT,origin=DateOrigin) warning(paste("The settlement date (SETT) is supplied as \"numeric\". It is converted to class \"Date\" using DateOrigin ",DateOrigin," and processed as SETT =",SETT,".")) } else { if (!(is.na(SETT))) { if ("character"%in%class(SETT)) { SETT<-gsub("[ \\s]","",SETT) HelpVector<-unlist(strsplit(SETT,"")) if (all(is.element(HelpVector,c("0":"9")))) { SETT<-as.numeric(SETT) SETT<-as.Date(SETT,origin=DateOrigin) warning(paste("The settlement date (SETT) is supplied as a number of class \"character\". It is converted to class \"Date\" using DateOrigin ",DateOrigin," and processed as SETT =",SETT,".")) } else { if (all(is.element(HelpVector[c(1:4,6,7,9,10)],c("0":"9")))) { if (HelpVector[5]=="-") { if (HelpVector[8]=="-") { SETT<-as.Date(SETT,"%Y-%m-%d") if (is.na(SETT)==FALSE) { warning(paste("The settlement date (SETT) is supplied as a string of class \"character\" in the format \"yyyy-mm-dd\". It is converted to class \"Date\" using the command \"as.Date(SETT,\"%Y-%m-%d\")\" and processed as SETT =",SETT,".")) } else { stop("The settlement date (SETT) cannot be processed! Please make sure that it fits one of the following: 1. \"Date\" with format \"%Y-%m-%d\" or 2. \"numeric\" with the appropriate DateOrigin or 3. number of class \"character\" with the appropriate DateOrigin or 4. 
string of class \"character\" in the format \"yyyy-mm-dd\" Note: If this date argument has class \"numeric\" or is a number of class \"character\", origin can be set with the option DateOrigin (default is \"1970-01-01\").") } } else { stop("The settlement date (SETT) cannot be processed! Please make sure that it fits one of the following: 1. \"Date\" with format \"%Y-%m-%d\" or 2. \"numeric\" with the appropriate DateOrigin or 3. number of class \"character\" with the appropriate DateOrigin or 4. string of class \"character\" in the format \"yyyy-mm-dd\" Note: If this date argument has class \"numeric\" or is a number of class \"character\", origin can be set with the option DateOrigin (default is \"1970-01-01\").") } } else { stop("The settlement date (SETT) cannot be processed! Please make sure that it fits one of the following: 1. \"Date\" with format \"%Y-%m-%d\" or 2. \"numeric\" with the appropriate DateOrigin or 3. number of class \"character\" with the appropriate DateOrigin or 4. string of class \"character\" in the format \"yyyy-mm-dd\" Note: If this date argument has class \"numeric\" or is a number of class \"character\", origin can be set with the option DateOrigin (default is \"1970-01-01\").") } } else { stop("The settlement date (SETT) cannot be processed! Please make sure that it fits one of the following: 1. \"Date\" with format \"%Y-%m-%d\" or 2. \"numeric\" with the appropriate DateOrigin or 3. number of class \"character\" with the appropriate DateOrigin or 4. string of class \"character\" in the format \"yyyy-mm-dd\" Note: If this date argument has class \"numeric\" or is a number of class \"character\", origin can be set with the option DateOrigin (default is \"1970-01-01\").") } } rm(HelpVector) } } } } } else { SETT=as.Date(NA) } if (!missing(StartDate)) { if (!("Date"%in%class(StartDate))) { if ("numeric"%in%class(StartDate)) { StartDate<-as.Date(StartDate,origin=DateOrigin) warning(paste("The starting date (StartDate) is supplied as \"numeric\". It is converted to class \"Date\" using DateOrigin ",DateOrigin," and processed as StartDate =",StartDate,".")) } else { if (!(is.na(StartDate))) { if ("character"%in%class(StartDate)) { StartDate<-gsub("[ \\s]","",StartDate) HelpVector<-unlist(strsplit(StartDate,"")) if (all(is.element(HelpVector,c("0":"9")))) { StartDate<-as.numeric(StartDate) StartDate<-as.Date(StartDate,origin=DateOrigin) warning(paste("The starting date (StartDate) is supplied as a number of class \"character\". It is converted to class \"Date\" using DateOrigin ",DateOrigin," and processed as StartDate =",StartDate,".")) } else { if (all(is.element(HelpVector[c(1:4,6,7,9,10)],c("0":"9")))) { if (HelpVector[5]=="-") { if (HelpVector[8]=="-") { StartDate<-as.Date(StartDate,"%Y-%m-%d") if (is.na(StartDate)==FALSE) { warning(paste("The starting date (StartDate) is supplied as a string of class \"character\" in the format \"yyyy-mm-dd\". It is converted to class \"Date\" using the command \"as.Date(StartDate,\"%Y-%m-%d\")\" and processed as StartDate =",StartDate,".")) } else { stop("The starting date (StartDate) cannot be processed! Please make sure that it fits one of the following: 1. \"Date\" with format \"%Y-%m-%d\" or 2. \"numeric\" with the appropriate DateOrigin or 3. number of class \"character\" with the appropriate DateOrigin or 4. 
string of class \"character\" in the format \"yyyy-mm-dd\" Note: If this date argument has class \"numeric\" or is a number of class \"character\", origin can be set with the option DateOrigin (default is \"1970-01-01\").") } } else { stop("The starting date (StartDate) cannot be processed! Please make sure that it fits one of the following: 1. \"Date\" with format \"%Y-%m-%d\" or 2. \"numeric\" with the appropriate DateOrigin or 3. number of class \"character\" with the appropriate DateOrigin or 4. string of class \"character\" in the format \"yyyy-mm-dd\" Note: If this date argument has class \"numeric\" or is a number of class \"character\", origin can be set with the option DateOrigin (default is \"1970-01-01\").") } } else { stop("The starting date (StartDate) cannot be processed! Please make sure that it fits one of the following: 1. \"Date\" with format \"%Y-%m-%d\" or 2. \"numeric\" with the appropriate DateOrigin or 3. number of class \"character\" with the appropriate DateOrigin or 4. string of class \"character\" in the format \"yyyy-mm-dd\" Note: If this date argument has class \"numeric\" or is a number of class \"character\", origin can be set with the option DateOrigin (default is \"1970-01-01\").") } } else { stop("The starting date (StartDate) cannot be processed! Please make sure that it fits one of the following: 1. \"Date\" with format \"%Y-%m-%d\" or 2. \"numeric\" with the appropriate DateOrigin or 3. number of class \"character\" with the appropriate DateOrigin or 4. string of class \"character\" in the format \"yyyy-mm-dd\" Note: If this date argument has class \"numeric\" or is a number of class \"character\", origin can be set with the option DateOrigin (default is \"1970-01-01\").") } } rm(HelpVector) } } } } } else { StartDate=as.Date(NA) } if (!missing(EndDate)) { if (!("Date"%in%class(EndDate))) { if ("numeric"%in%class(EndDate)) { EndDate<-as.Date(EndDate,origin=DateOrigin) warning(paste("The starting date (EndDate) is supplied as \"numeric\". It is converted to class \"Date\" using DateOrigin ",DateOrigin," and processed as EndDate =",EndDate,".")) } else { if (!(is.na(EndDate))) { if ("character"%in%class(EndDate)) { EndDate<-gsub("[ \\s]","",EndDate) HelpVector<-unlist(strsplit(EndDate,"")) if (all(is.element(HelpVector,c("0":"9")))) { EndDate<-as.numeric(EndDate) EndDate<-as.Date(EndDate,origin=DateOrigin) warning(paste("The starting date (EndDate) is supplied as a number of class \"character\". It is converted to class \"Date\" using DateOrigin ",DateOrigin," and processed as EndDate =",EndDate,".")) } else { if (all(is.element(HelpVector[c(1:4,6,7,9,10)],c("0":"9")))) { if (HelpVector[5]=="-") { if (HelpVector[8]=="-") { EndDate<-as.Date(EndDate,"%Y-%m-%d") if (is.na(EndDate)==FALSE) { warning(paste("The starting date (EndDate) is supplied as a string of class \"character\" in the format \"yyyy-mm-dd\". It is converted to class \"Date\" using the command \"as.Date(EndDate,\"%Y-%m-%d\")\" and processed as EndDate =",EndDate,".")) } else { stop("The starting date (EndDate) cannot be processed! Please make sure that it fits one of the following: 1. \"Date\" with format \"%Y-%m-%d\" or 2. \"numeric\" with the appropriate DateOrigin or 3. number of class \"character\" with the appropriate DateOrigin or 4. 
string of class \"character\" in the format \"yyyy-mm-dd\" Note: If this date argument has class \"numeric\" or is a number of class \"character\", origin can be set with the option DateOrigin (default is \"1970-01-01\").") } } else { stop("The starting date (EndDate) cannot be processed! Please make sure that it fits one of the following: 1. \"Date\" with format \"%Y-%m-%d\" or 2. \"numeric\" with the appropriate DateOrigin or 3. number of class \"character\" with the appropriate DateOrigin or 4. string of class \"character\" in the format \"yyyy-mm-dd\" Note: If this date argument has class \"numeric\" or is a number of class \"character\", origin can be set with the option DateOrigin (default is \"1970-01-01\").") } } else { stop("The starting date (EndDate) cannot be processed! Please make sure that it fits one of the following: 1. \"Date\" with format \"%Y-%m-%d\" or 2. \"numeric\" with the appropriate DateOrigin or 3. number of class \"character\" with the appropriate DateOrigin or 4. string of class \"character\" in the format \"yyyy-mm-dd\" Note: If this date argument has class \"numeric\" or is a number of class \"character\", origin can be set with the option DateOrigin (default is \"1970-01-01\").") } } else { stop("The starting date (EndDate) cannot be processed! Please make sure that it fits one of the following: 1. \"Date\" with format \"%Y-%m-%d\" or 2. \"numeric\" with the appropriate DateOrigin or 3. number of class \"character\" with the appropriate DateOrigin or 4. string of class \"character\" in the format \"yyyy-mm-dd\" Note: If this date argument has class \"numeric\" or is a number of class \"character\", origin can be set with the option DateOrigin (default is \"1970-01-01\").") } } rm(HelpVector) } } } } } else { EndDate=as.Date(NA) } if (!missing(YearNCP)) { if (!("numeric"%in%class(YearNCP))) { if ("Date"%in%class(YearNCP)) { YearNCP<-as.numeric(YearNCP) warning(paste("The year figure of next coupon date (YearNCP) is supplied as \"Date\". Its conversion to class numeric results in YearNCP =",YearNCP,".")) } else { if (!(is.na(YearNCP))) { if ("character"%in%class(YearNCP)) { YearNCP<-gsub("[ \\s]","",YearNCP) HelpVector<-unlist(strsplit(YearNCP,"")) if ((all(is.element(HelpVector[-1],c(".","0":"9"))))&(is.element(HelpVector[1],c("-","0":"9")))) { YearNCP<-as.numeric(YearNCP) warning(paste("The year figure of next coupon date (YearNCP) is supplied as a number of class \"character\". Its conversion to class numeric results in YearNCP =",YearNCP,".")) } else { if (all(is.element(HelpVector[c(1:4,6,7,9,10)],c(".","0":"9")))) { if (HelpVector[5]=="-") { if (HelpVector[8]=="-") { YearNCP<-as.Date(YearNCP,"%Y-%m-%d") YearNCP<-as.numeric(YearNCP) if (!(is.na(YearNCP))) { warning(paste("The year figure of next coupon date (YearNCP) is supplied as a string of class \"character\" in the format \"yyyy-mm-dd\". It is converted to class \"Date\" using the command \"as.Date(YearNCP,\"%Y-%m-%d\")\" Its subsequent conversion to class numeric results in YearNCP =",YearNCP,".")) } else { stop("The year figure of next coupon date (YearNCP) cannot be processed! Please make sure that its class is either \"numeric\" or \"character\"!") } } else { stop("The year figure of next coupon date (YearNCP) cannot be processed! Please make sure that its class is either \"numeric\" or \"character\"!") } } else { stop("The year figure of next coupon date (YearNCP) cannot be processed! 
Please make sure that its class is either \"numeric\" or \"character\"!") } } else { stop("The year figure of next coupon date (YearNCP) cannot be processed! Please make sure that its class is either \"numeric\" or \"character\"!") } } rm(HelpVector) } } } } } else { YearNCP=as.numeric(NA) } if (!(is.na(YearNCP))) { if (abs(YearNCP-round(YearNCP))>.Machine$double.eps^0.5) { stop("The year figure of next coupon date (YearNCP) cannot be processed! Please make sure that it is a year figure!") } } CheckedInput<-list(CP=CP,YtM=YtM,SETT=SETT,Em=Em,Mat=Mat,CpY=CpY,FIPD=FIPD,LIPD=LIPD,FIAD=FIAD,RV=RV,Coup=Coup,DCC=DCC, EOM=EOM,DateOrigin=DateOrigin,StartDate=StartDate,EndDate=EndDate,YearNCP=YearNCP) return(CheckedInput) } PCD<-function(Date,DateVector) { if (!(("Date"%in%class(Date))|("numeric"%in%class(Date)))) { stop("The scalar cannot be processed! Please make sure that it fits one of the following: 1. \"Date\" with format \"%Y-%m-%d\" or 2. \"numeric\".") } else { if (!(("Date"%in%class(DateVector))|("numeric"%in%class(DateVector)))) { stop("The vector cannot be processed! Please make sure that it fits one of the following: 1. \"Date\" with format \"%Y-%m-%d\" or 2. \"numeric\".") } else { DateVector<-sort(na.omit(DateVector[!duplicated(DateVector)])) if (length(DateVector)<2) { stop("The vector must contain at least two different non-NA elements!") } else { if ((Date<DateVector[1])|(DateVector[length(DateVector)]<Date)) { PrevDate<-NA warning("The entered scalar is outside the vector boundaries. NA created!") } else { DateVector<-append(DateVector,Date) DateVector<-sort(DateVector) if (anyDuplicated(DateVector)!=0) { PrevDate<-DateVector[anyDuplicated(DateVector)] } else { PrevDate<-DateVector[which(DateVector<Date)[length(DateVector[which(DateVector<Date)])]] } } return(PrevDate) } } } } NCD<-function(Date,DateVector) { if (!(("Date"%in%class(Date))|("numeric"%in%class(Date)))) { stop("The scalar cannot be processed! Please make sure that it fits one of the following: 1. \"Date\" with format \"%Y-%m-%d\" or 2. \"numeric\".") } else { if (!(("Date"%in%class(DateVector))|("numeric"%in%class(DateVector)))) { stop("The vector cannot be processed! Please make sure that it fits one of the following: 1. \"Date\" with format \"%Y-%m-%d\" or 2. \"numeric\".") } else { DateVector<-sort(na.omit(DateVector[!duplicated(DateVector)])) if (length(DateVector)<2) { stop("The vector must contain at least two different non-NA elements!") } else { if ((Date<DateVector[1])|(!(DateVector[length(DateVector)]>Date))) { NextDate<-NA warning("The entered scalar is outside the vector boundaries. 
NA created!") } else { DateVector<-append(DateVector,Date) DateVector<-sort(DateVector) NextDate<-DateVector[which(DateVector>Date)[1]] } return(NextDate) } } } } NewtonRaphson<-function(f,df,startValue,Precision=.Machine$double.eps^0.25) { i<-0 v<-startValue vNext<-v-f(v)/df(v) path<-c(v,vNext) path.break<-as.numeric(0) f.value<-f(vNext) while ((!(is.nan(f.value)))&(abs(f.value)>Precision)) { i<-i+1 v<-vNext vNext<-v-f(v)/df(v) path<-append(path,vNext) f.value<-f(vNext) # if (round(abs(path[length(path)]-path[length(path)-1])- # abs(path[length(path)-1]-path[length(path)-2]))<Precision) { # path.break<-as.numeric(1) # break # } } # if ((1+0.0000001>vNext)&(1-0.0000001<vNext)) { # path.break<-as.numeric(1) # } out<-list(root=vNext,N.Iter=i,f.value=f.value,path.break=path.break) return(out) } dm_MyPriceEqn<-function(a,x) { m<-x[1] CNtau<-x[2] CF<-x[3] CF_final<-x[4] w<-x[5] eta<-x[6] z<-x[7] CpY<-x[8] Factor1<-((-1)^m)/((a^(w+m))*(CpY^(m))) Summand1<-CNtau*gamma(w+m)/(gamma(w)) InnerFactor<-CF*factorial(m)/(a-1) mSequence<-seq(0,m) InnerSummands<-(1/(factorial(m-mSequence)))*((a/(a-1))^mSequence)* (((gamma(w+m-mSequence))/(gamma(w)))- (gamma(w+eta+m-mSequence))/((a^eta)*(gamma(w+eta)))) InnerSum<-sum(InnerSummands) LastSummand<-(CF_final*gamma(w+eta+z+m))/((a^(eta+z))*(gamma(w+eta+z))) out<-Factor1*(Summand1+InnerFactor*InnerSum+LastSummand) return(out) } ModDUR<-function(a,x) { m<-x[1] CNtau<-x[2] CF<-x[3] CF_final<-x[4] w<-x[5] eta<-x[6] z<-x[7] CpY<-x[8] DP<-x[9] Factor1<-(1/(DP*CpY*a^(w+1))) Summand1<-CNtau*w InnerFactor<-CF*((a^eta)-1)/((a^eta)*(a-1)) InnerSum<-w-(eta/((a^eta)-1))+(a/(a-1)) LastSummand<-(CF_final*(w+eta+z))/(a^(eta+z)) out<-Factor1*(Summand1+InnerFactor*InnerSum+LastSummand) return(out) } CONV<-function(a,x) { m<-x[1] CNtau<-x[2] CF<-x[3] CF_final<-x[4] w<-x[5] eta<-x[6] z<-x[7] CpY<-x[8] DP<-x[9] Factor1<-(1/(2*(a^w)*DP))*((1/(CpY*a))^2) Summand1<-CNtau*w*(w+1) InnerFactor<-CF*((a^eta)-1)/((a^eta)*(a-1)) InnerSum<-w*(w+1)- ((eta*(2*w+eta+1))/((a^eta)-1))+ ((2*a)/(a-1))* (w-eta/(a^eta-1)+(a/(a-1))) LastSummand<-(CF_final*(w+eta+z)*(w+eta+z+1))/(a^(eta+z)) out<-Factor1*(Summand1+InnerFactor*InnerSum+LastSummand) return(out) }
# File: BondValuation/R/InternalFunctions.R
#' List of the day count conventions implemented. #' #' @docType data #' #' @usage data(List.DCC) #' #' @format A data frame with 16 rows and 3 variables: #' \describe{ #' \item{DCC}{Identifier.} #' \item{DCC.Name}{Names of the day count convention.} #' \item{DCC.Reference}{Reference.} #' } #' #' @references #' \enumerate{ #' \item{Banking Federation of the European Union (EBF), 2004, Master Agreement for Financial Transactions - Supplement to the Derivatives Annex - Interest Rate Transactions.} #' \item{Caputo Silva, Anderson, Lena Oliveira de Carvalho, and Octavio Ladeira de Medeiros, 2010, \emph{Public Debt: The Brazilian Experience} (National Treasury Secretariat and World Bank, Brasilia, BR).} #' \item{International Capital Market Association (ICMA), 2010, Rule 251 Accrued Interest Calculation - Excerpt from ICMA's Rules and Recommendations.} #' \item{Investment Industry Association of Canada (IIAC), 2018, Canadian Conventions in Fixed Income Markets - A Reference Document of Fixed Income Securities Formulas and Practices; Release: 1.3.} #' \item{International Swaps and Derivatives Association (ISDA), Inc., 1998, "EMU and Market Conventions: Recent Developments".} #' \item{International Swaps and Derivatives Association (ISDA), 2006, Inc., \emph{2006 ISDA Definitions.}, New York.} #' \item{Itau Unibanco S.A., 2017, Brazilian Sovereign Fixed Income and Foreign Exchange Markets - Handbook (First Edition).} #' \item{Krgin, Dragomir, 2002, The Handbook of Global Fixed Income Calculations. (Wiley, New York).} #' \item{Mayle, Jan, 1993, Standard Securities Calculation Methods: Fixed Income Securities Formulas for Price, Yield, and Accrued Interest, volume 1, New York: Securities Industry Association, third edition.} #' \item{Municipal Securities Rulemaking Board (MSRB), 2017, MSRB Rule Book, Washington, DC: Municipal Securities Rulemaking Board.} #' \item{SWX Swiss Exchange and D. Christie, 2003, "Accrued Interest & Yield Calculations and Determination of Holiday Calendars".} #' } #' #' @keywords datasets #' "List.DCC"
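# Usage sketch: translate a numeric DCC identifier (1-16, as used by AccrInt()
# and DP()) into its convention name and literature reference:
#   data(List.DCC)
#   List.DCC[List.DCC$DCC == 1, ]   # returns the DCC.Name and DCC.Reference row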
# File: BondValuation/R/List.DCC.R
#' Non-business days in Brazil from 1946-01-01 to 2299-12-31. #' #' This data frame contains all Saturdays and Sundays and the following #' Brazilian national holidays: #' \itemize{ #' \item New Year's Day (always on 01. Jan) #' \item Shrove Monday (variable date) #' \item Shrove Tuesday (variable date) #' \item Good Friday (variable date) #' \item Tiradentes' Day (always on 21. Apr) #' \item Labour Day (always on 01. May) #' \item Corpus Christi (variable date) #' \item Independence Day (always on 07. Sep) #' \item Our Lady of Aparecida (always on 12. Oct) #' \item All Souls' Day (always on 02. Nov) #' \item Republic Day (always on 15. Nov) #' \item Christmas Day (always on 25. Dec) #' } #' #' @docType data #' #' @usage data(NonBusDays.Brazil) #' #' @format A data frame with 40378 rows and 3 variables: #' \itemize{ #' \item{Holiday.Name} #' \item{Date} #' \item{Weekday} #' } #' #' @references #' Itau Unibanco S.A., 2017, Brazilian Sovereign Fixed Income and Foreign Exchange Markets - Handbook (First Edition). #' #' @keywords datasets #' "NonBusDays.Brazil"
# File: BondValuation/R/NonBusDays.Brazil.R
#' A panel of 100 plain vanilla fixed coupon corporate bonds.
#'
#' A simulated dataset of 100 plain vanilla fixed coupon
#' corporate bonds issued in 2016.
#'
#' @docType data
#'
#' @usage data(PanelSomeBonds2016)
#'
#' @format A data frame with 12718 rows and 16 variables:
#' \describe{
#'   \item{ID.No}{Identification number of the security.}
#'   \item{Coup.Type}{Type of the bond's coupon.}
#'   \item{Issue.Date}{The bond's issue date. Object of class Date
#'                     with format \code{"\%Y-\%m-\%d"}.}
#'   \item{FIAD.Input}{Date on which the interest accrual starts (so-called
#'                     "dated date"). Object of class Date with format
#'                     \code{"\%Y-\%m-\%d"}.}
#'   \item{FIPD.Input}{First interest payment date after \code{Issue.Date}.
#'                     Object of class Date with format \code{"\%Y-\%m-\%d"}.}
#'   \item{LIPD.Input}{Last interest payment date before \code{Mat.Date}.
#'                     Object of class Date with format \code{"\%Y-\%m-\%d"}.}
#'   \item{Mat.Date}{So-called "maturity date", i.e. the date on which the
#'                   redemption value and the final interest are paid.
#'                   Object of class Date with format \code{"\%Y-\%m-\%d"}.}
#'   \item{CpY.Input}{Number of interest payments per year. Object of class numeric.}
#'   \item{Coup.Input}{The nominal interest p.a. of the bond in percent. Object
#'                     of class numeric.}
#'   \item{RV.Input}{The face value (= redemption value, par value) of
#'                   the bond in percent.}
#'   \item{DCC.Input}{The day count convention the bond follows. Type ?AccrInt for details.}
#'   \item{EOM.Input}{Boolean indicating whether the bond follows the End-of-Month rule.}
#'   \item{TradeDate}{The calendar date on which the clean price was observed.}
#'   \item{SETT}{The settlement date that corresponds to \code{TradeDate}.}
#'   \item{CP.Input}{The clean price of the bond on \code{TradeDate}.}
#'   \item{YtM.Input}{The annualized yield to maturity of the bond on \code{TradeDate}.}
#' }
#'
#' @keywords datasets
#'
"PanelSomeBonds2016"
# File: BondValuation/R/PanelSomeBonds2016.R
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393

leap <- function(x) {
    .Call(`_BondValuation_leap`, x)
}

LDM <- function(x) {
    .Call(`_BondValuation_LDM`, x)
}

DaysInMonth <- function(x) {
    .Call(`_BondValuation_DaysInMonth`, x)
}

DaysInYear <- function(x) {
    .Call(`_BondValuation_DaysInYear`, x)
}

DayDiff <- function(x) {
    .Call(`_BondValuation_DayDiff`, x)
}

Date_LDM <- function(x) {
    .Call(`_BondValuation_Date_LDM`, x)
}

sumC <- function(x) {
    .Call(`_BondValuation_sumC`, x)
}

FirstMatch <- function(x) {
    .Call(`_BondValuation_FirstMatch`, x)
}

LeapDayInside <- function(x) {
    .Call(`_BondValuation_LeapDayInside`, x)
}

DIST <- function(x) {
    .Call(`_BondValuation_DIST`, x)
}

PayCalc <- function(x) {
    .Call(`_BondValuation_PayCalc`, x)
}

NumToDate <- function(x) {
    .Call(`_BondValuation_NumToDate`, x)
}

CppPrevDate <- function(x) {
    .Call(`_BondValuation_CppPrevDate`, x)
}

CppSuccDate <- function(x) {
    .Call(`_BondValuation_CppSuccDate`, x)
}
# File: BondValuation/R/RcppExports.R
#' Properties of 100 plain vanilla fixed coupon corporate bonds. #' #' A simulated dataset of 100 plain vanilla fixed coupon #' corporate bonds issued in 2016. #' #' @docType data #' #' @usage data(SomeBonds2016) #' #' @format A data frame with 100 rows and 12 variables: #' \describe{ #' \item{ID.No}{Identification number of the security.} #' \item{Coup.Type}{Type of the bond's coupon.} #' \item{Issue.Date}{The bond's issue date. Object of class Date #' with format \code{"\%Y-\%m-\%d"}.} #' \item{FIAD.Input}{Date on which the interest accrual starts (so-called #' "dated date"). Object of class Date with format #' \code{"\%Y-\%m-\%d"}.} #' \item{FIPD.Input}{First interest payment date after \code{Issue.Date}. #' Object of class Date with format \code{"\%Y-\%m-\%d"}.} #' \item{LIPD.Input}{Last interest payment date before \code{Mat.Date}. #' Object of class Date with format \code{"\%Y-\%m-\%d"}.} #' \item{Mat.Date}{So-called "maturity date" i.e. date on which the #' redemption value and the final interest are paid. #' Object of class Date with format \code{"\%Y-\%m-\%d"}.} #' \item{CpY.Input}{Number of interest payments per year. Object of class numeric.} #' \item{Coup.Input}{The nominal interest p.a. of the bond in percent. Object #' of class numeric.} #' \item{RV.Input}{The face value (= redemption value, par value) of #' the bond in percent.} #' \item{DCC.Input}{The day count convention the bond follows. Type ?AccrInt for details.} #' \item{EOM.Input}{Boolean indicating whether the bond follows the End-of-Month rule.} #' } #' #' @keywords datasets #' "SomeBonds2016"
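# Hedged sketch: the *.Input columns are shaped so they can be handed to the
# package's valuation functions. Assuming AccrInt() accepts the argument names
# checked by InputFormatCheck() (an assumption; see ?AccrInt for the actual
# signature), the accrued interest over the first coupon period of the first
# simulated bond could be computed as:
#   data(SomeBonds2016)
#   b <- SomeBonds2016[1, ]
#   AccrInt(StartDate = b$FIAD.Input, EndDate = b$FIPD.Input,
#           Coup = b$Coup.Input, DCC = b$DCC.Input, RV = b$RV.Input,
#           CpY = b$CpY.Input, EOM = b$EOM.Input)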
# File: BondValuation/R/SomeBonds2016.R
#' BP runs a shiny application to fit a bone section
#' @title Run a shiny application to fit a bone section
#' @author Marc Girondot \email{marc.girondot@@gmail.com}
#' @return Nothing
#' @description Run a shiny application to fit a bone section.
#' @examples
#' \dontrun{
#' # Not run:
#' library(BoneProfileR)
#' BP()
#' }
#' @export


BP <- function() {
  if (!requireNamespace("shiny", quietly = TRUE)) {
    stop("The shiny package is absent; please install it first.")
  }
  if (interactive()) {
    getFromNamespace("runApp", ns="shiny")(appDir = system.file("shiny", package="BoneProfileR"),
                                           launch.browser = TRUE)
  }
}
# File: BoneProfileR/R/BP.R
#' BP_AutoFit fits a model automatically
#' @title Fit a model automatically
#' @author Marc Girondot \email{marc.girondot@@gmail.com}
#' @return Characteristics of an image with all the fit information
#' @param file The file to be opened
#' @param xlsx TRUE, FALSE or the name and path of the report
#' @param rotation.angle The angle of rotation for analysis
#' @param center Which center is to be used.
#' @description Open an image, fit a model and generate a report.
#' @family BoneProfileR
#' @examples
#' \dontrun{
#' # Not run:
#' library(BoneProfileR)
#' path_Hedgehog <- system.file("extdata", "Erinaceus_europaeus_fem_2-1_small.png",
#'                              package = "BoneProfileR")
#' bone <- BP_AutoFit(file=path_Hedgehog, xlsx=TRUE)
#' # or to open a dialog box
#' bone <- BP_AutoFit()
#' }
#' @export

BP_AutoFit <- function(file=file.choose(), xlsx=TRUE, rotation.angle=0, center="ontogenetic") {
  pdf <- FALSE
  docx <- FALSE
  pb <- txtProgressBar(min = 0, max = 20, initial = 0, style = 3)
  bone <- BP_OpenImage(file=file)
  setTxtProgressBar(pb, 1)
  name <- attributes(bone)$name
  if (isTRUE(pdf)) {
    pdf.name <- file.path(getwd(), paste0(gsub("\\..+$", "", name), ".pdf"))
  }
  if (isFALSE(pdf)) {
    pdf.name <- NULL
  }
  # xlsx may also be a character string giving the name and path of the report
  # (see @param xlsx above); in that case it is used as-is.
  if (is.character(xlsx)) {
    xlsx.name <- xlsx
    xlsx <- TRUE
  } else {
    if (isTRUE(xlsx)) {
      xlsx.name <- file.path(getwd(), paste0(gsub("\\..+$", "", name), ".xlsx"))
    }
    if (isFALSE(xlsx)) {
      xlsx.name <- NULL
    }
  }
  if (isTRUE(docx)) {
    docx.name <- file.path(getwd(), paste0(gsub("\\..+$", "", name), ".docx"))
  }
  if (isFALSE(docx)) {
    docx.name <- NULL
  }
  bone <- BP_DetectBackground(bone=bone, analysis="logistic", show.plot=FALSE)
  setTxtProgressBar(pb, 2)
  bone <- BP_DetectForeground(bone=bone, analysis="logistic", show.plot=FALSE)
  setTxtProgressBar(pb, 3)
  bone <- BP_DetectCenters(bone=bone, analysis="logistic", show.plot=FALSE)
  setTxtProgressBar(pb, 4)
  bone <- BP_EstimateCompactness(bone, analysis="logistic", rotation.angle=rotation.angle,
                                 center=center, show.plot=FALSE)
  setTxtProgressBar(pb, 5)
  bone <- BP_FitMLCompactness(bone, analysis="logistic", silent=TRUE,
                              fixed.parameters = c(K1=1, K2=1, Max=3, Min=-3),
                              fitted.parameters = c(P=0.5, S=0.1))
  fittedpar <- BP_GetFittedParameters(bone, analysis="logistic")
  bone <- BP_FitMLCompactness(bone, analysis="logistic", silent=TRUE,
                              fixed.parameters = c(K1=1, K2=1),
                              fitted.parameters = c(fittedpar, Max=2, Min=-2))
  setTxtProgressBar(pb, 8)
  fittedpar <- BP_GetFittedParameters(bone, analysis="logistic")
  setTxtProgressBar(pb, 9)
  bone <- BP_DuplicateAnalysis(bone, from="logistic", to="flexit")
  setTxtProgressBar(pb, 10)
  bone <- BP_FitMLCompactness(bone, fitted.parameters=c(fittedpar, K1=1, K2=1),
                              fixed.parameters=NULL, analysis="flexit", silent=TRUE)
  setTxtProgressBar(pb, 14)
  outAIC <- compare_AIC(Logistic=BP_GetFittedParameters(bone, analysis="logistic", alloptim=TRUE),
                        Flexit=BP_GetFittedParameters(bone, analysis="flexit", alloptim=TRUE),
                        silent = TRUE)
  if (outAIC$DeltaAIC[1]==0) {
    # The logistic model is retained
    bone <- RM_delete(bone, RMname ="flexit")
  } else {
    # The flexit model is retained
    bone <- RM_delete(bone, RMname ="logistic")
  }
  setTxtProgressBar(pb, 15)
  bone <- BP_FitBayesianCompactness(bone, analysis=1)
  setTxtProgressBar(pb, 18)
  bone <- BP_FitMLRadialCompactness(bone, analysis=1, silent=TRUE)
  setTxtProgressBar(pb, 20)
  if (xlsx | docx | pdf) BP_Report(bone=bone,
                                   analysis=1,
                                   control.plot = list(message = NULL, show.centers = TRUE,
                                                       show.colors = TRUE, show.grid = TRUE,
                                                       CI = "MCMC", show.legend = TRUE),
                                   docx=docx.name,
                                   pdf= pdf.name,
                                   xlsx=xlsx.name,
                                   author="Marc Girondot",
                                   title=name)
  return(invisible(bone))
}
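# Hedged post-fit sketch: BP_AutoFit() keeps only the model (logistic or
# flexit) with the lower AIC, stored as analysis 1. Its results can then be
# inspected with the accessors the function itself uses:
#   bone <- BP_AutoFit(file = path_Hedgehog, xlsx = FALSE)
#   BP_GetFittedParameters(bone, analysis = 1)
#   plot(bone, type = "observations+model")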
/scratch/gouwar.j/cran-all/cranData/BoneProfileR/R/BP_AutoFit.R
#' BP_ChooseBackground lets the user choose the background color of an image
#' @title Let the user choose the background color of an image
#' @author Marc Girondot \email{marc.girondot@@gmail.com}
#' @return The original bone object with a new attribute for background color
#' @param bone The bone image to be used
#' @param analysis The name or rank of analysis
#' @description Lets the user choose the background color of an image.
#' @family BoneProfileR
#' @examples
#' \dontrun{
#' # Not run:
#' path_Hedgehog <- system.file("extdata", "Erinaceus_europaeus_fem_2-1_small.tif", 
#'                              package = "BoneProfileR")
#' bone <- BP_OpenImage(file=path_Hedgehog)
#' bone <- BP_ChooseBackground(bone=bone)
#' bone <- BP_ChooseForeground(bone=bone)
#' plot(bone)
#' }
#' @export
BP_ChooseBackground <- function(bone, analysis=1) {
  oldpar <- par(no.readonly = TRUE)    # code line i
  on.exit(par(oldpar))                 # code line i + 1
  plot(bone, message="Please choose the background color", restorePar=FALSE)
  pos <- getFromNamespace(".BP_DetectClick", ns="BoneProfileR")(bone)
  bg <- bone[pos["x"], pos["y"], 1, 1:3]
  bg <- rgb(red=bg[1], green=bg[2], blue=bg[3])
  bone <- RM_add(x=bone, RMname=analysis, valuename = "bg", value=bg)
  bone <- RM_delete(x=bone, RMname = analysis, valuename="threshold")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="contour")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="centers")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="compactness")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="array.compactness")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="cut.distance.center")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="cut.angle")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="compactness.synthesis")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="optim")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="used.centers")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="optimRadial")
  plot(bone, message="Do not forget to check thresholding")
  return(bone)
}
/scratch/gouwar.j/cran-all/cranData/BoneProfileR/R/BP_ChooseBackground.R
#' BP_ChooseCenter lets the user choose the center of the bone
#' @title Let the user choose the center of the bone
#' @author Marc Girondot \email{marc.girondot@@gmail.com}
#' @return The original bone object with a new attribute for center
#' @param bone The bone image to be used
#' @param analysis The name or rank of analysis
#' @description Lets the user choose the center of the bone.
#' @family BoneProfileR
#' @examples
#' \dontrun{
#' # Not run:
#' library(BoneProfileR)
#' path_Hedgehog <- system.file("extdata", "Erinaceus_europaeus_fem_2-1_small.png", 
#'                              package = "BoneProfileR")
#' bone <- BP_OpenImage(file=path_Hedgehog)
#' bone <- BP_DetectBackground(bone=bone)
#' bone <- BP_DetectForeground(bone=bone)
#' bone <- BP_ChooseCenter(bone=bone)
#' # For a partial section, only BP_ChooseCenter() must be used
#' path_Dicynodon <- system.file("extdata", "Dicynodon_tibia_11.11.1.T_b_b-1.png", 
#'                               package = "BoneProfileR")
#' bone <- BP_OpenImage(file=path_Dicynodon)
#' bone <- BP_DetectBackground(bone=bone)
#' bone <- BP_DetectForeground(bone=bone)
#' bone <- BP_ChooseCenter(bone=bone)
#' bone <- BP_EstimateCompactness(bone, center="user", partial=TRUE)
#' bone <- BP_FitMLCompactness(bone, analysis=1)
#' plot(bone, type="observations+model")
#' }
#' @export
BP_ChooseCenter <- function(bone, analysis=1) {
  if (is.null(RM_get(x=bone, RMname=analysis, valuename = "bg")) | 
      is.null(RM_get(x=bone, RMname=analysis, valuename = "fg"))) {
    stop("You must first setup background and foreground colors")
  }
  oldpar <- par(no.readonly = TRUE)    # code line i
  on.exit(par(oldpar))                 # code line i + 1
  plot(bone, message="Please choose the center of the section", restorePar=FALSE)
  pos <- getFromNamespace(".BP_DetectClick", ns="BoneProfileR")(bone)
  GC_cortex.x <- NA
  GC_cortex.y <- NA
  GC_bone.x <- NA
  GC_bone.y <- NA
  GC_medula.x <- NA
  GC_medula.y <- NA
  bone <- RM_delete(x=bone, RMname = analysis, valuename="compactness")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="array.compactness")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="cut.distance.center")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="cut.angle")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="compactness.synthesis")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="optim")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="used.centers")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="optimRadial")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="contour")
  bone <- RM_add(x=bone, RMname = analysis, valuename="centers", 
                 value=c(GC_cortex.x=GC_cortex.x, GC_cortex.y=GC_cortex.y, 
                         GC_bone.x=GC_bone.x, GC_bone.y=GC_bone.y, 
                         GC_medula.x=GC_medula.x, GC_medula.y=GC_medula.y, 
                         GC_user.x=unname(pos["x"]), GC_user.y=unname(pos["y"])))
  bone <- RM_add(x=bone, RMname = analysis, valuename="used.centers", 
                 value=c(center.x=unname(pos["x"]), center.y=unname(pos["y"])))
  plot(bone, message="Do not forget to check thresholding")
  return(bone)
}
/scratch/gouwar.j/cran-all/cranData/BoneProfileR/R/BP_ChooseCenter.R
#' BP_ChooseForeground lets the user choose the foreground color of an image
#' @title Let the user choose the foreground color of an image
#' @author Marc Girondot \email{marc.girondot@@u-psud.fr}
#' @return The original bone object with a new attribute for foreground color
#' @param bone The bone image to be used
#' @param analysis The name or rank of analysis
#' @description Lets the user choose the foreground color of an image.
#' @family BoneProfileR
#' @examples
#' \dontrun{
#' # Not run:
#' bone <- BP_OpenImage()
#' bone <- BP_ChooseBackground(bone=bone)
#' bone <- BP_ChooseForeground(bone=bone)
#' plot(bone)
#' }
#' @export
BP_ChooseForeground <- function(bone, analysis=1) {
  oldpar <- par(no.readonly = TRUE)    # code line i
  on.exit(par(oldpar))                 # code line i + 1
  plot(bone, message="Please choose the foreground color", restorePar=FALSE)
  pos <- getFromNamespace(".BP_DetectClick", ns="BoneProfileR")(bone)
  fg <- bone[pos["x"], pos["y"], 1, 1:3]
  fg <- rgb(red=fg[1], green=fg[2], blue=fg[3])
  bone <- RM_add(x=bone, RMname=analysis, valuename = "fg", value=fg)
  bone <- RM_delete(x=bone, RMname = analysis, valuename="threshold")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="contour")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="centers")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="compactness")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="array.compactness")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="cut.distance.center")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="cut.angle")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="compactness.synthesis")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="optim")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="used.centers")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="optimRadial")
  # The message must be passed to plot(), not to return()
  plot(bone, message="Do not forget to check thresholding")
  return(bone)
}
/scratch/gouwar.j/cran-all/cranData/BoneProfileR/R/BP_ChooseForeground.R
#' BP_DetectBackground detects the background color of an image
#' @title Detect the background color of an image
#' @author Marc Girondot \email{marc.girondot@@gmail.com}
#' @return The original bone object with a new attribute for background color
#' @param bone The bone image to be used
#' @param analysis The name or rank of analysis
#' @param show.plot Should the plot be shown?
#' @description Detects the background color of an image.
#' @family BoneProfileR
#' @examples
#' \dontrun{
#' # Not run:
#' bone <- BP_OpenImage()
#' bone <- BP_DetectBackground(bone=bone)
#' bone <- BP_DetectForeground(bone=bone)
#' plot(bone)
#' }
#' @export
BP_DetectBackground <- function(bone, analysis=1, show.plot=TRUE) {
  red <- bone[c(2:5, dim(bone)[1]-2:5), c(2:5, dim(bone)[2]-2:5), 1, 1]
  green <- bone[c(2:5, dim(bone)[1]-2:5), c(2:5, dim(bone)[2]-2:5), 1, 2]
  blue <- bone[c(2:5, dim(bone)[1]-2:5), c(2:5, dim(bone)[2]-2:5), 1, 3]
  bg <- rgb(red=median(red), green=median(green), blue=median(blue))
  bone <- RM_add(x=bone, RMname=analysis, valuename = "bg", value=bg)
  bone <- RM_delete(x=bone, RMname = analysis, valuename="threshold")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="contour")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="centers")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="compactness")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="array.compactness")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="cut.distance.center")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="cut.angle")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="compactness.synthesis")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="optim")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="used.centers")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="optimRadial")
  if (show.plot) plot(bone, message="Do not forget to check thresholding")
  return(bone)
}
/scratch/gouwar.j/cran-all/cranData/BoneProfileR/R/BP_DetectBackground.R
#' BP_DetectCenters detects the centers of an image
#' @title Detect the centers of an image
#' @author Marc Girondot \email{marc.girondot@@gmail.com}
#' @return The original bone object with a new attribute for centers
#' @param bone The bone image to be used
#' @param analysis The name or rank of analysis
#' @param show.plot Should the plot be shown?
#' @description Detects the centers of an image. Note that this function must not be used with a partial bone section.
#' @family BoneProfileR
#' @examples
#' \dontrun{
#' # Not run:
#' library(BoneProfileR)
#' bone <- BP_OpenImage()
#' # or
#' path_Hedgehog <- system.file("extdata", "Erinaceus_europaeus_fem_2-1_small.png", 
#'                              package = "BoneProfileR")
#' bone <- BP_OpenImage(file=path_Hedgehog)
#' bone <- BP_DetectBackground(bone=bone)
#' bone <- BP_DetectForeground(bone=bone)
#' bone <- BP_DetectCenters(bone=bone)
#' plot(bone, type="mineralized", show.grid=FALSE)
#' plot(bone, type="unmineralized", show.grid=FALSE)
#' plot(bone, type="section", show.grid=FALSE)
#' }
#' @export
BP_DetectCenters <- function(bone, analysis=1, show.plot=TRUE) {
  if (is.null(RM_get(x=bone, RMname=analysis, valuename = "bg")) | 
      is.null(RM_get(x=bone, RMname=analysis, valuename = "fg"))) {
    stop("You must first setup background and foreground colors")
  }
  bg <- RM_get(x=bone, RMname=analysis, valuename = "bg")
  fg <- RM_get(x=bone, RMname=analysis, valuename = "fg")
  # Format the section as a threshold mask
  # if (is.null(RM_get(x=bone, RMname=analysis, valuename = "threshold"))) {
  threshold <- getFromNamespace(".BP_threshold", ns="BoneProfileR")(bone)
  bone <- RM_add(x=bone, RMname = analysis, valuename="threshold", value=threshold)
  # } else {
  #   threshold <- RM_get(x=bone, RMname = analysis, valuename="threshold")
  # }
  # contour <- RM_get(x=bone, RMname = analysis, valuename="contour")
  # if (is.null(contour)){
  contour <- getFromNamespace(".BP_contour", ns="BoneProfileR")(bone, threshold=threshold, 
                                                                analysis=analysis, partial=FALSE, 
                                                                center.x=NA, center.y=NA)
  bone <- RM_add(x=bone, RMname = analysis, valuename="contour", value=contour)
  # }
  # essai <- array(data=as.numeric(contour), dim=c(dim(contour), 1, 1))
  # class(essai) <- c("BoneProfileR", "cimg", "imager_array", "numeric" )
  # plot(essai)
  GC_cortex.x <- mean(which(threshold, arr.ind = TRUE)[, 1])
  GC_cortex.y <- mean(which(threshold, arr.ind = TRUE)[, 2])
  GC_bone.x <- mean(which(contour, arr.ind = TRUE)[, 1])
  GC_bone.y <- mean(which(contour, arr.ind = TRUE)[, 2])
  GC_medula.x <- mean(which(contour & !threshold, arr.ind = TRUE)[, 1])
  GC_medula.y <- mean(which(contour & !threshold, arr.ind = TRUE)[, 2])
  bone <- RM_delete(x=bone, RMname = analysis, valuename="compactness")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="array.compactness")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="cut.distance.center")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="cut.angle")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="compactness.synthesis")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="optim")
  # bone <- RM_delete(x=bone, RMname = analysis, valuename="contour")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="used.centers")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="optimRadial")
  #
  GC_ontoCenter.x <- GC_bone.x - (GC_cortex.x - GC_bone.x) + (GC_medula.x - GC_bone.x)
  GC_ontoCenter.y <- GC_bone.y - (GC_cortex.y - GC_bone.y) + (GC_medula.y - GC_bone.y)
  # points(GC_ontoCenter.x, GC_ontoCenter.y, col="blue", pch=19)
  bone <- RM_add(x=bone, RMname = analysis, valuename="centers", 
value=c(GC_cortex.x=GC_cortex.x, GC_cortex.y=GC_cortex.y, GC_bone.x=GC_bone.x, GC_bone.y=GC_bone.y, GC_medula.x=GC_medula.x, GC_medula.y=GC_medula.y, GC_user.x=NA, GC_user.y=NA, GC_ontogenic.x=GC_ontoCenter.x, GC_ontogenic.y=GC_ontoCenter.y)) if (show.plot) plot(bone, message="Do not forget to check thresholding") return(bone) }
/scratch/gouwar.j/cran-all/cranData/BoneProfileR/R/BP_DetectCenters.R
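# Illustrative sketch (not part of the package sources): how the ontogenetic center
# stored by BP_DetectCenters() relates to the three centroids it computes. GC_bone
# is the centroid of the whole section, GC_cortex of the mineralized pixels and
# GC_medula of the unmineralized ones; the ontogenetic center mirrors the cortex
# centroid through the section centroid and then shifts it by the medulla offset.
# The coordinates below are made up for demonstration only.
GC_bone   <- c(x=100, y=100)   # centroid of the whole section
GC_cortex <- c(x=104, y= 98)   # centroid of mineralized pixels
GC_medula <- c(x= 95, y=103)   # centroid of unmineralized pixels
GC_onto <- GC_bone - (GC_cortex - GC_bone) + (GC_medula - GC_bone)
# GC_onto is what is stored as GC_ontogenic.x / GC_ontogenic.y in the "centers" value
print(GC_onto)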
# #' BP_DetectClick waits for a click in the image # #' @title Wait for a click in the image # #' @author marc.girondot@@u-psud.fr # #' @return a vector with x and y # #' @param bone The bone image # #' @description Wait for a click in the image. # #' @family BoneProfileR # #' @examples # #' \dontrun{ # #' bone <- BP_OpenImage() # #' BP_PlotBone(bone) # #' pos <- BP_DetectClick(bone) # #' } # #' @export .BP_DetectClick <- function(bone) { click.locn <- locator(n = 1, type = "n") # click.locn$x <- click.locn$x * dim(bone)[1] # click.locn$y <- click.locn$y * dim(bone)[2] if (is.null(click.locn)) { click.locn <- list(x=NA, y=NA) } else { if (click.locn$x<1) click.locn$x <- 1 if (click.locn$x>dim(bone)[1]) click.locn$x <- dim(bone)[1] if (click.locn$y<1) click.locn$y <- 1 if (click.locn$y>dim(bone)[2]) click.locn$y <- dim(bone)[2] } return(c(x=click.locn$x, y=click.locn$y)) }
/scratch/gouwar.j/cran-all/cranData/BoneProfileR/R/BP_DetectClick.R
#' BP_DetectForeground detects the foreground color of an image
#' @title Detect the foreground color of an image
#' @author Marc Girondot \email{marc.girondot@@gmail.com}
#' @return The original bone object with a new attribute for foreground color
#' @param bone The bone image to be used
#' @param analysis The name or rank of analysis
#' @param show.plot Should the plot be shown?
#' @description Detects the foreground color of an image.
#' @family BoneProfileR
#' @examples
#' \dontrun{
#' # Not run:
#' bone <- BP_OpenImage()
#' bone <- BP_DetectBackground(bone=bone)
#' bone <- BP_DetectForeground(bone=bone)
#' plot(bone)
#' }
#' @export
BP_DetectForeground <- function(bone, analysis=1, show.plot=TRUE) {
  if (is.null(RM_get(x=bone, RM="RM", RMname=analysis, valuename = "bg"))) {
    red <- bone[c(2:5, dim(bone)[1]-2:5), c(2:5, dim(bone)[2]-2:5), 1, 1]
    green <- bone[c(2:5, dim(bone)[1]-2:5), c(2:5, dim(bone)[2]-2:5), 1, 2]
    blue <- bone[c(2:5, dim(bone)[1]-2:5), c(2:5, dim(bone)[2]-2:5), 1, 3]
    bg <- rgb(red=median(red), green=median(green), blue=median(blue))
    red <- col2rgb(bg)["red", 1]
    green <- col2rgb(bg)["green", 1]
    blue <- col2rgb(bg)["blue", 1]
  } else {
    bg <- RM_get(x=bone, RM="RM", RMname=analysis, valuename = "bg")
    red <- col2rgb(bg)["red", 1]
    green <- col2rgb(bg)["green", 1]
    blue <- col2rgb(bg)["blue", 1]
  }
  pos <- sqrt((bone[, , 1, 1]-red/255)^2+
                (bone[, , 1, 2]-green/255)^2+
                (bone[, , 1, 3]-blue/255)^2)
  pos2 <- which(pos==max(pos), arr.ind = TRUE)[1, ]
  fg <- rgb(red=bone[pos2[1], pos2[2], 1, 1], 
            green=bone[pos2[1], pos2[2], 1, 2], 
            blue=bone[pos2[1], pos2[2], 1, 3])
  bone <- RM_add(x=bone, RMname=analysis, valuename = "fg", value=fg)
  bone <- RM_delete(x=bone, RMname = analysis, valuename="threshold")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="contour")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="centers")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="compactness")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="array.compactness")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="cut.distance.center")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="cut.angle")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="compactness.synthesis")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="optim")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="used.centers")
  bone <- RM_delete(x=bone, RMname = analysis, valuename="optimRadial")
  if (show.plot) plot(bone, message="Do not forget to check thresholding")
  return(bone)
}
/scratch/gouwar.j/cran-all/cranData/BoneProfileR/R/BP_DetectForeground.R
#' BP_DuplicateAnalysis duplicates an analysis stored in an object
#' @title Duplicate an analysis stored in an object
#' @author Marc Girondot \email{marc.girondot@@gmail.com}
#' @return The original bone object with a new analysis
#' @param bone The bone image to be used
#' @param from The name or rank of analysis to be duplicated
#' @param to The name or rank of analysis to be created
#' @description Duplicates an analysis stored in an object.
#' @family BoneProfileR
#' @examples
#' \dontrun{
#' # Not run:
#' library(BoneProfileR)
#' path_Hedgehog <- system.file("extdata", "Erinaceus_europaeus_fem_2-1_small.png", 
#'                              package = "BoneProfileR")
#' bone <- BP_OpenImage(file=path_Hedgehog)
#' bone <- BP_DetectBackground(bone=bone, analysis="logistic")
#' bone <- BP_DetectForeground(bone=bone, analysis="logistic")
#' bone <- BP_DetectCenters(bone=bone, analysis="logistic")
#' bone <- BP_EstimateCompactness(bone, analysis="logistic")
#' bone <- BP_FitMLCompactness(bone, analysis="logistic")
#' plot(bone)
#' plot(bone, type="observations")
#' plot(bone, type="observations+model", analysis=1)
#' bone <- BP_DuplicateAnalysis(bone, from="logistic", to="flexit")
#' }
#' @export
BP_DuplicateAnalysis <- function(bone, from=1, to=2) {
  bone <- RM_duplicate(x=bone, RMnamefrom = from, RMnameto = to)
  return(bone)
}
/scratch/gouwar.j/cran-all/cranData/BoneProfileR/R/BP_DuplicateAnalysis.R
#' Example of hedgehog femur #' @title Example of hedgehog femur #' @author Marc Girondot \email{marc.girondot@@gmail.com} #' @docType data #' @name Erinaceus_europaeus #' @usage Erinaceus_europaeus #' @description Example of hedgehog femur. A drawing produced by Michel Laurin. #' @family BoneProfileR #' @keywords femur hedgehog #' @examples #' bone <- Erinaceus_europaeus #' plot(bone) #' @format A png image. NULL
/scratch/gouwar.j/cran-all/cranData/BoneProfileR/R/BP_Erinaceus_europaeus.R
#' BP_EstimateCompactness estimates the compactness of a bone section
#' @title Estimation of the compactness of a bone section
#' @author Marc Girondot \email{marc.girondot@@gmail.com}
#' @return The original bone object with a new attribute for compactness
#' @param bone The bone image to be used
#' @param center Which center is to be used: user, mineralized, unmineralized, section, ontogenetic
#' @param cut.angle Number of angles
#' @param cut.distance Number of distances
#' @param partial Is the section partial?
#' @param rotation.angle The angle of rotation for analysis
#' @param analysis The name or rank of analysis
#' @param show.plot Should the plot be shown?
#' @description Estimation of the compactness of a bone section.\cr
#' The reference for radial estimation of compactness is the trigonometric circle for rotation.angle=0 in 
#' BP_EstimateCompactness():\cr
#' - The top of the section is located at -pi/2.\cr
#' - The left of the section is located at -pi and +pi.\cr
#' - The bottom of the section is located at pi/2.\cr
#' - The right of the section is 0.\cr
#' If rotation.angle is different from 0, the value of rotation.angle is added to the angle modulo 2.pi.
#' @family BoneProfileR
#' @examples
#' \dontrun{
#' # Not run:
#' library(BoneProfileR)
#' bone <- BP_OpenImage()
#' # or
#' path_Hedgehog <- system.file("extdata", "Erinaceus_europaeus_fem_2-1_small.png", 
#'                              package = "BoneProfileR")
#' bone <- BP_OpenImage(file=path_Hedgehog)
#' bone <- BP_DetectBackground(bone=bone)
#' bone <- BP_DetectForeground(bone=bone)
#' bone <- BP_DetectCenters(bone=bone)
#' bone <- BP_EstimateCompactness(bone)
#' plot(bone, type="original", show.grid=FALSE)
#' plot(bone, type="mineralized", show.grid=FALSE)
#' plot(bone, type="unmineralized", show.grid=FALSE)
#' plot(bone, type="section", show.grid=FALSE)
#' }
#' @export
BP_EstimateCompactness <- function(bone, center="ontogenetic", partial=FALSE, 
                                   cut.angle=60, cut.distance=100, 
                                   rotation.angle=0, analysis=1, show.plot=TRUE) {
  # center="ontogenetic"; partial=FALSE; cut.angle=60; cut.distance=100; analysis=1; rotation.angle=0; show.plot=TRUE
  # center="user"; partial=TRUE; cut.angle=60; cut.distance=100; analysis=1; rotation.angle=0; show.plot=TRUE
  oldpar <- par(no.readonly = TRUE)    # code line i
  on.exit(par(oldpar))                 # code line i + 1
  center <- match.arg(center, choices = c("user", "mineralized", "ontogenic", "unmineralized", "section", "ontogenetic"))
  if (center == "ontogenic") center <- "ontogenetic"
  if ((center!="user") & partial) {
    stop("When partial analysis is done, only user center must be used.")
  }
  if (is.null(RM_get(x=bone, RMname=analysis, valuename = "centers"))) {
    stop("You must first setup centers using BP_DetectCenters() or BP_ChooseCenter()")
  }
  # Format the section as a threshold mask
  threshold <- RM_get(x=bone, RMname = analysis, valuename="threshold")
  if (is.null(threshold)) {
    threshold <- getFromNamespace(".BP_threshold", ns="BoneProfileR")(bone, analysis=analysis)
    bone <- RM_add(x=bone, RMname = analysis, valuename="threshold", value=threshold)
  }
  if (center == "user") {
    center.x <- RM_get(x=bone, RMname=analysis, valuename = "centers")["GC_user.x"]
    center.y <- RM_get(x=bone, RMname=analysis, valuename = "centers")["GC_user.y"]
  }
  if (center == "mineralized") {
    center.x <- RM_get(x=bone, RMname=analysis, valuename = "centers")["GC_cortex.x"]
    center.y <- RM_get(x=bone, RMname=analysis, valuename = "centers")["GC_cortex.y"]
  }
  if (center == "unmineralized") {
    center.x <- RM_get(x=bone, RMname=analysis, valuename = "centers")["GC_medula.x"]
    center.y <- RM_get(x=bone, RMname=analysis, valuename = "centers")["GC_medula.y"]
  }
  if (center == "section") {
    center.x <- RM_get(x=bone, RMname=analysis, valuename = "centers")["GC_bone.x"]
    center.y <- RM_get(x=bone, RMname=analysis, valuename = "centers")["GC_bone.y"]
  }
  if (center == "ontogenetic") {
    center.x <- RM_get(x=bone, RMname=analysis, valuename = "centers")["GC_ontogenic.x"]
    center.y <- RM_get(x=bone, RMname=analysis, valuename = "centers")["GC_ontogenic.y"]
  }
  if (is.na(center.x) | is.na(center.y)) stop("The requested center is not available")
  contour <- RM_get(x=bone, RMname=analysis, valuename = "contour")
  if (is.null(contour)) {
    contour <- getFromNamespace(".BP_contour", ns="BoneProfileR")(bone, analysis=analysis, 
                                                                  threshold=threshold, partial=partial, 
                                                                  center.x=center.x, center.y=center.y)
    bone <- RM_add(x=bone, RMname = analysis, valuename="contour", value=contour)
  }
  # sum(contour) is the number of pixels within the section
  compactness <- data.frame(x=rep(NA, times=sum(contour)), 
                            y=rep(NA, times=sum(contour)), 
                            distance.center=rep(NA, times=sum(contour)), 
                            distance.external=rep(NA, times=sum(contour)), 
                            angle=rep(NA, times=sum(contour)), 
                            mineral=rep(NA, times=sum(contour)))
  # if (pg) pb <- progress_bar$new(total = ncol(contour))
  cpt <- 1
  # contour has x in rows and y in columns
  ytot <- 1:ncol(contour)
  for (x in 1:nrow(contour)) {
    lgn <- contour[x, ]
    if (any(lgn)) {
      pixel <- as.numeric(threshold[x, lgn])
      y <- ytot[lgn]
      dc <- sqrt((y-center.y)^2+(x-center.x)^2)
      angle <- atan2(y-center.y, x-center.x)
      angle <- ((angle+pi+rotation.angle) %% (2*pi))-pi
      compactness[cpt:(cpt+length(y)-1), c("x", "y", "distance.center", "distance.external", "angle", "mineral")] <- 
        c(rep(x, times=length(y)), y, dc, rep(NA, times=length(y)), angle, pixel)
      cpt <- cpt + length(y)
    }
    # if (pg) pb$tick()
  }
  compactness <- cbind(compactness, 
                       cut.360=cut(compactness$angle, breaks = seq(from=-pi, to=pi, length.out = 360+1)))
  compactness <- cbind(compactness, 
                       cut.angle=cut(compactness$angle, breaks = seq(from=-pi, to=pi, length.out = cut.angle+1)))
  # cut.360 splits the angles into 360 sectors
  peripherie <- NULL
  for (l in levels(compactness$cut.360)) {
    dc <- subset(compactness, subset = (compactness$cut.360 == l), select="distance.center")
    if (nrow(dc) != 0) {
      m <- max(dc[, "distance.center"])
      s <- which(compactness$cut.360 == l)
      compactness[s, "distance.external"] <- rep(m, times=length(s))
      peripherie <- c(peripherie, m)
    } else {
      peripherie <- c(peripherie, NA)
    }
  }
  peripherie <- data.frame(angle=seq(from=-pi, to=pi, length.out = 360), peripherie=peripherie)
  if (partial) {
    peripherie <- peripherie[c(2:nrow(peripherie)-1), ]
  }
  peripherie <- na.omit(peripherie)
  bone <- RM_add(x=bone, RMname = analysis, valuename="peripherie", value=peripherie)
  # The other stored values must also be truncated
  compactness <- cbind(compactness, ratio.center=compactness$distance.center/compactness$distance.external)
  compactness <- cbind(compactness, 
                       cut.distance.center=cut(compactness$ratio.center, breaks = seq(from=0, to=1, length.out = cut.distance+1)))
  t <- table(compactness$cut.angle, compactness$cut.distance.center, compactness$mineral)
  m <- NULL
  nm <- NULL
  for (l in levels(compactness$cut.distance.center)) {
    m <- c(m, sum(t[, l, "1"]))
    nm <- c(nm, sum(t[, l, "0"]))
  }
  compactness.synthesis <- data.frame(distance.center=(seq(from=0, to=1, length.out = cut.distance+1)[-1]+
                                                         rev(rev(seq(from=0, to=1, length.out = cut.distance+1))[-1]))/2, 
                                      mineralized=m, 
                                      unmineralize=nm, 
                                      compactness=m/(m+nm))
  bone <- RM_delete(x=bone, RMname = analysis, valuename="optim")
  bone <- 
RM_delete(x=bone, RMname = analysis, valuename="optimRadial") bone <- RM_add(x=bone, RMname = analysis, valuename="compactness", value=compactness) bone <- RM_add(x=bone, RMname = analysis, valuename="array.compactness", value=t) bone <- RM_add(x=bone, RMname = analysis, valuename="cut.distance.center", value=seq(from=0, to=1, length.out = cut.distance+1)) bone <- RM_add(x=bone, RMname = analysis, valuename="cut.angle", value=seq(from=-pi, to=pi, length.out = cut.angle+1)) bone <- RM_add(x=bone, RMname = analysis, valuename="used.centers", value=c(center.x=unname(center.x), center.y=unname(center.y))) bone <- RM_add(x=bone, RMname = analysis, valuename="compactness.synthesis", value=compactness.synthesis) bone <- RM_add(x=bone, RMname = analysis, valuename="partial", value=partial) bone <- RM_add(x=bone, RMname = analysis, valuename="rotation.angle", value=rotation.angle) bone <- RM_add(x=bone, RMname = analysis, valuename="global.compactness", value=sum(compactness[, "mineral"])/(nrow(compactness))) if (show.plot) { # bonex <<- bone # plot(bone, type="observations") par(xaxs="i", yaxs="r") par(mar=c(4, 4, 2, 4)+0.4) plot(compactness.synthesis$distance.center, m/(m+nm), xlim=c(0, 1), ylim=c(0, 1), type="l", las=1, bty="n", xlab="Distance from the center", ylab="Compactness", lwd=2) lines(x=compactness.synthesis$distance.center, y=(m+nm)/max(m+nm), col="blue") axis(side = 4, at=seq(from=0, to=1, by=0.2), labels = round(seq(from=0, to=1, by=0.2)*max(m+nm), 0), las=1, col.axis="blue", col="blue") mtext("Number of pixels", side=4, line=3, col="blue") } return(bone) }
/scratch/gouwar.j/cran-all/cranData/BoneProfileR/R/BP_EstimateCompactness.R
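# Illustrative sketch (not part of the package sources): the angle convention used by
# BP_EstimateCompactness(). For a pixel at (x, y) and a center (center.x, center.y),
# the angle is atan2(y - center.y, x - center.x), then shifted by rotation.angle and
# wrapped back into [-pi, pi), exactly as in the loop above. With image coordinates
# (y increasing downwards), the top of the section maps to -pi/2 and the bottom to +pi/2.
angle_of_pixel <- function(x, y, center.x, center.y, rotation.angle = 0) {
  angle <- atan2(y - center.y, x - center.x)
  ((angle + pi + rotation.angle) %% (2 * pi)) - pi
}
angle_of_pixel(x = 150, y = 100, center.x = 100, center.y = 100)   # right of the section: 0
angle_of_pixel(x = 100, y =  50, center.x = 100, center.y = 100)   # top of the section: -pi/2
angle_of_pixel(x = 150, y = 100, center.x = 100, center.y = 100, 
               rotation.angle = pi)                                # right, after a pi rotation: -pi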
#' BP_FitBayesianCompactness estimates a Bayesian model of a bone section
#' @title Estimation of a Bayesian model of a bone section
#' @author Marc Girondot \email{marc.girondot@@gmail.com}
#' @return The bone object with the Bayesian fit added
#' @param bone The bone image to be used
#' @param priors Priors
#' @param n.iter Number of iterations
#' @param n.chains Number of chains
#' @param n.adapt Number of iterations to adapt
#' @param thin Thin parameter for analysis
#' @param analysis Name or rank of analysis
#' @param silent Should some information be shown?
#' @description Estimation of a Bayesian model of a bone section.
#' @family BoneProfileR
#' @examples
#' \dontrun{
#' # Not run:
#' library(BoneProfileR)
#' path_Hedgehog <- system.file("extdata", "Erinaceus_europaeus_fem_2-1_small.png", 
#'                              package = "BoneProfileR")
#' bone <- BP_OpenImage(file=path_Hedgehog)
#' bone <- BP_DetectBackground(bone=bone, analysis="logistic")
#' bone <- BP_DetectForeground(bone=bone, analysis="logistic")
#' bone <- BP_DetectCenters(bone=bone, analysis="logistic")
#' bone <- BP_EstimateCompactness(bone, analysis="logistic")
#' bone <- BP_FitMLCompactness(bone, analysis="logistic")
#' plot(bone)
#' plot(bone, type="observations")
#' plot(bone, type="observations+model", analysis=1)
#' fittedpar <- BP_GetFittedParameters(bone, analysis="logistic")
#' bone <- BP_DuplicateAnalysis(bone, from="logistic", to="flexit")
#' bone <- BP_FitMLCompactness(bone, 
#'                             fitted.parameters=c(fittedpar, K1=1, K2=1), 
#'                             fixed.parameters=NULL, analysis="flexit")
#' compare_AIC(Logistic=BP_GetFittedParameters(bone, analysis="logistic", alloptim=TRUE), 
#'             Flexit=BP_GetFittedParameters(bone, analysis="flexit", alloptim=TRUE))
#' out4p <- plot(bone, type="observations+model", analysis="logistic")
#' out6p <- plot(bone, type="observations+model", analysis="flexit")
#' bone <- BP_FitBayesianCompactness(bone, analysis="logistic")
#' plot(bone, type="observations+model", CI="MCMC", analysis="logistic")
#' bone <- BP_FitBayesianCompactness(bone, analysis="flexit")
#' plot(bone, type="observations+model", CI="MCMC", analysis="flexit")
#' }
#' @export
BP_FitBayesianCompactness <- function(bone=stop("A result from BP_FitMLCompactness() must be provided"), 
                                      priors=NULL, 
                                      n.iter = 10000, 
                                      n.chains = 1, 
                                      n.adapt = 100, 
                                      thin = 1, 
                                      analysis=1, silent=TRUE) {
  # priors=NULL; n.iter = 10000; n.chains = 1; n.adapt = 100; thin = 1; analysis=1
  if (is.null(RM_get(x=bone, RMname=analysis, valuename = "optim"))) 
    stop("The model must be first fitted with BP_FitMLCompactness()")
  if (is.null(priors)) {
    priors <- data.frame(Density=character(), Prior1=numeric(), Prior2=numeric(), 
                         SDProp=numeric(), Min=numeric(), Max=numeric(), Init=numeric(), 
                         stringsAsFactors = FALSE)
    p <- BP_GetFittedParameters(bone, analysis = analysis, alloptim = FALSE)
    if (!is.na(p["P"])) {
      priors <- rbind(priors, data.frame(Density="dunif", Prior1=0, Prior2=1, SDProp=0.2, 
                                         Min=0, Max=1, Init=unname(p["P"]), 
                                         stringsAsFactors = FALSE, row.names = "P"))
    }
    if (!is.na(p["S"])) {
      priors <- rbind(priors, data.frame(Density="dunif", Prior1=0, Prior2=10, SDProp=0.2, 
                                         Min=0, Max=10, Init=unname(p["S"]), 
                                         stringsAsFactors = FALSE, row.names = "S"))
    }
    if (!is.na(p["Min"])) {
      priors <- rbind(priors, data.frame(Density="dunif", Prior1=0, Prior2=0.8, SDProp=0.2, 
                                         Min=0, Max=0.8, Init=unname(p["Min"]), 
                                         stringsAsFactors = FALSE, row.names = "Min"))
    }
    if (!is.na(p["Max"])) {
      priors <- rbind(priors, data.frame(Density="dunif", Prior1=0.2, Prior2=1, SDProp=0.2, 
                                         Min=0.2, Max=1, Init=unname(p["Max"]), 
                                         stringsAsFactors = FALSE, row.names = "Max"))
    }
    if (!is.na(p["K1"])) {
      priors <- rbind(priors, data.frame(Density="dunif", Prior1=-10, Prior2=10, SDProp=0.2, 
                                         Min=-10, Max=10, Init=unname(p["K1"]), 
                                         stringsAsFactors = FALSE, row.names = "K1"))
    }
    if (!is.na(p["K2"])) {
      priors <- rbind(priors, data.frame(Density="dunif", Prior1=-10, Prior2=10, SDProp=0.2, 
                                         Min=-10, Max=10, Init=unname(p["K2"]), 
                                         stringsAsFactors = FALSE, row.names = "K2"))
    }
    if (!silent) print(priors)
  }
  fixedpar <- BP_GetFittedParameters(bone, analysis = analysis, alloptim = TRUE)$fixed.parameters
  mcmc <- MHalgoGen(
    likelihood = BP_LnLCompactness, 
    bone=bone, 
    fixed.parameters=fixedpar, 
    parameters_name = "par", 
    parameters = priors, 
    n.iter = n.iter, 
    n.chains = n.chains, 
    n.adapt = n.adapt, 
    thin = thin, 
    adaptive = TRUE)
  data <- RM_get(x=bone, RMname=analysis, valuename = "compactness.synthesis")
  outmcmc <- matrix(NA, ncol=nrow(data), nrow=n.iter)
  for (iter in 1:n.iter) {
    p <- c(mcmc$resultMCMC[[1]][iter, ], fixedpar)
    Min <- p["Min"]
    Max <- p["Max"]
    # 21/2/2020
    p["S"] <- 1/(4*p["S"])
    c <- flexit(x = data$distance.center, par = p) * (Max - Min) + Min
    outmcmc[iter, ] <- c
  }
  qmcmc <- apply(X = outmcmc, MARGIN = 2, FUN = function(x) quantile(x, probs = c(0.025, 0.5, 0.975)))
  colnames(qmcmc) <- data$distance.center
  mcmc <- modifyList(mcmc, list(quantiles=qmcmc))
  mcmc$timestamp <- date()
  mcmc$resultMCMC[[1]][, "Min"] <- mcmc$resultMCMC[[1]][, "Min"]
  mcmc$resultMCMC[[1]][, "Max"] <- mcmc$resultMCMC[[1]][, "Max"]
  summary.table <- data.frame(mean=apply(mcmc$resultMCMC[[1]], MARGIN=2, FUN = mean), 
                              se=apply(mcmc$resultMCMC[[1]], MARGIN=2, FUN = sd))
  mcmc <- modifyList(mcmc, list(summary.table=summary.table))
  # Remove bone from the stored control list because it takes too much space
  mcmc$parametersMCMC$control$bone <- NULL
  bone <- RM_add(x=bone, RMname = analysis, valuename = "mcmc", value=mcmc)
  return(bone)
}
/scratch/gouwar.j/cran-all/cranData/BoneProfileR/R/BP_FitBayesianCompactness.R
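# Illustrative sketch (not part of the package sources): the structure of the priors
# table expected by BP_FitBayesianCompactness() when the default is not used. Each
# row describes one fitted parameter, following the format built above: a density
# name, its two parameters, the SD of the MCMC proposal, hard bounds, and the
# initial value. The numbers below are arbitrary examples, not recommended values.
priors <- data.frame(Density = c("dunif", "dunif"), 
                     Prior1  = c(0, 0), 
                     Prior2  = c(1, 10), 
                     SDProp  = c(0.2, 0.2), 
                     Min     = c(0, 0), 
                     Max     = c(1, 10), 
                     Init    = c(0.5, 0.05), 
                     row.names = c("P", "S"), 
                     stringsAsFactors = FALSE)
# bone <- BP_FitBayesianCompactness(bone, priors = priors, analysis = "logistic")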
#' BP_FitMLCompactness estimates the likelihood of a model of a bone section
#' @title Estimation of the likelihood of a bone section
#' @author Marc Girondot \email{marc.girondot@@gmail.com}
#' @return The bone object with the fit added
#' @param fitted.parameters Parameters of the model to be fitted
#' @param bone The bone image to be used
#' @param fixed.parameters Fixed parameters of the model
#' @param priors Priors used for intermediate estimations
#' @param replicates.CI Number of replicates to estimate the confidence interval
#' @param analysis Name or rank of analysis
#' @param twosteps Should a two-steps analysis be performed?
#' @param silent Should information be shown?
#' @description Estimation of the model of compactness of a bone section.\cr
#' The two-steps analysis performs first a quasi-Newton method, then a Bayesian MCMC and finally again a quasi-Newton method. 
#' It generally ensures that the global minimum is found. On the other hand, it doubles the time to complete.
#' @family BoneProfileR
#' @examples
#' \dontrun{
#' # Not run:
#' library(BoneProfileR)
#' bone <- BP_OpenImage()
#' # or, to use the package ijtiff to open a tiff image
#' bone <- BP_OpenImage(ijtiff=TRUE)
#' path_Hedgehog <- system.file("extdata", "Erinaceus_europaeus_fem_2-1_small.png", 
#'                              package = "BoneProfileR")
#' bone <- BP_OpenImage(file=path_Hedgehog)
#' bone <- BP_DetectBackground(bone=bone, analysis="logistic")
#' bone <- BP_DetectForeground(bone=bone, analysis="logistic")
#' bone <- BP_DetectCenters(bone=bone, analysis="logistic")
#' bone <- BP_EstimateCompactness(bone, analysis="logistic")
#' plot(bone, type="mineralized", show.grid=FALSE)
#' plot(bone, type="unmineralized", show.grid=FALSE)
#' plot(bone, type="section", show.grid=FALSE)
#' bone <- BP_FitMLCompactness(bone, analysis="logistic", twosteps=TRUE)
#' BP_GetFittedParameters(bone)
#' plot(bone)
#' plot(bone, type="observations")
#' plot(bone, type="observations+model", analysis=1)
#' bone <- BP_DuplicateAnalysis(bone, from="logistic", to="flexit")
#' fittedpar <- BP_GetFittedParameters(bone, analysis="logistic")
#' bone <- BP_DuplicateAnalysis(bone, from="logistic", to="flexit")
#' BP_ListAnalyses(bone)
#' bone <- BP_FitMLCompactness(bone, 
#'                             fitted.parameters=c(fittedpar, K1=1, K2=1), 
#'                             fixed.parameters=NULL, analysis="flexit", twosteps=TRUE)
#' compare_AIC(Logistic=BP_GetFittedParameters(bone, analysis="logistic", alloptim=TRUE), 
#'             Flexit=BP_GetFittedParameters(bone, analysis="flexit", alloptim=TRUE))
#' out4p <- plot(bone, type="observations+model", analysis="logistic")
#' out6p <- plot(bone, type="observations+model", analysis="flexit")
#' }
#' @export
BP_FitMLCompactness <- function(bone, fitted.parameters=c(P=0.5, S=0.05, Min=0.001, Max=0.999), 
                                priors=NULL, 
                                fixed.parameters=c(K1=1, K2=1), twosteps=TRUE, 
                                replicates.CI=10000, 
                                analysis=1, silent=FALSE) {
  # fitted.parameters=c(P=0.5, S=0.05, Min=-2, Max=5); fixed.parameters=c(K1=1, K2=1); analysis=1
  # BP_LnLCompactness(par=fitted.parameters, bone, data_m=NULL, data_nm=NULL, distance.center=NULL, fixed.parameters=fixed.parameters, analysis=analysis)
  # Note: the bounds must be named K2 (not k2) to match the parameter names
  lower_limit <- c(P=0, S=-2, Min=0, Max=0.2, K1=-1000, K2=-1000)
  upper_limit <- c(P=1, S=2, Min=0.8, Max=1, K1=1000, K2=1000)
  lower <- lower_limit[names(fitted.parameters)]
  upper <- upper_limit[names(fitted.parameters)]
  o <- optim(par=fitted.parameters, fn=BP_LnLCompactness, 
             fixed.parameters=fixed.parameters, 
             bone=bone, 
             upper = upper, lower = lower, 
             method = "L-BFGS-B", hessian = TRUE, analysis=analysis)
  if (twosteps) {
    # Launch the MCMC step
    p <- o$par
    if (is.null(priors)) {
      priors <- data.frame(Density=character(), Prior1=numeric(), Prior2=numeric(), 
                           SDProp=numeric(), Min=numeric(), Max=numeric(), Init=numeric(), 
                           stringsAsFactors = FALSE)
      if (!is.na(p["P"])) {
        priors <- rbind(priors, data.frame(Density="dunif", Prior1=0, Prior2=1, SDProp=0.005, 
                                           Min=0, Max=1, Init=unname(p["P"]), 
                                           stringsAsFactors = FALSE, row.names = "P"))
      }
      if (!is.na(p["S"])) {
        priors <- rbind(priors, data.frame(Density="dunif", Prior1=0, Prior2=max(c(unname(p["S"])*2, +10)), SDProp=0.3, 
                                           Min=0, Max=max(c(unname(p["S"])*2, +10)), Init=unname(p["S"]), 
                                           stringsAsFactors = FALSE, row.names = "S"))
      }
      if (!is.na(p["Min"])) {
        priors <- rbind(priors, data.frame(Density="dunif", Prior1=0, Prior2=0.8, SDProp=0.2, 
                                           Min=0, Max=0.8, Init=unname(p["Min"]), 
                                           stringsAsFactors = FALSE, row.names = "Min"))
      }
      if (!is.na(p["Max"])) {
        priors <- rbind(priors, data.frame(Density="dunif", Prior1=0.2, Prior2=1, SDProp=0.2, 
                                           Min=0.2, Max=1, Init=unname(p["Max"]), 
                                           stringsAsFactors = FALSE, row.names = "Max"))
      }
      if (!is.na(p["K1"])) {
        priors <- rbind(priors, data.frame(Density="dunif", Prior1=min(c(unname(p["K1"])*2, -10)), Prior2=max(c(unname(p["K1"])*2, +10)), SDProp=0.2, 
                                           Min=min(c(unname(p["K1"])*2, -10)), Max=max(c(unname(p["K1"])*2, +10)), Init=unname(p["K1"]), 
                                           stringsAsFactors = FALSE, row.names = "K1"))
      }
      if (!is.na(p["K2"])) {
        priors <- rbind(priors, data.frame(Density="dunif", Prior1=min(c(unname(p["K2"])*2, -10)), Prior2=max(c(unname(p["K2"])*2, +10)), SDProp=0.2, 
                                           Min=min(c(unname(p["K2"])*2, -10)), Max=max(c(unname(p["K2"])*2, +10)), Init=unname(p["K2"]), 
                                           stringsAsFactors = FALSE, row.names = "K2"))
      }
    }
    mcmc <- HelpersMG::MHalgoGen(
      likelihood = BP_LnLCompactness, 
      bone=bone, 
      fixed.parameters=fixed.parameters, 
      parameters_name = "par", 
      parameters = priors, 
      n.iter = 10000, 
      n.chains = 1, 
      n.adapt = 100, 
      thin = 1, 
      adaptive = TRUE)
    fitted.parameters <- HelpersMG::as.parameters(mcmc)
    o <- optim(par=fitted.parameters, fn=BP_LnLCompactness, 
               fixed.parameters=fixed.parameters, 
               bone=bone, 
               upper = upper, lower = lower, 
               method = "L-BFGS-B", hessian = TRUE, analysis=analysis)
  }
  o$fixed.parameters <- fixed.parameters
  o$AIC <- 2*o$value+2*length(o$par)
  o$SE <- SEfromHessian(o$hessian)
  rd <- RandomFromHessianOrMCMC(method = "hessian", 
                                Hessian = o$hessian, 
                                fitted.parameters = o$par, 
                                fixed.parameters = o$fixed.parameters, 
                                replicates = replicates.CI, 
                                probs = NULL, silent = silent)
  Min <- rd$random[, "Min"]
  Max <- rd$random[, "Max"]
  rd$random[, "Min"] <- Min
  rd$random[, "Max"] <- Max
  m <- matrix(data =c(apply(rd$random, MARGIN=2, FUN=mean), 
                      apply(rd$random, MARGIN=2, FUN=sd)), ncol=2)
  colnames(m) <- c("Mean", "SE")
  rownames(m) <- colnames(rd$random)
  o$summary.table <- m
  # Compute the confidence interval from the Hessian
  data <- RM_get(x=bone, RMname=analysis, valuename = "compactness.synthesis")
  outHessian <- matrix(NA, ncol=nrow(data), nrow=replicates.CI)
  for (iter in 1:replicates.CI) {
    p <- unlist(rd$random[iter, , drop=TRUE])
    # 21/02/2020
    p["S"] <- 1/(4*p["S"])
    c <- flexit(x = data$distance.center, par = p) * (p["Max"] - p["Min"]) + p["Min"]
    outHessian[iter, ] <- c
  }
  qHessian <- apply(X = outHessian, MARGIN = 2, FUN = function(x) quantile(x, probs = c(0.025, 0.5, 0.975)))
  colnames(qHessian) <- data$distance.center
  o$quantiles <- qHessian
  bone <- RM_add(x=bone, RMname = analysis, valuename = "optim", value=o)
  return(bone)
}
/scratch/gouwar.j/cran-all/cranData/BoneProfileR/R/BP_FitMLCompactness.R
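# Illustrative sketch (not part of the package sources): the generic shape of the
# two-steps strategy used above, shown on a toy one-parameter likelihood. A first
# quasi-Newton fit, then (in the package) an MCMC exploration seeded at its optimum
# with HelpersMG::MHalgoGen(), then a second quasi-Newton fit started from the MCMC
# mode; the intermediate MCMC helps escape a local minimum found by the first fit.
nll <- function(p, x) -sum(dnorm(x, mean = p, sd = 1, log = TRUE))
x <- rnorm(50, mean = 2)
o1 <- optim(par = c(mu = 0), fn = nll, x = x, method = "L-BFGS-B", lower = -10, upper = 10)
# ... here BP_FitMLCompactness() runs HelpersMG::MHalgoGen() seeded at o1$par ...
o2 <- optim(par = o1$par, fn = nll, x = x, method = "L-BFGS-B", lower = -10, upper = 10)
o2$par  # close to 2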
#' BP_FitMLRadialCompactness estimates the likelihood of a model of a bone section
#' @title Estimation of the likelihood of a bone section
#' @author Marc Girondot \email{marc.girondot@@gmail.com}
#' @return The bone object with the radial fit added
#' @param bone The bone image to be used
#' @param fitted.parameters Parameters of the model to be fitted
#' @param fixed.parameters Fixed parameters of the model
#' @param analysis Name or rank of analysis
#' @param twosteps Should a two-steps analysis be performed?
#' @param priors If twosteps is TRUE, tells which priors should be used.
#' @param silent Should the function display some information?
#' @description Estimation of the compactness of a bone section using a radial model.\cr
#' If fitted.parameters and fixed.parameters are NULL and the analysis includes a 
#' BP_FitMLCompactness() result, the values of this result are used as a reference for 
#' fitted.parameters and fixed.parameters.\cr
#' If no BP_FitMLCompactness() result is available, it will use:\cr
#' fitted.parameters=c(P=0.5, S=0.05, Min=-2, Max=5); fixed.parameters=c(K1=1, K2=1).\cr
#' The reference for radial estimation of compactness is the trigonometric circle for rotation.angle=0 in 
#' BP_EstimateCompactness():\cr
#' - The top of the section is located at -pi/2.\cr
#' - The left of the section is located at -pi and +pi.\cr
#' - The bottom of the section is located at pi/2.\cr
#' - The right of the section is 0.\cr
#' If rotation.angle is different from 0, the value of rotation.angle is added to the angle modulo 2.pi.\cr
#' The two-steps analysis performs first a quasi-Newton method, then a Bayesian MCMC and finally again a quasi-Newton method. 
#' It generally ensures that the global minimum is found. On the other hand, it doubles the time to complete for each angle.
#' @family BoneProfileR
#' @examples
#' \dontrun{
#' # Not run
#' library(BoneProfileR)
#' path_Hedgehog <- system.file("extdata", "Erinaceus_europaeus_fem_2-1_small.png", 
#'                              package = "BoneProfileR")
#' bone <- BP_OpenImage(file=path_Hedgehog)
#' # or
#' bone <- BP_OpenImage(ijtiff=TRUE)
#' bone <- BP_DetectBackground(bone=bone, analysis="logistic")
#' bone <- BP_DetectForeground(bone=bone, analysis="logistic")
#' bone <- BP_DetectCenters(bone=bone, analysis="logistic")
#' bone <- BP_EstimateCompactness(bone, analysis="logistic")
#' bone <- BP_EstimateCompactness(bone, analysis="logistic", cut.angle=30)
#' bone <- BP_FitMLCompactness(bone, analysis="logistic")
#' plot(bone)
#' plot(bone, type="observations")
#' plot(bone, type="observations+model", analysis=1)
#' fittedpar <- BP_GetFittedParameters(bone, analysis="logistic")
#' bone <- BP_DuplicateAnalysis(bone, from="logistic", to="flexit")
#' bone <- BP_FitMLCompactness(bone, 
#'                             fitted.parameters=c(fittedpar, K1=1.01, K2=1.01), 
#'                             fixed.parameters=NULL, analysis="flexit")
#' bone <- BP_FitBayesianCompactness(bone, analysis="flexit")
#' mcmc <- RM_get(bone, RMname = "flexit", valuename="mcmc")
#' fittedpar <- as.parameters(mcmc)
#' bone <- BP_FitMLCompactness(bone, 
#'                             fitted.parameters=fittedpar, 
#'                             fixed.parameters=NULL, analysis="flexit")
#' compare_AIC(Logistic=BP_GetFittedParameters(bone, analysis="logistic", alloptim=TRUE), 
#'             Flexit=BP_GetFittedParameters(bone, analysis="flexit", alloptim=TRUE))
#' out4p <- plot(bone, type="observations+model", analysis="logistic")
#' out6p <- plot(bone, type="observations+model", analysis="flexit")
#' # The twosteps fit is more accurate but is around 100 times slower
#' bone <- BP_FitMLRadialCompactness(bone, analysis="logistic", twosteps=TRUE)
#' bone <- BP_FitMLRadialCompactness(bone, analysis="logistic", twosteps=FALSE)
#' plot(bone, type="observations", angle=0)
#' plot(bone, type="model", analysis="logistic", angle=0)
#' plot(bone, type="observations+model", angle=0)
#' plot(bone, type="observations+model", angle=pi)
#' plot(bone, type="radial", radial.variable=c("P", "S"), analysis="logistic")
#' plot(bone, type="radial", radial.variable=c("P", "S", "Min", "Max"), analysis="logistic")
#' plot(bone, type="radial", radial.variable=c("TRC"), analysis="logistic")
#' # Test the change of orientation using rotation.angle from BP_EstimateCompactness():
#' bone <- BP_DuplicateAnalysis(bone, from="logistic", to="logistic_rotation_pi")
#' # With a pi rotation, the top moves to the bottom and the left moves to the right
#' bone <- BP_EstimateCompactness(bone, rotation.angle=pi, analysis="logistic_rotation_pi")
#' bone <- BP_FitMLRadialCompactness(bone, analysis="logistic_rotation_pi")
#' plot(bone, type="radial", radial.variable=c("P", "S"), analysis="logistic")
#' plot(bone, type="radial", radial.variable=c("P", "S"), analysis="logistic_rotation_pi")
#' BP_Report(bone=bone, 
#'           analysis=1, 
#'           docx=NULL, 
#'           pdf=NULL, 
#'           xlsx=file.path(getwd(), "report.xlsx"), 
#'           author="Marc Girondot", 
#'           title=attributes(bone)$name)
#' }
#' @export
BP_FitMLRadialCompactness <- function(bone, fitted.parameters=NULL, priors=NULL, 
                                      fixed.parameters=NULL, analysis=1, 
                                      silent=FALSE, twosteps=TRUE) {
  # fitted.parameters=c(P=0.5, S=0.05, Min=0.001, Max=0.999); fixed.parameters=c(K1=1, K2=1); analysis=NULL; silent=FALSE; twosteps=TRUE
  # fitted.parameters=NULL; fixed.parameters=NULL; analysis=1; silent=FALSE; twosteps=TRUE
  if (is.null(fitted.parameters)) {
    if (is.null(BP_GetFittedParameters(bone, analysis=analysis))) {
      fitted.parameters <- c(P=0.5, S=0.05, Min=0.001, Max=0.999)
      fixed.parameters <- c(K1=1, K2=1)
    } else {
      fitted.parameters <- BP_GetFittedParameters(bone, analysis=analysis)
      fixed.parameters <- BP_GetFittedParameters(bone, analysis=analysis, alloptim = TRUE)$fixed.parameters
    }
  }
  array.compactness <- RM_get(x=bone, RMname=analysis, valuename = "array.compactness")
  partial <- RM_get(x=bone, RMname=analysis, valuename = "partial")
  result.radial <- matrix(data = NA, ncol=length(fitted.parameters), nrow=dim(array.compactness)[1])
  colnames(result.radial) <- names(fitted.parameters)
  # LnL <- 0
  # anglefait <- NULL
  # radial.modeled.compactness <- NULL
  # observed.modeled.compactness <- NULL
  # observed.compactness <- NULL
  distance.center <- RM_get(x=bone, RMname=analysis, valuename = "compactness.synthesis")$distance.center
  outmcl <- universalmclapply(1:dim(array.compactness)[1], FUN=function(angle) {
    # for (angle in 1:dim(array.compactness)[1]) {
    data_nm <- array.compactness[angle, , "0"]
    data_m <- array.compactness[angle, , "1"]
    if ((!partial) | (any(data_nm+data_m != 0))) {
      # o <- optim(par=fitted.parameters, fn=BP_LnLCompactness, 
      #            fixed.parameters=fixed.parameters, 
      #            data_nm=data_nm, data_m=data_m, 
      #            distance.center = RM_get(x=bone, RMname=analysis, valuename = "compactness.synthesis")$distance.center, 
      #            method = "Nelder-Mead")
      # Note: the bounds must be named K2 (not k2) to match the parameter names
      lower_limit <- c(P=0, S=-2, Min=0, Max=0.2, K1=-1000, K2=-1000)
      upper_limit <- c(P=1, S=2, Min=0.8, Max=1, K1=1000, K2=1000)
      lower <- lower_limit[names(fitted.parameters)]
      upper <- upper_limit[names(fitted.parameters)]
      o <- optim(par=fitted.parameters, fn=BP_LnLCompactness, 
                 fixed.parameters=fixed.parameters, 
                 data_nm=data_nm, data_m=data_m, 
                 upper = upper, lower = lower, 
                 distance.center = distance.center, 
                 control=list(maxit=1000), method = "L-BFGS-B", hessian = FALSE)
      if (twosteps) {
        # Launch the MCMC step
        p <- o$par
        if (is.null(priors)) {
          priors <- data.frame(Density=character(), Prior1=numeric(), Prior2=numeric(), 
                               SDProp=numeric(), Min=numeric(), Max=numeric(), Init=numeric(), 
                               stringsAsFactors = FALSE)
          if (!is.na(p["P"])) {
            priors <- rbind(priors, data.frame(Density="dunif", Prior1=0, Prior2=1, SDProp=0.005, 
                                               Min=0, Max=1, Init=unname(p["P"]), 
                                               stringsAsFactors = FALSE, row.names = "P"))
          }
          if (!is.na(p["S"])) {
            priors <- rbind(priors, data.frame(Density="dunif", Prior1=0, Prior2=max(c(unname(p["S"])*2, +10)), SDProp=0.3, 
                                               Min=0, Max=max(c(unname(p["S"])*2, +10)), Init=unname(p["S"]), 
                                               stringsAsFactors = FALSE, row.names = "S"))
          }
          if (!is.na(p["Min"])) {
            priors <- rbind(priors, data.frame(Density="dunif", Prior1=0, Prior2=0.8, SDProp=0.2, 
                                               Min=0, Max=0.8, Init=unname(p["Min"]), 
                                               stringsAsFactors = FALSE, row.names = "Min"))
          }
          if (!is.na(p["Max"])) {
            priors <- rbind(priors, data.frame(Density="dunif", Prior1=0.2, Prior2=1, SDProp=0.2, 
                                               Min=0.2, Max=1, Init=unname(p["Max"]), 
                                               stringsAsFactors = FALSE, row.names = "Max"))
          }
          if (!is.na(p["K1"])) {
            priors <- rbind(priors, data.frame(Density="dunif", Prior1=min(c(unname(p["K1"])*2, -10)), Prior2=max(c(unname(p["K1"])*2, +10)), SDProp=0.2, 
                                               Min=min(c(unname(p["K1"])*2, -10)), Max=max(c(unname(p["K1"])*2, +10)), Init=unname(p["K1"]), 
                                               stringsAsFactors = FALSE, row.names = "K1"))
          }
          if (!is.na(p["K2"])) {
            priors <- rbind(priors, data.frame(Density="dunif", Prior1=min(c(unname(p["K2"])*2, -10)), Prior2=max(c(unname(p["K2"])*2, +10)), SDProp=0.2, 
                                               Min=min(c(unname(p["K2"])*2, -10)), Max=max(c(unname(p["K2"])*2, +10)), Init=unname(p["K2"]), 
                                               stringsAsFactors = FALSE, row.names = "K2"))
          }
        }
        mcmc <- HelpersMG::MHalgoGen(
          likelihood = BP_LnLCompactness, 
          bone=NULL, 
          data_nm=data_nm, data_m=data_m, 
          distance.center = distance.center, 
          fixed.parameters=fixed.parameters, 
          parameters_name = "par", 
          parameters = priors, 
          n.iter = 10000, 
          n.chains = 1, 
          n.adapt = 100, 
          thin = 1, 
          adaptive = TRUE)
        fitted.parameters <- HelpersMG::as.parameters(mcmc)
        o <- optim(par=fitted.parameters, fn=BP_LnLCompactness, 
                   fixed.parameters=fixed.parameters, 
                   data_nm=data_nm, data_m=data_m, 
                   upper = upper, lower = lower, 
                   distance.center = distance.center, 
                   control=list(maxit=1000), method = "L-BFGS-B", hessian = FALSE)
      }
      LnL <- o$value
      result.radial <- o$par
      anglefait <- angle
      p <- c(o$par, fixed.parameters)
      Min <- p["Min"]
      Max <- p["Max"]
      # 21/02/2020
      p["S"] <- 1/(4*p["S"])
      cp <- HelpersMG::flexit(x = distance.center, par = p)
      radial.modeled.compactness <- mean(cp * (Max - Min) + Min)
      observed.modeled.compactness <- mean(data_m/(data_m+data_nm), na.rm = TRUE)
      observed.compactness <- sum(data_m)/sum(data_m+data_nm)
      limitlow <- distance.center[which.min(abs(cp-0.025))[1]]
      limithigh <- distance.center[which.min(abs(cp-0.975))[1]]
      TRC <- abs(limithigh-limitlow)
    } else {
      result.radial <- NA
      radial.modeled.compactness <- NA
      observed.modeled.compactness <- NA
      observed.compactness <- NA
      anglefait <- NA
      LnL <- 0
      TRC <- NA
    }
    return(list(result.radial=result.radial, 
                radial.modeled.compactness=radial.modeled.compactness, 
                observed.modeled.compactness=observed.modeled.compactness, 
                observed.compactness=observed.compactness, 
                anglefait=anglefait, TRC=TRC, LnL=LnL))
  }, mc.cores = parallel::detectCores(), 
  clusterExport=list(varlist=c("array.compactness", "twosteps", "fixed.parameters", 
                               "fitted.parameters", "distance.center"), 
                     envir=environment()))
  result.radial <- sapply(X = outmcl, FUN = function(x) x["result.radial"])
  result.radial <- t(as.data.frame(result.radial))
  rownames(result.radial) <- NULL
  radial.modeled.compactness <- unname(unlist(sapply(X = outmcl, FUN = function(x) x["radial.modeled.compactness"])))
  observed.modeled.compactness <- unname(unlist(sapply(X = outmcl, FUN = function(x) x["observed.modeled.compactness"])))
  observed.compactness <- unname(unlist(sapply(X = outmcl, FUN = function(x) x["observed.compactness"])))
  anglefait <- na.omit(unname(unlist(sapply(X = outmcl, FUN = function(x) x["anglefait"]))))
  LnL <- sum(unname(unlist(sapply(X = outmcl, FUN = function(x) x["LnL"]))), na.rm = TRUE)
  TRC <- unname(unlist(sapply(X = outmcl, FUN = function(x) x["TRC"])))
  o <- list()
  o$par <- result.radial
  o$value <- LnL
  o$counts <- NULL
  o$convergence <- NULL
  o$message <- NULL
  o$fixed.parameters <- fixed.parameters
  o$angles <- (RM_get(x=bone, RMname=analysis, valuename = "cut.angle")[-1]+
                 rev(rev(RM_get(x=bone, RMname=analysis, valuename = "cut.angle"))[-1]))/2
  o$radial.modeled.compactness <- radial.modeled.compactness
  o$observed.modeled.compactness <- observed.modeled.compactness
  o$observed.compactness <- observed.compactness
  par <- result.radial
  if (!is.null(fixed.parameters)) {
    fpmat <- matrix(rep(fixed.parameters, nrow(par)), nrow=nrow(par), byrow = TRUE)
    colnames(fpmat) <- names(fixed.parameters)
    par <- cbind(par, fpmat)
  }
  # Min <- par[, "Min"]
  # Max <- par[, "Max"]
  # par[, "Min"] <- Min
  # par[, "Max"] <- Max
  tablestat <- data.frame(mean=numeric(ncol(par)), sd=numeric(ncol(par)), stringsAsFactors = FALSE)
  for (i in 1:ncol(par)) {
    tablestat[i, "mean"] <- mean(par[, i], na.rm = TRUE)
    tablestat[i, "sd"] <- sd(par[, i], na.rm = TRUE)
  }
  rownames(tablestat) <- colnames(par)
  o$summary.table <- tablestat
  # Former analytic computation of TRC, now replaced by the numerical estimate
  # obtained within each radial fit:
  # S <- par[ ,"S"]
  # S <- 1/(4*S)
  # K1 <- par[ ,"K1"]
  # K2 <- par[ ,"K2"]
  # P <- par[ , "P"]
  # l <- 0.025
  # K1 <- ifelse(K1==0, 1E-9, K1)
  # K2 <- ifelse(K2==0, 1E-9, K2)
  # K1 <- ifelse(is.infinite(2^(K1)), sign(K1)*500, K1)
  # K2 <- ifelse(is.infinite(2^(K2)), sign(K2)*500, K2)
  # S1 <- (2^(K1 - 1)*K1*S)/(2^(K1) - 1)
  # S2 <- (2^(K2 - 1)*K2*S)/(2^(K2) - 1)
  # limit.low.TRC <- P - log(((1/(1-l)) ^ K1 - 1)/(2^K1 - 1))/(4 * S1)
  # limit.high.TRC <- 1/(4 * S2) * log(((1/(1-l))^K2 - 1)/(2^K2 - 1)) + P
  # TRC <- abs(unname(limit.high.TRC-limit.low.TRC))
  par <- cbind(par, TRC=TRC)
  o$synthesis <- cbind(par, angles=o$angles)
  bone <- RM_add(x=bone, RMname = analysis, valuename = "optimRadial", value=o)
  if (!silent) print(tablestat)
  return(bone)
}
/scratch/gouwar.j/cran-all/cranData/BoneProfileR/R/BP_FitMLRadialCompactness.R
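# Illustrative sketch (not part of the package sources): how the radial transition
# zone (TRC) reported by BP_FitMLRadialCompactness() is obtained numerically. It is
# the radial distance between the points where the flexit curve reaches 2.5% and
# 97.5%. flexit() comes from HelpersMG, a dependency of BoneProfileR; the parameter
# values below are arbitrary examples.
library(HelpersMG)
distance.center <- seq(0.005, 0.995, length.out = 100)
p <- c(P = 0.6, S = 1/(4 * 0.05), K1 = 1, K2 = 1)   # S already reparameterized as 1/(4*S)
cp <- flexit(x = distance.center, par = p)
limitlow  <- distance.center[which.min(abs(cp - 0.025))[1]]
limithigh <- distance.center[which.min(abs(cp - 0.975))[1]]
TRC <- abs(limithigh - limitlow)
TRC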
#' BP_GetFittedParameters returns the fitted parameters #' @title Return the fitted parameters #' @author Marc Girondot \email{marc.girondot@@gmail.com} #' @return The fitted parameters #' @param bone The bone image to be used #' @param analysis Name or rank of analysis #' @param alloptim If TRUE, return the complete object returned by optim #' @description Return the fitted parameters. #' @family BoneProfileR #' @examples #' \dontrun{ #' # Not run: #' library(BoneProfileR) #' path_Hedgehog <- system.file("extdata", "Erinaceus_europaeus_fem_2-1_small.png", #' package = "BoneProfileR") #' bone <- BP_OpenImage(file=path_Hedgehog) #' bone <- BP_DetectBackground(bone=bone, analysis="logistic") #' bone <- BP_DetectForeground(bone=bone, analysis="logistic") #' bone <- BP_DetectCenters(bone=bone, analysis="logistic") #' bone <- BP_EstimateCompactness(bone, analysis="logistic") #' bone <- BP_FitMLCompactness(bone, analysis="logistic") #' BP_GetFittedParameters(bone, analysis="logistic") #' } #' @export BP_GetFittedParameters <- function(bone, analysis=1, alloptim=FALSE) { out <- RM_get(x=bone, RMname=analysis, valuename = "optim") if (!alloptim) { out <- out$par } return(out) }
/scratch/gouwar.j/cran-all/cranData/BoneProfileR/R/BP_GetFittedParameters.R
#' BP_ListAnalyses lists the analyses stored in an object
#' @title List the analyses stored in an object
#' @author Marc Girondot \email{marc.girondot@@gmail.com}
#' @return The list of analyses
#' @param bone The bone image to be used
#' @param silent Should the results be shown?
#' @param max.level If TRUE, will return all list elements of the object
#' @description Get the analyses stored in an object.
#' @family BoneProfileR
#' @examples
#' \dontrun{
#' # Not run:
#' library(BoneProfileR)
#' path_Hedgehog <- system.file("extdata", "Erinaceus_europaeus_fem_2-1_small.png", 
#'                              package = "BoneProfileR")
#' bone <- BP_OpenImage(file=path_Hedgehog)
#' bone <- BP_DetectBackground(bone=bone, analysis="logistic")
#' bone <- BP_DetectForeground(bone=bone, analysis="logistic")
#' bone <- BP_DetectCenters(bone=bone, analysis="logistic")
#' bone <- BP_EstimateCompactness(bone, analysis="logistic")
#' bone <- BP_FitMLCompactness(bone, analysis="logistic")
#' plot(bone)
#' plot(bone, type="observations")
#' plot(bone, type="observations+model", analysis=1)
#' bone <- BP_DuplicateAnalysis(bone, from="logistic", to="flexit")
#' BP_ListAnalyses(bone)
#' }
#' @export
BP_ListAnalyses <- function(bone, silent=TRUE, max.level = FALSE) {
  out <- RM_list(x=bone, silent=silent, max.level = max.level)
  return(out)
}
/scratch/gouwar.j/cran-all/cranData/BoneProfileR/R/BP_ListAnalyses.R
#' BP_LnLCompactness estimates the likelihood of a compactness model for a bone section
#' @title Estimation of the likelihood of a bone section
#' @author Marc Girondot \email{marc.girondot@@gmail.com}
#' @return The -Ln L
#' @param par Parameters of the model
#' @param bone The bone image to be used
#' @param fixed.parameters Fixed parameters of the model
#' @param data_m Number of mineralized pixels
#' @param data_nm Number of non-mineralized pixels
#' @param distance.center Distances to the center
#' @param analysis Name or rank of analysis
#' @description Computes the -log likelihood of a compactness model for a bone section.
#' @family BoneProfileR
#' @examples
#' \dontrun{
#' # Not run:
#' path_Hedgehog <- system.file("extdata", "Erinaceus_europaeus_fem_2-1_small.png", 
#'                              package = "BoneProfileR")
#' bone <- BP_OpenImage(file=path_Hedgehog)
#' bone <- BP_DetectBackground(bone=bone)
#' bone <- BP_DetectForeground(bone=bone)
#' bone <- BP_DetectCenters(bone=bone)
#' bone <- BP_EstimateCompactness(bone)
#' plot(bone)
#' }
#' @export
BP_LnLCompactness <- function(par, bone=NULL, 
                              data_m=NULL, data_nm=NULL, 
                              distance.center=NULL, 
                              fixed.parameters=NULL, 
                              analysis=1) {
  p <- c(par, fixed.parameters)
  Min <- p["Min"]
  Max <- p["Max"]
  # print(p)
  # 21/02/2020
  p["S"] <- 1/(4*p["S"])
  if (inherits(bone, "BoneProfileR")){
    # p <- c(P=0.5, S=0.1, K1=1, K2=1, Min=0.05, Max=0.99)
    data <- RM_get(x=bone, RMname=analysis, valuename = "compactness.synthesis")
    if (is.null(distance.center)) distance.center <- data$distance.center
    if (is.null(data_m)) data_m <- data$mineralized
    if (is.null(data_nm)) data_nm <- data$unmineralize
  }
  data_m[length(data_m)] <- data_m[length(data_m)] + data_nm[length(data_nm)]
  data_nm[length(data_nm)] <- 0
  c <- flexit(x = distance.center, par = p) * (Max - Min) + Min
  c <- ifelse(c<1E-10, 1E-10, c)
  c <- ifelse(c>1-1E-10, 1-(1E-10), c)
  L <- dbinom(x=data_m, size = data_nm + data_m, prob=c, log = TRUE)
  LnL <- -sum(L)
  return(LnL)
}
/scratch/gouwar.j/cran-all/cranData/BoneProfileR/R/BP_LnLCompactness.R
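# A minimal, hypothetical sketch of the quantity computed by BP_LnLCompactness
# above: the modeled compactness at a relative distance d is
# Min + (Max - Min) * flexit(d), and the -ln L is the binomial log-likelihood
# of the mineralized counts against that curve. The counts and parameter
# values below are toy values, not package defaults; flexit() comes from
# HelpersMG, as used above.
library(HelpersMG)
d  <- seq(from = 0.05, to = 0.95, by = 0.1)       # toy relative distances to center
m  <- c(2, 3, 5, 9, 15, 22, 27, 29, 30, 30)       # toy mineralized pixel counts
nm <- 30 - m                                      # toy unmineralized pixel counts
p  <- c(P = 0.5, S = 1/(4*0.1), K1 = 1, K2 = 1)   # S already rescaled as in BP_LnLCompactness
cc <- flexit(x = d, par = p) * (0.99 - 0.01) + 0.01  # with Min = 0.01, Max = 0.99
-sum(dbinom(x = m, size = m + nm, prob = cc, log = TRUE))  # the -ln L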
#' BP_OpenImage opens an image
#' @title Open an image
#' @author Marc Girondot \email{marc.girondot@@gmail.com}
#' @return Characteristics of an image
#' @param file The file to be opened
#' @param name Name of this slice
#' @param ijtiff Should the ijtiff package be used to read a tiff image?
#' @description Open an image.
#' @family BoneProfileR
#' @examples
#' \dontrun{
#' # Not run:
#' library(BoneProfileR)
#' path_Hedgehog <- system.file("extdata", "Erinaceus_europaeus_fem_2-1_small.png", 
#'                              package = "BoneProfileR")
#' bone <- BP_OpenImage(file=path_Hedgehog)
#' plot(bone)
#' path_Hedgehog <- system.file("extdata", "Erinaceus_europaeus_fem_2-1_small.tif", 
#'                              package = "BoneProfileR")
#' bone <- BP_OpenImage(file=path_Hedgehog)
#' plot(bone)
#' bone <- BP_OpenImage(file=path_Hedgehog, ijtiff=TRUE)
#' plot(bone)
#' # A partial section
#' path_Dicynodon <- system.file("extdata", "Dicynodon_tibia_11.11.1.T_b_b-1.png", 
#'                               package = "BoneProfileR")
#' bone <- BP_OpenImage(file=path_Dicynodon)
#' plot(bone)
#' # To open a file with a dialog:
#' bone <- BP_OpenImage()
#' }
#' @export
BP_OpenImage <- function(file=file.choose(), name=NULL, ijtiff=FALSE) {
  if (grepl("\\.tif", file) | grepl("\\.TIF", file)) {
    if (isFALSE(ijtiff)) {
      if (!requireNamespace("tiff", quietly = TRUE)) {
        stop("The tiff package is absent; please install it first to read a tiff image")
      }
      bone <- suppressWarnings(tiff::readTIFF(file))
      if (length(dim(bone)) == 3) {
        bone <- aperm(bone, c(2, 1, 3))
      } else {
        bone_pre <- array(data = NA, dim=c(dim(bone)[2], dim(bone)[1], 1, 1))
        bone_pre[, , 1, 1] <- bone
        bone <- bone_pre
      }
      bone <- suppressWarnings(as.cimg(bone))
    } else {
      if (!requireNamespace("ijtiff", quietly = TRUE)) {
        stop("The ijtiff package is absent; please install it first to read a tiff image with the option ijtiff")
      }
      bone <- ijtiff::read_tif(file)
      bone <- aperm(bone, c(2, 1, 4, 3))
      bone <- suppressWarnings(as.cimg(bone))
      # Rescale 8-bit channels to [0, 1] when necessary
      if (max(bone[, , , 1]) > 1) bone[, , , 1] <- bone[, , , 1] / 255
      if (dim(bone)[4] > 1) if (max(bone[, , , 2]) > 1) bone[, , , 2] <- bone[, , , 2] / 255
      if (dim(bone)[4] > 2) if (max(bone[, , , 3]) > 1) bone[, , , 3] <- bone[, , , 3] / 255
    }
  } else {
    bone <- suppressWarnings(load.image(file))
  }
  if (dim(bone)[4] != 3) {
    bone_pre <- array(data = 0, dim=c(dim(bone)[1], dim(bone)[2], 1, 3))
    bone_pre <- suppressWarnings(as.cimg(bone_pre))
    if (dim(bone)[4] == 1) {
      # Grayscale: duplicate the single channel into RGB
      # bone_pre[, , 1, 1] <- bone
      bone_pre <- add.colour(bone)
      # class(bone_pre) <- c("cimg", "imager_array", "numeric")
    }
    if (dim(bone)[4] == 2) {
      bone_pre[, , 1, 1:dim(bone)[4]] <- bone
      # bone_pre[, , 1, 3] <- 1
      # class(bone_pre) <- c("cimg", "imager_array", "numeric")
    }
    if (dim(bone)[4] == 4) {
      # RGBA: remove the alpha channel
      bone_pre <- flatten.alpha(bone)
      # bone_pre[, , 1, 3] <- 1
      # class(bone_pre) <- c("cimg", "imager_array", "numeric")
    }
    bone <- bone_pre
  }
  if (is.null(name)) {
    name <- basename(file)
  }
  bone <- addS3Class(bone, "BoneProfileR")
  attributes(bone) <- modifyList(attributes(bone), list(name=name))
  return(bone)
}
/scratch/gouwar.j/cran-all/cranData/BoneProfileR/R/BP_OpenImage.R
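# A minimal sketch of the channel normalization performed by BP_OpenImage
# above: a single-channel (grayscale) image is promoted to three identical
# RGB channels with imager::add.colour(). The 2x2 image below is a toy example.
library(imager)
gray <- as.cimg(array(runif(4), dim = c(2, 2, 1, 1)))  # x, y, depth, 1 channel
rgb3 <- add.colour(gray)                               # duplicate into R, G, B
dim(rgb3)  # 2 2 1 3: same pixels, now with 3 color channels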
#' plot.BoneProfileR displays a bone section
#' @title Plot a bone section
#' @author Marc Girondot \email{marc.girondot@@gmail.com}
#' @return The data used for the plot, returned invisibly; NULL for image types
#' @param x The bone image
#' @param message The message to be displayed
#' @param type The type of plot; see description
#' @param angle Which angle of the radial model to show
#' @param parameter.mcmc The posterior parameter to show for type = "mcmc"
#' @param options.mcmc The options for the plot of type mcmc
#' @param show.colors Should the background and foreground colors be shown?
#' @param show.centers Should the centers be shown?
#' @param show.grid Should the grid be shown?
#' @param show.legend Should a legend be shown?
#' @param analysis Name or number of analysis to be plotted
#' @param radial.variable Name of the radial variable to plot
#' @param CI Which confidence interval should be plotted: MCMC or ML
#' @param restorePar If TRUE, restore the par parameter at the exit
#' @param mar The margin for type being "model" or "observations"
#' @param angle.3D The angle between x and y for the 3Dcolors graph
#' @param ... Not used
#' @description Display a bone section.\cr
#' The type value can be:\cr
#' Image plot: original, mineralized, unmineralized, section\cr
#' Original is the original image, mineralized is the mineral interpretation of the section, 
#' unmineralized is the unmineralized interpretation of the section, section is the interpretation of the section.\cr
#' colors shows the histograms of pixel information with foreground and background colors if they are defined.\cr
#' 3Dcolors shows the pixel colors in 3D.\cr
#' Global analysis: observations, model, observations+model\cr
#' Radial analysis: radial\cr
#' If angle is not null and a radial analysis exists, it will show the model for this angle.\cr
#' mcmc: shows the posterior distribution of the parameter
#' @family BoneProfileR
#' @examples
#' \dontrun{
#' # Not run:
#' library(BoneProfileR)
#' bone <- BP_OpenImage()
#' # or
#' path_Hedgehog <- system.file("extdata", "Erinaceus_europaeus_fem_2-1_small.png", 
#'                              package = "BoneProfileR")
#' bone <- BP_OpenImage(file=path_Hedgehog)
#' bone <- BP_DetectBackground(bone=bone, analysis="logistic")
#' bone <- BP_DetectForeground(bone=bone, analysis="logistic")
#' plot(bone, type="colors")
#' bone <- BP_DetectCenters(bone=bone, analysis="logistic")
#' plot(bone, type="3Dcolors")
#' bone <- BP_EstimateCompactness(bone, analysis="logistic", rotation.angle = 1)
#' bone <- BP_FitMLCompactness(bone, analysis="logistic")
#' plot(bone)
#' #
#' path_Hedgehog <- system.file("extdata", "Erinaceus_europaeus_fem_2-1_small.png", 
#'                              package = "BoneProfileR")
#' bone <- BP_OpenImage(file=path_Hedgehog)
#' bone <- BP_DetectBackground(bone=bone, analysis="logistic")
#' bone <- BP_DetectForeground(bone=bone, analysis="logistic")
#' bone <- BP_DetectCenters(bone=bone, analysis="logistic")
#' bone <- BP_EstimateCompactness(bone, analysis="logistic")
#' bone <- BP_FitMLCompactness(bone, analysis="logistic")
#' plot(bone)
#' plot(bone, type="observations")
#' plot(bone, type="observations+model", analysis=1)
#' fittedpar <- BP_GetFittedParameters(bone, analysis="logistic")
#' bone <- BP_DuplicateAnalysis(bone, from="logistic", to="flexit")
#' bone <- BP_FitMLCompactness(bone, 
#'                             fitted.parameters=c(fittedpar, K1=1, K2=1), 
#'                             fixed.parameters=NULL, analysis="flexit")
#' compare_AIC(Logistic=BP_GetFittedParameters(bone, analysis="logistic", alloptim=TRUE), 
#'             Flexit=BP_GetFittedParameters(bone, analysis="flexit", alloptim=TRUE))
#' out4p <- plot(bone, type="observations+model", analysis="logistic")
#' out6p <- plot(bone, type="observations+model", analysis="flexit")
#' bone <- BP_FitBayesianCompactness(bone, analysis="logistic")
#' plot(bone, type="observations+model", CI="MCMC")
#' bone <- BP_FitMLRadialCompactness(bone)
#' plot(bone, type="radial", radial.variable=c("P", "S"))
#' plot(bone, type="radial", radial.variable=c("P", "S", "Min", "Max"))
#' }
#' @method plot BoneProfileR
#' @export
plot.BoneProfileR <- function(x, message=NULL, type="original", angle=NULL, 
                              show.centers=TRUE, show.colors=TRUE, show.grid=TRUE, 
                              analysis=1, parameter.mcmc = "S", 
                              options.mcmc = list(), 
                              restorePar = TRUE, 
                              mar=NULL, angle.3D = 55, 
                              CI="ML", radial.variable= "S", show.legend=TRUE, ...) {
  # message=NULL; type="original"; angle=NULL; parameter.mcmc = "S"; options.mcmc = list(); restorePar=TRUE; mar=NULL; show.centers=TRUE; show.colors=TRUE; show.grid=TRUE; analysis=1; CI="ML"; radial.variable= "S"; show.legend=TRUE
  # type <- "observations"
  # type <- "radial"
  # type <- "model"
  # type <- "colors"
  
  bone <- x
  oldpar <- par(no.readonly = TRUE)    # code line i
  if (restorePar) on.exit(par(oldpar)) # code line i + 1
  
  type <- match.arg(type, choices = c("original", "mineralized", "unmineralized", 
                                      "section", "radial", "observations", 
                                      "model", "observations+model", "mcmc", 
                                      "colors", "3Dcolors"))
  
  out <- BP_ListAnalyses(bone=x)
  if (is.null(out[analysis][[1]]) & (type != "original") & (type != "colors")) {
    stop(paste0("The analysis ", analysis, " does not exist"))
  }
  
  if (type == "3Dcolors") {
    threshold <- RM_get(x=bone, RMname=analysis, valuename = "threshold")
    DF_background <- data.frame(Red=as.numeric(bone[, , 1, 1])[as.vector(!threshold[])], 
                                Green=as.numeric(bone[, , 1, 2])[as.vector(!threshold[])], 
                                Blue=as.numeric(bone[, , 1, 3])[as.vector(!threshold[])])
    DF_foreground <- data.frame(Red=as.numeric(bone[, , 1, 1])[as.vector(threshold[])], 
                                Green=as.numeric(bone[, , 1, 2])[as.vector(threshold[])], 
                                Blue=as.numeric(bone[, , 1, 3])[as.vector(threshold[])])
    DF <- rbind(DF_background, DF_foreground)
    # install.packages("scatterplot3d") # Install
    # library("scatterplot3d") # load
    getFromNamespace(x="scatterplot3d", ns="scatterplot3d")(DF[, 1:3], 
                     xlim = c(0, 1), ylim = c(0, 1), zlim = c(0, 1), 
                     xlab = "Red", ylab = "Green", zlab = "Blue", 
                     pch=c(rep(19, nrow(DF_background)), rep(21, nrow(DF_foreground))), 
                     color=rgb(red = DF[, 1], green = DF[, 2], blue = DF[, 3], alpha = 1), 
                     angle = angle.3D, 
                     bg="black")
    if (show.legend) {
      legend("topleft", legend=c("Background", "Foreground"), 
             col=c(rgb(red = mean(DF_background[, 1]), green = mean(DF_background[, 2]), 
                       blue = mean(DF_background[, 3]), alpha = 1), "black"), 
             pch=c(19, 21), 
             pt.bg=c(rgb(red = mean(DF_background[, 1]), green = mean(DF_background[, 2]), 
                         blue = mean(DF_background[, 3]), alpha = 1), 
                     rgb(red = mean(DF_foreground[, 1]), green = mean(DF_foreground[, 2]), 
                         blue = mean(DF_foreground[, 3]), alpha = 1)))
    }
  }
  
  if (type == "colors") {
    DF <- data.frame(Red=as.numeric(bone[, , 1, 1]), 
                     Green=as.numeric(bone[, , 1, 2]), 
                     Blue=as.numeric(bone[, , 1, 3]))
    if (!is.null(analysis)) {
      bg <- col2rgb(RM_get(x=bone, RMname=analysis, valuename = "bg"))/255
      fg <- col2rgb(RM_get(x=bone, RMname=analysis, valuename = "fg"))/255
    } else {
      bg <- NULL
      fg <- NULL
    }
    layout(1:3)
    ppar <- par(mar=c(2, 4, 2, 1))
    par(xpd=TRUE)
    hist(DF$Red, main="", xlab="", col="red")
    ymax <- ScalePreviousPlot()$ylim["end"]
    if (!is.null(bg)) {
      segments(x0=bg["red", 1], x1=bg["red", 1], y0=0, y1=ymax)
      text(x=bg["red", 1], y=ymax*1.1, labels = "Background")
    }
    if (!is.null(fg)) {
      segments(x0=fg["red", 1], x1=fg["red", 1], y0=0, y1=ymax)
      text(x=fg["red", 1], y=ymax*1.1, labels = "Foreground")
    }
    hist(DF$Green, main="", xlab="", col="green")
    ymax <- ScalePreviousPlot()$ylim["end"]
    if (!is.null(bg)) {
      segments(x0=bg["green", 1], x1=bg["green", 1], y0=0, y1=ymax)
      text(x=bg["green", 1], y=ymax*1.1, labels = "Background")
    }
    if (!is.null(fg)) {
      segments(x0=fg["green", 1], x1=fg["green", 1], y0=0, y1=ymax)
      text(x=fg["green", 1], y=ymax*1.1, labels = "Foreground")
    }
    hist(DF$Blue, main="", xlab="", col="blue")
    ymax <- ScalePreviousPlot()$ylim["end"]
    if (!is.null(bg)) {
      segments(x0=bg["blue", 1], x1=bg["blue", 1], y0=0, y1=ymax)
      text(x=bg["blue", 1], y=ymax*1.1, labels = "Background")
    }
    if (!is.null(fg)) {
      segments(x0=fg["blue", 1], x1=fg["blue", 1], y0=0, y1=ymax)
      text(x=fg["blue", 1], y=ymax*1.1, labels = "Foreground")
    }
    layout(1)
    par(mar=ppar$mar)
  }
  
  if (type == "mcmc") {
    parameter.mcmc <- match.arg(parameter.mcmc, choices = c("P", "S", "Min", "Max", "K1", "K2"))
    outMCMC <- RM_get(x = x, RM = "RM", RMname = analysis, valuename = "mcmc")
    par(xpd=FALSE)
    if (!is.null(outMCMC)) {
      options.mcmc <- modifyList(options.mcmc, list(x=outMCMC, parameters=parameter.mcmc))
      out <- do.call(what = getFromNamespace("plot.mcmcComposite", ns="HelpersMG"), 
                     args=options.mcmc)
    } else {
      out <- NULL
    }
  }
  
  if (type == "radial") {
    if (is.null(RM_get(x=bone, RMname=analysis, valuename = "optimRadial"))) 
      stop("The radial analysis has not been performed")
    if (is.numeric(analysis)) 
      analysis <- names(RM_list(x=bone, silent = TRUE))[analysis]
    par(mar=c(4, 4, 2, 1)+0.4)
    out <- RM_get(x=bone, RMname=analysis, valuename = "optimRadial")$synthesis
    if (length(radial.variable) != 1) layout(mat = 1:length(radial.variable))
    angles <- RM_get(x=bone, RMname=analysis, valuename = "optimRadial")$angles
    for (i in radial.variable) {
      ylim <- c(0, 1)
      if (i %in% c("S", "K1", "K2")) ylim <- NULL
      plot(angles, out[, i], las=1, bty="n", xlab="Angle", ylab=i, type="l", 
           ylim=ylim, xaxt="n", xlim=c(-pi, pi))
      axis(1, at=seq(from=-pi, to=pi, by=angles[2]-angles[1]), 
           labels = specify_decimal(seq(from=-pi, to=pi, by=angles[2]-angles[1]), decimals = 2), 
           cex.axis=0.5, las=2)
      axis(1, at=seq(from=-pi, to=pi, by=2*(angles[2]-angles[1])), 
           labels = FALSE, lwd.ticks = 2)
    }
  }
  
  if (type %in% c("observations", "model", "observations+model")) {
    if (!is.null(angle) & (!is.null(RM_get(x=bone, RMname=analysis, valuename = "optimRadial")))) {
      # Show a single angular slice
      distance.center <- RM_get(x=bone, RMname=analysis, valuename = "compactness.synthesis")$distance.center
      angles <- RM_get(x=bone, RMname=analysis, valuename = "optimRadial")$angles
      indice.angle <- which.min(abs(angles-angle))
      main <- paste0(" : Angle [", specify_decimal(angles[indice.angle], decimals = 3), ",", 
                     specify_decimal(angles[ifelse(indice.angle == length(angles), 1, indice.angle+1)], decimals = 3), "]")
      array.compactness <- RM_get(x=bone, RMname=analysis, valuename = "array.compactness")
      angles <- RM_get(x=bone, RMname=analysis, valuename = "optimRadial")$angles
      indice.angle <- which.min(abs(angles-angle))
      data_nm <- array.compactness[indice.angle, , "0"]
      data_m <- array.compactness[indice.angle, , "1"]
      compactness.synthesis <- data.frame(distance.center=distance.center, 
                                          mineralized=data_m, 
                                          unmineralized=data_nm, 
                                          compactness=data_m/(data_m+data_nm))
      if ((type=="model") | (type=="observations+model")) {
        p <- RM_get(x=bone, RMname=analysis, valuename = "optimRadial")$synthesis[indice.angle, , drop=TRUE]
      }
    } else {
      # Show the whole section
      main <- ""
      # if ((type =="observations") | (type=="observations+model")) {
      compactness.synthesis <- RM_get(x=bone, RMname=analysis, valuename = "compactness.synthesis")
      # }
      if ((type=="model") | (type=="observations+model")) {
        p <- c(RM_get(x=bone, RMname=analysis, valuename = "optim")$par, 
               RM_get(x=bone, RMname=analysis, valuename = "optim")$fixed.parameters)
      }
    }
    if (is.null(compactness.synthesis)) {
      stop("The bone section has not yet been analyzed. Use BP_EstimateCompactness().")
    }
    
    if (type == "observations") {
      par(xaxs="i", yaxs="r")
      if (is.null(mar)) {
        par(mar=c(4, 4, 2, 5)+0.4)
      } else {
        par(mar=mar)
      }
      m <- compactness.synthesis$mineralized
      nm <- compactness.synthesis$unmineralize
      plot(compactness.synthesis$distance.center, compactness.synthesis$compactness, 
           xlim=c(0, 1), ylim=c(0, 1), type="l", las=1, bty="n", 
           xlab="Distance from the center", ylab="Compactness", lwd=2, 
           main = main)
      lines(x=compactness.synthesis$distance.center, y=(m+nm)/max(m+nm), col="blue")
      axis(side = 4, at=seq(from=0, to=1, by=0.2), 
           labels = round(seq(from=0, to=1, by=0.2)*max(m+nm), 0), 
           las=1, col.axis="blue", col="blue")
      mtext("Number of pixels", side=4, line=3, col="blue")
      out <- data.frame(distance.center=compactness.synthesis$distance.center, 
                        observed.compactness=compactness.synthesis$compactness)
      if (show.legend) {
        legend("bottomright", legend=c("Number of pixels", "Observed compactness"), 
               lty=c(1, 1), lwd=c(1, 2), col=c("blue", "black"), cex=0.8)
      }
    }
    
    if (type == "model") {
      if (is.numeric(analysis)) analysis <- names(RM_list(x=bone, silent = TRUE))[analysis]
      Min <- p["Min"]
      Max <- p["Max"]
      # 21/02/2020
      p["S"] <- 1/(4*p["S"])
      c <- flexit(x = compactness.synthesis$distance.center, par = p) * (Max - Min) + Min
      out <- data.frame(distance.center=compactness.synthesis$distance.center, 
                        modeled.compactness=c)
      par(xaxs="i", yaxs="r")
      if (is.null(mar)) {
        par(mar=c(4, 4, 2, 5)+0.4)
      } else {
        par(mar=mar)
      }
      plot(x=compactness.synthesis$distance.center, y=c, 
           xlim=c(0, 1), ylim=c(0, 1), type="n", las=1, bty="n", 
           xlab="Distance from the center", ylab="Compactness", lwd=2, lty=3, 
           main=paste(analysis, main))
      if ((CI == "MCMC") & (is.null(angle)) & (!is.null(RM_get(x=bone, RMname=analysis, valuename = "mcmc")))) {
        polygon(x=c(compactness.synthesis$distance.center, rev(compactness.synthesis$distance.center)), 
                y=c(RM_get(x=bone, RMname=analysis, valuename = "mcmc")$quantiles["2.5%", ], 
                    rev(RM_get(x=bone, RMname=analysis, valuename = "mcmc")$quantiles["97.5%", ])), 
                col="lightgrey", border="lightgrey", lwd=3)
      }
      if ((CI == "ML") & (is.null(angle)) & (!is.null(RM_get(x=bone, RMname=analysis, valuename = "optim")$quantiles))) {
        polygon(x=c(compactness.synthesis$distance.center, rev(compactness.synthesis$distance.center)), 
                y=c(RM_get(x=bone, RMname=analysis, valuename = "optim")$quantiles["2.5%", ], 
                    rev(RM_get(x=bone, RMname=analysis, valuename = "optim")$quantiles["97.5%", ])), 
                col="lightgrey", border="lightgrey", lwd=3)
      }
      lines(x=compactness.synthesis$distance.center, y=c, lwd=2, lty=3)
      if (show.legend) {
        if ((CI == "MCMC") & (is.null(angle)) & (!is.null(RM_get(x=bone, RMname=analysis, valuename = "optim")))) {
          legend("bottomright", legend=c("Model", "95% Credibility interval MCMC"), 
                 lty=c(3, 1), lwd=c(2, 6), col=c("black", "lightgrey"), cex=0.8)
        } else {
          if ((CI == "ML") & (is.null(angle)) & (!is.null(RM_get(x=bone, RMname=analysis, valuename = "optim")$quantiles))) {
            legend("bottomright", legend=c("Model", "95% Confidence interval ML"), 
                   lty=c(3, 1), lwd=c(2, 6), col=c("black", "lightgrey"), cex=0.8)
          } else {
            legend("bottomright", legend=c("Model"), 
                   lty=c(3), lwd=c(2), col=c("black"), cex=0.8)
          }
        }
      }
    }
    
    if (type == "observations+model") {
      if (is.numeric(analysis)) analysis <- names(RM_list(x=bone, silent = TRUE))[analysis]
      par(xaxs="i", yaxs="r")
      if (is.null(mar)) {
        par(mar=c(4, 4, 2, 5)+0.4)
      } else {
        par(mar=mar)
      }
      m <- compactness.synthesis$mineralized
      nm <- compactness.synthesis$unmineralize
      plot(compactness.synthesis$distance.center, compactness.synthesis$compactness, 
           xlim=c(0, 1), ylim=c(0, 1), type="n", las=1, bty="n", 
           xlab="Distance from the center", ylab="Compactness", lwd=2, 
           main=paste(analysis, main))
      if ((CI == "MCMC") & (is.null(angle)) & (!is.null(RM_get(x=bone, RMname=analysis, valuename = "mcmc")))) {
        polygon(x=c(compactness.synthesis$distance.center, rev(compactness.synthesis$distance.center)), 
                y=c(RM_get(x=bone, RMname=analysis, valuename = "mcmc")$quantiles["2.5%", ], 
                    rev(RM_get(x=bone, RMname=analysis, valuename = "mcmc")$quantiles["97.5%", ])), 
                col="lightgrey", border="lightgrey", lwd=3)
      }
      if ((CI == "ML") & (is.null(angle)) & (!is.null(RM_get(x=bone, RMname=analysis, valuename = "optim")$quantiles))) {
        polygon(x=c(compactness.synthesis$distance.center, rev(compactness.synthesis$distance.center)), 
                y=c(RM_get(x=bone, RMname=analysis, valuename = "optim")$quantiles["2.5%", ], 
                    rev(RM_get(x=bone, RMname=analysis, valuename = "optim")$quantiles["97.5%", ])), 
                col="lightgrey", border="lightgrey", lwd=3)
      }
      lines(compactness.synthesis$distance.center, compactness.synthesis$compactness, lwd=2)
      lines(x=compactness.synthesis$distance.center, y=(m+nm)/max(m+nm), col="blue")
      axis(side = 4, at=seq(from=0, to=1, by=0.2), 
           labels = round(seq(from=0, to=1, by=0.2)*max(m+nm), 0), 
           las=1, col.axis="blue", col="blue")
      mtext("Number of pixels", side=4, line=3, col="blue")
      out <- data.frame(distance.center=compactness.synthesis$distance.center, 
                        observed.compactness=compactness.synthesis$compactness)
      # p <- c(RM_get(x=bone, RMname=analysis, valuename = "optim")$par, RM_get(x=bone, RMname=analysis, valuename = "optim")$fixed.parameters)
      Min <- p["Min"]
      Max <- p["Max"]
      # 21/02/2020
      p["S"] <- 1/(4*p["S"])
      c <- flexit(x = compactness.synthesis$distance.center, par = p) * (Max - Min) + Min
      out <- cbind(out, modeled.compactness=c)
      lines(x=compactness.synthesis$distance.center, y=c, xlim=c(0, 1), lwd=2, lty=3)
      if (show.legend) {
        if ((CI == "MCMC") & (is.null(angle)) & (!is.null(RM_get(x=bone, RMname=analysis, valuename = "mcmc")))) {
          legend("bottomright", 
                 legend=c("Number of pixels", "Observed compactness", "Model", "95% Credibility interval MCMC"), 
                 lty=c(1, 1, 3, 1), lwd=c(1, 2, 2, 6), 
                 col=c("blue", "black", "black", "lightgrey"), cex=0.8)
        } else {
          if ((CI == "ML") & (is.null(angle)) & (!is.null(RM_get(x=bone, RMname=analysis, valuename = "optim")$quantiles))) {
            legend("bottomright", 
                   legend=c("Number of pixels", "Observed compactness", "Model", "95% Confidence interval ML"), 
                   lty=c(1, 1, 3, 1), lwd=c(1, 2, 2, 6), 
                   col=c("blue", "black", "black", "lightgrey"), cex=0.8)
          } else {
            legend("bottomright", 
                   legend=c("Number of pixels", "Observed compactness", "Model"), 
                   lty=c(1, 1, 3), lwd=c(1, 2, 2), 
                   col=c("blue", "black", "black"), cex=0.8)
          }
        }
      }
    }
  }
  
  if (type %in% c("original", "mineralized", "unmineralized", "section")) {
    # layout(1)
    out <- NULL
    bone_x <- NULL
    threshold <- RM_get(x=x, RMname=analysis, valuename = "threshold")
    contour <- RM_get(x=x, RMname=analysis, valuename = "contour")
    bone <- NULL
    if (type != "original") {
      if ((type == "mineralized") & !is.null(threshold)) bone_x <- threshold
      if ((type == "unmineralized") & !is.null(threshold) & !is.null(contour)) bone_x <- contour & !threshold
      if ((type == "section") & !is.null(contour)) bone_x <- contour
      if (!is.null(bone_x)) {
        bone <- x
        bone[, , 1, 1] <- !bone_x
        if (dim(bone)[4] > 1) bone[, , 1, 2] <- !bone_x
        if (dim(bone)[4] > 2) bone[, , 1, 3] <- !bone_x
      }
    } else {
      bone <- x
    }
    if (!is.null(bone)) {
      par(xaxs="i", yaxs="i")
      if (is.null(mar)) {
        par(mar=c(4, 0, 0, 0))
      } else {
        par(mar=mar)
      }
      getFromNamespace("plot.cimg", ns="imager")(bone, bty="n", axes=FALSE, 
                                                 xlab="", ylab="", asp = 1)
      xl <- ScalePreviousPlot()$xlim
      yl <- ScalePreviousPlot()$ylim
      par(xpd=TRUE)
      if (!is.null(message)) {
        text(x=xl["begin"], y=yl["begin"]-yl["range"]*0.05, labels = message, pos=4)
      }
      if (show.colors) {
        bg <- RM_get(x=x, RMname=analysis, valuename = "bg")
        if (!is.null(bg)) {
          text(x=xl["begin"]+xl["range"]*0.08, y=yl["begin"]-yl["range"]*0.09, 
               labels = "Background\ncolor", pos=4, cex=0.8)
          polygon(x=c(xl["begin"]+xl["range"]*0.02, xl["begin"]+xl["range"]*0.07, 
                      xl["begin"]+xl["range"]*0.07, xl["begin"]+xl["range"]*0.02), 
                  y=c(yl["begin"]-yl["range"]*0.12, yl["begin"]-yl["range"]*0.12, 
                      yl["begin"]-yl["range"]*0.07, yl["begin"]-yl["range"]*0.07), 
                  col=bg)
        }
        fg <- RM_get(x=x, RMname=analysis, valuename = "fg")
        if (!is.null(fg)) {
          text(x=xl["center"]/2+xl["range"]*0.08, y=yl["begin"]-yl["range"]*0.09, 
               labels = "Foreground\ncolor", pos=4, cex=0.8)
          polygon(x=c(xl["center"]/2+xl["range"]*0.02, xl["center"]/2+xl["range"]*0.07, 
                      xl["center"]/2+xl["range"]*0.07, xl["center"]/2+xl["range"]*0.02), 
                  y=c(yl["begin"]-yl["range"]*0.12, yl["begin"]-yl["range"]*0.12, 
                      yl["begin"]-yl["range"]*0.07, yl["begin"]-yl["range"]*0.07), 
                  col=fg)
        }
      }
      if (show.grid & !is.null(RM_get(x=x, RMname=analysis, valuename = "compactness"))) {
        # Draw the grid
        compactness <- RM_get(x=x, RMname=analysis, valuename = "compactness")
        peripherie <- RM_get(x=x, RMname=analysis, valuename = "peripherie")
        l.angle <- levels(compactness$cut.angle)
        angles <- RM_get(x=bone, RMname=analysis, valuename = "cut.angle")
        if (RM_get(x=bone, RMname=analysis, valuename = "partial")) angles <- peripherie[, "angle"]
        l.distance <- levels(compactness$cut.distance.center)
        max.distance <- NULL
        for (angle_ec in angles[-1]) {
          angle_ecp <- (angle_ec - RM_get(x=x, RMname=analysis, valuename = "rotation.angle")) %% (2*pi)
          angle_ecp <- ifelse(angle_ecp > pi, -(2*pi)+angle_ecp, angle_ecp)
          md <- peripherie$peripherie[which.min(abs(peripherie$angle-angle_ec))]
          segments(x0=RM_get(x=x, RMname=analysis, valuename = "used.centers")["center.x"], 
                   x1=(RM_get(x=x, RMname=analysis, valuename = "used.centers")["center.x"]+cos(angle_ecp)*md), 
                   y0=RM_get(x=x, RMname=analysis, valuename = "used.centers")["center.y"], 
                   y1=(RM_get(x=x, RMname=analysis, valuename = "used.centers")["center.y"]+sin(angle_ecp)*md), 
                   col = rgb(red=0.5, green=0.5, blue=0.5, alpha=0.8))
        }
        # The 0 rad angle
        angle_ec <- 0
        angle_ecp <- (angle_ec - RM_get(x=x, RMname=analysis, valuename = "rotation.angle")) %% (2*pi)
        angle_ecp <- ifelse(angle_ecp > pi, -(2*pi)+angle_ecp, angle_ecp)
        md <- peripherie$peripherie[which.min(abs(peripherie$angle-angle_ec))]*0.9
        text(x=(RM_get(x=x, RMname=analysis, valuename = "used.centers")["center.x"]+cos(angle_ecp)*md*1.2), 
             y=(RM_get(x=x, RMname=analysis, valuename = "used.centers")["center.y"]+sin(angle_ecp)*md*1.2), 
             labels = "0")
        angle_ec <- pi
        angle_ecp <- (angle_ec - RM_get(x=bone, RMname=analysis, valuename = "rotation.angle")) %% (2*pi)
        angle_ecp <- ifelse(angle_ecp > pi, -(2*pi)+angle_ecp, angle_ecp)
        md <- peripherie$peripherie[which.min(abs(peripherie$angle-angle_ec))]*0.9
        text(x=(RM_get(x=x, RMname=analysis, valuename = "used.centers")["center.x"]+cos(angle_ecp)*md*1.2), 
             y=(RM_get(x=x, RMname=analysis, valuename = "used.centers")["center.y"]+sin(angle_ecp)*md*1.2), 
             labels = "pi")
        angle_ec <- pi/2
        angle_ecp <- (angle_ec - RM_get(x=bone, RMname=analysis, valuename = "rotation.angle")) %% (2*pi)
        angle_ecp <- ifelse(angle_ecp > pi, -(2*pi)+angle_ecp, angle_ecp)
        md <- peripherie$peripherie[which.min(abs(peripherie$angle-angle_ec))]*0.9
        text(x=(RM_get(x=x, RMname=analysis, valuename = "used.centers")["center.x"]+cos(angle_ecp)*md*1.2), 
             y=(RM_get(x=x, RMname=analysis, valuename = "used.centers")["center.y"]+sin(angle_ecp)*md*1.2), 
             labels = "pi/2")
        angle_ec <- -pi/2
        angle_ecp <- (angle_ec - RM_get(x=bone, RMname=analysis, valuename = "rotation.angle")) %% (2*pi)
        angle_ecp <- ifelse(angle_ecp > pi, -(2*pi)+angle_ecp, angle_ecp)
        md <- peripherie$peripherie[which.min(abs(peripherie$angle-angle_ec))]*0.9
        text(x=(RM_get(x=x, RMname=analysis, valuename = "used.centers")["center.x"]+cos(angle_ecp)*md*1.2), 
             y=(RM_get(x=x, RMname=analysis, valuename = "used.centers")["center.y"]+sin(angle_ecp)*md*1.2), 
             labels = "-pi/2")
        angle_ec <- peripherie$angle
        angle_ecp <- (angle_ec - RM_get(x=x, RMname=analysis, valuename = "rotation.angle")) %% (2*pi)
        angle_ecp <- ifelse(angle_ecp > pi, -(2*pi)+angle_ecp, angle_ecp)
        x_peripherie <- cos(angle_ecp)*peripherie$peripherie
        if (!RM_get(x=bone, RMname=analysis, valuename = "partial")) x_peripherie <- c(x_peripherie, x_peripherie[1])
        y_peripherie <- sin(angle_ecp)*peripherie$peripherie
        if (!RM_get(x=bone, RMname=analysis, valuename = "partial")) y_peripherie <- c(y_peripherie, y_peripherie[1])
        for (ratio in RM_get(x=x, RMname=analysis, valuename = "cut.distance.center")[-1]) {
          lines(RM_get(x=x, RMname=analysis, valuename = "used.centers")["center.x"]+x_peripherie*ratio, 
                RM_get(x=x, RMname=analysis, valuename = "used.centers")["center.y"]+y_peripherie*ratio, 
                col = rgb(red=0.5, green=0.5, blue=0.5, alpha=0.8))
        }
      }
      if (show.centers) {
        centers <- RM_get(x=x, RMname=analysis, valuename = "centers")
        if (!is.null(centers)) {
          if (!is.na(centers["GC_cortex.x"])) {
            points(centers["GC_cortex.x"], centers["GC_cortex.y"], pch=4, col="red")
            points(centers["GC_bone.x"], centers["GC_bone.y"], pch=3, col="red")
            points(centers["GC_medula.x"], centers["GC_medula.y"], pch=1, col="red")
            points(centers["GC_ontogenic.x"], centers["GC_ontogenic.y"], pch=19, col="blue")
            text(x=xl["end"]-xl["range"]*0.40, y=yl["begin"]-yl["range"]*0.01, 
                 labels = "X Center of the mineralized part", cex=0.8, col="red", pos=4)
            text(x=xl["end"]-xl["range"]*0.40, y=yl["begin"]-yl["range"]*0.04, 
                 labels = "O Center of the non-mineralized part", cex=0.8, col="red", pos=4)
            text(x=xl["end"]-xl["range"]*0.40, y=yl["begin"]-yl["range"]*0.07, 
                 labels = "+ Center of the section", cex=0.8, col="red", pos=4)
            points(x=xl["end"]-xl["range"]*0.375, y=yl["begin"]-yl["range"]*0.1, pch=19, col="blue")
            text(x=xl["end"]-xl["range"]*0.38, y=yl["begin"]-yl["range"]*0.1, 
                 labels = "Ontogenetic center", cex=0.8, col="blue", pos=4)
          } else {
            points(centers["GC_user.x"], centers["GC_user.y"], pch=1, col="red")
            text(x=xl["end"]-xl["range"]*0.40, y=yl["begin"]-yl["range"]*0.02, 
                 labels = "O User-defined center", cex=0.8, col="red", pos=4)
          }
        }
      }
    } else {
      stop("The information is not available")
    }
  }
  return(invisible(out))
}
/scratch/gouwar.j/cran-all/cranData/BoneProfileR/R/BP_PlotBone.R
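# Usage note: plot.BoneProfileR() above returns its plotted data invisibly
# for "observations", "model", and "observations+model". Assuming `bone`
# holds a fitted analysis named "logistic" (as in the examples), the curve
# can be recovered for further processing:
# out <- plot(bone, type = "observations+model", analysis = "logistic")
# head(out)  # columns: distance.center, observed.compactness, modeled.compactness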
#' BP_Report saves a pdf report for the analyzed bone
#' @title Generate a pdf report for the analyzed bone
#' @author Marc Girondot \email{marc.girondot@@gmail.com}
#' @return Nothing
#' @param bone The bone image
#' @param control.plot A list with the parameters used for plot
#' @param analysis Name or rank of the analysis that you want to report
#' @param pdf Name of pdf file
#' @param docx Name of Word file
#' @param xlsx Name of Excel file
#' @param author Name indicated in the report
#' @param title Title of the report
#' @description Generate a docx, xlsx, or pdf report.\cr
#' @family BoneProfileR
#' @examples
#' \dontrun{
#' # Not run:
#' library(BoneProfileR)
#' path_Hedgehog <- system.file("extdata", "Erinaceus_europaeus_fem_2-1_small.png", 
#'                              package = "BoneProfileR")
#' bone <- BP_OpenImage(file=path_Hedgehog)
#' bone <- BP_DetectBackground(bone=bone, analysis="logistic")
#' bone <- BP_DetectForeground(bone=bone, analysis="logistic")
#' bone <- BP_DetectCenters(bone=bone, analysis="logistic")
#' bone <- BP_EstimateCompactness(bone, analysis="logistic")
#' bone <- BP_FitMLCompactness(bone, analysis="logistic")
#' fittedpar <- BP_GetFittedParameters(bone, analysis="logistic")
#' bone <- BP_DuplicateAnalysis(bone, from="logistic", to="flexit")
#' bone <- BP_FitMLCompactness(bone, 
#'                             fitted.parameters=c(fittedpar, K1=1, K2=1), 
#'                             fixed.parameters=NULL, analysis="flexit")
#' compare_AIC(Logistic=BP_GetFittedParameters(bone, analysis="logistic", alloptim=TRUE), 
#'             Flexit=BP_GetFittedParameters(bone, analysis="flexit", alloptim=TRUE))
#' bone <- BP_FitMLRadialCompactness(bone, analysis="logistic")
#' # Test using the change of orientation using default.angle from BP_EstimateCompactness():
#' bone <- BP_DuplicateAnalysis(bone, from="logistic", to="logistic_rotation_pi")
#' # With a pi rotation, the top moves to the bottom and the left moves to the right
#' bone <- BP_EstimateCompactness(bone, rotation.angle=pi, analysis="logistic_rotation_pi")
#' bone <- BP_FitMLRadialCompactness(bone, analysis="logistic_rotation_pi")
#' BP_Report(bone=bone, 
#'           analysis=1, 
#'           docx=NULL, 
#'           pdf=NULL, 
#'           xlsx=file.path(getwd(), "report.xlsx"), 
#'           author="Marc Girondot", 
#'           title=attributes(bone)$name)
#' 
#' BP_Report(bone=bone, 
#'           analysis=1, 
#'           docx=NULL, 
#'           pdf=file.path(getwd(), "report.pdf"), 
#'           xlsx=NULL, 
#'           author="Marc Girondot", 
#'           title=attributes(bone)$name)
#' 
#' BP_Report(bone=bone, 
#'           analysis=1, 
#'           docx=file.path(getwd(), "report.docx"), 
#'           pdf=NULL, 
#'           xlsx=NULL, 
#'           author="Marc Girondot", 
#'           title=attributes(bone)$name)
#' }
#' @export
BP_Report <- function(bone=stop("A bone section must be provided"), 
                      control.plot=list(message=NULL, show.centers=TRUE, show.colors=TRUE, 
                                        show.grid=TRUE, CI="ML", show.legend=TRUE), 
                      analysis=1, 
                      docx=file.path(getwd(), "report.docx"), 
                      pdf=file.path(getwd(), "report.pdf"), 
                      xlsx=file.path(getwd(), "report.xlsx"), 
                      author=NULL, 
                      title=attributes(bone)$name) {
  # control.plot=list(message=NULL, show.centers=TRUE, show.colors=TRUE, show.grid=TRUE, CI="ML", show.legend=TRUE); analysis=1; pdf=NULL; docx=file.path(getwd(), "report.docx"); xlsx=file.path(getwd(), "report.xlsx"); author=NULL; title=attributes(bone)$name
  
  if (is.null(analysis)) {
    stop("You must choose which analysis to report.")
  }
  out <- RM_list(x=bone, silent=TRUE)
  if (is.numeric(analysis)) 
    if (analysis > length(out)) {
      stop("The analysis does not exist.")
    } else {
      analysis <- names(out)[analysis]
    }
  if (is.character(analysis) & (all(analysis != names(out)))) {
    stop("The analysis does not exist.")
  }
  
  texte <- NULL
  date <- out[[analysis]]$timestamp
  
  if (!is.null(pdf)) {
    control.output.pdf <- list(Title=title, Author=author, Date=date, filename=pdf, dirname=NULL)
    texte.pdf <- c('---', 
                   paste0('title: "', control.output.pdf$Title, '"'), 
                   paste0('author: "', control.output.pdf$Author, '"'), 
                   paste0('date: "', control.output.pdf$Date, '"'), 
                   'output: "pdf_document"', 
                   '---')
    texte.pdf <- c(texte.pdf, "")
    texte.pdf <- c(texte.pdf, paste0("# ", analysis))
    texte.pdf <- c(texte.pdf, "")
    texte.pdf <- c(texte.pdf, "```{r echo=FALSE}", 
                   paste0("control.plot <- ", paste0(capture.output(dput(control.plot)), collapse = "")), 
                   "do.call(getFromNamespace('plot.BoneProfileR', ns='BoneProfileR'), modifyList(control.plot, list(x=env$bone)))", 
                   "```")
    texte.pdf <- c(texte.pdf, "")
    out1 <- RM_get(x=bone, RMname=analysis, valuename = "optim")
    if (!is.null(out1)) {
      texte.pdf <- c(texte.pdf, "## Global model of compactness")
      texte.pdf <- c(texte.pdf, "```{r echo=FALSE}", 
                     paste0("do.call(getFromNamespace('plot.BoneProfileR', ns='BoneProfileR'), modifyList(control.plot, list(x=env$bone, type='observations+model', analysis='", analysis, "')))"), 
                     "```")
      texte.pdf <- c(texte.pdf, "")
      texte.pdf <- c(texte.pdf, knitr::kable(out1$summary.table))
      texte.pdf <- c(texte.pdf, "")
    }
    out1 <- RM_get(x=bone, RMname=analysis, valuename = "optimRadial")
    if (!is.null(out1)) {
      texte.pdf <- c(texte.pdf, "## Radial model of compactness")
      texte.pdf <- c(texte.pdf, "```{r echo=FALSE}", 
                     paste0("do.call(getFromNamespace('plot.BoneProfileR', ns='BoneProfileR'), modifyList(control.plot, list(x=env$bone, type='radial', analysis='", analysis, "')))"), 
                     "```")
      texte.pdf <- c(texte.pdf, "")
      texte.pdf <- c(texte.pdf, knitr::kable(out1$summary.table))
      texte.pdf <- c(texte.pdf, "")
    }
    texte.pdf <- c(texte.pdf, "", 
                   "### This software is provided by [Marc Girondot](https://hebergement.universite-paris-saclay.fr/marcgirondot/), Ecologie, Syst\u00E9matique, Evolution, CNRS, Universit\u00E9 Paris Saclay, AgroParisTech.")
    texte.pdf <- iconv(texte.pdf, from = "", to="UTF-8")
    tmp <- tempdir()
    Rmd <- file.path(tmp, "temppdf.Rmd")
    writeLines(texte.pdf, con=Rmd)
    env <- new.env()
    assign("bone", bone, envir = env)
    rmarkdown::render(input=Rmd, 
                      output_file = pdf, 
                      output_dir=NULL, 
                      envir = env)
  }
  
  if (!is.null(docx)) {
    control.output.docx <- list(Title=title, Author=author, Date=date, filename=docx, dirname=NULL)
    texte.docx <- c('---', 
                    paste0('title: "', control.output.docx$Title, '"'), 
                    paste0('author: "', control.output.docx$Author, '"'), 
                    paste0('date: "', control.output.docx$Date, '"'), 
                    'output: "word_document"', 
                    # ':reference_docx: template.docx', 
                    '---')
    texte.docx <- c(texte.docx, "")
    texte.docx <- c(texte.docx, paste0("# ", analysis))
    texte.docx <- c(texte.docx, "")
    texte.docx <- c(texte.docx, "```{r echo=FALSE}", 
                    paste0("control.plot <- ", paste0(capture.output(dput(control.plot)), collapse = "")), 
                    "do.call(getFromNamespace('plot.BoneProfileR', ns='BoneProfileR'), modifyList(control.plot, list(x=bone)))", 
                    "```")
    texte.docx <- c(texte.docx, "")
    out1 <- RM_get(x=bone, RMname=analysis, valuename = "optim")
    if (!is.null(out1)) {
      texte.docx <- c(texte.docx, "## Global model of compactness")
      texte.docx <- c(texte.docx, "```{r echo=FALSE}", 
                      paste0("do.call(getFromNamespace('plot.BoneProfileR', ns='BoneProfileR'), modifyList(control.plot, list(x=bone, type='observations+model', analysis='", analysis, "')))"), 
                      "```")
      texte.docx <- c(texte.docx, "")
      texte.docx <- c(texte.docx, knitr::kable(out1$summary.table))
      texte.docx <- c(texte.docx, "")
    }
    out1 <- RM_get(x=bone, RMname=analysis, valuename = "optimRadial")
    if (!is.null(out1)) {
      texte.docx <- c(texte.docx, "## Radial model of compactness")
      texte.docx <- c(texte.docx, "```{r echo=FALSE}", 
                      paste0("do.call(getFromNamespace('plot.BoneProfileR', ns='BoneProfileR'), modifyList(control.plot, list(x=bone, type='radial', analysis='", analysis, "')))"), 
                      "```")
      texte.docx <- c(texte.docx, "")
      texte.docx <- c(texte.docx, knitr::kable(out1$summary.table))
      texte.docx <- c(texte.docx, "")
    }
    texte.docx <- c(texte.docx, "", 
                    "### This software is provided by [Marc Girondot](https://hebergement.universite-paris-saclay.fr/marcgirondot/), Ecologie, Syst\u00E9matique, Evolution, CNRS, Universit\u00E9 Paris Saclay, AgroParisTech.")
    texte.docx <- iconv(texte.docx, from = "", to="UTF-8")
    tmp <- tempdir()
    Rmd <- file.path(tmp, "tempdocx.Rmd")
    writeLines(texte.docx, con=Rmd)
    env <- new.env()
    assign("bone", bone, envir = env)
    rmarkdown::render(input=Rmd, 
                      output_file = docx, 
                      output_dir=NULL, 
                      envir = env)
  }
  
  if (!is.null(xlsx)) {
    if (!requireNamespace("openxlsx", quietly = TRUE)) {
      stop("The openxlsx package is absent; please install it first to export an Excel file")
    }
    wb <- openxlsx::createWorkbook(creator = author, 
                                   title = title, 
                                   subject = "BoneProfileR report", 
                                   category = "")
    openxlsx::addWorksheet(wb=wb, sheetName="Global")
    openxlsx::addWorksheet(wb=wb, sheetName="Radial")
    out1 <- RM_get(x=bone, RMname=analysis, valuename = "optim")
    if (!is.null(out1)) {
      openxlsx::writeData(wb=wb, sheet="Global", x="Global model of compactness", startCol = 1, startRow = 1)
      openxlsx::writeData(wb=wb, sheet="Global", x=date, startCol = 1, startRow = 2)
      openxlsx::writeData(wb=wb, sheet="Global", x=analysis, startCol = 1, startRow = 3)
      openxlsx::writeData(wb=wb, sheet="Global", x="Observed compactness", startCol = 1, startRow = 4)
      openxlsx::writeData(wb=wb, sheet="Global", 
                          x=RM_get(x=bone, RMname = analysis, valuename = "global.compactness"), 
                          startCol = 2, startRow = 4)
      openxlsx::writeData(wb=wb, sheet="Global", x="Modeled compactness by ML (2.5%, 50%, 97.5%)", 
                          startCol = 1, startRow = 5)
      openxlsx::writeData(wb=wb, sheet="Global", 
                          x=mean(RM_get(x=bone, RMname = analysis, valuename = "optim")$quantiles["2.5%", ]), 
                          startCol = 2, startRow = 5)
      openxlsx::writeData(wb=wb, sheet="Global", 
                          x=mean(RM_get(x=bone, RMname = analysis, valuename = "optim")$quantiles["50%", ]), 
                          startCol = 3, startRow = 5)
      openxlsx::writeData(wb=wb, sheet="Global", 
                          x=mean(RM_get(x=bone, RMname = analysis, valuename = "optim")$quantiles["97.5%", ]), 
                          startCol = 4, startRow = 5)
      if (!is.null(RM_get(x=bone, RMname = analysis, valuename = "mcmc"))) {
        openxlsx::writeData(wb=wb, sheet="Global", x="Modeled compactness by MCMC (2.5%, 50%, 97.5%)", 
                            startCol = 1, startRow = 6)
        openxlsx::writeData(wb=wb, sheet="Global", 
                            x=mean(RM_get(x=bone, RMname = analysis, valuename = "mcmc")$quantiles["2.5%", ]), 
                            startCol = 2, startRow = 6)
        openxlsx::writeData(wb=wb, sheet="Global", 
                            x=mean(RM_get(x=bone, RMname = analysis, valuename = "mcmc")$quantiles["50%", ]), 
                            startCol = 3, startRow = 6)
        openxlsx::writeData(wb=wb, sheet="Global", 
                            x=mean(RM_get(x=bone, RMname = analysis, valuename = "mcmc")$quantiles["97.5%", ]), 
                            startCol = 4, startRow = 6)
      }
      openxlsx::writeData(wb=wb, sheet="Global", x=out1$summary.table, startCol = 2, startRow = 10)
      openxlsx::writeData(wb=wb, sheet="Global", x=rownames(out1$summary.table), startCol = 1, startRow = 11)
      openxlsx::writeData(wb=wb, sheet="Global", x="Quantiles", startCol = 1, startRow = 20)
      openxlsx::writeData(wb=wb, sheet="Global", x=t(out1$quantiles), startCol = 1, startRow = 21)
    }
    out1 <- RM_get(x=bone, RMname=analysis, valuename = "optimRadial")
    if (!is.null(out1)) {
      openxlsx::writeData(wb=wb, sheet="Radial", x="Radial model of compactness", startCol = 1, startRow = 1)
      openxlsx::writeData(wb=wb, sheet="Radial", x=date, startCol = 1, startRow = 2)
      openxlsx::writeData(wb=wb, sheet="Radial", x=analysis, startCol = 1, startRow = 3)
      openxlsx::writeData(wb=wb, sheet="Radial", x=out1$summary.table, startCol = 2, startRow = 4)
      openxlsx::writeData(wb=wb, sheet="Radial", x=rownames(out1$summary.table), startCol = 1, startRow = 5)
      openxlsx::writeData(wb=wb, sheet="Radial", x=out1$synthesis, startCol = 2, startRow = 13)
      openxlsx::writeData(wb=wb, sheet="Radial", x="Angle", startCol = 1, startRow = 13)
      openxlsx::writeData(wb=wb, sheet="Radial", x=out1$angles, startCol = 1, startRow = 14)
      openxlsx::writeData(wb=wb, sheet="Radial", x="Radial modeled compactness", startCol = 10, startRow = 13)
      openxlsx::writeData(wb=wb, sheet="Radial", x=out1$radial.modeled.compactness, startCol = 10, startRow = 14)
      openxlsx::writeData(wb=wb, sheet="Radial", x="Radial observed modeled compactness", startCol = 11, startRow = 13)
      openxlsx::writeData(wb=wb, sheet="Radial", x=out1$observed.modeled.compactness, startCol = 11, startRow = 14)
      openxlsx::writeData(wb=wb, sheet="Radial", x="Radial observed compactness", startCol = 12, startRow = 13)
      openxlsx::writeData(wb=wb, sheet="Radial", x=out1$observed.compactness, startCol = 12, startRow = 14)
    }
    openxlsx::saveWorkbook(wb, file = xlsx, overwrite = TRUE)
  }
}
/scratch/gouwar.j/cran-all/cranData/BoneProfileR/R/BP_Report.R
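# A minimal sketch of the report pipeline used by BP_Report above: write an
# R Markdown file line by line, then render it while evaluating the chunks in
# a dedicated environment. File names and content are hypothetical; rendering
# requires the rmarkdown package and a working pandoc installation.
library(rmarkdown)
rmd <- file.path(tempdir(), "mini.Rmd")
writeLines(c("---", 'title: "Demo"', 'output: "html_document"', "---", 
             "```{r echo=FALSE}", "plot(x)", "```"), con = rmd)
env <- new.env()
assign("x", rnorm(10), envir = env)  # the chunk finds x in this environment
rmarkdown::render(input = rmd, output_file = file.path(tempdir(), "mini.html"), 
                  envir = env)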
#' summary.BoneProfileR summarizes a bone section
#' @title Summarize a bone section
#' @author Marc Girondot \email{marc.girondot@@gmail.com}
#' @return An invisible list with recorded information
#' @param object The bone image
#' @param max.level If TRUE, will return all list elements of the object
#' @param analysis The analysis for which the global compactness is reported
#' @param ... Not used
#' @description Display the information recorded for a bone section.
#' @family BoneProfileR
#' @examples
#' \dontrun{
#' # Not run:
#' library(BoneProfileR)
#' bone <- BP_OpenImage()
#' # or
#' path_Hedgehog <- system.file("extdata", "Erinaceus_europaeus_fem_2-1_small.png", 
#'                              package = "BoneProfileR")
#' bone <- BP_OpenImage(file=path_Hedgehog)
#' summary(bone)
#' }
#' @method summary BoneProfileR
#' @export
summary.BoneProfileR <- function(object, max.level=FALSE, analysis=1, ...) {
  out <- dim(object)
  cat(paste0("The image is ", as.character(out[1]), " pixels wide and ", 
             as.character(out[2]), " pixels high.\n"))
  if (!is.null(RM_get(x=object, RMname = analysis, valuename = "global.compactness"))) 
    cat(paste0("The observed global compactness for the ", analysis, " analysis is ", 
               specify_decimal(RM_get(x=object, RMname = analysis, 
                                      valuename = "global.compactness"), decimals = 3), ".\n"))
  if (!is.null(RM_get(x=object, RMname = analysis, valuename = "optim")$quantiles)) {
    cat(paste0("The median value of the modeled global compactness for the ", analysis, " analysis is ", 
               specify_decimal(mean(RM_get(x=object, RMname = analysis, 
                                           valuename = "optim")$quantiles["50%", ]), decimals = 3), ".\n"))
    cat(paste0("The 95% confidence interval of the modeled global compactness for the ", analysis, " analysis is between ", 
               specify_decimal(mean(RM_get(x=object, RMname = analysis, 
                                           valuename = "optim")$quantiles["2.5%", ]), decimals = 3), 
               " and ", 
               specify_decimal(mean(RM_get(x=object, RMname = analysis, 
                                           valuename = "optim")$quantiles["97.5%", ]), decimals = 3), ".\n"))
  }
  an <- BP_ListAnalyses(object, max.level=max.level, silent = TRUE)
  if (length(an) == 0) {
    cat("There is no recorded analysis yet.\n")
  } else {
    cat("The currently recorded analyses are:\n")
    an <- BP_ListAnalyses(object, max.level=max.level, silent = FALSE)
  }
  out <- list(dim=out, analysis=an)
  return(invisible(out))
}
/scratch/gouwar.j/cran-all/cranData/BoneProfileR/R/BP_SummaryBone.R
# #' .BP_angle estimates an angle matrix
.BP_angle <- function(bone, threshold, analysis=1, center.x=NA, center.y=NA) {
  # analysis=1; partial=FALSE; center.x=NA; center.y=NA
  if (is.null(threshold)) threshold <- RM_get(x=bone, RMname = analysis, valuename="threshold")
  if (is.null(threshold)) {
    threshold <- getFromNamespace(".BP_threshold", ns="BoneProfileR")(bone, analysis=analysis)
  }
  # In bone and threshold, x is dimension 1 and y is dimension 2
  if (is.na(center.x) | is.na(center.y)) {
    center.x <- mean(which(threshold, arr.ind = TRUE)[, 1])
    center.y <- mean(which(threshold, arr.ind = TRUE)[, 2])
  }
  # I must work from the center
  # Note: in the bone object, row is x and col is y
  # Column 1 is y; y varies first
  # Column 2 is x
  m <- expand.grid(1:dim(threshold)[2], 1:dim(threshold)[1])
  a <- sapply(1:nrow(m), FUN = function(r) {
    x <- m[r, 1]
    y <- m[r, 2]
    angle <- atan2(y-center.y, x-center.x) %% (2*pi)
    # angle <- ((angle+pi+rotation.angle) %% (2*pi))-pi
    return(angle)
    # return((atan((x-center.x)/(y-center.y))+pi*(as.numeric((y-center.y)<0))) %% (2*pi))
  })
  a_mat <- matrix(a, nrow=dim(threshold)[1], ncol=dim(threshold)[2], byrow = TRUE)
  return(a_mat)
}
/scratch/gouwar.j/cran-all/cranData/BoneProfileR/R/BP_angle.R
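# A minimal sketch of the angle convention used by .BP_angle above: atan2()
# returns values in (-pi, pi], and the %% (2*pi) wrap maps them to [0, 2*pi),
# measured counterclockwise from the positive x axis.
atan2(1, 1) %% (2*pi)   # pi/4 (0.785...) for equal positive offsets in x and y
atan2(-1, 1) %% (2*pi)  # 7*pi/4 (5.497...): a negative y offset wraps into [pi, 2*pi)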
# #' .BP_contour estimates a contour matrix
.BP_contour <- function(bone, threshold=NULL, analysis=1, partial=FALSE, center.x=NA, center.y=NA) {
  # analysis=1; threshold <- NULL; partial=FALSE; center.x=NA; center.y=NA
  if (is.null(threshold)) threshold <- RM_get(x=bone, RMname = analysis, valuename="threshold")
  if (is.null(threshold)) {
    threshold <- getFromNamespace(".BP_threshold", ns="BoneProfileR")(bone, analysis=analysis)
  }
  # In bone and threshold, x is dimension 1 and y is dimension 2
  if (is.na(center.x) | is.na(center.y)) {
    center.x <- mean(which(threshold, arr.ind = TRUE)[, 1])
    center.y <- mean(which(threshold, arr.ind = TRUE)[, 2])
  }
  c. <- center.x
  center.x <- center.y
  center.y <- c.
  d_mat <- getFromNamespace(".BP_distance", ns="BoneProfileR")(bone, threshold=threshold, 
                                                               analysis=analysis, 
                                                               center.x=center.x, center.y=center.y)
  d_Threshold_mat <- ifelse(threshold, d_mat, NA)
  d_Threshold <- as.vector(d_Threshold_mat)
  a_mat <- getFromNamespace(".BP_angle", ns="BoneProfileR")(bone, threshold=threshold, 
                                                            analysis=analysis, 
                                                            center.x=center.x, center.y=center.y)
  a <- as.vector(a_mat)
  # # library(fields)
  # image.plot(a_mat)
  # cut.angle <- (dim(threshold)[c(1)]*2+dim(threshold)[c(2)])*2
  cut.angle <- 360*2
  la <- seq(from=0, to=2*pi, length.out=cut.angle+1)
  fa <- findInterval(a, la)
  fa_mat <- matrix(fa, nrow=dim(threshold)[1], ncol=dim(threshold)[2], byrow = FALSE)
  fa <- as.vector(fa)
  # Maximum distance of a mineralized pixel for each angular sector
  dmax_a <- aggregate(d_Threshold, by=list(fa), 
                      FUN=function(x) ifelse(all(is.na(x)), NA, max(x, na.rm = TRUE)))
  dmax_a2 <- rep(NA, cut.angle)
  dmax_a2[dmax_a[, 1]] <- dmax_a[, 2]
  # 23451
  # 12345
  # 51234
  # If two values are separated by an NA, the NA is replaced by the mean of the two values
  # Is it really necessary? Not sure
  if (FALSE) {
    ld <- length(dmax_a2)
    kb_dmax_a2 <- c(tail(dmax_a2, n=1), dmax_a2[1:(ld-1)])
    ke_dmax_a2 <- c(dmax_a2[-1], dmax_a2[1])
    w <- which(is.na(dmax_a2) & !is.na(kb_dmax_a2) & !is.na(ke_dmax_a2))
    dmax_a2[w] <- sapply(w, FUN = function(x) mean(kb_dmax_a2[x], ke_dmax_a2[x]))
  }
  # New strategy
  if (FALSE) {
    ld <- length(dmax_a2)
    dmax_a2_rep <- c(0, 0, rep(dmax_a2, 3), 0, 0)
    r <- which(!is.na(dmax_a2_rep))
    for (v in seq_along(r[-length(r)])) {
      if (r[v+1] != r[v]+1) 
        dmax_a2_rep[(r[v]+1):(r[v+1]-1)] <- rep(mean(c(dmax_a2_rep[r[v]], dmax_a2_rep[r[v+1]])), 
                                                (r[v+1]-1)-(r[v]+1)+1)
    }
    dmax_a2 <- dmax_a2_rep[(ld+3):(ld+3+ld-1)]
  }
  dmax_a2[dmax_a2 < median(dmax_a2, na.rm = TRUE)/4] <- NA
  # A pixel is inside the contour if it is closer to the center than the
  # maximum distance recorded for its angular sector
  contour <- (as.vector(d_mat) < dmax_a2[fa])
  contour <- matrix(contour, nrow=dim(threshold)[1], ncol=dim(threshold)[2], byrow = FALSE)
  contour <- ifelse(is.na(contour), FALSE, contour)
  
  # contour <- matrix(data = FALSE, nrow=dim(threshold)[1], ncol=dim(threshold)[2], byrow = FALSE)
  # for (x in 1:dim(threshold)[2])
  #   for (y in 1:dim(threshold)[1]) {
  #     # In fa_mat[y, x] I have the category and in dmax_a2[fa_mat[y, x]] the max distance
  #     contour[y, x] <- (d_mat[y, x] < dmax_a2[fa_mat[y, x]])
  #   }
  # 
  # contour <- ifelse(is.na(contour), FALSE, contour)
  # contour <- t(contour)
  # 
  # 
  # contourTF <- sapply(seq_along(d_Threshold), FUN=function(x) {
  #   # In d I have the distance to the center of each pixel x
  #   # In fa[x] I have the angle category of pixel x
  #   # In dmax_a2 I have the max distance for this angle
  #   return(d[x] <= dmax_a2[fa[x]])
  # })
  # 
  # # distance <- matrix(data = d, ncol=dim(threshold)[1], nrow=dim(threshold)[2], byrow = TRUE)
  # # angle <- matrix(data = fa, ncol=dim(threshold)[1], nrow=dim(threshold)[2], byrow = TRUE)
  # contour <- matrix(data = contourTF, nrow=dim(threshold)[1], ncol=dim(threshold)[2], byrow = FALSE)
  # 
  # 
  # d_Threshold <- as.vector(d_Threshold_mat)
  # 
  # 
  # a_mat <- matrix(NA, nrow=dim(threshold)[1], ncol=dim(threshold)[2], byrow = TRUE)
  # 
  # 
  # for (x in 1:dim(threshold)[2])
  #   for (y in 1:dim(threshold)[1]) {
  #     a_mat[y, x] <- (atan((x-center.x)/(y-center.y))+pi*(as.numeric((y-center.y)<0))) %% (2*pi)
  #   }
  # 
  # 
  # invisible(sapply(1:nrow(m), FUN = function(r) a_mat[m[r, 2], m[r, 1]] <<- (atan((m[r, 1]-center.y)/(m[r, 2]-center.x))+pi*(as.numeric((m[r, 2]-center.x)<0))) %% (2*pi)))
  # 
  # 
  # # Right = 0
  # # Left = -pi
  # # Top = -pi/2
  # # Bottom = pi/2
  # a_mat <- matrix(NA, nrow=dim(threshold)[1], ncol=dim(threshold)[2], byrow = TRUE)
  # invisible(sapply(1:nrow(m), FUN = function(r) a_mat[m[r, 2], m[r, 1]] <<- (atan((m[r, 1]-center.y)/(m[r, 2]-center.x))+pi*(as.numeric((m[r, 2]-center.x)<0))) %% (2*pi)))
  # 
  # a <- sapply(1:nrow(m), FUN = function(r) (atan((m[r, 1]-center.y)/(m[r, 2]-center.x))+pi*(as.numeric((m[r, 2]-center.x)<0))) %% (2*pi))
  # a_mat <- matrix(a, nrow=dim(threshold)[1], ncol=dim(threshold)[2], byrow = TRUE)
  # 
  # cut.angle <- 360
  # 
  # la <- seq(from=0, to=2*pi, length.out=cut.angle+1)
  # fa <- findInterval(a, la)
  # fa_mat <- matrix(fa, nrow=dim(threshold)[1], ncol=dim(threshold)[2], byrow = FALSE)
  # 
  # fa_mat2 <- fa_mat
  # 
  # for (r in 1:nrow(m)) {
  #   va <- a_mat[m[r, 2], m[r, 1]]
  #   fa_mat2[m[r, 2], m[r, 1]] <- max(which(va>la))
  # }
  # 
  # dmax_a <- aggregate(d_Threshold, by=list(fa), FUN=function(x) ifelse(all(is.na(x)), NA, max(x, na.rm = TRUE)))
  # dmax_a2 <- rep(NA, cut.angle)
  # dmax_a2[dmax_a[, 1]] <- dmax_a[, 2]
  # 
  # fa_mat_2 <- matrix(NA, nrow=dim(threshold)[1], ncol=dim(threshold)[2], byrow = TRUE)
  # fa_mat_2[] <- dmax_a2[fa_mat]
  # 
  # 
  # contourTF <- matrix(NA, nrow=dim(threshold)[1], ncol=dim(threshold)[2], byrow = TRUE)
  # invisible(sapply(1:nrow(m), FUN = function(r) {
  #   # In d I have the distance to the center of each pixel x
  #   # In fa[x] I have the angle category of pixel x
  #   # In dmax_a2 I have the max distance for this angle
  #   contourTF[m[r, 2], m[r, 1]] <<- dmax_a2[fa[x]]
  # }))
  # 
  # 
  # # In fa I have the angle class of each pixel
  # contourTF <- sapply(seq_along(d_Threshold), FUN=function(x) {
  #   # In d I have the distance to the center of each pixel x
  #   # In fa[x] I have the angle category of pixel x
  #   # In dmax_a2 I have the max distance for this angle
  #   return(d[x] <= dmax_a2[fa[x]])
  # })
  # 
  # # distance <- matrix(data = d, ncol=dim(threshold)[1], nrow=dim(threshold)[2], byrow = TRUE)
  # # angle <- matrix(data = fa, ncol=dim(threshold)[1], nrow=dim(threshold)[2], byrow = TRUE)
  # contour <- matrix(data = contourTF, nrow=dim(threshold)[1], ncol=dim(threshold)[2], byrow = FALSE)
  
  # To check if it works
  # x <- bone
  # x[, , 1, 1] <- contour
  # if (dim(x)[4]>1) x[, , 1, 2] <- !contour
  # if (dim(x)[4]>2) x[, , 1, 3] <- !contour
  # par(xaxs="i", yaxs="i")
  # par(mar=c(4, 0, 0, 0))
  # 
  # getFromNamespace("plot.cimg", ns="imager")(x, bty="n", axes=FALSE, xlab="", ylab="", asp = 1)
  
  return(contour)
}
/scratch/gouwar.j/cran-all/cranData/BoneProfileR/R/BP_contour.R
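# A minimal sketch of the sector-maximum idea implemented by .BP_contour
# above, on hypothetical toy vectors: bin pixel angles into sectors, take the
# largest distance of a mineralized pixel in each sector, then flag as inside
# the contour every pixel closer to the center than its sector's maximum.
set.seed(1)
a  <- runif(200, 0, 2*pi)   # toy pixel angles
d  <- runif(200)            # toy pixel distances to the center
th <- d < 0.8               # toy "mineralized" mask
sect <- findInterval(a, seq(from = 0, to = 2*pi, length.out = 25))
dmax <- tapply(ifelse(th, d, NA), sect, 
               FUN = function(x) if (all(is.na(x))) NA else max(x, na.rm = TRUE))
inside <- d < dmax[as.character(sect)]  # TRUE = inside the estimated contour
table(inside)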
# #' .BP_distance estimates a distance matrix
.BP_distance <- function(bone, threshold, analysis=1, center.x=NA, center.y=NA) {
  # analysis=1; partial=FALSE; center.x=NA; center.y=NA
  if (is.null(threshold)) threshold <- RM_get(x=bone, RMname = analysis, valuename="threshold")
  if (is.null(threshold)) {
    threshold <- getFromNamespace(".BP_threshold", ns="BoneProfileR")(bone, analysis=analysis)
  }
  # In bone and threshold, x is dimension 1 and y is dimension 2
  if (is.na(center.x) | is.na(center.y)) {
    center.x <- mean(which(threshold, arr.ind = TRUE)[, 1])
    center.y <- mean(which(threshold, arr.ind = TRUE)[, 2])
  }
  # I must work from the center
  # Note: in the bone object, row is x and col is y
  # Column 1 is y; y varies first
  # Column 2 is x
  m <- expand.grid(1:dim(threshold)[2], 1:dim(threshold)[1])
  # Column 1 goes from 1 to 34; it is x
  # Column 2 goes from 1 to 24; it is y
  d <- sapply(1:nrow(m), FUN = function(r) sqrt((m[r, 1]-center.x)^2+(m[r, 2]-center.y)^2))
  # d[2] is x=1, y=2
  # d[3] is x=1, y=3
  d_mat <- matrix(d, nrow=dim(threshold)[1], ncol=dim(threshold)[2], byrow = TRUE)
  return(d_mat)
}
/scratch/gouwar.j/cran-all/cranData/BoneProfileR/R/BP_distance.R
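# A minimal sketch: the distance matrix built by .BP_distance above is, in
# spirit, the classic Euclidean distance-to-a-point grid, which can also be
# obtained in vectorized form with outer() (toy 3x4 grid, hypothetical center).
nx <- 3; ny <- 4
cx <- 2; cy <- 2.5
d_mat <- sqrt(outer((1:nx - cx)^2, (1:ny - cy)^2, FUN = "+"))
dim(d_mat)  # 3 4: d_mat[i, j] is the distance of pixel (i, j) to (cx, cy)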
# #' .BP_threshold estimates a threshold (foreground) matrix
.BP_threshold <- function(bone, analysis=1) {
  if (is.null(RM_get(x=bone, RMname=analysis, valuename = "bg")) | 
      is.null(RM_get(x=bone, RMname=analysis, valuename = "fg"))) {
    stop("You must first set up the background and foreground colors")
  }
  bg <- RM_get(x=bone, RMname=analysis, valuename = "bg")
  fg <- RM_get(x=bone, RMname=analysis, valuename = "fg")
  # Euclidean RGB distance of each pixel to the background and foreground colors
  Distance_bg <- sqrt((bone[, , 1, 1]-col2rgb(bg)["red", 1]/255)^2 + 
                        (bone[, , 1, 2]-col2rgb(bg)["green", 1]/255)^2 + 
                        (bone[, , 1, 3]-col2rgb(bg)["blue", 1]/255)^2)
  Distance_fg <- sqrt((bone[, , 1, 1]-col2rgb(fg)["red", 1]/255)^2 + 
                        (bone[, , 1, 2]-col2rgb(fg)["green", 1]/255)^2 + 
                        (bone[, , 1, 3]-col2rgb(fg)["blue", 1]/255)^2)
  # TRUE when the pixel is closer to the foreground color
  return(Distance_bg > Distance_fg)
}
/scratch/gouwar.j/cran-all/cranData/BoneProfileR/R/BP_threshold.R
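# A minimal sketch of the nearest-reference-color rule applied by
# .BP_threshold above: a pixel is classified as foreground when its Euclidean
# RGB distance to the foreground color is smaller than to the background
# color. The colors and the pixel below are toy values.
bg <- col2rgb("white")/255
fg <- col2rgb("black")/255
px <- c(0.2, 0.25, 0.3)  # a dark toy pixel
sqrt(sum((px - bg)^2)) > sqrt(sum((px - fg)^2))  # TRUE: closer to fg, so foreground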
#' A model for bone compactness.
#'
#' \tabular{ll}{
#'  Package: \tab BoneProfileR\cr
#'  Type: \tab Package\cr
#'  Version: \tab 2.4 build 766\cr
#'  Date: \tab 2022-09-06\cr
#'  License: \tab GPL (>= 2)\cr
#'  LazyLoad: \tab yes\cr
#'  }
#' @title A Model for Bone Compactness.
#' @author Marc Girondot \email{marc.girondot@@gmail.com}
#' @docType package
#' @name BoneProfileR-package
#' @description A Model for Bone Compactness.\cr
#' The latest version of this package can always be installed using:\cr
#' install.packages(c("imager", "tiff", "ijtiff", "HelpersMG", "knitr", "rmarkdown", "openxlsx", "shiny"))\cr
#' install.packages("https://hebergement.universite-paris-saclay.fr/marcgirondot/CRAN/HelpersMG.tar.gz", repos=NULL, type="source")\cr
#' install.packages("https://hebergement.universite-paris-saclay.fr/marcgirondot/CRAN/BoneProfileR.tar.gz", repos=NULL, type="source")\cr
#' BoneProfileR uses a new results management system that is developed as part of the HelpersMG
#' package. Using this results management system (RM), all the results are stored as part of the
#' analyzed image.\cr
#' This results management system has been developed to help users maintain the results
#' associated with the methodology used to obtain them. It is part of the broader movement in
#' science toward replicable research.\cr
#' An analysis is then stored with the image in a single file with the following information:\cr
#' name, timestamp, bg, fg, threshold, contour, centers, peripherie, compactness, 
#' array.compactness, cut.distance.center, cut.angle, used.centers, compactness.synthesis, 
#' partial, rotation.angle, global.compactness, optim, optimRadial\cr
#' Several analyses can be stored within a single file.\cr
#' \if{html}{\figure{E.png}{options: alt="BoneProfileR logo"}}
#' \if{latex}{\figure{E.png}}
#' @references Girondot M, Laurin M (2003) Bone Profiler: a tool to quantify, model, and 
#' statistically compare bone-section compactness profiles. Journal of 
#' Vertebrate Paleontology 23: 458-461
#' @references Laurin M, Girondot M, Loth M-M (2004) The evolution of long bone microstructure 
#' and lifestyle in lissamphibians. Paleobiology 30: 589-613
#' @references Gônet J, Bardin J, Girondot M, Hutchinson JR, Laurin M (2022) The reptilian 
#' locomotor and postural diversity seen through the prism of femoral microanatomy: 
#' paleobiological implications for some Permian and Mesozoic reptiles. Submitted
#' @references Gônet J, Laurin M, Girondot M (2022) Bone Profiler: the next step to quantify, 
#' model and statistically compare bone section compactness profiles. 
#' Palaeontologia Electronica 25(1): a12
#' @examples
#' \dontrun{
#' # Not run:
#' library(BoneProfileR)
#' path_Hedgehog <- system.file("extdata", "Erinaceus_europaeus_fem_2-1_small.png", 
#'                              package = "BoneProfileR")
#' bone <- BP_OpenImage(file=path_Hedgehog)
#' plot(bone, type="original")
#' bone <- BP_DetectBackground(bone=bone, analysis="logistic")
#' bone <- BP_DetectForeground(bone=bone, analysis="logistic")
#' bone <- BP_DetectCenters(bone=bone, analysis="logistic")
#' plot(bone, type="original")
#' plot(bone, type="mineralized")
#' plot(bone, type="unmineralized")
#' plot(bone, type="section")
#' plot(bone, type="colors")
#' plot(bone, type="3Dcolors")
#' bone <- BP_EstimateCompactness(bone, analysis="logistic", center="ontogenetic")
#' plot(bone, type="original")
#' plot(bone, type="mineralized")
#' plot(bone, type="observations")
#' bone <- BP_FitMLCompactness(bone, analysis="logistic")
#' plot(bone, type="model", analysis=1)
#' plot(bone, type="observations+model", analysis=1)
#' fittedpar <- BP_GetFittedParameters(bone, analysis="logistic")
#' bone <- BP_DuplicateAnalysis(bone, from="logistic", to="flexit")
#' bone <- BP_FitMLCompactness(bone, 
#'                             fitted.parameters=c(fittedpar, K1=1, K2=1), 
#'                             fixed.parameters=NULL, analysis="flexit")
#' compare_AIC(Logistic=BP_GetFittedParameters(bone, analysis="logistic", alloptim=TRUE), 
#'             Flexit=BP_GetFittedParameters(bone, analysis="flexit", alloptim=TRUE))
#' # pdf(file = "Figure 2.pdf", width = 8, height = 10, pointsize = 12)
#' layout(1:2)
#' plot(bone, type="observations+model", analysis="logistic", restorePar=FALSE, mar=c(4, 4, 2, 5))
#' plot(bone, type="observations+model", analysis="flexit", restorePar=FALSE, mar=c(4, 4, 2, 5))
#' layout(1)
#' # dev.off()
#' out4p <- plot(bone, type="observations+model", analysis="logistic")
#' out6p <- plot(bone, type="observations+model", analysis="flexit")
#' bone <- BP_FitBayesianCompactness(bone, analysis="logistic")
#' plot(bone, type="observations+model", CI="MCMC")
#' bone <- BP_FitBayesianCompactness(bone, analysis="flexit")
#' plot(bone, type="observations+model", CI="MCMC", analysis="flexit")
#' plot(bone, type="mcmc", parameter="P", 
#'      options.mcmc=list(xlim=c(0.55, 0.57), breaks=seq(from=0, to=1, by=0.001)))
#' plot(bone, type="mcmc", parameter="S", 
#'      options.mcmc=list(xlim=c(0.02, 0.05), breaks=seq(from=0.02, to=.05, by=0.001)))
#' plot(bone, type="mcmc", parameter="Min", 
#'      options.mcmc=list(xlim=c(0.05, 0.08), breaks=seq(from=0, to=1, by=0.001)))
#' plot(bone, type="mcmc", parameter="Max", 
#'      options.mcmc=list(xlim=c(0.95, 0.97), breaks=seq(from=0, to=1, by=0.001)))
#' outMCMC <- RM_get(x = bone, RM = "RM", RMname = "logistic", valuename = "mcmc")
#' summary(outMCMC)
#' outMCMC <- RM_get(x = bone, RM = "RM", RMname = "flexit", valuename = "mcmc")
#' summary(outMCMC)
#' # pdf(file = "Figure 3.pdf", width = 8, height = 10, pointsize = 12)
#' layout(1:2)
#' plot(bone, type="mcmc", parameter="K1", analysis="flexit", 
#'      options.mcmc=list(xlim=c(-1, 3), ylim=c(0, 10), 
#'                        breaks=seq(from=-1, to=3, by=0.001), 
#'                        legend = FALSE, show.prior = FALSE, mar=c(4, 4, 1, 6)), restorePar=FALSE)
#' segments(x0=1, x1=1, 
#'          y0=0, y1=10, lty=4, lwd=3)
#' text(x=ScalePreviousPlot(x=0.95, y=0.95)$x, 
#'      y=ScalePreviousPlot(x=0.95, y=0.95)$y, labels="A", cex=3)
#' plot(bone, type="mcmc", parameter="K2", analysis="flexit", 
#'      options.mcmc=list(xlim=c(-1, 3), ylim=c(0, 10), 
#'                        breaks=seq(from=-1, to=3, by=0.001), 
#'                        legend = FALSE, show.prior = FALSE, mar=c(4, 4, 1, 6)), restorePar=FALSE)
#' segments(x0=1, x1=1, 
#'          y0=0, y1=10, lty=4, lwd=3)
#' text(x=ScalePreviousPlot(x=0.95, y=0.95)$x, 
#'      y=ScalePreviousPlot(x=0.95, y=0.95)$y, labels="B", cex=3)
#' # dev.off()
#' 
#' bone <- BP_FitMLRadialCompactness(bone, analysis = "flexit")
#' plot(bone, type="radial", radial.variable=c("P", "S"), analysis = "flexit")
#' plot(bone, type="radial", radial.variable=c("P", "S", "Min", "Max"), analysis = "flexit")
#' out <- RM_get(x=bone, RMname="flexit", valuename = "optimRadial")$synthesis
#' mean(out[, "P"]); sd(out[, "P"])
#' range(out[, "S"])
#' quantile(out[, "S"])
#' # pdf(file = "Figure 4.pdf", width=7, height = 9, pointsize = 12)
#' layout(1:2)
#' plot(bone, type="radial", radial.variable="P", analysis = "flexit", restorePar=FALSE)
#' text(x=ScalePreviousPlot(x=0.95, y=0.95)$x, 
#'      y=ScalePreviousPlot(x=0.95, y=0.95)$y, labels="A", cex=3)
#' plot(bone, type="radial", radial.variable="S", analysis = "flexit", restorePar=FALSE)
#' text(x=ScalePreviousPlot(x=0.95, y=0.95)$x, 
#'      y=ScalePreviousPlot(x=0.95, y=0.95)$y, labels="B", cex=3)
#' # dev.off()
#' }
NULL
/scratch/gouwar.j/cran-all/cranData/BoneProfileR/R/BoneProfileR-package.R
.onAttach <- function(libname, pkgname) {
  actual <- utils::packageDescription(pkgname)[["Version"]]
  packageStartupMessage(paste("Welcome to package", pkgname, "version", actual))
  conn <- url("https://hebergement.universite-paris-saclay.fr/marcgirondot/CRAN/BoneProfileR/version.txt")
  version_get <- try(suppressWarnings(readLines(con=conn)), silent = TRUE)
  close(con=conn)
  if (!is.null(version_get) && !inherits(version_get, "try-error")) {
    if (package_version(actual, strict = TRUE) < package_version(version_get, strict = TRUE)) {
      packageStartupMessage('An update is available; use:\ninstall.packages("https://hebergement.universite-paris-saclay.fr/marcgirondot/CRAN/BoneProfileR.tar.gz", repos=NULL, type="source")')
    } else {
      packageStartupMessage("No update is available.")
    }
  } else {
    packageStartupMessage("No internet connection available to check for updates.")
  }
}
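# A minimal sketch (never executed) of the version comparison used above:
# base package_version() turns version strings into comparable objects.
# The version numbers below are made up for illustration.
if (FALSE) {
  package_version("2.4") < package_version("2.5")  # TRUE: an update is available
  package_version("2.4") < package_version("2.4")  # FALSE: the package is up to date
}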
/scratch/gouwar.j/cran-all/cranData/BoneProfileR/R/onAttach.R
library(shiny)

# Define the server logic for the BoneProfileR analysis
shinyServer(function(input, output) {
  
  outplotfn <- eventReactive(eventExpr=input$goButton, 
                             ignoreNULL = FALSE, 
                             valueExpr={
    
    file <- isolate(input$FileOpen)
    center <- isolate(input$center)
    rotation <- isolate(as.numeric(input$rotation))
    ijtiff <- isolate(input$ijtiff)
    angles <- isolate(as.numeric(input$angles))
    distances <- isolate(as.numeric(input$distances))
    twosteps <- isolate(input$twosteps)
    
    # Values used during development:
    # file <- list(datapath="/Users/marcgirondot/Desktop/ProblemesBP/astrochelys_femur.tif")
    # file <- list(datapath="/Users/marcgirondot/Downloads/femur45l-astrochelys.tif")
    # center <- "ontogenic"
    # rotation <- 0
    # ijtiff <- TRUE
    # angles <- 60
    # distances <- 100
    # twosteps <- TRUE
    
    if (is.null(file)) {
      oldpar <- par(no.readonly = TRUE)
      on.exit(par(oldpar))   # restore the graphical parameters on exit
      par(mar=c(0, 0, 0, 0))
      plot(x=c(0, 1), y=c(0, 1), axes=FALSE,
           xaxt="n", yaxt="n", main="",
           xlab = "", ylab = "",
           xaxs="i", yaxs="i", type="n")
      text(x = 0.5, y=0.6, labels = "Load an image to be analyzed", col="red", cex = 1.6)
    } else {
      
      specify_decimal <- function(x, k) trimws(format(round(x, k), nsmall=k))
      
      bone <- BP_OpenImage(file=file$datapath, ijtiff = ijtiff)
      name <- attributes(bone)$name
      bone <- BP_DetectBackground(bone=bone, analysis="logistic", show.plot=FALSE)
      bone <- BP_DetectForeground(bone=bone, analysis="logistic", show.plot=FALSE)
      bone <- BP_DetectCenters(bone=bone, analysis="logistic", show.plot=FALSE)
      bone <- BP_EstimateCompactness(bone, analysis="logistic", rotation.angle=rotation, 
                                     center=center, cut.angle = angles, cut.distance = distances, 
                                     show.plot=FALSE)
      bone <- BP_FitMLCompactness(bone, analysis="logistic", silent=TRUE, 
                                  fixed.parameters = c(K1=1, K2=1), twosteps=TRUE, 
                                  fitted.parameters = c(P=0.5, S=0.1, Max=0.99, Min=0.01))
      # The fitted logistic parameters are used as starting values for the flexit fit
      fittedpar <- BP_GetFittedParameters(bone, analysis="logistic")
      # bone <- BP_FitMLCompactness(bone, analysis="logistic", silent=TRUE,
      #                             fixed.parameters = c(K1=0, K2=0),
      #                             fitted.parameters = c(fittedpar, Max=0.99, Min=0.01))
      # fittedpar <- BP_GetFittedParameters(bone, analysis="logistic")
      bone <- BP_DuplicateAnalysis(bone, from="logistic", to="flexit")
      bone <- BP_FitMLCompactness(bone, 
                                  fitted.parameters=c(fittedpar, K1=0.9, K2=0.9), 
                                  fixed.parameters=NULL, analysis="flexit", silent=TRUE, 
                                  twosteps=TRUE)
      # bone <- BP_FitBayesianCompactness(bone, analysis="flexit")
      # mcmc <- RM_get(bone, RMname = "flexit", value="mcmc")
      # fittedpar <- as.parameters(mcmc)
      # bone <- BP_FitMLCompactness(bone, 
      #                             fitted.parameters=fittedpar, 
      #                             fixed.parameters=NULL, analysis="flexit", silent=TRUE)
      
      outAIC <- compare_AIC(Logistic=BP_GetFittedParameters(bone, analysis="logistic", alloptim=TRUE), 
                            Flexit=BP_GetFittedParameters(bone, analysis="flexit", alloptim=TRUE), 
                            silent = TRUE)
      
      if (outAIC$DeltaAIC[1]==0) {
        # The logistic model is selected
        selected.model <- "logistic"
        output$ResultOut1 <- renderText(paste0("The selected model based on AIC is the logistic model. The probability that the logistic model is the best among the two tested is ", 
                                               specify_decimal(outAIC[1, 3], 3), "."))
      } else {
        # The flexit model is selected
        selected.model <- "flexit"
        output$ResultOut1 <- renderText(paste0("The selected model based on AIC is the flexit model. The probability that the flexit model is the best among the two tested is ", 
                                               specify_decimal(outAIC[2, 3], 3), "."))
      }
      bone <- BP_FitBayesianCompactness(bone, analysis=selected.model)
      bone <- BP_FitMLRadialCompactness(bone, analysis=selected.model, silent=TRUE, 
                                        twosteps=twosteps)
      
      output$DataOut <- renderTable(outAIC, rownames=TRUE, colnames = TRUE)
      output$TitleOut1 <- renderText("<h2><center>Analysis results</center></h2>")
      output$TitleOut2 <- renderText("<h3>Logistic and flexit model selection</h3>")
      output$TitleOut3 <- renderText("<h3>Global compactness</h3>")
      output$ResultOut2 <- renderText(paste0("<b>Observed compactness: </b>", 
                                             specify_decimal(RM_get(x=bone, RMname = selected.model, valuename = "global.compactness"), 3), 
                                             "<p><b>Modeled compactness by MCMC (2.5%, 50%, 97.5%): </b>", 
                                             specify_decimal(mean(RM_get(x=bone, RMname = selected.model, valuename = "mcmc")$quantiles["2.5%", ]), 3), 
                                             ", ", 
                                             specify_decimal(mean(RM_get(x=bone, RMname = selected.model, valuename = "mcmc")$quantiles["50%", ]), 3), 
                                             ", ", 
                                             specify_decimal(mean(RM_get(x=bone, RMname = selected.model, valuename = "mcmc")$quantiles["97.5%", ]), 3)
      ))
      output$GlobalOut <- renderTable(RM_get(x=bone, RMname=selected.model, valuename = "mcmc")$summary.table, 
                                      rownames=TRUE, colnames = TRUE, digits = 3)
      
      # nenv <- new.env()
      # assign("bone", bone, envir = nenv)
      # 
      # output$PlotModel <- renderPlot({
      #   plot(bone, analysis = selected.model, type="observations+model", CI = "MCMC")
      # }, env = nenv)
      
      # The bone object and the selected model are made available to the
      # download handler through the global environment
      bone <<- bone
      selected.model <<- selected.model
      
      output$PlotModel <- renderPlot({
        plot(bone, analysis = selected.model, type="observations+model", CI = "MCMC")
      })
      
      output$TitleOut4 <- renderText("<h3>Radial compactness</h3>")
      out1 <- RM_get(x=bone, RMname=selected.model, valuename = "optimRadial")
      ost <- out1$summary.table
      colnames(ost)[2] <- "sd"
      output$RadialOut1 <- renderTable(ost, rownames=TRUE, colnames = TRUE, digits = 3)
      delta <- (out1$angles[2]-out1$angles[1])/2
      tbl1 <- cbind(data.frame(Angle=paste0(specify_decimal(out1$angles-delta, 3), ";", 
                                            specify_decimal(out1$angles+delta, 3))), 
                    out1$synthesis)
      tbl1 <- cbind(tbl1, data.frame(Modeled=out1$radial.modeled.compactness, 
                                     Observed=out1$observed.compactness, 
                                     Observed.modeled=out1$observed.modeled.compactness))
      tbl1 <<- tbl1
      output$RadialOut2 <- renderTable(tbl1, rownames=FALSE, colnames = TRUE, hover = TRUE, digits=3)
      
      output$PlotModelRadial <- renderPlot({
        v <- NULL
        if (isolate(input$RadialVarMin)) v <- c(v, "Min")
        if (isolate(input$RadialVarMax)) v <- c(v, "Max")
        if (isolate(input$RadialVarP)) v <- c(v, "P")
        if (isolate(input$RadialVarS)) v <- c(v, "S")
        if (isolate(input$RadialVarK1)) v <- c(v, "K1")
        if (isolate(input$RadialVarK2)) v <- c(v, "K2")
        if (isolate(input$RadialVarTRC)) v <- c(v, "TRC")
        plot(bone, analysis = selected.model, type="radial", radial.variable = v)
      })
      
      plot(bone, analysis = selected.model, show.grid=TRUE)
    }
  })
  
  output$Plot <- renderPlot({
    outplotfn()
  })
  
  output$ExcelButton <- downloadHandler(
    filename = "Export.xlsx",
    content = function(file) {
      # writeLines(paste0(c("bonjour", "Hello"), collapse = "\n"), con=file)
      wb <- openxlsx::createWorkbook(creator = "author"
                                     , title = "title"
                                     , subject = "BoneProfileR report"
                                     , category = "")
      openxlsx::addWorksheet(
        wb=wb,
        sheetName="Global")
      openxlsx::addWorksheet(
        wb=wb,
        sheetName="Radial")
      out1 <- RM_get(x=bone, RMname=selected.model, valuename = "mcmc")
      out <- RM_list(x=bone, silent=TRUE)
      date <- out[[selected.model]]$timestamp
      openxlsx::writeData(
        wb=wb,
        sheet="Global",
        x="Global model of compactness", startCol = 1, startRow = 1)
      openxlsx::writeData(
        wb=wb,
        sheet="Global",
        x=date, startCol = 1, startRow = 2)
      openxlsx::writeData(
        wb=wb,
        sheet="Global",
        x=selected.model, startCol = 1, startRow = 3)
      openxlsx::writeData(
        wb=wb,
        sheet="Global",
        x="Observed compactness", startCol = 1, startRow = 4)
      openxlsx::writeData(
        wb=wb,
        sheet="Global",
        x=RM_get(x=bone, RMname = selected.model, valuename = "global.compactness"), 
        startCol = 2, startRow = 4)
      openxlsx::writeData(
        wb=wb,
        sheet="Global",
        x="Modeled compactness by MCMC (2.5%, 50%, 97.5%)", startCol = 1, startRow = 5)
      openxlsx::writeData(
        wb=wb,
        sheet="Global",
        x=mean(RM_get(x=bone, RMname = selected.model, valuename = "mcmc")$quantiles["2.5%", ]), 
        startCol = 2, startRow = 5)
      openxlsx::writeData(
        wb=wb,
        sheet="Global",
        x=mean(RM_get(x=bone, RMname = selected.model, valuename = "mcmc")$quantiles["50%", ]), 
        startCol = 3, startRow = 5)
      openxlsx::writeData(
        wb=wb,
        sheet="Global",
        x=mean(RM_get(x=bone, RMname = selected.model, valuename = "mcmc")$quantiles["97.5%", ]), 
        startCol = 4, startRow = 5)
      openxlsx::writeData(
        wb=wb,
        sheet="Global",
        x=out1$summary.table, startCol = 2, startRow = 10)
      openxlsx::writeData(
        wb=wb,
        sheet="Global",
        x=rownames(out1$summary.table), startCol = 1, startRow = 11)
      
      # The radial model
      openxlsx::writeData(
        wb=wb,
        sheet="Radial",
        x="Radial model of compactness", startCol = 1, startRow = 1)
      openxlsx::writeData(
        wb=wb,
        sheet="Radial",
        x=date, startCol = 1, startRow = 2)
      openxlsx::writeData(
        wb=wb,
        sheet="Radial",
        x=selected.model, startCol = 1, startRow = 3)
      openxlsx::writeData(
        wb=wb,
        sheet="Radial",
        x=RM_get(x=bone, RMname=selected.model, valuename = "optimRadial")$summary.table, 
        startCol = 2, startRow = 4)
      openxlsx::writeData(
        wb=wb,
        sheet="Radial",
        x="SD", startCol = 3, startRow = 4)
      openxlsx::writeData(
        wb=wb,
        sheet="Radial",
        x=rownames(RM_get(x=bone, RMname=selected.model, valuename = "optimRadial")$summary.table), 
        startCol = 1, startRow = 5)
      openxlsx::writeData(
        wb=wb,
        sheet="Radial",
        x=tbl1, startCol = 1, startRow = 13)
      
      openxlsx::saveWorkbook(wb, file = file, overwrite = TRUE)
    })
})
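# To run this application locally once BoneProfileR is installed, point shiny
# at the installed copy of this app (a hedged example; the app is shipped in
# the package's inst/shiny directory, which system.file() resolves):
# shiny::runApp(system.file("shiny", package = "BoneProfileR"))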
/scratch/gouwar.j/cran-all/cranData/BoneProfileR/inst/shiny/server.R
# library(shiny); runApp("/Users/marcgirondot/Documents/Espace_de_travail_R/_shiny/BoneProfileR")
# library(shiny); runApp("http://134.158.74.46/BoneProfileR/")

library(shiny)
package.BoneProfileR <- require('BoneProfileR')

version <- "2.4 build 766"

# 
# mycss <- "
# #plot-container {
#   position: relative;
# }
# #loading-spinner {
#   position: absolute;
#   left: 50%;
#   top: 50%;
#   z-index: -1;
#   margin-top: -33px;  /* half of the spinner's height */
#   margin-left: -33px; /* half of the spinner's width */
# }
# #plot.recalculating {
#   z-index: -2;
# }
# "

# Local copy of shiny's splitLayout using only base R helpers in place of
# rlang::list2() and %||%, so that no extra package is required
splitLayout <- function (..., cellWidths = NULL, cellArgs = list()) {
  children <- list(...)
  nms <- names(children)
  if (is.null(nms)) nms <- character(length(children))
  childIdx <- !nzchar(nms)
  attribs <- children[!childIdx]
  children <- children[childIdx]
  count <- length(children)
  if (length(cellWidths) == 0 || any(is.na(cellWidths))) {
    cellWidths <- sprintf("%.3f%%", 100/count)
  }
  cellWidths <- rep(cellWidths, length.out = count)
  cellWidths <- sapply(cellWidths, validateCssUnit)
  do.call(tags$div, c(list(class = "shiny-split-layout"), attribs, 
                      mapply(children, cellWidths, FUN = function(x, w) {
                        do.call(tags$div, c(list(style = sprintf("width: %s;", w)), cellArgs, list(x)))
                      }, SIMPLIFY = FALSE)))
}

# Define the UI of the application
fluidPage(
  titlePanel(h1("Bone Profile", img(src="Rlogo.png", height=40, width=40), align = "center"), 
             windowTitle = "Bone ProfileR"),
  # h1("Bone Profile", align = "center"), HTML('<h1><center>Bone Profile<img src="Rlogo.png"></center></h1>')),
  # h1("Bone Profile", align = "center")),
  p(HTML("<b><a href=\"https://max2.ese.u-psud.fr/epc/conservation/index.html\">Marc Girondot</a></b> - Laboratoire Ecologie, Systématique, Evolution"), align = "center"),
  p(HTML("Université Paris-Saclay, CNRS, AgroParisTech, France."), align = "center"),
  wellPanel(
    p(HTML("<strong>BoneProfileR is a scientific method and a software used to model bone sections for paleontological and ecological studies.</strong>"), align = "left"),
    p(paste0("This web server version v. ", version, " is a simplified version of the complete set of tools available as an R package.")),
    p(HTML("Open a bone section image, choose the options and click the 'Run the analysis' button."), align = "left")
  ),
  # Input controls for the analysis
  wellPanel(
    splitLayout(
      fileInput(inputId="FileOpen", 
                label="Choose a local file with an image of a bone section", 
                multiple = FALSE, 
                accept = c(".tif", ".png", ".jpg")),
      checkboxInput(inputId="ijtiff", label = "Use IJTiff package to import image?", value = FALSE)
      , cellWidths=c("70%", "30%")
    ),
    splitLayout(
      selectInput("center", label="Choose the center to be used"
                  , choices=list("Ontogenic center"="ontogenic", 
                                 "Section center"="section", 
                                 "Mineralized center"="mineralized", 
                                 "Unmineralized center"= "unmineralized")
                  , selected = "ontogenic", multiple = FALSE,
                  selectize = FALSE, size = NULL),
      textInput(inputId="rotation", label="Rotation angle", value="0")
      , cellWidths=c("50%", "50%")
    ),
    splitLayout(
      textInput(inputId="angles", label="Number of angles", value="60")
      , textInput(inputId="distances", label="Number of ribbons from center", value="100")
      , cellWidths=c("50%", "50%")
    ),
    splitLayout(
      checkboxInput(inputId="twosteps", 
                    label = "Use a two-step fit? A two-step fit takes around 5 minutes.", 
                    value = TRUE)
      , cellWidths=c("100%")
    ),
    p(HTML("<b>Choose the variables to plot for the radial analysis</b>"), align = "left"),
    splitLayout(
      checkboxInput(inputId="RadialVarP", label="P", value=TRUE)
      , checkboxInput(inputId="RadialVarS", label="S", value=FALSE)
      , checkboxInput(inputId="RadialVarMin", label="Min", value=FALSE)
      , checkboxInput(inputId="RadialVarMax", label="Max", value=FALSE)
      , checkboxInput(inputId="RadialVarK1", label="K1", value=FALSE)
      , checkboxInput(inputId="RadialVarK2", label="K2", value=FALSE)
      , checkboxInput(inputId="RadialVarTRC", label="Transitional Range of Compactness", value=TRUE)
      , cellWidths=c("10%", "10%", "10%", "10%", "10%", "10%", "40%")
    ),
    p("The Transitional Range of Compactness is the range of distances from the center where the compactness lies between 2.5% and 97.5% of the range between Min and Max.")
    , actionButton(inputId="goButton", label="Run the analysis", width="30%", 
                   icon("paper-plane"), 
                   style="color: #fff; background-color: #337ab7; border-color: #2e6da4"),
    p(HTML("Be patient: the analysis takes from around 40 seconds to more than 5 minutes if you use the two-step fit."), align = "left")
  ),
  p(""),
  wellPanel(
    htmlOutput(outputId="TitleOut1"),
    plotOutput("Plot"),
    # tags$head(tags$style(HTML(mycss)))
    # , div(id = "plot-container",
    #       tags$img(src = "spinner.gif",
    #                id = "loading-spinner"),
    #       plotOutput("Plot")
    # ),
    htmlOutput(outputId="TitleOut2"),
    tableOutput(outputId="DataOut"),
    htmlOutput(outputId="ResultOut1"),
    htmlOutput(outputId="TitleOut3"),
    htmlOutput(outputId="ResultOut2"),
    tableOutput(outputId="GlobalOut"),
    plotOutput("PlotModel"),
    htmlOutput(outputId="TitleOut4"),
    tableOutput(outputId="RadialOut1"),
    tableOutput(outputId="RadialOut2"),
    plotOutput("PlotModelRadial"),
    downloadButton("ExcelButton", "Export formatted data for Excel")
  ),
  HTML("<small><i><font color='#006699'>The Virtual Data initiative, run by LABEX P2IO and supported by Université Paris-Saclay, is thanked for providing computing resources on its cloud infrastructure.</font></i></small>")
)
/scratch/gouwar.j/cran-all/cranData/BoneProfileR/inst/shiny/ui.R
BKF <- function(Y, net, p, obsModel = NA) {
    
    n.genes <- length(net$genes)
    # Y has one row per gene; after transposition each row of Y_k is a time point
    Y_k <- t(Y)
    
    # Create the A matrix (all 2^n.genes possible Boolean states) based on the size of the network
    A <- as.matrix(expand.grid(rep(list(0:1), n.genes)))
    
    # Declare all the matrices used in the algorithm
    MSE <- vector('numeric')
    PDV <- matrix(0, nrow=2^n.genes, ncol = nrow(Y_k))
    Beta <- matrix(0, nrow=2^n.genes, ncol = nrow(Y_k))
    Xhat <- matrix(0, nrow=n.genes, ncol = nrow(Y_k))
    
    M <- generateM(A, p, net)
    
    type <- toupper(obsModel[[1]])
    if (!(type == "BERNOULLI" | type == "GAUSSIAN" | type == "POISSON" | type == "NB")) {
      stop('Invalid observation model specified')
    }
    
    # Run the recursive BKF algorithm starting from a uniform distribution;
    # the identity update matrix is used as long as no observation is available
    Tmat <- diag(2^n.genes)
    PDV[,1] <- rep(1/2^n.genes, 2^n.genes)
    
    for (k in 2:nrow(Y_k)) {
      
      if (!any(is.na(Y_k[k,]))) {
        if (type == 'BERNOULLI') {
          if (length(obsModel[[2]]) != 1) {
            stop('Bernoulli noise selected, however other observation model noise parameters are defined. See vignette for more information')
          }
          Tmat <- diag(update(Y_k[k,], obsModel[[2]], A))
        }
        if (type == "GAUSSIAN") {
          if (length(obsModel[[2]]) != 4) {
            stop('Gaussian noise selected, however incorrect observation model noise parameters are defined. See vignette for more information')
          }
          Tmat <- diag(updateGaussian(Y_k[k,], A, obsModel[[2]][1], obsModel[[2]][2], obsModel[[2]][3], obsModel[[2]][4]))
        }
        if (type == "POISSON") {
          # obsModel[[4]] must contain one value per gene, as checked in BKS and SIR_BKF
          if (!is.list(obsModel) | length(obsModel[[4]]) != n.genes) {
            stop('Poisson observation model selected, however incorrect observation model noise parameters are defined. Ensure obsModel is of type LIST. See vignette for more information')
          }
          Tmat <- diag(updatePoisson(Y_k[k,], A, obsModel[[2]], obsModel[[3]], obsModel[[4]]))
        }
        if (type == "NB") {
          if (!is.list(obsModel) | length(obsModel[[4]]) != n.genes | length(obsModel[[5]]) != n.genes) {
            stop('Negative Binomial model selected, however incorrect observation model noise parameters are defined. Ensure obsModel is of type LIST. See vignette for more information')
          }
          Tmat <- diag(updateNegativeBinom(Y_k[k,], A, obsModel[[2]], obsModel[[3]], obsModel[[4]], obsModel[[5]]))
        }
      }
      
      # Prediction and update steps of the Boolean Kalman filter
      Beta[,k] <- Tmat%*%M%*%PDV[,k-1]
      PDV[,k] <- Beta[,k]/sum(Beta[,k])
      Xhat[,k] <- t(A)%*%PDV[,k]
      MSE[k] <- sum(abs(Xhat[,k] - round(Xhat[,k])))
    }
    
    list(Xhat = round(Xhat), MSE = MSE, Beta = colSums(Beta))
}
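# A minimal, hedged usage sketch for BKF, wrapped in `if (FALSE)` so it is
# never executed at load time. The `p53net_DNAdsb0` dataset is assumed to be an
# example BoolNet network shipped with BoolFilter; the random Y matrix stands
# in for real noisy time-course observations (one row per gene, one column per
# time point, as expected by the t(Y) call above).
if (FALSE) {
  library(BoolFilter)
  data(p53net_DNAdsb0)
  n.genes <- length(p53net_DNAdsb0$genes)
  # Fake Bernoulli-noise observations over 50 time points
  Y <- matrix(rbinom(n.genes * 50, 1, 0.5), nrow = n.genes)
  res <- BKF(Y, net = p53net_DNAdsb0, p = 0.02,
             obsModel = list('Bernoulli', 0.02))
  res$Xhat  # estimated Boolean states, one column per time point
}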
/scratch/gouwar.j/cran-all/cranData/BoolFilter/R/BKF.r
BKS <- function(Y, net, p, obsModel = NA) {
    
    n.genes <- length(net$genes)
    
    # Create the A matrix (all 2^n.genes possible Boolean states) based on the size of the network
    A <- as.matrix(expand.grid(rep(list(0:1), n.genes)))
    
    Y_k <- t(Y)
    lastT <- nrow(Y_k)
    
    # Declare all the matrices used in the algorithm
    MSE <- vector('numeric')
    PDV <- matrix(0, nrow=2^n.genes, ncol = lastT)
    PV <- matrix(0, nrow=2^n.genes, ncol = lastT)
    PDVS <- matrix(0, nrow=2^n.genes, ncol = lastT)
    DDV <- matrix(0, nrow=2^n.genes, ncol = lastT)
    DV <- matrix(0, nrow=2^n.genes, ncol = lastT)
    Beta <- matrix(0, nrow=2^n.genes, ncol = lastT)
    XhatS <- matrix(0, nrow=n.genes, ncol = lastT)
    
    type <- toupper(obsModel[[1]])
    if (!(type == "BERNOULLI" | type == "GAUSSIAN" | type == "POISSON" | type == "NB")) {
      stop('Invalid observation model specified')
    }
    
    # Build the diagonal update matrix for the observation at one time point
    # (shared by the forward and backward passes)
    updateMatrix <- function(y) {
      if (type == "BERNOULLI") {
        if (length(obsModel[[2]]) != 1) {
          stop('Bernoulli noise selected, however other observation model noise parameters are defined. See vignette for more information')
        }
        return(diag(update(y, obsModel[[2]], A)))
      }
      if (type == "GAUSSIAN") {
        if (length(obsModel[[2]]) != 4) {
          stop('Gaussian noise selected, however incorrect observation model noise parameters are defined. See vignette for more information')
        }
        return(diag(updateGaussian(y, A, obsModel[[2]][1], obsModel[[2]][2], obsModel[[2]][3], obsModel[[2]][4])))
      }
      if (type == "POISSON") {
        if (!is.list(obsModel) | length(obsModel[[4]]) != n.genes) {
          stop('Poisson observation model selected, however incorrect observation model noise parameters are defined. Ensure obsModel is of type LIST. See vignette for more information')
        }
        return(diag(updatePoisson(y, A, obsModel[[2]], obsModel[[3]], obsModel[[4]])))
      }
      # type == "NB"
      if (!is.list(obsModel) | length(obsModel[[4]]) != n.genes | length(obsModel[[5]]) != n.genes) {
        stop('Negative Binomial model selected, however incorrect observation model noise parameters are defined. Ensure obsModel is of type LIST. See vignette for more information')
      }
      diag(updateNegativeBinom(y, A, obsModel[[2]], obsModel[[3]], obsModel[[4]], obsModel[[5]]))
    }
    
    # Forward pass: filtered (PDV) and predicted (PV) distribution vectors
    M <- generateM(A, p, net)
    PDV[,1] <- rep(1/2^n.genes, 2^n.genes)
    PV[,1] <- M%*%PDV[,1]
    
    for (k in 2:lastT) {
      Tmat <- updateMatrix(Y_k[k,])
      Beta[,k] <- Tmat%*%PV[,k-1]
      PDV[,k] <- Beta[,k]/sum(Beta[,k])
      PV[,k] <- M%*%PDV[,k]
    }
    
    # Backward pass: the recursion is initialized with the update matrix of the
    # last time point (Tmat as left by the forward loop)
    DV[,(lastT-1)] <- Tmat%*%rep(1/2^n.genes, 2^n.genes)
    
    for (k in (lastT-1):2) {
      DDV[,k] <- t(M)%*%DV[,k]
      Tmat <- updateMatrix(Y_k[k,])
      DV[,(k-1)] <- Tmat%*%DDV[,k]
    }
    
    # Combine the forward and backward quantities into smoothed estimates
    for (S in 2:lastT) {
      PDVS[,S] <- (PV[,(S-1)]*DV[,(S-1)])/sum(PV[,(S-1)]*DV[,(S-1)])
      XhatS[,S] <- round(t(A)%*%PDVS[,S])
      MSE[S] <- sum(abs(t(A)%*%PDVS[,S] - round(t(A)%*%PDVS[,S])))
    }
    
    list(Xhat = round(XhatS), MSE = MSE)
}
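# A hedged usage sketch for BKS (never executed). The call signature is the
# same as BKF; BKS additionally runs the backward pass, so the estimate at each
# time point uses the whole observation sequence. The `p53net_DNAdsb0` dataset
# name is an assumption (an example network believed to ship with BoolFilter).
if (FALSE) {
  library(BoolFilter)
  data(p53net_DNAdsb0)
  n.genes <- length(p53net_DNAdsb0$genes)
  Y <- matrix(rbinom(n.genes * 50, 1, 0.5), nrow = n.genes)
  smoothed <- BKS(Y, net = p53net_DNAdsb0, p = 0.02,
                  obsModel = list('Bernoulli', 0.02))
  smoothed$Xhat  # smoothed Boolean state estimates
}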
/scratch/gouwar.j/cran-all/cranData/BoolFilter/R/BKS.r
MMAE <- function(data, net, p, threshold, Prior = NA, obsModel = NA) {
  
  if (all(is.na(Prior))) {
    Prior <- rep(1/(length(p)*length(net)), length(p)*length(net))
  }
  
  if (length(Prior) != (length(p)*length(net))) {
    cat("Error: The length of Prior does not match the number of possible models.")
    cat("\n")
    cat(" A prior probability should be given for each of the ")
    cat(length(p)*length(net))
    cat(" different models.")
    return(invisible(NULL))
  }
  
  # All combinations of candidate process noise (p) and candidate networks
  diff_model <- as.matrix(expand.grid(c(list(p), list(net))))
  
  # Run the BKF for each candidate model and collect the unnormalized Beta
  Bt <- c()
  for (l in 1:nrow(diff_model)) {
    Bt <- rbind(Bt, BKF(data$Y, get(diff_model[l,2]), as.numeric(diff_model[l,1]), obsModel)$Beta)
  }
  
  # Recursive update of the posterior probability of each candidate model
  Pr <- matrix(0, nrow=nrow(Bt), ncol=ncol(Bt))
  Pr[,1] <- Prior
  for (k in 2:ncol(Bt)) {
    nPr <- (Bt[,k]*Pr[,k-1])/sum(Bt[,k]*Pr[,k-1])
    if (sum(is.na(nPr)) != 0) {
      nPr <- Pr[,k-1]
    }
    Pr[,k] <- nPr
  }
  
  Pr_selec <- apply(Pr, 2, max)
  Model_selec <- apply(Pr, 2, which.max)
  
  # Stop the algorithm if the posterior probability of any model exceeds the
  # user-defined threshold
  Stop <- 0
  if (max(Pr_selec) > threshold) {
    Stop <- 1
    temp <- which(cumsum(Pr_selec > threshold) == 1)
    
    cat(" The inferred model is")
    cat("\n")
    if (length(p) > 1) {
      cat(" p = ")
      cat(diff_model[Model_selec[temp[1]],1])
    }
    if (length(net) > 1) {
      cat("\n")
      cat(" Net = ")
      cat(diff_model[Model_selec[temp[1]],2])
    }
    cat("\n")
    cat(" The selected model was inferred from ")
    cat(temp[1])
    cat(" data points.")
    
    plot(Pr[Model_selec[temp[1]],1:temp[1]], type="l", lty=1, col="blue", lwd=4.5,
         xlab="Time", ylab="Posterior Probability of the Selected Model",
         cex.axis = 1.3, pch=50, cex.lab = 1.3, ylim=c(0,1))
    abline(h=threshold)
  }
  
  if (Stop == 0) {
    cat(" A decision could not be made given the input data.")
  }
}
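# A hedged usage sketch for MMAE (never executed). As the code above shows,
# `data` must carry a $Y observation matrix, `net` is a character vector naming
# candidate network objects accessible via get(), and `p` is a vector of
# candidate process-noise values; the `p53net_DNAdsb0` dataset name is an
# assumption.
if (FALSE) {
  library(BoolFilter)
  data(p53net_DNAdsb0)
  n.genes <- length(p53net_DNAdsb0$genes)
  obs <- list(Y = matrix(rbinom(n.genes * 100, 1, 0.5), nrow = n.genes))
  # Two candidate models: the same network with two candidate noise levels
  MMAE(obs, net = "p53net_DNAdsb0", p = c(0.02, 0.1),
       threshold = 0.8, obsModel = list('Bernoulli', 0.02))
}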
/scratch/gouwar.j/cran-all/cranData/BoolFilter/R/MMAE.r
##############
# Algorithm for the particle filter (SIR)
##############

SIR_BKF <- function(Y, N, alpha, net, p, obsModel = NA) {
  
  ngenes <- length(net$genes)
  Yk <- t(Y)
  
  A <- as.matrix(expand.grid(rep(list(0:1), ngenes)))
  
  # Draw the initial particles uniformly from the 2^ngenes possible states
  a <- rmultinom(1, N, (1/N)*rep(1, 2^ngenes))
  b <- which(a > 0)
  Sx <- rep(b, a[b])
  Xo <- A[Sx,]
  
  PDV <- matrix(0, nrow=2^ngenes, ncol = nrow(Yk))
  MSE_PF <- vector('numeric')
  Xhat <- matrix(0, nrow=ngenes, ncol = nrow(Yk))
  
  # Resampling threshold on the effective sample size
  NT <- alpha*N
  
  X_k <- Xo
  W <- rep(1/N, N)
  
  type <- toupper(obsModel[[1]])
  if (!(type == "BERNOULLI" | type == "GAUSSIAN" | type == "POISSON" | type == "NB")) {
    stop('Invalid observation model specified')
  }
  
  for (k in 2:nrow(Yk)) {
    
    # Propagate the particles through the noisy Boolean network
    Xk <- abs(step.forward(X_k, p, net))
    
    if (!any(is.na(Yk[k,]))) {
      if (type == "BERNOULLI") {
        if (length(obsModel[[2]]) != 1) {
          stop('Bernoulli noise selected, however other observation model noise parameters are defined. See vignette for more information')
        }
        Qh <- update(Yk[k,], obsModel[[2]], Xk)
      }
      if (type == "GAUSSIAN") {
        if (length(obsModel[[2]]) != 4) {
          stop('Gaussian noise selected, however incorrect observation model noise parameters are defined. See vignette for more information')
        }
        Qh <- updateGaussian(Yk[k,], Xk, obsModel[[2]][1], obsModel[[2]][2], obsModel[[2]][3], obsModel[[2]][4])
      }
      if (type == "POISSON") {
        if (!is.list(obsModel) | length(obsModel[[4]]) != ngenes) {
          stop('Poisson observation model selected, however incorrect observation model noise parameters are defined. Ensure obsModel is of type LIST. See vignette for more information')
        }
        Qh <- updatePoisson(Yk[k,], Xk, obsModel[[2]], obsModel[[3]], obsModel[[4]])
      }
      if (type == "NB") {
        if (!is.list(obsModel) | length(obsModel[[4]]) != ngenes | length(obsModel[[5]]) != ngenes) {
          stop('Negative Binomial model selected, however incorrect observation model noise parameters are defined. Ensure obsModel is of type LIST. See vignette for more information')
        }
        Qh <- updateNegativeBinom(Yk[k,], Xk, obsModel[[2]], obsModel[[3]], obsModel[[4]], obsModel[[5]])
      }
    }
    
    # Weight update: previous weights times the likelihood, normalized
    Qn <- (Qh*W)/sum(Qh*W)
    N_eff <- 1/sum(Qn^2)
    
    # Accumulate the weighted particle counts into the distribution vector
    S <- rowmatch(A, Xk)
    for (i in 1:N) {
      PDV[S[i], k] <- PDV[S[i], k] + Qn[i]
    }
    MSE_PF[k] <- sum(abs(t(A)%*%PDV[,k] - round(t(A)%*%PDV[,k])))
    
    # Resample step
    if (N_eff < NT) {
      a <- rmultinom(1, N, Qn)
      b <- which(a > 0)
      Sx <- rep(b, a[b])
      Xk1 <- Xk[Sx,]
      Qn <- rep(1/N, N)
      Xk <- abs(Xk1)
    }
    
    W <- Qn
    Xhat[,k] <- round(t(A)%*%PDV[,k])
    X_k <- Xk
  }
  
  list(MSE = MSE_PF, Xhat = round(Xhat))
}
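# A hedged usage sketch for SIR_BKF (never executed). N is the number of
# particles and alpha sets the resampling threshold: resampling is triggered
# when the effective sample size drops below alpha * N. The `p53net_DNAdsb0`
# dataset name is an assumption.
if (FALSE) {
  library(BoolFilter)
  data(p53net_DNAdsb0)
  n.genes <- length(p53net_DNAdsb0$genes)
  Y <- matrix(rbinom(n.genes * 50, 1, 0.5), nrow = n.genes)
  pf <- SIR_BKF(Y, N = 1000, alpha = 0.5, net = p53net_DNAdsb0,
                p = 0.02, obsModel = list('Bernoulli', 0.02))
  pf$Xhat  # particle-filter estimates of the Boolean states
}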
/scratch/gouwar.j/cran-all/cranData/BoolFilter/R/SIR_BKF.r