content
stringlengths 0
14.9M
| filename
stringlengths 44
136
|
---|---|
#' Select a Two-Stage Binary Outcome Misclassification Model for a Given Prior
#'
#' @param prior A character string specifying the prior distribution for the
#' \eqn{\beta}, \eqn{\gamma}, and \eqn{\delta} parameters. Options are \code{"t"},
#' \code{"uniform"}, \code{"normal"}, or \code{"dexp"} (double Exponential, or Weibull).
#'
#' @return \code{model_picker} returns a character string specifying the two-stage binary
#' outcome misclassification model to be turned into a .BUG file and used
#' for MCMC estimation with \code{rjags}.
#'
model_picker_2stage <- function(prior){
unif_modelstring = "
model{
# likelihood
for(i in 1:sample_size){
Y_star[i] ~ dcat(pi_star_obs[i, 1:n_cat])
Y_tilde[i] ~ dcat(pi_tilde_obs[i, 1:n_cat])
# regression
for(j in 1:n_cat){
log(phi[i, j]) <- beta[j,1:dim_x] %*% x[i,1:dim_x]
pi[i, j] <- phi[i, j] / sum(phi[i, 1:n_cat])
}
for(k in 1:n_cat){
for(j in 1:n_cat){
log(phistar[i, k, j]) <- gamma[k, j, 1:dim_z] %*% z[i,1:dim_z]
pistar[i, k, j] <- phistar[i, k, j] / (sum(phistar[i, 1:n_cat, j]))
}
pi_star_obs[i, k] <- sum(pistar[i, k, 1:n_cat] * pi[i, 1:n_cat])
}
for(l in 1:n_cat){
for(k in 1:n_cat){
for(j in 1:n_cat){
log(phitilde[i, l, k, j]) <- delta[l, k, j, 1:dim_v] %*% v[i, 1:dim_v]
pitilde[i, l, k, j] <- phitilde[i, l, k, j] / (sum(phitilde[i, 1:n_cat, k, j]))
}
pi_tilde_obs1[i, l, k] <- sum(pitilde[i, l, k, 1:n_cat] * pistar[i, k, 1:n_cat] * pi[i, 1:n_cat])
}
pi_tilde_obs[i, l] <- sum(pi_tilde_obs1[i, l, 1:n_cat])
}
}
# reference categories
#beta[n_cat, 1:dim_x] <- 0
#gamma[n_cat, 1:n_cat, 1:dim_z] <- 0
# priors
for(p in 1:dim_x){
beta[1, p] ~ dunif(unif_l_beta[1, p], unif_u_beta[1, p])
beta[2, p] <- 0
}
for(m in 1:n_cat){
for(n in 1:dim_z){
gamma[1, m, n] ~ dunif(unif_l_gamma[1, m, n], unif_u_gamma[1, m, n])
gamma[2, m, n] <- 0
}
}
for(q in 1:n_cat){
for(r in 1:n_cat){
for(s in 1:dim_v){
delta[1, q, r, s] ~ dunif(unif_l_delta[1, q, r, s], unif_u_delta[1, q, r, s])
delta[2, q, r, s] <- 0
}
}
}
}
"
t_modelstring = "
model{
# likelihood
for(i in 1:sample_size){
Y_star[i] ~ dcat(pi_star_obs[i, 1:n_cat])
Y_tilde[i] ~ dcat(pi_tilde_obs[i, 1:n_cat])
# regression
for(j in 1:n_cat){
log(phi[i, j]) <- beta[j,1:dim_x] %*% x[i,1:dim_x]
pi[i, j] <- phi[i, j] / sum(phi[i, 1:n_cat])
}
for(k in 1:n_cat){
for(j in 1:n_cat){
log(phistar[i, k, j]) <- gamma[k, j, 1:dim_z] %*% z[i,1:dim_z]
pistar[i, k, j] <- phistar[i, k, j] / (sum(phistar[i, 1:n_cat, j]))
}
pi_star_obs[i, k] <- sum(pistar[i, k, 1:n_cat] * pi[i, 1:n_cat])
}
for(l in 1:n_cat){
for(k in 1:n_cat){
for(j in 1:n_cat){
log(phitilde[i, l, k, j]) <- delta[l, k, j, 1:dim_v] %*% v[i, 1:dim_v]
pitilde[i, l, k, j] <- phitilde[i, l, k, j] / (sum(phitilde[i, 1:n_cat, k, j]))
}
pi_tilde_obs1[i, l, k] <- sum(pitilde[i, l, k, 1:n_cat] * pistar[i, k, 1:n_cat] * pi[i, 1:n_cat])
}
pi_tilde_obs[i, l] <- sum(pi_tilde_obs1[i, l, 1:n_cat])
}
}
# reference categories
#beta[n_cat, 1:dim_x] <- 0
#gamma[n_cat, 1:n_cat, 1:dim_z] <- 0
# priors
for(p in 1:dim_x){
beta[1, p] ~ dt(t_mu_beta[1,p], t_tau_beta[1,p], t_df_beta[1,p])
beta[2, p] <- 0
}
for(m in 1:n_cat){
for(n in 1:dim_z){
gamma[1, m, n] ~ dt(t_mu_gamma[1,m,n], t_tau_gamma[1,m,n], t_df_gamma[1,m,n])
gamma[2, m, n] <- 0
}
}
for(q in 1:n_cat){
for(r in 1:n_cat){
for(s in 1:dim_v){
delta[1, q, r, s] ~ dt(t_mu_delta[1, q, r, s], t_tau_delta[1, q, r, s], t_df_delta[1, q, r, s])
delta[2, q, r, s] <- 0
}
}
}
}
"
normal_modelstring = "
model{
# likelihood
for(i in 1:sample_size){
Y_star[i] ~ dcat(pi_star_obs[i, 1:n_cat])
Y_tilde[i] ~ dcat(pi_tilde_obs[i, 1:n_cat])
# regression
for(j in 1:n_cat){
log(phi[i, j]) <- beta[j,1:dim_x] %*% x[i,1:dim_x]
pi[i, j] <- phi[i, j] / sum(phi[i, 1:n_cat])
}
for(k in 1:n_cat){
for(j in 1:n_cat){
log(phistar[i, k, j]) <- gamma[k, j, 1:dim_z] %*% z[i,1:dim_z]
pistar[i, k, j] <- phistar[i, k, j] / (sum(phistar[i, 1:n_cat, j]))
}
pi_star_obs[i, k] <- sum(pistar[i, k, 1:n_cat] * pi[i, 1:n_cat])
}
for(l in 1:n_cat){
for(k in 1:n_cat){
for(j in 1:n_cat){
log(phitilde[i, l, k, j]) <- delta[l, k, j, 1:dim_v] %*% v[i, 1:dim_v]
pitilde[i, l, k, j] <- phitilde[i, l, k, j] / (sum(phitilde[i, 1:n_cat, k, j]))
}
pi_tilde_obs1[i, l, k] <- sum(pitilde[i, l, k, 1:n_cat] * pistar[i, k, 1:n_cat] * pi[i, 1:n_cat])
}
pi_tilde_obs[i, l] <- sum(pi_tilde_obs1[i, l, 1:n_cat])
}
}
# reference categories
#beta[n_cat, 1:dim_x] <- 0
#gamma[n_cat, 1:n_cat, 1:dim_z] <- 0
# priors
for(p in 1:dim_x){
beta[1, p] ~ dnorm(normal_mu_beta[1, p], normal_sigma_beta[1, p])
beta[2, p] <- 0
}
for(m in 1:n_cat){
for(n in 1:dim_z){
gamma[1, m, n] ~ dnorm(normal_mu_gamma[1, m, n], normal_sigma_gamma[1, m, n])
gamma[2, m, n] <- 0
}
}
for(q in 1:n_cat){
for(r in 1:n_cat){
for(s in 1:dim_v){
delta[1, q, r, s] ~ dnorm(normal_mu_delta[1, q, r, s], normal_sigma_delta[1, q, r, s])
delta[2, q, r, s] <- 0
}
}
}
}
"
dexp_modelstring = "
model{
# likelihood
for(i in 1:sample_size){
Y_star[i] ~ dcat(pi_star_obs[i, 1:n_cat])
Y_tilde[i] ~ dcat(pi_tilde_obs[i, 1:n_cat])
# regression
for(j in 1:n_cat){
log(phi[i, j]) <- beta[j,1:dim_x] %*% x[i,1:dim_x]
pi[i, j] <- phi[i, j] / sum(phi[i, 1:n_cat])
}
for(k in 1:n_cat){
for(j in 1:n_cat){
log(phistar[i, k, j]) <- gamma[k, j, 1:dim_z] %*% z[i,1:dim_z]
pistar[i, k, j] <- phistar[i, k, j] / (sum(phistar[i, 1:n_cat, j]))
}
pi_star_obs[i, k] <- sum(pistar[i, k, 1:n_cat] * pi[i, 1:n_cat])
}
for(l in 1:n_cat){
for(k in 1:n_cat){
for(j in 1:n_cat){
log(phitilde[i, l, k, j]) <- delta[l, k, j, 1:dim_v] %*% v[i, 1:dim_v]
pitilde[i, l, k, j] <- phitilde[i, l, k, j] / (sum(phitilde[i, 1:n_cat, k, j]))
}
pi_tilde_obs1[i, l, k] <- sum(pitilde[i, l, k, 1:n_cat] * pistar[i, k, 1:n_cat] * pi[i, 1:n_cat])
}
pi_tilde_obs[i, l] <- sum(pi_tilde_obs1[i, l, 1:n_cat])
}
}
# reference categories
#beta[n_cat, 1:dim_x] <- 0
#gamma[n_cat, 1:n_cat, 1:dim_z] <- 0
# priors
for(l in 1:dim_x){
beta[1, l] ~ ddexp(dexp_mu_beta[1, l], dexp_b_beta[1, l])
beta[2, l] <- 0
}
for(m in 1:n_cat){
for(n in 1:dim_z){
gamma[1, m, n] ~ ddexp(dexp_mu_gamma[1, m, n], dexp_b_gamma[1, m, n])
gamma[2, m, n] <- 0
}
}
for(q in 1:n_cat){
for(r in 1:n_cat){
for(s in 1:dim_v){
delta[1, q, r, s] ~ ddexp(dexp_mu_delta[1, q, r, s], dexp_b_delta[1, q, r, s])
delta[2, q, r, s] <- 0
}
}
}
}
"
selected_model = ifelse(prior == "t", t_modelstring,
ifelse(prior == "uniform", unif_modelstring,
ifelse(prior == "normal", normal_modelstring,
ifelse(prior == "dexp", dexp_modelstring,
stop("Please select a prior distribution.")))))
return(selected_model)
}
|
/scratch/gouwar.j/cran-all/cranData/COMBO/R/model_picker_2stage.R
|
#' Set up a Naive Logistic Regression \code{jags.model} Object for a Given Prior
#'
#' @param prior character string specifying the prior distribution for the naive
#' \eqn{\beta} parameters. Options are \code{"t"},
#' \code{"uniform"}, \code{"normal"}, or \code{"dexp"} (double Exponential, or Weibull).
#' @param sample_size An integer value specifying the number of observations in the sample.
#' @param dim_x An integer specifying the number of columns of the design matrix of the true outcome mechanism, \code{X}.
#' @param n_cat An integer specifying the number of categorical values that the true outcome, \code{Y},
#' and the observed outcome, \code{Y*} can take.
#' @param Ystar A numeric vector of indicator variables (1, 2) for the observed
#' outcome \code{Y*}. The reference category is 2.
#' @param X A numeric design matrix for the true outcome mechanism.
#' @param beta_prior_parameters A numeric list of prior distribution parameters
#' for the \eqn{\beta} terms. For prior distributions \code{"t"},
#' \code{"uniform"}, \code{"normal"}, or \code{"dexp"}, the first element of the
#' list should contain a matrix of location, lower bound, mean, or shape parameters,
#' respectively, for \eqn{\beta} terms.
#' For prior distributions \code{"t"},
#' \code{"uniform"}, \code{"normal"}, or \code{"dexp"}, the second element of the
#' list should contain a matrix of shape, upper bound, standard deviation, or scale parameters,
#' respectively, for \eqn{\beta} terms.
#' For prior distribution \code{"t"}, the third element of the list should contain
#' a matrix of the degrees of freedom for \eqn{\beta} terms.
#' The third list element should be empty for all other prior distributions.
#' All matrices in the list should have dimensions \code{dim_x} X \code{n_cat}, and all
#' elements in the \code{n_cat} column should be set to \code{NA}.
#' @param number_MCMC_chains An integer specifying the number of MCMC chains to compute.
#' @param naive_model_file A .BUG file and used
#' for MCMC estimation with \code{rjags}.
#' @param display_progress A logical value specifying whether messages should be
#' displayed during model compilation. The default is \code{TRUE}.
#'
#' @return \code{naive_jags_picker} returns a \code{jags.model} object for a naive
#' logistic regression model predicting the potentially misclassified \code{Y*}
#' from the predictor matrix \code{x}. The object includes the specified
#' prior distribution, model, number of chains, and data.
#'
#' @importFrom stats rnorm rmultinom
#' @importFrom rjags jags.model
#'
naive_jags_picker <- function(prior, sample_size, dim_x, n_cat,
Ystar, X, beta_prior_parameters,
number_MCMC_chains,
naive_model_file,
display_progress = TRUE){
quiet_argument <- !display_progress
if (prior == "t") {
jags_object <- jags.model(
naive_model_file,
data = list(sample_size = sample_size,
dim_x = dim_x,
n_cat = n_cat,
obs_Y = Ystar,
x = X,
t_mu_beta = beta_prior_parameters[[1]],
t_tau_beta = beta_prior_parameters[[2]],
t_df_beta = beta_prior_parameters[[3]]),
n.chains = number_MCMC_chains,
quiet = quiet_argument)
} else if (prior == "uniform") {
jags_object <- jags.model(
naive_model_file,
data = list(sample_size = sample_size,
dim_x = dim_x,
n_cat = n_cat,
obs_Y = Ystar,
x = X,
unif_l_beta = beta_prior_parameters[[1]],
unif_u_beta = beta_prior_parameters[[2]]),
n.chains = number_MCMC_chains,
quiet = quiet_argument)
} else if (prior == "normal") {
jags_object <- jags.model(
naive_model_file,
data = list(sample_size = sample_size,
dim_x = dim_x,
n_cat = n_cat,
obs_Y = Ystar,
x = X,
normal_mu_beta = beta_prior_parameters[[1]],
normal_sigma_beta = beta_prior_parameters[[2]]),
n.chains = number_MCMC_chains,
quiet = quiet_argument)
} else if (prior == "dexp") {
jags_object <- jags.model(
naive_model_file,
data = list(sample_size = sample_size,
dim_x = dim_x,
n_cat = n_cat,
obs_Y = Ystar,
x = X,
dexp_mu_beta = beta_prior_parameters[[1]],
dexp_b_beta = beta_prior_parameters[[2]]),
n.chains = number_MCMC_chains,
quiet = quiet_argument)
} else { stop("Please select a model.")}
return(jags_object)
}
|
/scratch/gouwar.j/cran-all/cranData/COMBO/R/naive_jags_picker.R
|
#' Set up a Naive Two-Stage Regression \code{jags.model} Object for a Given Prior
#'
#' @param prior character string specifying the prior distribution for the naive
#' \eqn{\beta} parameters. Options are \code{"t"},
#' \code{"uniform"}, \code{"normal"}, or \code{"dexp"} (double Exponential, or Weibull).
#' @param sample_size An integer value specifying the number of observations in the sample.
#' @param dim_x An integer specifying the number of columns of the design matrix of the first-stage outcome mechanism, \code{X}.
#' @param dim_v An integer specifying the number of columns of the design matrix of the second-stage outcome mechanism, \code{V}.
#' @param n_cat An integer specifying the number of categorical values that
#' the observed outcomes can take.
#' @param Ystar A numeric vector of indicator variables (1, 2) for the first-stage observed
#' outcome \code{Y*}. The reference category is 2.
#' @param Ytilde A numeric vector of indicator variables (1, 2) for the second-stage observed
#' outcome \eqn{\tilde{Y}}. The reference category is 2.
#' @param X A numeric design matrix for the true outcome mechanism.
#' @param V A numeric design matrix for the second-stage outcome mechanism.
#' @param beta_prior_parameters A numeric list of prior distribution parameters
#' for the \eqn{\beta} terms. For prior distributions \code{"t"},
#' \code{"uniform"}, \code{"normal"}, or \code{"dexp"}, the first element of the
#' list should contain a matrix of location, lower bound, mean, or shape parameters,
#' respectively, for \eqn{\beta} terms.
#' For prior distributions \code{"t"},
#' \code{"uniform"}, \code{"normal"}, or \code{"dexp"}, the second element of the
#' list should contain a matrix of shape, upper bound, standard deviation, or scale parameters,
#' respectively, for \eqn{\beta} terms.
#' For prior distribution \code{"t"}, the third element of the list should contain
#' a matrix of the degrees of freedom for \eqn{\beta} terms.
#' The third list element should be empty for all other prior distributions.
#' All matrices in the list should have dimensions \code{dim_x} X \code{n_cat}, and all
#' elements in the \code{n_cat} column should be set to \code{NA}.
#' @param delta_prior_parameters A numeric list of prior distribution parameters
#' for the naive \eqn{\delta} terms. For prior distributions \code{"t"},
#' \code{"uniform"}, \code{"normal"}, or \code{"dexp"}, the first element of the
#' list should contain an array of location, lower bound, mean, or shape parameters,
#' respectively, for \eqn{\delta} terms.
#' For prior distributions \code{"t"},
#' \code{"uniform"}, \code{"normal"}, or \code{"dexp"}, the second element of the
#' list should contain an array of shape, upper bound, standard deviation, or scale parameters,
#' respectively, for \eqn{\delta} terms.
#' For prior distribution \code{"t"}, the third element of the list should contain
#' an array of the degrees of freedom for \eqn{\delta} terms.
#' The third list element should be empty for all other prior distributions.
#' All arrays in the list should have dimensions \code{n_cat} X \code{n_cat} X \code{dim_v},
#' and all elements in the \code{n_cat} row should be set to \code{NA}.
#' @param number_MCMC_chains An integer specifying the number of MCMC chains to compute.
#' @param naive_model_file A .BUG file and used
#' for MCMC estimation with \code{rjags}.
#' @param display_progress A logical value specifying whether messages should be
#' displayed during model compilation. The default is \code{TRUE}.
#'
#' @return \code{naive_jags_picker_2stage} returns a \code{jags.model} object for a naive
#' two-stage regression model predicting the potentially misclassified \code{Y*}
#' from the predictor matrix \code{x} and the potentially misclassified \eqn{\tilde{Y} | Y^*}
#' from the predictor matrix \code{v}. The object includes the specified
#' prior distribution, model, number of chains, and data.
#'
#' @importFrom stats rnorm rmultinom
#' @importFrom rjags jags.model
#'
naive_jags_picker_2stage <- function(prior, sample_size, dim_x, dim_v, n_cat,
Ystar, Ytilde, X, V,
beta_prior_parameters,
delta_prior_parameters,
number_MCMC_chains,
naive_model_file,
display_progress = TRUE){
quiet_argument <- !display_progress
if (prior == "t") {
jags_object <- jags.model(
naive_model_file,
data = list(sample_size = sample_size,
dim_x = dim_x,
dim_v = dim_v,
n_cat = n_cat,
Y_star = Ystar,
Y_tilde = Ytilde,
x = X,
v = V,
t_mu_beta = beta_prior_parameters[[1]],
t_tau_beta = beta_prior_parameters[[2]],
t_df_beta = beta_prior_parameters[[3]],
t_mu_delta = delta_prior_parameters[[1]],
t_tau_delta = delta_prior_parameters[[2]],
t_df_delta = delta_prior_parameters[[3]]),
n.chains = number_MCMC_chains,
quiet = quiet_argument)
} else if (prior == "uniform") {
jags_object <- jags.model(
naive_model_file,
data = list(sample_size = sample_size,
dim_x = dim_x,
dim_v = dim_v,
n_cat = n_cat,
Y_star = Ystar,
Y_tilde = Ytilde,
x = X,
v = V,
unif_l_beta = beta_prior_parameters[[1]],
unif_u_beta = beta_prior_parameters[[2]],
unif_l_delta = delta_prior_parameters[[1]],
unif_u_delta = delta_prior_parameters[[2]]),
n.chains = number_MCMC_chains,
quiet = quiet_argument)
} else if (prior == "normal") {
jags_object <- jags.model(
naive_model_file,
data = list(sample_size = sample_size,
dim_x = dim_x,
dim_v = dim_v,
n_cat = n_cat,
Y_star = Ystar,
Y_tilde = Ytilde,
x = X,
v = V,
normal_mu_beta = beta_prior_parameters[[1]],
normal_sigma_beta = beta_prior_parameters[[2]],
normal_mu_delta = delta_prior_parameters[[1]],
normal_sigma_delta = delta_prior_parameters[[2]]),
n.chains = number_MCMC_chains,
quiet = quiet_argument)
} else if (prior == "dexp") {
jags_object <- jags.model(
naive_model_file,
data = list(sample_size = sample_size,
dim_x = dim_x,
dim_v = dim_v,
n_cat = n_cat,
Y_star = Ystar,
Y_tilde = Ytilde,
x = X,
v = V,
dexp_mu_beta = beta_prior_parameters[[1]],
dexp_b_beta = beta_prior_parameters[[2]],
dexp_mu_delta = delta_prior_parameters[[1]],
dexp_b_delta = delta_prior_parameters[[2]]),
n.chains = number_MCMC_chains,
quiet = quiet_argument)
} else { stop("Please select a model.")}
return(jags_object)
}
|
/scratch/gouwar.j/cran-all/cranData/COMBO/R/naive_jags_picker_2stage.R
|
#' Observed Data Log-Likelihood Function for Estimation of the Naive Two-Stage Misclassification Model
#'
#' @param param_current A numeric vector of regression parameters, in the order
#' \eqn{\beta, \delta}. The \eqn{\delta} vector is obtained from the matrix form.
#' In matrix form, the gamma parameter matrix rows
#' correspond to parameters for the \eqn{\tilde{Y} = 1}
#' observed outcome, with the dimensions of \code{V}.
#' In matrix form, the gamma parameter matrix columns correspond to the true outcome categories
#' \eqn{j = 1, \dots,} \code{n_cat}. The numeric vector \code{delta_v} is
#' obtained by concatenating the delta matrix, i.e. \code{delta_v <- c(delta_matrix)}.
#' @param X A numeric design matrix for the first-stage observed mechanism.
#' @param V A numeric design matrix for the second-stage observed mechanism.
#' @param obs_Ystar_matrix A numeric matrix of indicator variables (0, 1) for the first-stage observed
#' outcome \code{Y*}. Rows of the matrix correspond to each subject. Columns of
#' the matrix correspond to each observed outcome category. Each row should contain
#' exactly one 0 entry and exactly one 1 entry.
#' @param obs_Ytilde_matrix A numeric matrix of indicator variables (0, 1) for the second-stage observed
#' outcome \eqn{\tilde{Y}}. Rows of the matrix correspond to each subject. Columns of
#' the matrix correspond to each observed outcome category. Each row should contain
#' exactly one 0 entry and exactly one 1 entry.
#' @param sample_size Integer value specifying the number of observations in the sample.
#' This value should be equal to the number of rows of the design matrix, \code{X} or \code{V}.
#' @param n_cat The number of categorical values that the first- and second-stage outcomes,
#' \eqn{Y^*} and \eqn{\tilde{Y}}, can take.
#'
#' @return \code{naive_loglik_2stage} returns the negative value of the observed data log-likelihood function,
#' \eqn{ \sum_{i = 1}^N \Bigl[ \sum_{k = 1}^2 \sum_{k = 1}^2 \sum_{\ell = 1}^2 y^*_{ik} \tilde{y_i} \text{log} \{ P(\tilde{Y}_{i} = \ell, Y^*_i = k | x_i, v_i) \}\Bigr]},
#' at the provided inputs.
#'
#' @include pi_compute.R
#' @include pistar_compute.R
#'
#' @importFrom stats rnorm rgamma rmultinom
#'
naive_loglik_2stage <- function(param_current,
X, V,
obs_Ystar_matrix, obs_Ytilde_matrix,
sample_size, n_cat){
beta_current = matrix(param_current[1:ncol(X)], ncol = 1)
delta_current = matrix(c(param_current[(ncol(X) + 1):(ncol(X) + (n_cat * ncol(V)))]),
ncol = n_cat, byrow = FALSE)
pi_k_obs = pi_compute(beta_current, X, sample_size, n_cat)
pi_kl_obs = pistar_compute(delta_current, V, sample_size, n_cat)
k1l1 = ifelse(obs_Ystar_matrix[,1] == 1 & obs_Ytilde_matrix[,1] == 1,
1, 0)
k1l2 = ifelse(obs_Ystar_matrix[,1] == 1 & obs_Ytilde_matrix[,2] == 1,
1, 0)
k2l1 = ifelse(obs_Ystar_matrix[,2] == 1 & obs_Ytilde_matrix[,1] == 1,
1, 0)
k2l2 = ifelse(obs_Ystar_matrix[,2] == 1 & obs_Ytilde_matrix[,2] == 1,
1, 0)
l1_index <- 1:sample_size
l2_index <- (sample_size + 1):(2 * sample_size)
k1l1_sum = k1l1 * log((pi_kl_obs[l1_index, 1] * pi_k_obs[,1]) +
(pi_kl_obs[l1_index, 2] * pi_k_obs[,2]))
k1l2_sum = k1l2 * log((pi_kl_obs[l2_index, 1] * pi_k_obs[,1]) +
(pi_kl_obs[l2_index, 2] * pi_k_obs[,2]))
k2l1_sum = k2l1 * log((pi_kl_obs[l1_index, 1] * pi_k_obs[,1]) +
(pi_kl_obs[l1_index, 2] * pi_k_obs[,2]))
k2l2_sum = k2l2 * log((pi_kl_obs[l2_index, 1] * pi_k_obs[,1]) +
(pi_kl_obs[l2_index, 2] * pi_k_obs[,2]))
loglik_return = -sum(k1l1_sum + k1l2_sum + k2l1_sum + k2l2_sum)
return(loglik_return)
}
|
/scratch/gouwar.j/cran-all/cranData/COMBO/R/naive_loglik_2stage.R
|
#' Select a Logisitic Regression Model for a Given Prior
#'
#' @param prior A character string specifying the prior distribution for the naive
#' \eqn{\beta} parameters. Options are \code{"t"},
#' \code{"uniform"}, \code{"normal"}, or \code{"dexp"} (double Exponential, or Weibull).
#'
#' @return \code{naive_model_picker} returns a character string specifying the
#' logistic regression model to be turned into a .BUG file and used
#' for MCMC estimation with \code{rjags}.
#'
naive_model_picker <- function(prior) {
unif_modelstring = "model{
# likelihood
for(i in 1:sample_size){
obs_Y[i] ~ dcat(pi_obs[i, 1:n_cat])
# regression
for(j in 1:n_cat){
log(phi[i, j]) <- beta[j,1:dim_x] %*% x[i,1:dim_x]
pi_obs[i, j] <- phi[i, j] / sum(phi[i, 1:n_cat])
}
}
# priors
for(l in 1:dim_x){
beta[1, l] ~ dunif(unif_l_beta[1, l], unif_u_beta[1, l])
beta[2, l] <- 0
}
}"
t_modelstring = "model{
# likelihood
for(i in 1:sample_size){
obs_Y[i] ~ dcat(pi_obs[i, 1:n_cat])
# regression
for(j in 1:n_cat){
log(phi[i, j]) <- beta[j,1:dim_x] %*% x[i,1:dim_x]
pi_obs[i, j] <- phi[i, j] / sum(phi[i, 1:n_cat])
}
}
# priors
for(l in 1:dim_x){
beta[1, l] ~ dt(t_mu_beta[1, l], t_tau_beta[1, l], t_df_beta[1, l])
beta[2, l] <- 0
}
}"
normal_modelstring = "model{
# likelihood
for(i in 1:sample_size){
obs_Y[i] ~ dcat(pi_obs[i, 1:n_cat])
# regression
for(j in 1:n_cat){
log(phi[i, j]) <- beta[j,1:dim_x] %*% x[i,1:dim_x]
pi_obs[i, j] <- phi[i, j] / sum(phi[i, 1:n_cat])
}
}
# priors
for(l in 1:dim_x){
beta[1, l] ~ dnorm(normal_mu_beta[1, l], normal_sigma_beta[1, l])
beta[2, l] <- 0
}
}"
dexp_modelstring = "model{
# likelihood
for(i in 1:sample_size){
obs_Y[i] ~ dcat(pi_obs[i, 1:n_cat])
# regression
for(j in 1:n_cat){
log(phi[i, j]) <- beta[j,1:dim_x] %*% x[i,1:dim_x]
pi_obs[i, j] <- phi[i, j] / sum(phi[i, 1:n_cat])
}
}
# priors
for(l in 1:dim_x){
beta[1, l] ~ ddexp(dexp_mu_beta[1, l], dexp_b_beta[1, l])
beta[2, l] <- 0
}
}"
selected_model = ifelse(prior == "t", t_modelstring,
ifelse(prior == "uniform", unif_modelstring,
ifelse(prior == "normal", normal_modelstring,
ifelse(prior == "dexp", dexp_modelstring,
stop("Please select a prior distribution.")))))
return(selected_model)
}
|
/scratch/gouwar.j/cran-all/cranData/COMBO/R/naive_model_picker.R
|
#' Select a Naive Two-Stage Regression Model for a Given Prior
#'
#' @param prior A character string specifying the prior distribution for the naive
#' \eqn{\beta} parameters. Options are \code{"t"},
#' \code{"uniform"}, \code{"normal"}, or \code{"dexp"} (double Exponential, or Weibull).
#'
#' @return \code{naive_model_picker_2stage} returns a character string specifying the
#' logistic regression model to be turned into a .BUG file and used
#' for MCMC estimation with \code{rjags}.
#'
naive_model_picker_2stage <- function(prior) {
unif_modelstring = "model{
# likelihood
for(i in 1:sample_size){
Y_star[i] ~ dcat(pi_obs[i, 1:n_cat])
Y_tilde[i] ~ dcat(pi2_obs[i, 1:n_cat])
# regression
for(j in 1:n_cat){
log(phi[i, j]) <- beta[j,1:dim_x] %*% x[i,1:dim_x]
pi_obs[i, j] <- phi[i, j] / sum(phi[i, 1:n_cat])
}
for(k in 1:n_cat){
for(j in 1:n_cat){
log(phitilde[i, k, j]) <- delta[k, j, 1:dim_v] %*% v[i, 1:dim_v]
pi2_obs2[i, k, j] <- phitilde[i, k, j] / (sum(phitilde[i, 1:n_cat, j]))
}
pi2_obs[i, k] <- sum(pi2_obs2[i, k, 1:n_cat] * pi_obs[i, 1:n_cat])
}
}
# priors
for(l in 1:dim_x){
beta[1, l] ~ dunif(unif_l_beta[1, l], unif_u_beta[1, l])
beta[2, l] <- 0
}
for(m in 1:n_cat){
for(n in 1:dim_v){
delta[1, m, n] ~ dunif(unif_l_delta[1, m, n], unif_u_delta[1, m, n])
delta[2, m, n] <- 0
}
}
}"
t_modelstring = "model{
# likelihood
for(i in 1:sample_size){
Y_star[i] ~ dcat(pi_obs[i, 1:n_cat])
Y_tilde[i] ~ dcat(pi2_obs[i, 1:n_cat])
# regression
for(j in 1:n_cat){
log(phi[i, j]) <- beta[j,1:dim_x] %*% x[i,1:dim_x]
pi_obs[i, j] <- phi[i, j] / sum(phi[i, 1:n_cat])
}
for(k in 1:n_cat){
for(j in 1:n_cat){
log(phitilde[i, k, j]) <- delta[k, j, 1:dim_v] %*% v[i, 1:dim_v]
pi2_obs2[i, k, j] <- phitilde[i, k, j] / (sum(phitilde[i, 1:n_cat, j]))
}
pi2_obs[i, k] <- sum(pi2_obs2[i, k, 1:n_cat] * pi_obs[i, 1:n_cat])
}
}
# priors
for(l in 1:dim_x){
beta[1, l] ~ dt(t_mu_beta[1, l], t_tau_beta[1, l], t_df_beta[1, l])
beta[2, l] <- 0
}
for(m in 1:n_cat){
for(n in 1:dim_v){
delta[1, m, n] ~ dt(t_mu_delta[1, m, n], t_tau_delta[1, m, n], t_df_delta[1, m, n])
delta[2, m, n] <- 0
}
}
}"
normal_modelstring = "model{
# likelihood
for(i in 1:sample_size){
Y_star[i] ~ dcat(pi_obs[i, 1:n_cat])
Y_tilde[i] ~ dcat(pi2_obs[i, 1:n_cat])
# regression
for(j in 1:n_cat){
log(phi[i, j]) <- beta[j,1:dim_x] %*% x[i,1:dim_x]
pi_obs[i, j] <- phi[i, j] / sum(phi[i, 1:n_cat])
}
for(k in 1:n_cat){
for(j in 1:n_cat){
log(phitilde[i, k, j]) <- delta[k, j, 1:dim_v] %*% v[i, 1:dim_v]
pi2_obs2[i, k, j] <- phitilde[i, k, j] / (sum(phitilde[i, 1:n_cat, j]))
}
pi2_obs[i, k] <- sum(pi2_obs2[i, k, 1:n_cat] * pi_obs[i, 1:n_cat])
}
}
# priors
for(l in 1:dim_x){
beta[1, l] ~ dnorm(normal_mu_beta[1, l], normal_sigma_beta[1, l])
beta[2, l] <- 0
}
for(m in 1:n_cat){
for(n in 1:dim_v){
delta[1, m, n] ~ dunif(normal_mu_delta[1, m, n], normal_sigma_delta[1, m, n])
delta[2, m, n] <- 0
}
}
}"
dexp_modelstring = "model{
# likelihood
for(i in 1:sample_size){
Y_star[i] ~ dcat(pi_obs[i, 1:n_cat])
Y_tilde[i] ~ dcat(pi2_obs[i, 1:n_cat])
# regression
for(j in 1:n_cat){
log(phi[i, j]) <- beta[j,1:dim_x] %*% x[i,1:dim_x]
pi_obs[i, j] <- phi[i, j] / sum(phi[i, 1:n_cat])
}
for(k in 1:n_cat){
for(j in 1:n_cat){
log(phitilde[i, k, j]) <- delta[k, j, 1:dim_v] %*% v[i, 1:dim_v]
pi2_obs2[i, k, j] <- phitilde[i, k, j] / (sum(phitilde[i, 1:n_cat, j]))
}
pi2_obs[i, k] <- sum(pi2_obs2[i, k, 1:n_cat] * pi_obs[i, 1:n_cat])
}
}
# priors
for(l in 1:dim_x){
beta[1, l] ~ ddexp(dexp_mu_beta[1, l], dexp_b_beta[1, l])
beta[2, l] <- 0
}
for(m in 1:n_cat){
for(n in 1:dim_v){
delta[1, m, n] ~ dunif(dexp_mu_delta[1, m, n], dexp_b_delta[1, m, n])
delta[2, m, n] <- 0
}
}
}"
selected_model = ifelse(prior == "t", t_modelstring,
ifelse(prior == "uniform", unif_modelstring,
ifelse(prior == "normal", normal_modelstring,
ifelse(prior == "dexp", dexp_modelstring,
stop("Please select a prior distribution.")))))
return(selected_model)
}
|
/scratch/gouwar.j/cran-all/cranData/COMBO/R/naive_model_picker_2stage.R
|
#' EM-Algorithm Estimation of the Binary Outcome Misclassification Model while Assuming Perfect Sensitivity
#'
#' Code is adapted by the SAMBA R package from Lauren Beesley and Bhramar Mukherjee.
#'
#' @param Ystar A numeric vector of indicator variables (1, 0) for the observed
#' outcome \code{Y*}. The reference category is 0.
#' @param Z A numeric matrix of covariates in the true outcome mechanism.
#' \code{Z} should not contain an intercept.
#' @param X A numeric matrix of covariates in the observation mechanism.
#' \code{X} should not contain an intercept.
#' @param start Numeric vector of starting values for parameters in the true
#' outcome mechanism (\eqn{\theta}) and the observation mechanism (\eqn{\beta}), respectively.
#' @param beta0_fixed Optional numeric vector of values of the observation mechanism
#' intercept to profile over. If a single value is entered, this corresponds to
#' fixing the intercept at the specified value. The default is \code{NULL}.
#' @param weights Optional vector of row-specific weights used for selection bias
#' adjustment. The default is \code{NULL}.
#' @param expected A logical value indicating whether or not to calculate the
#' covariance matrix via the expected Fisher information matrix. The default is \code{TRUE}.
#' @param tolerance A numeric value specifying when to stop estimation, based on
#' the difference of subsequent log-likelihood estimates. The default is \code{1e-7}.
#' @param max_em_iterations An integer specifying the maximum number of
#' iterations of the EM algorithm. The default is \code{1500}.
#'
#' @return \code{perfect_sensitivity_EM} returns a list containing nine elements.
#' The elements are detailed in \code{?SAMBA::obsloglikEM} documentation. Code
#' is adapted from the \code{SAMBA::obsloglikEM} function.
#'
#' @references Beesley, L. and Mukherjee, B. (2020).
#' Statistical inference for association studies using electronic health records:
#' Handling both selection bias and outcome misclassification.
#' Biometrics, 78, 214-226.
#'
#' @include expit.R
#'
#' @importFrom stats rnorm rgamma rmultinom glm predict coef
#'
perfect_sensitivity_EM <- function(Ystar, Z, X, start, beta0_fixed = NULL,
weights = NULL, expected = TRUE,
tolerance = 1e-7, max_em_iterations = 1500)
{
obsloglik_var <- utils::getFromNamespace("obsloglik_var", "SAMBA")
obsloglik_var_weighted <- utils::getFromNamespace("obsloglik_var_weighted", "SAMBA")
if (is.data.frame(Z))
Z <- as.matrix(Z)
if (!is.numeric(Z))
stop("'Z' should be a numeric matrix.")
if (is.vector(Z))
Z <- as.matrix(Z)
if (!is.matrix(Z))
stop("'Z' should be a matrix or data.frame.")
if (!is.null(X)) {
if (is.data.frame(X))
X <- as.matrix(X)
if (!is.numeric(X))
stop("'X' must be numeric.")
if (is.vector(X))
X <- as.matrix(X)
if (!is.matrix(X))
stop("'X' must be a data.frame or matrix.")
}
if (!is.numeric(Ystar) || !is.vector(Ystar))
stop("'Ystar' must be a numeric vector.")
if (length(setdiff(0:1, unique(Ystar))) != 0)
stop("'Ystar' must be coded 0/1.")
n <- length(Ystar)
if (nrow(Z) != n)
stop("The number of rows of 'Z' must match the length of 'Ystar'.")
if (!is.null(X) && nrow(X) != n)
stop("The number of rows of 'X' must match the length of 'Ystar'.")
if (!is.logical(expected) || length(expected) > 1)
stop("'expected' must be a length one logical.")
# initialise p for EM
theta <- start[1:(1 + ncol(Z))]
beta <- start[-(1:(1 + ncol(Z)))]
pred1 <- expit(cbind(1, Z) %*% theta)
pred2 <- expit(cbind(1, X) %*% beta)
calculate.p <- function(pred1, pred2)
{
(Ystar) * (pred1 / (pred1 + (pred2 * (1 - pred1))))
}
p <- calculate.p(pred1, pred2)
it <- 1
converged <- F
w <- 1
param.seq <- matrix(c(theta, beta), 1)
loglik.seq <- -10 ^ 9
while (!converged && it < max_em_iterations) {
if (is.null(beta0_fixed)) {
suppressWarnings({
fit.beta <- stats::glm(Ystar ~ X, weights = (1 - p) * w,
family = stats::binomial())
})
} else {
suppressWarnings({
fit.beta <- stats::glm(Ystar ~ 0 + X, weights = (1 - p) * w,
offset = rep(beta0_fixed, length(p)),
family = stats::binomial())
})
}
suppressWarnings({
fit.theta <- stats::glm(p ~ Z, family = stats::binomial(),
weights = weights)
})
pred1 <- stats::predict(fit.theta, type = 'response')
pred2 <- stats::predict(fit.beta, type = 'response')
p <- calculate.p(pred1, pred2)
loglik <- sum(w * Ystar * log(pred1 + (pred2 * (1 - pred1))) +
w * (1 - Ystar) * log((1 - pred1) * (1 - pred2)))
loglik.seq <- c(loglik.seq, loglik)
it <- it + 1
if (abs(loglik.seq[it] - loglik.seq[it - 1]) < tolerance)
converged <- TRUE
par <- c(stats::coef(fit.theta), beta0_fixed, stats::coef(fit.beta))
param.seq <- rbind(param.seq, par)
}
param <- c(stats::coef(fit.theta), beta0_fixed, stats::coef(fit.beta))
theta <- param[1:(ncol(Z) + 1)]
beta <- param[-(1:(ncol(Z) + 1))]
if (is.null(weights)) {
var <- obsloglik_var(Ystar, Z, X, theta, beta, beta0_fixed, expected)
} else {
var <- obsloglik_var_weighted(Ystar, Z, X, theta, beta, beta0_fixed,
weights, expected)
}
results <- list(param = param, variance = var, param.seq = param.seq,
loglik.seq = loglik.seq, Ystar = Ystar, X = X, Z = Z,
weights = weights, beta0_fixed = beta0_fixed)
return(results)
}
|
/scratch/gouwar.j/cran-all/cranData/COMBO/R/perfect_sensitivity_EM.R
|
#' Compute Probability of Each True Outcome, for Every Subject
#'
#' @param beta A numeric column matrix of regression parameters for the
#' \code{Y} (true outcome) ~ \code{X} (predictor matrix of interest).
#' @param X A numeric design matrix.
#' @param n An integer value specifying the number of observations in the sample.
#' This value should be equal to the number of rows of the design matrix, \code{X}.
#' @param n_cat The number of categorical values that the true outcome, \code{Y},
#' can take.
#'
#' @return \code{pi_compute} returns a matrix of probabilities,
#' \eqn{P(Y_i = j | X_i) = \frac{\exp(X_i \beta)}{1 + \exp(X_i \beta)}}
#' for each of the \eqn{i = 1, \dots,} \code{n} subjects. Rows of the matrix
#' correspond to each subject. Columns of the matrix correspond to the true outcome
#' categories \eqn{j = 1, \dots,} \code{n_cat}.
#'
#' @include sum_every_n1.R
#'
#' @importFrom stats rnorm
#'
pi_compute <- function(beta, X, n, n_cat){
exp_xb = exp(X %*% beta)
pi_result = exp_xb[,1] / rep(sum_every_n1(exp_xb[,1], n), n_cat - 1)
pi_matrix = matrix(c(pi_result, 1 - pi_result),
ncol = n_cat, nrow = n,
byrow = FALSE)
return(pi_matrix)
}
|
/scratch/gouwar.j/cran-all/cranData/COMBO/R/pi_compute.R
|
#' Compute the Mean Conditional Probability of Correct Classification, by True Outcome Across all Subjects for each MCMC Chain
#'
#' @param n_chains An integer specifying the number of MCMC chains to compute over.
#' @param chains_list A numeric list containing the samples from \code{n_chains}
#' MCMC chains.
#' @param Z A numeric design matrix.
#' @param n An integer value specifying the number of observations in the sample.
#' This value should be equal to the number of rows of the design matrix, \code{Z}.
#' @param n_cat The number of categorical values that the true outcome, \code{Y},
#' and the observed outcome, \code{Y*} can take.
#'
#' @return \code{pistar_by_chain} returns a numeric matrix of the average
#' conditional probability \eqn{P(Y^* = j | Y = j, Z)} across all subjects for
#' each MCMC chain. Rows of the matrix correspond to MCMC chains, up to \code{n_chains}.
#' The first column contains the conditional probability \eqn{P(Y^* = 1 | Y = 1, Z)}.
#' The second column contains the conditional probability \eqn{P(Y^* = 2 | Y = 2, Z)}.
#'
#' @include pistar_compute_for_chains.R
#' @include mean_pistarjj_compute.R
#'
#' @importFrom stats rnorm
#'
pistar_by_chain <- function(n_chains, chains_list, Z, n, n_cat){
colmeans_by_chain = lapply(chains_list, colMeans)
pistar_results = matrix(NA, nrow = n_chains, ncol = n_cat)
for(i in 1:n_chains){
chain_means_i = colmeans_by_chain[[i]]
pistar_matrix_i = pistar_compute_for_chains(chain_means_i, Z, n, n_cat)
pistar_results[i, 1] = mean_pistarjj_compute(pistar_matrix_i, 1, n)
pistar_results[i, 2] = mean_pistarjj_compute(pistar_matrix_i, 2, n)
}
return(pistar_results)
}
|
/scratch/gouwar.j/cran-all/cranData/COMBO/R/pistar_by_chain.R
|
#' Compute Conditional Probability of Each Observed Outcome Given Each True Outcome, for Every Subject
#'
#' @param gamma A numeric matrix of regression parameters for the observed
#' outcome mechanism, \code{Y* | Y}
#' (observed outcome, given the true outcome) ~ \code{Z} (misclassification
#' predictor matrix). Rows of the matrix correspond to parameters for the \code{Y* = 1}
#' observed outcome, with the dimensions of \code{Z}.
#' Columns of the matrix correspond to the true outcome categories
#' \eqn{j = 1, \dots,} \code{n_cat}.
#' @param Z A numeric design matrix.
#' @param n An integer value specifying the number of observations in the sample.
#' This value should be equal to the number of rows of the design matrix, \code{Z}.
#' @param n_cat The number of categorical values that the true outcome, \code{Y},
#' and the observed outcome, \code{Y*} can take.
#'
#' @return \code{pistar_compute} returns a matrix of conditional probabilities,
#' \eqn{P(Y_i^* = k | Y_i = j, Z_i) = \frac{\text{exp}\{\gamma_{kj0} + \gamma_{kjZ} Z_i\}}{1 + \text{exp}\{\gamma_{kj0} + \gamma_{kjZ} Z_i\}}}
#' for each of the \eqn{i = 1, \dots,} \code{n} subjects. Rows of the matrix
#' correspond to each subject and observed outcome. Specifically, the probability
#' for subject \eqn{i} and observed category $1$ occurs at row \eqn{i}. The probability
#' for subject \eqn{i} and observed category $2$ occurs at row \eqn{i +} \code{n}.
#' Columns of the matrix correspond to the true outcome categories \eqn{j = 1, \dots,} \code{n_cat}.
#'
#' @include sum_every_n.R
#' @include sum_every_n1.R
#'
#' @importFrom stats rnorm
#'
pistar_compute <- function(gamma, Z, n, n_cat){
exp_zg = exp(Z %*% gamma)
pi_denominator = apply(exp_zg, FUN = sum_every_n1, n, MARGIN = 2)
pi_result = exp_zg / rbind(pi_denominator)
pistar_matrix = rbind(pi_result,
1 - apply(pi_result,
FUN = sum_every_n, n = n,
MARGIN = 2))
return(pistar_matrix)
}
|
/scratch/gouwar.j/cran-all/cranData/COMBO/R/pistar_compute.R
|
#' Compute Conditional Probability of Each Observed Outcome Given Each True Outcome for a given MCMC Chain, for Every Subject
#'
#' @param chain_colMeans A numeric vector containing the posterior means for all
#' sampled parameters in a given MCMC chain. \code{chain_colMeans} must be a named
#' object (i.e. each parameter must be named as \code{gamma[k,j,p]}).
#' @param Z A numeric design matrix.
#' @param n An integer value specifying the number of observations in the sample.
#' This value should be equal to the number of rows of the design matrix, \code{Z}.
#' @param n_cat The number of categorical values that the true outcome, \code{Y},
#' and the observed outcome, \code{Y*} can take.
#'
#' @return \code{pistar_compute_for_chains} returns a matrix of conditional probabilities,
#' \eqn{P(Y_i^* = k | Y_i = j, Z_i) = \frac{\text{exp}\{\gamma_{kj0} + \gamma_{kjZ} Z_i\}}{1 + \text{exp}\{\gamma_{kj0} + \gamma_{kjZ} Z_i\}}}
#' for each of the \eqn{i = 1, \dots,} \code{n} subjects. Rows of the matrix
#' correspond to each subject and observed outcome. Specifically, the probability
#' for subject \eqn{i} and observed category $0$ occurs at row \eqn{i}. The probability
#' for subject \eqn{i} and observed category $1$ occurs at row \eqn{i +} \code{n}.
#' Columns of the matrix correspond to the true outcome categories \eqn{j = 1, \dots,} \code{n_cat}.
#'
#' @include sum_every_n.R
#'
#' @importFrom stats rnorm
#'
pistar_compute_for_chains <- function(chain_colMeans, Z, n, n_cat){
dim_z = ncol(Z)
gamma_names <- paste0("gamma[1,", rep(1:n_cat, dim_z), ",", rep(1:dim_z, each = n_cat), "]")
gamma = matrix(chain_colMeans[gamma_names],
ncol = 2, byrow = TRUE)
exp_zg_nobaseline = exp(Z %*% gamma)
exp_zg_baseline = matrix(1, nrow = n, ncol = n_cat)
exp_zg = rbind(exp_zg_nobaseline, exp_zg_baseline)
pi_denominator = apply(exp_zg, FUN = sum_every_n, n, MARGIN = 2)
pi_result = exp_zg / do.call("rbind", rep(list(pi_denominator),
n_cat))
return(pi_result)
}
|
/scratch/gouwar.j/cran-all/cranData/COMBO/R/pistar_compute_for_chains.R
|
#' Compute the Mean Conditional Probability of Second-Stage Correct Classification, by First-Stage and True Outcome Across all Subjects for each MCMC Chain
#'
#' @param n_chains An integer specifying the number of MCMC chains to compute over.
#' @param chains_list A numeric list containing the samples from \code{n_chains}
#' MCMC chains.
#' @param V A numeric design matrix.
#' @param n An integer value specifying the number of observations in the sample.
#' This value should be equal to the number of rows of the design matrix, \code{V}.
#' @param n_cat The number of categorical values that the true outcome, \eqn{Y},
#' the first-stage observed outcome, \eqn{Y*}, and the second-stage
#' observed outcome, \eqn{\tilde{Y}}, can take.
#'
#' @return \code{pitilde_by_chain} returns a numeric matrix of the average
#' conditional probability \eqn{P( \tilde{Y} = j | Y^* = j, Y = j, V)} across all subjects for
#' each MCMC chain. Rows of the matrix correspond to MCMC chains, up to \code{n_chains}.
#' The first column contains the conditional probability \eqn{P( \tilde{Y} = 1 | Y^* = 1, Y = 1, V)}.
#' The second column contains the conditional probability \eqn{P( \tilde{Y} = 2 | Y^* = 2, Y = 2, V)}.
#'
#' @include pistar_compute_for_chains.R
#' @include mean_pistarjj_compute.R
#'
#' @importFrom stats rnorm
#'
pitilde_by_chain <- function(n_chains, chains_list, V, n, n_cat){
colmeans_by_chain = lapply(chains_list, colMeans)
pitilde_results = matrix(NA, nrow = n_chains, ncol = n_cat)
for(i in 1:n_chains){
chain_means_i = colmeans_by_chain[[i]]
pitilde_array_i = pitilde_compute_for_chains(chain_means_i, V, n, n_cat)
pitilde_results[i, 1] = mean(pitilde_array_i[1:n, 1, 1])
pitilde_results[i, 2] = mean(pitilde_array_i[(n + 1):(2 * n), 2, 2])
}
return(pitilde_results)
}
|
/scratch/gouwar.j/cran-all/cranData/COMBO/R/pitilde_by_chain.R
|
#' Compute Conditional Probability of Each Second-Stage Observed Outcome Given Each True Outcome and First-Stage Observed Outcome, for Every Subject
#'
#' @param delta A numeric array of regression parameters for the second-stage observed
#' outcome mechanism, \eqn{\tilde{Y} | Y^*, Y}
#' (second-stage observed outcome, given the first-stage observed outcome and the true outcome) ~ \code{V} (misclassification
#' predictor matrix). Rows of the matrix correspond to parameters for the \eqn{\tilde{Y} = 1}
#' observed outcome, with the dimensions of \code{V}.
#' Columns of the matrix correspond to the first-stage observed outcome categories
#' \eqn{k = 1, \dots,} \code{n_cat}. The third dimension of the array
#' corresponds to the true outcome categories \eqn{j = 1, \dots,} \code{n_cat}
#' @param V A numeric design matrix.
#' @param n An integer value specifying the number of observations in the sample.
#' This value should be equal to the number of rows of the design matrix, \code{V}.
#' @param n_cat The number of categorical values that the true outcome, \code{Y},
#' and the observed outcomes can take.
#'
#' @return \code{pitilde_compute} returns an array of conditional probabilities,
#' \eqn{P(\tilde{Y}_i = \ell | Y^*_i = k, Y_i = j, V_i) = \frac{\text{exp}\{\delta_{\ell kj0} + \delta_{\ell kjV} V_i\}}{1 + \text{exp}\{\delta_{\ell kj0} + \delta_{\ell kjV} V_i\}}}
#' for each of the \eqn{i = 1, \dots,} \code{n} subjects. Rows of the matrix
#' correspond to each subject and second-stage observed outcome. Specifically, the probability
#' for subject \eqn{i} and observed category $1$ occurs at row \eqn{i}. The probability
#' for subject \eqn{i} and observed category $2$ occurs at row \eqn{i +} \code{n}.
#' Columns of the matrix correspond to the first-stage outcome categories, \eqn{k = 1, \dots,} \code{n_cat}.
#' The third dimension of the array corresponds to the true outcome categories,
#' \eqn{j = 1, \dots,} \code{n_cat}.
#'
#' @include sum_every_n.R
#' @include sum_every_n1.R
#'
#' @importFrom stats rnorm
#'
pitilde_compute <- function(delta, V, n, n_cat){
exp_vd1 = exp(V %*% delta[,,1])
exp_vd2 = exp(V %*% delta[,,2])
pi_denominator1 = apply(exp_vd1, FUN = sum_every_n1, n, MARGIN = 2)
pi_result1 = exp_vd1 / rbind(pi_denominator1)
pi_denominator2 = apply(exp_vd2, FUN = sum_every_n1, n, MARGIN = 2)
pi_result2 = exp_vd2 / rbind(pi_denominator2)
pitilde_matrix1 = rbind(pi_result1,
1 - apply(pi_result1,
FUN = sum_every_n, n = n,
MARGIN = 2))
pitilde_matrix2 = rbind(pi_result2,
1 - apply(pi_result2,
FUN = sum_every_n, n = n,
MARGIN = 2))
pitilde_array = array(c(pitilde_matrix1, pitilde_matrix2),
dim = c(dim(pitilde_matrix1), 2))
return(pitilde_array)
}
|
/scratch/gouwar.j/cran-all/cranData/COMBO/R/pitilde_compute.R
|
#' Compute Conditional Probability of Each Observed Outcome Given Each True Outcome for a given MCMC Chain, for Every Subject
#'
#' @param chain_colMeans A numeric vector containing the posterior means for all
#' sampled parameters in a given MCMC chain. \code{chain_colMeans} must be a named
#' object (i.e. each parameter must be named as \code{delta[l,k,j,p]}).
#' @param V A numeric design matrix.
#' @param n An integer value specifying the number of observations in the sample.
#' This value should be equal to the number of rows of the design matrix, \code{V}.
#' @param n_cat The number of categorical values that the true outcome, \eqn{Y},
#' the first-stage observed outcome, \eqn{Y^*}, and the second-stage
#' observed outcome, \eqn{\tilde{Y}},\ can take.
#'
#' @return \code{pitilde_compute_for_chains} returns a matrix of conditional probabilities,
#' \eqn{P(\tilde{Y}_i = \ell | Y^*_i = k, Y_i = j, V_i) = \frac{\text{exp}\{\delta_{\ell kj0} + \delta_{\ell kjV} V_i\}}{1 + \text{exp}\{\delta_{\ell kj0} + \delta_{\ell kjV} V_i\}}}
#' corresponding to each subject and observed outcome. Specifically, the probability
#' for subject \eqn{i} and second-stage observed category $1$ occurs at row \eqn{i}. The probability
#' for subject \eqn{i} and second-stage observed category $2$ occurs at row \eqn{i +} \code{n}.
#' Columns of the matrix correspond to the first-stage outcome categories \eqn{j = 1, \dots,} \code{n_cat}.
#' The third dimension of the array corresponds to the true outcome categories,
#' \eqn{j = 1, \dots,} \code{n_cat}.
#'
#' @include sum_every_n.R
#'
#' @importFrom stats rnorm
#'
pitilde_compute_for_chains <- function(chain_colMeans, V, n, n_cat){
dim_v = ncol(V)
delta_names <- paste0("delta[1,",
rep(1:n_cat, dim_v*dim_v), ",",
rep(rep(1:n_cat, each = dim_v), dim_v), ",",
rep(1:dim_v, each = n_cat * n_cat), "]")
delta_j1_names <- which(substring(delta_names, 11, 11) == 1)
delta_j2_names <- which(substring(delta_names, 11, 11) == 2)
delta_j1 <- matrix(chain_colMeans[delta_names[delta_j1_names]],
ncol = 2, byrow = TRUE)
delta_j2 <- matrix(chain_colMeans[delta_names[delta_j2_names]],
ncol = 2, byrow = TRUE)
delta <- array(c(delta_j1, delta_j2), dim = c(dim_v, n_cat, n_cat))
exp_vd1 = exp(V %*% delta[,,1])
exp_vd2 = exp(V %*% delta[,,2])
pi_denominator1 = apply(exp_vd1, FUN = sum_every_n1, n, MARGIN = 2)
pi_result1 = exp_vd1 / rbind(pi_denominator1)
pi_denominator2 = apply(exp_vd2, FUN = sum_every_n1, n, MARGIN = 2)
pi_result2 = exp_vd2 / rbind(pi_denominator2)
pitilde_matrix1 = rbind(pi_result1,
1 - apply(pi_result1,
FUN = sum_every_n, n = n,
MARGIN = 2))
pitilde_matrix2 = rbind(pi_result2,
1 - apply(pi_result2,
FUN = sum_every_n, n = n,
MARGIN = 2))
pitilde_array = array(c(pitilde_matrix1, pitilde_matrix2),
dim = c(dim(pitilde_matrix1), 2))
return(pitilde_array)
}
|
/scratch/gouwar.j/cran-all/cranData/COMBO/R/pitilde_compute_for_chains.R
|
#' M-Step Expected Log-Likelihood with respect to Beta
#'
#' Objective function of the form:
#' \eqn{ Q_\beta = \sum_{i = 1}^N \Bigl[ \sum_{j = 0}^1 w_{ij} \text{log} \{ \pi_{ij} \}\Bigr]}.
#' Used to obtain estimates of \eqn{\beta} parameters.
#'
#' @param beta A numeric vector of regression parameters for the
#' \code{Y} (true outcome) ~ \code{X} (predictor matrix of interest).
#' @param X A numeric design matrix.
#' @param w_mat Matrix of E-step weights obtained from \code{w_j}.
#' @param sample_size An integer value specifying the number of observations in the sample.
#' This value should be equal to the number of rows of the design matrix, \code{X}.
#' @param n_cat The number of categorical values that the true outcome, \code{Y},
#' can take.
#'
#' @return \code{q_beta_f} returns the negative value of the expected log-likelihood function,
#' \eqn{ Q_\beta = \sum_{i = 1}^N \Bigl[ \sum_{j = 1}^2 w_{ij} \text{log} \{ \pi_{ij} \}\Bigr]},
#' at the provided inputs.
#'
#' @include pi_compute.R
#' @include pistar_compute.R
#' @include w_j.R
#'
#' @importFrom stats rnorm rgamma rmultinom optim
#'
q_beta_f <- function(beta, X, w_mat,
sample_size, n_cat){
beta_mat = matrix(beta, ncol = 1)
pi_terms = pi_compute(beta_mat, X, n = sample_size, n_cat)
pi_terms = ifelse(pi_terms == 0, pi_terms + 0.00001, pi_terms)
w_pi = c(w_mat) * log(c(pi_terms))
result = -sum(sum_every_n(w_pi, n = sample_size))
return(result)
}
|
/scratch/gouwar.j/cran-all/cranData/COMBO/R/q_beta_f.R
|
#' M-Step Expected Log-Likelihood with respect to Delta
#'
#' Objective function of the form:
#' \eqn{Q_{\delta} = \sum_{i = 1}^N \Bigl[\sum_{j = 1}^2 \sum_{k = 1}^2 \sum_{\ell = 1}^2 w_{ij} y^*_{ik} \tilde{y}_{i \ell} \text{log} \{ \tilde{\pi}_{i \ell kj} \}\Bigr]}.
#' Used to obtain estimates of \eqn{\delta} parameters.
#'
#' @param delta_v A numeric array of regression parameters for the second-stage observed
#' outcome mechanism, \eqn{\tilde{Y} | Y^*, Y}
#' (second-stage observed outcome, given the first-stage observed outcome and the true outcome) ~ \code{V} (misclassification
#' predictor matrix). The \eqn{\delta} vector is obtained from the array form. In array form,
#' the first dimension (matrix rows) of \code{delta}
#' corresponds to parameters for the \eqn{\tilde{Y} = 1}
#' second-stage observed outcome, with the dimensions of the \code{V}
#' The second dimension (matrix columns) correspond to the first-stage
#' observed outcome categories \eqn{Y^* \in \{1, 2\}}. The third dimension of
#' \code{delta_start} corresponds to to the true outcome categories
#' \eqn{Y \in \{1, 2\}}. The numeric vector \eqn{\delta} is obtained by
#' concatenating the delta array, i.e. \code{delta_v <- c(delta_array)}.
#' @param V A numeric design matrix.
#' @param obs_Ystar_matrix A numeric matrix of indicator variables (0, 1) for the observed
#' outcome \code{Y*}. Rows of the matrix correspond to each subject. Columns of
#' the matrix correspond to each observed outcome category. Each row should contain
#' exactly one 0 entry and exactly one 1 entry.
#' @param obs_Ytilde_matrix A numeric matrix of indicator variables (0, 1) for the observed
#' outcome \eqn{\tilde{Y}}. Rows of the matrix correspond to each subject. Columns of
#' the matrix correspond to each observed outcome category. Each row should contain
#' exactly one 0 entry and exactly one 1 entry.
#' @param w_mat Matrix of E-step weights obtained from \code{w_j_2stage}.
#' @param sample_size An integer value specifying the number of observations in the sample.
#' This value should be equal to the number of rows of the design matrix, \code{V}.
#' @param n_cat The number of categorical values that the true outcome, \code{Y},
#' and the observed outcomes can take.
#'
#' @return \code{q_beta_f} returns the negative value of the expected log-likelihood function,
#' \eqn{Q_{\delta} = \sum_{i = 1}^N \Bigl[\sum_{j = 1}^2 \sum_{k = 1}^2 \sum_{\ell = 1}^2 w_{ij} y^*_{ik} \tilde{y}_{i \ell} \text{log} \{ \tilde{\pi}_{i \ell kj} \}\Bigr]},
#' at the provided inputs.
#'
#' @include pi_compute.R
#' @include pistar_compute.R
#' @include pitilde_compute.R
#' @include w_j_2stage.R
#'
#' @importFrom stats rnorm rgamma rmultinom optim
#'
q_delta_f <- function(delta_v, V, obs_Ystar_matrix, obs_Ytilde_matrix, w_mat,
sample_size, n_cat){
delta_mat = array(delta_v, dim = c(ncol(V), 2, 2))
pitilde_terms_array = pitilde_compute(delta_mat, V, sample_size, n_cat)
big_Ystar_matrix = array(c(rep(obs_Ystar_matrix[,1], 2),
rep(obs_Ystar_matrix[,2], 2),
rep(obs_Ystar_matrix[,1], 2),
rep(obs_Ystar_matrix[,2], 2)),
dim = c(2 * sample_size, 2, 2))
big_Ytilde_matrix = array(rep(c(obs_Ytilde_matrix[,1],
obs_Ytilde_matrix[,2]), 4),
dim = c(2 * sample_size, 2, 2))
big_w_matrix = array(c(rep(w_mat[,1], 4),
rep(w_mat[,2], 4)),
dim = c(2 * sample_size, 2, 2))
summand = big_Ystar_matrix * big_Ytilde_matrix * big_w_matrix * log(pitilde_terms_array)
result = -sum(summand)
return(result)
}
|
/scratch/gouwar.j/cran-all/cranData/COMBO/R/q_delta_f.R
|
#' M-Step Expected Log-Likelihood with respect to Gamma
#'
#' Objective function of the form:
#' \eqn{Q_{\gamma} = \sum_{i = 1}^N \Bigl[\sum_{j = 1}^2 \sum_{k = 1}^2 w_{ij} y^*_{ik} \text{log} \{ \pi^*_{ikj} \}\Bigr]}.
#' Used to obtain estimates of \eqn{\gamma} parameters.
#'
#' @param gamma_v A numeric vector of regression parameters for the observed
#' outcome mechanism, \code{Y* | Y}
#' (observed outcome, given the true outcome) ~ \code{Z} (misclassification
#' predictor matrix). In matrix form, the gamma parameter matrix rows
#' correspond to parameters for the \code{Y* = 0}
#' observed outcome, with the dimensions of \code{Z}.
#' In matrix form, the gamma parameter matrix columns correspond to the true outcome categories
#' \eqn{j = 1, \dots,} \code{n_cat}. The numeric vector \code{gamma_v} is
#' obtained by concatenating the gamma matrix, i.e. \code{gamma_v <- c(gamma_matrix)}.
#' @param Z A numeric design matrix.
#' @param obs_Y_matrix A numeric matrix of indicator variables (0, 1) for the observed
#' outcome \code{Y*}. Rows of the matrix correspond to each subject. Columns of
#' the matrix correspond to each observed outcome category. Each row should contain
#' exactly one 0 entry and exactly one 1 entry.
#' @param w_mat Matrix of E-step weights obtained from \code{w_j}.
#' @param sample_size An integer value specifying the number of observations in the sample.
#' This value should be equal to the number of rows of the design matrix, \code{Z}.
#' @param n_cat The number of categorical values that the true outcome, \code{Y},
#' and the observed outcome, \code{Y*} can take.
#'
#' @return \code{q_beta_f} returns the negative value of the expected log-likelihood function,
#' \eqn{Q_{\gamma} = \sum_{i = 1}^N \Bigl[\sum_{j = 1}^2 \sum_{k = 1}^2 w_{ij} y^*_{ik} \text{log} \{ \pi^*_{ikj} \}\Bigr]},
#' at the provided inputs.
#'
#' @include pi_compute.R
#' @include pistar_compute.R
#' @include w_j.R
#'
#' @importFrom stats rnorm rgamma rmultinom optim
#'
q_gamma_f <- function(gamma_v, Z, obs_Y_matrix, w_mat,
sample_size, n_cat){
gamma_mat = matrix(gamma_v, ncol = n_cat, byrow = FALSE)
pistar_terms_v = pistar_compute(gamma_mat, Z, sample_size, n_cat)
big_Ystar_matrix = matrix(rep(c(obs_Y_matrix), n_cat),
ncol = n_cat, byrow = FALSE)
big_w_matrix = do.call(rbind, replicate(n_cat, w_mat, simplify = FALSE))
summand = big_Ystar_matrix * big_w_matrix * log(pistar_terms_v)
result = -sum(summand)
return(result)
}
|
/scratch/gouwar.j/cran-all/cranData/COMBO/R/q_gamma_f.R
|
#' Sum Every "n"th Element
#'
#' @param x A numeric vector to sum over
#' @param n A numeric value specifying the distance between the reference index and the next index to be summed
#'
#' @return \code{sum_every_n} returns a vector of sums of every \code{n}^{th} element of the vector \code{x}.
#'
sum_every_n <- function(x, n){
vector_groups = split(x,
ceiling(seq_along(x) / n))
sum_x = Reduce(`+`, vector_groups)
return(sum_x)
}
|
/scratch/gouwar.j/cran-all/cranData/COMBO/R/sum_every_n.R
|
#' Sum Every "n"th Element, then add 1
#'
#' @param x A numeric vector to sum over
#' @param n A numeric value specifying the distance between the reference index and the next index to be summed
#'
#' @return \code{sum_every_n1} returns a vector of sums of every \code{n}^{th} element of the vector \code{x}, plus 1.
#'
sum_every_n1 <- function(x, n){
vector_groups = split(x,
ceiling(seq_along(x) / n))
sum_x = Reduce(`+`, vector_groups) + 1
return(sum_x)
}
|
/scratch/gouwar.j/cran-all/cranData/COMBO/R/sum_every_n1.R
|
#' Compute Probability of Each True Outcome, for Every Subject
#'
#' Compute the probability of the latent true outcome \eqn{Y \in \{1, 2 \}} as
#' \eqn{P(Y_i = j | X_i) = \frac{\exp(X_i \beta)}{1 + \exp(X_i \beta)}}
#' for each of the \eqn{i = 1, \dots,} \code{n} subjects.
#'
#' @param beta_matrix A numeric column matrix of estimated regression parameters for the
#' true outcome mechanism, \code{Y} (true outcome) ~ \code{X} (predictor matrix of interest),
#' obtained from \code{COMBO_EM} or \code{COMBO_MCMC}.
#' @param x_matrix A numeric matrix of covariates in the true outcome mechanism.
#' \code{x_matrix} should not contain an intercept.
#'
#' @return \code{true_classification_prob} returns a dataframe containing three columns.
#' The first column, \code{Subject}, represents the subject ID, from \eqn{1} to \code{n},
#' where \code{n} is the sample size, or equivalently, the number of rows in \code{x_matrix}.
#' The second column, \code{Y}, represents a true, latent outcome category \eqn{Y \in \{1, 2 \}}.
#' The last column, \code{Probability}, is the value of the equation
#' \eqn{P(Y_i = j | X_i) = \frac{\exp(X_i \beta)}{1 + \exp(X_i \beta)}} computed
#' for each subject and true, latent outcome category.
#'
#' @include pi_compute.R
#'
#' @importFrom stats rnorm
#'
#' @export
#'
#' @examples
#' set.seed(123)
#' sample_size <- 1000
#' cov1 <- rnorm(sample_size)
#' cov2 <- rnorm(sample_size, 1, 2)
#' x_matrix <- matrix(c(cov1, cov2), nrow = sample_size, byrow = FALSE)
#' estimated_betas <- matrix(c(1, -1, .5), ncol = 1)
#' P_Y <- true_classification_prob(estimated_betas, x_matrix)
#' head(P_Y)
true_classification_prob <- function(beta_matrix,
x_matrix){
n_cat = 2
sample_size = nrow(x_matrix)
if (!is.null(x_matrix)) {
if (is.data.frame(x_matrix))
x_matrix <- as.matrix(x_matrix)
if (!is.numeric(x_matrix))
stop("'x_matrix' must be numeric.")
if (is.vector(x_matrix))
x_matrix <- as.matrix(x_matrix)
if (!is.matrix(x_matrix))
stop("'x_matrix' must be a data.frame or matrix.")
}
X = matrix(c(rep(1, sample_size), c(x_matrix)),
byrow = FALSE, nrow = sample_size)
subject = rep(1:sample_size, n_cat)
Y_categories = rep(1:n_cat, each = sample_size)
pi_matrix = pi_compute(beta_matrix, X, sample_size, n_cat)
pi_df = data.frame(Subject = subject,
Y = Y_categories,
Probability = c(pi_matrix))
return(pi_df)
}
|
/scratch/gouwar.j/cran-all/cranData/COMBO/R/true_classification_prob.R
|
#' Compute E-step for Binary Outcome Misclassification Model Estimated With the EM-Algorithm
#'
#' @param ystar_matrix A numeric matrix of indicator variables (0, 1) for the observed
#' outcome \code{Y*}. Rows of the matrix correspond to each subject. Columns of
#' the matrix correspond to each observed outcome category. Each row should contain
#' exactly one 0 entry and exactly one 1 entry.
#' @param pistar_matrix A numeric matrix of conditional probabilities obtained from
#' the internal function \code{pistar_compute}. Rows of the matrix correspond to
#' each subject and to each observed outcome category. Columns of the matrix
#' correspond to each true, latent outcome category.
#' @param pi_matrix A numeric matrix of probabilities obtained from the internal
#' function \code{pi_compute}. Rows of the matrix correspond to each subject.
#' Columns of the matrix correspond to each true, latent outcome category.
#' @param sample_size An integer value specifying the number of observations in
#' the sample. This value should be equal to the number of rows of the observed
#' outcome matrix, \code{ystar_matrix}.
#' @param n_cat The number of categorical values that the true outcome, \code{Y},
#' and the observed outcome, \code{Y*}, can take.
#'
#' @return \code{w_j} returns a matrix of E-step weights for the EM-algorithm,
#' computed as follows:
#' \eqn{\sum_{k = 1}^2 \frac{y^*_{ik} \pi^*_{ikj} \pi_{ij}}{\sum_{\ell = 1}^2 \pi^*_{i k \ell} \pi_{i \ell}}}.
#' Rows of the matrix correspond to each subject. Columns of the matrix correspond
#' to the true outcome categories \eqn{j = 1, \dots,} \code{n_cat}.
#'
#' @include pi_compute.R
#' @include pistar_compute.R
#'
#' @importFrom stats rnorm rgamma rmultinom
#'
w_j <- function(ystar_matrix, pistar_matrix, pi_matrix, sample_size, n_cat){
pi_ij_1_allj_repped = do.call(rbind,
list(pi_matrix, pi_matrix))
pistar_pi_1 = pistar_matrix * pi_ij_1_allj_repped
suml_pistar_pi_1 = rowSums(pistar_pi_1)
suml_pistar_pi_denominator_1 <- matrix(rep(suml_pistar_pi_1, n_cat),
nrow = n_cat * sample_size,
byrow = FALSE)
obs_Y_matrix_repped_1 <- matrix(rep(c(ystar_matrix), each = n_cat),
nrow = n_cat * sample_size, byrow = TRUE)
weight_not_summed_1 <- obs_Y_matrix_repped_1 * (pistar_pi_1 / ifelse(suml_pistar_pi_denominator_1 == 0, .00000001, suml_pistar_pi_denominator_1))
weight_1 <- matrix(NA, nrow = sample_size, ncol = n_cat)
for(i in 1:sample_size){
for(j in 1:n_cat){
k_set = c(i, sample_size + i)
sum_terms = weight_not_summed_1[c(k_set), j]
weight_1[i, j] = sum(sum_terms)
}
}
return(weight_1)
}
|
/scratch/gouwar.j/cran-all/cranData/COMBO/R/w_j.R
|
#' Compute E-step for Two-Stage Binary Outcome Misclassification Model Estimated With the EM-Algorithm
#'
#' @param ystar_matrix A numeric matrix of indicator variables (0, 1) for the observed
#' outcome \code{Y*}. Rows of the matrix correspond to each subject. Columns of
#' the matrix correspond to each observed outcome category. Each row should contain
#' exactly one 0 entry and exactly one 1 entry.
#' @param ytilde_matrix A numeric matrix of indicator variables (0, 1) for the observed
#' outcome \eqn{\tilde{Y}}. Rows of the matrix correspond to each subject. Columns of
#' the matrix correspond to each observed outcome category. Each row should contain
#' exactly one 0 entry and exactly one 1 entry.
#' @param pitilde_array A numeric array of conditional probabilities obtained from
#' the internal function \code{pitilde_compute}. Rows of the matrices correspond
#' to each subject and to each second-stage observed outcome category. Columns of the matrix correspond
#' to each first-stage observed outcome category. The third dimension of the array
#' corresponds to each true, latent outcome category.
#' @param pistar_matrix A numeric matrix of conditional probabilities obtained from
#' the internal function \code{pistar_compute}. Rows of the matrix correspond to
#' each subject and to each first-stage observed outcome category. Columns of the matrix
#' correspond to each true, latent outcome category.
#' @param pi_matrix A numeric matrix of probabilities obtained from the internal
#' function \code{pi_compute}. Rows of the matrix correspond to each subject.
#' Columns of the matrix correspond to each true, latent outcome category.
#' @param sample_size An integer value specifying the number of observations in
#' the sample. This value should be equal to the number of rows of the observed
#' outcome matrices, \code{ystar_matrix} and \code{ytilde_matrix}.
#' @param n_cat The number of categorical values that the true outcome, \code{Y},
#' and the observed outcomes can take.
#'
#' @return \code{w_j} returns a matrix of E-step weights for the EM-algorithm,
#' computed as follows:
#' \eqn{\sum_{k = 1}^2 \sum_{\ell = 1}^2 \frac{y^*_{ik} \tilde{y}_{i \ell} \tilde{\pi}_{i \ell kj} \pi^*_{ikj} \pi_{ij}}{\sum_{h = 1}^2 \tilde{\pi}_{i \ell kh} \pi^*_{ikh} \pi_{ih}}}.
#' Rows of the matrix correspond to each subject. Columns of the matrix correspond
#' to the true outcome categories \eqn{j = 1, \dots,} \code{n_cat}.
#'
#' @include pi_compute.R
#' @include pistar_compute.R
#' @include pitilde_compute.R
#'
#' @importFrom stats rnorm rgamma rmultinom
#'
w_j_2stage <- function(ystar_matrix, ytilde_matrix,
pitilde_array, pistar_matrix, pi_matrix,
sample_size, n_cat){
big_Ystar_matrix = array(c(rep(ystar_matrix[,1], 2),
rep(ystar_matrix[,2], 2),
rep(ystar_matrix[,1], 2),
rep(ystar_matrix[,2], 2)),
dim = c(2 * sample_size, 2, 2))
big_Ytilde_matrix = array(rep(c(ytilde_matrix[,1],
ytilde_matrix[,2]), 4),
dim = c(2 * sample_size, 2, 2))
big_pi_array = array(c(rep(pi_matrix[,1], 4),
rep(pi_matrix[,2], 4)),
dim = c(2 * sample_size, 2, 2))
big_pistar_array = array(c(rep(pistar_matrix[1:sample_size, 1], 2),
rep(pistar_matrix[(sample_size + 1):(2 * sample_size), 1], 2),
rep(pistar_matrix[1:sample_size, 2], 2),
rep(pistar_matrix[(sample_size + 1):(2 * sample_size), 2], 2)),
dim = c(2 * sample_size, 2, 2))
pi_multiply <- pitilde_array * big_pistar_array * big_pi_array
weight_denominator <- pi_multiply[,,1] + pi_multiply[,,2]
pi_y_multiply <- big_Ystar_matrix * big_Ytilde_matrix * pi_multiply
pi_y_before_sum <- pi_y_multiply / array(c(weight_denominator, weight_denominator),
dim = c(dim(weight_denominator), 2))
pi_y_suml <- suml_function(pi_y_before_sum)
pi_y_sumk <- apply(pi_y_suml, 3, rowSums)
weight_matrix <- pi_y_sumk
return(weight_matrix)
}
suml_function <- function(my_array){
my_n <- dim(my_array[,,1])[1] / 2
sum1 <- matrix(c(my_array[1:my_n, 1, 1] + my_array[(my_n + 1):(2 * my_n), 1, 1],
my_array[1:my_n, 2, 1] + my_array[(my_n + 1):(2 * my_n), 2, 1]),
nrow = my_n)
sum2 <- matrix(c(my_array[1:my_n, 1, 2] + my_array[(my_n + 1):(2 * my_n), 1, 2],
my_array[1:my_n, 2, 2] + my_array[(my_n + 1):(2 * my_n), 2, 2]),
nrow = my_n)
return_array <- array(c(sum1, sum2), dim = c(my_n, 2, 2))
return(return_array)
}
|
/scratch/gouwar.j/cran-all/cranData/COMBO/R/w_j_2stage.R
|
## ----setup, include=FALSE-----------------------------------------------------
knitr::opts_chunk$set(echo = FALSE)
library(kableExtra)
## -----------------------------------------------------------------------------
Term <- c("$X$", "$Z$", "$Y$", "$y_{ij}$", "$Y^*$", "$y^*_{ik}$",
"True Outcome Mechanism", "Observation Mechanism",
"$\\pi_{ij}$", "$\\pi^*_{ikj}$", "$\\pi^*_{ik}$", "$\\pi^*_{jj}$",
"Sensitivity", "Specificity",
"$\\beta_X$", "$\\gamma_{11Z}$", "$\\gamma_{12Z}$")
Definition <- c("--", "--",
"$Y \\in \\{1, 2\\}$", "$\\mathbb{I}\\{Y_i = j\\}$",
"$Y^* \\in \\{1, 2\\}$", "$\\mathbb{I}\\{Y^*_i = k\\}$",
"$\\text{logit} \\{ P(Y = j | X ; \\beta) \\} = \\beta_{j0} + \\beta_{jX} X$",
"$\\text{logit}\\{ P(Y^* = k | Y = j, Z ; \\gamma) \\} = \\gamma_{kj0} + \\gamma_{kjZ} Z$",
"$P(Y_i = j | X ; \\beta) = \\frac{\\text{exp}\\{\\beta_{j0} + \\beta_{jX} X_i\\}}{1 + \\text{exp}\\{\\beta_{j0} + \\beta_{jX} X_i\\}}$",
"$P(Y^*_i = k | Y_i = j, Z ; \\gamma) = \\frac{\\text{exp}\\{\\gamma_{kj0} + \\gamma_{kjZ} Z_i\\}}{1 + \\text{exp}\\{\\gamma_{kj0} + \\gamma_{kjZ} Z_i\\}}$",
"$P(Y^*_i = k | Y_i, X, Z ; \\gamma) = \\sum_{j = 1}^2 \\pi^*_{ikj} \\pi_{ij}$",
"$P(Y^* = j | Y = j, Z ; \\gamma) = \\sum_{i = 1}^N \\pi^*_{ijj}$",
"$P(Y^* = 1 | Y = 1, Z ; \\gamma) = \\sum_{i = 1}^N \\pi^*_{i11}$",
"$P(Y^* = 2 | Y = 2, Z ; \\gamma) = \\sum_{i = 1}^N \\pi^*_{i22}$",
"--", "--", "--")
Description <- c("Predictor matrix for the true outcome.",
"Predictor matrix for the observed outcome, conditional on the true outcome.",
"True binary outcome. Reference category is 2.",
"Indicator for the true binary outcome.",
"Observed binary outcome. Reference category is 2.",
"Indicator for the observed binary outcome.",
"Relationship between $X$ and the true outcome, $Y$.",
"Relationship between $Z$ and the observed outcome, $Y^*$, given the true outcome $Y$.",
"Response probability for individual $i$'s true outcome category.",
"Response probability for individual $i$'s observed outcome category, conditional on the true outcome.",
"Response probability for individual $i$'s observed outcome cateogry.",
"Average probability of correct classification for category $j$.",
"True positive rate. Average probability of observing outcome $k = 1$, given the true outcome $j = 1$.",
"True negative rate. Average probability of observing outcome $k = 2$, given the true outcome $j = 2$.",
"Association parameter of interest in the true outcome mechanism.",
"Association parameter of interest in the observation mechanism, given $j=1$.",
"Association parameter of interest in the observation mechanism, given $j=2$.")
notation_table <- data.frame(Term, Definition, Description)
## -----------------------------------------------------------------------------
kableExtra::kbl(notation_table, escape = FALSE, booktabs = TRUE) %>%
kableExtra::kable_styling(latex_options = "HOLD_position")
|
/scratch/gouwar.j/cran-all/cranData/COMBO/inst/doc/COMBO_notation_guide.R
|
---
title: "COMBO Notation Guide"
author: "Kim Hochstedler"
output: rmarkdown::html_vignette
header-includes:
- \usepackage[fontsize=13pt]{scrextend}
- \usepackage{kableExtra}
- \usepackage{knitr}
vignette: >
%\VignetteIndexEntry{COMBO Notation Guide}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = FALSE)
library(kableExtra)
```
```{r}
Term <- c("$X$", "$Z$", "$Y$", "$y_{ij}$", "$Y^*$", "$y^*_{ik}$",
"True Outcome Mechanism", "Observation Mechanism",
"$\\pi_{ij}$", "$\\pi^*_{ikj}$", "$\\pi^*_{ik}$", "$\\pi^*_{jj}$",
"Sensitivity", "Specificity",
"$\\beta_X$", "$\\gamma_{11Z}$", "$\\gamma_{12Z}$")
Definition <- c("--", "--",
"$Y \\in \\{1, 2\\}$", "$\\mathbb{I}\\{Y_i = j\\}$",
"$Y^* \\in \\{1, 2\\}$", "$\\mathbb{I}\\{Y^*_i = k\\}$",
"$\\text{logit} \\{ P(Y = j | X ; \\beta) \\} = \\beta_{j0} + \\beta_{jX} X$",
"$\\text{logit}\\{ P(Y^* = k | Y = j, Z ; \\gamma) \\} = \\gamma_{kj0} + \\gamma_{kjZ} Z$",
"$P(Y_i = j | X ; \\beta) = \\frac{\\text{exp}\\{\\beta_{j0} + \\beta_{jX} X_i\\}}{1 + \\text{exp}\\{\\beta_{j0} + \\beta_{jX} X_i\\}}$",
"$P(Y^*_i = k | Y_i = j, Z ; \\gamma) = \\frac{\\text{exp}\\{\\gamma_{kj0} + \\gamma_{kjZ} Z_i\\}}{1 + \\text{exp}\\{\\gamma_{kj0} + \\gamma_{kjZ} Z_i\\}}$",
"$P(Y^*_i = k | Y_i, X, Z ; \\gamma) = \\sum_{j = 1}^2 \\pi^*_{ikj} \\pi_{ij}$",
"$P(Y^* = j | Y = j, Z ; \\gamma) = \\sum_{i = 1}^N \\pi^*_{ijj}$",
"$P(Y^* = 1 | Y = 1, Z ; \\gamma) = \\sum_{i = 1}^N \\pi^*_{i11}$",
"$P(Y^* = 2 | Y = 2, Z ; \\gamma) = \\sum_{i = 1}^N \\pi^*_{i22}$",
"--", "--", "--")
Description <- c("Predictor matrix for the true outcome.",
"Predictor matrix for the observed outcome, conditional on the true outcome.",
"True binary outcome. Reference category is 2.",
"Indicator for the true binary outcome.",
"Observed binary outcome. Reference category is 2.",
"Indicator for the observed binary outcome.",
"Relationship between $X$ and the true outcome, $Y$.",
"Relationship between $Z$ and the observed outcome, $Y^*$, given the true outcome $Y$.",
"Response probability for individual $i$'s true outcome category.",
"Response probability for individual $i$'s observed outcome category, conditional on the true outcome.",
"Response probability for individual $i$'s observed outcome cateogry.",
"Average probability of correct classification for category $j$.",
"True positive rate. Average probability of observing outcome $k = 1$, given the true outcome $j = 1$.",
"True negative rate. Average probability of observing outcome $k = 2$, given the true outcome $j = 2$.",
"Association parameter of interest in the true outcome mechanism.",
"Association parameter of interest in the observation mechanism, given $j=1$.",
"Association parameter of interest in the observation mechanism, given $j=2$.")
notation_table <- data.frame(Term, Definition, Description)
```
## Notation
This guide is designed to summarize key notation and quantities used the *COMBO* R Package and associated publications.
```{r}
kableExtra::kbl(notation_table, escape = FALSE, booktabs = TRUE) %>%
kableExtra::kable_styling(latex_options = "HOLD_position")
```
|
/scratch/gouwar.j/cran-all/cranData/COMBO/inst/doc/COMBO_notation_guide.Rmd
|
## ----setup, include=FALSE-----------------------------------------------------
knitr::opts_chunk$set(echo = FALSE)
library(kableExtra)
## -----------------------------------------------------------------------------
Term <- c("$X$", "$Z$", "V",
"$Y$", "$y_{ij}$", "$Y^*$", "$y^*_{ik}$", "$\\tilde{Y}$", "$\\tilde{y}_{i \\ell}$",
"True Outcome Mechanism", "First-Stage Observation Mechanism",
"Second-Stage Observation Mechanism",
"$\\pi_{ij}$", "$\\pi^*_{ikj}$", "$\\tilde{\\pi}_{i \\ell kj}$",
"$\\pi^*_{ik}$", "$\\pi^*_{jj}$", "$\\tilde{\\pi}_{jjj}$",
"First-Stage Sensitivity", "Second-Stage Specificity",
"$\\beta_X$", "$\\gamma_{11Z}$", "$\\gamma_{12Z}$",
"$\\delta_{111Z}$", "$\\delta_{121Z}$", "$\\delta_{112Z}$", "$\\delta_{122Z}$")
Definition <- c("--", "--", "--",
"$Y \\in \\{1, 2\\}$", "$\\mathbb{I}\\{Y_i = j\\}$",
"$Y^* \\in \\{1, 2\\}$", "$\\mathbb{I}\\{Y^*_i = k\\}$",
"$\\tilde{Y} \\in \\{1, 2\\}$", "$\\mathbb{I}\\{\\tilde{Y}_i = \\ell \\}$",
"$\\text{logit} \\{ P(Y = j | X ; \\beta) \\} = \\beta_{j0} + \\beta_{jX} X$",
"$\\text{logit}\\{ P(Y^* = k | Y = j, Z ; \\gamma) \\} = \\gamma_{kj0} + \\gamma_{kjZ} Z$",
"$\\text{logit}\\{ P(\\tilde{Y} = \\ell | Y^* = k, Y = j, V ; \\delta) \\} = \\delta_{\\ell kj0} + \\delta_{\\ell kjV} V$",
"$P(Y_i = j | X ; \\beta) = \\frac{\\text{exp}\\{\\beta_{j0} + \\beta_{jX} X_i\\}}{1 + \\text{exp}\\{\\beta_{j0} + \\beta_{jX} X_i\\}}$",
"$P(Y^*_i = k | Y_i = j, Z ; \\gamma) = \\frac{\\text{exp}\\{\\gamma_{kj0} + \\gamma_{kjZ} Z_i\\}}{1 + \\text{exp}\\{\\gamma_{kj0} + \\gamma_{kjZ} Z_i\\}}$",
"$P(\\tilde{Y}_i = \\ell | Y^*_i = k, Y_i = j, Z ; \\delta) = \\frac{\\text{exp}\\{\\delta_{\\ell kj0} + \\delta_{\\ell kjV} V_i\\}}{1 + \\text{exp}\\{\\delta_{\\ell kj0} + \\delta_{\\ell kjV} V_i\\}}$",
"$P(Y^*_i = k | Y_i, X, Z ; \\gamma) = \\sum_{j = 1}^2 \\pi^*_{ikj} \\pi_{ij}$",
"$P(Y^* = j | Y = j, Z ; \\gamma) = \\sum_{i = 1}^N \\pi^*_{ijj}$",
"$P(\\tilde{Y} = j | Y^* = j, Y = j, Z ; \\delta) = \\sum_{i = 1}^N \\tilde{\\pi}_{ijjj}$",
"$P(Y^* = 1 | Y = 1, Z ; \\gamma) = \\sum_{i = 1}^N \\pi^*_{i11}$",
"$P(Y^* = 2 | Y = 2, Z ; \\gamma) = \\sum_{i = 1}^N \\pi^*_{i22}$",
"--", "--", "--", "--", "--", "--", "--")
Description <- c("Predictor matrix for the true outcome.",
"Predictor matrix for the first-stage observed outcome, conditional on the true outcome.",
"Predictor matrix for the second-stage observed outcome, conditional on the true outcome and first-stage observed outcome.",
"True binary outcome. Reference category is 2.",
"Indicator for the true binary outcome.",
"First-stage observed binary outcome. Reference category is 2.",
"Indicator for the first-stage observed binary outcome.",
"Second-stage observed binary outcome. Reference category is 2.",
"Indicator for the second-stage observed binary outcome.",
"Relationship between $X$ and the true outcome, $Y$.",
"Relationship between $Z$ and the first-stage observed outcome, $Y^*$, given the true outcome $Y$.",
"Relationship between $V$ and the second-stage observed outcome, $\\tilde{Y}$, given the first-stage observed outcome, $Y^*$, and the true outcome $Y$.",
"Response probability for individual $i$'s true outcome category.",
"Response probability for individual $i$'s first-stage observed outcome category, conditional on the true outcome.",
"Response probability for individual $i$'s second-stage observed outcome category, conditional on the first-stage observed outcome and the true outcome.",
"Response probability for individual $i$'s first-stage observed outcome cateogry.",
"Average probability of first-stage correct classification for category $j$.",
"Average probability of first-stage and second-stage correct classification for category $j$.",
"True positive rate. Average probability of observing outcome $k = 1$, given the true outcome $j = 1$.",
"True negative rate. Average probability of observing outcome $k = 2$, given the true outcome $j = 2$.",
"Association parameter of interest in the true outcome mechanism.",
"Association parameter of interest in the first-stage observation mechanism, given $j=1$.",
"Association parameter of interest in the first-stage observation mechanism, given $j=2$.",
"Association parameter of interest in the second-stage observation mechanism, given $k = 1$ and $j = 1$.",
"Association parameter of interest in the second-stage observation mechanism, given $k = 2$ and $j = 1$.",
"Association parameter of interest in the second-stage observation mechanism, given $k = 1$ and $j = 2$.",
"Association parameter of interest in the second-stage observation mechanism, given $k = 2$ and $j = 2$.")
notation_table <- data.frame(Term, Definition, Description)
## -----------------------------------------------------------------------------
kableExtra::kbl(notation_table, escape = FALSE, booktabs = TRUE) %>%
kableExtra::kable_styling(latex_options = "HOLD_position")
|
/scratch/gouwar.j/cran-all/cranData/COMBO/inst/doc/COMBO_notation_guide_2stage.R
|
---
title: "COMBO Notation Guide - Two-stage Misclassification Model"
author: "Kim Hochstedler"
output: rmarkdown::html_vignette
header-includes:
- \usepackage[fontsize=13pt]{scrextend}
- \usepackage{kableExtra}
- \usepackage{knitr}
vignette: >
%\VignetteIndexEntry{COMBO Notation Guide - Two-stage Misclassification Model}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = FALSE)
library(kableExtra)
```
```{r}
Term <- c("$X$", "$Z$", "V",
"$Y$", "$y_{ij}$", "$Y^*$", "$y^*_{ik}$", "$\\tilde{Y}$", "$\\tilde{y}_{i \\ell}$",
"True Outcome Mechanism", "First-Stage Observation Mechanism",
"Second-Stage Observation Mechanism",
"$\\pi_{ij}$", "$\\pi^*_{ikj}$", "$\\tilde{\\pi}_{i \\ell kj}$",
"$\\pi^*_{ik}$", "$\\pi^*_{jj}$", "$\\tilde{\\pi}_{jjj}$",
"First-Stage Sensitivity", "Second-Stage Specificity",
"$\\beta_X$", "$\\gamma_{11Z}$", "$\\gamma_{12Z}$",
"$\\delta_{111Z}$", "$\\delta_{121Z}$", "$\\delta_{112Z}$", "$\\delta_{122Z}$")
Definition <- c("--", "--", "--",
"$Y \\in \\{1, 2\\}$", "$\\mathbb{I}\\{Y_i = j\\}$",
"$Y^* \\in \\{1, 2\\}$", "$\\mathbb{I}\\{Y^*_i = k\\}$",
"$\\tilde{Y} \\in \\{1, 2\\}$", "$\\mathbb{I}\\{\\tilde{Y}_i = \\ell \\}$",
"$\\text{logit} \\{ P(Y = j | X ; \\beta) \\} = \\beta_{j0} + \\beta_{jX} X$",
"$\\text{logit}\\{ P(Y^* = k | Y = j, Z ; \\gamma) \\} = \\gamma_{kj0} + \\gamma_{kjZ} Z$",
"$\\text{logit}\\{ P(\\tilde{Y} = \\ell | Y^* = k, Y = j, V ; \\delta) \\} = \\delta_{\\ell kj0} + \\delta_{\\ell kjV} V$",
"$P(Y_i = j | X ; \\beta) = \\frac{\\text{exp}\\{\\beta_{j0} + \\beta_{jX} X_i\\}}{1 + \\text{exp}\\{\\beta_{j0} + \\beta_{jX} X_i\\}}$",
"$P(Y^*_i = k | Y_i = j, Z ; \\gamma) = \\frac{\\text{exp}\\{\\gamma_{kj0} + \\gamma_{kjZ} Z_i\\}}{1 + \\text{exp}\\{\\gamma_{kj0} + \\gamma_{kjZ} Z_i\\}}$",
"$P(\\tilde{Y}_i = \\ell | Y^*_i = k, Y_i = j, Z ; \\delta) = \\frac{\\text{exp}\\{\\delta_{\\ell kj0} + \\delta_{\\ell kjV} V_i\\}}{1 + \\text{exp}\\{\\delta_{\\ell kj0} + \\delta_{\\ell kjV} V_i\\}}$",
"$P(Y^*_i = k | Y_i, X, Z ; \\gamma) = \\sum_{j = 1}^2 \\pi^*_{ikj} \\pi_{ij}$",
"$P(Y^* = j | Y = j, Z ; \\gamma) = \\sum_{i = 1}^N \\pi^*_{ijj}$",
"$P(\\tilde{Y} = j | Y^* = j, Y = j, Z ; \\delta) = \\sum_{i = 1}^N \\tilde{\\pi}_{ijjj}$",
"$P(Y^* = 1 | Y = 1, Z ; \\gamma) = \\sum_{i = 1}^N \\pi^*_{i11}$",
"$P(Y^* = 2 | Y = 2, Z ; \\gamma) = \\sum_{i = 1}^N \\pi^*_{i22}$",
"--", "--", "--", "--", "--", "--", "--")
Description <- c("Predictor matrix for the true outcome.",
"Predictor matrix for the first-stage observed outcome, conditional on the true outcome.",
"Predictor matrix for the second-stage observed outcome, conditional on the true outcome and first-stage observed outcome.",
"True binary outcome. Reference category is 2.",
"Indicator for the true binary outcome.",
"First-stage observed binary outcome. Reference category is 2.",
"Indicator for the first-stage observed binary outcome.",
"Second-stage observed binary outcome. Reference category is 2.",
"Indicator for the second-stage observed binary outcome.",
"Relationship between $X$ and the true outcome, $Y$.",
"Relationship between $Z$ and the first-stage observed outcome, $Y^*$, given the true outcome $Y$.",
"Relationship between $V$ and the second-stage observed outcome, $\\tilde{Y}$, given the first-stage observed outcome, $Y^*$, and the true outcome $Y$.",
"Response probability for individual $i$'s true outcome category.",
"Response probability for individual $i$'s first-stage observed outcome category, conditional on the true outcome.",
"Response probability for individual $i$'s second-stage observed outcome category, conditional on the first-stage observed outcome and the true outcome.",
"Response probability for individual $i$'s first-stage observed outcome cateogry.",
"Average probability of first-stage correct classification for category $j$.",
"Average probability of first-stage and second-stage correct classification for category $j$.",
"True positive rate. Average probability of observing outcome $k = 1$, given the true outcome $j = 1$.",
"True negative rate. Average probability of observing outcome $k = 2$, given the true outcome $j = 2$.",
"Association parameter of interest in the true outcome mechanism.",
"Association parameter of interest in the first-stage observation mechanism, given $j=1$.",
"Association parameter of interest in the first-stage observation mechanism, given $j=2$.",
"Association parameter of interest in the second-stage observation mechanism, given $k = 1$ and $j = 1$.",
"Association parameter of interest in the second-stage observation mechanism, given $k = 2$ and $j = 1$.",
"Association parameter of interest in the second-stage observation mechanism, given $k = 1$ and $j = 2$.",
"Association parameter of interest in the second-stage observation mechanism, given $k = 2$ and $j = 2$.")
notation_table <- data.frame(Term, Definition, Description)
```
## Notation
This guide is designed to summarize key notation and quantities used the *COMBO* R Package and associated publications.
```{r}
kableExtra::kbl(notation_table, escape = FALSE, booktabs = TRUE) %>%
kableExtra::kable_styling(latex_options = "HOLD_position")
```
|
/scratch/gouwar.j/cran-all/cranData/COMBO/inst/doc/COMBO_notation_guide_2stage.Rmd
|
---
title: "COMBO Notation Guide"
author: "Kim Hochstedler"
output: rmarkdown::html_vignette
header-includes:
- \usepackage[fontsize=13pt]{scrextend}
- \usepackage{kableExtra}
- \usepackage{knitr}
vignette: >
%\VignetteIndexEntry{COMBO Notation Guide}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = FALSE)
library(kableExtra)
```
```{r}
Term <- c("$X$", "$Z$", "$Y$", "$y_{ij}$", "$Y^*$", "$y^*_{ik}$",
"True Outcome Mechanism", "Observation Mechanism",
"$\\pi_{ij}$", "$\\pi^*_{ikj}$", "$\\pi^*_{ik}$", "$\\pi^*_{jj}$",
"Sensitivity", "Specificity",
"$\\beta_X$", "$\\gamma_{11Z}$", "$\\gamma_{12Z}$")
Definition <- c("--", "--",
"$Y \\in \\{1, 2\\}$", "$\\mathbb{I}\\{Y_i = j\\}$",
"$Y^* \\in \\{1, 2\\}$", "$\\mathbb{I}\\{Y^*_i = k\\}$",
"$\\text{logit} \\{ P(Y = j | X ; \\beta) \\} = \\beta_{j0} + \\beta_{jX} X$",
"$\\text{logit}\\{ P(Y^* = k | Y = j, Z ; \\gamma) \\} = \\gamma_{kj0} + \\gamma_{kjZ} Z$",
"$P(Y_i = j | X ; \\beta) = \\frac{\\text{exp}\\{\\beta_{j0} + \\beta_{jX} X_i\\}}{1 + \\text{exp}\\{\\beta_{j0} + \\beta_{jX} X_i\\}}$",
"$P(Y^*_i = k | Y_i = j, Z ; \\gamma) = \\frac{\\text{exp}\\{\\gamma_{kj0} + \\gamma_{kjZ} Z_i\\}}{1 + \\text{exp}\\{\\gamma_{kj0} + \\gamma_{kjZ} Z_i\\}}$",
"$P(Y^*_i = k | Y_i, X, Z ; \\gamma) = \\sum_{j = 1}^2 \\pi^*_{ikj} \\pi_{ij}$",
"$P(Y^* = j | Y = j, Z ; \\gamma) = \\sum_{i = 1}^N \\pi^*_{ijj}$",
"$P(Y^* = 1 | Y = 1, Z ; \\gamma) = \\sum_{i = 1}^N \\pi^*_{i11}$",
"$P(Y^* = 2 | Y = 2, Z ; \\gamma) = \\sum_{i = 1}^N \\pi^*_{i22}$",
"--", "--", "--")
Description <- c("Predictor matrix for the true outcome.",
"Predictor matrix for the observed outcome, conditional on the true outcome.",
"True binary outcome. Reference category is 2.",
"Indicator for the true binary outcome.",
"Observed binary outcome. Reference category is 2.",
"Indicator for the observed binary outcome.",
"Relationship between $X$ and the true outcome, $Y$.",
"Relationship between $Z$ and the observed outcome, $Y^*$, given the true outcome $Y$.",
"Response probability for individual $i$'s true outcome category.",
"Response probability for individual $i$'s observed outcome category, conditional on the true outcome.",
"Response probability for individual $i$'s observed outcome cateogry.",
"Average probability of correct classification for category $j$.",
"True positive rate. Average probability of observing outcome $k = 1$, given the true outcome $j = 1$.",
"True negative rate. Average probability of observing outcome $k = 2$, given the true outcome $j = 2$.",
"Association parameter of interest in the true outcome mechanism.",
"Association parameter of interest in the observation mechanism, given $j=1$.",
"Association parameter of interest in the observation mechanism, given $j=2$.")
notation_table <- data.frame(Term, Definition, Description)
```
## Notation
This guide is designed to summarize key notation and quantities used the *COMBO* R Package and associated publications.
```{r}
kableExtra::kbl(notation_table, escape = FALSE, booktabs = TRUE) %>%
kableExtra::kable_styling(latex_options = "HOLD_position")
```
|
/scratch/gouwar.j/cran-all/cranData/COMBO/vignettes/COMBO_notation_guide.Rmd
|
---
title: "COMBO Notation Guide - Two-stage Misclassification Model"
author: "Kim Hochstedler"
output: rmarkdown::html_vignette
header-includes:
- \usepackage[fontsize=13pt]{scrextend}
- \usepackage{kableExtra}
- \usepackage{knitr}
vignette: >
%\VignetteIndexEntry{COMBO Notation Guide - Two-stage Misclassification Model}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = FALSE)
library(kableExtra)
```
```{r}
Term <- c("$X$", "$Z$", "V",
"$Y$", "$y_{ij}$", "$Y^*$", "$y^*_{ik}$", "$\\tilde{Y}$", "$\\tilde{y}_{i \\ell}$",
"True Outcome Mechanism", "First-Stage Observation Mechanism",
"Second-Stage Observation Mechanism",
"$\\pi_{ij}$", "$\\pi^*_{ikj}$", "$\\tilde{\\pi}_{i \\ell kj}$",
"$\\pi^*_{ik}$", "$\\pi^*_{jj}$", "$\\tilde{\\pi}_{jjj}$",
"First-Stage Sensitivity", "Second-Stage Specificity",
"$\\beta_X$", "$\\gamma_{11Z}$", "$\\gamma_{12Z}$",
"$\\delta_{111Z}$", "$\\delta_{121Z}$", "$\\delta_{112Z}$", "$\\delta_{122Z}$")
Definition <- c("--", "--", "--",
"$Y \\in \\{1, 2\\}$", "$\\mathbb{I}\\{Y_i = j\\}$",
"$Y^* \\in \\{1, 2\\}$", "$\\mathbb{I}\\{Y^*_i = k\\}$",
"$\\tilde{Y} \\in \\{1, 2\\}$", "$\\mathbb{I}\\{\\tilde{Y}_i = \\ell \\}$",
"$\\text{logit} \\{ P(Y = j | X ; \\beta) \\} = \\beta_{j0} + \\beta_{jX} X$",
"$\\text{logit}\\{ P(Y^* = k | Y = j, Z ; \\gamma) \\} = \\gamma_{kj0} + \\gamma_{kjZ} Z$",
"$\\text{logit}\\{ P(\\tilde{Y} = \\ell | Y^* = k, Y = j, V ; \\delta) \\} = \\delta_{\\ell kj0} + \\delta_{\\ell kjV} V$",
"$P(Y_i = j | X ; \\beta) = \\frac{\\text{exp}\\{\\beta_{j0} + \\beta_{jX} X_i\\}}{1 + \\text{exp}\\{\\beta_{j0} + \\beta_{jX} X_i\\}}$",
"$P(Y^*_i = k | Y_i = j, Z ; \\gamma) = \\frac{\\text{exp}\\{\\gamma_{kj0} + \\gamma_{kjZ} Z_i\\}}{1 + \\text{exp}\\{\\gamma_{kj0} + \\gamma_{kjZ} Z_i\\}}$",
"$P(\\tilde{Y}_i = \\ell | Y^*_i = k, Y_i = j, Z ; \\delta) = \\frac{\\text{exp}\\{\\delta_{\\ell kj0} + \\delta_{\\ell kjV} V_i\\}}{1 + \\text{exp}\\{\\delta_{\\ell kj0} + \\delta_{\\ell kjV} V_i\\}}$",
"$P(Y^*_i = k | Y_i, X, Z ; \\gamma) = \\sum_{j = 1}^2 \\pi^*_{ikj} \\pi_{ij}$",
"$P(Y^* = j | Y = j, Z ; \\gamma) = \\sum_{i = 1}^N \\pi^*_{ijj}$",
"$P(\\tilde{Y} = j | Y^* = j, Y = j, Z ; \\delta) = \\sum_{i = 1}^N \\tilde{\\pi}_{ijjj}$",
"$P(Y^* = 1 | Y = 1, Z ; \\gamma) = \\sum_{i = 1}^N \\pi^*_{i11}$",
"$P(Y^* = 2 | Y = 2, Z ; \\gamma) = \\sum_{i = 1}^N \\pi^*_{i22}$",
"--", "--", "--", "--", "--", "--", "--")
Description <- c("Predictor matrix for the true outcome.",
"Predictor matrix for the first-stage observed outcome, conditional on the true outcome.",
"Predictor matrix for the second-stage observed outcome, conditional on the true outcome and first-stage observed outcome.",
"True binary outcome. Reference category is 2.",
"Indicator for the true binary outcome.",
"First-stage observed binary outcome. Reference category is 2.",
"Indicator for the first-stage observed binary outcome.",
"Second-stage observed binary outcome. Reference category is 2.",
"Indicator for the second-stage observed binary outcome.",
"Relationship between $X$ and the true outcome, $Y$.",
"Relationship between $Z$ and the first-stage observed outcome, $Y^*$, given the true outcome $Y$.",
"Relationship between $V$ and the second-stage observed outcome, $\\tilde{Y}$, given the first-stage observed outcome, $Y^*$, and the true outcome $Y$.",
"Response probability for individual $i$'s true outcome category.",
"Response probability for individual $i$'s first-stage observed outcome category, conditional on the true outcome.",
"Response probability for individual $i$'s second-stage observed outcome category, conditional on the first-stage observed outcome and the true outcome.",
"Response probability for individual $i$'s first-stage observed outcome cateogry.",
"Average probability of first-stage correct classification for category $j$.",
"Average probability of first-stage and second-stage correct classification for category $j$.",
"True positive rate. Average probability of observing outcome $k = 1$, given the true outcome $j = 1$.",
"True negative rate. Average probability of observing outcome $k = 2$, given the true outcome $j = 2$.",
"Association parameter of interest in the true outcome mechanism.",
"Association parameter of interest in the first-stage observation mechanism, given $j=1$.",
"Association parameter of interest in the first-stage observation mechanism, given $j=2$.",
"Association parameter of interest in the second-stage observation mechanism, given $k = 1$ and $j = 1$.",
"Association parameter of interest in the second-stage observation mechanism, given $k = 2$ and $j = 1$.",
"Association parameter of interest in the second-stage observation mechanism, given $k = 1$ and $j = 2$.",
"Association parameter of interest in the second-stage observation mechanism, given $k = 2$ and $j = 2$.")
notation_table <- data.frame(Term, Definition, Description)
```
## Notation
This guide is designed to summarize key notation and quantities used the *COMBO* R Package and associated publications.
```{r}
kableExtra::kbl(notation_table, escape = FALSE, booktabs = TRUE) %>%
kableExtra::kable_styling(latex_options = "HOLD_position")
```
|
/scratch/gouwar.j/cran-all/cranData/COMBO/vignettes/COMBO_notation_guide_2stage.Rmd
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
hungarian_cc <- function(cost) {
.Call(`_COMIX_hungarian_cc`, cost)
}
calib <- function(Y, C, Z, mu_input, mu_dim, mu0_input, mu0_dim, ref) {
.Call(`_COMIX_calib`, Y, C, Z, mu_input, mu_dim, mu0_input, mu0_dim, ref)
}
calibNoDist <- function(Y, C, Z, mu_input, mu_dim, mu0_input, mu0_dim, ref) {
.Call(`_COMIX_calibNoDist`, Y, C, Z, mu_input, mu_dim, mu0_input, mu0_dim, ref)
}
perturbedSNcpp <- function(Y, C, prior, pmc, state, initParticles, init, ncores) {
.Call(`_COMIX_perturbedSNcpp`, Y, C, prior, pmc, state, initParticles, init, ncores)
}
KL <- function(xi_1, xi_2, Omega_1, Omega_2, alpha_1, alpha_2) {
.Call(`_COMIX_KL`, xi_1, xi_2, Omega_1, Omega_2, alpha_1, alpha_2)
}
relabel <- function(res) {
.Call(`_COMIX_relabel`, res)
}
|
/scratch/gouwar.j/cran-all/cranData/COMIX/R/RcppExports.R
|
#' The function computes (and by default plots) estimates of the autocovariance
#' or autocorrelation function for the different parameters of the model. This
#' is a wrapper for coda::acf.
#' @param res An object of class \code{COMIX} or \code{tidyChainCOMIX}.
#' @param params A character vector naming the parameters to compute and plot
#' the autocorrelation plots for.
#' @param only_non_trivial_clusters Logical, if \code{TRUE} only compute and/or
#' plot the autocorrelation for the clusters that are estimated to be non-empty.
#' @param lag.max maximum lag at which to calculate the autocorrelation. See more
#' details at ?acf.
#' @param type Character string giving the type of autocorrelation to be
#' computed. See more details at ?acf.
#' @param plot Logical. If \code{TRUE} (the default) the autocorrelation is
#' plotted.
#' @param ... Other arguments passed to \code{acf}.
#' @return An \code{acfParamsCOMIX} object which is a named list,
#' with a named element for each requested parameter. Each element is
#' an object of class \code{acf} (from the \code{coda} package).
#' #' @examples
#' library(COMIX)
#' # Number of observations for each sample (row) and cluster (column):
#' njk <-
#' matrix(
#' c(
#' 150, 300,
#' 250, 200
#' ),
#' nrow = 2,
#' byrow = TRUE
#' )
#'
#' # Dimension of data:
#' p <- 3
#'
#' # Scale and skew parameters for first cluster:
#' Sigma1 <- matrix(0.5, nrow = p, ncol = p) + diag(0.5, nrow = p)
#' alpha1 <- rep(0, p)
#' alpha1[1] <- -5
#' # location parameter for first cluster in first sample:
#' xi11 <- rep(0, p)
#' # location parameter for first cluster in second sample (aligned with first):
#' xi21 <- rep(0, p)
#'
#' # Scale and skew parameters for second cluster:
#' Sigma2 <- matrix(-1/3, nrow = p, ncol = p) + diag(1 + 1/3, nrow = p)
#' alpha2 <- rep(0, p)
#' alpha2[2] <- 5
#' # location parameter for second cluster in first sample:
#' xi12 <- rep(3, p)
#' # location parameter for second cluster in second sample (misaligned with first):
#' xi22 <- rep(4, p)
#'
#' # Sample data:
#' set.seed(1)
#' Y <-
#' rbind(
#' sn::rmsn(njk[1, 1], xi = xi11, Omega = Sigma1, alpha = alpha1),
#' sn::rmsn(njk[1, 2], xi = xi12, Omega = Sigma2, alpha = alpha2),
#' sn::rmsn(njk[2, 1], xi = xi21, Omega = Sigma1, alpha = alpha1),
#' sn::rmsn(njk[2, 2], xi = xi22, Omega = Sigma2, alpha = alpha2)
#' )
#'
#' C <- c(rep(1, rowSums(njk)[1]), rep(2, rowSums(njk)[2]))
#'
#' prior <- list(zeta = 1, K = 10)
#' pmc <- list(naprt = 5, nburn = 200, nsave = 200) # Reasonable usage
#' pmc <- list(naprt = 5, nburn = 2, nsave = 5) # Minimal usage for documentation
#' # Fit the model:
#' res <- comix(Y, C, pmc = pmc, prior = prior)
#'
#' # Relabel to resolve potential label switching issues:
#' res_relab <- relabelChain(res)
#' effssz <- effectiveSampleSize(res_relab, "w")
#' # Or:
#' tidy_chain <- tidyChain(res_relab, "w")
#' acf_w <- acfParams(tidy_chain, "w")
#'
#' # (see vignette for a more detailed example)
#' @export
acfParams <-
function(
res,
params = c("w", "xi", "xi0", "psi", "G", "E", "eta"),
only_non_trivial_clusters = TRUE,
# coda::acf() parameters:
lag.max = NULL, type = c("correlation", "covariance", "partial"),
plot = TRUE, ...
) {
stopifnot(is(res, "COMIX") | is(res, "tidyChainCOMIX"))
if (is(res, "COMIX")) {
tidy_chain <- tidyChain(res, params)
} else {
tidy_chain <- res
}
n <- attributes(tidy_chain)$n
P <- attributes(tidy_chain)$p
nsave <- attributes(tidy_chain)$nsave
K <- attributes(tidy_chain)$K
J <- attributes(tidy_chain)$J
non_trivial_k <- attributes(tidy_chain)$non_trivial_k
non_triv_j_k <- attributes(tidy_chain)$non_triv_j_k
acfParams <- list()
class(acfParams) <- "acfParamsCOMIX"
attributes(acfParams)$n <- n
attributes(acfParams)$p <- P
attributes(acfParams)$nsave <- nsave
attributes(acfParams)$K <- K
attributes(acfParams)$J <- J
attributes(acfParams)$non_trivial_k <- non_trivial_k
attributes(acfParams)$non_triv_j_k <- non_triv_j_k
# w -----
if ("w" %in% params) {
if (only_non_trivial_clusters) {
a <- tidy_chain$w
a$triv <- TRUE
for (j in 1:J) {
a$triv[a$j == j & a$k %in% non_triv_j_k[[as.character(j)]]] <- FALSE
}
a <- a %>% filter(!.data$triv) %>% select(-.data$triv)
} else {
a <- tidy_chain$w
}
a <-
a %>%
mutate(k = paste0("k=", .data$k), j = paste0("j=", .data$j)) %>%
pivot_wider(names_from = c(.data$k, .data$j), values_from = .data$W) %>%
select(-.data$iter)
aa <- mcmc(data = a, start = 1)
acfParams$w <-
acf(x = aa, lag.max = lag.max, type = type, plot = plot, ...)
}
# xi0 -----
if ("xi0" %in% params) {
if (only_non_trivial_clusters) {
a <- tidy_chain$xi0 %>% filter(.data$k %in% non_trivial_k)
} else {
a <- tidy_chain$xi0
}
a <-
a %>%
mutate(k = paste0("k=", .data$k), p = paste0("p=", .data$p)) %>%
pivot_wider(names_from = c(.data$k, .data$p), values_from = c(.data$xi0)) %>%
select(-.data$iter)
aa <- mcmc(data = a, start = 1)
acfParams$xi0 <-
acf(x = aa, lag.max = lag.max, type = type, plot = plot, ...)
}
# xi -----
if ("xi" %in% params) {
if (only_non_trivial_clusters) {
a <- tidy_chain$xi
a$triv <- TRUE
for (j in 1:J) {
a$triv[a$j == j & a$k %in% non_triv_j_k[[as.character(j)]]] <- FALSE
}
a <- a %>% filter(!.data$triv) %>% select(-.data$triv)
} else {
a <- tidy_chain$xi
}
a <-
a %>%
mutate(k = paste0("k=", .data$k), p = paste0("p=", .data$p), j = paste0("j=", .data$j)) %>%
pivot_wider(names_from = c(.data$k, .data$p, .data$j), values_from = c(.data$xi)) %>%
select(-.data$iter)
aa <- mcmc(data = a, start = 1)
acfParams$xi0 <-
acf(x = aa, lag.max = lag.max, type = type, plot = plot, ...)
}
# psi -----
if ("psi" %in% params) {
if (only_non_trivial_clusters) {
a <- tidy_chain$psi %>% filter(.data$k %in% non_trivial_k)
} else {
a <- tidy_chain$psi
}
a <-
a %>%
mutate(k = paste0("k=", .data$k), p = paste0("p=", .data$p)) %>%
pivot_wider(names_from = c(.data$k, .data$p), values_from = c(.data$psi)) %>%
select(-.data$iter)
aa <- mcmc(data = a, start = 1)
acfParams$psi <-
acf(x = aa, lag.max = lag.max, type = type, plot = plot, ...)
}
# G -----
if ("G" %in% params) {
if (only_non_trivial_clusters) {
a <- tidy_chain$G %>% filter(.data$k %in% non_trivial_k)
} else {
a <- tidy_chain$G
}
a <-
a %>%
mutate(k = paste0("k=", .data$k), p1 = paste0("p1=", .data$p1), p2 = paste0("p2=", .data$p2)) %>%
pivot_wider(names_from = c(.data$k, .data$p1, .data$p2), values_from = c(.data$G)) %>%
select(-.data$iter)
aa <- mcmc(data = a, start = 1)
acfParams$G <-
acf(x = aa, lag.max = lag.max, type = type, plot = plot, ...)
}
# E -----
if ("E" %in% params) {
if (only_non_trivial_clusters) {
a <- tidy_chain$E %>% filter(.data$k %in% non_trivial_k)
} else {
a <- tidy_chain$E
}
a <-
a %>%
mutate(k = paste0("k=", .data$k), p1 = paste0("p1=", .data$p1), p2 = paste0("p2=", .data$p2)) %>%
pivot_wider(names_from = c(.data$k, .data$p1, .data$p2), values_from = c(.data$E)) %>%
select(-.data$iter)
aa <- mcmc(data = a, start = 1)
acfParams$E <-
acf(x = aa, lag.max = lag.max, type = type, plot = plot, ...)
}
# eta -----
if ("eta" %in% params) {
eta <- mcmc(data = pull(tidy_chain$eta), start = 1)
acfParams$eta <-
acf(x = eta, lag.max = lag.max, type = type, plot = plot, ...)
}
return(acfParams)
}
|
/scratch/gouwar.j/cran-all/cranData/COMIX/R/acf_plots.R
|
#'
#' This function generates a sample from the posterior of COMIX.
#'
#' @param Y Matrix of the data. Each row represents an observation.
#' @param C Vector of the group label of each observation. Labels must be integers starting from 1.
#' @param prior A list giving the prior information. If unspecified, a default prior is used.
#' The list includes the following parameters:
#' \itemize{
#' \item \code{zeta}: Coarsening parameter. A number between 0 and 1. \code{zeta} = 1: sample from standard posterior;
#' \code{zeta} < 1: sample from power posterior. The lower \code{zeta} is, the more flexible the kernels become.
#' \item \code{K}: Maximal number of mixture components.
#' \item \code{eta_prior} Parameters for gamma prior for concentration parameter of the stick breaking process
#' prior for the weights.
#' \item \code{m0}: Number of degrees of freedom for the inverse Wishart prior for Sigma, the covariance matrix
#' of the kernels.
#' \item \code{Lambda}: Mean parameter for the inverse Wishart prior for Sigma, the covariance matrix
#' of the kernels.
#' \item \code{b0}: Mean parameter for the multivariate normal distribution that is the prior for the
#' group mean parameter xi0.
#' \item \code{B0}: Covariance parameter for the multivariate normal distribution that is the prior for the
#' group mean parameter xi0.
#' \item \code{e0}: Number of degrees of freedom for the inverse Wishart prior for \eqn{E_k}{E_k}, the
#' covariance matrix of the multivariate normal from which \eqn{\xi_{j,k}}{xi_jk} are drawn.
#' \item \code{E0}: Mean parameter for the inverse Wishart prior for \eqn{E_k}{E_k}, the
#' covariance matrix of the multivariate normal from which \eqn{\xi_{j,k}}{xi_jk} are drawn.
#' \item \code{merge_step}: Introduce step to merge mixture components with small KL divergence. Default
#' is \code{merge_step = TRUE}.
#' \item \code{merge_par}: Parameter controlling merging radius. Default is \code{merge_par = 0.1}.
#' }
#' @param pmc A list giving the Population Monte Carlo (PMC) parameters:
#' \itemize{
#' \item \code{npart}: Number of PMC particles.
#' \item \code{nburn}: Number of burn-in steps
#' \item \code{nsave}: Number of steps in the chain after burn-in.
#' \item \code{nskip}: Thinning parameter, number of steps to skip between saving steps after burn-in.
#' \item \code{ndisplay}: Display status of chain after every \code{ndisplay} steps.
#' }
#' @param state A list giving the initial cluster labels:
#' \itemize{
#' \item \code{t}: An integer vector, same length as the number of rows of \code{Y}, with cluster labels
#' between \code{1} and \code{K}.
#' }
#' @param ncores The number of CPU cores to utilize in parallel. Defaults to 2.
#' @return An object of class COMIX, a list of 4:
#' \itemize{
#' \code{chain}, a named list:
#' \itemize{
#' \item \code{t}: an \code{nsave} \eqn{\times}{x} \code{nrow(Y)} matrix with estimated cluster labels
#' for each saved step of the chain and each observation in the data \code{Y}.
#' \item \code{z}: a \code{nsave} \eqn{\times}{x} \code{nrow(Y)} matrix with estimated values of
#' the latent \eqn{z_{i,j}}{z_ij} variable for the parameterization of the
#' multivariate skew normal distribution used in the sampler for each saved step of
#' the chain and each observation in the data \code{Y}.
#' \item \code{W}: an \code{length(unique(C))} \eqn{\times}{x} \code{K} \eqn{\times}{x}
#' \item \code{nsave}: array storing the estimated sample- and cluster-specific weights for each
#' saved step of the chain.
#' \item \code{xi}: an \code{length(unique(C))} \eqn{\times}{x} \code{(ncol(Y) x K)}
#' \eqn{\times}{x} \code{nsave} array storing the estimated sample- and cluster-specific
#' multivariate skew normal location parameters of the kernel for each saved step of the chain.
#' \item \code{xi0}: an \code{ncol(Y)} \eqn{\times}{x} \code{K} \eqn{\times}{x}
#' \item \code{nsave}: array storing the estimated cluster-specific
#' group location parameters for each saved step of the chain.
#' \item \code{psi}: an \code{ncol(Y)} \eqn{\times}{x} \code{K} \eqn{\times}{x} \code{nsave}
#' array storing the estimated cluster-specific skew parameters of the kernels in
#' the parameterization of the
#' multivariate skew normal distribution used in the sampler
#' for each saved step of the chain.
#' \item \code{G}: an \code{ncol(Y)} \eqn{\times}{x} \code{(ncol(Y) x K)}
#' \eqn{\times}{x} \code{nsave} array storing the estimated cluster-specific
#' multivariate skew normal scale matrix (in row format) of the kernel
#' used in the sampler for each saved step of the chain.
#' \item \code{E}: an \code{ncol(Y)} \eqn{\times}{x} \code{(ncol(Y) x K)}
#' \eqn{\times}{x} \code{nsave} array storing the estimated covariance matrix
#' (in row format) of the multivariate normal distribution from which the
#' sample- and cluster-specific location parameters are drawn for each saved step
#' of the chain.
#' \item \code{eta}: a \code{nsave} \eqn{\times}{x} \code{1} matrix storing the
#' estimated Dirichlet Process Mixture concentration parameter for each saved step of the chain.
#' \item \code{Sigma}: an \code{ncol(Y)} \eqn{\times}{x} \code{(ncol(Y) x K)}
#' \eqn{\times}{x} \code{nsave} array storing the estimated cluster-specific
#' multivariate skew normal scale matrix (in row format) of the kernel for each saved step of the chain.
#' \item \code{alpha}: an \code{ncol(Y)} \eqn{\times}{x} \code{K} \eqn{\times}{x} \code{nsave}
#' array storing the estimated cluster-specific skew parameters of the
#' kernel's multivariate skew normal distribution
#' for each saved step of the chain.
#' }
#' \item \code{data}, a named list that includes the matrix of the data \code{Y}
#' and \code{C} the vector of the group label of each observation.
#' \item \code{prior} and \code{pmc}, the lists, as above, that were provided as inputs to
#' the function.
#' }
#' @examples
#' library(COMIX)
#' # Number of observations for each sample (row) and cluster (column):
#' njk <-
#' matrix(
#' c(
#' 150, 300,
#' 250, 200
#' ),
#' nrow = 2,
#' byrow = TRUE
#' )
#'
#'
#' # Dimension of data:
#' p <- 3
#'
#' # Scale and skew parameters for first cluster:
#' Sigma1 <- matrix(0.5, nrow = p, ncol = p) + diag(0.5, nrow = p)
#' alpha1 <- rep(0, p)
#' alpha1[1] <- -5
#' # location parameter for first cluster in first sample:
#' xi11 <- rep(0, p)
#' # location parameter for first cluster in second sample (aligned with first):
#' xi21 <- rep(0, p)
#'
#' # Scale and skew parameters for second cluster:
#' Sigma2 <- matrix(-1/3, nrow = p, ncol = p) + diag(1 + 1/3, nrow = p)
#' alpha2 <- rep(0, p)
#' alpha2[2] <- 5
#' # location parameter for second cluster in first sample:
#' xi12 <- rep(3, p)
#' # location parameter for second cluster in second sample (misaligned with first):
#' xi22 <- rep(4, p)
#'
#' # Sample data:
#' set.seed(1)
#' Y <-
#' rbind(
#' sn::rmsn(njk[1, 1], xi = xi11, Omega = Sigma1, alpha = alpha1),
#' sn::rmsn(njk[1, 2], xi = xi12, Omega = Sigma2, alpha = alpha2),
#' sn::rmsn(njk[2, 1], xi = xi21, Omega = Sigma1, alpha = alpha1),
#' sn::rmsn(njk[2, 2], xi = xi22, Omega = Sigma2, alpha = alpha2)
#' )
#'
#' C <- c(rep(1, rowSums(njk)[1]), rep(2, rowSums(njk)[2]))
#'
#' prior <- list(zeta = 1, K = 10)
#' pmc <- list(naprt = 5, nburn = 200, nsave = 200) # Reasonable usage
#' pmc <- list(naprt = 5, nburn = 2, nsave = 5) # Minimal usage for documentation
#' # Fit the model:
#' res <- comix(Y, C, pmc = pmc, prior = prior)
#'
#' # Relabel to resolve potential label switching issues:
#' res_relab <- relabelChain(res)
#'
#' # Generate calibrated data:
#' cal <- calibrateNoDist(res_relab)
#'
#' # Compare raw and calibrated data: (see plot in vignette)
#' # par(mfrow=c(1, 2))
#' # plot(Y, col = C, xlim = range(Y[,1]), ylim = range(Y[,2]) )
#'
#' # Get posterior estimates for the model parameters:
#' res_summary <- summarizeChain(res_relab)
#' # Check for instance, the cluster assignment labels:
#' table(res_summary$t)
#' # Indeed the same as
#' colSums(njk)
#'
#' # Or examine the skewness parameter for the non-trivial clusters:
#' res_summary$alpha[ , unique(res_summary$t)]
#' # And compare those to
#' cbind(alpha1, alpha2)
#'
#' # (see vignette for a more detailed example)
#' @export
comix = function(Y, C, prior = NULL, pmc = NULL, state = NULL, ncores = 2)
{
Y = as.matrix(Y)
p = ncol(Y)
# R wrapper:
if(is.null(prior)) {
prior = list( zeta = 1,
K = 10,
eta_prior = c(1, 1),
m0 = ncol(Y) + 2,
Lambda = stats::cov(Y),
b0 = colMeans(Y),
B0 = 100 * stats::cov(Y),
e0 = ncol(Y) + 2,
E0 = 0.1 * stats::cov(Y),
merge_step = TRUE,
merge_par = 0.1)
} else {
if(is.null(prior$zeta))
prior$zeta = 1;
if(is.null(prior$K))
prior$K = 10;
if(is.null(prior$eta_prior))
prior$eta_prior = c(1, 1);
if(is.null(prior$Lambda))
prior$Lambda = stats::cov(Y);
if(is.null(prior$m0))
prior$m0 = ncol(Y) + 2;
if(is.null(prior$b0))
prior$b0 = colMeans(Y);
if(is.null(prior$B0))
prior$B0 = 100*stats::cov(Y);
if(is.null(prior$e0))
prior$e0 = ncol(Y) + 2;
if(is.null(prior$E0))
prior$E0 = 0.1 * stats::cov(Y);
if(is.null(prior$merge_step))
prior$merge_step = TRUE;
if(is.null(prior$merge_par))
prior$merge_par = 0.1;
}
if(is.null(pmc)) {
pmc = list(npart = 10, nburn = 1000, nsave = 1000, nskip = 1, ndisplay = 500)
} else {
if(is.null(pmc$npart))
pmc$npart = 10
if(is.null(pmc$nburn))
pmc$nburn = 5000
if(is.null(pmc$nburn))
pmc$nburn = 5000
if(is.null(pmc$nsave))
pmc$nsave = 1000
if(is.null(pmc$nskip))
pmc$nskip = 1
if(is.null(pmc$ndisplay))
pmc$ndisplay = 100
}
if(is.null(state$t)) {
state$t = stats::kmeans(Y, prior$K, iter.max = 100)$cluster
}
J = length(unique(C))
if( sum( sort(unique(C)) == 1:J ) != J )
{
print("ERROR: unique(C) should look like 1, 2, ...")
return(0);
}
C = C - 1
state$t = state$t - 1
ans = perturbedSNcpp(Y, C, prior, pmc, state, initParticles = NULL, init=T, ncores=2)
colnames(ans$data$Y) = colnames(Y)
ans$data$C = ans$data$C + 1
ans$chain$t = ans$chain$t + 1
class(ans) = "COMIX"
return(ans)
}
|
/scratch/gouwar.j/cran-all/cranData/COMIX/R/comix.R
|
#' This function creates an object that summarizes the effective sample size
#' for the parameters of the model.
#'
#' @param res An object of class \code{COMIX} or \code{tidyChainCOMIX}.
#' @param params A character vector naming the parameters to compute the
#' effective sample size for.
#' @return An \code{effectiveSampleSizeCOMIX} object which is a named list,
#' with a named element for each requested parameter. Each element is a data
#' frame that includes the effective sample size for the parameter.
#' @examples
#' library(COMIX)
#' # Number of observations for each sample (row) and cluster (column):
#' njk <-
#' matrix(
#' c(
#' 150, 300,
#' 250, 200
#' ),
#' nrow = 2,
#' byrow = TRUE
#' )
#'
#' # Dimension of data:
#' p <- 3
#'
#' # Scale and skew parameters for first cluster:
#' Sigma1 <- matrix(0.5, nrow = p, ncol = p) + diag(0.5, nrow = p)
#' alpha1 <- rep(0, p)
#' alpha1[1] <- -5
#' # location parameter for first cluster in first sample:
#' xi11 <- rep(0, p)
#' # location parameter for first cluster in second sample (aligned with first):
#' xi21 <- rep(0, p)
#'
#' # Scale and skew parameters for second cluster:
#' Sigma2 <- matrix(-1/3, nrow = p, ncol = p) + diag(1 + 1/3, nrow = p)
#' alpha2 <- rep(0, p)
#' alpha2[2] <- 5
#' # location parameter for second cluster in first sample:
#' xi12 <- rep(3, p)
#' # location parameter for second cluster in second sample (misaligned with first):
#' xi22 <- rep(4, p)
#'
#' # Sample data:
#' set.seed(1)
#' Y <-
#' rbind(
#' sn::rmsn(njk[1, 1], xi = xi11, Omega = Sigma1, alpha = alpha1),
#' sn::rmsn(njk[1, 2], xi = xi12, Omega = Sigma2, alpha = alpha2),
#' sn::rmsn(njk[2, 1], xi = xi21, Omega = Sigma1, alpha = alpha1),
#' sn::rmsn(njk[2, 2], xi = xi22, Omega = Sigma2, alpha = alpha2)
#' )
#'
#' C <- c(rep(1, rowSums(njk)[1]), rep(2, rowSums(njk)[2]))
#'
#' prior <- list(zeta = 1, K = 10)
#' pmc <- list(naprt = 5, nburn = 200, nsave = 200) # Reasonable usage
#' pmc <- list(naprt = 5, nburn = 2, nsave = 5) # Minimal usage for documentation
#' # Fit the model:
#' res <- comix(Y, C, pmc = pmc, prior = prior)
#'
#' # Relabel to resolve potential label switching issues:
#' res_relab <- relabelChain(res)
#' effssz <- effectiveSampleSize(res_relab, "w")
#' # Or:
#' tidy_chain <- tidyChain(res_relab, "w")
#' effssz <- effectiveSampleSize(tidy_chain, "w")
#' # (see vignette for a more detailed example)
#' @export
effectiveSampleSize <- function(res, params = c("w", "xi", "xi0", "psi", "G", "E", "eta")) {
stopifnot(is(res, "COMIX") | is(res, "tidyChainCOMIX"))
if (is(res, "COMIX")) {
tidy_chain <- tidyChain(res, params)
} else {
tidy_chain <- res
}
n <- attributes(tidy_chain)$n
P <- attributes(tidy_chain)$p
nsave <- attributes(tidy_chain)$nsave
K <- attributes(tidy_chain)$K
J <- attributes(tidy_chain)$J
non_trivial_k <- attributes(tidy_chain)$non_trivial_k
non_triv_j_k <- attributes(tidy_chain)$non_triv_j_k
effssz <- list()
class(effssz) <- "effectiveSampleSizeCOMIX"
attributes(effssz)$n <- n
attributes(effssz)$p <- P
attributes(effssz)$nsave <- nsave
attributes(effssz)$K <- K
attributes(effssz)$J <- J
attributes(effssz)$non_trivial_k <- non_trivial_k
attributes(effssz)$non_triv_j_k <- non_triv_j_k
attributes(effssz)$glob_freq_t <- attributes(tidy_chain)$glob_freq_t
attributes(effssz)$local_freq_t <- attributes(tidy_chain)$local_freq_t
# w -----
if ("w" %in% params) {
effssz_w <-
tibble(
k = rep(1:K, times = J),
j = rep(1:J, each = K),
effssz = 0,
triv = TRUE,
W = 0
)
dplyr.summarise.inform <- options()$dplyr.summarise.inform
options(dplyr.summarise.inform = FALSE)
effssz_w$W <-
tidy_chain$w %>%
group_by(.data$k, .data$j) %>%
summarize(W = mean(.data$W)) %>%
arrange(.data$j, .data$k) %>%
pull(.data$W)
options(dplyr.summarise.inform = dplyr.summarise.inform)
a <-
tidy_chain$w %>%
pivot_wider(names_from = c(.data$k, .data$j), values_from = c(.data$W)) %>%
select(-.data$iter)
aa <- mcmc(data = a, start = 1)
eff_raw <- effectiveSize(aa)
kj <- apply(str_split(names(eff_raw), "_", simplify = TRUE), 2, as.integer)
stopifnot(all.equal(effssz_w %>% select(.data$k, .data$j) %>% as.matrix() %>% unname(), kj))
effssz_w$effssz <- eff_raw
for (j in 1:J) {
effssz_w$triv[effssz_w$j == j][non_triv_j_k[[as.character(j)]]] <- FALSE
}
effssz$w <- effssz_w
}
# xi0 -----
if ("xi0" %in% params) {
effssz_xi0 <-
tibble(
k = rep(1:K, times = P),
p = rep(1:P, each = K),
effssz = 0,
triv = TRUE
)
a <-
tidy_chain$xi0 %>%
pivot_wider(names_from = c(.data$k, .data$p), values_from = c(.data$xi0)) %>%
select(-.data$iter)
aa <- mcmc(data = a, start = 1)
eff_raw <- effectiveSize(aa)
kp <- apply(str_split(names(eff_raw), "_", simplify = TRUE), 2, as.integer)
stopifnot(all.equal(effssz_xi0 %>% select(.data$k, .data$p) %>% as.matrix() %>% unname(), kp))
effssz_xi0$effssz <- eff_raw
effssz_xi0$triv[effssz_xi0$k %in% non_trivial_k] <- FALSE
effssz$xi0 <- effssz_xi0
}
# xi -----
if ("xi" %in% params) {
effssz_xi <-
tibble(
expand_grid(k = 1:K, p = 1:P, j = 1:J),
effssz = 0,
triv = TRUE
)
a <-
tidy_chain$xi %>%
pivot_wider(names_from = c(.data$k, .data$p, .data$j), values_from = c(.data$xi)) %>%
select(-.data$iter)
aa <- mcmc(data = a, start = 1)
eff_raw <- effectiveSize(aa)
kpj <- apply(str_split(names(eff_raw), "_", simplify = TRUE), 2, as.integer)
stopifnot(all.equal(effssz_xi %>% select(.data$k, .data$p, .data$j) %>% as.matrix() %>% unname(), kpj))
effssz_xi$effssz <- eff_raw
for (j in 1:J) {
effssz_xi$triv[effssz_xi$j == j & effssz_xi$k %in% non_triv_j_k[[as.character(j)]]] <- FALSE
}
effssz$xi <- effssz_xi
}
# psi -----
if ("psi" %in% params) {
effssz_psi <-
tibble(
k = rep(1:K, times = P),
p = rep(1:P, each = K),
effssz = 0,
triv = TRUE
)
a <-
tidy_chain$psi %>%
pivot_wider(names_from = c(.data$k, .data$p), values_from = c(.data$psi)) %>%
select(-.data$iter)
aa <- mcmc(data = a, start = 1)
eff_raw <- effectiveSize(aa)
kp <- apply(str_split(names(eff_raw), "_", simplify = TRUE), 2, as.integer)
stopifnot(all.equal(effssz_psi %>% select(.data$k, .data$p) %>% as.matrix() %>% unname(), kp))
effssz_psi$effssz <- eff_raw
effssz_psi$triv[effssz_psi$k %in% non_trivial_k] <- FALSE
effssz$psi <- effssz_psi
}
# G -----
if ("G" %in% params) {
a <-
tidy_chain$G %>%
pivot_wider(names_from = c(.data$k, .data$p1, .data$p2), values_from = c(.data$G)) %>%
select(-.data$iter)
aa <- mcmc(data = a, start = 1)
eff_raw <- effectiveSize(aa)
kpp <- apply(str_split(names(eff_raw), "_", simplify = TRUE), 2, as.integer)
colnames(kpp) <- c("k", "p1", "p2")
effssz_G <-
tibble(
as_tibble(kpp),
effssz = eff_raw,
triv = TRUE
)
effssz_G$triv[effssz_G$k %in% non_trivial_k] <- FALSE
effssz$G <- effssz_G
}
# E -----
if ("E" %in% params) {
a <-
tidy_chain$E %>%
pivot_wider(names_from = c(.data$k, .data$p1, .data$p2), values_from = c(.data$E)) %>%
select(-.data$iter)
aa <- mcmc(data = a, start = 1)
eff_raw <- effectiveSize(aa)
kpp <- apply(str_split(names(eff_raw), "_", simplify = TRUE), 2, as.integer)
colnames(kpp) <- c("k", "p1", "p2")
effssz_E <-
tibble(
as_tibble(kpp),
effssz = eff_raw,
triv = TRUE
)
effssz_E$triv[effssz_E$k %in% non_trivial_k] <- FALSE
effssz$E <- effssz_E
}
# eta -----
if ("eta" %in% params) {
eta <- pull(tidy_chain$eta)
effssz$eta <- unname(effectiveSize(mcmc(eta)))
}
return(effssz)
}
#' This function creates plots for the effective sample size
#' for the parameters of the model.
#'
#' @param effssz An object of class \code{effectiveSampleSizeCOMIX} as created
#' by the function \code{effectiveSampleSize}.
#' @param param Character, naming the parameter to create a plot of effective
#' sample sizes.
#' @return A \code{ggplot2} plot containing the effective sample size plot.
#' @examples
#' library(COMIX)
#' # Number of observations for each sample (row) and cluster (column):
#' njk <-
#' matrix(
#' c(
#' 150, 300,
#' 250, 200
#' ),
#' nrow = 2,
#' byrow = TRUE
#' )
#'
#' # Dimension of data:
#' p <- 3
#'
#' # Scale and skew parameters for first cluster:
#' Sigma1 <- matrix(0.5, nrow = p, ncol = p) + diag(0.5, nrow = p)
#' alpha1 <- rep(0, p)
#' alpha1[1] <- -5
#' # location parameter for first cluster in first sample:
#' xi11 <- rep(0, p)
#' # location parameter for first cluster in second sample (aligned with first):
#' xi21 <- rep(0, p)
#'
#' # Scale and skew parameters for second cluster:
#' Sigma2 <- matrix(-1/3, nrow = p, ncol = p) + diag(1 + 1/3, nrow = p)
#' alpha2 <- rep(0, p)
#' alpha2[2] <- 5
#' # location parameter for second cluster in first sample:
#' xi12 <- rep(3, p)
#' # location parameter for second cluster in second sample (misaligned with first):
#' xi22 <- rep(4, p)
#'
#' # Sample data:
#' set.seed(1)
#' Y <-
#' rbind(
#' sn::rmsn(njk[1, 1], xi = xi11, Omega = Sigma1, alpha = alpha1),
#' sn::rmsn(njk[1, 2], xi = xi12, Omega = Sigma2, alpha = alpha2),
#' sn::rmsn(njk[2, 1], xi = xi21, Omega = Sigma1, alpha = alpha1),
#' sn::rmsn(njk[2, 2], xi = xi22, Omega = Sigma2, alpha = alpha2)
#' )
#'
#' C <- c(rep(1, rowSums(njk)[1]), rep(2, rowSums(njk)[2]))
#'
#' prior <- list(zeta = 1, K = 10)
#' pmc <- list(naprt = 5, nburn = 200, nsave = 200) # Reasonable usage
#' pmc <- list(naprt = 5, nburn = 2, nsave = 5) # Minimal usage for documentation
#' # Fit the model:
#' res <- comix(Y, C, pmc = pmc, prior = prior)
#'
#' # Relabel to resolve potential label switching issues:
#' res_relab <- relabelChain(res)
#' effssz <- effectiveSampleSize(res_relab, "w")
#' # Or:
#' tidy_chain <- tidyChain(res_relab, "w")
#' effssz <- effectiveSampleSize(tidy_chain, "w")
#' plotEffectiveSampleSize(effssz, "w")
#' # (see vignette for a more detailed example)
#' @export
plotEffectiveSampleSize <- function(effssz, param) {
stopifnot(is(effssz, "effectiveSampleSizeCOMIX"))
stopifnot(length(param) == 1)
stopifnot(param %in% c("w", "xi0", "xi", "psi", "G", "E"))
J <- attributes(effssz)$J
P <- attributes(effssz)$p
nsave <- attributes(effssz)$nsave
local_freq_t <- attributes(effssz)$local_freq_t
glob_freq_t <- attributes(effssz)$glob_freq_t
non_trivial_k <- attributes(effssz)$non_trivial_k
non_triv_j_k <- attributes(effssz)$non_triv_j_k
k_names <- paste0("Cluster ", non_trivial_k)
names(k_names) <- non_trivial_k
j_names <- paste0("Sample ", names(non_triv_j_k))
names(j_names) <- names(non_triv_j_k)
# w -----
if (param == "w") {
ggeffssz_w <-
effssz$w %>%
filter(!.data$triv) %>%
mutate(k = factor(.data$k), j = factor(.data$j)) %>%
mutate(effssz = ifelse(.data$effssz <= nsave, .data$effssz, nsave))
g <-
ggplot(ggeffssz_w) +
geom_bar(
mapping =
aes(
x = .data$k,
y = .data$effssz,
),
fill = rgb(1, 0, 0, 0.6),
stat = "identity"
) +
geom_bar(
mapping =
aes(
x = .data$k,
y = .data$W * nsave,
),
fill = rgb(0, 0, 1, 0.6),
stat = "identity"
) +
scale_y_continuous(limits = c(0, nsave), sec.axis = ~ . / nsave) +
facet_wrap(~ .data$j, labeller = labeller(j = j_names)) +
ylab("Effective Sample Size (Red, Left Axis)\nEstimated Weight of Cluster (Blue, Right Axis)") +
xlab("(Non-empty) Cluster number") +
theme(legend.position = "none")
return(g)
}
# xi0 -----
if ("xi0" == param) {
ggeffssz_xi0 <-
effssz$xi0 %>%
filter(!.data$triv) %>%
mutate(k = factor(.data$k)) %>%
mutate(effssz = ifelse(.data$effssz <= nsave, .data$effssz, nsave))
g <-
ggplot(ggeffssz_xi0) +
geom_bar(
mapping =
aes(
x = .data$p,
y = .data$effssz
),
fill = rgb(1, 0, 0, 0.6),
stat = "identity"
) +
geom_bar(
data = glob_freq_t,
mapping = aes(
x = .data$x,
y = .data$frq_t * nsave
),
stat = "identity",
width = P - 0.1,
fill = rgb(0, 0, 1, 0.6)
) +
scale_y_continuous(limits = c(0, nsave), sec.axis = ~ . / nsave) +
facet_wrap(~ .data$k, labeller = labeller(k = k_names)) +
ylab("Effective Sample Size (Red, Left Axis)\nEstimated Relative Frequency\nof Cluster (Blue, Right Axis)") +
xlab("Margin") +
theme(legend.position = "none", axis.title = element_text(size = 10))
return(g)
}
# xi -----
if ("xi" == param) {
ggeffssz_xi <-
effssz$xi %>%
filter(!.data$triv) %>%
mutate(k = factor(.data$k)) %>%
mutate(effssz = ifelse(.data$effssz <= nsave, .data$effssz, nsave))
g <-
ggplot(ggeffssz_xi) +
geom_bar(
mapping =
aes(
x = .data$p,
y = .data$effssz
),
fill = rgb(1, 0, 0, 0.6),
stat = "identity"
) +
geom_bar(
data = local_freq_t,
mapping = aes(
x = .data$x,
y = .data$frq_t * nsave
),
stat = "identity",
width = P - 0.1,
fill = rgb(0, 0, 1, 0.6)
) +
scale_y_continuous(limits = c(0, nsave), sec.axis = ~ . / nsave) +
facet_grid(.data$j ~ .data$k, labeller = labeller(j = j_names, k = k_names)) +
ylab("Effective Sample Size (Red, Left Axis)\nEstimated Relative Frequency\nof Cluster (Blue, Right Axis)") +
xlab("Margin") +
theme(legend.position = "none", axis.title = element_text(size = 10))
return(g)
}
# psi -----
if ("psi" == param) {
ggeffssz_psi <-
effssz$psi %>%
filter(!.data$triv) %>%
mutate(k = factor(.data$k)) %>%
mutate(effssz = ifelse(.data$effssz <= nsave, .data$effssz, nsave))
g <-
ggplot(ggeffssz_psi) +
geom_bar(
mapping =
aes(
x = .data$p,
y = .data$effssz
),
fill = rgb(1, 0, 0, 0.6),
stat = "identity"
) +
geom_bar(
data = glob_freq_t,
mapping = aes(
x = .data$x,
y = .data$frq_t * nsave
),
stat = "identity",
width = P - 0.1,
fill = rgb(0, 0, 1, 0.6)
) +
scale_y_continuous(limits = c(0, nsave), sec.axis = ~ . / nsave) +
facet_wrap(~ .data$k, labeller = labeller(k = k_names)) +
ylab("Effective Sample Size (Red, Left Axis)\nEstimated Relative Frequency\nof Cluster (Blue, Right Axis)") +
xlab("Margin") +
theme(legend.position = "none", axis.title = element_text(size = 10))
return(g)
}
# G -----
if ("G" == param) {
glob_freq_t_matrix <- glob_freq_t
glob_freq_t_matrix$x <- (((P + 1) * P / 2) + 1) / 2
ggeffssz_G <-
effssz$G %>%
filter(!.data$triv) %>%
mutate(k = factor(.data$k)) %>%
mutate(pp = paste0(.data$p1, ",", .data$p2)) %>%
mutate(
Diagonal =
factor(
.data$p1 == .data$p2,
levels = c(TRUE, FALSE),
labels = c("Diagonal", "Off-Diagonal")
)
) %>%
mutate(effssz = ifelse(.data$effssz <= nsave, .data$effssz, nsave))
g <-
ggplot(ggeffssz_G) +
geom_bar(
mapping =
aes(
x = reorder(.data$pp, as.integer(.data$Diagonal)),
y = .data$effssz,
fill = .data$Diagonal
),
stat = "identity"
) +
scale_fill_manual(values = c(rgb(1, 0, 0, 0.6), rgb(0.7, 0, 0.7, 0.6))) +
geom_bar(
data = glob_freq_t_matrix,
mapping = aes(
x = .data$x,
y = .data$frq_t * nsave
),
stat = "identity",
width = (P + 1) * P / 2 - 0.1,
fill = rgb(0, 0, 1, 0.6)
) +
scale_y_continuous(limits = c(0, nsave), sec.axis = ~ . / nsave) +
facet_wrap(~ .data$k, labeller = labeller(k = k_names)) +
ylab("Effective Sample Size (Left Axis)\nEstimated Relative Frequency\nof Cluster (Blue, Right Axis)") +
xlab("Margins") +
theme(axis.text.x = element_text(angle = -90, vjust = .5, hjust = 1)) +
theme(
legend.title = element_blank(),
legend.text = element_text(size = 6)
)
return(g)
}
# E -----
if ("E" == param) {
glob_freq_t_matrix <- glob_freq_t
glob_freq_t_matrix$x <- (((P + 1) * P / 2) + 1) / 2
ggeffssz_E <-
effssz$E %>%
filter(!.data$triv) %>%
mutate(k = factor(.data$k)) %>%
mutate(pp = paste0(.data$p1, ",", .data$p2)) %>%
mutate(
Diagonal =
factor(
.data$p1 == .data$p2,
levels = c(TRUE, FALSE),
labels = c("Diagonal", "Off-Diagonal")
)
) %>%
mutate(effssz = ifelse(.data$effssz <= nsave, .data$effssz, nsave))
g <-
ggplot(ggeffssz_E) +
geom_bar(
mapping =
aes(
x = reorder(.data$pp, as.integer(.data$Diagonal)),
y = .data$effssz,
fill = .data$Diagonal
),
stat = "identity"
) +
scale_fill_manual(values = c(rgb(1, 0, 0, 0.6), rgb(0.7, 0, 0.7, 0.6))) +
geom_bar(
data = glob_freq_t_matrix,
mapping = aes(
x = .data$x,
y = .data$frq_t * nsave
),
stat = "identity",
width = (P + 1) * P / 2 - 0.1,
fill = rgb(0, 0, 1, 0.6)
) +
scale_y_continuous(limits = c(0, nsave), sec.axis = ~ . / nsave) +
facet_wrap(~ .data$k, labeller = labeller(k = k_names)) +
ylab("Effective Sample Size (Left Axis)\nEstimated Relative Frequency\nof Cluster (Blue, Right Axis)") +
xlab("Margins") +
theme(axis.text.x = element_text(angle = -90, vjust = .5, hjust = 1)) +
theme(
legend.title = element_blank(),
legend.text = element_text(size = 6)
)
return(g)
}
return(NULL)
}
|
/scratch/gouwar.j/cran-all/cranData/COMIX/R/effective_sample_size.R
|
#' This function creates an object that summarizes the Geweke convergence
#' diagnostic.
#'
#' @param res An object of class \code{COMIX} or \code{tidyChainCOMIX}.
#' @param params A character vector naming the parameters to compute the
#' Geweke diagnostic for.
#' @param frac1 Double, fraction to use from beginning of chain.
#' @param frac2 Double, fraction to use from end of chain.
#' @param probs A vector of 2 doubles, probabilities denoting the limits
#' of a confidence interval for the convergence diagnostic.
#' @return An \code{gewekeParamsCOMIX} object which is a named list,
#' with a named element for each requested parameter. Each element is a data
#' frame that includes the Geweke diagnostic and result of a stationarity test
#' for the parameter.
#' @examples
#' library(COMIX)
#' # Number of observations for each sample (row) and cluster (column):
#' njk <-
#' matrix(
#' c(
#' 150, 300,
#' 250, 200
#' ),
#' nrow = 2,
#' byrow = TRUE
#' )
#'
#' # Dimension of data:
#' p <- 3
#'
#' # Scale and skew parameters for first cluster:
#' Sigma1 <- matrix(0.5, nrow = p, ncol = p) + diag(0.5, nrow = p)
#' alpha1 <- rep(0, p)
#' alpha1[1] <- -5
#' # location parameter for first cluster in first sample:
#' xi11 <- rep(0, p)
#' # location parameter for first cluster in second sample (aligned with first):
#' xi21 <- rep(0, p)
#'
#' # Scale and skew parameters for second cluster:
#' Sigma2 <- matrix(-1/3, nrow = p, ncol = p) + diag(1 + 1/3, nrow = p)
#' alpha2 <- rep(0, p)
#' alpha2[2] <- 5
#' # location parameter for second cluster in first sample:
#' xi12 <- rep(3, p)
#' # location parameter for second cluster in second sample (misaligned with first):
#' xi22 <- rep(4, p)
#'
#' # Sample data:
#' set.seed(1)
#' Y <-
#' rbind(
#' sn::rmsn(njk[1, 1], xi = xi11, Omega = Sigma1, alpha = alpha1),
#' sn::rmsn(njk[1, 2], xi = xi12, Omega = Sigma2, alpha = alpha2),
#' sn::rmsn(njk[2, 1], xi = xi21, Omega = Sigma1, alpha = alpha1),
#' sn::rmsn(njk[2, 2], xi = xi22, Omega = Sigma2, alpha = alpha2)
#' )
#'
#' C <- c(rep(1, rowSums(njk)[1]), rep(2, rowSums(njk)[2]))
#'
#' prior <- list(zeta = 1, K = 10)
#' pmc <- list(naprt = 5, nburn = 200, nsave = 200) # Reasonable usage
#' pmc <- list(naprt = 5, nburn = 2, nsave = 5) # Minimal usage for documentation
#' # Fit the model:
#' res <- comix(Y, C, pmc = pmc, prior = prior)
#'
#' # Relabel to resolve potential label switching issues:
#' res_relab <- relabelChain(res)
#' effssz <- effectiveSampleSize(res_relab, "w")
#' # Or:
#' tidy_chain <- tidyChain(res_relab, "w")
#' gwk <- gewekeParams(tidy_chain, "w")
#' # (see vignette for a more detailed example)
#' @export
gewekeParams <- function(res, params = c("w", "xi", "xi0", "psi", "G", "E", "eta"),
frac1 = 0.1, frac2 = 0.5, probs = c(0.025, 0.975)) {
stopifnot(is(res, "COMIX") | is(res, "tidyChainCOMIX"))
if (is(res, "COMIX")) {
tidy_chain <- tidyChain(res, params)
} else {
tidy_chain <- res
}
n <- attributes(tidy_chain)$n
P <- attributes(tidy_chain)$p
nsave <- attributes(tidy_chain)$nsave
K <- attributes(tidy_chain)$K
J <- attributes(tidy_chain)$J
non_trivial_k <- attributes(tidy_chain)$non_trivial_k
non_triv_j_k <- attributes(tidy_chain)$non_triv_j_k
glob_freq_t <- attributes(tidy_chain)$glob_freq_t
local_freq_t <- attributes(tidy_chain)$local_freq_t
gewekeParams <- list()
class(gewekeParams) <- "gewekeParamsCOMIX"
attributes(gewekeParams)$n <- n
attributes(gewekeParams)$p <- P
attributes(gewekeParams)$nsave <- nsave
attributes(gewekeParams)$K <- K
attributes(gewekeParams)$J <- J
attributes(gewekeParams)$non_trivial_k <- non_trivial_k
attributes(gewekeParams)$non_triv_j_k <- non_triv_j_k
attributes(gewekeParams)$frac <- c(frac1, frac2)
attributes(gewekeParams)$glob_freq_t <- glob_freq_t
# w -----
if ("w" %in% params) {
tc <- tidy_chain$w
tc$triv <- TRUE
for (j in 1:J) {
tc$triv[tc$j == j & tc$k %in% non_triv_j_k[[as.character(j)]]] <- FALSE
}
tc <- tc %>% filter(!.data$triv) %>% select(-.data$triv)
a <-
tc %>%
pivot_wider(names_from = c(.data$k, .data$j), values_from = c(.data$W)) %>%
select(-.data$iter)
aa <- mcmc(data = a, start = 1)
gd <- geweke.diag(x = aa, frac1 = frac1, frac2 = frac2)
kj <- apply(str_split(names(gd$z), "_", simplify = TRUE), 2, as.character)
dkj <- tc %>% select(.data$k, .data$j) %>% distinct()
stopifnot(all.equal(dkj %>% as.matrix() %>% unname(), kj))
gewekeParams$w <- tibble(dkj, geweke = gd$z)
dplyr.summarise.inform <- options()$dplyr.summarise.inform
options(dplyr.summarise.inform = FALSE)
meanW <- tc %>% group_by(.data$j, .data$k) %>% summarize(meanW = mean(.data$W))
stopifnot(all(meanW %>% select(.data$k, .data$j) %>% as.matrix() == kj))
options(dplyr.summarise.inform = dplyr.summarise.inform)
gewekeParams$w <-
gewekeParams$w %>%
mutate(stationary = .data$geweke > qnorm(probs[1]) & .data$geweke < qnorm(probs[2])) %>%
left_join(meanW, by = c("j", "k"))
}
# xi0 -----
if ("xi0" %in% params) {
tc <- tidy_chain$xi0 %>% filter(.data$k %in% non_trivial_k)
a <-
tc %>%
pivot_wider(names_from = c(.data$k, .data$p), values_from = c(.data$xi0)) %>%
select(-.data$iter)
aa <- mcmc(data = a, start = 1)
gd <- geweke.diag(x = aa, frac1 = frac1, frac2 = frac2)
kp <- apply(str_split(names(gd$z), "_", simplify = TRUE), 2, as.character)
dpk <- tc %>% select(.data$k, .data$p) %>% distinct()
stopifnot(all.equal(dpk %>% as.matrix() %>% unname(), kp))
gewekeParams$xi0 <- tibble(dpk, geweke = gd$z)
dplyr.summarise.inform <- options()$dplyr.summarise.inform
options(dplyr.summarise.inform = FALSE)
meanXi0 <- tc %>% group_by(.data$p, .data$k) %>% summarize(meanXi0 = mean(.data$xi0))
options(dplyr.summarise.inform = dplyr.summarise.inform)
gewekeParams$xi0 <-
gewekeParams$xi0 %>% mutate(stationary = .data$geweke > qnorm(probs[1]) & .data$geweke < qnorm(probs[2])) %>%
left_join(glob_freq_t %>% select(.data$k, .data$frq_t), by = "k") %>%
left_join(meanXi0, by = c("p", "k"))
}
# xi -----
if ("xi" %in% params) {
tc <- tidy_chain$xi
tc$triv <- TRUE
for (j in 1:J) {
tc$triv[tc$j == j & tc$k %in% non_triv_j_k[[as.character(j)]]] <- FALSE
}
tc <- tc %>% filter(!.data$triv) %>% select(-.data$triv)
a <-
tc %>%
pivot_wider(names_from = c(.data$k, .data$p, .data$j), values_from = c(.data$xi)) %>%
select(-.data$iter)
aa <- mcmc(data = a, start = 1)
gd <- geweke.diag(x = aa, frac1 = frac1, frac2 = frac2)
kpj <- apply(str_split(names(gd$z), "_", simplify = TRUE), 2, as.character)
dkpj <- tc %>% select(.data$k, .data$p, .data$j) %>% distinct()
stopifnot(all.equal(dkpj %>% as.matrix() %>% unname(), kpj))
gewekeParams$xi <- tibble(dkpj, geweke = gd$z)
local_freq_t <-
local_freq_t %>%
ungroup() %>%
mutate(j = factor(.data$j), k = factor(.data$k), .data$frq_t) %>%
select(.data$j, .data$k, .data$frq_t)
dplyr.summarise.inform <- options()$dplyr.summarise.inform
options(dplyr.summarise.inform = FALSE)
meanXi <- tc %>% group_by(.data$j, .data$p, .data$k) %>% summarize(meanXi = mean(.data$xi))
options(dplyr.summarise.inform = dplyr.summarise.inform)
gewekeParams$xi <-
gewekeParams$xi %>%
mutate(stationary = .data$geweke > qnorm(probs[1]) & .data$geweke < qnorm(probs[2])) %>%
left_join(local_freq_t, by = c("j", "k")) %>%
left_join(meanXi, by = c("j", "p", "k"))
}
# psi -----
if ("psi" %in% params) {
tc <- tidy_chain$psi %>% filter(.data$k %in% non_trivial_k)
a <-
tc %>%
pivot_wider(names_from = c(.data$k, .data$p), values_from = c(.data$psi)) %>%
select(-.data$iter)
aa <- mcmc(data = a, start = 1)
gd <- geweke.diag(x = aa, frac1 = frac1, frac2 = frac2)
kp <- apply(str_split(names(gd$z), "_", simplify = TRUE), 2, as.character)
dpk <- tc %>% select(.data$k, .data$p) %>% distinct()
stopifnot(all.equal(dpk %>% as.matrix() %>% unname(), kp))
gewekeParams$psi <- tibble(dpk, geweke = gd$z)
dplyr.summarise.inform <- options()$dplyr.summarise.inform
options(dplyr.summarise.inform = FALSE)
meanPsi <- tc %>% group_by(.data$p, .data$k) %>% summarize(meanPsi = mean(.data$psi))
options(dplyr.summarise.inform = dplyr.summarise.inform)
gewekeParams$psi <-
gewekeParams$psi %>%
mutate(stationary = .data$geweke > qnorm(probs[1]) & .data$geweke < qnorm(probs[2])) %>%
left_join(glob_freq_t %>% select(.data$k, .data$frq_t), by = "k") %>%
left_join(meanPsi, by = c("p", "k"))
}
# G -----
if ("G" %in% params) {
tc <- tidy_chain$G %>% filter(.data$k %in% non_trivial_k)
a <-
tc %>%
pivot_wider(names_from = c(.data$k, .data$p1, .data$p2), values_from = c(.data$G)) %>%
select(-.data$iter)
aa <- mcmc(data = a, start = 1)
gd <- geweke.diag(x = aa, frac1 = frac1, frac2 = frac2)
kp1p2 <- apply(str_split(names(gd$z), "_", simplify = TRUE), 2, as.character)
dkp1p2 <- tc %>% select(.data$k, .data$p1, .data$p2) %>% distinct()
stopifnot(all.equal(dkp1p2 %>% as.matrix() %>% unname(), kp1p2))
gewekeParams$G <- tibble(dkp1p2, geweke = gd$z)
dplyr.summarise.inform <- options()$dplyr.summarise.inform
options(dplyr.summarise.inform = FALSE)
meanG <- tc %>% group_by(.data$k, .data$p1, .data$p2) %>% summarize(meanG = mean(.data$G))
options(dplyr.summarise.inform = dplyr.summarise.inform)
gewekeParams$G <-
gewekeParams$G %>%
mutate(stationary = .data$geweke > qnorm(probs[1]) & .data$geweke < qnorm(probs[2])) %>%
left_join(glob_freq_t %>% select(.data$k, .data$frq_t), by = "k") %>%
left_join(meanG, by = c("k", "p1", "p2"))
}
# E -----
if ("E" %in% params) {
tc <- tidy_chain$E %>% filter(.data$k %in% non_trivial_k)
a <-
tc %>%
pivot_wider(names_from = c(.data$k, .data$p1, .data$p2), values_from = c(.data$E)) %>%
select(-.data$iter)
aa <- mcmc(data = a, start = 1)
gd <- geweke.diag(x = aa, frac1 = frac1, frac2 = frac2)
kp1p2 <- apply(str_split(names(gd$z), "_", simplify = TRUE), 2, as.character)
dkp1p2 <- tc %>% select(.data$k, .data$p1, .data$p2) %>% distinct()
stopifnot(all.equal(dkp1p2 %>% as.matrix() %>% unname(), kp1p2))
gewekeParams$E <- tibble(dkp1p2, geweke = gd$z)
dplyr.summarise.inform <- options()$dplyr.summarise.inform
options(dplyr.summarise.inform = FALSE)
meanE <- tc %>% group_by(.data$k, .data$p1, .data$p2) %>% summarize(meanE = mean(.data$E))
options(dplyr.summarise.inform = dplyr.summarise.inform)
gewekeParams$E <-
gewekeParams$E %>%
mutate(stationary = .data$geweke > qnorm(probs[1]) & .data$geweke < qnorm(probs[2])) %>%
left_join(glob_freq_t %>% select(.data$k, .data$frq_t), by = "k") %>%
left_join(meanE, by = c("k", "p1", "p2"))
}
# eta -----
if ("eta" %in% params) {
eta <- mcmc(data = tidy_chain$eta$eta, start = 1)
gd <- geweke.diag(x = eta, frac1 = frac1, frac2 = frac2)
gewekeParams$eta <- tibble(geweke = unname(gd$z))
gewekeParams$eta <-
gewekeParams$eta %>% mutate(stationary = .data$geweke > qnorm(probs[1]) & .data$geweke < qnorm(probs[2]))
}
return(gewekeParams)
}
#' This function creates plots for the Geweke diagnostic and results of test of
#' stationarity for the parameters of the model.
#'
#' @param gwk An object of class \code{gewekeParamsCOMIX} as created
#' by the function \code{gewekeParams}.
#' @param param Character, naming the parameter to create a plot of the Geweke
#' diagnostic for.
#' @return A \code{ggplot2} plot containing the Geweke diagnostic plot.
#' @examples
#' library(COMIX)
#' # Number of observations for each sample (row) and cluster (column):
#' njk <-
#' matrix(
#' c(
#' 150, 300,
#' 250, 200
#' ),
#' nrow = 2,
#' byrow = TRUE
#' )
#'
#' # Dimension of data:
#' p <- 3
#'
#' # Scale and skew parameters for first cluster:
#' Sigma1 <- matrix(0.5, nrow = p, ncol = p) + diag(0.5, nrow = p)
#' alpha1 <- rep(0, p)
#' alpha1[1] <- -5
#' # location parameter for first cluster in first sample:
#' xi11 <- rep(0, p)
#' # location parameter for first cluster in second sample (aligned with first):
#' xi21 <- rep(0, p)
#'
#' # Scale and skew parameters for second cluster:
#' Sigma2 <- matrix(-1/3, nrow = p, ncol = p) + diag(1 + 1/3, nrow = p)
#' alpha2 <- rep(0, p)
#' alpha2[2] <- 5
#' # location parameter for second cluster in first sample:
#' xi12 <- rep(3, p)
#' # location parameter for second cluster in second sample (misaligned with first):
#' xi22 <- rep(4, p)
#'
#' # Sample data:
#' set.seed(1)
#' Y <-
#' rbind(
#' sn::rmsn(njk[1, 1], xi = xi11, Omega = Sigma1, alpha = alpha1),
#' sn::rmsn(njk[1, 2], xi = xi12, Omega = Sigma2, alpha = alpha2),
#' sn::rmsn(njk[2, 1], xi = xi21, Omega = Sigma1, alpha = alpha1),
#' sn::rmsn(njk[2, 2], xi = xi22, Omega = Sigma2, alpha = alpha2)
#' )
#'
#' C <- c(rep(1, rowSums(njk)[1]), rep(2, rowSums(njk)[2]))
#'
#' prior <- list(zeta = 1, K = 10)
#' pmc <- list(naprt = 5, nburn = 200, nsave = 200) # Reasonable usage
#' pmc <- list(naprt = 5, nburn = 2, nsave = 5) # Minimal usage for documentation
#' # Fit the model:
#' res <- comix(Y, C, pmc = pmc, prior = prior)
#'
#' # Relabel to resolve potential label switching issues:
#' res_relab <- relabelChain(res)
#' effssz <- effectiveSampleSize(res_relab, "w")
#' # Or:
#' tidy_chain <- tidyChain(res_relab, "w")
#' gwk <- gewekeParams(tidy_chain, "w")
#' plotGewekeParams(gwk, "w")
#' # (see vignette for a more detailed example)
#' @export
plotGewekeParams <- function(gwk, param) {
stopifnot(class(gwk) == "gewekeParamsCOMIX")
stopifnot(length(param) == 1)
stopifnot(param %in% c("w", "xi0", "xi", "psi", "G", "E"))
J <- attributes(gwk)$J
j_names <- paste0("Sample ", 1:J)
names(j_names) <- 1:J
non_trivial_k <- attributes(gwk)$non_trivial_k
glob_freq_t <- attributes(gwk)$glob_freq_t
frq_t <- glob_freq_t$frq_t[glob_freq_t$k %in% non_trivial_k]
k_names_frq <- paste0("Cluster ", non_trivial_k, "\n(Est. Freq. = ", round(frq_t, 2), ")")
names(k_names_frq) <- non_trivial_k
scm <-
scale_color_manual(
name = "Geweke\nStationarity test",
labels = c("Passed", "Failed"),
values = c("TRUE" = "#00ba38", "FALSE" = "#f8766d")
)
# w -----
if (param == "w") {
g <-
gwk$w %>%
ggplot(aes(x = .data$k, y = .data$meanW, color = .data$stationary)) +
geom_point() +
geom_segment(aes(xend = .data$k, y = 0, yend = .data$meanW)) +
scm +
ylab("Estimated weight") +
xlab("Cluster Number") +
facet_wrap(~ .data$j, labeller = labeller(j = j_names))
return(g)
}
# xi0 -----
if (param == "xi0") {
g <-
gwk$xi0 %>%
ggplot(aes(x = .data$p, y = .data$meanXi0, color = .data$stationary)) +
geom_segment(aes(xend = .data$p, y = 0, yend = .data$meanXi0)) +
geom_point() +
scm +
ylab(expression(xi[0])) +
xlab("Margin") +
facet_wrap(~ .data$k, labeller = labeller(k = k_names_frq))
return(g)
}
# xi -----
if (param == "xi") {
g <-
gwk$xi %>%
ggplot(aes(x = .data$p, y = .data$meanXi, color = .data$stationary)) +
geom_segment(aes(xend = .data$p, y = 0, yend = .data$meanXi)) +
geom_point() +
scm +
ylab(expression(xi)) +
xlab("Margin") +
facet_grid(.data$j ~ .data$k, labeller = labeller(j = j_names, k = k_names_frq))
return(g)
}
# psi -----
if (param == "psi") {
g <-
gwk$psi %>%
ggplot(aes(x = .data$p, y = .data$meanPsi, color = .data$stationary)) +
geom_segment(aes(xend = .data$p, y = 0, yend = .data$meanPsi)) +
geom_point() +
scm +
ylab(expression(psi)) +
xlab("Margin") +
facet_wrap(~ .data$k, labeller = labeller(k = k_names_frq))
return(g)
}
# G -----
if (param == "G") {
g <-
gwk$G %>%
mutate(
Diagonal =
factor(
.data$p1 == .data$p2,
levels = c(TRUE, FALSE),
labels = c("Diagonal", "Off-Diagonal")
)
) %>%
mutate(p1p2 = paste0(.data$p1, ", ", .data$p2)) %>%
ggplot(aes(x = .data$p1p2, y = .data$meanG, color = .data$stationary)) +
geom_segment(aes(xend = .data$p1p2, y = 0, yend = .data$meanG)) +
geom_point(aes(shape = .data$Diagonal)) +
scm +
ylab("G") +
xlab("Margin") +
facet_wrap(~ .data$k, labeller = labeller(k = k_names_frq)) +
theme(axis.text.x = element_text(angle = -90, vjust = .5, hjust = 1))
return(g)
}
# E -----
if (param == "E") {
g <-
gwk$E %>%
mutate(
Diagonal =
factor(
.data$p1 == .data$p2,
levels = c(TRUE, FALSE),
labels = c("Diagonal", "Off-Diagonal")
)
) %>%
mutate(p1p2 = paste0(.data$p1, ", ", .data$p2)) %>%
ggplot(aes(x = .data$p1p2, y = .data$meanE, color = .data$stationary)) +
geom_segment(aes(xend = .data$p1p2, y = 0, yend = .data$meanE)) +
geom_point(aes(shape = .data$Diagonal)) +
scm +
ylab("E") +
xlab("Margin") +
facet_wrap(~ .data$k, labeller = labeller(k = k_names_frq)) +
theme(axis.text.x = element_text(angle = -90, vjust = .5, hjust = 1))
return(g)
}
return(NULL)
}
|
/scratch/gouwar.j/cran-all/cranData/COMIX/R/geweke.R
|
#' This function creates an object that summarizes the Heidelberg-Welch
#' convergence diagnostic.
#'
#' @param res An object of class \code{COMIX} or \code{tidyChainCOMIX}.
#' @param params A character vector naming the parameters to compute the
#' Heidelberg-Welch diagnostic for.
#' @param eps Target value for ratio of halfwidth to sample mean.
#' @param pvalue Significance level to use.
#' @return An \code{heidelParamsCOMIX} object which is a named list,
#' with a named element for each requested parameter. Each element is a data
#' frame that includes the Heidelberg-Welch diagnostic and results of a
#' stationarity test for the parameter.
#' @examples
#' library(COMIX)
#' # Number of observations for each sample (row) and cluster (column):
#' njk <-
#' matrix(
#' c(
#' 150, 300,
#' 250, 200
#' ),
#' nrow = 2,
#' byrow = TRUE
#' )
#'
#' # Dimension of data:
#' p <- 3
#'
#' # Scale and skew parameters for first cluster:
#' Sigma1 <- matrix(0.5, nrow = p, ncol = p) + diag(0.5, nrow = p)
#' alpha1 <- rep(0, p)
#' alpha1[1] <- -5
#' # location parameter for first cluster in first sample:
#' xi11 <- rep(0, p)
#' # location parameter for first cluster in second sample (aligned with first):
#' xi21 <- rep(0, p)
#'
#' # Scale and skew parameters for second cluster:
#' Sigma2 <- matrix(-1/3, nrow = p, ncol = p) + diag(1 + 1/3, nrow = p)
#' alpha2 <- rep(0, p)
#' alpha2[2] <- 5
#' # location parameter for second cluster in first sample:
#' xi12 <- rep(3, p)
#' # location parameter for second cluster in second sample (misaligned with first):
#' xi22 <- rep(4, p)
#'
#' # Sample data:
#' set.seed(1)
#' Y <-
#' rbind(
#' sn::rmsn(njk[1, 1], xi = xi11, Omega = Sigma1, alpha = alpha1),
#' sn::rmsn(njk[1, 2], xi = xi12, Omega = Sigma2, alpha = alpha2),
#' sn::rmsn(njk[2, 1], xi = xi21, Omega = Sigma1, alpha = alpha1),
#' sn::rmsn(njk[2, 2], xi = xi22, Omega = Sigma2, alpha = alpha2)
#' )
#'
#' C <- c(rep(1, rowSums(njk)[1]), rep(2, rowSums(njk)[2]))
#'
#' prior <- list(zeta = 1, K = 10)
#' pmc <- list(naprt = 5, nburn = 200, nsave = 200) # Reasonable usage
#' pmc <- list(naprt = 5, nburn = 2, nsave = 5) # Minimal usage for documentation
#' # Fit the model:
#' res <- comix(Y, C, pmc = pmc, prior = prior)
#'
#' # Relabel to resolve potential label switching issues:
#' res_relab <- relabelChain(res)
#' effssz <- effectiveSampleSize(res_relab, "w")
#' # Or:
#' tidy_chain <- tidyChain(res_relab, "w")
#' hd <- heidelParams(tidy_chain, "w")
#' # (see vignette for a more detailed example)
#' @export
heidelParams <- function(res, params = c("w", "xi", "xi0", "psi", "G", "E", "eta"),
eps = 0.1, pvalue = 0.05) {
stopifnot(is(res, "COMIX") | is(res, "tidyChainCOMIX"))
if (is(res, "COMIX")) {
tidy_chain <- tidyChain(res, params)
} else {
tidy_chain <- res
}
n <- attributes(tidy_chain)$n
P <- attributes(tidy_chain)$p
nsave <- attributes(tidy_chain)$nsave
K <- attributes(tidy_chain)$K
J <- attributes(tidy_chain)$J
non_trivial_k <- attributes(tidy_chain)$non_trivial_k
non_triv_j_k <- attributes(tidy_chain)$non_triv_j_k
glob_freq_t <- attributes(tidy_chain)$glob_freq_t
local_freq_t <- attributes(tidy_chain)$local_freq_t
heidelParams <- list()
class(heidelParams) <- "heidelParamsCOMIX"
attributes(heidelParams)$n <- n
attributes(heidelParams)$p <- P
attributes(heidelParams)$nsave <- nsave
attributes(heidelParams)$K <- K
attributes(heidelParams)$J <- J
attributes(heidelParams)$non_trivial_k <- non_trivial_k
attributes(heidelParams)$non_triv_j_k <- non_triv_j_k
attributes(heidelParams)$eps <- eps
attributes(heidelParams)$pvalue <- pvalue
attributes(heidelParams)$glob_freq_t <- glob_freq_t
# w -----
if ("w" %in% params) {
tc <- tidy_chain$w
tc$triv <- TRUE
for (j in 1:J) {
tc$triv[tc$j == j & tc$k %in% non_triv_j_k[[as.character(j)]]] <- FALSE
}
tc <- tc %>% filter(!.data$triv) %>% select(-.data$triv)
a <-
tc %>%
pivot_wider(names_from = c(.data$k, .data$j), values_from = c(.data$W)) %>%
select(-.data$iter)
aa <- mcmc(data = a, start = 1)
hd <- heidel.diag(x = aa, eps = eps, pvalue = pvalue)
class(hd) <- "matrix"
kj <- apply(str_split(rownames(hd), "_", simplify = TRUE), 2, as.character)
dkj <- tc %>% select(.data$k, .data$j) %>% distinct()
stopifnot(all.equal(dkj %>% as.matrix() %>% unname(), kj))
heidelParams$w <-
tibble(dkj, as_tibble(hd)) %>%
mutate(kj = rownames(hd)) %>%
mutate(
stest = factor(.data$stest, levels = c(1, 0), labels = c("passed", "failed")),
htest = factor(.data$htest, levels = c(1, 0), labels = c("passed", "failed"))
)
dplyr.summarise.inform <- options()$dplyr.summarise.inform
options(dplyr.summarise.inform = FALSE)
attributes(heidelParams$w)$meanW <-
tidy_chain$w %>%
group_by(.data$j, .data$k) %>%
summarize(meanW = mean(.data$W))
options(dplyr.summarise.inform = dplyr.summarise.inform)
}
# xi0 -----
if ("xi0" %in% params) {
tc <- tidy_chain$xi0 %>% filter(.data$k %in% non_trivial_k)
a <-
tc %>%
pivot_wider(names_from = c(.data$k, .data$p), values_from = c(.data$xi0)) %>%
select(-.data$iter)
aa <- mcmc(data = a, start = 1)
hd <- heidel.diag(x = aa, eps = eps, pvalue = pvalue)
class(hd) <- "matrix"
kp <- apply(str_split(rownames(hd), "_", simplify = TRUE), 2, as.character)
dkp <- tc %>% select(.data$k, .data$p) %>% distinct()
stopifnot(all.equal(dkp %>% as.matrix() %>% unname(), kp))
heidelParams$xi0 <-
tibble(dkp, as_tibble(hd)) %>%
mutate(kp = rownames(hd)) %>%
mutate(
stest = factor(.data$stest, levels = c(1, 0), labels = c("passed", "failed")),
htest = factor(.data$htest, levels = c(1, 0), labels = c("passed", "failed"))
) %>%
left_join(glob_freq_t %>% select(.data$k, .data$frq_t), by = "k")
dplyr.summarise.inform <- options()$dplyr.summarise.inform
options(dplyr.summarise.inform = FALSE)
attributes(heidelParams$xi0)$meanXi0 <-
tidy_chain$xi0 %>%
group_by(.data$k, .data$p) %>%
summarize(meanXi0 = mean(.data$xi0))
options(dplyr.summarise.inform = dplyr.summarise.inform)
}
# xi -----
if ("xi" %in% params) {
tc <- tidy_chain$xi
tc$triv <- TRUE
for (j in 1:J) {
tc$triv[tc$j == j & tc$k %in% non_triv_j_k[[as.character(j)]]] <- FALSE
}
tc <- tc %>% filter(!.data$triv) %>% select(-.data$triv)
a <-
tc %>%
pivot_wider(names_from = c(.data$k, .data$p, .data$j), values_from = c(.data$xi)) %>%
select(-.data$iter)
aa <- mcmc(data = a, start = 1)
hd <- heidel.diag(x = aa, eps = eps, pvalue = pvalue)
class(hd) <- "matrix"
kpj <- apply(str_split(rownames(hd), "_", simplify = TRUE), 2, as.character)
dkpj <- tc %>% select(.data$k, .data$p, .data$j) %>% distinct()
stopifnot(all.equal(dkpj %>% as.matrix() %>% unname(), kpj))
local_freq_t <-
local_freq_t %>%
ungroup() %>%
mutate(j = factor(.data$j), k = factor(.data$k), .data$frq_t) %>%
select(.data$j, .data$k, .data$frq_t)
heidelParams$xi <-
tibble(dkpj, as_tibble(hd)) %>%
mutate(kpj = rownames(hd)) %>%
mutate(
stest = factor(.data$stest, levels = c(1, 0), labels = c("passed", "failed")),
htest = factor(.data$htest, levels = c(1, 0), labels = c("passed", "failed"))
) %>%
left_join(local_freq_t, by = c("j", "k"))
dplyr.summarise.inform <- options()$dplyr.summarise.inform
options(dplyr.summarise.inform = FALSE)
attributes(heidelParams$xi)$meanXi <-
tidy_chain$xi %>%
group_by(.data$k, .data$j, .data$p) %>%
summarize(meanXi = mean(.data$xi))
options(dplyr.summarise.inform = dplyr.summarise.inform)
}
# psi -----
if ("psi" %in% params) {
tc <- tidy_chain$psi %>% filter(.data$k %in% non_trivial_k)
a <-
tc %>%
pivot_wider(names_from = c(.data$k, .data$p), values_from = c(.data$psi)) %>%
select(-.data$iter)
aa <- mcmc(data = a, start = 1)
hd <- heidel.diag(x = aa, eps = eps, pvalue = pvalue)
class(hd) <- "matrix"
kp <- apply(str_split(rownames(hd), "_", simplify = TRUE), 2, as.character)
dkp <- tc %>% select(.data$k, .data$p) %>% distinct()
stopifnot(all.equal(dkp %>% as.matrix() %>% unname(), kp))
heidelParams$psi <-
tibble(dkp, as_tibble(hd)) %>%
mutate(kp = rownames(hd)) %>%
mutate(
stest = factor(.data$stest, levels = c(1, 0), labels = c("passed", "failed")),
htest = factor(.data$htest, levels = c(1, 0), labels = c("passed", "failed"))
) %>%
left_join(glob_freq_t %>% select(.data$k, .data$frq_t), by = "k")
dplyr.summarise.inform <- options()$dplyr.summarise.inform
options(dplyr.summarise.inform = FALSE)
attributes(heidelParams$psi)$meanPsi <-
tidy_chain$psi %>%
group_by(.data$k, .data$p) %>%
summarize(meanPsi = mean(.data$psi))
options(dplyr.summarise.inform = dplyr.summarise.inform)
}
# G -----
if ("G" %in% params) {
tc <- tidy_chain$G %>% filter(.data$k %in% non_trivial_k)
a <-
tc %>%
pivot_wider(names_from = c(.data$k, .data$p1, .data$p2), values_from = c(.data$G)) %>%
select(-.data$iter)
aa <- mcmc(data = a, start = 1)
hd <- heidel.diag(x = aa, eps = eps, pvalue = pvalue)
class(hd) <- "matrix"
kp1p2 <- apply(str_split(rownames(hd), "_", simplify = TRUE), 2, as.character)
dkp1p2 <- tc %>% select(.data$k, .data$p1, .data$p2) %>% distinct()
stopifnot(all.equal(dkp1p2 %>% as.matrix() %>% unname(), kp1p2))
heidelParams$G <-
tibble(dkp1p2, as_tibble(hd)) %>%
mutate(p1p2 = rownames(hd)) %>%
mutate(
stest = factor(.data$stest, levels = c(1, 0), labels = c("passed", "failed")),
htest = factor(.data$htest, levels = c(1, 0), labels = c("passed", "failed"))
) %>%
left_join(glob_freq_t %>% select(.data$k, .data$frq_t), by = "k")
dplyr.summarise.inform <- options()$dplyr.summarise.inform
options(dplyr.summarise.inform = FALSE)
attributes(heidelParams$G)$meanG <-
tidy_chain$G %>%
group_by(.data$k, .data$p1, .data$p2) %>%
summarize(meanG = mean(.data$G))
options(dplyr.summarise.inform = dplyr.summarise.inform)
}
# E -----
if ("E" %in% params) {
tc <- tidy_chain$E %>% filter(.data$k %in% non_trivial_k)
a <-
tc %>%
pivot_wider(names_from = c(.data$k, .data$p1, .data$p2), values_from = c(.data$E)) %>%
select(-.data$iter)
aa <- mcmc(data = a, start = 1)
hd <- heidel.diag(x = aa, eps = eps, pvalue = pvalue)
class(hd) <- "matrix"
kp1p2 <- apply(str_split(rownames(hd), "_", simplify = TRUE), 2, as.character)
dkp1p2 <- tc %>% select(.data$k, .data$p1, .data$p2) %>% distinct()
stopifnot(all.equal(dkp1p2 %>% as.matrix() %>% unname(), kp1p2))
heidelParams$E <-
tibble(dkp1p2, as_tibble(hd)) %>%
mutate(p1p2 = rownames(hd)) %>%
mutate(
stest = factor(.data$stest, levels = c(1, 0), labels = c("passed", "failed")),
htest = factor(.data$htest, levels = c(1, 0), labels = c("passed", "failed"))
) %>%
left_join(glob_freq_t %>% select(.data$k, .data$frq_t), by = "k")
dplyr.summarise.inform <- options()$dplyr.summarise.inform
options(dplyr.summarise.inform = FALSE)
attributes(heidelParams$E)$meanE <-
tidy_chain$E %>%
group_by(.data$k, .data$p1, .data$p2) %>%
summarize(meanE = mean(.data$E))
options(dplyr.summarise.inform = dplyr.summarise.inform)
}
# eta -----
if ("eta" %in% params) {
eta <- mcmc(data = tidy_chain$eta$eta, start = 1)
hd <- heidel.diag(x = eta, eps = eps, pvalue = pvalue)
class(hd) <- "matrix"
heidelParams$eta <-
as_tibble(hd) %>%
mutate(
stest = factor(.data$stest, levels = c(1, 0), labels = c("passed", "failed")),
htest = factor(.data$htest, levels = c(1, 0), labels = c("passed", "failed"))
)
}
return(heidelParams)
}
#' This function creates plots for the Heidelberg-Welch diagnostic and
#' results of test of stationarity for the parameters of the model.
#'
#' @param hd An object of class \code{heidelParamsCOMIX} as created
#' by the function \code{heidelParams}.
#' @param param Character, naming the parameter to create a plot of the
#' Heidelberg-Welch diagnostic for.
#' @return A \code{ggplot2} plot containing the Heidelberg-Welch diagnostic plot.
#' @examples
#' library(COMIX)
#' # Number of observations for each sample (row) and cluster (column):
#' njk <-
#' matrix(
#' c(
#' 150, 300,
#' 250, 200
#' ),
#' nrow = 2,
#' byrow = TRUE
#' )
#'
#' # Dimension of data:
#' p <- 3
#'
#' # Scale and skew parameters for first cluster:
#' Sigma1 <- matrix(0.5, nrow = p, ncol = p) + diag(0.5, nrow = p)
#' alpha1 <- rep(0, p)
#' alpha1[1] <- -5
#' # location parameter for first cluster in first sample:
#' xi11 <- rep(0, p)
#' # location parameter for first cluster in second sample (aligned with first):
#' xi21 <- rep(0, p)
#'
#' # Scale and skew parameters for second cluster:
#' Sigma2 <- matrix(-1/3, nrow = p, ncol = p) + diag(1 + 1/3, nrow = p)
#' alpha2 <- rep(0, p)
#' alpha2[2] <- 5
#' # location parameter for second cluster in first sample:
#' xi12 <- rep(3, p)
#' # location parameter for second cluster in second sample (misaligned with first):
#' xi22 <- rep(4, p)
#'
#' # Sample data:
#' set.seed(1)
#' Y <-
#' rbind(
#' sn::rmsn(njk[1, 1], xi = xi11, Omega = Sigma1, alpha = alpha1),
#' sn::rmsn(njk[1, 2], xi = xi12, Omega = Sigma2, alpha = alpha2),
#' sn::rmsn(njk[2, 1], xi = xi21, Omega = Sigma1, alpha = alpha1),
#' sn::rmsn(njk[2, 2], xi = xi22, Omega = Sigma2, alpha = alpha2)
#' )
#'
#' C <- c(rep(1, rowSums(njk)[1]), rep(2, rowSums(njk)[2]))
#'
#' prior <- list(zeta = 1, K = 10)
#' pmc <- list(naprt = 5, nburn = 200, nsave = 200) # Reasonable usage
#' pmc <- list(naprt = 5, nburn = 2, nsave = 5) # Minimal usage for documentation
#' # Fit the model:
#' res <- comix(Y, C, pmc = pmc, prior = prior)
#'
#' # Relabel to resolve potential label switching issues:
#' res_relab <- relabelChain(res)
#' effssz <- effectiveSampleSize(res_relab, "w")
#' # Or:
#' tidy_chain <- tidyChain(res_relab, "w")
#' hd <- heidelParams(tidy_chain, "w")
#' plotHeidelParams(hd, "w")
#' # (see vignette for a more detailed example)
#' @export
plotHeidelParams <- function(hd, param) {
stopifnot(is(hd, "heidelParamsCOMIX"))
stopifnot(length(param) == 1)
stopifnot(param %in% c("w", "xi0", "xi", "psi", "G", "E"))
J <- attributes(hd)$J
j_names <- paste0("Sample ", 1:J)
names(j_names) <- 1:J
non_trivial_k <- attributes(hd)$non_trivial_k
glob_freq_t <- attributes(hd)$glob_freq_t
frq_t <- glob_freq_t$frq_t[glob_freq_t$k %in% non_trivial_k]
k_names_frq <- paste0("Cluster ", non_trivial_k, "\n(Est. Freq. = ", round(frq_t, 2), ")")
names(k_names_frq) <- non_trivial_k
scm <-
scale_color_manual(
name = "Heidelberg-Welch\nStationarity test",
labels = c("Passed", "Failed"),
values = c("passed" = "#00ba38", "failed" = "#f8766d")
)
# w -----
if (param == "w") {
g <-
hd$w %>%
left_join(attributes(hd$w)$meanW, by = c("k", "j")) %>%
mutate(mean_na_replace = ifelse(!is.na(.data$mean), .data$mean, .data$meanW)) %>%
mutate(start_na_replace = ifelse(!is.na(.data$start), as.character(.data$start), "")) %>%
ggplot(aes(x = .data$k, y = .data$mean_na_replace, color = .data$stest, label = .data$start_na_replace)) +
geom_point() +
geom_segment(aes(xend = .data$k, y = 0, yend = .data$mean_na_replace)) +
geom_text(
aes(y = .data$meanW + 0.13 * (max(.data$meanW) - min(.data$meanW)) * sign(.data$meanW)),
size = 2.5,
color = "black"
) +
scm +
ylab("Estimated weight\n(start chain from)") +
xlab("Cluster Number") +
facet_wrap(~ .data$j, labeller = labeller(j = j_names))
return(g)
}
# xi0 -----
if (param == "xi0") {
g <-
hd$xi0 %>%
left_join(attributes(hd$xi0)$meanXi0, by = c("k", "p")) %>%
mutate(mean_na_replace = ifelse(!is.na(.data$mean), .data$mean, .data$meanXi0)) %>%
mutate(start_na_replace = ifelse(!is.na(.data$start), as.character(.data$start), "")) %>%
ggplot(aes(x = .data$p, y = .data$mean_na_replace, color = .data$stest, label = .data$start_na_replace)) +
geom_segment(aes(xend = .data$p, y = 0, yend = .data$mean_na_replace)) +
geom_point() +
geom_text(
aes(y = .data$meanXi0 + 0.13 * (max(.data$meanXi0) - min(.data$meanXi0)) * sign(.data$meanXi0)),
size = 2.5,
color = "black"
) +
scm +
ylab("Estimated grand location\n(start chain from)") +
xlab("Margin") +
facet_wrap(~ .data$k, labeller = labeller(k = k_names_frq))
return(g)
}
# xi -----
if (param == "xi") {
g <-
hd$xi %>%
left_join(attributes(hd$xi)$meanXi, by = c("k", "j", "p")) %>%
mutate(mean_na_replace = ifelse(!is.na(.data$mean), .data$mean, .data$meanXi)) %>%
mutate(start_na_replace = ifelse(!is.na(.data$start), as.character(.data$start), "")) %>%
ggplot(aes(x = .data$p, y = .data$mean_na_replace, color = .data$stest, label = .data$start_na_replace)) +
geom_segment(aes(xend = .data$p, y = 0, yend = .data$mean_na_replace)) +
geom_point() +
geom_text(
aes(y = .data$meanXi + 0.13 * (max(.data$meanXi) - min(.data$meanXi)) * sign(.data$meanXi)),
size = 2.5,
color = "black"
) +
scm +
ylab("Estimated cluster-specific location\n(start chain from)") +
xlab("Margin") +
facet_grid(.data$j ~ .data$k, labeller = labeller(j = j_names, k = k_names_frq))
return(g)
}
# psi -----
if (param == "psi") {
g <-
hd$psi %>%
left_join(attributes(hd$psi)$meanPsi, by = c("k", "p")) %>%
mutate(mean_na_replace = ifelse(!is.na(.data$mean), .data$mean, .data$meanPsi)) %>%
mutate(start_na_replace = ifelse(!is.na(.data$start), as.character(.data$start), "")) %>%
ggplot(aes(x = .data$p, y = .data$mean_na_replace, color = .data$stest, label = .data$start_na_replace)) +
geom_segment(aes(xend = .data$p, y = 0, yend = .data$mean_na_replace)) +
geom_point() +
geom_text(
aes(y = .data$meanPsi + 0.13 * (max(.data$meanPsi) - min(.data$meanPsi)) * sign(.data$meanPsi)),
size = 2.5,
color = "black"
) +
scm +
ylab("Estimated \U03C8\n(start chain from)") +
xlab("Margin") +
facet_wrap(~ .data$k, labeller = labeller(k = k_names_frq))
return(g)
}
# G -----
if (param == "G") {
g <-
hd$G %>%
mutate(
Diagonal =
factor(
.data$p1 == .data$p2,
levels = c(TRUE, FALSE),
labels = c("Diagonal", "Off-Diagonal")
)
) %>%
mutate(p1p2 = paste0(.data$p1, ", ", .data$p2)) %>%
left_join(attributes(hd$G)$meanG, by = c("k", "p1", "p2")) %>%
mutate(mean_na_replace = ifelse(!is.na(.data$mean), .data$mean, .data$meanG)) %>%
mutate(start_na_replace = ifelse(!is.na(.data$start), as.character(.data$start), "")) %>%
ggplot(aes(x = .data$p1p2, y = .data$mean_na_replace, color = .data$stest, label = .data$start_na_replace)) +
geom_segment(aes(xend = .data$p1p2, y = 0, yend = .data$mean_na_replace)) +
geom_point(aes(shape = .data$Diagonal)) +
geom_text(
aes(y = .data$meanG + 0.13 * (max(.data$meanG) - min(.data$meanG)) * sign(.data$meanG)),
size = 2.5,
color = "black"
) +
scm +
ylab("Estimated G\n(start chain from)") +
xlab("Margin") +
facet_wrap(~ .data$k, labeller = labeller(k = k_names_frq)) +
theme(axis.text.x = element_text(angle = -90, vjust = .5, hjust = 1))
return(g)
}
# E -----
if (param == "E") {
g <-
hd$E %>%
mutate(
Diagonal =
factor(
.data$p1 == .data$p2,
levels = c(TRUE, FALSE),
labels = c("Diagonal", "Off-Diagonal")
)
) %>%
mutate(p1p2 = paste0(.data$p1, ", ", .data$p2)) %>%
left_join(attributes(hd$E)$meanE, by = c("k", "p1", "p2")) %>%
mutate(mean_na_replace = ifelse(!is.na(.data$mean), .data$mean, .data$meanE)) %>%
mutate(start_na_replace = ifelse(!is.na(.data$start), as.character(.data$start), "")) %>%
ggplot(aes(x = .data$p1p2, y = .data$mean_na_replace, color = .data$stest, label = .data$start_na_replace)) +
geom_segment(aes(xend = .data$p1p2, y = 0, yend = .data$mean_na_replace)) +
geom_point(aes(shape = .data$Diagonal)) +
geom_text(
aes(y = .data$meanE + 0.13 * (max(.data$meanE) - min(.data$meanE)) * sign(.data$meanE)),
size = 2.5,
color = "black"
) +
scm +
ylab("Estimated G\n(start chain from)") +
xlab("Margin") +
facet_wrap(~ .data$k, labeller = labeller(k = k_names_frq)) +
theme(axis.text.x = element_text(angle = -90, vjust = .5, hjust = 1))
return(g)
}
return(NULL)
}
|
/scratch/gouwar.j/cran-all/cranData/COMIX/R/heidelberg_welch.R
|
#' This function creates tidy versions of the stored chain. This object can then be used as input
#' for the other diagnostic functions in this package.
#'
#' @param res An object of class COMIX.
#' @param params A character vector naming the parameters to tidy.
#' @return A \code{tidyChainCOMIX} object: a named list of class whose length is the length
#' of \code{params}. Each element of the list contains a tibble with a tidy version of the samples
#' from the MCMC chain.
#' @examples
#' library(COMIX)
#' # Number of observations for each sample (row) and cluster (column):
#' njk <-
#' matrix(
#' c(
#' 150, 300,
#' 250, 200
#' ),
#' nrow = 2,
#' byrow = TRUE
#' )
#'
#' # Dimension of data:
#' p <- 3
#'
#' # Scale and skew parameters for first cluster:
#' Sigma1 <- matrix(0.5, nrow = p, ncol = p) + diag(0.5, nrow = p)
#' alpha1 <- rep(0, p)
#' alpha1[1] <- -5
#' # location parameter for first cluster in first sample:
#' xi11 <- rep(0, p)
#' # location parameter for first cluster in second sample (aligned with first):
#' xi21 <- rep(0, p)
#'
#' # Scale and skew parameters for second cluster:
#' Sigma2 <- matrix(-1/3, nrow = p, ncol = p) + diag(1 + 1/3, nrow = p)
#' alpha2 <- rep(0, p)
#' alpha2[2] <- 5
#' # location parameter for second cluster in first sample:
#' xi12 <- rep(3, p)
#' # location parameter for second cluster in second sample (misaligned with first):
#' xi22 <- rep(4, p)
#'
#' # Sample data:
#' set.seed(1)
#' Y <-
#' rbind(
#' sn::rmsn(njk[1, 1], xi = xi11, Omega = Sigma1, alpha = alpha1),
#' sn::rmsn(njk[1, 2], xi = xi12, Omega = Sigma2, alpha = alpha2),
#' sn::rmsn(njk[2, 1], xi = xi21, Omega = Sigma1, alpha = alpha1),
#' sn::rmsn(njk[2, 2], xi = xi22, Omega = Sigma2, alpha = alpha2)
#' )
#'
#' C <- c(rep(1, rowSums(njk)[1]), rep(2, rowSums(njk)[2]))
#'
#' prior <- list(zeta = 1, K = 10)
#' pmc <- list(naprt = 5, nburn = 200, nsave = 200) # Reasonable usage
#' pmc <- list(naprt = 5, nburn = 2, nsave = 5) # Minimal usage for documentation
#' # Fit the model:
#' res <- comix(Y, C, pmc = pmc, prior = prior)
#'
#' # Relabel to resolve potential label switching issues:
#' res_relab <- relabelChain(res)
#' tidy_chain <- tidyChain(res_relab)
#' # (see vignette for a more detailed example)
#' @export
tidyChain <- function(res, params = c("t", "w", "xi", "xi0", "psi", "G", "E", "eta", "Sigma", "alpha")) {
stopifnot(is(res, "COMIX"))
sum_chain <- COMIX::summarizeChain(res)
n <- nrow(res$data$Y)
P <- ncol(res$data$Y)
nsave <- res$pmc$nsave
K <- res$prior$K
J <- length(unique(res$data$C))
non_trivial_k <- sort(unique(sum_chain$t))
non_triv_j_k <- split(x = sum_chain$t, f = res$data$C) %>% lapply(function(t) sort(unique(t)))
tidy_chain <- list()
class(tidy_chain) <- "tidyChainCOMIX"
attributes(tidy_chain)$pmc <- res$pmc
attributes(tidy_chain)$prior <- res$prior
attributes(tidy_chain)$n <- n
attributes(tidy_chain)$p <- P
attributes(tidy_chain)$nsave <- nsave
attributes(tidy_chain)$K <- K
attributes(tidy_chain)$J <- J
attributes(tidy_chain)$non_trivial_k <- non_trivial_k
attributes(tidy_chain)$non_triv_j_k <- non_triv_j_k
attributes(tidy_chain)$glob_freq_t <-
data.frame(
x = (P + 1) / 2,
table(sum_chain$t) / sum(table(sum_chain$t))
) %>%
rename(c("k" = "Var1", "frq_t" = "Freq"))
dplyr.summarise.inform <- options()$dplyr.summarise.inform
options(dplyr.summarise.inform = FALSE)
attributes(tidy_chain)$local_freq_t <-
tibble(x = (P + 1) / 2, k = sum_chain$t, j = c(res$data$C)) %>%
group_by(.data$j) %>%
summarize(Freq = table(.data$k) / sum(table(.data$k))) %>%
mutate(x = (P + 1) / 2, k = names(.data$Freq), frq_t = as.numeric(.data$Freq)) %>%
select(-.data$Freq)
options(dplyr.summarise.inform = dplyr.summarise.inform)
# t -----
if ("t" %in% params) {
tidy_chain$t <- tibble(t = res$chain$t)
}
# w -----
if ("w" %in% params) {
w_tb <-
expand_grid(j = 1:J, k = 1:K) %>%
slice(rep(1:n(), each = nsave)) %>%
mutate(
j = factor(.data$j),
k = factor(.data$k),
W = 0
)
w_tb$iter <- rep(1:nsave, J * K)
for ( j in 1:J ) {
for ( k in 1:K ) {
w_tb$W[w_tb$j == j & w_tb$k == k] <- res$chain$W[j, k, ]
}
}
tidy_chain$w <- w_tb
}
# xi0 -----
if ("xi0" %in% params) {
xi0_tb <-
expand_grid(p = 1:P, k = 1:K) %>%
slice(rep(1:n(), each = nsave)) %>%
mutate(
p = factor(.data$p),
k = factor(.data$k),
xi0 = 0
)
xi0_tb$iter <- rep(1:nsave, P * K)
for ( p in 1:P ) {
for ( k in 1:K ) {
xi0_tb$xi0[xi0_tb$p == p & xi0_tb$k == k] <- res$chain$xi0[p, k, ]
}
}
tidy_chain$xi0 <- xi0_tb
}
# xi -----
if ("xi" %in% params) {
xi_tb <-
tibble(
k = NA_integer_,
p = NA_integer_,
j = NA_integer_,
xi = NA_real_,
iter = rep(1:nsave, K * P * J)
)
counter <- 1
for ( k in 1:K ) {
for ( p in 1:P ) {
for ( j in 1:J ) {
current_idx <- counter : (counter + nsave - 1)
xi_tb$k[current_idx] <- k
xi_tb$p[current_idx] <- p
xi_tb$j[current_idx] <- j
xi_tb$xi[current_idx] <- res$chain$xi[j, P * (k - 1) + p, ]
counter <- counter + nsave
}
}
}
xi_tb <- mutate(xi_tb, p = factor(p), k = factor(k), j = factor(j))
tidy_chain$xi <- xi_tb
}
# psi -----
if ("psi" %in% params) {
psi_tb <-
expand_grid(p = 1:P, k = 1:K) %>%
slice(rep(1:n(), each = nsave)) %>%
mutate(
p = factor(.data$p),
k = factor(.data$k),
psi = 0
)
psi_tb$iter <- rep(1:nsave, P * K)
for ( p in 1:P ) {
for ( k in 1:K ) {
psi_tb$psi[psi_tb$p == p & psi_tb$k == k] <- res$chain$psi[p, k, ]
}
}
tidy_chain$psi <- psi_tb
}
# G -----
if ("G" %in% params) {
G_tb <-
tibble(
k = NA_integer_,
p1 = NA_integer_,
p2 = NA_integer_,
G = NA_real_,
iter = rep(1:nsave, K * (P + 1) * P / 2)
)
counter <- 1
for ( k in 1:K ) {
for ( p1 in 1:P ) {
for ( p2 in p1:P ) {
current_idx <- counter : (counter + nsave - 1)
G_tb$k[current_idx] <- k
G_tb$p1[current_idx] <- p1
G_tb$p2[current_idx] <- p2
G_tb$G[current_idx] <- res$chain$G[p1, P * (k - 1) + p2, ]
counter <- counter + nsave
}
}
}
G_tb <- mutate(G_tb, k = factor(k), p1 = factor(p1), p2 = factor(p2))
tidy_chain$G <- G_tb
}
# E -----
if ("E" %in% params) {
E_tb <-
tibble(
k = NA_integer_,
p1 = NA_integer_,
p2 = NA_integer_,
E = NA_real_,
iter = rep(1:nsave, K * (P + 1) * P / 2)
)
counter <- 1
for ( k in 1:K ) {
for ( p1 in 1:P ) {
for ( p2 in p1:P ) {
current_idx <- counter : (counter + nsave - 1)
E_tb$k[current_idx] <- k
E_tb$p1[current_idx] <- p1
E_tb$p2[current_idx] <- p2
E_tb$E[current_idx] <- res$chain$E[p1, P * (k - 1) + p2, ]
counter <- counter + nsave
}
}
}
E_tb <- mutate(E_tb, k = factor(k), p1 = factor(p1), p2 = factor(p2))
tidy_chain$E <- E_tb
}
# eta -----
if ("eta" %in% params) {
tidy_chain$eta <- tibble(eta = res$chain$eta) %>% mutate(iter = row_number())
}
# Sigma -----
if ("Sigma" %in% params) {
Sigma_tb <-
tibble(
k = NA_integer_,
p1 = NA_integer_,
p2 = NA_integer_,
Sigma = NA_real_,
iter = rep(1:nsave, K * (P + 1) * P / 2)
)
counter <- 1
for ( k in 1:K ) {
for ( p1 in 1:P ) {
for ( p2 in p1:P ) {
current_idx <- counter : (counter + nsave - 1)
Sigma_tb$k[current_idx] <- k
Sigma_tb$p1[current_idx] <- p1
Sigma_tb$p2[current_idx] <- p2
Sigma_tb$Sigma[current_idx] <- res$chain$Sigma[p1, P * (k - 1) + p2, ]
counter <- counter + nsave
}
}
}
Sigma_tb <- mutate(Sigma_tb, k = factor(k), p1 = factor(p1), p2 = factor(p2))
tidy_chain$Sigma <- Sigma_tb
}
# alpha -----
if ("alpha" %in% params) {
alpha_tb <-
expand_grid(p = 1:P, k = 1:K) %>%
slice(rep(1:n(), each = nsave)) %>%
mutate(
p = factor(.data$p),
k = factor(.data$k),
alpha = 0
)
alpha_tb$iter <- rep(1:nsave, P * K)
for ( p in 1:P ) {
for ( k in 1:K ) {
alpha_tb$alpha[alpha_tb$p == p & alpha_tb$k == k] <- res$chain$alpha[p, k, ]
}
}
tidy_chain$alpha <- alpha_tb
}
return(tidy_chain)
}
|
/scratch/gouwar.j/cran-all/cranData/COMIX/R/tidy_chain.R
|
#' This function creates trace plots for different parameters of the MCMC chain.
#'
#' @param res An object of class \code{COMIX} or \code{tidyChainCOMIX}.
#' @param param Character, naming the parameter to create a trace plot for.
#' @return A \code{ggplot2} plot containing the trace plot.
#' @examples
#' library(COMIX)
#' # Number of observations for each sample (row) and cluster (column):
#' njk <-
#' matrix(
#' c(
#' 150, 300,
#' 250, 200
#' ),
#' nrow = 2,
#' byrow = TRUE
#' )
#'
#' # Dimension of data:
#' p <- 3
#'
#' # Scale and skew parameters for first cluster:
#' Sigma1 <- matrix(0.5, nrow = p, ncol = p) + diag(0.5, nrow = p)
#' alpha1 <- rep(0, p)
#' alpha1[1] <- -5
#' # location parameter for first cluster in first sample:
#' xi11 <- rep(0, p)
#' # location parameter for first cluster in second sample (aligned with first):
#' xi21 <- rep(0, p)
#'
#' # Scale and skew parameters for second cluster:
#' Sigma2 <- matrix(-1/3, nrow = p, ncol = p) + diag(1 + 1/3, nrow = p)
#' alpha2 <- rep(0, p)
#' alpha2[2] <- 5
#' # location parameter for second cluster in first sample:
#' xi12 <- rep(3, p)
#' # location parameter for second cluster in second sample (misaligned with first):
#' xi22 <- rep(4, p)
#'
#' # Sample data:
#' set.seed(1)
#' Y <-
#' rbind(
#' sn::rmsn(njk[1, 1], xi = xi11, Omega = Sigma1, alpha = alpha1),
#' sn::rmsn(njk[1, 2], xi = xi12, Omega = Sigma2, alpha = alpha2),
#' sn::rmsn(njk[2, 1], xi = xi21, Omega = Sigma1, alpha = alpha1),
#' sn::rmsn(njk[2, 2], xi = xi22, Omega = Sigma2, alpha = alpha2)
#' )
#'
#' C <- c(rep(1, rowSums(njk)[1]), rep(2, rowSums(njk)[2]))
#'
#' prior <- list(zeta = 1, K = 10)
#' pmc <- list(naprt = 5, nburn = 200, nsave = 200) # Reasonable usage
#' pmc <- list(naprt = 5, nburn = 2, nsave = 5) # Minimal usage for documentation
#' # Fit the model:
#' res <- comix(Y, C, pmc = pmc, prior = prior)
#'
#' # Relabel to resolve potential label switching issues:
#' res_relab <- relabelChain(res)
#' plotTracePlots(res_relab, "w")
#' # Or:
#' tidy_chain <- tidyChain(res_relab, "w")
#' plotTracePlots(tidy_chain, "w")
#' # (see vignette for a more detailed example)
#' @export
plotTracePlots <- function(res, param) {
stopifnot(is(res, "COMIX") | is(res, "tidyChainCOMIX"))
stopifnot(length(param) == 1)
stopifnot(param %in% c("w", "xi0", "xi", "psi", "G", "E", "eta"))
if (is(res, "COMIX")) {
tidy_chain <- tidyChain(res, param)
} else {
tidy_chain <- res
}
n <- attributes(tidy_chain)$n
P <- attributes(tidy_chain)$p
nsave <- attributes(tidy_chain)$nsave
K <- attributes(tidy_chain)$K
J <- attributes(tidy_chain)$J
non_trivial_k <- attributes(tidy_chain)$non_trivial_k
non_triv_j_k <- attributes(tidy_chain)$non_triv_j_k
glob_freq_t <- attributes(tidy_chain)$glob_freq_t
frq_t <- glob_freq_t$frq_t[glob_freq_t$k %in% non_trivial_k]
k_names <- paste0("Cluster ", non_trivial_k, "\n(Est. Freq. = ", round(frq_t, 2), ")")
names(k_names) <- non_trivial_k
j_names <- paste0("Sample ", 1:J)
names(j_names) <- 1:J
# w -----
if (param == "w") {
g <-
tidy_chain$w %>%
ggplot() +
geom_point(
mapping = aes(x = .data$iter, y = .data$W, color = .data$k),
show.legend = FALSE,
size = 1,
alpha = 0.4) +
facet_wrap(~ .data$j, labeller = labeller(j = j_names)) +
ylab(expression(omega)) +
xlab("Iteration") +
theme(axis.text.x = element_text(angle = -90, vjust = 0.5))
return(g)
}
# xi0 -----
if (param == "xi0") {
g <-
tidy_chain$xi0 %>%
filter(.data$k %in% non_trivial_k) %>%
ggplot() +
geom_point(
mapping = aes(x = .data$iter, y = .data$xi0, color = .data$p),
show.legend = FALSE,
size = 1,
alpha = 0.4
) +
facet_wrap( ~ .data$k, labeller = labeller(k = k_names)) +
ylab(expression(xi[0])) +
xlab("Iteration") +
theme(axis.text.x = element_text(angle = -90, vjust = 0.5))
return(g)
}
# xi -----
if (param == "xi") {
a <- tidy_chain$xi
a$triv <- TRUE
for (j in 1:J) {
a$triv[a$j == j & a$k %in% non_triv_j_k[[as.character(j)]]] <- FALSE
}
g <-
a %>%
filter(!.data$triv) %>%
ggplot() +
geom_point(
mapping = aes(x = .data$iter, y = .data$xi, color = .data$p),
show.legend = FALSE,
size = 1,
alpha = 0.4
) +
facet_grid(.data$j ~ .data$k, labeller = labeller(j = j_names, k = k_names)) +
ylab(expression(xi)) +
xlab("Iteration") +
theme(axis.text.x = element_text(angle = -90, vjust = 0.5))
return(g)
}
# psi -----
if (param == "psi") {
g <-
tidy_chain$psi %>%
filter(.data$k %in% non_trivial_k) %>%
ggplot() +
geom_point(
mapping = aes(x = .data$iter, y = .data$psi, color = .data$p),
show.legend = FALSE,
size = 1,
alpha = 0.4
) +
facet_wrap(~ .data$k, labeller = labeller(k = k_names)) +
ylab(expression(psi)) +
xlab("Iteration") +
theme(axis.text.x = element_text(angle = -90, vjust = 0.5))
return(g)
}
# G -----
if ("G" %in% param) {
g <-
tidy_chain$G %>%
filter(.data$k %in% non_trivial_k) %>%
mutate(pp = paste0(.data$p1, ",", .data$p2)) %>%
ggplot() +
geom_point(
mapping = aes(x = .data$iter, y = .data$G, color = .data$pp),
show.legend = FALSE,
size = 1,
alpha = 0.4
) +
facet_wrap(~ .data$k, labeller = labeller(k = k_names)) +
ylab("G") +
xlab("Iteration") +
theme(axis.text.x = element_text(angle = -90, vjust = 0.5))
return(g)
}
# E -----
if ("E" %in% param) {
g <-
tidy_chain$E %>%
filter(.data$k %in% non_trivial_k) %>%
mutate(pp = paste0(.data$p1, ",", .data$p2)) %>%
ggplot() +
geom_point(
mapping = aes(x = .data$iter, y = .data$E, color = .data$pp),
show.legend = FALSE,
size = 1,
alpha = 0.4
) +
facet_wrap(~ .data$k, labeller = labeller(k = k_names)) +
ylab("E") +
xlab("Iteration") +
theme(axis.text.x = element_text(angle = -90, vjust = 0.5))
return(g)
}
# eta -----
if ("eta" %in% param) {
g <-
ggplot(tidy_chain$eta) +
geom_point(aes(x = .data$iter, y = .data$eta), size = 1, alpha = 0.4) +
ylab(expression(eta)) +
xlab("Iteration")
return(g)
}
return(NULL)
}
|
/scratch/gouwar.j/cran-all/cranData/COMIX/R/trace_plot.R
|
#' This function aligns multiple samples so that their location parameters
#' are equal.
#'
#' @param x An object of class COMIX.
#' @param reference.group An integer between 1 and the number of groups in the data
#' (\code{length(unique(C))}). Defaults to \code{NULL}. If \code{NULL}, the samples
#' are aligned so that their location parameters are set to be at the estimated
#' group location parameter. If an integer, the samples are aligned so that their
#' location parameters are the same as the location parameter of sample \code{reference.group}.
#' @return A named list of 3:
#' \itemize{
#' \item \code{Y_cal}: a \code{nrow(x$data$Y)} \eqn{\times}{x} \code{ncol(x$data$Y)}
#' matrix, a calibrated version of the original data.
#' \item \code{calibration_distribution}: an \code{x$pmc$nsave} \eqn{\times}{x}
#' \code{ncol(x$data$Y)} \eqn{\times}{x} \code{nrow(x$data$Y)} array storing the
#' difference between the estimated sample-specific location parameter and the group
#' location parameter for each saved step of the chain.
#' \item \code{calibration_median}: a \code{nrow(x$data$Y)} \eqn{\times}{x} \code{ncol(x$data$Y)}
#' matrix storing the median difference between the estimated sample-specific location parameter and the group
#' location parameter for each saved step of the chain. This matrix is equal to the
#' difference between the uncalibrated data (\code{x$data$Y}) and the calibrated
#' data (\code{Y_cal}).
#' }
#' @examples
#' library(COMIX)
#' # Number of observations for each sample (row) and cluster (column):
#' njk <-
#' matrix(
#' c(
#' 150, 300,
#' 250, 200
#' ),
#' nrow = 2,
#' byrow = TRUE
#' )
#'
#' # Dimension of data:
#' p <- 3
#'
#' # Scale and skew parameters for first cluster:
#' Sigma1 <- matrix(0.5, nrow = p, ncol = p) + diag(0.5, nrow = p)
#' alpha1 <- rep(0, p)
#' alpha1[1] <- -5
#' # location parameter for first cluster in first sample:
#' xi11 <- rep(0, p)
#' # location parameter for first cluster in second sample (aligned with first):
#' xi21 <- rep(0, p)
#'
#' # Scale and skew parameters for second cluster:
#' Sigma2 <- matrix(-1/3, nrow = p, ncol = p) + diag(1 + 1/3, nrow = p)
#' alpha2 <- rep(0, p)
#' alpha2[2] <- 5
#' # location parameter for second cluster in first sample:
#' xi12 <- rep(3, p)
#' # location parameter for second cluster in second sample (misaligned with first):
#' xi22 <- rep(4, p)
#'
#' # Sample data:
#' set.seed(1)
#' Y <-
#' rbind(
#' sn::rmsn(njk[1, 1], xi = xi11, Omega = Sigma1, alpha = alpha1),
#' sn::rmsn(njk[1, 2], xi = xi12, Omega = Sigma2, alpha = alpha2),
#' sn::rmsn(njk[2, 1], xi = xi21, Omega = Sigma1, alpha = alpha1),
#' sn::rmsn(njk[2, 2], xi = xi22, Omega = Sigma2, alpha = alpha2)
#' )
#'
#' C <- c(rep(1, rowSums(njk)[1]), rep(2, rowSums(njk)[2]))
#'
#' prior <- list(zeta = 1, K = 10)
#' pmc <- list(naprt = 5, nburn = 200, nsave = 200) # Reasonable usage
#' pmc <- list(naprt = 5, nburn = 2, nsave = 5) # Minimal usage for documentation
#' # Fit the model:
#' res <- comix(Y, C, pmc = pmc, prior = prior)
#'
#' # Relabel to resolve potential label switching issues:
#' res_relab <- relabelChain(res)
#'
#' # Generate calibrated data:
#' cal <- calibrateNoDist(res_relab)
#'
#' # Compare raw and calibrated data: (see plot in vignette)
#' # par(mfrow=c(1, 2))
#' # plot(Y, col = C, xlim = range(Y[,1]), ylim = range(Y[,2]) )
#'
#' # Get posterior estimates for the model parameters:
#' res_summary <- summarizeChain(res_relab)
#' # Check for instance, the cluster assignment labels:
#' table(res_summary$t)
#' # Indeed the same as
#' colSums(njk)
#'
#' # Or examine the skewness parameter for the non-trivial clusters:
#' res_summary$alpha[ , unique(res_summary$t)]
#' # And compare those to
#' cbind(alpha1, alpha2)
#'
#' # (see vignette for a more detailed example)
#' @export
calibrate <- function(x, reference.group = NULL)
{
C = x$data$C - 1
Z = x$chain$t - 1
ref = ifelse(is.null(reference.group), -1, reference.group - 1 )
if (!is.matrix(Z)) Z=matrix(Z, ncol=1)
ns = dim(x$chain$xi0)[3]
K = dim(x$chain$xi0)[2]
output = calib(x$data$Y,
matrix(C,ncol=1),
Z,
x$chain$xi, dim(x$chain$xi),
x$chain$xi0, dim(x$chain$xi0),
ref)
colnames(output$Y_cal) = colnames(x$data$Y)
return(output)
}
#' This function aligns multiple samples so that their location parameters
#' are equal.
#'
#' @param x An object of class COMIX.
#' @param reference.group An integer between 1 and the number of groups in the data
#' (\code{length(unique(C))}). Defaults to \code{NULL}. If \code{NULL}, the samples
#' are aligned so that their location parameters are set to be at the estimated
#' group location parameter. If an integer, the samples are aligned so that their
#' location parameters are the same as the location parameter of sample \code{reference.group}.
#' @return A named list of 2:
#' \itemize{
#' \item \code{Y_cal}: a \code{nrow(x$data$Y)} \eqn{\times}{x} \code{ncol(x$data$Y)}
#' matrix, a calibrated version of the original data.
#' \item \code{calibration_median}: a \code{nrow(x$data$Y)} \eqn{\times}{x} \code{ncol(x$data$Y)}
#' matrix storing the median difference between the estimated sample-specific location parameter and the group
#' location parameter for each saved step of the chain. This matrix is equal to the
#' difference between the uncalibrated data (\code{x$data$Y}) and the calibrated
#' data (\code{Y_cal}).
#' }
#' @examples
#' library(COMIX)
#' # Number of observations for each sample (row) and cluster (column):
#' njk <-
#' matrix(
#' c(
#' 150, 300,
#' 250, 200
#' ),
#' nrow = 2,
#' byrow = TRUE
#' )
#'
#' # Dimension of data:
#' p <- 3
#'
#' # Scale and skew parameters for first cluster:
#' Sigma1 <- matrix(0.5, nrow = p, ncol = p) + diag(0.5, nrow = p)
#' alpha1 <- rep(0, p)
#' alpha1[1] <- -5
#' # location parameter for first cluster in first sample:
#' xi11 <- rep(0, p)
#' # location parameter for first cluster in second sample (aligned with first):
#' xi21 <- rep(0, p)
#'
#' # Scale and skew parameters for second cluster:
#' Sigma2 <- matrix(-1/3, nrow = p, ncol = p) + diag(1 + 1/3, nrow = p)
#' alpha2 <- rep(0, p)
#' alpha2[2] <- 5
#' # location parameter for second cluster in first sample:
#' xi12 <- rep(3, p)
#' # location parameter for second cluster in second sample (misaligned with first):
#' xi22 <- rep(4, p)
#'
#' # Sample data:
#' set.seed(1)
#' Y <-
#' rbind(
#' sn::rmsn(njk[1, 1], xi = xi11, Omega = Sigma1, alpha = alpha1),
#' sn::rmsn(njk[1, 2], xi = xi12, Omega = Sigma2, alpha = alpha2),
#' sn::rmsn(njk[2, 1], xi = xi21, Omega = Sigma1, alpha = alpha1),
#' sn::rmsn(njk[2, 2], xi = xi22, Omega = Sigma2, alpha = alpha2)
#' )
#'
#' C <- c(rep(1, rowSums(njk)[1]), rep(2, rowSums(njk)[2]))
#'
#' prior <- list(zeta = 1, K = 10)
#' pmc <- list(naprt = 5, nburn = 200, nsave = 200) # Reasonable usage
#' pmc <- list(naprt = 5, nburn = 2, nsave = 5) # Minimal usage for documentation
#' # Fit the model:
#' res <- comix(Y, C, pmc = pmc, prior = prior)
#'
#' # Relabel to resolve potential label switching issues:
#' res_relab <- relabelChain(res)
#'
#' # Generate calibrated data:
#' cal <- calibrateNoDist(res_relab)
#'
#' # Compare raw and calibrated data: (see plot in vignette)
#' # par(mfrow=c(1, 2))
#' # plot(Y, col = C, xlim = range(Y[,1]), ylim = range(Y[,2]) )
#'
#' # Get posterior estimates for the model parameters:
#' res_summary <- summarizeChain(res_relab)
#' # Check for instance, the cluster assignment labels:
#' table(res_summary$t)
#' # Indeed the same as
#' colSums(njk)
#'
#' # Or examine the skewness parameter for the non-trivial clusters:
#' res_summary$alpha[ , unique(res_summary$t)]
#' # And compare those to
#' cbind(alpha1, alpha2)
#'
#' # (see vignette for a more detailed example)
#' @export
calibrateNoDist <- function(x, reference.group = NULL)
{
C = x$data$C - 1
Z = x$chain$t - 1
ref = ifelse(is.null(reference.group), -1, reference.group - 1 )
if (!is.matrix(Z)) Z=matrix(Z, ncol=1)
ns = dim(x$chain$xi0)[3]
K = dim(x$chain$xi0)[2]
output = calibNoDist(x$data$Y,
matrix(C,ncol=1),
Z,
x$chain$xi, dim(x$chain$xi),
x$chain$xi0, dim(x$chain$xi0),
ref)
colnames(output$Y_cal) = colnames(x$data$Y)
return(output)
}
#' This function relabels the chain to avoid label switching issues.
#'
#' @param res An object of class COMIX.
#' @return An object of class COMIX where \code{res$chain$t} is replaced with the
#' new labels.
#' @examples
#' library(COMIX)
#' # Number of observations for each sample (row) and cluster (column):
#' njk <-
#' matrix(
#' c(
#' 150, 300,
#' 250, 200
#' ),
#' nrow = 2,
#' byrow = TRUE
#' )
#'
#' # Dimension of data:
#' p <- 3
#'
#' # Scale and skew parameters for first cluster:
#' Sigma1 <- matrix(0.5, nrow = p, ncol = p) + diag(0.5, nrow = p)
#' alpha1 <- rep(0, p)
#' alpha1[1] <- -5
#' # location parameter for first cluster in first sample:
#' xi11 <- rep(0, p)
#' # location parameter for first cluster in second sample (aligned with first):
#' xi21 <- rep(0, p)
#'
#' # Scale and skew parameters for second cluster:
#' Sigma2 <- matrix(-1/3, nrow = p, ncol = p) + diag(1 + 1/3, nrow = p)
#' alpha2 <- rep(0, p)
#' alpha2[2] <- 5
#' # location parameter for second cluster in first sample:
#' xi12 <- rep(3, p)
#' # location parameter for second cluster in second sample (misaligned with first):
#' xi22 <- rep(4, p)
#'
#' # Sample data:
#' set.seed(1)
#' Y <-
#' rbind(
#' sn::rmsn(njk[1, 1], xi = xi11, Omega = Sigma1, alpha = alpha1),
#' sn::rmsn(njk[1, 2], xi = xi12, Omega = Sigma2, alpha = alpha2),
#' sn::rmsn(njk[2, 1], xi = xi21, Omega = Sigma1, alpha = alpha1),
#' sn::rmsn(njk[2, 2], xi = xi22, Omega = Sigma2, alpha = alpha2)
#' )
#'
#' C <- c(rep(1, rowSums(njk)[1]), rep(2, rowSums(njk)[2]))
#'
#' prior <- list(zeta = 1, K = 10)
#' pmc <- list(naprt = 5, nburn = 200, nsave = 200) # Reasonable usage
#' pmc <- list(naprt = 5, nburn = 2, nsave = 5) # Minimal usage for documentation
#' # Fit the model:
#' res <- comix(Y, C, pmc = pmc, prior = prior)
#'
#' # Relabel to resolve potential label switching issues:
#' res_relab <- relabelChain(res)
#'
#' # Generate calibrated data:
#' cal <- calibrateNoDist(res_relab)
#'
#' # Compare raw and calibrated data: (see plot in vignette)
#' # par(mfrow=c(1, 2))
#' # plot(Y, col = C, xlim = range(Y[,1]), ylim = range(Y[,2]) )
#'
#' # Get posterior estimates for the model parameters:
#' res_summary <- summarizeChain(res_relab)
#' # Check for instance, the cluster assignment labels:
#' table(res_summary$t)
#' # Indeed the same as
#' colSums(njk)
#'
#' # Or examine the skewness parameter for the non-trivial clusters:
#' res_summary$alpha[ , unique(res_summary$t)]
#' # And compare those to
#' cbind(alpha1, alpha2)
#'
#' # (see vignette for a more detailed example)
#' @export
relabelChain = function(res) {
res$chain$t = res$chain$t - 1
relabeled_chain = relabel(res)
res$chain = relabeled_chain
res
}
#' Convert between parameterizations of the multivariate skew normal distribution.
#'
#' @param Sigma A scale matrix.
#' @param alpha A vector for the skew parameter.
#' @return A list:
#' \itemize{
#' \item \code{delta}: a reparameterized skewness vector, a transformed
#' version of \code{alpha}.
#' \item \code{omega}: a diagonal matrix of the same dimensions as \code{Sigma},
#' the diagonal elements are the square roots of the diagonal elements of \code{Sigma}.
#' \item \code{psi}: another reparameterized skewness vector, utilized in the sampler.
#' \item \code{G}: a reparameterized version of \code{Sigma}, utilized in the sampler.
#' }
#' @examples
#' library(COMIX)
#' # Scale and skew parameters:
#' Sigma <- matrix(0.5, nrow = 4, ncol = 4) + diag(0.5, nrow = 4)
#' alpha <- c(0, 0, 0, 5)
#' transformed_parameters <- transform_params(Sigma, alpha)
#' @export
transform_params = function(Sigma, alpha) {
n = NROW(Sigma)
m = NCOL(Sigma)
if (n!=m) stop("Sigma is not sqaure")
if (length(alpha)!=n) stop("alpha is of wrong length")
if (n==1) {
omega = sqrt(Sigma)
} else {
omega = sqrt(diag(diag(Sigma)))
}
omega_inv = solve(omega)
Omega = omega_inv %*% Sigma %*% omega_inv
alpha = matrix(alpha, ncol=1)
numer = Omega %*% alpha
denom = as.numeric(sqrt(1 + t(alpha) %*% Omega %*% alpha))
delta = numer/denom
if (n==1) {
psi = sqrt(Sigma) * delta
} else {
psi = sqrt(diag(Sigma)) * delta
}
return(list(delta=delta, omega=omega,
psi=psi, G = (Sigma - psi%*%t(psi))))
}
Mode = function(x) {
ux = unique(x)
ux[which.max(tabulate(match(x, ux)))]
}
#' This function provides post-hoc estimates of the model parameters.
#'
#' @param res An object of class COMIX.
#' @return A named list:
#' \itemize{
#' \item \code{xi0}: a \code{ncol(res$data$Y)} \eqn{\times}{x} \code{res$prior$K} matrix storing
#' the posterior mean of the group location parameter.
#' \item \code{psi}: a \code{ncol(res$data$Y)} \eqn{\times}{x} \code{res$prior$K} matrix storing
#' the posterior mean of the multivariate skew normal kernels skewness parameter (in the parameterization used in
#' the sampler).
#' \item \code{alpha}: a \code{ncol(res$data$Y)} \eqn{\times}{x} \code{res$prior$K} matrix storing
#' the posterior mean of the multivariate skew normal kernels skewness parameter.
#' \item \code{W}: a \code{length(unique(res$data$C))} \eqn{\times}{x} \code{res$prior$K} matrix storing
#' the posterior mean of the mixture weights for each sample and cluster.
#' \item \code{xi}: an \code{length(unique(res$data$C))} \eqn{\times}{x} \code{ncol(res$data$Y)}
#' \eqn{\times}{x} \code{res$prior$K} array storing the the posterior mean of the
#' multivariate skew normal kernels location parameter for each sample and cluster.
#' \item \code{Sigma}: an \code{ncol(res$data$Y)} \eqn{\times}{x} \code{ncol(res$data$Y)}
#' \eqn{\times}{x} \code{res$prior$K} array storing the the posterior mean of the
#' scaling matrix of the multivariate skew normal kernels for each cluster.
#' \item \code{G}: an \code{ncol(res$data$Y)} \eqn{\times}{x} \code{ncol(res$data$Y)}
#' \eqn{\times}{x} \code{res$prior$K} array storing the the posterior mean of the
#' scaling matrix of the multivariate skew normal kernels for each cluster (in the
#' parameterization used in the sampler).
#' \item \code{E}: an \code{ncol(res$data$Y)} \eqn{\times}{x} \code{ncol(res$data$Y)}
#' \eqn{\times}{x} \code{res$prior$K} array storing the the posterior mean of the
#' covariance matrix of the multivariate normal distributions for each cluster form which
#' the sample specific location parameters are drawn.
#' \item \code{meanvec}: an \code{length(unique(res$data$C))} \eqn{\times}{x} \code{ncol(res$data$Y)}
#' \eqn{\times}{x} \code{res$prior$K} array storing the the posterior mean of the
#' multivariate skew normal kernels mean parameter for each sample and cluster.
#' \item \code{meanvec0}: a \code{ncol(res$data$Y)} \eqn{\times}{x} \code{res$prior$K} matrix storing
#' the posterior mean of the group mean parameter.
#' \item \code{t}: Vector of length \code{nrow(x$data$Y)}. Each element is the mode
#' of the posterior distribution of cluster labels.
#' \item \code{eta}: scalar, the mean of the posterior distribution of the estimated
#' Dirichlet Process Mixture concentration parameter.
#'}
#' @examples
#' library(COMIX)
#' # Number of observations for each sample (row) and cluster (column):
#' njk <-
#' matrix(
#' c(
#' 150, 300,
#' 250, 200
#' ),
#' nrow = 2,
#' byrow = TRUE
#' )
#'
#' # Dimension of data:
#' p <- 3
#'
#' # Scale and skew parameters for first cluster:
#' Sigma1 <- matrix(0.5, nrow = p, ncol = p) + diag(0.5, nrow = p)
#' alpha1 <- rep(0, p)
#' alpha1[1] <- -5
#' # location parameter for first cluster in first sample:
#' xi11 <- rep(0, p)
#' # location parameter for first cluster in second sample (aligned with first):
#' xi21 <- rep(0, p)
#'
#' # Scale and skew parameters for second cluster:
#' Sigma2 <- matrix(-1/3, nrow = p, ncol = p) + diag(1 + 1/3, nrow = p)
#' alpha2 <- rep(0, p)
#' alpha2[2] <- 5
#' # location parameter for second cluster in first sample:
#' xi12 <- rep(3, p)
#' # location parameter for second cluster in second sample (misaligned with first):
#' xi22 <- rep(4, p)
#'
#' # Sample data:
#' set.seed(1)
#' Y <-
#' rbind(
#' sn::rmsn(njk[1, 1], xi = xi11, Omega = Sigma1, alpha = alpha1),
#' sn::rmsn(njk[1, 2], xi = xi12, Omega = Sigma2, alpha = alpha2),
#' sn::rmsn(njk[2, 1], xi = xi21, Omega = Sigma1, alpha = alpha1),
#' sn::rmsn(njk[2, 2], xi = xi22, Omega = Sigma2, alpha = alpha2)
#' )
#'
#' C <- c(rep(1, rowSums(njk)[1]), rep(2, rowSums(njk)[2]))
#'
#' prior <- list(zeta = 1, K = 10)
#' pmc <- list(naprt = 5, nburn = 200, nsave = 200) # Reasonable usage
#' pmc <- list(naprt = 5, nburn = 2, nsave = 5) # Minimal usage for documentation
#' # Fit the model:
#' res <- comix(Y, C, pmc = pmc, prior = prior)
#'
#' # Relabel to resolve potential label switching issues:
#' res_relab <- relabelChain(res)
#'
#' # Generate calibrated data:
#' cal <- calibrateNoDist(res_relab)
#'
#' # Compare raw and calibrated data: (see plot in vignette)
#' # par(mfrow=c(1, 2))
#' # plot(Y, col = C, xlim = range(Y[,1]), ylim = range(Y[,2]) )
#'
#' # Get posterior estimates for the model parameters:
#' res_summary <- summarizeChain(res_relab)
#' # Check for instance, the cluster assignment labels:
#' table(res_summary$t)
#' # Indeed the same as
#' colSums(njk)
#'
#' # Or examine the skewness parameter for the non-trivial clusters:
#' res_summary$alpha[ , unique(res_summary$t)]
#' # And compare those to
#' cbind(alpha1, alpha2)
#'
#' # (see vignette for a more detailed example)
#' @export
summarizeChain = function( res ) {
chainSummary = list()
K = res$prior$K
chain = res$chain
p = ncol(res$data$Y)
J = length(unique(res$data$C))
ns = res$pmc$nsave
xi_raw = matrix(0, nrow=J, ncol=p*K)
chainSummary$xi0 = matrix(0, nrow=p,ncol=K)
chainSummary$psi = matrix(0, nrow=p,ncol=K)
Sigma_raw = matrix(0, nrow=p, ncol=p*K)
G_raw = matrix(0, nrow=p, ncol=p*K)
E_raw = matrix(0, nrow=p, ncol=p*K)
chainSummary$alpha = matrix(0, nrow=p,ncol=K)
chainSummary$W = matrix(0, nrow=J, ncol=K)
for (i in 1:ns) {
xi_raw = xi_raw + chain$xi[,,i]
chainSummary$xi0 = chainSummary$xi0 + chain$xi0[,,i]
chainSummary$psi = chainSummary$psi + chain$psi[,,i]
Sigma_raw = Sigma_raw + chain$Sigma[,,i]
G_raw = G_raw + chain$G[,,i]
E_raw = E_raw + chain$E[,,i]
chainSummary$alpha = chainSummary$alpha + chain$alpha[,,i]
chainSummary$W = chainSummary$W + chain$W[,,i]
}
chainSummary$W = chainSummary$W/ns
xi_raw = xi_raw/ns
chainSummary$xi = array(0, dim=c(J,p,K))
for (k in 1:K) {
chainSummary$xi[,,k] = xi_raw[,(1+p*(k-1)):(p*k)]
}
chainSummary$xi0 = chainSummary$xi0/ns
chainSummary$psi = chainSummary$psi/ns
Sigma_raw = Sigma_raw/ns
chainSummary$Sigma = array(0, dim=c(p,p,K))
for (k in 1:K) {
chainSummary$Sigma[,,k] = Sigma_raw[,(1+p*(k-1)):(p*k)]
}
G_raw = G_raw/ns
chainSummary$G = array(0, dim=c(p,p,K))
for (k in 1:K) {
chainSummary$G[,,k] = G_raw[,(1+p*(k-1)):(p*k)]
}
E_raw = E_raw/ns
chainSummary$E = array(0, dim=c(p,p,K))
for (k in 1:K) {
chainSummary$E[,,k] = E_raw[,(1+p*(k-1)):(p*k)]
}
chainSummary$alpha = chainSummary$alpha/ns
chainSummary$meanvec = array(0, c(J, p, K))
chainSummary$meanvec0 = matrix(0, p, K)
for (k in 1:K) {
del.om = transform_params(chainSummary$Sigma[,,k], chainSummary$alpha[,k])
for (j in 1:J) {
chainSummary$meanvec[j,,k] = chainSummary$xi[j,,k] + del.om$omega %*% del.om$delta*sqrt(2/pi)
}
chainSummary$meanvec0[,k] = chainSummary$xi0[,k] + del.om$omega %*% del.om$delta*sqrt(2/pi)
}
chainSummary$t = apply(chain$t,2,Mode)
chainSummary$eta = mean(chain$eta)
chainSummary
}
|
/scratch/gouwar.j/cran-all/cranData/COMIX/R/utilities.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
#' Loglikelihood function for CMP.
#'
#' @param x Vector of observed counts.
#' @param lambda Vector of lambda parameters.
#' @param nu Vector of nu parameters.
#' @param hybrid_tol Tolerance for truncation.
#' @param truncate_tol Tolerance for when to use approximation vs. truncation.
#' @param ymax Maximum value of y to consider.
#'
#' @details
#' The vectors \code{x}, \code{lambda}, and \code{nu} must ne the same length.
#'
#' @return Value of loglikelihood.
#'
#' @noRd
loglik_cmp <- function(x, lambda, nu, hybrid_tol, truncate_tol, ymax) {
.Call(`_COMPoissonReg_loglik_cmp`, x, lambda, nu, hybrid_tol, truncate_tol, ymax)
}
#' Density function for CMP.
#'
#' @param x Vector of density arguments.
#' @param lambda Rate parameter.
#' @param nu Dispersion parameter.
#' @param take_log Return density on the log-scale if \code{TRUE}. Otherwise,
#' return density on the original scale.
#' @param hybrid_tol Tolerance for truncation.
#' @param truncate_tol Tolerance for when to use approximation vs. truncation.
#' @param ymax Maximum value of y to consider.
#'
#' @return Density values.
#'
#' @noRd
d_cmp <- function(x, lambda, nu, take_log, normalize, hybrid_tol, truncate_tol, ymax) {
.Call(`_COMPoissonReg_d_cmp`, x, lambda, nu, take_log, normalize, hybrid_tol, truncate_tol, ymax)
}
#' CDF function for CMP.
#'
#' @param x Vector of CDF arguments.
#' @param lambda Rate parameter.
#' @param nu Dispersion parameter.
#' @param take_log Return CDF value on the log-scale if \code{TRUE}. Otherwise,
#' return value on the original scale.
#' @param hybrid_tol Tolerance for truncation.
#' @param truncate_tol Tolerance for when to use approximation vs. truncation.
#' @param ymax Maximum value of y to consider.
#'
#' @return CDF values.
#'
#' @noRd
p_cmp <- function(x, lambda, nu, hybrid_tol, truncate_tol, ymax) {
.Call(`_COMPoissonReg_p_cmp`, x, lambda, nu, hybrid_tol, truncate_tol, ymax)
}
#' Quantile function for CMP.
#'
#' @param logq Vector of probabilities on the log scale.
#' @param lambda Rate parameter.
#' @param nu Dispersion parameter.
#' @param hybrid_tol Tolerance for truncation.
#' @param truncate_tol Tolerance for when to use approximation vs. truncation.
#' @param ymax Maximum value of y to consider.
#'
#' @details
#' Operates on the log-scale for stability.
#'
#' @return Quantile values.
#'
#' @noRd
q_cmp <- function(logq, lambda, nu, hybrid_tol, truncate_tol, ymax) {
.Call(`_COMPoissonReg_q_cmp`, logq, lambda, nu, hybrid_tol, truncate_tol, ymax)
}
#' Draw variates from CMP.
#'
#' @param n Number of draws to generate.
#' @param lambda Rate parameter.
#' @param nu Dispersion parameter.
#' @param hybrid_tol Tolerance for truncation.
#' @param truncate_tol Tolerance for when to use approximation vs. truncation.
#' @param ymax Maximum value of y to consider.
#'
#' @return Draws.
#'
#' @noRd
r_cmp <- function(n, lambda, nu, hybrid_tol, truncate_tol, ymax) {
.Call(`_COMPoissonReg_r_cmp`, n, lambda, nu, hybrid_tol, truncate_tol, ymax)
}
#' Compute quantiles of a discrete / finite distribution.
#'
#' @param q Probability of the desired quantile.
#' @param cp vector of cumulative probabilities.
#'
#' @details
#' Compute a quantile for the discrete distribution with values
#' \code{0, 1, ..., k-1} and associated *cumulative* probabilities
#' \code{cp(0), cp(1), ..., cp(k-1)}. Use a bisection search in case \code{cp}
#' is a large vector. \code{q} and \code{cp} can be given on the log-scale or
#' probability scale, but they are expected to be compatible.
#'
#' @return The desired quantile.
#'
#' @noRd
q_discrete <- function(q, cp) {
.Call(`_COMPoissonReg_q_discrete`, q, cp)
}
#' Derivatives of the normalizing constant.
#'
#' @param lambda A vector of lambda parameters.
#' @param nu A vector of nu parameters.
#' @param max Maximum value of y to consider.
#'
#' @details
#' Derivatives of the normalizing constant computed by truncation.
#' \itemize{
#' \item \code{z_prodlogj} \eqn{\sum_{j=0}^{\text{max}} \log(j!) \lambda^j/((j!)^\nu)}
#' }
#' Several other functions are currently in the code but not used.
#'
#' @return Vector of results.
#'
#' @noRd
z_prodlogj <- function(lambda, nu, max) {
.Call(`_COMPoissonReg_z_prodlogj`, lambda, nu, max)
}
#' Compute normalizing constant by truncating the infinite series.
#'
#' @param lambda A vector of lambda parameters.
#' @param nu A vector of nu parameters.
#' @param tol Tolerance for truncation.
#' @param take_log Compute value on the log-scale if \code{TRUE}. Otherwise,
#' compute value on the original scale.
#' @param ymax Maximum value of y to consider.
#'
#' @return Vector of normalizing constant values.
#'
#' @noRd
z_trunc <- function(lambda, nu, tol, take_log, ymax) {
.Call(`_COMPoissonReg_z_trunc`, lambda, nu, tol, take_log, ymax)
}
#' An approximation method to compute normalizing constant (Shmueli et al,
#' JRSS-C, 2005).
#'
#' @param lambda A vector of lambda parameters.
#' @param nu A vector of nu parameters.
#' @param take_log Compute value on the log-scale if \code{TRUE}. Otherwise,
#' compute value on the original scale.
#'
#' @return Vector of normalizing constant values.
#' @references
#' Galit Shmueli, Thomas P. Minka, Joseph B. Kadane, Sharad Borle, and Peter
#' Boatwright (2005). A useful distribution for fitting discrete data: revival
#' of the Conway-Maxwell-Poisson distribution. Journal of Royal Statistical
#' Society C, 54, 127-142.
#'
#' @noRd
z_approx <- function(lambda, nu, take_log) {
.Call(`_COMPoissonReg_z_approx`, lambda, nu, take_log)
}
#' Hybrid method to compute normalizing constant.
#'
#' @param lambda A vector of lambda parameters.
#' @param nu A vector of nu parameters.
#' @param take_log Return constant on the log-scale if \code{TRUE}. Otherwise,
#' return constant on the original scale.
#' @param hybrid_tol Tolerance for when to use approximation vs. truncation.
#' @param truncate_tol Tolerance for truncation.
#' @param ymax Maximum value of y to consider.
#'
#' @details
#' Conway-Maxwell Poisson normalizing constant is
#' \deqn{
#' Z(\lambda, \nu) = \sum_{j=0}^1 { \lambda^j / (j!)^\nu }.
#' }
#' Hybrid method uses approximation approach when
#' \deqn{
#' \lambda^{-1/\nu}
#' }
#' is smaller than \code{hybrid_tol}.
#'
#' @return Vector of normalizing constant values.
#'
#' @noRd
z_hybrid <- function(lambda, nu, take_log, hybrid_tol, truncate_tol, ymax) {
.Call(`_COMPoissonReg_z_hybrid`, lambda, nu, take_log, hybrid_tol, truncate_tol, ymax)
}
#' Truncation value computed in \code{z_trunc}. This can be used by other
#' functions to compute truncated expressions.
#'
#' @param lambda A vector of lambda parameters.
#' @param nu A vector of nu parameters.
#' @param tol Tolerance for truncation.
#' @param ymax Maximum value of y to consider.
#'
#' @return Vector of truncation values used in normalizing constant
#' computation.
#'
#' @noRd
y_trunc <- function(lambda, nu, tol, ymax) {
.Call(`_COMPoissonReg_y_trunc`, lambda, nu, tol, ymax)
}
#' Loglikelihood function for ZICMP.
#'
#' @param x Vector of observed counts.
#' @param lambda Vector of lambda parameters.
#' @param nu Vector of nu parameters.
#' @param p Vector of p parameters.
#' @param hybrid_tol Tolerance for truncation.
#' @param truncate_tol Tolerance for when to use approximation vs. truncation.
#' @param ymax Maximum value of y to consider.
#'
#' @details
#' The vectors \code{x}, \code{lambda}, and \code{nu} must ne the same length.
#'
#' @return Value of loglikelihood.
#'
#' @noRd
loglik_zicmp <- function(x, lambda, nu, p, hybrid_tol, truncate_tol, ymax) {
.Call(`_COMPoissonReg_loglik_zicmp`, x, lambda, nu, p, hybrid_tol, truncate_tol, ymax)
}
#' Density function for ZICMP.
#'
#' @param x Vector of density arguments.
#' @param lambda Rate parameter.
#' @param nu Dispersion parameter.
#' @param p Zero-inflation parameter.
#' @param take_log Return density on the log-scale if \code{TRUE}. Otherwise,
#' return density on the original scale.
#' @param hybrid_tol Tolerance for truncation.
#' @param truncate_tol Tolerance for when to use approximation vs. truncation.
#' @param ymax Maximum value of y to consider.
#'
#' @return Density values.
#'
#' @noRd
d_zicmp <- function(x, lambda, nu, p, take_log, hybrid_tol, truncate_tol, ymax) {
.Call(`_COMPoissonReg_d_zicmp`, x, lambda, nu, p, take_log, hybrid_tol, truncate_tol, ymax)
}
#' Quantile function for ZICMP.
#'
#' @param logq Vector of probabilities on the log scale.
#' @param lambda Rate parameter.
#' @param nu Dispersion parameter.
#' @param p Zero-inflation parameter.
#' @param hybrid_tol Tolerance for truncation.
#' @param truncate_tol Tolerance for when to use approximation vs. truncation.
#' @param ymax Maximum value of y to consider.
#'
#' @details
#' Operates on the log-scale for stability.
#'
#' @return Quantile values.
#'
#' @noRd
q_zicmp <- function(logq, lambda, nu, p, hybrid_tol, truncate_tol, ymax) {
.Call(`_COMPoissonReg_q_zicmp`, logq, lambda, nu, p, hybrid_tol, truncate_tol, ymax)
}
|
/scratch/gouwar.j/cran-all/cranData/COMPoissonReg/R/RcppExports.R
|
#' Equidispersion Test
#'
#' Likelihood ratio test for equidispersion
#'
#' @param object a model object
#' @param ... other parameters which might be required by the model
#'
#' @details
#' A generic function for the likelihood ratio test for
#' equidispersion using the output of a fitted mode. The function invokes
#' particular methods which depend on the class of the first argument.
#'
#' @return
#' Returns the test statistic and p-value determined from a \eqn{\chi^2}
#' distribution with \eqn{d_2} degrees of freedom.
#'
#' @author Thomas Lotze
#' @name equitest
#' @export
equitest = function(object, ...)
{
UseMethod("equitest")
}
#' Leverage
#'
#' A generic function for the leverage of points used in various model
#' fitting functions. The function invokes particular methods which
#' depend on the class of the first argument.
#'
#' @param object a model object
#' @param ... other parameters which might be required by the model
#'
#' @return
#' The form of the value returned depends on the class of its argument.
#' See the documentation of the particular methods for details of what is
#' produced by that method.
#'
#' @details
#' See the documentation of the particular methods for details.
#'
#' @author Thomas Lotze
#' @name leverage
#' @export
leverage = function(object, ...)
{
UseMethod("leverage")
}
#' Estimate for dispersion parameter
#'
#' (Deprecated)
#' A generic function for the dispersion parameter estimate from the results
#' of various model fitting functions. The function invokes particular methods
#' which depend on the class of the first argument.
#'
#' @param object a model object
#' @param ... other parameters which might be required by the model
#'
#' @details
#' See the documentation of the particular methods for details.
#'
#' @return
#' The form of the value returned depends on the class of its argument. See
#' the documentation of the particular methods for details of what is
#' produced by that method.
#'
#' @rdname COMPoissonReg-deprecated
#' @seealso predict
#' @name nu
#' @export
nu = function(object, ...)
{
UseMethod("nu")
}
#' Standard deviation
#'
#' A generic function for standard deviation estimates from the results
#' of various model fitting functions. The function invokes particular
#' methods which depend on the class of the first argument.
#'
#' @param object a model object
#' @param ... other parameters which might be required by the model
#'
#' @return
#' The form of the value returned depends on the class of its argument.
#' See the documentation of the particular methods for details of what
#' is produced by that method.
#'
#' @details
#' See the documentation of the particular methods for details.
#'
#' @author Thomas Lotze
#' @name sdev
#' @export
sdev = function (object, ...)
{
UseMethod("sdev")
}
#' Parametric Bootstrap
#'
#' A generic function for the parametric bootstrap from the results of
#' various model fitting functions. The function invokes particular methods
#' which depend on the class of the first argument.
#'
#' @param object a model object
#' @param ... other parameters which might be required by the model
#' @param reps Number of bootstrap repetitions.
#' @param report.period Report progress every \code{report.period} iterations.
#'
#' @details
#' See the documentation of the particular methods for details.
#'
#' @return
#' The form of the value returned depends on the class of its argument. See
#' the documentation of the particular methods for details of what is produced by that method.
#'
#' @author Thomas Lotze
#' @name parametric.bootstrap
#' @export
parametric.bootstrap = function(object, reps = 1000, report.period = reps + 1, ...)
{
UseMethod("parametric.bootstrap")
}
|
/scratch/gouwar.j/cran-all/cranData/COMPoissonReg/R/S3methods.R
|
#' Supporting Functions for COM-Poisson Regression
#'
#' @param object object of type \code{cmp}.
#' @param x object of type \code{cmp}.
#' @param k Penalty per parameter to be used in AIC calculation.
#' @param newdata New covariates to be used for prediction.
#' @param type Specifies quantity to be computed. See details.
#' @param reps Number of bootstrap repetitions.
#' @param report.period Report progress every \code{report.period} iterations.
#' @param ... other arguments, such as \code{subset} and \code{na.action}.
#'
#' @details
#' The function \code{residuals} returns raw residuals when
#' \code{type = "raw"} and quantile residuals when
#' \code{type = "quantile"}.
#'
#' The function \code{predict} returns expected values of the outcomes,
#' eveluated at the computed estimates, when \code{type = "response"}. When
#' \code{type = "link"}, a \code{data.frame} is instead returned with
#' columns corresponding to estimates of \code{lambda} and \code{nu}.
#'
#' The function \code{coef} returns a vector of coefficient estimates in
#' the form \code{c(beta, gamma)} when \code{type = "vector"}. When
#' \code{type = "list"}, the estimates are returned as a list with named
#' elements \code{beta} and \code{gamma}.
#'
#' The \code{type} argument behaves the same for the \code{sdev} function
#' as it does for \code{coef}.
#'
#' @name glm.cmp, CMP support
NULL
#' @name glm.cmp, CMP support
#' @export
summary.cmpfit = function(object, ...)
{
n = nrow(object$X)
d1 = ncol(object$X)
d2 = ncol(object$S)
qq = d1 + d2
# We need the indices of the fixed coefficients and the ones included in
# optimization.
fixed = object$fixed
unfixed = object$unfixed
idx.par1 = seq_along(unfixed$beta)
idx.par2 = seq_along(unfixed$gamma) + length(unfixed$beta)
V = vcov(object)
est = coef(object)
# In the vector of SEs, include an NA entry if the variable was fixed
# This NA will propagate to the corresponding z-value and p-value as well.
se = rep(NA, qq)
se[unfixed$beta] = sdev(object)[idx.par1]
se[unfixed$gamma + d1] = sdev(object)[idx.par2]
z.val = est / se
p.val = 2*pnorm(-abs(z.val))
DF = data.frame(
Estimate = round(est, 4),
SE = round(se, 4),
z.value = round(z.val, 4),
p.value = sprintf("%0.4g", p.val)
)
colnames(DF) = c("Estimate", "SE", "z-value", "p-value")
rownames(DF) = c(
sprintf("X:%s", colnames(object$X)),
sprintf("S:%s", colnames(object$S))
)
# Add a column to indicate fixed components if anything is fixed
if (length(unlist(fixed)) > 0) {
fixed.beta = rep("F", d1)
fixed.gamma = rep("F", d2)
fixed.beta[fixed$beta] = "T"
fixed.gamma[fixed$gamma] = "T"
DF$Fixed = c(fixed.beta, fixed.gamma)
}
# If X, S, or W are intercept only, compute results for non-regression parameters
DF.lambda = NULL
DF.nu = NULL
# In each block below, make sure to consider only the non-fixed variables
# for the Jacobian and Hessian. If one of the intercepts was fixed, it should
# result in an SE of zero.
if (is.intercept.only(object$X) && is.zero.matrix(object$offset$x)) {
if (length(fixed$beta) > 0) {
se = 0
} else {
J = c(exp(object$beta), numeric(length(idx.par2)))
se = sqrt(t(J) %*% V %*% J)
}
est = exp(object$beta)
DF.lambda = data.frame(
Estimate = round(est, 4),
SE = round(se, 4)
)
rownames(DF.lambda) = "lambda"
}
if (is.intercept.only(object$S) && is.zero.matrix(object$offset$s)) {
if (length(fixed$gamma) > 0) {
se = 0
} else {
J = c(numeric(length(idx.par1)), exp(object$gamma))
se = sqrt(t(J) %*% V %*% J)
}
est = exp(object$gamma)
DF.nu = data.frame(
Estimate = round(est, 4),
SE = round(se, 4)
)
rownames(DF.nu) = "nu"
}
list(DF = DF, DF.lambda = DF.lambda, DF.nu = DF.nu,
n = n,
loglik = logLik(object),
aic = AIC(object),
bic = BIC(object),
optim.method = object$control$optim.method,
opt.message = object$opt.res$message,
opt.convergence = object$opt.res$convergence,
elapsed.sec = object$elapsed.sec
)
}
fitted_cmp_internal = function(X, S, beta, gamma, off.x, off.s)
{
n = nrow(X)
stopifnot(n == nrow(S))
stopifnot(n == length(off.x))
stopifnot(n == length(off.s))
stopifnot(ncol(X) == length(beta))
stopifnot(ncol(S) == length(gamma))
if (length(beta) > 0) {
lambda = as.numeric(exp(X %*% beta + off.x))
} else {
lambda = rep(0, n)
}
if (length(gamma) > 0) {
nu = as.numeric(exp(S %*% gamma + off.s))
} else {
nu = rep(0, n)
}
list(lambda = lambda, nu = nu)
}
#' @name glm.cmp, CMP support
#' @export
print.cmpfit = function(x, ...)
{
printf("CMP coefficients\n")
s = summary(x)
print(s$DF)
if (length(x$fixed$gamma) > 0) {
tt = paste(collapse = " ", c(
"Some elements of gamma were fixed.",
"Chi-squared test for equidispersion not defined."))
} else {
tt = equitest(x)
}
if (!is.null(s$DF.lambda) || !is.null(s$DF.nu)) {
printf("--\n")
printf("Transformed intercept-only parameters\n")
print(rbind(s$DF.lambda, s$DF.nu))
}
if (is.character(tt)) {
printf("--\n")
cat(paste(tt, collapse = "\n"))
printf("\n")
} else {
printf("--\n")
printf("Chi-squared test for equidispersion\n")
printf("X^2 = %0.4f, df = %d, ", tt$teststat, tt$df)
printf("p-value = %0.4e\n", tt$pvalue)
}
printf("--\n")
printf("Elapsed: %s ", format.difftime(s$elapsed.sec))
printf("Sample size: %d ", s$n)
printf("%s interface\n", x$interface)
printf("LogLik: %0.4f ", s$loglik)
printf("AIC: %0.4f ", s$aic)
printf("BIC: %0.4f ", s$bic)
printf("\n")
printf("Optimization Method: %s ", s$optim.method)
printf("Converged status: %d\n", s$opt.convergence)
printf("Message: %s\n", s$opt.message)
}
#' @name glm.cmp, CMP support
#' @export
logLik.cmpfit = function(object, ...)
{
object$loglik
}
#' @name glm.cmp, CMP support
#' @export
AIC.cmpfit= function(object, ..., k=2)
{
d = length(unlist(object$unfixed))
-2*object$loglik + 2*d
}
#' @name glm.cmp, CMP support
#' @export
BIC.cmpfit = function(object, ...)
{
n = length(object$y)
d = length(unlist(object$unfixed))
-2*object$loglik + log(n)*d
}
#' @name glm.cmp, CMP support
#' @export
coef.cmpfit = function(object, type = c("vector", "list"), ...)
{
switch(match.arg(type),
vector = c(object$beta, object$gamma),
list = list(beta = object$beta, gamma = object$gamma)
)
}
#' @name glm.cmp, CMP support
#' @export
nu.cmpfit = function(object, ...)
{
# This function is deprecated - use predict instead
.Deprecated("predict(object, type = \"link\")")
link = predict(object, type = "link")
link$nu
}
#' @name glm.cmp, CMP support
#' @export
sdev.cmpfit = function(object, type = c("vector", "list"), ...)
{
d1 = ncol(object$X)
d2 = ncol(object$S)
fixed = object$fixed
unfixed = object$unfixed
idx.par1 = seq_along(unfixed$beta)
idx.par2 = seq_along(unfixed$gamma) + length(unfixed$beta)
sd.hat = sqrt(diag(vcov(object)))
if (match.arg(type) == "vector") {
out = sd.hat
} else if (match.arg(type) == "list") {
sd.beta = rep(NA, d1)
sd.gamma = rep(NA, d2)
sd.beta[unfixed$beta] = sd.hat[idx.par1]
sd.gamma[unfixed$gamma] = sd.hat[idx.par2]
out = list(beta = sd.beta, gamma = sd.gamma)
} else {
stop("Unrecognized type")
}
return(out)
}
#' @name glm.cmp, CMP support
#' @export
vcov.cmpfit = function(object, ...)
{
# Compute the covariance via Hessian from optimizer
-solve(object$H)
}
#' @name glm.cmp, CMP support
#' @export
equitest.cmpfit = function(object, ...)
{
if ("equitest" %in% names(object)) {
return(object$equitest)
}
y = object$y
X = object$X
S = object$S
init = object$init
offset = object$offset
fixed = object$fixed
ll = object$loglik
# If any elements of gamma have been fixed, an "equidispersion" test no
# longer makes sense. Unless the values were fixed at zeroes. But let's
# avoid this complication.
if (length(fixed$gamma) > 0) {
msg = c("Some elements of gamma were fixed,",
"chi-squared test for equidispersion not defined")
stop(paste(msg, collapse = " "))
}
n = length(y)
d2 = ncol(S)
# Null model is CMP with nu determined by the offset off.s. If off.s happens
# to be zeros, this simplifies to a Poisson regression.
fit0.out = fit.cmp.reg(y, X, S, offset = offset,
init = get.init(beta = object$beta, gamma = numeric(d2), zeta = numeric(0)),
fixed = get.fixed(beta = fixed$beta, gamma = seq_len(d2), zeta = fixed$zeta),
control = object$control)
ll0 = fit0.out$loglik
teststat = -2 * (ll0 - ll)
pvalue = pchisq(teststat, df = d2, lower.tail = FALSE)
list(teststat = teststat, pvalue = pvalue, df = d2)
}
#' @name glm.cmp, CMP support
#' @export
leverage.cmpfit = function(object, ...)
{
y = object$y
n = length(y)
out = fitted_cmp_internal(object$X, object$S, object$beta, object$gamma,
object$offset$x, object$offset$s)
# 1) Some quantities corresponding to parameters lambda and nu: Normalizing
# constants, expected values, variances, and truncation values.
z.hat = ncmp(out$lambda, out$nu, control = object$control)
E.y = ecmp(out$lambda, out$nu, control = object$control)
V.y = vcmp(out$lambda, out$nu, control = object$control)
y.trunc = max(tcmp(out$lambda, out$nu, control = object$control))
# Note that z_prodlogj uses truncation, even if we would approximate z
# using the asymptotic expression using some of the given lambda and nu
# pairs. This might be something that could be improved later.
# and X matrix (in Appendix)
E.logfacty = z_prodlogj(out$lambda, out$nu, max = y.trunc) / z.hat
extravec = (E.logfacty - lgamma(y+1)) / (y - E.y)
curlyX.mat = cbind(object$X, extravec)
# 2) to compute H using equation (12) on p. 11
if (FALSE) {
# Here is a more readable version, but it creates n x n matrices so may
# have problems with large inputs.
WW = diag(V.y)
H1 = t(curlyX.mat) %*% sqrt(WW)
H2 = solve(t(curlyX.mat) %*% WW %*% curlyX.mat)
H = t(H1) %*% H2 %*% H1
diag.H = diag(H)
} else {
# This version avoids creating large matrices, but is a bit harder to read.
diag.H = numeric(n)
H1 = t(curlyX.mat * sqrt(V.y))
H2 = solve(t(curlyX.mat * V.y) %*% curlyX.mat)
for (i in 1:n) {
diag.H[i] = t(H1[,i]) %*% H2 %*% H1[,i]
}
}
return(diag.H)
}
#' @name glm.cmp, CMP support
#' @export
deviance.cmpfit = function(object, ...)
{
# Compute the COM-Poisson deviances exactly
y = object$y
X = object$X
S = object$S
# init = object$init
fixed = object$fixed
offset = object$offset
control = object$control
n = length(y)
d2 = ncol(S)
# Compute optimal log likelihood value for given nu-hat value
# beta.init = object$beta
# d1 = length(beta.init)
ll.star = numeric(n)
for (i in 1:n) {
# Maximize loglik for ith obs
glm.out = fit.cmp.reg(y[i],
X = X[i,,drop = FALSE],
S = S[i,,drop = FALSE],
init = get.init(beta = object$beta, gamma = numeric(d2)),
offset = get.offset(x = offset$x[i], s = offset$s[i], w = offset$w[i]),
fixed = get.fixed(beta = fixed$beta, gamma = seq_len(d2), zeta = fixed$zeta),
control = control)
ll.star[i] = glm.out$opt.res$value
}
# Compute exact deviances
ll = numeric(n)
out = fitted_cmp_internal(X, S, object$beta, object$gamma, offset$x, offset$s)
for (i in 1:n) {
ll[i] = dcmp(y[i], out$lambda[i], out$nu[i], log = TRUE, control = control)
}
dev = -2*(ll - ll.star)
lev = leverage(object)
cmpdev = dev / sqrt(1 - lev)
return(cmpdev)
}
#' @name glm.cmp, CMP support
#' @export
residuals.cmpfit = function(object, type = c("raw", "quantile"), ...)
{
out = fitted_cmp_internal(object$X, object$S, object$beta, object$gamma,
object$offset$x, object$offset$s)
type = match.arg(type)
if (type == "raw") {
y.hat = ecmp(out$lambda, out$nu, control = object$control)
res = object$y - y.hat
} else if (type == "quantile") {
res = rqres.cmp(object$y, lambda = out$lambda, nu = out$nu,
control = object$control)
} else {
stop("Unsupported residual type")
}
return(as.numeric(res))
}
#' @name glm.cmp, CMP support
#' @export
predict.cmpfit = function(object, newdata = NULL, type = c("response", "link"), ...)
{
if (is.null(newdata)) {
# If newdata is NULL, reuse data from model fit
X = object$X
S = object$S
off.x = object$offset$x
off.s = object$offset$s
} else if (object$interface == "formula") {
# If the model was fit with the formula interface, attempt to process
# newdata as a data.frame
newdata = as.data.frame(newdata)
# If the response was not included in newdata, add a column with zeros
response.name = all.vars(object$formula.lambda)[1]
if (is.null(newdata[[response.name]])) {
newdata[[response.name]] = 0
}
raw = formula2raw(object$formula.lambda, object$formula.nu,
object$formula.p, data = newdata, ...)
X = raw$X
S = raw$S
off.x = raw$offset$x
off.s = raw$offset$s
} else if (object$interface == "raw") {
# If the model was fit with the raw interface, attempt to process
# newdata as a list
stopifnot("COMPoissonReg.modelmatrix" %in% class(newdata))
X = newdata$X
S = newdata$S
off.x = newdata$offset$x
off.s = newdata$offset$s
} else {
stop("Don't recognize value of interface")
}
link = fitted_cmp_internal(X, S, object$beta, object$gamma, off.x, off.s)
switch(match.arg(type),
response = ecmp(link$lambda, link$nu, control = object$control),
link = data.frame(lambda = link$lambda, nu = link$nu)
)
}
#' @name glm.cmp, CMP support
#' @export
parametric.bootstrap.cmpfit = function(object, reps = 1000, report.period = reps+1, ...)
{
n = nrow(object$X)
d1 = ncol(object$X)
d2 = ncol(object$S)
qq = d1 + d2
out = matrix(NA, nrow = reps, ncol = qq)
colnames(out) = c(colnames(object$X), colnames(object$S), recursive=TRUE)
fitted.out = fitted_cmp_internal(object$X, object$S, object$beta, object$gamma,
object$offset$x, object$offset$s)
lambda.hat = fitted.out$lambda
nu.hat = fitted.out$nu
init = get.init(beta = object$beta, gamma = object$gamma)
for (r in 1:reps){
if (r %% report.period == 0) {
logger("Starting bootstrap rep %d\n", r)
}
# Generate bootstrap samples of the full dataset using MLE
y.boot = rcmp(n, lambda.hat, nu.hat, control = object$control)
# Take each of the bootstrap samples and fit model to generate bootstrap
# estimates
tryCatch({
fit.boot = fit.cmp.reg(y = y.boot, X = object$X, S = object$S,
init = init, offset = object$offset, fixed = object$fixed,
control = object$control)
out[r,] = unlist(fit.boot$theta.hat)
}, error = function(e) {
# Do nothing now; emit a warning later
})
}
cnt = sum(rowSums(is.na(out)) > 0)
if (cnt > 0) {
warning(sprintf("%d out of %d bootstrap iterations failed", cnt, reps))
}
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/COMPoissonReg/R/cmp-reg.R
|
#' COM-Poisson Distribution
#'
#' Functions for the COM-Poisson distribution.
#'
#' @param x vector of quantiles.
#' @param q vector of probabilities.
#' @param n number of observations.
#' @param lambda rate parameter.
#' @param nu dispersion parameter.
#' @param log logical; if TRUE, probabilities are returned on log-scale.
#' @param log.p logical; if TRUE, probabilities \code{p} are given as \eqn{\log(p)}.
#' @param control a \code{COMPoissonReg.control} object from \code{get.control}
#' or \code{NULL} to use global default.
#'
#' @return
#' \describe{
#' \item{dcmp}{density,}
#' \item{pcmp}{cumulative probability,}
#' \item{qcmp}{quantiles,}
#' \item{rcmp}{generate random variates,}
#' \item{ecmp}{expected value,}
#' \item{vcmp}{variance,}
#' \item{ncmp}{value of the normalizing constant, and}
#' \item{tcmp}{upper value used to compute the normalizing constant under
#' truncation method.}
#' }
#'
#' @references
#' Kimberly F. Sellers & Galit Shmueli (2010). A Flexible Regression Model for
#' Count Data. Annals of Applied Statistics, 4(2), 943-961.
#'
#' @author Kimberly Sellers
#' @keywords COM-Poisson distribution
#' @name CMP Distribution
NULL
#' @name CMP Distribution
#' @export
dcmp = function(x, lambda, nu, log = FALSE, control = NULL)
{
n = length(x)
prep = prep.zicmp(n, lambda, nu)
if (is.null(control)) {
control = getOption("COMPoissonReg.control", default = get.control())
}
stopifnot("COMPoissonReg.control" %in% class(control))
ymax = control$ymax
hybrid.tol = control$hybrid.tol
truncate.tol = control$truncate.tol
if (prep$type == "iid") {
# Independent and identically distributed case
out = d_cmp(x, prep$lambda, prep$nu, take_log = log, normalize = TRUE,
hybrid_tol = hybrid.tol, truncate_tol = truncate.tol, ymax = ymax)
} else {
# Independent but not identically distributed case
out = numeric(n)
for (i in 1:n) {
out[i] = d_cmp(x[i], prep$lambda[i], prep$nu[i], take_log = log,
normalize = TRUE, hybrid_tol = hybrid.tol,
truncate_tol = truncate.tol, ymax = ymax)
}
}
return(out)
}
#' @name CMP Distribution
#' @export
rcmp = function(n, lambda, nu, control = NULL)
{
prep = prep.zicmp(n, lambda, nu)
if (is.null(control)) {
control = getOption("COMPoissonReg.control", default = get.control())
}
stopifnot("COMPoissonReg.control" %in% class(control))
ymax = control$ymax
hybrid.tol = control$hybrid.tol
truncate.tol = control$truncate.tol
if (prep$type == "iid") {
# Independent and identically distributed case
out = r_cmp(n, prep$lambda, prep$nu, hybrid.tol, truncate.tol, ymax)
} else {
# Independent but not identically distributed case
out = numeric(n)
for (i in 1:n) {
out[i] = r_cmp(1, prep$lambda[i], prep$nu[i], hybrid.tol, truncate.tol, ymax)
}
}
return(out)
}
#' @name CMP Distribution
#' @export
pcmp = function(x, lambda, nu, control = NULL)
{
n = length(x)
prep = prep.zicmp(n, lambda, nu)
if (is.null(control)) {
control = getOption("COMPoissonReg.control", default = get.control())
}
stopifnot("COMPoissonReg.control" %in% class(control))
ymax = control$ymax
hybrid.tol = control$hybrid.tol
truncate.tol = control$truncate.tol
if (prep$type == "iid") {
# Independent and identically distributed case
out = p_cmp(x, prep$lambda, prep$nu, hybrid_tol = hybrid.tol,
truncate_tol = truncate.tol, ymax = ymax)
} else {
# Independent but not identically distributed case
out = numeric(n)
for (i in 1:n) {
out[i] = p_cmp(x[i], prep$lambda[i], prep$nu[i],
hybrid_tol = hybrid.tol, truncate_tol = truncate.tol,
ymax = ymax)
}
}
return(out)
}
#' @name CMP Distribution
#' @export
qcmp = function(q, lambda, nu, log.p = FALSE, control = NULL)
{
n = length(q)
prep = prep.zicmp(n, lambda, nu)
if (is.null(control)) {
control = getOption("COMPoissonReg.control", default = get.control())
}
stopifnot("COMPoissonReg.control" %in% class(control))
ymax = control$ymax
hybrid.tol = control$hybrid.tol
truncate.tol = control$truncate.tol
if (log.p) { lq = q } else { lq = log(q) }
# We do this check here instead of in the C++ code, because we only want
# a warning if quantiles were explicitly requested. (Not draws via rcmp,
# for example).
idx_warn = which(lq > log1p(-truncate.tol))
if (length(idx_warn) > 0) {
msg = sprintf(paste(
"At least one requested quantile was very close to 1. In",
"particular, %d of the given probabilities were greater than",
"1 - truncate_tol = exp(%g), where truncate_tol = %g.",
"Associated results may be affected by truncation. Consider",
"adjusting the controls ymax and truncate.tol or reducing logq."),
length(idx_warn), log1p(-truncate.tol), truncate.tol)
warning(msg)
}
if (prep$type == "iid") {
# Independent and identically distributed case
out = q_cmp(lq, prep$lambda, prep$nu, hybrid_tol = hybrid.tol,
truncate_tol = truncate.tol, ymax = ymax)
} else {
# Independent but not identically distributed case
out = numeric(n)
for (i in 1:n) {
out[i] = q_cmp(lq[i], prep$lambda[i], prep$nu[i],
hybrid_tol = hybrid.tol, truncate_tol = truncate.tol, ymax = ymax)
}
}
return(out)
}
#' @name CMP Distribution
#' @export
ecmp = function(lambda, nu, control = NULL)
{
# If lambda and nu are vectors, assume they are not repeats of the same
# value and do not attempt to save time if this is the case.
n = max(length(lambda), length(nu))
prep = prep.zicmp(n, lambda, nu)
if (is.null(control)) {
control = getOption("COMPoissonReg.control", default = get.control())
}
stopifnot("COMPoissonReg.control" %in% class(control))
ymax = control$ymax
hybrid.tol = control$hybrid.tol
truncate.tol = control$truncate.tol
# If the following is true, use asymptotic approximation. Otherwise use
# truncated sum.
is.approx.valid = -1/prep$nu * log(prep$lambda) < log(hybrid.tol)
out = numeric(n)
for (i in 1:n) {
if (is.approx.valid[i]) {
# Use 1st derivative of the log-normalizing constant to compute the
# expected value.
out[i] = prep$lambda[i] * grad(ncmp, prep$lambda[i],
nu = prep$nu[i], log = TRUE, control = control)
} else {
# Compute the expected value by a simple truncated sum
x.seq = seq(0, tcmp(prep$lambda[i], prep$nu[i], control = control))
out[i] = sum(x.seq * dcmp(x.seq, prep$lambda[i], prep$nu[i], control = control))
}
}
return(out)
}
#' @name CMP Distribution
#' @export
vcmp = function(lambda, nu, control = NULL)
{
# If lambda and nu are vectors, assume they are not repeats of the same
# value and do not attempt to save time if this is the case.
n = max(length(lambda), length(nu))
prep = prep.zicmp(n, lambda, nu)
if (is.null(control)) {
control = getOption("COMPoissonReg.control", default = get.control())
}
stopifnot("COMPoissonReg.control" %in% class(control))
ymax = control$ymax
hybrid.tol = control$hybrid.tol
truncate.tol = control$truncate.tol
# If the following is true, use asymptotic approximation. Otherwise use
# truncated sum.
is.approx.valid = -1/prep$nu * log(prep$lambda) < log(hybrid.tol)
# Compute expected value
ev = ecmp(lambda, nu, control = control)
out = numeric(n)
for (i in 1:n) {
if (is.approx.valid[i]) {
# Use 2nd derivative of the log-normalizing constant to compute the
# variance. The expression for dd is equal to Var(X) - E(X).
dd = prep$lambda[i]^2 * hessian(ncmp, prep$lambda[i],
nu = prep$nu[i], log = TRUE, control = control)
out[i] = dd + ev[i]
} else {
# Compute the expected value by a simple truncated sum
x.seq = seq(0, tcmp(prep$lambda[i], prep$nu[i], control = control))
f.seq = dcmp(x.seq, prep$lambda[i], prep$nu[i], control = control)
out[i] = sum(x.seq^2 * f.seq) - ev[i]^2
}
}
return(out)
}
#' @name CMP Distribution
#' @export
ncmp = function(lambda, nu, log = FALSE, control = NULL)
{
# If lambda and nu are vectors, assume they are not repeats of the same
# value and do not attempt to save time if this is the case.
n = max(length(lambda), length(nu))
prep = prep.zicmp(n, lambda, nu)
if (is.null(control)) {
control = getOption("COMPoissonReg.control", default = get.control())
}
stopifnot("COMPoissonReg.control" %in% class(control))
ymax = control$ymax
hybrid.tol = control$hybrid.tol
truncate.tol = control$truncate.tol
out = z_hybrid(prep$lambda, prep$nu, take_log = log,
hybrid_tol = hybrid.tol, truncate_tol = truncate.tol, ymax = ymax)
return(out)
}
#' @name CMP Distribution
#' @export
tcmp = function(lambda, nu, control = NULL)
{
if (is.null(control)) {
control = getOption("COMPoissonReg.control", default = get.control())
}
stopifnot("COMPoissonReg.control" %in% class(control))
ymax = control$ymax
truncate.tol = control$truncate.tol
n = max(length(lambda), length(nu))
prep = prep.zicmp(n, lambda, nu)
y_trunc(prep$lambda, prep$nu, tol = truncate.tol, ymax = ymax)
}
|
/scratch/gouwar.j/cran-all/cranData/COMPoissonReg/R/cmp.R
|
#' Couple dataset
#'
#' A dataset investigating the impact of education level and level of anxious
#' attachment on unwanted pursuit behaviors in the context of couple separation.
#'
#' @usage
#' data(couple)
#'
#' @format
#' \describe{
#' \item{UPB}{number of unwanted pursuit behavior perpetrations.}
#' \item{EDUCATION}{1 if at least bachelor's degree; 0 otherwise.}
#' \item{ANXIETY}{continuous measure of anxious attachment.}
#' }
#'
#' @references
#' Loeys, T., Moerkerke, B., DeSmet, O., Buysse, A., 2012. The analysis of
#' zero-inflated count data: Beyond zero-inflated Poisson regression. British
#' J. Math. Statist. Psych. 65 (1), 163-180.
#' @name couple
#' @docType data
"couple"
#' Freight dataset
#'
#' A set of data on airfreight breakage (breakage of ampules filled with some
#' biological substance are shipped in cartons).
#'
#' @usage
#' data(freight)
#'
#' @format
#' \describe{
#' \item{broken}{number of ampules found broken upon arrival.}
#' \item{transfers}{number of times carton was transferred from one aircraft to
#' another.}
#' }
#'
#' @references
#' Kutner MH, Nachtsheim CJ, Neter J (2003). Applied Linear Regression Models,
#' Fourth Edition. McGraw-Hill.
#'
#' @name freight
#' @docType data
"freight"
|
/scratch/gouwar.j/cran-all/cranData/COMPoissonReg/R/data.R
|
# These functions are currently not exported to users. Numerical computation
# of the FIM seems to be potentially unstable, whether computing by
# derivatives of the normalizing function, or explicitly programming the
# infinite sum expressions and truncating them. Instead, we currently
# provide variances via the Hessian from the optimizer.
# Compute the CMP information matrix using the exact expression, except
# use numerical derivatives of z functions. This avoids numerical issues
# with infinite sums.
fim.cmp = function(lambda, nu)
{
f = function(theta) {
z_hybrid(theta[1], theta[2], take_log = TRUE)
}
hess.eps = getOption("COMPoissonReg.hess.eps", default = 1e-2)
H = hess_fwd(f, c(lambda, nu), h = hess.eps)
H[1,1] = H[1,1] + ecmp(lambda, nu) / lambda^2
return(H)
}
# Compute the CMP information matrix using Monte Carlo. This avoids
# issues with numerical second derivatives.
fim.cmp.mc = function(lambda, nu, reps)
{
stopifnot(length(lambda) == 1)
stopifnot(length(nu) == 1)
grad.eps = getOption("COMPoissonReg.grad.eps", default = 1e-5)
f = function(theta, data) {
sum(dcmp(data, lambda = theta[1], nu = theta[2], log = TRUE))
}
FIM = matrix(0, 2, 2)
x = rcmp(reps, lambda, nu)
for (r in 1:reps) {
S = grad_fwd(f, x = c(lambda, nu), h = grad.eps, data = x[r])
FIM = FIM + S %*% t(S)
}
return(FIM / reps)
}
# Compute the ZICMP information matrix using the exact expression, except
# use numerical derivatives of z functions.
fim.zicmp = function(lambda, nu, p)
{
stopifnot(length(lambda) == 1)
stopifnot(length(nu) == 1)
stopifnot(length(p) == 1)
z = z_hybrid(lambda, nu)
meany = ezicmp(lambda, nu, p)
f = function(theta) {
z_hybrid(theta[1], theta[2], take_log = TRUE)
}
FIM = matrix(NA, 3, 3)
colnames(FIM) = c("lambda", "nu", "p")
rownames(FIM) = c("lambda", "nu", "p")
# The gradient and hessian of the z-like functions all have a closed
# form, but require some work to compute reliably. We have some code to
# compute the z-function itself somewhat reliably, so it's safer
# to use numerical derivatives here. We use forward derivatives
# to avoid issues at the boundary when lambda or nu is near 0.
grad.eps = getOption("COMPoissonReg.grad.eps", default = 1e-5)
hess.eps = getOption("COMPoissonReg.hess.eps", default = 1e-2)
Dlogz = grad_fwd(f, c(lambda, nu), h = grad.eps)
Hlogz = hess_fwd(f, c(lambda, nu), h = hess.eps)
dlogzdlambda = Dlogz[1]
dlogzdnu = Dlogz[2]
d2logzdlambda2 = Hlogz[1,1]
d2logzdnu2 = Hlogz[2,2]
d2logzdlambdadnu = Hlogz[1,2]
# FIM[lambda, lambda]
FIM[1,1] = (1-p)*d2logzdlambda2 - p*(1-p)*dlogzdlambda^2 / (p*(z-1)+1) + meany / lambda^2
# FIM[nu, nu]
FIM[2,2] = (1-p)*d2logzdnu2 - p*(1-p)*dlogzdnu^2 / (p*(z-1)+1)
# FIM[p, p]
FIM[3,3] = (1/z) * (z-1)^2 / (p*(z-1) + 1) + (1 - 1/z) / (1-p)
# FIM[lambda, nu]
FIM[1,2] = (1-p)*d2logzdlambdadnu - p*(1-p)*dlogzdnu*dlogzdlambda / (p*(z-1)+1)
FIM[2,1] = FIM[1,2]
# FIM[lambda, p]
FIM[1,3] = -1/(p*(z-1) + 1) * dlogzdlambda
FIM[3,1] = FIM[1,3]
# FIM[nu, p]
FIM[2,3] = -1/(p*(z-1) + 1) * dlogzdnu
FIM[3,2] = FIM[2,3]
return(FIM)
}
# Compute the ZICMP information matrix using Monte Carlo.
fim.zicmp.mc = function(lambda, nu, p, reps)
{
stopifnot(length(lambda) == 1)
stopifnot(length(nu) == 1)
stopifnot(length(p) == 1)
grad.eps = getOption("COMPoissonReg.grad.eps", default = 1e-5)
f = function(theta, data) {
sum(dzicmp(data, lambda = theta[1], nu = theta[2], p = theta[3], log = TRUE))
}
FIM = matrix(0, 3, 3)
x = rzicmp(reps, lambda, nu, p)
for (r in 1:reps) {
S = grad_fwd(f, x = c(lambda, nu, p), h = grad.eps, data = x[r])
FIM = FIM + S %*% t(S)
}
return(FIM / reps)
}
# Compute the ZICMP information matrix when parameterized by regressions.
# If reps is NULL, attempt to use the exact expression (with numerical
# derivatives). Otherwise, use Monte Carlo approximation based on that
# many reps.
fim.zicmp.reg = function(X, S, W, beta, gamma, zeta, off.x, off.s, off.w, reps = NULL)
{
n = nrow(X)
qq = ncol(X) + ncol(S) + ncol(W)
lambda = as.numeric(exp(X %*% beta + off.x))
nu = as.numeric(exp(S %*% gamma + off.s))
p = as.numeric(plogis(W %*% zeta + off.w))
FIM = matrix(0, qq, qq)
colnames(FIM) = c(
sprintf("beta%d", 1:length(beta)),
sprintf("gamma%d", 1:length(gamma)),
sprintf("zeta%d", 1:length(zeta)))
rownames(FIM) = colnames(FIM)
# Compute the FIM with respect to each (lambda, nu, p), then
# transform to the FIM of (beta, gamma, zeta)
FIM.one.list = list()
for (i in 1:n) {
if (is.null(reps)) {
FIM.one.list[[i]] = fim.zicmp(lambda[i], nu[i], p[i])
} else {
FIM.one.list[[i]] = fim.zicmp.mc(lambda[i], nu[i], p[i], reps)
}
}
idx.beta = 1:length(beta)
idx.gamma = 1:length(gamma) + length(beta)
idx.zeta = 1:length(zeta) + length(gamma) + length(beta)
# FIM[beta, beta]
D = unlist(Map(function(x) { x[1,1] }, FIM.one.list))
FIM[idx.beta, idx.beta] = t(X) %*% ((D * lambda^2) * X)
# FIM[gamma, gamma]
D = unlist(Map(function(x) { x[2,2] }, FIM.one.list))
FIM[idx.gamma, idx.gamma] = t(S) %*% ((D * nu^2) * S)
# FIM[zeta, zeta]
D = unlist(Map(function(x) { x[3,3] }, FIM.one.list))
FIM[idx.zeta, idx.zeta] = t(W) %*% ((D * p^2*(1-p)^2) * W)
# FIM[beta, gamma]
D = unlist(Map(function(x) { x[1,2] }, FIM.one.list))
FIM[idx.beta, idx.gamma] = t(X) %*% (D * lambda * nu * S)
FIM[idx.gamma, idx.beta] = t(FIM[idx.beta, idx.gamma])
# FIM[zeta, gamma]
D = unlist(Map(function(x) { x[3,2] }, FIM.one.list))
FIM[idx.zeta, idx.gamma] = t(W) %*% (D * p*(1-p) * nu * S)
FIM[idx.gamma, idx.zeta] = t(FIM[idx.zeta, idx.gamma])
# FIM[beta, zeta]
D = unlist(Map(function(x) { x[1,3] }, FIM.one.list))
FIM[idx.zeta, idx.beta] = t(W) %*% (D * lambda * p*(1-p) * X)
FIM[idx.beta, idx.zeta] = t(FIM[idx.zeta, idx.beta])
return(FIM)
}
|
/scratch/gouwar.j/cran-all/cranData/COMPoissonReg/R/fim.R
|
#' Raw Interface to COM-Poisson and Zero-Inflated COM-Poisson Regression
#'
#' Fit COM-Poisson and Zero-Inflated COM-Poisson regression using a "raw"
#' interface which bypasses the formula-driven interface of \code{glm.cmp}.
#'
#' @param y A vector of counts which represent the response .
#' @param X Design matrix for the `lambda` regression.
#' @param S Design matrix for the `nu` regression.
#' @param W Design matrix for the `p` regression.
#' @param offset A data structure that specifies offsets. See the helper
#' function \link{get.offset}.
#' @param init A data structure that specifies initial values. See the helper
#' function \link{get.init}.
#' @param fixed A data structure that specifies which coefficients should
#' remain fixed in the maximum likelihood procedure. See the helper function
#' \link{get.fixed}.
#' @param control A control data structure. See the helper function
#' \link{get.control}.
#'
#' @return
#' See the \link{glm.cmp}.
#'
#' @name glm.cmp-raw
NULL
#' Extract model elements from a formula to use with the raw interface
#'
#' @param formula.lambda regression formula linked to \code{log(lambda)}.
#' The response should be specified here.
#' @param formula.nu regression formula linked to \code{log(nu)}. The
#' default, is taken to be only an intercept.
#' @param formula.p regression formula linked to \code{logit(p)}. If NULL
#' (the default), zero-inflation term is excluded from the model.#'
#' @param data An optional data.frame with variables to be used with regression
#' formulas. Variables not found here are read from the envionment.
#' @param ... other arguments, such as \code{subset} and \code{na.action}.
#'
#' @noRd
formula2raw = function(formula.lambda, formula.nu, formula.p, data = NULL, ...)
{
# Parse formula.lambda. This one should have the response.
mf = model.frame(formula.lambda, data, ...)
y = model.response(mf)
X = model.matrix(formula.lambda, mf)
off.x = model.offset(mf)
d1 = ncol(X)
n = length(y)
weights = model.weights(mf)
if(!is.null(weights)) {
stop("weights argument is currently not supported")
}
# Parse formula.nu
mf = model.frame(formula.nu, data, ...)
S = model.matrix(formula.nu, mf)
if (nrow(S) == 0) {
# A workaround for the case where there is no context in formula.nu
# about how many observations there should be. The only way this
# seems possible is when formula.nu = ~1
S = model.matrix(y ~ 1)
}
off.s = model.offset(mf)
d2 = ncol(S)
if (is.null(off.x)) { off.x = rep(0, n) }
if (is.null(off.s)) { off.s = rep(0, n) }
# If formula.p is NULL, do CMP regression. Else do ZICMP regression
if (is.null(formula.p)) {
# Run the regression using the raw interface function
W = matrix(0, n, 0)
off.w = numeric(n)
} else {
mf = model.frame(formula.p, data, ...)
W = model.matrix(formula.p, mf)
if (nrow(W) == 0) {
# A workaround for the case where there is no context in formula.nu
# about how many observations there should be. The only way this
# seems possible is when formula.p = ~1
W = model.matrix(y ~ 1)
}
d3 = ncol(W)
off.w = model.offset(mf)
if (is.null(off.w)) { off.w = rep(0, n) }
}
offset = get.offset(x = off.x, s = off.s, w = off.w)
res = list(y = y, X = X, S = S, W = W, offset = offset)
return(res)
}
#' COM-Poisson and Zero-Inflated COM-Poisson Regression
#'
#' Fit COM-Poisson regression using maximum likelihood estimation.
#' Zero-Inflated COM-Poisson can be fit by specifying a regression for the
#' overdispersion parameter.
#'
#' @param formula.lambda regression formula linked to \code{log(lambda)}.
#' The response should be specified here.
#' @param formula.nu regression formula linked to \code{log(nu)}. The
#' default, is taken to be only an intercept.
#' @param formula.p regression formula linked to \code{logit(p)}. If NULL
#' (the default), zero-inflation term is excluded from the model.
#' @param data An optional data.frame with variables to be used with regression
#' formulas. Variables not found here are read from the envionment.
#' @param init A data structure that specifies initial values. See the helper
#' function \link{get.init}.
#' @param fixed A data structure that specifies which coefficients should
#' remain fixed in the maximum likelihood procedure. See the helper function
#' \link{get.fixed}.
#' @param control A control data structure. See the helper function
#' \link{get.control}. If \code{NULL}, a global default will be used.
#' @param ... other arguments, such as \code{subset} and \code{na.action}.
#'
#' @return
#' \code{glm.cmp} produces an object of either class \code{cmpfit} or
#' \code{zicmpfit}, depending on whether zero-inflation is used in the model.
#' From this object, coefficients and other information can be extracted.
#'
#' @details
#' The COM-Poisson regression model is
#' \deqn{
#' y_i \sim \rm{CMP}(\lambda_i, \nu_i), \;\;\;
#' \log \lambda_i = \bm{x}_i^\top \beta, \;\;\;
#' \log \nu_i = \bm{s}_i^\top \gamma.
#' }{
#' y_i ~ CMP(lambda_i, nu_i),
#' log lambda_i = x_i^T beta,
#' log nu_i = s_i^T gamma.
#' }
#'
#' The Zero-Inflated COM-Poisson regression model assumes that \eqn{y_i} is 0
#' with probability \eqn{p_i} or \eqn{y_i^*} with probability \eqn{1 - p_i},
#' where
#' \deqn{
#' y_i^* \sim \rm{CMP}(\lambda_i, \nu_i), \;\;\;
#' \log \lambda_i = \bm{x}_i^\top \beta, \;\;\;
#' \log \nu_i = \bm{s}_i^\top \gamma, \;\;\;
#' \rm{logit} \, p_i = \bm{w}_i^\top \zeta.
#' }{
#' y_i^* ~ CMP(lambda_i, nu_i),
#' log lambda_i = x_i^T beta,
#' log nu_i = s_i^T gamma,
#' logit p_i = w_i^T zeta.
#' }
#'
#' @references
#' Kimberly F. Sellers & Galit Shmueli (2010). A Flexible Regression Model for
#' Count Data. Annals of Applied Statistics, 4(2), 943-961.
#'
#' Kimberly F. Sellers and Andrew M. Raim (2016). A Flexible Zero-Inflated Model
#' to Address Data Dispersion. Computational Statistics and Data Analysis, 99,
#' 68-80.
#'
#' @author Kimberly Sellers, Thomas Lotze, Andrew Raim
#'
#' @export
glm.cmp = function(formula.lambda, formula.nu = ~ 1, formula.p = NULL,
data = NULL, init = NULL, fixed = NULL, control = NULL, ...)
{
raw = formula2raw(formula.lambda, formula.nu, formula.p, data, ...)
d3 = ncol(raw$W)
if (d3 > 0) {
res = glm.zicmp.raw(y = raw$y, X = raw$X, S = raw$S, W = raw$W,
offset = raw$offset, init = init, fixed = fixed, control = control)
} else {
res = glm.cmp.raw(y = raw$y, X = raw$X, S = raw$S, offset = raw$offset,
init = init, fixed = fixed, control = control)
}
# Add formula-specific things to return object
res$interface = "formula"
res$formula.lambda = formula.lambda
res$formula.nu = formula.nu
res$formula.p = formula.p
return(res)
}
#' @name glm.cmp-raw
#' @export
glm.cmp.raw = function(y, X, S, offset = NULL, init = NULL, fixed = NULL, control = NULL)
{
# Get dimensions
n = length(y)
d1 = ncol(X)
d2 = ncol(S)
# Make sure design matrices have column names
if (is.null(colnames(X))) { colnames(X) = seq_len(d1) }
if (is.null(colnames(S))) { colnames(S) = seq_len(d2) }
# Initialize NULL arguments
if (is.null(offset)) { offset = get.offset.zero(n) }
if (is.null(init)) { init = get.init.zero(d1, d2) }
if (is.null(fixed)) { fixed = get.fixed() }
if (is.null(control)) {
control = getOption("COMPoissonReg.control", default = get.control())
}
# Fit the CMP regression model
fit.out = fit.cmp.reg(y, X, S, init = init, offset = offset, fixed = fixed,
control = control)
# Construct return value
res = list(
y = y,
X = X,
S = S,
init = init,
offset = offset,
beta = fit.out$theta.hat$beta,
gamma = fit.out$theta.hat$gamma,
H = fit.out$H,
loglik = fit.out$loglik,
opt.res = fit.out$opt.res,
control = fit.out$control,
elapsed.sec = fit.out$elapsed.sec,
fixed = fit.out$fixed,
unfixed = fit.out$unfixed,
interface = "raw"
)
attr(res, "class") = c("cmpfit", attr(res, "class"))
# Add the equidispersion test if no elements of gamma are fixed
if (length(fixed$gamma) == 0) {
res$equitest = equitest(res)
}
return(res)
}
#' @name glm.cmp-raw
#' @export
glm.zicmp.raw = function(y, X, S, W, offset = NULL, init = NULL, fixed = NULL, control = NULL)
{
# Get dimensions
n = length(y)
d1 = ncol(X)
d2 = ncol(S)
d3 = ncol(W)
# Make sure design matrices have column names
if (is.null(colnames(X))) { colnames(X) = seq_len(d1) }
if (is.null(colnames(S))) { colnames(S) = seq_len(d2) }
if (is.null(colnames(W))) { colnames(W) = seq_len(d3) }
# Initialize NULL arguments
if (is.null(offset)) { offset = get.offset.zero(n) }
if (is.null(init)) { init = get.init.zero(d1, d2, d3) }
if (is.null(fixed)) { fixed = get.fixed() }
if (is.null(control)) {
control = getOption("COMPoissonReg.control", default = get.control())
}
# Fit the ZICMP regression model
fit.out = fit.zicmp.reg(y, X, S, W, init = init, offset = offset,
fixed = fixed, control = control)
# Construct return value
res = list(
y = y,
X = X,
S = S,
W = W,
init = init,
offset = offset,
beta = fit.out$theta.hat$beta,
gamma = fit.out$theta.hat$gamma,
zeta = fit.out$theta.hat$zeta,
H = fit.out$H,
loglik = fit.out$loglik,
opt.res = fit.out$opt.res,
control = fit.out$control,
elapsed.sec = fit.out$elapsed.sec,
fixed = fit.out$fixed,
unfixed = fit.out$unfixed,
interface = "raw"
)
attr(res, "class") = c("zicmpfit", attr(res, "class"))
# Add the equidispersion test if no elements of gamma are fixed
if (length(fixed$gamma) == 0) {
res$equitest = equitest(res)
}
return(res)
}
|
/scratch/gouwar.j/cran-all/cranData/COMPoissonReg/R/glm.R
|
.onAttach = function(libname, pkgname) {
options(COMPoissonReg.control = get.control())
}
#' Construct an object that specifies which indices of coefficients should
#' remain fixed in maximum likelihood computation.
#'
#' @param beta Vector of indices of \code{beta} to keep fixed.
#' @param gamma Vector of indices of \code{gamma} to keep fixed.
#' @param zeta Vector of indices of \code{zeta} to keep fixed.
#'
#' @details
#' Arguments are expected to be vectors of integers. These are interpreted as
#' the indices to keep fixed during optimization. For example,
#' \code{beta = c(1L, 1L, 2L)} indicates that the first and second elements of
#' \code{beta} should remain fixed. Note that duplicate indices are ignored.
#' The default value is the empty vector \code{integer(0)}, which requests that
#' no elements of the given coefficient vector should be fixed.
#'
#' @return List of vectors indicating fixed indices.
#' @export
get.fixed = function(beta = integer(0), gamma = integer(0), zeta = integer(0))
{
stopifnot(is.integer(beta))
stopifnot(is.integer(gamma))
stopifnot(is.integer(zeta))
out = list(
beta = sort(unique(beta)),
gamma = sort(unique(gamma)),
zeta = sort(unique(zeta))
)
class(out) = "COMPoissonReg.fixed"
return(out)
}
#' Construct initial values for coefficients with zeros.
#'
#' @param d1 Dimension of \code{beta}.
#' @param d2 Dimension of \code{gamma}.
#' @param d3 Dimension of \code{zeta}.
#'
#' @return List of initial value terms containing all zeros.
#' @export
get.init.zero = function(d1 = 0, d2 = 0, d3 = 0)
{
get.init(beta = numeric(d1), gamma = numeric(d2), zeta = numeric(d3))
}
#' Construct initial values for coefficients.
#'
#' @param beta Vector for \code{beta}.
#' @param gamma Vector for \code{gamma}.
#' @param zeta Vector for \code{zeta}.
#'
#' @details
#' The default value \code{NULL} is interpreted as an empty vector, so that the
#' given component is absent from the model.
#'
#' @return List of initial value terms.
#' @export
get.init = function(beta = NULL, gamma = NULL, zeta = NULL)
{
if (is.null(beta)) { beta = numeric(0) }
if (is.null(gamma)) { gamma = numeric(0) }
if (is.null(zeta)) { zeta = numeric(0) }
out = list(beta = beta, gamma = gamma, zeta = zeta)
class(out) = "COMPoissonReg.init"
return(out)
}
#' Construct zero values for offsets.
#'
#' @param n Number of observations.
#'
#' @return List of offset terms containing all zeros.
#' @export
get.offset.zero = function(n)
{
get.offset(x = numeric(n), s = numeric(n), w = numeric(n))
}
#' Construct values for offsets.
#'
#' @param x Vector of offsets to go with \code{X} matrix.
#' @param s Vector of offsets to go with \code{S} matrix.
#' @param w Vector of offsets to go with \code{W} matrix.
#'
#' @details
#' The default value \code{NULL} is interpreted as a vector of zeros. At least
#' one component must be non-NULL so that the dimension can be determined.
#'
#' @return List of offset terms.
#' @export
get.offset = function(x = NULL, s = NULL, w = NULL)
{
lengths = c(length(x), length(s), length(w))
# At least one offset should be non-null. Set n to be the first entry
stopifnot(!is.null(lengths))
n = max(lengths)
if (is.null(x)) { x = numeric(n) }
if (is.null(s)) { s = numeric(n) }
if (is.null(w)) { w = numeric(n) }
stopifnot(n == length(x))
stopifnot(n == length(s))
stopifnot(n == length(w))
out = list(x = x, s = s, w = s)
class(out) = "COMPoissonReg.offset"
return(out)
}
#' Construct a control object to pass additional arguments to a number of
#' functions in the package.
#'
#' @param ymax Truncate counts to maximum value of \code{y}.
#' @param optim.method Optimization method for maximum likelihood. See the
#' \code{method} argument in \link[stats]{optim}.
#' @param optim.control \code{control} argument for \link[stats]{optim}.
#' @param hybrid.tol Tolerance to decide when to use truncation method versus
#' approximation method to compute quantities based on the normalizing constant.
#' See details.
#' @param truncate.tol Tolerance for truncation method. See details.
#'
#' @details
#' A hybrid method is used throughout the package to compute the CMP normalizing
#' constant and related quantities. When \eqn{\lambda^{-1/\nu}} is smaller than
#' \code{hybrid.tol}, an asymptotic approximation is used; otherwise, infinite
#' series are truncated to finite summations. More information is given in the
#' \code{COMPoissonReg} vignette.
#'
#' The element \code{ymax} protects against very long computations. Users
#' should beware when increasing this significantly beyond the default, as it
#' may result in a session which needs to be terminated.
#'
#' @return List of controls.
#' @export
get.control = function(ymax = 1e6, optim.method = 'L-BFGS-B',
optim.control = list(maxit = 150), hybrid.tol = 1e-2, truncate.tol = 1e-6)
{
out = list(ymax = ymax, optim.method = optim.method,
optim.control = optim.control, hybrid.tol = hybrid.tol,
truncate.tol = truncate.tol)
class(out) = "COMPoissonReg.control"
return(out)
}
#' Construct model matrices and offsets for CMP/ZICMP regression
#'
#' @param X An \code{X} matrix to use with \code{beta}.
#' @param S An \code{S} matrix to use with \code{gamma}.
#' @param W A \code{W} matrix to use with \code{zeta}.
#' @param offset An offset object. See helper function \link{get.offset}.
#'
#' @return List of model matrix terms.
#' @export
get.modelmatrix = function(X = NULL, S = NULL, W = NULL, offset = NULL)
{
nrows = c(nrow(X), nrow(S), nrow(W))
# At least one design matrix should be non-null. Set n to be the first entry
stopifnot(!is.null(nrows))
n = head(nrows, 1)
if (is.null(X)) { X = matrix(0, n, 0) }
if (is.null(S)) { S = matrix(0, n, 0) }
if (is.null(W)) { W = matrix(0, n, 0) }
stopifnot(n == nrow(X))
stopifnot(n == nrow(S))
stopifnot(n == nrow(W))
if (is.null(offset)) {
offset = get.offset.zero(n)
}
stopifnot("COMPoissonReg.offset" %in% class(offset))
out = list(X = X, S = S, W = W, offset = offset)
class(out) = "COMPoissonReg.modelmatrix"
return(out)
}
#' Prepare lambda, nu, and p in vector form for use with CMP/ZICMP distribution
#' functions.
#'
#' @param n Number of observations
#' @param lambda The rate parameter: scalar or vector of length n
#' @param nu The dispersion parameter: scalar or vector of length n
#' @param p The zero-inflation parameter: scalar or vector of length n
#'
#' @details
#' Extend lambda, nu, and p vectors to be compatible lengths. If all are length
#' 1, do not extend them - this is a special case which is handled more
#' efficiently. Also make sure parameters are in the right space
#'
#' @noRd
prep.zicmp = function(n, lambda, nu, p = 0)
{
L = max(length(lambda), length(nu), length(p))
stopifnot(all(lambda >= 0))
stopifnot(all(nu >= 0))
stopifnot(all(p >= 0 & p <= 1))
if (n > 1 && L > 1) { stopifnot(n == L) }
if (length(lambda) == 1 && L > 1) { lambda = rep(lambda, L) }
if (length(nu) == 1 && L > 1) { nu = rep(nu, L) }
if (length(p) == 1 && L > 1) { p = rep(p, L) }
if (L > 1) { type = "indep" } else { type = "iid" }
list(lambda = lambda, nu = nu, p = p, type = type)
}
|
/scratch/gouwar.j/cran-all/cranData/COMPoissonReg/R/helpers.R
|
fit.zicmp.reg = function(y, X, S, W, init, offset, fixed, control)
{
start = Sys.time()
d1 = ncol(X)
d2 = ncol(S)
d3 = ncol(W)
n = length(y)
qq = d1 + d2 + d3
stopifnot("COMPoissonReg.init" %in% class(init))
stopifnot("COMPoissonReg.offset" %in% class(offset))
stopifnot("COMPoissonReg.fixed" %in% class(fixed))
stopifnot("COMPoissonReg.control" %in% class(control))
# Make sure dimensions match up
stopifnot(n == nrow(X))
stopifnot(n == nrow(S))
stopifnot(n == nrow(W))
stopifnot(n == length(offset$x))
stopifnot(n == length(offset$s))
stopifnot(n == length(offset$w))
# Make sure fixed indices are between 1 and the corresponding dimension
stopifnot(all(fixed$beta %in% seq_len(d1)))
stopifnot(all(fixed$gamma %in% seq_len(d2)))
stopifnot(all(fixed$zeta %in% seq_len(d3)))
# Make sure initial values have the correct dimension
if (d1 != length(init$beta)) {
msg = sprintf("Length %d of init$beta is not equal to length %d of beta",
length(init$beta), d1)
stop(msg)
}
if (d2 != length(init$gamma)) {
msg = sprintf("Length %d of init$gamma is not equal to length %d of gamma",
length(init$gamma), d2)
stop(msg)
}
if (d3 != length(init$zeta)) {
msg = sprintf("Length %d of init$zeta is not equal to length %d of zeta",
length(init$zeta), d3)
stop(msg)
}
# Get settings from control
optim.method = control$optim.method
optim.control = control$optim.control
hybrid.tol = control$hybrid.tol
truncate.tol = control$truncate.tol
ymax = control$ymax
# "par" represents the vector of optimization variables; it contains only
# coefficients which have not been fixed.
#
# "theta" is a list whose three elements represent the three vectors of
# coefficients; these include fixed variables in addition to optimization
# variables. Fixed variables are assumed to be set to their initial values.
unfixed = get.fixed(
beta = setdiff(seq_len(d1), fixed$beta),
gamma = setdiff(seq_len(d2), fixed$gamma),
zeta = setdiff(seq_len(d3), fixed$zeta)
)
idx.par1 = seq_along(unfixed$beta)
idx.par2 = seq_along(unfixed$gamma) + length(unfixed$beta)
idx.par3 = seq_along(unfixed$zeta) + length(unfixed$beta) + length(unfixed$gamma)
# The following functions transform back and forth between the "par" and
# "theta" representations.
par2theta = function(par) {
beta = rep(NA, d1)
beta[fixed$beta] = init$beta[fixed$beta]
beta[unfixed$beta] = par[idx.par1]
gamma = rep(NA, d2)
gamma[fixed$gamma] = init$gamma[fixed$gamma]
gamma[unfixed$gamma] = par[idx.par2]
zeta = rep(NA, d3)
zeta[fixed$zeta] = init$zeta[fixed$zeta]
zeta[unfixed$zeta] = par[idx.par3]
list(beta = beta, gamma = gamma, zeta = zeta)
}
theta2par = function(theta) {
c(theta$beta[unfixed$beta],
theta$gamma[unfixed$gamma],
theta$zeta[unfixed$zeta])
}
loglik = function(par) {
theta = par2theta(par)
out = fitted_zicmp_internal(X, S, W, theta$beta, theta$gamma,
theta$zeta, offset$x, offset$s, offset$w)
loglik_zicmp(y, out$lambda, out$nu, out$p, hybrid.tol, truncate.tol, ymax)
}
if (!is.null(optim.control$fnscale)) {
warning("optim.control$fnscale disregarded and taken as -1")
}
optim.control$fnscale = -1
par.init = theta2par(init)
res = optim(par.init, loglik, method = optim.method,
control = optim.control, hessian = TRUE)
# theta includes fixed values as well as optimization results.
theta.hat = par2theta(res$par)
names(theta.hat$beta) = sprintf("X:%s", colnames(X))
names(theta.hat$gamma) = sprintf("S:%s", colnames(S))
names(theta.hat$zeta) = sprintf("W:%s", colnames(W))
# The Hessian from the optimizer only has entries corresponding to the
# optimization variables. To apply labels from the design matrices, we
# must pick out the non-fixed variables from each.
H = res$hessian
colnames(H) = rownames(H) = c(
sprintf("X:%s", colnames(X)[unfixed$beta]),
sprintf("S:%s", colnames(S)[unfixed$gamma]),
sprintf("W:%s", colnames(W)[unfixed$zeta]))
loglik = res$value
elapsed.sec = as.numeric(Sys.time() - start, units = "secs")
res = list(theta.hat = theta.hat, H = H, opt.res = res,
control = control, elapsed.sec = elapsed.sec, loglik = loglik, n = n,
fixed = fixed, unfixed = unfixed)
return(res)
}
# This is just provided as a convenience to call fit.zicmp.reg with some dummy
# values.
fit.cmp.reg = function(y, X, S, init, offset, fixed, control)
{
n = length(y)
W = matrix(NA, n, 0)
init$zeta = numeric(0)
offset$w = numeric(n)
fixed$zeta = integer(0)
fit.zicmp.reg(y, X, S, W, init, offset, fixed, control)
}
|
/scratch/gouwar.j/cran-all/cranData/COMPoissonReg/R/mle.R
|
#' Estimate parameters for COM-Poisson regression
#'
#' This package offers the ability to compute the parameter estimates
#' for a COM-Poisson or zero-inflated (ZI) COM-Poisson regression and
#' associated standard errors. This package also provides a hypothesis
#' test for determining statistically significant data dispersion, and
#' other model diagnostics.
#'
#' @details
#' This package offers the ability to compute COM-Poisson parameter
#' estimates and associated standard errors for a regular regression
#' model or a zero-inflated regression model (via the \code{glm.cmp}
#' function).
#'
#' Further, the user can perform a hypothesis test to determine the
#' statistically significant need for using COM-Poisson regression
#' to model the data. The test addresses the matter of statistically
#' significant dispersion.
#'
#' The main order of functions for COM-Poisson regression is as follows:
#' \enumerate{
#' \item Compute Poisson estimates (using \code{glm} for Poisson regression
#' or \code{pscl} for ZIP regression).
#' \item Use Poisson estimates as starting values to determine COM-Poisson
#' estimates (using \code{glm.cmp}).
#' \item Compute associated standard errors (using \code{sdev} function).
#' }
#'
#' From here, there are many ways to proceed, so order is irrelevant:
#' \itemize{
#' \item Perform a hypothesis test to assess for statistically significant
#' dispersion (using \code{equitest} or \code{parametric.bootstrap}).
#' \item Compute leverage (using leverage) and deviance (using deviance).
#' \item Predict the outcome for new examples, using predict.
#' }
#'
#' The package also supports fitting of the zero-inflated COM-Poisson model
#' (ZICMP). Most of the tools available for COM-Poisson are also available
#' for ZICMP.
#'
#' As of version 0.5.0 of this package, a hybrid method is used to compute
#' the normalizing constant \eqn{z(\lambda, \nu)} for the COM-Poisson density.
#' A closed-form approximation (Shmueli et al, 2005; Gillispie & Green, 2015)
#' to the exact sum is used if the given \eqn{\lambda} is sufficiently large
#' and \eqn{\nu} is sufficiently small. Otherwise, an exact summation is used,
#' except that the number of terms is truncated to meet a given accuracy.
#' Previous versions of the package used simple truncation (defaulting to 100
#' terms), but this was found to be inaccurate in some settings.
#'
#' See the package vignette for a more comprehensive guide on package use and
#' explanations of the computations.
#'
#' @author Kimberly Sellers, Thomas Lotze, Andrew M. Raim
#'
#' @references
#' Steven B. Gillispie & Christopher G. Green (2015) Approximating the
#' Conway-Maxwell-Poisson distribution normalization constant, Statistics,
#' 49:5, 1062-1073.
#'
#' Kimberly F. Sellers & Galit Shmueli (2010). A Flexible Regression Model for
#' Count Data. Annals of Applied Statistics, 4(2), 943-961.
#'
#' Kimberly F. Sellers and Andrew M. Raim (2016). A Flexible Zero-Inflated
#' Model to Address Data Dispersion. Computational Statistics and Data
#' Analysis, 99, 68-80.
#'
#' Galit Shmueli, Thomas P. Minka, Joseph B. Kadane, Sharad Borle, and Peter
#' Boatwright (2005). A useful distribution for fitting discrete data: revival
#' of the Conway-Maxwell-Poisson distribution. Journal of Royal Statistical
#' Society C, 54, 127-142.
#'
#' @name COMPoissonReg-package
#' @useDynLib COMPoissonReg, .registration = TRUE
#' @import Rcpp
#' @import stats
#' @importFrom utils head
#' @importFrom numDeriv grad hessian
#' @docType package
NULL
#' Package options
#'
#' Global options used by the COMPoissonReg package.
#'
#' @details
#' \itemize{
#' \item \code{getOption("COMPoissonReg.control")}
#' }
#'
#' @param COMPoissonReg.control A default control data structure for the
#' package. See the helper function \link{get.control} for a description of
#' contents.
#'
#' @name COMPoissonReg-options
NULL
|
/scratch/gouwar.j/cran-all/cranData/COMPoissonReg/R/package.R
|
rqres.zicmp = function(y, lambda, nu, p, control = NULL)
{
F = function(y) {
pzicmp(y, lambda, nu, p, control = control)
}
rqres(y, F)
}
rqres.cmp = function(y, lambda, nu, control = NULL)
{
F = function(y) {
pcmp(y, lambda, nu, control = control)
}
rqres(y, F)
}
rqres = function(y, F, eps = 1e-6)
{
n = length(y)
FL = F(y - eps)
FU = F(y)
u = runif(n, min = FL, max = FU)
qres = qnorm(u)
return(qres)
}
|
/scratch/gouwar.j/cran-all/cranData/COMPoissonReg/R/rqres.R
|
format.difftime = function(x) {
s = as.numeric(x, units = "secs")
dd = floor(s / (60^2 * 24))
dd.resid = s / (60^2 * 24) - dd
hh = floor(24*dd.resid)
hh.resid = 24*dd.resid - floor(24*dd.resid)
mm = floor(60*hh.resid)
mm.resid = 60*hh.resid - floor(60*hh.resid)
ss = floor(60*mm.resid)
if (dd > 0) {
fmt = sprintf("%02dd:%02dh:%02dm:%02ds", dd, hh, mm, ss)
} else if (hh > 0) {
fmt = sprintf("%02dh:%02dm:%02ds", hh, mm, ss)
} else if (mm > 0) {
fmt = sprintf("%02dm:%02ds", mm, ss)
} else {
fmt = sprintf("%0.2f sec", s)
}
return(fmt)
}
printf = function(msg, ...) {
cat(sprintf(msg, ...))
}
logger = function(msg, ...)
{
sys.time = as.character(Sys.time())
cat(sys.time, "-", sprintf(msg, ...))
}
grad_fwd = function(f, x, h = 1e-5, ...) {
k = length(x)
eye = diag(1, k)
res = numeric(k)
fx = f(x, ...)
for (j in 1:k) {
res[j] = ( f(x + h * eye[,j], ...) - fx ) / h
}
return(res)
}
hess_fwd = function(f, x, h = 1e-5, ...) {
k = length(x)
eye = diag(1, k)
H = matrix(NA, k, k)
fx = f(x, ...)
fx.eps = numeric(k)
for (j in 1:k) {
fx.eps[j] = f(x + h * eye[,j], ...)
}
for (j in 1:k) {
for (l in 1:k) {
num = f(x + h * eye[,j] + h * eye[,l], ...) -
fx.eps[l] - fx.eps[j] + fx
H[j,l] = num / h^2
}
}
(H + t(H)) / 2
}
is.zero.matrix = function(X, eps = 1e-12)
{
all(abs(X) < eps)
}
is.intercept.only = function(X, eps = 1e-12)
{
n = length(X)
all(dim(X) == c(n,1)) & is.zero.matrix(X-1, eps = eps)
}
|
/scratch/gouwar.j/cran-all/cranData/COMPoissonReg/R/util.R
|
#' Supporting Functions for ZICMP Regression
#'
#' @param object object of type \code{zicmp}.
#' @param x object of type \code{zicmp}.
#' @param k Penalty per parameter to be used in AIC calculation.
#' @param newdata New covariates to be used for prediction.
#' @param type Specifies quantity to be computed. See details.
#' @param reps Number of bootstrap repetitions.
#' @param report.period Report progress every \code{report.period} iterations.
#' @param ... other arguments, such as \code{subset} and \code{na.action}.
#'
#' @details
#' The function \code{residuals} returns raw residuals when
#' \code{type = "raw"} and quantile residuals when
#' \code{type = "quantile"}.
#'
#' The function \code{predict} returns expected values of the outcomes,
#' eveluated at the computed estimates, when \code{type = "response"}. When
#' \code{type = "link"}, a \code{data.frame} is instead returned with
#' columns corresponding to estimates of \code{lambda}, \code{nu}, and
#' \code{p}.
#'
#' The function \code{coef} returns a vector of coefficient estimates in
#' the form \code{c(beta, gamma, zeta)} when \code{type = "vector"}. When
#' \code{type = "list"}, the estimates are returned as a list with named
#' elements \code{beta} and \code{gamma}, and \code{zeta}.
#'
#' The \code{type} argument behaves the same for the \code{sdev} function
#' as it does for \code{coef}.
#'
#' @name glm.cmp, ZICMP support
NULL
#' @name glm.cmp, ZICMP support
#' @export
summary.zicmpfit = function(object, ...)
{
n = nrow(object$X)
d1 = ncol(object$X)
d2 = ncol(object$S)
d3 = ncol(object$W)
qq = d1 + d2 + d3
# We need the indices of the fixed coefficients and the ones included in
# optimization.
fixed = object$fixed
unfixed = object$unfixed
idx.par1 = seq_along(unfixed$beta)
idx.par2 = seq_along(unfixed$gamma) + length(unfixed$beta)
idx.par3 = seq_along(unfixed$zeta) + length(unfixed$beta) + length(unfixed$gamma)
V = vcov(object)
est = coef(object)
# In the vector of SEs, include an NA entry if the variable was fixed
# This NA will propagate to the corresponding z-value and p-value as well.
se = rep(NA, qq)
se[unfixed$beta] = sdev(object)[idx.par1]
se[unfixed$gamma + d1] = sdev(object)[idx.par2]
se[unfixed$zeta + d1 + d2] = sdev(object)[idx.par3]
z.val = est / se
p.val = 2*pnorm(-abs(z.val))
qq = length(est)
DF = data.frame(
Estimate = round(est, 4),
SE = round(se, 4),
z.value = round(z.val, 4),
p.value = sprintf("%0.4g", p.val)
)
colnames(DF) = c("Estimate", "SE", "z-value", "p-value")
rownames(DF) = c(
sprintf("X:%s", colnames(object$X)),
sprintf("S:%s", colnames(object$S)),
sprintf("W:%s", colnames(object$W))
)
# Add a column to indicate fixed components if anything is fixed
if (length(unlist(fixed)) > 0) {
fixed.beta = rep("F", d1)
fixed.gamma = rep("F", d2)
fixed.zeta = rep("F", d3)
fixed.beta[fixed$beta] = "T"
fixed.gamma[fixed$gamma] = "T"
fixed.zeta[fixed$zeta] = "T"
DF$Fixed = c(fixed.beta, fixed.gamma, fixed.zeta)
}
# If X, S, or W are intercept only, compute results for non-regression parameters
# Exclude offsets from these calculations
DF.lambda = NULL
DF.nu = NULL
DF.p = NULL
# In each block below, make sure to consider only the non-fixed variables
# for the Jacobian and Hessian. If one of the intercepts was fixed, it should
# result in an SE of zero.
if (is.intercept.only(object$X) && is.zero.matrix(object$offset$x)) {
if (length(fixed$beta) > 0) {
se = 0
} else {
J = c(exp(object$beta), numeric(length(idx.par2)), numeric(length(idx.par3)))
se = sqrt(t(J) %*% V %*% J)
}
est = exp(object$beta)
DF.lambda = data.frame(
Estimate = round(est, 4),
SE = round(se, 4)
)
rownames(DF.lambda) = "lambda"
}
if (is.intercept.only(object$S) && is.zero.matrix(object$offset$s)) {
if (length(fixed$gamma) > 0) {
se = 0
} else {
J = c(numeric(length(idx.par1)), exp(object$gamma), numeric(rep(length(idx.par3))))
se = sqrt(t(J) %*% V %*% J)
}
est = exp(object$gamma)
DF.nu = data.frame(
Estimate = round(est, 4),
SE = round(se, 4) )
rownames(DF.nu) = "nu"
}
if (is.intercept.only(object$W) && is.zero.matrix(object$offset$w)) {
if (length(fixed$zeta) > 0) {
se = 0
} else {
J = c(numeric(length(idx.par1)), numeric(length(idx.par2)), dlogis(object$zeta))
se = sqrt(t(J) %*% V %*% J)
}
est = plogis(object$zeta)
DF.p = data.frame(
Estimate = round(est, 4),
SE = round(se, 4)
)
rownames(DF.p) = "p"
}
list(DF = DF, DF.lambda = DF.lambda, DF.nu = DF.nu, DF.p = DF.p,
n = length(object$y),
loglik = logLik(object),
aic = AIC(object),
bic = BIC(object),
optim.method = object$control$optim.method,
opt.message = object$opt.res$message,
opt.convergence = object$opt.res$convergence,
elapsed.sec = object$elapsed.sec
)
}
fitted_zicmp_internal = function(X, S, W, beta, gamma, zeta, off.x, off.s, off.w)
{
n = nrow(X)
stopifnot(n == nrow(S))
stopifnot(n == nrow(W))
stopifnot(n == length(off.x))
stopifnot(n == length(off.s))
stopifnot(n == length(off.w))
stopifnot(ncol(X) == length(beta))
stopifnot(ncol(S) == length(gamma))
stopifnot(ncol(W) == length(zeta))
if (length(beta) > 0) {
lambda = as.numeric(exp(X %*% beta + off.x))
} else {
lambda = rep(0, n)
}
if (length(gamma) > 0) {
nu = as.numeric(exp(S %*% gamma + off.s))
} else {
nu = rep(0, n)
}
if (length(zeta) > 0) {
p = as.numeric(plogis(W %*% zeta + off.w))
} else {
p = rep(0, n)
}
list(lambda = lambda, nu = nu, p = p)
}
#' @name glm.cmp, ZICMP support
#' @export
print.zicmpfit = function(x, ...)
{
printf("ZICMP coefficients\n")
s = summary(x)
print(s$DF)
if (length(x$fixed$gamma) > 0) {
tt = paste(collapse = " ", c(
"Some elements of gamma were fixed.",
"Chi-squared test for equidispersion not defined."))
} else {
tt = equitest(x)
}
if (!is.null(s$DF.lambda) || !is.null(s$DF.nu) || !is.null(s$DF.p)) {
printf("--\n")
printf("Transformed intercept-only parameters\n")
print(rbind(s$DF.lambda, s$DF.nu, s$DF.p))
}
if (is.character(tt)) {
printf("--\n")
cat(paste(tt, collapse = "\n"))
printf("\n")
} else {
printf("--\n")
printf("Chi-squared test for equidispersion\n")
printf("X^2 = %0.4f, df = %d, ", tt$teststat, tt$df)
printf("p-value = %0.4e\n", tt$pvalue)
}
printf("--\n")
printf("Elapsed: %s ", format.difftime(s$elapsed.sec))
printf("Sample size: %d ", s$n)
printf("%s interface\n", x$interface)
printf("LogLik: %0.4f ", s$loglik)
printf("AIC: %0.4f ", s$aic)
printf("BIC: %0.4f ", s$bic)
printf("\n")
printf("Optimization Method: %s ", s$optim.method)
printf("Converged status: %d\n", s$opt.convergence)
printf("Message: %s\n", s$opt.message)
}
#' @name glm.cmp, ZICMP support
#' @export
logLik.zicmpfit = function(object, ...)
{
object$loglik
}
#' @name glm.cmp, ZICMP support
#' @export
AIC.zicmpfit = function(object, ..., k = 2)
{
d = length(unlist(object$unfixed))
-2*object$loglik + 2*d
}
#' @name glm.cmp, ZICMP support
#' @export
BIC.zicmpfit = function(object, ...)
{
n = length(object$y)
d = length(unlist(object$unfixed))
-2*object$loglik + log(n)*d
}
#' @name glm.cmp, ZICMP support
#' @export
coef.zicmpfit = function(object, type = c("vector", "list"), ...)
{
switch(match.arg(type),
vector = c(object$beta, object$gamma, object$zeta),
list = list(beta = object$beta, gamma = object$gamma,
zeta = object$zeta)
)
}
#' @name glm.cmp, ZICMP support
#' @export
nu.zicmpfit = function(object, ...)
{
# This function is deprecated - use predict instead
.Deprecated("predict(object, type = \"link\")")
link = predict(object, type = "link")
link$nu
}
#' @name glm.cmp, ZICMP support
#' @export
sdev.zicmpfit = function(object, type = c("vector", "list"), ...)
{
d1 = ncol(object$X)
d2 = ncol(object$S)
d3 = ncol(object$W)
unfixed = object$unfixed
idx.par1 = seq_along(unfixed$beta)
idx.par2 = seq_along(unfixed$gamma) + length(unfixed$beta)
idx.par3 = seq_along(unfixed$zeta) + length(unfixed$beta) + length(unfixed$gamma)
sd.hat = sqrt(diag(vcov(object)))
if (match.arg(type) == "vector") {
out = sd.hat
} else if (match.arg(type) == "list") {
sd.beta = rep(NA, d1)
sd.gamma = rep(NA, d2)
sd.zeta = rep(NA, d3)
sd.beta[unfixed$beta] = sd.hat[idx.par1]
sd.gamma[unfixed$gamma] = sd.hat[idx.par2]
sd.zeta[unfixed$zeta] = sd.hat[idx.par3]
out = list(beta = sd.beta, gamma = sd.gamma, zeta = sd.zeta)
} else {
stop("Unrecognized type")
}
return(out)
}
#' @name glm.cmp, ZICMP support
#' @export
vcov.zicmpfit = function(object, ...)
{
# Compute the covariance via Hessian from optimizer
-solve(object$H)
}
#' @name glm.cmp, ZICMP support
#' @export
equitest.zicmpfit = function(object, ...)
{
if ("equitest" %in% names(object)) {
return(object$equitest)
}
y = object$y
X = object$X
S = object$S
W = object$W
init = object$init
offset = object$offset
fixed = object$fixed
control = object$control
ll = object$loglik
# If any elements of gamma have been fixed, an "equidispersion" test no
# longer makes sense. Unless the values were fixed at zeroes. But let's
# avoid this complication.
if (length(fixed$gamma) > 0) {
msg = c("Some elements of gamma were fixed,",
"chi-squared test for equidispersion not defined")
stop(paste(msg, collapse = " "))
}
n = length(y)
d2 = ncol(S)
# Null model is ZICMP with nu determined by the offset off.s. If off.s happens
# to be zeros, this simplifies to a Poisson regression.
fit0.out = fit.zicmp.reg(y, X, S, W, offset = offset,
init = get.init(beta = object$beta, gamma = numeric(d2), zeta = object$zeta),
fixed = get.fixed(beta = fixed$beta, gamma = seq_len(d2), zeta = fixed$zeta),
control = control)
ll0 = fit0.out$loglik
X2 = -2 * (ll0 - ll)
pvalue = pchisq(X2, df = d2, lower.tail = FALSE)
list(teststat = X2, pvalue = pvalue, df = d2)
}
#' @name glm.cmp, ZICMP support
#' @export
deviance.zicmpfit = function(object, ...)
{
y = object$y
X = object$X
S = object$S
W = object$W
n = length(y)
d2 = ncol(S)
fixed = object$fixed
offset = object$offset
control = object$control
ll.star = numeric(n)
for (i in 1:n) {
# Maximize loglik for ith obs
glm.out = fit.zicmp.reg(y[i],
X = X[i,,drop = FALSE],
S = S[i,,drop = FALSE],
W = W[i,,drop = FALSE],
init = get.init(beta = object$beta, gamma = numeric(d2), zeta = object$zeta),
offset = get.offset(x = offset$x[i], s = offset$s[i], w = offset$w[i]),
fixed = get.fixed(beta = fixed$beta, gamma = seq_len(d2), zeta = fixed$zeta),
control = control)
ll.star[i] = glm.out$opt.res$value
}
# Compute exact deviances
ll = numeric(n)
out = fitted_zicmp_internal(X, S, W, object$beta, object$gamma,
object$zeta, offset$x, offset$s, offset$w)
for (i in 1:n) {
ll[i] = dzicmp(y[i], out$lambda[i], out$nu[i], out$p[i], log = TRUE,
control = control)
}
dev = -2 * (ll - ll.star)
leverage = leverage(object)
cmpdev = dev / sqrt(1 - leverage)
return(cmpdev)
}
#' @name glm.cmp, ZICMP support
#' @export
residuals.zicmpfit = function(object, type = c("raw", "quantile"), ...)
{
out = fitted_zicmp_internal(object$X, object$S, object$W, object$beta,
object$gamma, object$zeta, object$offset$x, object$offset$s,
object$offset$w)
type = match.arg(type)
if (type == "raw") {
y.hat = ezicmp(out$lambda, out$nu, out$p, control = object$control)
res = object$y - y.hat
} else if (type == "quantile") {
res = rqres.zicmp(object$y, out$lambda, out$nu, out$p,
control = object$control)
} else {
stop("Unsupported residual type")
}
return(as.numeric(res))
}
#' @name glm.cmp, ZICMP support
#' @export
predict.zicmpfit = function(object, newdata = NULL, type = c("response", "link"), ...)
{
if (is.null(newdata)) {
# If newdata is NULL, reuse data for model fit
X = object$X
S = object$S
W = object$W
off.x = object$offset$x
off.s = object$offset$s
off.w = object$offset$w
} else if (object$interface == "formula") {
# If the model was fit with the formula interface, attempt to process
# newdata as a data.frame
newdata = as.data.frame(newdata)
# If the response was not included in newdata, add a column with zeros
response.name = all.vars(object$formula.lambda)[1]
if (is.null(newdata[[response.name]])) {
newdata[[response.name]] = 0
}
raw = formula2raw(object$formula.lambda, object$formula.nu,
object$formula.p, data = newdata, ...)
X = raw$X
S = raw$S
W = raw$W
off.x = raw$offset$x
off.s = raw$offset$s
off.w = raw$offset$w
} else if (object$interface == "raw") {
# If the model was fit with the raw interface, attempt to process
# newdata as a list
if (!("COMPoissonReg.modelmatrix" %in% class(newdata))) {
msg = paste("Model was fit using raw interface. Use",
"get.modelmatrix to construct design matrices for prediction.")
stop(msg)
}
X = newdata$X
S = newdata$S
W = newdata$W
off.x = newdata$offset$x
off.s = newdata$offset$s
off.w = newdata$offset$w
} else {
stop("Don't recognize value of interface")
}
link = fitted_zicmp_internal(X, S, W, object$beta,
object$gamma, object$zeta, off.x, off.s, off.w)
switch(match.arg(type),
response = ezicmp(link$lambda, link$nu, link$p, control = object$control),
link = data.frame(lambda = link$lambda, nu = link$nu, p = link$p)
)
}
#' @name glm.cmp, ZICMP support
#' @export
parametric.bootstrap.zicmpfit = function(object, reps = 1000, report.period = reps+1, ...)
{
n = length(object$y)
qq = length(object$beta) + length(object$gamma) + length(object$zeta)
out = matrix(NA, reps, qq)
colnames(out) = c(
sprintf("X:%s", colnames(object$X)),
sprintf("S:%s", colnames(object$S)),
sprintf("W:%s", colnames(object$W))
)
fitted.out = fitted_zicmp_internal(object$X, object$S, object$W, object$beta,
object$gamma, object$zeta, object$offset$x, object$offset$s,
object$offset$w)
lambda.hat = fitted.out$lambda
nu.hat = fitted.out$nu
p.hat = fitted.out$p
init = get.init(beta = object$beta, gamma = object$gamma, zeta = object$zeta)
for (r in 1:reps) {
if (r %% report.period == 0) {
logger("Starting bootstrap rep %d\n", r)
}
# Generate bootstrap samples of the full dataset using MLE
y.boot = rzicmp(n, lambda.hat, nu.hat, p.hat, control = object$control)
# Take each of the bootstrap samples and fit model to generate bootstrap
# estimates
tryCatch({
fit.boot = fit.zicmp.reg(y = y.boot, X = object$X, S = object$S,
W = object$W, init = init, offset = object$offset,
fixed = object$fixed, control = object$control)
out[r,] = unlist(fit.boot$theta.hat)
},
error = function(e) {
# Do nothing now; emit a warning later
})
}
cnt = sum(rowSums(is.na(out)) > 0)
if (cnt > 0) {
warning(sprintf("%d out of %d bootstrap iterations failed", cnt, reps))
}
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/COMPoissonReg/R/zicmp-reg.R
|
#' ZICMP Distribution
#'
#' Computes the density, cumulative probability, quantiles, and
#' random draws for the zero-inflated COM-Poisson distribution.
#'
#' @param x vector of quantiles.
#' @param q vector of probabilities.
#' @param n number of observations.
#' @param lambda rate parameter.
#' @param nu dispersion parameter.
#' @param p zero-inflation probability parameter.
#' @param log logical; if TRUE, probabilities are returned on log-scale.
#' @param log.p logical; if TRUE, probabilities p are given as \eqn{\log(p)}.
#' @param control a \code{COMPoissonReg.control} object from \code{get.control}
#' or \code{NULL} to use global default.
#'
#' @return
#' \describe{
#' \item{dzicmp}{density,}
#' \item{pzicmp}{cumulative probability,}
#' \item{qzicmp}{quantiles,}
#' \item{rzicmp}{generate random variates,}
#' \item{ezicmp}{expected value. and}
#' \item{vzicmp}{variance.}
#' }
#'
#' @references
#' Kimberly F. Sellers and Andrew M. Raim (2016). A Flexible Zero-Inflated Model
#' to Address Data Dispersion. Computational Statistics and Data Analysis, 99,
#' 68-80.
#'
#' @author Kimberly Sellers, Andrew Raim
#' @name ZICMP Distribution
NULL
#' @name ZICMP Distribution
#' @export
dzicmp = function(x, lambda, nu, p, log = FALSE, control = NULL)
{
n = length(x)
prep = prep.zicmp(n, lambda, nu, p)
fx = prep$p*(x==0) + (1-prep$p)*dcmp(x, prep$lambda, prep$nu, control = control)
if (log) { return(log(fx)) } else { return(fx) }
}
#' @name ZICMP Distribution
#' @export
rzicmp = function(n, lambda, nu, p, control = NULL)
{
prep = prep.zicmp(n, lambda, nu, p)
s = rbinom(n, size = 1, prob = prep$p)
(1-s) * rcmp(n, prep$lambda, prep$nu, control = control)
}
#' @name ZICMP Distribution
#' @export
pzicmp = function(x, lambda, nu, p, control = NULL)
{
n = length(x)
prep = prep.zicmp(n, lambda, nu, p)
prep$p*(x >= 0) + (1-prep$p)*pcmp(x, prep$lambda, prep$nu, control = control)
}
#' @name ZICMP Distribution
#' @export
qzicmp = function(q, lambda, nu, p, log.p = FALSE, control = NULL)
{
n = length(q)
prep = prep.zicmp(length(q), lambda, nu, p)
if (is.null(control)) {
control = getOption("COMPoissonReg.control", default = get.control())
}
stopifnot("COMPoissonReg.control" %in% class(control))
ymax = control$ymax
hybrid.tol = control$hybrid.tol
truncate.tol = control$truncate.tol
if (log.p) { lq = q } else { lq = log(q) }
# As in qcmp, check if any requested quantiles are too close to 1. Note
# that the criteria here depends on p, so we do not include it in the
# message.
idx_warn = which(lq > log((1 - truncate.tol)*(1 - prep$p) + prep$p))
if (length(idx_warn) > 0) {
msg = sprintf(paste(
"At least one requested quantile was very close to 1. In",
"particular, %d of the given probabilities were greater than",
"(1 - truncate.tol) * (1-p) + p, where truncate_tol = %g.",
"Associated results may be affected by truncation. Consider",
"adjusting the controls ymax and truncate.tol or reducing logq."),
length(idx_warn), truncate.tol)
warning(msg)
}
q_zicmp(lq, prep$lambda, prep$nu, prep$p, hybrid_tol = hybrid.tol,
truncate_tol = truncate.tol, ymax = ymax)
}
#' @name ZICMP Distribution
#' @export
ezicmp = function(lambda, nu, p, control = NULL)
{
(1-p) * ecmp(lambda, nu, control = control)
}
#' @name ZICMP Distribution
#' @export
vzicmp = function(lambda, nu, p, control = NULL)
{
ee = ecmp(lambda, nu, control = control)
vv = vcmp(lambda, nu, control = control)
(1-p) * (p*ee^2 + vv)
}
|
/scratch/gouwar.j/cran-all/cranData/COMPoissonReg/R/zicmp.R
|
fitted_zip_internal = function(X, W, beta, zeta, off.x, off.w)
{
list(
lambda = as.numeric(exp(X %*% beta + off.x)),
p = as.numeric(plogis(W %*% zeta + off.w))
)
}
|
/scratch/gouwar.j/cran-all/cranData/COMPoissonReg/R/zip-reg.R
|
#' COM-Poisson Distribution
#'
#' Functions for the COM-Poisson distribution.
#'
#' @param x vector of quantiles.
#' @param q vector of probabilities.
#' @param n number of observations.
#' @param lambda rate parameter.
#' @param p zero-inflation probability parameter.
#' @param log logical; if TRUE, probabilities are returned on log-scale.
#' @param log.p logical; if TRUE, probabilities \code{p} are given as \eqn{\log(p)}.
#'
#' @return
#' \describe{
#' \item{dzip}{density,}
#' \item{pzip}{cumulative probability,}
#' \item{qzip}{quantiles,}
#' \item{rzip}{generate random variates,}
#' \item{ezip}{expected value,}
#' \item{vzip}{variance,}
#' }
#'
#' @author Kimberly Sellers
#' @keywords Zero-Inflated Poisson distribution
#' @name ZIP Distribution
NULL
#' @name ZIP Distribution
#' @export
dzip = function(x, lambda, p, log = FALSE)
{
fx = p*(x==0) + (1-p)*dpois(x, lambda)
if (log) { return(log(fx)) } else { return(fx) }
}
#' @name ZIP Distribution
#' @export
rzip = function(n, lambda, p)
{
s = rbinom(n, size = 1, prob = p)
(1-s) * rpois(n, lambda)
}
#' @name ZIP Distribution
#' @export
pzip = function(x, lambda, p)
{
p*(x >= 0) + (1-p)*ppois(x, lambda)
}
#' @name ZIP Distribution
#' @export
qzip = function(q, lambda, p, log.p = FALSE)
{
q_tx = (q-p) / (1-p)
qpois(q_tx, lambda, log.p = log.p)
}
#' @name ZIP Distribution
#' @export
ezip = function(lambda, p)
{
(1-p) * lambda
}
#' @name ZIP Distribution
#' @export
vzip = function(lambda, p)
{
epois = lambda
vpois = lambda
(1-p) * (vpois + p*epois^2)
}
|
/scratch/gouwar.j/cran-all/cranData/COMPoissonReg/R/zip.R
|
## ----include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
prompt = TRUE,
comment = ""
)
## ----setup, include = FALSE---------------------------------------------------
library(COMPoissonReg)
set.seed(1235)
## -----------------------------------------------------------------------------
control = get.control(
ymax = 100000,
hybrid.tol = 1e-2,
truncate.tol = 1e-6
)
## -----------------------------------------------------------------------------
control = getOption("COMPoissonReg.control")
control$ymax
control$hybrid.tol
control$truncate.tol
## -----------------------------------------------------------------------------
options(COMPoissonReg.control = control)
## -----------------------------------------------------------------------------
ncmp(lambda = 1.5, nu = 1.2)
ncmp(lambda = 1.5, nu = 1.2, log = TRUE)
ncmp(lambda = 1.5, nu = 1.2, log = TRUE, control = get.control(hybrid.tol = 1e10))
ncmp(lambda = 1.5, nu = 1.2, log = TRUE, control = get.control(hybrid.tol = 1e-10))
## -----------------------------------------------------------------------------
print_warning = function(x) { print(strwrap(x), quote = FALSE) }
## -----------------------------------------------------------------------------
nu_seq = c(1, 0.5, 0.2, 0.1, 0.05, 0.03)
tryCatch({ tcmp(lambda = 1.5, nu = nu_seq) }, warning = print_warning)
## -----------------------------------------------------------------------------
tcmp(lambda = 1.5, nu = nu_seq, control = get.control(ymax = 3e6))
## -----------------------------------------------------------------------------
tcmp(lambda = 1.2, nu = 0.03, control = get.control(ymax = 1200))
## ----prompt = FALSE-----------------------------------------------------------
library(ggplot2)
nu_seq = seq(0.03, 1.5, length.out = 20)
nc1 = ncmp(lambda = 0.5, nu = nu_seq, log = TRUE)
nc2 = ncmp(lambda = 1.05, nu = nu_seq, log = TRUE)
nc3 = ncmp(lambda = 1.20, nu = nu_seq, log = TRUE)
## ----fig.width = 5, fig.height = 3, fig.align = "center", prompt = FALSE, fig.cap = "Log of normalizing constant for $\\lambda = 0.5$ ($\\circ$), $\\lambda = 1.05$ ($\\Delta$), and $\\lambda = 1.20$ ($+$)."----
ggplot() +
geom_point(data = data.frame(x = nu_seq, y = nc1), aes(x = x, y = y), pch = 1) +
geom_point(data = data.frame(x = nu_seq, y = nc2), aes(x = x, y = y), pch = 2) +
geom_point(data = data.frame(x = nu_seq, y = nc3), aes(x = x, y = y), pch = 3) +
xlab("nu") +
ylab("log of normalizing constant") +
theme_bw()
## -----------------------------------------------------------------------------
dcmp(0, lambda = 10, nu = 0.9)
dcmp(0:17, lambda = 10, nu = 0.9, log = TRUE)
dcmp(c(0, 1, 2), lambda = c(10, 11, 12), nu = c(0.9, 1.0, 1.1), log = TRUE)
## -----------------------------------------------------------------------------
rcmp(50, lambda = 10, nu = 0.9)
## -----------------------------------------------------------------------------
pcmp(0:17, lambda = 10, nu = 0.9)
## -----------------------------------------------------------------------------
qq = seq(0, 0.95, length.out = 10)
qcmp(qq, lambda = 10, nu = 0.9)
## -----------------------------------------------------------------------------
tryCatch({ rcmp(1, lambda = 2, nu = 0.01) }, warning = print_warning)
## -----------------------------------------------------------------------------
tryCatch({
qcmp(0.9999999, lambda = 1.5, nu = 0.5)
}, warning = print_warning)
## ----fig.width = 3, fig.height = 3, fig.align = "center", prompt = FALSE, fig.show = "hold"----
library(ggplot2)
n = 100000
lambda = 0.5
nu = 0.1
x = rcmp(n, lambda, nu)
xx = seq(-1, max(x)) ## Include -1 to ensure it gets probability zero
qq = seq(0, 0.99, length.out = 100)
fx = dcmp(xx, lambda, nu)
px = pcmp(xx, lambda, nu)
qx = qcmp(qq, lambda, nu)
qx_emp = quantile(x, probs = qq)
## ----fig.width = 3, fig.height = 3, prompt = FALSE, fig.cap = "Empirical density of draws (histogram) versus density computed via the dcmp function (points)."----
ggplot() +
geom_bar(data = data.frame(x = x), aes(x = x, y = ..prop..), fill = "NA",
col = "black") +
geom_point(data = data.frame(x = xx[-1], fx = fx[-1]), aes(x, fx)) +
ylab("Density") +
theme_bw()
## ----fig.width = 3, fig.height = 3, prompt = FALSE, fig.cap = "Empirical CDF of draws (solid line) versus CDF computed via the pcmp function (points)."----
ggplot() +
stat_ecdf(data = data.frame(x = x), aes(x), geom = "step") +
geom_point(data = data.frame(x = xx, px = px), aes(x, px)) +
ylab("Probability") +
theme_bw()
## ----fig.width = 3, fig.height = 3, prompt = FALSE, fig.cap = "Empirical quantiles of draws (`o`) versus quantiles computed via the qcmp function (`+`)."----
ggplot() +
geom_point(data = data.frame(x = qq, qx_emp = qx_emp), aes(qq, qx_emp), pch = 1) +
geom_point(data = data.frame(x = qq, qx = qx), aes(qq, qx), pch = 3) +
xlab("Probability") +
ylab("Quantile") +
theme_bw()
## -----------------------------------------------------------------------------
ecmp(lambda = 10, nu = 1.2)
ecmp(lambda = 1.5, nu = 0.5)
ecmp(lambda = 1.5, nu = 0.05)
ecmp(lambda = 1.5, nu = 0.05, control = get.control(hybrid.tol = 1e-10))
ecmp(lambda = 1.5, nu = 0.05, control = get.control(hybrid.tol = 1e10))
## -----------------------------------------------------------------------------
vcmp(lambda = 10, nu = 1.2)
vcmp(lambda = 1.5, nu = 0.5)
vcmp(lambda = 1.5, nu = 0.05)
vcmp(lambda = 1.5, nu = 0.05, control = get.control(hybrid.tol = 1e-10))
vcmp(lambda = 1.5, nu = 0.05, control = get.control(hybrid.tol = 1e10))
## -----------------------------------------------------------------------------
M = tcmp(lambda = 1.5, nu = 0.05)
print(M)
xx = seq(0, M)
sum(xx^3 * dcmp(xx, lambda, nu)) # E(X^3)
sum(xx^4 * dcmp(xx, lambda, nu)) # E(X^4)
## -----------------------------------------------------------------------------
qq = seq(0, 0.95, length.out = 20)
rzicmp(20, lambda = 1.5, nu = 0.2, p = 0.25)
dzicmp(c(0, 1, 2), lambda = 1.5, nu = 0.2, p = 0.25)
pzicmp(c(0, 1, 2), lambda = 1.5, nu = 0.2, p = 0.25)
qzicmp(qq, lambda = 1.5, nu = 0.2, p = 0.25)
## -----------------------------------------------------------------------------
tryCatch({
qzicmp(0.9999999, lambda = 1.5, nu = 0.5, p = 0.5)
}, warning = print_warning)
## ----fig.width = 3, fig.height = 3, fig.align = "center", prompt = FALSE, fig.show = "hold"----
library(ggplot2)
n = 100000
lambda = 0.5
nu = 0.1
p = 0.5
x = rzicmp(n, lambda, nu, p)
xx = seq(-1, max(x)) ## Include -1 to ensure it gets probability zero
qq = seq(0, 0.99, length.out = 100)
fx = dzicmp(xx, lambda, nu, p)
px = pzicmp(xx, lambda, nu, p)
qx = qzicmp(qq, lambda, nu, p)
qx_emp = quantile(x, probs = qq)
## ----fig.width = 3, fig.height = 3, prompt = FALSE, fig.cap = "Empirical density of draws (histogram) versus density computed via the dzicmp function (points)."----
ggplot() +
geom_bar(data = data.frame(x = x), aes(x = x, y = ..prop..), fill = "NA",
col = "black") +
geom_point(data = data.frame(x = xx[-1], fx = fx[-1]), aes(x, fx)) +
ylab("Density") +
theme_bw()
## ----fig.width = 3, fig.height = 3, prompt = FALSE, fig.cap = "Empirical CDF of draws (solid line) versus CDF computed via the pzicmp function (points)."----
ggplot() +
stat_ecdf(data = data.frame(x = x), aes(x), geom = "step") +
geom_point(data = data.frame(x = xx, px = px), aes(x, px)) +
ylab("Probability") +
theme_bw()
## ----fig.width = 3, fig.height = 3, prompt = FALSE, fig.cap = "Empirical quantiles of draws (`o`) versus quantiles computed via the qzicmp function (`+`)."----
ggplot() +
geom_point(data = data.frame(x = qq, qx_emp = qx_emp), aes(qq, qx_emp), pch = 1) +
geom_point(data = data.frame(x = qq, qx = qx), aes(qq, qx), pch = 3) +
xlab("Probability") +
ylab("Quantile") +
theme_bw()
## -----------------------------------------------------------------------------
ezicmp(lambda = 1.5, nu = 0.5, p = 0.1)
ezicmp(lambda = 1.5, nu = 0.5, p = c(0.1, 0.2, 0.5))
## -----------------------------------------------------------------------------
vzicmp(lambda = 1.5, nu = 0.5, p = 0.1)
vzicmp(lambda = 1.5, nu = 0.5, p = c(0.1, 0.2, 0.5))
## ----eval = FALSE, prompt = FALSE---------------------------------------------
# out = glm.cmp(formula.lambda, formula.nu = ~ 1, formula.p = NULL,
# data = NULL, init = NULL, fixed = NULL, control = NULL, ...)
## -----------------------------------------------------------------------------
get.init(beta = c(1, 2, 3), gamma = c(-1, 1), zeta = c(-2, -1))
get.init.zero(d1 = 3, d2 = 2, d3 = 2)
## -----------------------------------------------------------------------------
get.fixed(beta = c(1L, 2L), gamma = c(1L))
## ----eval=FALSE---------------------------------------------------------------
# model.matrix(formula.lambda, data = data)
# model.matrix(formula.nu, data = data)
# model.matrix(formula.p, data = data)
## -----------------------------------------------------------------------------
control = getOption("COMPoissonReg.control")
control$optim.method
control$optim.control
## -----------------------------------------------------------------------------
data(freight)
print(freight)
## -----------------------------------------------------------------------------
glm.out = glm(broken ~ transfers, data = freight, family = poisson)
summary(glm.out)
## -----------------------------------------------------------------------------
cmp.out = glm.cmp(broken ~ transfers, data = freight)
print(cmp.out)
## -----------------------------------------------------------------------------
cmp2.out = glm.cmp(broken ~ transfers, formula.nu = ~ transfers, data = freight)
print(cmp2.out)
## -----------------------------------------------------------------------------
control = get.control(optim.control = list(maxit = 5, trace = 3, REPORT = 1))
cmp3.out = glm.cmp(broken ~ transfers, data = freight, control = control)
## ----results = 'hide'---------------------------------------------------------
y = freight$broken
x = freight$transfers
glm.cmp(y ~ x)
## ----results = 'hide'---------------------------------------------------------
freight$offx = 13
freight$offs = 1
glm.cmp(broken ~ transfers + offset(offx), data = freight)
glm.cmp(broken ~ transfers + offset(offx), formula.nu = ~1 + offset(offs), data = freight)
## ----results = 'hide'---------------------------------------------------------
y = freight$broken
X = model.matrix(~ transfers, data = freight)
S = model.matrix(~ 1, data = freight)
offs = get.offset(x = rep(13, nrow(freight)), s = rep(1, nrow(freight)))
cmp.raw.out = glm.cmp.raw(y, X, S, offset = offs)
## -----------------------------------------------------------------------------
logLik(cmp.out) ## Log-likelihood evaluated at MLE.
AIC(cmp.out) ## AIC evaluated at MLE.
BIC(cmp.out) ## BIC evaluated at MLE.
coef(cmp.out) ## Estimates of theta as a flat vector
coef(cmp.out, type = "list") ## Estimates of theta as a named list
vcov(cmp.out) ## Estimated covariance matrix of theta hat
sdev(cmp.out) ## Standard deviations from vcov(...) diagonals
sdev(cmp.out, type = "list") ## Standard deviations as a named list
## -----------------------------------------------------------------------------
predict(cmp.out)
predict(cmp.out, type = "link")
## ----fig.width = 3, fig.height = 3, fig.align = "center", prompt=FALSE--------
# Prepare new data to fit by formula interface
new.df = data.frame(transfers = 0:10)
# Prepare new data to fit by raw interface
X = model.matrix(~ transfers, data = new.df)
S = model.matrix(~ 1, data = new.df)
new.data = get.modelmatrix(X = X, S = S)
# Pass new data to model from by formula interface
y.hat.new = predict(cmp.out, newdata = new.df)
# Pass new data to model from by raw interface
y.hat.new = predict(cmp.raw.out, newdata = new.data)
# Compute predictions for links
predict.out = predict(cmp.out, newdata = new.df, type = "link")
# Plot predictions
ggplot() +
geom_point(data = new.df, aes(transfers, y.hat.new)) +
xlab("Number of transfers") +
ylab("Predicted number broken") +
theme_bw()
## -----------------------------------------------------------------------------
print(y.hat.new)
print(predict.out)
## -----------------------------------------------------------------------------
leverage(cmp.out)
## -----------------------------------------------------------------------------
res.raw = residuals(cmp.out)
res.qtl = residuals(cmp.out, type = "quantile")
## -----------------------------------------------------------------------------
link.hat = predict(cmp.out, type = "link")
vv = vcmp(link.hat$lambda, link.hat$nu)
hh = leverage(cmp.out)
res.pearson = res.raw / sqrt(vv*(1-hh))
## ----fig.width = 3, fig.height = 3, fig.align = "center", prompt = FALSE, fig.show = "hold"----
plot.fit.res = function(y.hat, res) {
ggplot(data.frame(y = y.hat, res = res)) +
geom_point(aes(y, res)) +
xlab("Fitted Value") +
ylab("Residual Value") +
theme_bw() +
theme(plot.title = element_text(size = 10))
}
plot.qq.res = function(res) {
ggplot(data.frame(res = res), aes(sample = res)) +
stat_qq() +
stat_qq_line() +
theme_bw() +
theme(plot.title = element_text(size = 10))
}
y.hat = predict(cmp.out)
plot.fit.res(y.hat, res.raw) +
ggtitle("Fitted Values vs. Raw Residuals")
plot.qq.res(res.raw) +
ggtitle("Q-Q Plot of Raw Residuals")
plot.fit.res(y.hat, res.pearson) +
ggtitle("Fitted Values vs. Pearson Residuals")
plot.qq.res(res.pearson) +
ggtitle("Q-Q Plot of Pearson Residuals")
plot.fit.res(y.hat, res.qtl) +
ggtitle("Fitted Values vs. Quantile Residuals")
plot.qq.res(res.qtl) +
ggtitle("Q-Q Plot of Quantile Residuals")
## -----------------------------------------------------------------------------
mean(res.raw^2)
## -----------------------------------------------------------------------------
equitest(cmp.out)
## -----------------------------------------------------------------------------
deviance(cmp.out)
## -----------------------------------------------------------------------------
cmp.boot = parametric.bootstrap(cmp.out, reps = 100)
head(cmp.boot)
## -----------------------------------------------------------------------------
t(apply(cmp.boot, 2, quantile, c(0.025,0.975)))
## ----prompt=FALSE-------------------------------------------------------------
set.seed(1234)
n = 200
x = rnorm(n, 500, 10)
X = cbind(intercept = 1, slope = x)
S = matrix(1, n, 1)
beta_true = c(-0.05, 0.05)
gamma_true = 2
lambda_true = exp(X %*% beta_true)
nu_true = exp(S %*% gamma_true)
y = rcmp(n, lambda_true, nu_true)
## -----------------------------------------------------------------------------
summary(x)
summary(y)
## -----------------------------------------------------------------------------
tryCatch({
glm.cmp(y ~ x, formula.nu = ~ 1)
}, error = print_warning)
## -----------------------------------------------------------------------------
glm.cmp(y ~ scale(x), formula.nu = ~ 1)
## -----------------------------------------------------------------------------
glm.cmp(y ~ log(x), formula.nu = ~ 1)
## -----------------------------------------------------------------------------
control = get.control(optim.method = "BFGS", optim.control = list(maxit = 200))
suppressWarnings({
cmp.out = glm.cmp(y ~ x, formula.nu = ~ 1, control = control)
print(cmp.out)
})
## ----prompt=FALSE-------------------------------------------------------------
set.seed(1234)
n = 200
x = runif(n, 1, 2)
X = cbind(intercept = 1, slope = x)
S = matrix(1, n, 1)
beta_true = c(1, 1)
gamma_true = -0.95
lambda_true = exp(X %*% beta_true)
nu_true = exp(S %*% gamma_true)
y = rcmp(n, lambda_true, nu_true)
## -----------------------------------------------------------------------------
summary(x)
summary(y)
## -----------------------------------------------------------------------------
tryCatch({
glm.cmp(y ~ x, formula.nu = ~ 1)
}, error = print_warning)
## -----------------------------------------------------------------------------
init = get.init(beta = beta_true, gamma = gamma_true)
glm.cmp(y ~ x, formula.nu = ~ 1, init = init)
## -----------------------------------------------------------------------------
control = get.control(optim.method = "Nelder-Mead", optim.control = list(maxit = 1000))
glm.cmp(y ~ x, formula.nu = ~ 1, control = control)
## -----------------------------------------------------------------------------
data(couple)
head(couple)
## -----------------------------------------------------------------------------
glm.out = glm(UPB ~ EDUCATION + ANXIETY, data = couple, family = poisson)
summary(glm.out)
## -----------------------------------------------------------------------------
zicmp0.out = glm.cmp(UPB ~ EDUCATION + ANXIETY,
formula.nu = ~ 1,
formula.p = ~ EDUCATION + ANXIETY,
data = couple)
print(zicmp0.out)
## -----------------------------------------------------------------------------
pred.out = predict(zicmp0.out, type = "link")
summary(pred.out$lambda)
## ----prompt=FALSE-------------------------------------------------------------
library(numDeriv)
g = function(gamma0) {
-ncmp(lambda = exp(-0.25), nu = exp(gamma0), log = TRUE)
}
dat = data.frame(gamma0 = seq(0, -13), g = NA, d_g = NA, d2_g = NA)
for (j in 1:nrow(dat)) {
gamma0 = dat$gamma0[j]
dat$g[j] = g(gamma0)
dat$d_g[j] = numDeriv::grad(func = g, x = gamma0)
dat$d2_g[j] = numDeriv::hessian(func = g, x = gamma0)
}
## -----------------------------------------------------------------------------
print(dat)
## ----prompt=FALSE-------------------------------------------------------------
init = coef(zicmp0.out, type = "list")
y = couple$UPB
X = model.matrix(~ EDUCATION + ANXIETY, data = couple)
S = model.matrix(~ 1, data = couple)
W = model.matrix(~ EDUCATION + ANXIETY, data = couple)
control = get.control(optim.method = "BFGS")
zicmp.out = glm.zicmp.raw(y, X, S, W,
init = get.init(beta = c(-1,0,0), gamma = -Inf, zeta = c(-1,0,0)),
fixed = get.fixed(gamma = 1L), control = control)
## -----------------------------------------------------------------------------
print(zicmp.out)
## -----------------------------------------------------------------------------
logLik(zicmp.out) ## Log-likelihood evaluated at MLE
AIC(zicmp.out) ## AIC evaluated at MLE
BIC(zicmp.out) ## BIC evaluated at MLE
coef(zicmp.out) ## Estimates of theta as a flat vector
coef(zicmp.out, type = "list") ## Estimates of theta as a named list
vcov(zicmp.out) ## Estimated covariance matrix of theta hat
sdev(zicmp.out) ## Standard deviations from vcov(...) diagonals
sdev(zicmp.out, type = "list") ## Standard deviations as a named list
equitest(zicmp0.out) ## Likelihood ratio test for H_0: gamma = 0
tryCatch({ ## An error is thrown for model with fixed gamma
equitest(zicmp.out)
}, error = print_warning)
## -----------------------------------------------------------------------------
y.hat = predict(zicmp.out) ## Fitted values based on ecmp
link.hat = predict(zicmp.out, type = "link")
head(y.hat)
head(link.hat)
## ----fig.width = 3, fig.height = 3, fig.align = "center", prompt = FALSE, fig.show = "hold"----
res.raw = residuals(zicmp.out, type = "raw")
res.qtl = residuals(zicmp.out, type = "quantile")
plot.fit.res(y.hat, res.raw) +
ggtitle("Fitted Values vs. Raw Residuals")
plot.qq.res(res.raw) +
ggtitle("Q-Q Plot of Raw Residuals")
plot.fit.res(y.hat, res.qtl) +
ggtitle("Fitted Values vs. Quantile Residuals")
plot.qq.res(res.qtl) +
ggtitle("Q-Q Plot of Quantile Residuals")
## ----prompt = FALSE-----------------------------------------------------------
new.df = data.frame(EDUCATION = round(1:20 / 20), ANXIETY = seq(-3,3, length.out = 20))
X.new = model.matrix(~ EDUCATION + ANXIETY, data = new.df)
S.new = model.matrix(~ 1, data = new.df)
W.new = model.matrix(~ EDUCATION + ANXIETY, data = new.df)
new.data = get.modelmatrix(X.new, S.new, W.new)
# For model fit using raw interface, use get.modelmatrix to prepare new design
# matrices, offsets, etc
y.hat.new = predict(zicmp.out, newdata = new.data)
# For models fit with the formula interface, pass a data.frame with the same
# structure as used in the fit.
y.hat.new = predict(zicmp0.out, newdata = new.df)
## -----------------------------------------------------------------------------
print(y.hat.new)
## ----eval = FALSE-------------------------------------------------------------
# zicmp.boot = parametric.bootstrap(zicmp.out, reps = 100)
# head(zicmp.boot)
# apply(zicmp.boot, 2, quantile, c(0.025,0.975))
|
/scratch/gouwar.j/cran-all/cranData/COMPoissonReg/inst/doc/vignette.R
|
---
title: "COMPoissonReg: Usage, the Normalizing Constant, and Other Computational Details"
author:
- Andrew M. Raim^[
<[email protected]>,
Center for Statistical Research & Methodology,
U.S. Census Bureau,
Washington, DC, 20233, U.S.A.
**Disclaimer**`:` This document is released to inform interested parties of
ongoing research and to encourage discussion of work in progress. Any views
expressed are those of the authors and not those of the U.S. Census Bureau.
]
- Kimberly F. Sellers^[
<[email protected]>,
Center for Statistical Research & Methodology,
U.S. Census Bureau and
Department of Mathematics and Statistics,
Georgetown University,
Washington, DC, 20057, U.S.A.
]
abstract: >
`COMPoissonReg` is an R package which supports Conway-Maxwell Poisson (CMP)
and Zero-Inflated Conway-Maxwell Poisson (ZICMP) models. This vignette
describes fundamental computational details, especially those involving the
normalizing constant and related quantities. The CMP normalizing constant
does not have a general closed form; furthermore, it requires care to handle
numerically as its magnitude can vary greatly with changes in the parameters.
Primary `COMPoissonReg` functions are demonstrated with examples, including
those implementing basic distribution functions and regression modeling.
bibliography: references.bib
date: "`r format(Sys.time(), '%Y-%m-%d')`"
output:
pdf_document:
citation_package: natbib
number_sections: yes
toc: yes
toc_depth: 3
extra_dependencies:
common: null
keep_tex: yes
urlcolor: blue
linkcolor: blue
citecolor: blue
vignette: >
%\VignetteIndexEntry{COMPoissonReg: Usage, the Normalizing Constant, and Other Computational Details}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
prompt = TRUE,
comment = ""
)
```
```{r setup, include = FALSE}
library(COMPoissonReg)
set.seed(1235)
```
\newpage
# Introduction
\label{sec:intro}
The R package `COMPoissonReg` supports Conway-Maxwell Poisson (CMP) and Zero-Inflated Conway-Maxwell Poisson (ZICMP) models for analysis of count data in a flexible manner, to account for data dispersion relative to a Poisson model. The package provides regression functionality in addition to basic distribution functions. Interested users can refer to @SellersShmueli2010 and @SellersRaim2016 regarding the underlying theoretical developments for the CMP and ZICMP regressions, respectively. A full specification of the public `COMPoissonReg` interface can be found in the manual. In addition to package prerequisites `Rcpp` [@Eddelbuettel2013] and `numDeriv` [@numDeriv], `ggplot2` [@Wickham2016] is also used in this vignette.
One of the challenges of working with CMP and ZICMP lies in computing the normalizing constant and related quantities. The normalizing constant does not have a simple closed form in general and can quickly increase or decrease magnitude as parameters are varied. `COMPoissonReg` takes a hybrid approach of either truncating the infinite series or making use of an approximation, depending on parameter values.
The remainder of the vignette proceeds as follows. Section \ref{sec:cmp} describes functions to support the CMP distribution, including numerical handling of the normalizing constant. Section \ref{sec:zicmp} describes functions for ZICMP. Finally, Section \ref{sec:reg} demonstrates regression functions; Sections \ref{sec:cmp-reg} and \ref{sec:zicmp-reg} give specific examples based on CMP and ZICMP outcomes, respectively. The `COMPoissonReg` package is on CRAN at <https://cran.r-project.org/package=COMPoissonReg> and the source code is on Github at <https://github.com/lotze/COMPoissonReg>.
# Conway-Maxwell Poisson Distribution
\label{sec:cmp}
Let $Y \sim \text{CMP}(\lambda, \nu)$ be a Conway-Maxwell Poisson (CMP) random variable with density
<!-- -->
\begin{align*}
f(y \mid \lambda, \nu) = \frac{\lambda^y}{(y!)^\nu Z(\lambda, \nu)}, \quad y \in \mathbb{N}, \quad
Z(\lambda, \nu) = \sum_{r=0}^\infty \frac{\lambda^r}{(r!)^\nu},
\end{align*}
<!-- -->
where $\lambda > 0$, $\nu > 0$, and $\mathbb{N}$ represents the nonnegative integers $\{ 0, 1, 2, \ldots\}$. Three notable special cases of $\text{CMP}(\lambda, \nu)$ help to demonstrate its flexibility in count modeling.
a. The case $\nu = 1$ corresponds to $\text{Poisson}(\lambda)$.
a. When $\lambda \in (0,1)$ and $\nu = 0$, the $\text{CMP}(\lambda, \nu)$ distribution is $\text{Geometric}(1-\lambda)$ with density $f(y \mid \lambda) = (1 - \lambda) \lambda^y$ for $y \in \mathbb{N}$, which is overdispersed relative to Poisson.
a. When $\nu \rightarrow \infty$, $\text{CMP}(\lambda, \nu)$ converges to a $\text{Bernoulli}(\lambda / (1 + \lambda))$ distribution which is underdispersed relative to Poisson.
## Normalizing Constant
\label{sec:cmp-normconst}
The normalizing constant $Z(\lambda, \nu)$ presents some challenges in the practical use of CMP models and has been a topic of interest in the CMP literature. In general, there is no simple closed form expression for the series $Z(\lambda, \nu)$. @ShmueliEtAl2005 give the approximation
<!-- -->
\begin{align}
Z(\lambda, \nu) &= \frac{ \exp(\nu \lambda^{1/\nu}) }{ \lambda^{(\nu-1)/2\nu} (2\pi)^{(\nu-1)/2} \nu^{1/2} }
\left\{ 1 + O(\lambda^{-1/\nu}) \right\},
\label{eqn:approx}
\end{align}
<!-- -->
where the $O(\cdot)$ term vanishes as $\lambda^{-1/\nu}$ becomes small. Approximations have been further studied and refined in subsequent literature; see for example @GillispieGreen2015, @DalyGaunt2016, and @GauntEtAl2019. The expression in \eqref{eqn:approx} emphasizes that the magnitude of $Z(\lambda, \nu)$ explodes as $\nu \rightarrow 0$ when $\lambda > 1$. For example, $Z(2, 0.075) \approx e^{780.515}$ is too large to store as a double-precision floating point number, and may evaluate to infinity if care is not taken. In contrast, $Z(\lambda, \nu) \rightarrow 1/(1 - \lambda)$ when $\lambda < 1$ and $\nu \rightarrow 0$.
In practice, the `COMPoissonReg` package does not place constraints on $\lambda$ and $\nu$, except to ensure that they are positive, so that their values are driven by the data or the user's selection. A hybrid strategy motivated by \eqref{eqn:approx} is taken by `COMPoissonReg`.
To compute $Z(\lambda, \nu)$, suppose we are given a small tolerance $\delta > 0$. If
<!-- --->
\begin{align}
\lambda^{-1/\nu} < \delta,
\label{eqn:can_approx}
\end{align}
<!-- --->
the first term of \eqref{eqn:approx} dominates the second term, and we take
<!-- -->
\begin{align}
Z(\lambda, \nu) &\approx \frac{ \exp(\nu \lambda^{1/\nu}) }{ \lambda^{(\nu-1)/2\nu} (2\pi)^{(\nu-1)/2} \nu^{1/2} } \nonumber \\
&=\exp\left\{
\nu \lambda^{1/\nu} - \frac{\nu-1}{2\nu} \log \lambda - \frac{\nu-1}{2} \log(2\pi) - \frac{1}{2} \log \nu
\right\}.
\label{eqn:z-approx}
\end{align}
<!-- -->
as an approximation. Otherwise, the series is computed by truncating to a finite number of terms, which is described next. In either case, computations are kept on the log-scale as much as possible to accommodate numbers with potentially very large and very small magnitudes.
We approximate $Z(\lambda, \nu)$ by a finite summation $Z^{(M)}(\lambda, \nu) = \sum_{r=0}^M \lambda^r / (r!)^\nu$ if condition \eqref{eqn:can_approx} fails, so that the remainder is smaller than a given tolerance. The general approach is described in Appendix B of @ShmueliEtAl2005. @Robbins1955 gives bounds for Stirling's approximation as
<!-- -->
\begin{align*}
\sqrt{2\pi} n^{n + 1/2} e^{-n} e^{1 / (12n + 1)} < n! <
\sqrt{2\pi} n^{n + 1/2} e^{-n} e^{1 / (12n)}.
\end{align*}
<!-- -->
Noting that $e^{1 / (12n + 1)} \geq 1$ for $n \geq 1$ and $\sqrt{2\pi} e^{1 / (12n)} \leq e$ for $n \geq 2$, we obtain simpler bounds
<!-- -->
\begin{align*}
\sqrt{2\pi} n^{n + 1/2} e^{-n} \leq n! \leq e n^{n + 1/2} e^{-n},
\end{align*}
<!-- -->
which will be convenient in the following calculations.^[These bounds are also stated at <https://en.wikipedia.org/wiki/Stirling\%27s_approximation>, last accessed 2022-10-09.] We may then bound the truncation error for $Z^{(M)}(\lambda, \nu)$ using
<!-- -->
\begin{align}
\lvert Z(\lambda, \nu) - Z^{(M)}(\lambda, \nu) \rvert &= Z(\lambda, \nu) - Z^{(M)}(\lambda, \nu) \nonumber \\
&= \sum_{r=M+1}^\infty \frac{\lambda^r}{(r!)^\nu} \nonumber \\
&\leq \sum_{r=M+1}^\infty \frac{\lambda^r}{(2\pi)^{\nu/2} r^{\nu r + \nu/2} e^{-r \nu}} \nonumber \\
&\leq \sum_{r=M+1}^\infty \frac{\lambda^r}{(2\pi)^{\nu/2} (M+1)^{\nu r + \nu/2} e^{-r \nu}} \nonumber \\
&= (2\pi)^{-\nu/2} (M+1)^{-\nu/2} \sum_{r=M+1}^\infty \left( \frac{\lambda e^{\nu}}{(M+1)^{\nu}} \right)^r \label{eqn:geom} \\
&= (2\pi)^{-\nu/2} (M+1)^{-\nu/2} \sum_{r=0}^\infty \left( \frac{\lambda e^{\nu}}{(M+1)^{\nu}} \right)^{r+M+1} \nonumber \\
&= (2\pi)^{-\nu/2} (M+1)^{-\nu/2} \left( \frac{\lambda e^{\nu}}{(M+1)^{\nu}} \right)^{M+1} \frac{1}{1 - \frac{\lambda e^{\nu}}{(M+1)^{\nu}}} \nonumber \\
&=: \Delta_M, \nonumber
\end{align}
<!-- -->
assuming that $|\lambda e^\nu / (M+1)^\nu| < 1$ so that the geometric series in \eqref{eqn:geom} converges. To ensure this convergence we choose $M$ at least large enough so that
<!-- -->
\begin{align*}
\lambda e^\nu / (M+1)^\nu < 1 \iff
M > \lambda^{1/\nu} e - 1.
\end{align*}
<!-- -->
For a small given number $\epsilon > 0$, we may consider bounding the relative error by
<!-- -->
\begin{align}
&\frac{\lvert Z(\lambda, \nu) - Z^{(M)}(\lambda, \nu) \rvert}{Z^{(M)}(\lambda, \nu)}
\leq \frac{\Delta_M}{Z^{(M)}(\lambda, \nu)}
< \epsilon.
\label{eqn:rel_error_bound}
\end{align}
<!-- -->
The second inequality of \eqref{eqn:rel_error_bound} can be expressed on the log-scale using
<!-- -->
\begin{align}
\log \Delta_M - \log Z^{(M)}(\lambda, \nu) < \log \epsilon,
\label{eqn:adaptive-log-scale}
\end{align}
<!-- -->
where
<!-- -->
\begin{align*}
\log \Delta_M
= -\frac{\nu}{2} \log(2\pi)
- \nu \left( M+\frac{3}{2} \right) \log (M+1)
+ (M+1) (\nu + \log \lambda)
- \log\left(1 - \frac{\lambda e^\nu}{(M+1)^\nu} \right).
\end{align*}
<!-- -->
Therefore, we compute $Z^{(M)}(\lambda, \nu)$ until at least $M > \lambda^{1/\nu} e - 1$, increasing $M$ and updating $Z^{(M)}(\lambda, \nu)$ until \eqref{eqn:adaptive-log-scale} is satisfied. This is summarized as Algorithm \ref{alg:normconst-trunc}.
\begin{algorithm}
\caption{Compute the CMP normalizing constant using truncation approach.}
\label{alg:normconst-trunc}
\hspace*{\algorithmicindent}\textbf{Input}: $\lambda > 0$ rate parameter. \\
\hspace*{\algorithmicindent}\textbf{Input}: $\nu > 0$ dispersion parameter. \\
\hspace*{\algorithmicindent}\textbf{Input}: $\epsilon > 0$ tolerance. \\
\hspace*{\algorithmicindent}\textbf{Input}: $y_\text{max} \in \mathbb{N}$ upper limit for $M$
\begin{algorithmic}[1]
\Function{Truncated-$Z$}{$\lambda, \nu, \epsilon, y_\text{max}$}
\State $M = 0$, $Z^{(0)} = 1$
\While{$M \leq \lambda^{1/\nu} e - 1$ and $M \leq y_\text{max}$}
\State $Z^{(M+1)} \leftarrow Z^{(M)} + \lambda^{M} / (M!)^\nu$
\State $M \leftarrow M + 1$
\EndWhile
\While{$\log \Delta_M - \log Z^{(M)} \geq \log \epsilon$ and $M \leq y_\text{max}$}
\State $Z^{(M+1)} \leftarrow Z^{(M)} + \lambda^{M} / (M!)^\nu$
\State $M \leftarrow M + 1$
\EndWhile
\State \Return $\{ Z^{(M)}, M \}$
\EndFunction
\end{algorithmic}
\end{algorithm}
The individual terms $\lambda^r / (r!)^\nu$ in the summation may be too large to store at their original scale. Therefore, summation is carried out at the log-scale, wherever possible, using the identity
<!-- -->
\begin{align}
\log(x + y) = \log x + \log(1 + \exp\{ \log y - \log x \});
\label{eqn:logadd}
\end{align}
<!-- -->
this is especially helpful when $0 < y \ll x$, as $\log x$ may be kept on the log-scale by the first term of the right-hand side of \eqref{eqn:logadd}, and the standard library function `log1p` may be used to accurately compute $\log(1 + \phi)$ for very small $\phi > 0$.
Many of the functions in the user interface of `COMPoissonReg` take an optional `control` argument, which can be constructed as follows.
```{r}
control = get.control(
ymax = 100000,
hybrid.tol = 1e-2,
truncate.tol = 1e-6
)
```
The tolerances $\delta$ and $\epsilon$ are specified as `hybrid.tol` and `truncate.tol` respectively. Taking `hybrid.tol` to be a very small positive number results in use of the truncated sum $Z^{(M)}(\lambda, \nu)$, while `hybrid.tol = Inf` uses the approximation method \eqref{eqn:z-approx}, except in extreme cases where $\lambda^{-1/\nu}$ evaluates to zero or $\infty$ numerically. The argument `ymax` specifies upper limit $M$; this is a safety measure which prevents very large computations unless the user opts to allow them. When no control object is specified, a global default (via option `COMPoissonReg.control`) is used.
```{r}
control = getOption("COMPoissonReg.control")
control$ymax
control$hybrid.tol
control$truncate.tol
```
The default may be replaced in your current session if desired.
```{r}
options(COMPoissonReg.control = control)
```
The control object contains several other useful arguments to be discussed later in the vignette.
The `ncmp` function computes the normalizing constant $Z(\lambda, \nu)$ and returns its value either on the original scale or the log-scale.
```{r}
ncmp(lambda = 1.5, nu = 1.2)
ncmp(lambda = 1.5, nu = 1.2, log = TRUE)
ncmp(lambda = 1.5, nu = 1.2, log = TRUE, control = get.control(hybrid.tol = 1e10))
ncmp(lambda = 1.5, nu = 1.2, log = TRUE, control = get.control(hybrid.tol = 1e-10))
```
Before proceeding, let us define a function to display errors and warnings which are intentionally triggered in the remainder of the vignette.
```{r}
print_warning = function(x) { print(strwrap(x), quote = FALSE) }
```
The function `tcmp` returns the truncation value $M$ obtained from Algorithm \ref{alg:normconst-trunc}.
```{r}
nu_seq = c(1, 0.5, 0.2, 0.1, 0.05, 0.03)
tryCatch({ tcmp(lambda = 1.5, nu = nu_seq) }, warning = print_warning)
```
Note that `tcmp` returns `1e6` and produces a warning for the smallest `nu` value `r min(nu_seq)` because Algorithm \ref{alg:normconst-trunc} has reached `ymax = 1e6` before the series could be bounded by a geometric series. Here, it is likely that support values with non-negligible mass are being left out. Let us increase `ymax` to avoid this problem.
```{r}
tcmp(lambda = 1.5, nu = nu_seq, control = get.control(ymax = 3e6))
```
It is also possible to reach the second loop of Algorithm \ref{alg:normconst-trunc} where the geometric series can be used, but `ymax` is not large enough to satisfy \eqref{eqn:adaptive-log-scale}. Here is an example where this occurs.
```{r}
tcmp(lambda = 1.2, nu = 0.03, control = get.control(ymax = 1200))
```
Now that we have a somewhat robust computation for the normalizing constant, let us create a plot of the interesting behavior when $\lambda > 1$ and $\nu$ is decreasing.
```{r, prompt = FALSE}
library(ggplot2)
nu_seq = seq(0.03, 1.5, length.out = 20)
nc1 = ncmp(lambda = 0.5, nu = nu_seq, log = TRUE)
nc2 = ncmp(lambda = 1.05, nu = nu_seq, log = TRUE)
nc3 = ncmp(lambda = 1.20, nu = nu_seq, log = TRUE)
```
```{r, fig.width = 5, fig.height = 3, fig.align = "center", prompt = FALSE, fig.cap = "Log of normalizing constant for $\\lambda = 0.5$ ($\\circ$), $\\lambda = 1.05$ ($\\Delta$), and $\\lambda = 1.20$ ($+$)."}
ggplot() +
geom_point(data = data.frame(x = nu_seq, y = nc1), aes(x = x, y = y), pch = 1) +
geom_point(data = data.frame(x = nu_seq, y = nc2), aes(x = x, y = y), pch = 2) +
geom_point(data = data.frame(x = nu_seq, y = nc3), aes(x = x, y = y), pch = 3) +
xlab("nu") +
ylab("log of normalizing constant") +
theme_bw()
```
We see that with $\lambda = 1.2$ a value of $Z(\lambda, `r round(nu_seq[1], 4)`) \approx e^{`r round(nc3[1], 2)`}$ is obtained, which is an extremely large jump from the next value in the series $Z(\lambda, `r round(nu_seq[2], 4)`) \approx e^{`r round(nc3[2], 2)`}$.
## Density, Generation, CDF, and Quantile Functions
\label{sec:cmp-dist}
The respective functions for CMP density, variate generation, CDF, and quantile functions are `dcmp`, `rcmp`, `pcmp` , and `qcmp`. Their usage is similar to distribution functions provided by the R `stats` package.
```{r}
dcmp(0, lambda = 10, nu = 0.9)
dcmp(0:17, lambda = 10, nu = 0.9, log = TRUE)
dcmp(c(0, 1, 2), lambda = c(10, 11, 12), nu = c(0.9, 1.0, 1.1), log = TRUE)
```
```{r}
rcmp(50, lambda = 10, nu = 0.9)
```
```{r}
pcmp(0:17, lambda = 10, nu = 0.9)
```
```{r}
qq = seq(0, 0.95, length.out = 10)
qcmp(qq, lambda = 10, nu = 0.9)
```
The CMP distribution functions compute the normalizing constant via Section \ref{sec:cmp-normconst}. The density `dcmp` uses the hybrid method, while `rcmp`, `pcmp`, and `qcmp` use the truncation method regardless of condition \eqref{eqn:can_approx}. Variate generation, CDF, and quantiles are then computed on CMP as if it were a discrete distribution on the sample space $\{ 0, \ldots, M \}$. As with `tcmp` and `ncmp`, a warning is emitted in cases where they may not produce reliable results.
```{r}
tryCatch({ rcmp(1, lambda = 2, nu = 0.01) }, warning = print_warning)
```
The truncation method for `qcmp` can result in unreliable quantiles when the requested probability is very close to 1. For example, the actual quantile for probability $1$ is $\infty$, which can be expressed with no computation, but the computed quantity will be the truncation value $M$. More generally, \eqref{eqn:rel_error_bound} implies that
<!-- -->
\begin{align}
\frac{Z(\lambda, \nu) - Z^{(M)}(\lambda, \nu)}{Z(\lambda, \nu)} < \epsilon
&\iff \Prob(Y > M) < \epsilon \nonumber \\
&\iff 1 - F(M \mid \lambda, \nu) < \epsilon \nonumber \\
&\iff F^{-}(1 - \epsilon \mid \lambda, \nu) < M.
\label{eqn:cmp-quantile-limit}
\end{align}
<!-- -->
Therefore, it is possible that quantiles larger than $1 - \epsilon$ may be inaccurately computed with the truncated support. `COMPoissonReg` gives a warning when these are requested.
```{r}
tryCatch({
qcmp(0.9999999, lambda = 1.5, nu = 0.5)
}, warning = print_warning)
```
As a sanity check, let us ensure that the empirical density values, cumulative probabilities, and quantiles of draws from `rcmp` align with respective results computed via `dcmp`, `pcmp`, and `qcmp`.
```{r, fig.width = 3, fig.height = 3, fig.align = "center", prompt = FALSE, fig.show = "hold"}
library(ggplot2)
n = 100000
lambda = 0.5
nu = 0.1
x = rcmp(n, lambda, nu)
xx = seq(-1, max(x)) ## Include -1 to ensure it gets probability zero
qq = seq(0, 0.99, length.out = 100)
fx = dcmp(xx, lambda, nu)
px = pcmp(xx, lambda, nu)
qx = qcmp(qq, lambda, nu)
qx_emp = quantile(x, probs = qq)
```
```{r, fig.width = 3, fig.height = 3, prompt = FALSE, fig.cap = "Empirical density of draws (histogram) versus density computed via the dcmp function (points)."}
ggplot() +
geom_bar(data = data.frame(x = x), aes(x = x, y = ..prop..), fill = "NA",
col = "black") +
geom_point(data = data.frame(x = xx[-1], fx = fx[-1]), aes(x, fx)) +
ylab("Density") +
theme_bw()
```
```{r, fig.width = 3, fig.height = 3, prompt = FALSE, fig.cap = "Empirical CDF of draws (solid line) versus CDF computed via the pcmp function (points)."}
ggplot() +
stat_ecdf(data = data.frame(x = x), aes(x), geom = "step") +
geom_point(data = data.frame(x = xx, px = px), aes(x, px)) +
ylab("Probability") +
theme_bw()
```
```{r, fig.width = 3, fig.height = 3, prompt = FALSE, fig.cap = "Empirical quantiles of draws (`o`) versus quantiles computed via the qcmp function (`+`)."}
ggplot() +
geom_point(data = data.frame(x = qq, qx_emp = qx_emp), aes(qq, qx_emp), pch = 1) +
geom_point(data = data.frame(x = qq, qx = qx), aes(qq, qx), pch = 3) +
xlab("Probability") +
ylab("Quantile") +
theme_bw()
```
## Expected Value and Variance
\label{sec:cmp-ev}
For $Y \sim \text{CMP}(\lambda, \nu)$, we can consider computing the expectation and variance of $Y$ in two ways. First, if there is a moderately-sized $M$ where $\{ 0, \ldots, M \}$ contains all but a negligible amount of the mass of $Y$, we can compute the moments using truncated summations
<!-- -->
\begin{align*}
\E(Y) = \sum_{y=0}^M y \frac{\lambda^y}{(y!)^\nu Z(\lambda, \nu)}, \quad
\E(Y^2) = \sum_{y=0}^M y^2 \frac{\lambda^y}{(y!)^\nu Z(\lambda, \nu)}, \quad
\Var(Y) = \E(Y^2) - [\E(Y)]^2.
\end{align*}
<!-- -->
Otherwise, a different approach is taken. Notice that the expected value is related to the first derivative of the log-normalizing constant via
<!-- -->
\begin{align*}
&\frac{\partial}{\partial \lambda} \log Z(\lambda, \nu)
= \frac{ \frac{\partial}{\partial \lambda} Z(\lambda, \nu) }{ Z(\lambda, \nu) }
= \frac{1}{Z(\lambda, \nu)} \sum_{y=0}^\infty y \frac{\lambda^{y-1}}{(y!)^\nu} \\
&\iff \E(Y) = \lambda \frac{\partial}{\partial \lambda} \log Z(\lambda, \nu).
\end{align*}
<!-- -->
For the second derivative,
<!-- -->
\begin{align*}
&\frac{\partial^2}{\partial \lambda^2} \log Z(\lambda, \nu)
= \frac{ Z(\lambda, \nu) \frac{\partial^2}{\partial \lambda^2} Z(\lambda, \nu) - [\frac{\partial}{\partial \lambda} Z(\lambda, \nu)]^2}{ Z(\lambda, \nu)^2 }
= \frac{1}{Z(\lambda, \nu)} \sum_{y=0}^\infty y(y-1) \frac{\lambda^{y-2}}{(y!)^\nu} - \left[ \frac{\E(Y)}{\lambda} \right]^2 \\
&\iff \lambda^2 \frac{\partial^2}{\partial \lambda^2} \log Z(\lambda, \nu) = \E[Y(Y-1)] - [\E(Y)]^2 = \Var(Y) - \E(Y) \\
&\iff \Var(Y) = \lambda^2 \frac{\partial^2}{\partial \lambda^2} \log Z(\lambda, \nu) + \E(Y).
\end{align*}
<!-- -->
Therefore, we may use first and second derivatives of $\log Z(\lambda, \nu)$, taken with respect to $\lambda$, to compute $\E(Y)$ and $\Var(Y)$. The `ecmp` and `vcmp` functions implement this computation of the expectation and variance, respectively. The control object is used to determine whether to use the truncation or differentiation approach. Condition \eqref{eqn:can_approx} is used to determine whether to use the truncation approach (if false) or differentiation approach (if true).
```{r}
ecmp(lambda = 10, nu = 1.2)
ecmp(lambda = 1.5, nu = 0.5)
ecmp(lambda = 1.5, nu = 0.05)
ecmp(lambda = 1.5, nu = 0.05, control = get.control(hybrid.tol = 1e-10))
ecmp(lambda = 1.5, nu = 0.05, control = get.control(hybrid.tol = 1e10))
```
```{r}
vcmp(lambda = 10, nu = 1.2)
vcmp(lambda = 1.5, nu = 0.5)
vcmp(lambda = 1.5, nu = 0.05)
vcmp(lambda = 1.5, nu = 0.05, control = get.control(hybrid.tol = 1e-10))
vcmp(lambda = 1.5, nu = 0.05, control = get.control(hybrid.tol = 1e10))
```
Provided that an enormously large truncation value $M$ is not required, we may compute other moments by truncated sums using `tcmp`.
```{r}
M = tcmp(lambda = 1.5, nu = 0.05)
print(M)
xx = seq(0, M)
sum(xx^3 * dcmp(xx, lambda, nu)) # E(X^3)
sum(xx^4 * dcmp(xx, lambda, nu)) # E(X^4)
```
# Zero-Inflated Conway-Maxwell Poisson Distribution
\label{sec:zicmp}
Let $S \sim \text{Bernoulli}(p)$ and $T \sim \text{CMP}(\lambda, \nu)$ be independent random variables. Then $Y = (1-S) T$ follows a Zero-Inflated Conway-Maxwell Poisson distribution $\text{ZICMP}(\lambda, \nu, p)$ with density
<!-- -->
\begin{align*}
f(y \mid \lambda, \nu, p) = (1 - p) \frac{\lambda^{y}}{(y!)^{\nu} Z(\lambda, \nu)}
+ p \cdot \ind(y = 0), \quad y \in \mathbb{N}.
\end{align*}
<!-- -->
Like CMP, several interesting special cases are obtained.
a. Taking $\nu = 1$ corresponds to Zero-Inflated Poisson $\text{ZIP}(\lambda, p)$ with density $f(y \mid \lambda, p) = (1 - p) e^{-\lambda} \lambda^{y} / y! + p \cdot \ind(y = 0)$.
a. When $\lambda \in (0,1)$ and $\nu \rightarrow 0$, $\text{ZICMP}(\lambda, \nu)$ converges to a Zero-Inflated Geometric distribution with density $f(y \mid \lambda, p) = (1 - p) (1 - \lambda) \lambda^y + p \cdot \ind(y = 0)$ for $y \in \mathbb{N}$.
a. When $\nu \rightarrow \infty$, $\text{ZICMP}(\lambda, \nu, p)$ converges to a "Zero-Inflated Bernoulli" distribution with density
<!-- -->
\begin{math}
f(y \mid \lambda, p) = (1 - p) \left[ \lambda/(1+\lambda) \right]^y \left[ 1/(1+\lambda) \right]^{1-y} + p \cdot \ind(y = 0).
\end{math}
<!-- -->
which remains a Bernoulli distribution with an adjusted success probability. Here the $\lambda$ and $p$ parameters are not individually identifiable. Therefore, users may want to avoid zero-inflation in CMP data analyses with extreme underdispersion.
a. Finally, $p = 0$ yields $\text{CMP}(\lambda, \nu)$.
## Density, Generation, CDF, and Quantile Functions
There is a close relationship between the CMP and ZICMP distributions, as we have seen from construction of ZICMP. The relationship between the CMP densities and variate generation mechanisms was given earlier in this section. Denote $F(x \mid \lambda, \nu)$ and $F(x \mid \lambda, \nu, p)$ as the CDFs of $\text{CMP}(\lambda, \nu)$ and $\text{ZICMP}(\lambda, \nu, p)$, respectively. We have
<!-- -->
\begin{align*}
F(x \mid \lambda, \nu, p) = (1-p) F(x \mid \lambda, \nu) + p \cdot \ind(x \geq 0).
\end{align*}
<!-- -->
For a given probability $\phi \in [0,1]$, the associated CMP and ZICMP quantile functions are related via
<!-- -->
\begin{align}
F^{-}(\phi \mid \lambda, \nu, p)
&= \inf\{ x \in \mathbb{N} : F(x \mid \lambda, \nu, p) \geq \phi \} \nonumber \\
&= \inf\{ x \in \mathbb{N} : (1-p) F(x \mid \lambda, \nu) + p \cdot \ind(x \geq 0) \geq \phi \} \nonumber \\
&= \inf\{ x \in \mathbb{N} : F(x \mid \lambda, \nu) \geq (\phi - p) / (1 - p) \} \nonumber \\
&= F^{-}\left( \frac{\phi - p}{1 - p} \mid \lambda, \nu \right).
\label{eqn:zicmp-quantiles}
\end{align}
The respective functions for ZICMP density, variate generation, CDF, and quantile functions are `dzicmp`, `rzicmp`, `pzicmp` , and `qzicmp`. They make use of the CMP implementation described in Section \ref{sec:cmp} such as the criteria to either truncate or approximate the normalizing constant.
```{r}
qq = seq(0, 0.95, length.out = 20)
rzicmp(20, lambda = 1.5, nu = 0.2, p = 0.25)
dzicmp(c(0, 1, 2), lambda = 1.5, nu = 0.2, p = 0.25)
pzicmp(c(0, 1, 2), lambda = 1.5, nu = 0.2, p = 0.25)
qzicmp(qq, lambda = 1.5, nu = 0.2, p = 0.25)
```
As with `qcmp`, the `qzicmp` function is computed by the truncation method, and cannot accurately compute quantiles very close to 1. More specifically, from the CMP distribution, \eqref{eqn:cmp-quantile-limit} gives
<!-- -->
\begin{align*}
M &> F^{-}(1 - \epsilon \mid \lambda, \nu) \\
&= F^{-}\left( \frac{\phi_\epsilon - p}{1 - p} \mid \lambda, \nu \right) \\
&= F^{-}\left( \phi_\epsilon \mid \lambda, \nu, p \right)
\end{align*}
<!-- -->
where $\phi_\epsilon = (1 - \epsilon)(1-p) + p$ and the last equality uses \eqref{eqn:zicmp-quantiles}. This motivates a warning from `qzicmp` when the argument is larger than $\phi_\epsilon$.
```{r}
tryCatch({
qzicmp(0.9999999, lambda = 1.5, nu = 0.5, p = 0.5)
}, warning = print_warning)
```
Let us repeat the sanity check from Section \ref{sec:cmp-dist} to ensure that the empirical density values, cumulative probabilities, and quantiles line up with the ones computed via `dzicmp`, `pzicmp`, and `qzicmp`.
```{r, fig.width = 3, fig.height = 3, fig.align = "center", prompt = FALSE, fig.show = "hold"}
library(ggplot2)
n = 100000
lambda = 0.5
nu = 0.1
p = 0.5
x = rzicmp(n, lambda, nu, p)
xx = seq(-1, max(x)) ## Include -1 to ensure it gets probability zero
qq = seq(0, 0.99, length.out = 100)
fx = dzicmp(xx, lambda, nu, p)
px = pzicmp(xx, lambda, nu, p)
qx = qzicmp(qq, lambda, nu, p)
qx_emp = quantile(x, probs = qq)
```
```{r, fig.width = 3, fig.height = 3, prompt = FALSE, fig.cap = "Empirical density of draws (histogram) versus density computed via the dzicmp function (points)."}
ggplot() +
geom_bar(data = data.frame(x = x), aes(x = x, y = ..prop..), fill = "NA",
col = "black") +
geom_point(data = data.frame(x = xx[-1], fx = fx[-1]), aes(x, fx)) +
ylab("Density") +
theme_bw()
```
```{r, fig.width = 3, fig.height = 3, prompt = FALSE, fig.cap = "Empirical CDF of draws (solid line) versus CDF computed via the pzicmp function (points)."}
ggplot() +
stat_ecdf(data = data.frame(x = x), aes(x), geom = "step") +
geom_point(data = data.frame(x = xx, px = px), aes(x, px)) +
ylab("Probability") +
theme_bw()
```
```{r, fig.width = 3, fig.height = 3, prompt = FALSE, fig.cap = "Empirical quantiles of draws (`o`) versus quantiles computed via the qzicmp function (`+`)."}
ggplot() +
geom_point(data = data.frame(x = qq, qx_emp = qx_emp), aes(qq, qx_emp), pch = 1) +
geom_point(data = data.frame(x = qq, qx = qx), aes(qq, qx), pch = 3) +
xlab("Probability") +
ylab("Quantile") +
theme_bw()
```
## Expectation and Variance
The expected value and variance of $Y \sim \text{ZICMP}(\lambda, \nu, p)$ are apparent from the construction $Y = (1-S) T$ given earlier in this section. Namely,
<!-- -->
\begin{align*}
\E(Y) = (1-p) \E(T)
\quad \text{and} \quad
\Var(Y) = (1-p) \left\{ \Var(T) + p[\E(T)]^2 \right\}
\end{align*}
<!-- -->
may be obtained using formulas for iterated conditional expectations and variances. They are evaluated in `COMPoissonReg` using the `ezicmp` and `vzicmp` functions respectively. These functions make use of the `ecmp` and `vcmp` functions described in Section \ref{sec:cmp-ev} to compute $\E(T)$ and $\Var(T)$.
```{r}
ezicmp(lambda = 1.5, nu = 0.5, p = 0.1)
ezicmp(lambda = 1.5, nu = 0.5, p = c(0.1, 0.2, 0.5))
```
```{r}
vzicmp(lambda = 1.5, nu = 0.5, p = 0.1)
vzicmp(lambda = 1.5, nu = 0.5, p = c(0.1, 0.2, 0.5))
```
# Regression Modeling with CMP and ZICMP
\label{sec:reg}
Suppose there are $n$ subjects with outcomes $y_1, \ldots, y_n \in \mathbb{N}$ and covariates $\vec{x}_i \in \mathbb{R}^{d_1}$, $\vec{s}_i \in
\mathbb{R}^{d_2}$, and $\vec{w}_i \in \mathbb{R}^{d_3}$ for $i = 1, \ldots, n$. The `COMPoissonReg` package fits both CMP and ZICMP regression models.
The CMP regression model assumes that
<!-- -->
\begin{align*}
Y_i \indep \text{CMP}(\lambda_i, \nu_i), \quad i = 1, \ldots, n,
\end{align*}
<!-- -->
where $\log \lambda_i = \vec{x}_i^\top \vec{\beta}$ and $\log \nu_i = \vec{s}_i^\top \vec{\gamma}$. Writing $\btheta = (\vec{\beta}, \vec{\gamma})$, the likelihood is
<!-- -->
\begin{align}
L(\btheta) =
\prod_{i=1}^n \left[
\frac{\lambda_i^{y_i}}{(y_i!)^{\nu_i} Z(\lambda_i, \nu_i)}
\right].
\label{eqn:likelihood-cmp}
\end{align}
<!-- -->
The ZICMP regression model assumes that
<!-- -->
\begin{align*}
Y_i \indep \text{ZICMP}(\lambda_i, \nu_i, p_i), \quad i = 1, \ldots, n,
\end{align*}
<!-- -->
where $\log \lambda_i = \vec{x}_i^\top \vec{\beta}$, $\log \nu_i = \vec{s}_i^\top \vec{\gamma}$, and $\logit p_i = \vec{w}_i^\top \vec{\zeta}$. Writing $\btheta = (\vec{\beta}, \vec{\gamma}, \vec{\zeta})$, the likelihood is
<!-- -->
\begin{align}
L(\btheta) =
\prod_{i=1}^n \left[(1 - p_i)
\frac{\lambda_i^{y_i}}{(y_i!)^{\nu_i} Z(\lambda_i, \nu_i)}
+ p_i \ind(y_i = 0)
\right].
\label{eqn:likelihood-zicmp}
\end{align}
<!-- -->
We will write $d = d_1 + d_2 + d_3$ for the total dimension of $\btheta$. The $n \times d_1$ design matrix whose rows consist of $\vec{x}_i$ will be denoted $\vec{X}$. Similarly, we will write $\vec{S}$ and $\vec{W}$ as the $n \times d_2$ and $n \times d_3$ design matrices constructed from $\vec{s}_i$ and $\vec{w}_i$, respectively. The `glm.cmp` function provides a formula interface to fit models of both types: \eqref{eqn:likelihood-cmp} and \eqref{eqn:likelihood-zicmp}.
```{r, eval = FALSE, prompt = FALSE}
out = glm.cmp(formula.lambda, formula.nu = ~ 1, formula.p = NULL,
data = NULL, init = NULL, fixed = NULL, control = NULL, ...)
```
The interface contains three formulas: `formula.lambda` specifies the regression $\vec{x}_i^\top \vec{\beta}$ used for $\lambda_i$, while `formula.nu` and `formula.p` correspond to $\vec{s}_i^\top \vec{\gamma}$ for $\nu_i$ and $\vec{w}_i^\top \vec{\zeta}$ for $p_i$, respectively. ZICMP regression is utilized when `formula.p` is set to something other than its default `NULL` value; otherwise, CMP regression is assumed. The `data` argument is used to pass a `data.frame` explicitly rather than having the data be read from the local environment. The `init`, `fixed`, and `control` arguments and associated helper functions are described below.
The `init` argument represents an initial value for the optimizer. The following functions can be used to construct it.
```{r}
get.init(beta = c(1, 2, 3), gamma = c(-1, 1), zeta = c(-2, -1))
get.init.zero(d1 = 3, d2 = 2, d3 = 2)
```
The `fixed` argument is used to specify indices of the three coefficients which will remain fixed at their initial value during optimization.
```{r}
get.fixed(beta = c(1L, 2L), gamma = c(1L))
```
The specification above requests the first two elements of `beta` and the first element of `gamma` to be fixed. Notice that indices must be integers and that the default value is an empty integer vector which is interpreted as "no elements are fixed". The `fixed` argument can usually be disregarded but may be useful in some circumstances; an example is given in Section \ref{sec:zicmp-reg}.
Specifying the elements of `init` and `fixed` may be somewhat awkward with the formula interface, as they require knowledge of how formulas will be expanded into design matrices and coefficients. It can be helpful to produce the design matrices using R's `model.matrix` function.
```{r, eval=FALSE}
model.matrix(formula.lambda, data = data)
model.matrix(formula.nu, data = data)
model.matrix(formula.p, data = data)
```
The `control` argument has been introduced in Section \ref{sec:cmp}; regression modeling makes use of several additional arguments. `COMPoissonReg` uses `optim` to compute the maximum likelihood estimate (MLE) $\hat{\btheta}$ for $\btheta$ under the specified model. Several controls are provided to influence how `COMPoissonReg` invokes `optim`; here are their default values.
```{r}
control = getOption("COMPoissonReg.control")
control$optim.method
control$optim.control
```
The element `optim.method` is a string which is passed as the `method` argument to `optim`, while `optim.control` is a list passed as the `control` argument of `optim`. Note that, for the latter, if an entry is given for `fnscale`, it will be ignored and overwritten internally by `COMPoissonReg`.
The covariance of $\hat{\btheta}$ is estimated by $\hat{\vec{V}}(\hat{\btheta}) = -[\vec{H}(\hat{\btheta})]^{-1}$, where $\vec{H}(\btheta) = \partial^2 \log L(\btheta) / [\partial \btheta \partial \btheta^\top]$ is the Hessian of the log-likelihood computed by `optim`. The standard error for coefficient $\theta_j$ in $\btheta$ is then obtained as the square root of the $j$th diagonal of $\hat{\vec{V}}(\hat{\btheta})$.
We will now illustrate use of the regression tools using two examples whose data are included in the package. Note that these demonstrations are not intended to be complete regression analyses, and results may be slightly different than previously published analyses due to differences in the computations.
## CMP Regression
\label{sec:cmp-reg}
### Freight Dataset
\label{sec:cmp-reg-freight}
The `freight` dataset [@KutnerEtAl2003] was analyzed using CMP regression by @SellersShmueli2010 and found to exhibit underdispersion. The data describe $n = 10$ instances where 1,000 ampules were transported via air shipment. The outcome of interest is the variable `broken` which describes the number of broken ampules in each shipment. The covariate `transfers` describes the number of times the carton was transferred from one aircraft to another during the shipment.
Let us load and view the dataset.
```{r}
data(freight)
print(freight)
```
Before fitting a CMP regression, let us fit a Poisson regression model $Y_i \indep \text{Poisson}(\lambda_i)$ with
<!-- -->
\begin{align*}
\log \lambda_i = \beta_0 + \beta_1 \cdot \text{transfers}_i.
\end{align*}
<!-- -->
This can be carried out with the standard `glm` function.
```{r}
glm.out = glm(broken ~ transfers, data = freight, family = poisson)
summary(glm.out)
```
### CMP Regression
\label{sec:cmp-reg-initial}
Next, let us fit a similar CMP regression model with
<!-- -->
\begin{align*}
&\log \lambda_i = \beta_0 + \beta_1 \cdot \text{transfers}_i, \\
&\log \nu_i = \gamma_0,
\end{align*}
<!-- -->
using only an intercept for $\nu_i$.
```{r}
cmp.out = glm.cmp(broken ~ transfers, data = freight)
print(cmp.out)
```
The coefficients used in the $\lambda_i$ formula are prefixed with an `X:` label, while an `S:` label is used for coefficients of the $\nu_i$ formula. Notice that estimates for `X:` coefficients from the CMP fit are dissimilar to those from the Poisson fit; this may occur when the estimate of $\nu$ deviates from the value of 1. Similarly to the `glm` output, the output of `glm.cmp` displays several quantities for each coefficient $\theta_j$, $j = 1, \ldots, d$: a point estimate $\hat{\theta}_j$, an associated standard error $\widehat{\text{SE}}(\hat{\theta}_j)$, a z-value $z_j = \hat{\theta}_j / \widehat{\text{SE}}(\hat{\theta}_j)$, and a p-value $2\Phi(-|z_j|)$ for the test $H_0: \theta_j = 0$ versus $H_1: \theta_j \neq 0$. Here, $\Phi$ is the CDF of the standard normal distribution. Because an intercept-only formula was specified for $\nu_i$, $\hat{\nu} = \exp(\hat{\gamma})$ does not vary with $i$ and its estimate and associated standard error are added to the display. Here we see evidence of underdispersion with $\hat{\nu} > 1$. A test for equidispersion is displayed to determine whether there is a significant amount of over or underdispersion in the data. In particular, a likelihood ratio test is used to decide whether $H_0: \vec{\gamma} = \vec{0}$ versus $H_0: \vec{\gamma} \neq \vec{0}$. The test statistic is displayed along with the degrees of freedom and associated p-value. Here we have fairly strong evidence to reject the null hypothesis of equidispersion.
The AIC for the CMP model `cmp.out` shows some improvement over that of `glm.out`. Let us also consider a slope coefficient for the $\nu$ component using
<!-- -->
\begin{align*}
\log \nu_i = \gamma_0 + \gamma_1 \cdot \text{transfers}_i.
\end{align*}
<!-- -->
via the following call to `glm.cmp`.
```{r}
cmp2.out = glm.cmp(broken ~ transfers, formula.nu = ~ transfers, data = freight)
print(cmp2.out)
```
Model `cmp2.out` provides a slight improvement over `cmp.out` in AIC and BIC, but we may prefer `cmp.out` in the interest of simplicity.
### Adjustments to Optim
\label{sec:cmp-reg-optim}
To gain some insight into the optimization, we may wish to increase the trace level, which can be done as follows.
```{r}
control = get.control(optim.control = list(maxit = 5, trace = 3, REPORT = 1))
cmp3.out = glm.cmp(broken ~ transfers, data = freight, control = control)
```
Data from the local environment may be passed to the `glm.cmp` function without explicitly using the `data` argument.
```{r, results = 'hide'}
y = freight$broken
x = freight$transfers
glm.cmp(y ~ x)
```
### Offset Term
\label{sec:cmp-reg-offset}
In a count regression model, it may be desirable to include offset terms such as
<!-- -->
\begin{align*}
\log \lambda_i = \vec{x}_i^\top \vec{\beta} + \text{offx}_i, \quad
\log \nu_i = \vec{s}_i^\top \vec{\beta} + \text{offs}_i.
\end{align*}
<!-- -->
An `offset` term may be used in the formula interface to accomplish this.
```{r, results = 'hide'}
freight$offx = 13
freight$offs = 1
glm.cmp(broken ~ transfers + offset(offx), data = freight)
glm.cmp(broken ~ transfers + offset(offx), formula.nu = ~1 + offset(offs), data = freight)
```
For users who wish to bypass the formula interface and prepare the $\vec{X}$ and $\vec{S}$ design matrices manually, a "raw" interface to the regression functionality is also provided.
```{r, results = 'hide'}
y = freight$broken
X = model.matrix(~ transfers, data = freight)
S = model.matrix(~ 1, data = freight)
offs = get.offset(x = rep(13, nrow(freight)), s = rep(1, nrow(freight)))
cmp.raw.out = glm.cmp.raw(y, X, S, offset = offs)
```
### Accessor Functions
\label{sec:cmp-reg-accessors}
Several accessors are provided to extract results from the output object.
```{r}
logLik(cmp.out) ## Log-likelihood evaluated at MLE.
AIC(cmp.out) ## AIC evaluated at MLE.
BIC(cmp.out) ## BIC evaluated at MLE.
coef(cmp.out) ## Estimates of theta as a flat vector
coef(cmp.out, type = "list") ## Estimates of theta as a named list
vcov(cmp.out) ## Estimated covariance matrix of theta hat
sdev(cmp.out) ## Standard deviations from vcov(...) diagonals
sdev(cmp.out, type = "list") ## Standard deviations as a named list
```
The `predict` function computes several useful quantities evaluated at the estimate $\hat{\btheta}$. The argument default `type = "response"` produces a vector of estimates of the response $\hat{y}_i = \E(Y_i)$ for $i = 1, \ldots, n$ using the method described in Section \ref{sec:cmp-ev}. The argument `type = "link"` produces a `data.frame` with columns for the linked parameters $\lambda_i$ and $\nu_i$.
```{r}
predict(cmp.out)
predict(cmp.out, type = "link")
```
Note that the estimated `nu` values are equal for all observations because the model assumed only an intercept term for the dispersion component.
We can also use `predict` on new covariate values. Note that models fit with the formula interface expect the new data to be provided as a `data.frame` which is interpreted using the formula used to fit the model. If the raw interface was used to fit the model, use the `get.modelmatrix` function to specify design matrices to use for prediction.
```{r, fig.width = 3, fig.height = 3, fig.align = "center", prompt=FALSE}
# Prepare new data to fit by formula interface
new.df = data.frame(transfers = 0:10)
# Prepare new data to fit by raw interface
X = model.matrix(~ transfers, data = new.df)
S = model.matrix(~ 1, data = new.df)
new.data = get.modelmatrix(X = X, S = S)
# Pass new data to model from by formula interface
y.hat.new = predict(cmp.out, newdata = new.df)
# Pass new data to model from by raw interface
y.hat.new = predict(cmp.raw.out, newdata = new.data)
# Compute predictions for links
predict.out = predict(cmp.out, newdata = new.df, type = "link")
# Plot predictions
ggplot() +
geom_point(data = new.df, aes(transfers, y.hat.new)) +
xlab("Number of transfers") +
ylab("Predicted number broken") +
theme_bw()
```
```{r}
print(y.hat.new)
print(predict.out)
```
The `leverage` function computes the diagonal entries of a "hat" matrix which can be formulated in CMP regression. These can be used to diagnose influential observations. For details, see Section 3.6 of @SellersShmueli2010.
```{r}
leverage(cmp.out)
```
The `residuals` function provides either raw (the default) or quantile-based residuals [@DunnSmyth1996]. In a CMP regression setting, raw residuals $y_i - \hat{y}_i$ generally do not work well with traditional regression diagnostics, such as Q-Q plots. Quantile-based residuals often produce interpretable diagnostics; however, a random element is used in the computation of quantile residuals for discrete distributions. This aids interpretability but gives slightly different residual values each time they are computed. See @DunnSmyth1996 for details.
```{r}
res.raw = residuals(cmp.out)
res.qtl = residuals(cmp.out, type = "quantile")
```
Pearson residuals may be preferred over raw residuals for diagnostics; these can be obtained by standardizing raw residuals using leverage values and variance estimates.
```{r}
link.hat = predict(cmp.out, type = "link")
vv = vcmp(link.hat$lambda, link.hat$nu)
hh = leverage(cmp.out)
res.pearson = res.raw / sqrt(vv*(1-hh))
```
For each type of residual---raw, Pearson, and quantile---we now plot fitted values versus residuals and Q-Q plots.
```{r, fig.width = 3, fig.height = 3, fig.align = "center", prompt = FALSE, fig.show = "hold"}
plot.fit.res = function(y.hat, res) {
ggplot(data.frame(y = y.hat, res = res)) +
geom_point(aes(y, res)) +
xlab("Fitted Value") +
ylab("Residual Value") +
theme_bw() +
theme(plot.title = element_text(size = 10))
}
plot.qq.res = function(res) {
ggplot(data.frame(res = res), aes(sample = res)) +
stat_qq() +
stat_qq_line() +
theme_bw() +
theme(plot.title = element_text(size = 10))
}
y.hat = predict(cmp.out)
plot.fit.res(y.hat, res.raw) +
ggtitle("Fitted Values vs. Raw Residuals")
plot.qq.res(res.raw) +
ggtitle("Q-Q Plot of Raw Residuals")
plot.fit.res(y.hat, res.pearson) +
ggtitle("Fitted Values vs. Pearson Residuals")
plot.qq.res(res.pearson) +
ggtitle("Q-Q Plot of Pearson Residuals")
plot.fit.res(y.hat, res.qtl) +
ggtitle("Fitted Values vs. Quantile Residuals")
plot.qq.res(res.qtl) +
ggtitle("Q-Q Plot of Quantile Residuals")
```
In this example, with only `r nrow(freight)` observations, it is difficult to see an advantage of using quantile residuals; the benefit will be more apparent in Section \ref{sec:zicmp-reg}. One benefit of raw residuals is that they may be used to compute a mean-squared error.
```{r}
mean(res.raw^2)
```
To access the results of the equidispersion test shown in the output of `cmp.out`, we may use the `equitest` accessor function.
```{r}
equitest(cmp.out)
```
The `deviance` function computes the deviance quantities $D_i = -2 [\log L_i(\hat{\btheta}) - \log L_i(\tilde{\btheta}_i)]$ for $i = 1, \ldots, n$, where $L_i(\btheta)$ is the term of the likelihood corresponding to the $i$th observation, $\hat{\btheta}$ is the MLE computed under the full likelihood $L(\btheta) = \prod_{i=1}^n L_i(\btheta)$, and $\tilde{\btheta}_i$ is the maximizer of $L_i(\btheta)$.
```{r}
deviance(cmp.out)
```
The `parametric.bootstrap` function carries out a parametric bootstrap with $R$ repetitions. Using the fitted MLE $\hat{\btheta}$, bootstrap samples $\vec{y}^{(r)} = (y_1^{(r)}, \ldots, y_n^{(r)})$ are drawn from the likelihood $L(\hat{\btheta})$ for $r = 1, \ldots, R$. Estimate $\hat{\btheta}^{(r)}$ is fitted from bootstrap sample $\vec{y}^{(r)}$. An $R \times d$ matrix is returned whose $r$th row is $\hat{\btheta}^{(r)}$.
```{r}
cmp.boot = parametric.bootstrap(cmp.out, reps = 100)
head(cmp.boot)
```
We used $R = `r nrow(cmp.boot)`$ in the display above to keep vignette computations small, but a larger number may be desired in practice. Bootstrap repetitions can be used, for example, to compute 95% confidence intervals for each of the coefficients.
```{r}
t(apply(cmp.boot, 2, quantile, c(0.025,0.975)))
```
### Large Covariates
\label{sec:cmp-reg-large-covariates}
Large covariates can present numerical difficulties in fitting CMP regression. We will briefly demonstrate the difficulties and some possible workarounds. First let us generate a new dataset based on a large covariate in the regression for $\lambda_i$.
```{r, prompt=FALSE}
set.seed(1234)
n = 200
x = rnorm(n, 500, 10)
X = cbind(intercept = 1, slope = x)
S = matrix(1, n, 1)
beta_true = c(-0.05, 0.05)
gamma_true = 2
lambda_true = exp(X %*% beta_true)
nu_true = exp(S %*% gamma_true)
y = rcmp(n, lambda_true, nu_true)
```
Notice that the generated counts $y_1, \ldots, y_n$ are relatively small compared to the covariate $x_1, \ldots, x_n$.
```{r}
summary(x)
summary(y)
```
An initial attempt to fit the true data-generating model fails.
```{r}
tryCatch({
glm.cmp(y ~ x, formula.nu = ~ 1)
}, error = print_warning)
```
Internally, the linked rate parameter $\lambda_i = \exp(\beta_0 + \beta_1 x_i)$ may evaluate to `Inf` or become very close to zero as the optimizer moves $\beta_1$ away from zero in a positive or negative direction, respectively. Some possible ways to address this are as follows.
Standardize the covariate to have mean zero and variance one.
```{r}
glm.cmp(y ~ scale(x), formula.nu = ~ 1)
```
Use a logarithmic transformation on the covariate.
```{r}
glm.cmp(y ~ log(x), formula.nu = ~ 1)
```
Change optimization method or other `optim` arguments.
```{r}
control = get.control(optim.method = "BFGS", optim.control = list(maxit = 200))
suppressWarnings({
cmp.out = glm.cmp(y ~ x, formula.nu = ~ 1, control = control)
print(cmp.out)
})
```
In this case, standardization and logarithmic transformation produce a usable fit. Changing the optimization method to `BFGS` allows the optimization to finish, but there are further numerical problems in computing the Hessian for standard errors.
### Large Outcomes
\label{sec:cmp-reg-large-outcomes}
Now consider a generated dataset with large outcomes but a relatively small covariate. This situation can also present numerical difficulties.
```{r, prompt=FALSE}
set.seed(1234)
n = 200
x = runif(n, 1, 2)
X = cbind(intercept = 1, slope = x)
S = matrix(1, n, 1)
beta_true = c(1, 1)
gamma_true = -0.95
lambda_true = exp(X %*% beta_true)
nu_true = exp(S %*% gamma_true)
y = rcmp(n, lambda_true, nu_true)
```
```{r}
summary(x)
summary(y)
```
An initial attempt to fit the data-generating model fails.
```{r}
tryCatch({
glm.cmp(y ~ x, formula.nu = ~ 1)
}, error = print_warning)
```
Informative starting values help the optimizer to initially make progress. True data-generating parameters will not be available in a real data analysis situation but help to illustrate the idea.
```{r}
init = get.init(beta = beta_true, gamma = gamma_true)
glm.cmp(y ~ x, formula.nu = ~ 1, init = init)
```
Without choosing an initial value, changing the optimization method to `Nelder-Mead` and increasing the maximum number of iterations also helps the optimizer find a solution.
```{r}
control = get.control(optim.method = "Nelder-Mead", optim.control = list(maxit = 1000))
glm.cmp(y ~ x, formula.nu = ~ 1, control = control)
```
Note that this solution is different from the previous one; the log-likelihood of the previous one is slightly better.
## ZICMP Regression
\label{sec:zicmp-reg}
### Couple Dataset
\label{sec:zicmp-reg-couple}
The `couple` dataset [@LoeysEtAl2012] was analyzed with ZICMP regression in @SellersRaim2016 and found to exhibit overdispersion. The data concern separation trajectories of $n = 387$ couples. The variable `UPB` records the number of unwanted pursuit behavior perpetrations and is considered the outcome of interest. Included covariates are the binary variable `EDUCATION`, which is 1 if at least a bachelor's degree was attained, and a continuous variable `ANXIETY` which measures anxious attachment. A zero-inflated count model is considered for these data because 246 of the 387 records have an outcome of zero.
Let us load and view the first few records in the dataset.
```{r}
data(couple)
head(couple)
```
As a preliminary model, let us fit a standard Poisson model $Y_i \indep \text{Poisson}(\lambda_i)$ with
<!-- -->
\begin{align*}
\log \lambda_i = \beta_0 + \beta_1 \cdot \text{EDUCATION}_i + \beta_2 \cdot \text{ANXIETY}_i.
\end{align*}
<!-- -->
We may use the standard `glm` function.
```{r}
glm.out = glm(UPB ~ EDUCATION + ANXIETY, data = couple, family = poisson)
summary(glm.out)
```
### ZICMP Regression
\label{sec:zicmp-reg-initial}
Now consider a ZICMP regression with
<!-- -->
\begin{align*}
&\log \lambda_i = \beta_0 + \beta_1 \cdot \text{EDUCATION}_i + \beta_2 \cdot \text{ANXIETY}_i, \\
&\log \nu_i = \gamma_0, \\
&\logit p_i = \zeta_0 + \zeta_1 \cdot \text{EDUCATION}_i + \zeta_2 \cdot \text{ANXIETY}_i.
\end{align*}
<!-- -->
We use the `glm.cmp` function as follows.
```{r}
zicmp0.out = glm.cmp(UPB ~ EDUCATION + ANXIETY,
formula.nu = ~ 1,
formula.p = ~ EDUCATION + ANXIETY,
data = couple)
print(zicmp0.out)
```
There are now three sets of coefficients reported in the output: the `X:`, `S:`, and `W:` prefixes label estimates for the $\lambda_i$, $\nu_i$, and $p_i$ formulas respectively.
### Comments about Results
The AIC of the ZICMP model is drastically smaller than the Poisson model, indicating a greatly improved fit. However, there are signs of possible numerical issues. The estimate for $\gamma_0$ is a large negative number, but with an extremely large associated SE, which suggests that the effect may not be statistically significant. On the other hand, the estimate of $\nu$ is nearly zero with a small SE, which suggests that the dispersion parameter is indeed statistically significant. On the surface, this seems to be a contradiction.
The issue is that the Hessian of the log-likelihood becomes insensitive to small changes in $\gamma_0$ when $\lambda_i < 1$ and $\gamma_0$ is a large negative number. Let us first verify that the estimates for $\lambda_i$ are indeed smaller than 1.
```{r}
pred.out = predict(zicmp0.out, type = "link")
summary(pred.out$lambda)
```
To show the insensitivity of the Hessian, let us consider a simpler setting with $Y \sim \text{CMP}(\lambda, \nu)$, $\lambda = \exp\{-0.25\} \approx 0.7788$ fixed, and $\nu = \exp\{\gamma_0\}$. We then have log-density
<!-- -->
\begin{align*}
\log f(y \mid \gamma_0) = y \log \lambda - e^{\gamma_0} \log(y!) - \log Z(\lambda, e^{\gamma_0}),
\end{align*}
<!-- -->
with first derivative and second derivatives, respectively,
<!-- -->
\begin{align*}
&\frac{\partial}{\partial \gamma_0} \log f(y \mid \gamma_0)
= -e^{\gamma_0} \log(y!) - \frac{\partial}{\partial \gamma_0} \log Z(\lambda, e^{\gamma_0}), \\
&\frac{\partial^2}{\partial \gamma_0^2} \log f(y \mid \gamma_0)
= -e^{\gamma_0} \log(y!) - \frac{\partial^2}{\partial \gamma_0^2} \log Z(\lambda, e^{\gamma_0}).
\end{align*}
<!-- -->
For a given value of $y$, $-e^{\gamma_0} \log(y!)$ approaches zero as $\gamma_0$ decreases. Therefore, let us focus on the function
<!-- -->
\(
g(\gamma_0) = -\log Z(\lambda, e^{\gamma_0})
\)
<!-- -->
and its first and second derivatives. The following code illustrates their behavior.
```{r, prompt=FALSE}
library(numDeriv)
g = function(gamma0) {
-ncmp(lambda = exp(-0.25), nu = exp(gamma0), log = TRUE)
}
dat = data.frame(gamma0 = seq(0, -13), g = NA, d_g = NA, d2_g = NA)
for (j in 1:nrow(dat)) {
gamma0 = dat$gamma0[j]
dat$g[j] = g(gamma0)
dat$d_g[j] = numDeriv::grad(func = g, x = gamma0)
dat$d2_g[j] = numDeriv::hessian(func = g, x = gamma0)
}
```
Here is the result.
```{r}
print(dat)
```
Notice that $g(\gamma_0)$ approaches a limit as $\gamma_0 \rightarrow -\infty$, which coincides with the CMP distribution approaching a Geometric distribution. It may not be surprising that the first and second derivatives approach zero accordingly. This explains the large SE for $\gamma_0$ in the model `zicmp0.out`. With estimates tending to this region of the parameter space, it may be preferable to fix fix $\gamma_0$ at a value such as $-\infty$, which will be done in the next section.
### Fixed Coefficients
\label{sec:zicmp-reg-fixed}
Our attempt to fit the previous model strongly tended to the Zero-Inflated Geometric special case of ZICMP, but SEs computed via the Hessian become large in this region. In this section, we fix $\gamma_0$ at the extreme $-\infty$ and fit the remaining coefficients. Let us use the raw interface to do this.
```{r, prompt=FALSE}
init = coef(zicmp0.out, type = "list")
y = couple$UPB
X = model.matrix(~ EDUCATION + ANXIETY, data = couple)
S = model.matrix(~ 1, data = couple)
W = model.matrix(~ EDUCATION + ANXIETY, data = couple)
control = get.control(optim.method = "BFGS")
zicmp.out = glm.zicmp.raw(y, X, S, W,
init = get.init(beta = c(-1,0,0), gamma = -Inf, zeta = c(-1,0,0)),
fixed = get.fixed(gamma = 1L), control = control)
```
```{r}
print(zicmp.out)
```
Notice that an additional `Fixed` column has been added to the display, indicating that the coefficient `gamma` is fixed. Furthermore, its `Estimate` column is set to the initial value and the columns `SE`, `z-value`, and `p-value` are set to `NA`. This model achieves a similar log-likelihood value as our first attempt using `L-BFGS-B` but does not exhibit signs of numerical issues.
### Accessor Functions
\label{sec:zicmp-reg-accessors}
Here are several of the accessors provided to extract model outputs.
```{r}
logLik(zicmp.out) ## Log-likelihood evaluated at MLE
AIC(zicmp.out) ## AIC evaluated at MLE
BIC(zicmp.out) ## BIC evaluated at MLE
coef(zicmp.out) ## Estimates of theta as a flat vector
coef(zicmp.out, type = "list") ## Estimates of theta as a named list
vcov(zicmp.out) ## Estimated covariance matrix of theta hat
sdev(zicmp.out) ## Standard deviations from vcov(...) diagonals
sdev(zicmp.out, type = "list") ## Standard deviations as a named list
equitest(zicmp0.out) ## Likelihood ratio test for H_0: gamma = 0
tryCatch({ ## An error is thrown for model with fixed gamma
equitest(zicmp.out)
}, error = print_warning)
```
Because we fixed $\gamma = -\infty$ to obtain `zicmp0.out`, the `equitest` function throws an error instead of proceeding with an equidispersion test.
The `predict` function behaves similarly as in CMP regression; however, the `link` type here also includes a column with the estimated $p_i$.
```{r}
y.hat = predict(zicmp.out) ## Fitted values based on ecmp
link.hat = predict(zicmp.out, type = "link")
head(y.hat)
head(link.hat)
```
In this example, we can see the benefit of using quantile residuals rather than raw residuals for diagnostic plots. The functions `plot.fit.res` and `plot.qq.res` have been defined in Section \ref{sec:cmp-reg}.
```{r, fig.width = 3, fig.height = 3, fig.align = "center", prompt = FALSE, fig.show = "hold"}
res.raw = residuals(zicmp.out, type = "raw")
res.qtl = residuals(zicmp.out, type = "quantile")
plot.fit.res(y.hat, res.raw) +
ggtitle("Fitted Values vs. Raw Residuals")
plot.qq.res(res.raw) +
ggtitle("Q-Q Plot of Raw Residuals")
plot.fit.res(y.hat, res.qtl) +
ggtitle("Fitted Values vs. Quantile Residuals")
plot.qq.res(res.qtl) +
ggtitle("Q-Q Plot of Quantile Residuals")
```
Here is an example of computing fitted values for new covariate data.
```{r, prompt = FALSE}
new.df = data.frame(EDUCATION = round(1:20 / 20), ANXIETY = seq(-3,3, length.out = 20))
X.new = model.matrix(~ EDUCATION + ANXIETY, data = new.df)
S.new = model.matrix(~ 1, data = new.df)
W.new = model.matrix(~ EDUCATION + ANXIETY, data = new.df)
new.data = get.modelmatrix(X.new, S.new, W.new)
# For model fit using raw interface, use get.modelmatrix to prepare new design
# matrices, offsets, etc
y.hat.new = predict(zicmp.out, newdata = new.data)
# For models fit with the formula interface, pass a data.frame with the same
# structure as used in the fit.
y.hat.new = predict(zicmp0.out, newdata = new.df)
```
```{r}
print(y.hat.new)
```
As with CMP regression, a `parametric.bootstrap` function is provided for convenience to obtain a bootstrap sample $\hat{\btheta}^{(r)}$ of $\btheta$ based on the estimate $\hat{\btheta}$. Because it is too time consuming to run this example within the vignette, we show the code without output below. As in Section \ref{sec:cmp-reg}, we consider using the bootstrap samples to construct a 95% confidence interval for each of the coefficients.
```{r, eval = FALSE}
zicmp.boot = parametric.bootstrap(zicmp.out, reps = 100)
head(zicmp.boot)
apply(zicmp.boot, 2, quantile, c(0.025,0.975))
```
# Acknowledgements {.unlisted .unnumbered}
We acknowledge Thomas Lotze for significant contributions to the initial development of the `COMPoissonReg` package and for service as initial maintainer on CRAN. We thank Darcy Steeg Morris, Eric Slud, and Tommy Wright for reviewing the manuscript. We are grateful to the users of `COMPoissonReg` for their interest and for bringing to light some of the issues which have been considered in this work.
# References
|
/scratch/gouwar.j/cran-all/cranData/COMPoissonReg/inst/doc/vignette.Rmd
|
---
title: "COMPoissonReg: Usage, the Normalizing Constant, and Other Computational Details"
author:
- Andrew M. Raim^[
<[email protected]>,
Center for Statistical Research & Methodology,
U.S. Census Bureau,
Washington, DC, 20233, U.S.A.
**Disclaimer**`:` This document is released to inform interested parties of
ongoing research and to encourage discussion of work in progress. Any views
expressed are those of the authors and not those of the U.S. Census Bureau.
]
- Kimberly F. Sellers^[
<[email protected]>,
Center for Statistical Research & Methodology,
U.S. Census Bureau and
Department of Mathematics and Statistics,
Georgetown University,
Washington, DC, 20057, U.S.A.
]
abstract: >
`COMPoissonReg` is an R package which supports Conway-Maxwell Poisson (CMP)
and Zero-Inflated Conway-Maxwell Poisson (ZICMP) models. This vignette
describes fundamental computational details, especially those involving the
normalizing constant and related quantities. The CMP normalizing constant
does not have a general closed form; furthermore, it requires care to handle
numerically as its magnitude can vary greatly with changes in the parameters.
Primary `COMPoissonReg` functions are demonstrated with examples, including
those implementing basic distribution functions and regression modeling.
bibliography: references.bib
date: "`r format(Sys.time(), '%Y-%m-%d')`"
output:
pdf_document:
citation_package: natbib
number_sections: yes
toc: yes
toc_depth: 3
extra_dependencies:
common: null
keep_tex: yes
urlcolor: blue
linkcolor: blue
citecolor: blue
vignette: >
%\VignetteIndexEntry{COMPoissonReg: Usage, the Normalizing Constant, and Other Computational Details}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
prompt = TRUE,
comment = ""
)
```
```{r setup, include = FALSE}
library(COMPoissonReg)
set.seed(1235)
```
\newpage
# Introduction
\label{sec:intro}
The R package `COMPoissonReg` supports Conway-Maxwell Poisson (CMP) and Zero-Inflated Conway-Maxwell Poisson (ZICMP) models for analysis of count data in a flexible manner, to account for data dispersion relative to a Poisson model. The package provides regression functionality in addition to basic distribution functions. Interested users can refer to @SellersShmueli2010 and @SellersRaim2016 regarding the underlying theoretical developments for the CMP and ZICMP regressions, respectively. A full specification of the public `COMPoissonReg` interface can be found in the manual. In addition to package prerequisites `Rcpp` [@Eddelbuettel2013] and `numDeriv` [@numDeriv], `ggplot2` [@Wickham2016] is also used in this vignette.
One of the challenges of working with CMP and ZICMP lies in computing the normalizing constant and related quantities. The normalizing constant does not have a simple closed form in general and can quickly increase or decrease magnitude as parameters are varied. `COMPoissonReg` takes a hybrid approach of either truncating the infinite series or making use of an approximation, depending on parameter values.
The remainder of the vignette proceeds as follows. Section \ref{sec:cmp} describes functions to support the CMP distribution, including numerical handling of the normalizing constant. Section \ref{sec:zicmp} describes functions for ZICMP. Finally, Section \ref{sec:reg} demonstrates regression functions; Sections \ref{sec:cmp-reg} and \ref{sec:zicmp-reg} give specific examples based on CMP and ZICMP outcomes, respectively. The `COMPoissonReg` package is on CRAN at <https://cran.r-project.org/package=COMPoissonReg> and the source code is on Github at <https://github.com/lotze/COMPoissonReg>.
# Conway-Maxwell Poisson Distribution
\label{sec:cmp}
Let $Y \sim \text{CMP}(\lambda, \nu)$ be a Conway-Maxwell Poisson (CMP) random variable with density
<!-- -->
\begin{align*}
f(y \mid \lambda, \nu) = \frac{\lambda^y}{(y!)^\nu Z(\lambda, \nu)}, \quad y \in \mathbb{N}, \quad
Z(\lambda, \nu) = \sum_{r=0}^\infty \frac{\lambda^r}{(r!)^\nu},
\end{align*}
<!-- -->
where $\lambda > 0$, $\nu > 0$, and $\mathbb{N}$ represents the nonnegative integers $\{ 0, 1, 2, \ldots\}$. Three notable special cases of $\text{CMP}(\lambda, \nu)$ help to demonstrate its flexibility in count modeling.
a. The case $\nu = 1$ corresponds to $\text{Poisson}(\lambda)$.
a. When $\lambda \in (0,1)$ and $\nu = 0$, the $\text{CMP}(\lambda, \nu)$ distribution is $\text{Geometric}(1-\lambda)$ with density $f(y \mid \lambda) = (1 - \lambda) \lambda^y$ for $y \in \mathbb{N}$, which is overdispersed relative to Poisson.
a. When $\nu \rightarrow \infty$, $\text{CMP}(\lambda, \nu)$ converges to a $\text{Bernoulli}(\lambda / (1 + \lambda))$ distribution which is underdispersed relative to Poisson.
## Normalizing Constant
\label{sec:cmp-normconst}
The normalizing constant $Z(\lambda, \nu)$ presents some challenges in the practical use of CMP models and has been a topic of interest in the CMP literature. In general, there is no simple closed form expression for the series $Z(\lambda, \nu)$. @ShmueliEtAl2005 give the approximation
<!-- -->
\begin{align}
Z(\lambda, \nu) &= \frac{ \exp(\nu \lambda^{1/\nu}) }{ \lambda^{(\nu-1)/2\nu} (2\pi)^{(\nu-1)/2} \nu^{1/2} }
\left\{ 1 + O(\lambda^{-1/\nu}) \right\},
\label{eqn:approx}
\end{align}
<!-- -->
where the $O(\cdot)$ term vanishes as $\lambda^{-1/\nu}$ becomes small. Approximations have been further studied and refined in subsequent literature; see for example @GillispieGreen2015, @DalyGaunt2016, and @GauntEtAl2019. The expression in \eqref{eqn:approx} emphasizes that the magnitude of $Z(\lambda, \nu)$ explodes as $\nu \rightarrow 0$ when $\lambda > 1$. For example, $Z(2, 0.075) \approx e^{780.515}$ is too large to store as a double-precision floating point number, and may evaluate to infinity if care is not taken. In contrast, $Z(\lambda, \nu) \rightarrow 1/(1 - \lambda)$ when $\lambda < 1$ and $\nu \rightarrow 0$.
In practice, the `COMPoissonReg` package does not place constraints on $\lambda$ and $\nu$, except to ensure that they are positive, so that their values are driven by the data or the user's selection. A hybrid strategy motivated by \eqref{eqn:approx} is taken by `COMPoissonReg`.
To compute $Z(\lambda, \nu)$, suppose we are given a small tolerance $\delta > 0$. If
<!-- --->
\begin{align}
\lambda^{-1/\nu} < \delta,
\label{eqn:can_approx}
\end{align}
<!-- --->
the first term of \eqref{eqn:approx} dominates the second term, and we take
<!-- -->
\begin{align}
Z(\lambda, \nu) &\approx \frac{ \exp(\nu \lambda^{1/\nu}) }{ \lambda^{(\nu-1)/2\nu} (2\pi)^{(\nu-1)/2} \nu^{1/2} } \nonumber \\
&=\exp\left\{
\nu \lambda^{1/\nu} - \frac{\nu-1}{2\nu} \log \lambda - \frac{\nu-1}{2} \log(2\pi) - \frac{1}{2} \log \nu
\right\}.
\label{eqn:z-approx}
\end{align}
<!-- -->
as an approximation. Otherwise, the series is computed by truncating to a finite number of terms, which is described next. In either case, computations are kept on the log-scale as much as possible to accommodate numbers with potentially very large and very small magnitudes.
We approximate $Z(\lambda, \nu)$ by a finite summation $Z^{(M)}(\lambda, \nu) = \sum_{r=0}^M \lambda^r / (r!)^\nu$ if condition \eqref{eqn:can_approx} fails, so that the remainder is smaller than a given tolerance. The general approach is described in Appendix B of @ShmueliEtAl2005. @Robbins1955 gives bounds for Stirling's approximation as
<!-- -->
\begin{align*}
\sqrt{2\pi} n^{n + 1/2} e^{-n} e^{1 / (12n + 1)} < n! <
\sqrt{2\pi} n^{n + 1/2} e^{-n} e^{1 / (12n)}.
\end{align*}
<!-- -->
Noting that $e^{1 / (12n + 1)} \geq 1$ for $n \geq 1$ and $\sqrt{2\pi} e^{1 / (12n)} \leq e$ for $n \geq 2$, we obtain simpler bounds
<!-- -->
\begin{align*}
\sqrt{2\pi} n^{n + 1/2} e^{-n} \leq n! \leq e n^{n + 1/2} e^{-n},
\end{align*}
<!-- -->
which will be convenient in the following calculations.^[These bounds are also stated at <https://en.wikipedia.org/wiki/Stirling\%27s_approximation>, last accessed 2022-10-09.] We may then bound the truncation error for $Z^{(M)}(\lambda, \nu)$ using
<!-- -->
\begin{align}
\lvert Z(\lambda, \nu) - Z^{(M)}(\lambda, \nu) \rvert &= Z(\lambda, \nu) - Z^{(M)}(\lambda, \nu) \nonumber \\
&= \sum_{r=M+1}^\infty \frac{\lambda^r}{(r!)^\nu} \nonumber \\
&\leq \sum_{r=M+1}^\infty \frac{\lambda^r}{(2\pi)^{\nu/2} r^{\nu r + \nu/2} e^{-r \nu}} \nonumber \\
&\leq \sum_{r=M+1}^\infty \frac{\lambda^r}{(2\pi)^{\nu/2} (M+1)^{\nu r + \nu/2} e^{-r \nu}} \nonumber \\
&= (2\pi)^{-\nu/2} (M+1)^{-\nu/2} \sum_{r=M+1}^\infty \left( \frac{\lambda e^{\nu}}{(M+1)^{\nu}} \right)^r \label{eqn:geom} \\
&= (2\pi)^{-\nu/2} (M+1)^{-\nu/2} \sum_{r=0}^\infty \left( \frac{\lambda e^{\nu}}{(M+1)^{\nu}} \right)^{r+M+1} \nonumber \\
&= (2\pi)^{-\nu/2} (M+1)^{-\nu/2} \left( \frac{\lambda e^{\nu}}{(M+1)^{\nu}} \right)^{M+1} \frac{1}{1 - \frac{\lambda e^{\nu}}{(M+1)^{\nu}}} \nonumber \\
&=: \Delta_M, \nonumber
\end{align}
<!-- -->
assuming that $|\lambda e^\nu / (M+1)^\nu| < 1$ so that the geometric series in \eqref{eqn:geom} converges. To ensure this convergence we choose $M$ at least large enough so that
<!-- -->
\begin{align*}
\lambda e^\nu / (M+1)^\nu < 1 \iff
M > \lambda^{1/\nu} e - 1.
\end{align*}
<!-- -->
For a small given number $\epsilon > 0$, we may consider bounding the relative error by
<!-- -->
\begin{align}
&\frac{\lvert Z(\lambda, \nu) - Z^{(M)}(\lambda, \nu) \rvert}{Z^{(M)}(\lambda, \nu)}
\leq \frac{\Delta_M}{Z^{(M)}(\lambda, \nu)}
< \epsilon.
\label{eqn:rel_error_bound}
\end{align}
<!-- -->
The second inequality of \eqref{eqn:rel_error_bound} can be expressed on the log-scale using
<!-- -->
\begin{align}
\log \Delta_M - \log Z^{(M)}(\lambda, \nu) < \log \epsilon,
\label{eqn:adaptive-log-scale}
\end{align}
<!-- -->
where
<!-- -->
\begin{align*}
\log \Delta_M
= -\frac{\nu}{2} \log(2\pi)
- \nu \left( M+\frac{3}{2} \right) \log (M+1)
+ (M+1) (\nu + \log \lambda)
- \log\left(1 - \frac{\lambda e^\nu}{(M+1)^\nu} \right).
\end{align*}
<!-- -->
Therefore, we compute $Z^{(M)}(\lambda, \nu)$ until at least $M > \lambda^{1/\nu} e - 1$, increasing $M$ and updating $Z^{(M)}(\lambda, \nu)$ until \eqref{eqn:adaptive-log-scale} is satisfied. This is summarized as Algorithm \ref{alg:normconst-trunc}.
\begin{algorithm}
\caption{Compute the CMP normalizing constant using truncation approach.}
\label{alg:normconst-trunc}
\hspace*{\algorithmicindent}\textbf{Input}: $\lambda > 0$ rate parameter. \\
\hspace*{\algorithmicindent}\textbf{Input}: $\nu > 0$ dispersion parameter. \\
\hspace*{\algorithmicindent}\textbf{Input}: $\epsilon > 0$ tolerance. \\
\hspace*{\algorithmicindent}\textbf{Input}: $y_\text{max} \in \mathbb{N}$ upper limit for $M$
\begin{algorithmic}[1]
\Function{Truncated-$Z$}{$\lambda, \nu, \epsilon, y_\text{max}$}
\State $M = 0$, $Z^{(0)} = 1$
\While{$M \leq \lambda^{1/\nu} e - 1$ and $M \leq y_\text{max}$}
\State $Z^{(M+1)} \leftarrow Z^{(M)} + \lambda^{M} / (M!)^\nu$
\State $M \leftarrow M + 1$
\EndWhile
\While{$\log \Delta_M - \log Z^{(M)} \geq \log \epsilon$ and $M \leq y_\text{max}$}
\State $Z^{(M+1)} \leftarrow Z^{(M)} + \lambda^{M} / (M!)^\nu$
\State $M \leftarrow M + 1$
\EndWhile
\State \Return $\{ Z^{(M)}, M \}$
\EndFunction
\end{algorithmic}
\end{algorithm}
The individual terms $\lambda^r / (r!)^\nu$ in the summation may be too large to store at their original scale. Therefore, summation is carried out at the log-scale, wherever possible, using the identity
<!-- -->
\begin{align}
\log(x + y) = \log x + \log(1 + \exp\{ \log y - \log x \});
\label{eqn:logadd}
\end{align}
<!-- -->
this is especially helpful when $0 < y \ll x$, as $\log x$ may be kept on the log-scale by the first term of the right-hand side of \eqref{eqn:logadd}, and the standard library function `log1p` may be used to accurately compute $\log(1 + \phi)$ for very small $\phi > 0$.
Many of the functions in the user interface of `COMPoissonReg` take an optional `control` argument, which can be constructed as follows.
```{r}
control = get.control(
ymax = 100000,
hybrid.tol = 1e-2,
truncate.tol = 1e-6
)
```
The tolerances $\delta$ and $\epsilon$ are specified as `hybrid.tol` and `truncate.tol` respectively. Taking `hybrid.tol` to be a very small positive number results in use of the truncated sum $Z^{(M)}(\lambda, \nu)$, while `hybrid.tol = Inf` uses the approximation method \eqref{eqn:z-approx}, except in extreme cases where $\lambda^{-1/\nu}$ evaluates to zero or $\infty$ numerically. The argument `ymax` specifies upper limit $M$; this is a safety measure which prevents very large computations unless the user opts to allow them. When no control object is specified, a global default (via option `COMPoissonReg.control`) is used.
```{r}
control = getOption("COMPoissonReg.control")
control$ymax
control$hybrid.tol
control$truncate.tol
```
The default may be replaced in your current session if desired.
```{r}
options(COMPoissonReg.control = control)
```
The control object contains several other useful arguments to be discussed later in the vignette.
The `ncmp` function computes the normalizing constant $Z(\lambda, \nu)$ and returns its value either on the original scale or the log-scale.
```{r}
ncmp(lambda = 1.5, nu = 1.2)
ncmp(lambda = 1.5, nu = 1.2, log = TRUE)
ncmp(lambda = 1.5, nu = 1.2, log = TRUE, control = get.control(hybrid.tol = 1e10))
ncmp(lambda = 1.5, nu = 1.2, log = TRUE, control = get.control(hybrid.tol = 1e-10))
```
Before proceeding, let us define a function to display errors and warnings which are intentionally triggered in the remainder of the vignette.
```{r}
print_warning = function(x) { print(strwrap(x), quote = FALSE) }
```
The function `tcmp` returns the truncation value $M$ obtained from Algorithm \ref{alg:normconst-trunc}.
```{r}
nu_seq = c(1, 0.5, 0.2, 0.1, 0.05, 0.03)
tryCatch({ tcmp(lambda = 1.5, nu = nu_seq) }, warning = print_warning)
```
Note that `tcmp` returns `1e6` and produces a warning for the smallest `nu` value `r min(nu_seq)` because Algorithm \ref{alg:normconst-trunc} has reached `ymax = 1e6` before the series could be bounded by a geometric series. Here, it is likely that support values with non-negligible mass are being left out. Let us increase `ymax` to avoid this problem.
```{r}
tcmp(lambda = 1.5, nu = nu_seq, control = get.control(ymax = 3e6))
```
It is also possible to reach the second loop of Algorithm \ref{alg:normconst-trunc} where the geometric series can be used, but `ymax` is not large enough to satisfy \eqref{eqn:adaptive-log-scale}. Here is an example where this occurs.
```{r}
tcmp(lambda = 1.2, nu = 0.03, control = get.control(ymax = 1200))
```
Now that we have a somewhat robust computation for the normalizing constant, let us create a plot of the interesting behavior when $\lambda > 1$ and $\nu$ is decreasing.
```{r, prompt = FALSE}
library(ggplot2)
nu_seq = seq(0.03, 1.5, length.out = 20)
nc1 = ncmp(lambda = 0.5, nu = nu_seq, log = TRUE)
nc2 = ncmp(lambda = 1.05, nu = nu_seq, log = TRUE)
nc3 = ncmp(lambda = 1.20, nu = nu_seq, log = TRUE)
```
```{r, fig.width = 5, fig.height = 3, fig.align = "center", prompt = FALSE, fig.cap = "Log of normalizing constant for $\\lambda = 0.5$ ($\\circ$), $\\lambda = 1.05$ ($\\Delta$), and $\\lambda = 1.20$ ($+$)."}
ggplot() +
geom_point(data = data.frame(x = nu_seq, y = nc1), aes(x = x, y = y), pch = 1) +
geom_point(data = data.frame(x = nu_seq, y = nc2), aes(x = x, y = y), pch = 2) +
geom_point(data = data.frame(x = nu_seq, y = nc3), aes(x = x, y = y), pch = 3) +
xlab("nu") +
ylab("log of normalizing constant") +
theme_bw()
```
We see that with $\lambda = 1.2$ a value of $Z(\lambda, `r round(nu_seq[1], 4)`) \approx e^{`r round(nc3[1], 2)`}$ is obtained, which is an extremely large jump from the next value in the series $Z(\lambda, `r round(nu_seq[2], 4)`) \approx e^{`r round(nc3[2], 2)`}$.
## Density, Generation, CDF, and Quantile Functions
\label{sec:cmp-dist}
The respective functions for CMP density, variate generation, CDF, and quantile functions are `dcmp`, `rcmp`, `pcmp` , and `qcmp`. Their usage is similar to distribution functions provided by the R `stats` package.
```{r}
dcmp(0, lambda = 10, nu = 0.9)
dcmp(0:17, lambda = 10, nu = 0.9, log = TRUE)
dcmp(c(0, 1, 2), lambda = c(10, 11, 12), nu = c(0.9, 1.0, 1.1), log = TRUE)
```
```{r}
rcmp(50, lambda = 10, nu = 0.9)
```
```{r}
pcmp(0:17, lambda = 10, nu = 0.9)
```
```{r}
qq = seq(0, 0.95, length.out = 10)
qcmp(qq, lambda = 10, nu = 0.9)
```
The CMP distribution functions compute the normalizing constant via Section \ref{sec:cmp-normconst}. The density `dcmp` uses the hybrid method, while `rcmp`, `pcmp`, and `qcmp` use the truncation method regardless of condition \eqref{eqn:can_approx}. Variate generation, CDF, and quantiles are then computed on CMP as if it were a discrete distribution on the sample space $\{ 0, \ldots, M \}$. As with `tcmp` and `ncmp`, a warning is emitted in cases where they may not produce reliable results.
```{r}
tryCatch({ rcmp(1, lambda = 2, nu = 0.01) }, warning = print_warning)
```
The truncation method for `qcmp` can result in unreliable quantiles when the requested probability is very close to 1. For example, the actual quantile for probability $1$ is $\infty$, which can be expressed with no computation, but the computed quantity will be the truncation value $M$. More generally, \eqref{eqn:rel_error_bound} implies that
<!-- -->
\begin{align}
\frac{Z(\lambda, \nu) - Z^{(M)}(\lambda, \nu)}{Z(\lambda, \nu)} < \epsilon
&\iff \Prob(Y > M) < \epsilon \nonumber \\
&\iff 1 - F(M \mid \lambda, \nu) < \epsilon \nonumber \\
&\iff F^{-}(1 - \epsilon \mid \lambda, \nu) < M.
\label{eqn:cmp-quantile-limit}
\end{align}
<!-- -->
Therefore, it is possible that quantiles larger than $1 - \epsilon$ may be inaccurately computed with the truncated support. `COMPoissonReg` gives a warning when these are requested.
```{r}
tryCatch({
qcmp(0.9999999, lambda = 1.5, nu = 0.5)
}, warning = print_warning)
```
As a sanity check, let us ensure that the empirical density values, cumulative probabilities, and quantiles of draws from `rcmp` align with respective results computed via `dcmp`, `pcmp`, and `qcmp`.
```{r, fig.width = 3, fig.height = 3, fig.align = "center", prompt = FALSE, fig.show = "hold"}
library(ggplot2)
n = 100000
lambda = 0.5
nu = 0.1
x = rcmp(n, lambda, nu)
xx = seq(-1, max(x)) ## Include -1 to ensure it gets probability zero
qq = seq(0, 0.99, length.out = 100)
fx = dcmp(xx, lambda, nu)
px = pcmp(xx, lambda, nu)
qx = qcmp(qq, lambda, nu)
qx_emp = quantile(x, probs = qq)
```
```{r, fig.width = 3, fig.height = 3, prompt = FALSE, fig.cap = "Empirical density of draws (histogram) versus density computed via the dcmp function (points)."}
ggplot() +
geom_bar(data = data.frame(x = x), aes(x = x, y = ..prop..), fill = "NA",
col = "black") +
geom_point(data = data.frame(x = xx[-1], fx = fx[-1]), aes(x, fx)) +
ylab("Density") +
theme_bw()
```
```{r, fig.width = 3, fig.height = 3, prompt = FALSE, fig.cap = "Empirical CDF of draws (solid line) versus CDF computed via the pcmp function (points)."}
ggplot() +
stat_ecdf(data = data.frame(x = x), aes(x), geom = "step") +
geom_point(data = data.frame(x = xx, px = px), aes(x, px)) +
ylab("Probability") +
theme_bw()
```
```{r, fig.width = 3, fig.height = 3, prompt = FALSE, fig.cap = "Empirical quantiles of draws (`o`) versus quantiles computed via the qcmp function (`+`)."}
ggplot() +
geom_point(data = data.frame(x = qq, qx_emp = qx_emp), aes(qq, qx_emp), pch = 1) +
geom_point(data = data.frame(x = qq, qx = qx), aes(qq, qx), pch = 3) +
xlab("Probability") +
ylab("Quantile") +
theme_bw()
```
## Expected Value and Variance
\label{sec:cmp-ev}
For $Y \sim \text{CMP}(\lambda, \nu)$, we can consider computing the expectation and variance of $Y$ in two ways. First, if there is a moderately-sized $M$ where $\{ 0, \ldots, M \}$ contains all but a negligible amount of the mass of $Y$, we can compute the moments using truncated summations
<!-- -->
\begin{align*}
\E(Y) = \sum_{y=0}^M y \frac{\lambda^y}{(y!)^\nu Z(\lambda, \nu)}, \quad
\E(Y^2) = \sum_{y=0}^M y^2 \frac{\lambda^y}{(y!)^\nu Z(\lambda, \nu)}, \quad
\Var(Y) = \E(Y^2) - [\E(Y)]^2.
\end{align*}
<!-- -->
Otherwise, a different approach is taken. Notice that the expected value is related to the first derivative of the log-normalizing constant via
<!-- -->
\begin{align*}
&\frac{\partial}{\partial \lambda} \log Z(\lambda, \nu)
= \frac{ \frac{\partial}{\partial \lambda} Z(\lambda, \nu) }{ Z(\lambda, \nu) }
= \frac{1}{Z(\lambda, \nu)} \sum_{y=0}^\infty y \frac{\lambda^{y-1}}{(y!)^\nu} \\
&\iff \E(Y) = \lambda \frac{\partial}{\partial \lambda} \log Z(\lambda, \nu).
\end{align*}
<!-- -->
For the second derivative,
<!-- -->
\begin{align*}
&\frac{\partial^2}{\partial \lambda^2} \log Z(\lambda, \nu)
= \frac{ Z(\lambda, \nu) \frac{\partial^2}{\partial \lambda^2} Z(\lambda, \nu) - [\frac{\partial}{\partial \lambda} Z(\lambda, \nu)]^2}{ Z(\lambda, \nu)^2 }
= \frac{1}{Z(\lambda, \nu)} \sum_{y=0}^\infty y(y-1) \frac{\lambda^{y-2}}{(y!)^\nu} - \left[ \frac{\E(Y)}{\lambda} \right]^2 \\
&\iff \lambda^2 \frac{\partial^2}{\partial \lambda^2} \log Z(\lambda, \nu) = \E[Y(Y-1)] - [\E(Y)]^2 = \Var(Y) - \E(Y) \\
&\iff \Var(Y) = \lambda^2 \frac{\partial^2}{\partial \lambda^2} \log Z(\lambda, \nu) + \E(Y).
\end{align*}
<!-- -->
Therefore, we may use first and second derivatives of $\log Z(\lambda, \nu)$, taken with respect to $\lambda$, to compute $\E(Y)$ and $\Var(Y)$. The `ecmp` and `vcmp` functions implement this computation of the expectation and variance, respectively. The control object is used to determine whether to use the truncation or differentiation approach. Condition \eqref{eqn:can_approx} is used to determine whether to use the truncation approach (if false) or differentiation approach (if true).
```{r}
ecmp(lambda = 10, nu = 1.2)
ecmp(lambda = 1.5, nu = 0.5)
ecmp(lambda = 1.5, nu = 0.05)
ecmp(lambda = 1.5, nu = 0.05, control = get.control(hybrid.tol = 1e-10))
ecmp(lambda = 1.5, nu = 0.05, control = get.control(hybrid.tol = 1e10))
```
```{r}
vcmp(lambda = 10, nu = 1.2)
vcmp(lambda = 1.5, nu = 0.5)
vcmp(lambda = 1.5, nu = 0.05)
vcmp(lambda = 1.5, nu = 0.05, control = get.control(hybrid.tol = 1e-10))
vcmp(lambda = 1.5, nu = 0.05, control = get.control(hybrid.tol = 1e10))
```
Provided that an enormously large truncation value $M$ is not required, we may compute other moments by truncated sums using `tcmp`.
```{r}
M = tcmp(lambda = 1.5, nu = 0.05)
print(M)
xx = seq(0, M)
sum(xx^3 * dcmp(xx, lambda, nu)) # E(X^3)
sum(xx^4 * dcmp(xx, lambda, nu)) # E(X^4)
```
# Zero-Inflated Conway-Maxwell Poisson Distribution
\label{sec:zicmp}
Let $S \sim \text{Bernoulli}(p)$ and $T \sim \text{CMP}(\lambda, \nu)$ be independent random variables. Then $Y = (1-S) T$ follows a Zero-Inflated Conway-Maxwell Poisson distribution $\text{ZICMP}(\lambda, \nu, p)$ with density
<!-- -->
\begin{align*}
f(y \mid \lambda, \nu, p) = (1 - p) \frac{\lambda^{y}}{(y!)^{\nu} Z(\lambda, \nu)}
+ p \cdot \ind(y = 0), \quad y \in \mathbb{N}.
\end{align*}
<!-- -->
Like CMP, several interesting special cases are obtained.
a. Taking $\nu = 1$ corresponds to Zero-Inflated Poisson $\text{ZIP}(\lambda, p)$ with density $f(y \mid \lambda, p) = (1 - p) e^{-\lambda} \lambda^{y} / y! + p \cdot \ind(y = 0)$.
a. When $\lambda \in (0,1)$ and $\nu \rightarrow 0$, $\text{ZICMP}(\lambda, \nu)$ converges to a Zero-Inflated Geometric distribution with density $f(y \mid \lambda, p) = (1 - p) (1 - \lambda) \lambda^y + p \cdot \ind(y = 0)$ for $y \in \mathbb{N}$.
a. When $\nu \rightarrow \infty$, $\text{ZICMP}(\lambda, \nu, p)$ converges to a "Zero-Inflated Bernoulli" distribution with density
<!-- -->
\begin{math}
f(y \mid \lambda, p) = (1 - p) \left[ \lambda/(1+\lambda) \right]^y \left[ 1/(1+\lambda) \right]^{1-y} + p \cdot \ind(y = 0).
\end{math}
<!-- -->
which remains a Bernoulli distribution with an adjusted success probability. Here the $\lambda$ and $p$ parameters are not individually identifiable. Therefore, users may want to avoid zero-inflation in CMP data analyses with extreme underdispersion.
a. Finally, $p = 0$ yields $\text{CMP}(\lambda, \nu)$.
## Density, Generation, CDF, and Quantile Functions
There is a close relationship between the CMP and ZICMP distributions, as we have seen from construction of ZICMP. The relationship between the CMP densities and variate generation mechanisms was given earlier in this section. Denote $F(x \mid \lambda, \nu)$ and $F(x \mid \lambda, \nu, p)$ as the CDFs of $\text{CMP}(\lambda, \nu)$ and $\text{ZICMP}(\lambda, \nu, p)$, respectively. We have
<!-- -->
\begin{align*}
F(x \mid \lambda, \nu, p) = (1-p) F(x \mid \lambda, \nu) + p \cdot \ind(x \geq 0).
\end{align*}
<!-- -->
For a given probability $\phi \in [0,1]$, the associated CMP and ZICMP quantile functions are related via
<!-- -->
\begin{align}
F^{-}(\phi \mid \lambda, \nu, p)
&= \inf\{ x \in \mathbb{N} : F(x \mid \lambda, \nu, p) \geq \phi \} \nonumber \\
&= \inf\{ x \in \mathbb{N} : (1-p) F(x \mid \lambda, \nu) + p \cdot \ind(x \geq 0) \geq \phi \} \nonumber \\
&= \inf\{ x \in \mathbb{N} : F(x \mid \lambda, \nu) \geq (\phi - p) / (1 - p) \} \nonumber \\
&= F^{-}\left( \frac{\phi - p}{1 - p} \mid \lambda, \nu \right).
\label{eqn:zicmp-quantiles}
\end{align}
The respective functions for ZICMP density, variate generation, CDF, and quantile functions are `dzicmp`, `rzicmp`, `pzicmp` , and `qzicmp`. They make use of the CMP implementation described in Section \ref{sec:cmp} such as the criteria to either truncate or approximate the normalizing constant.
```{r}
qq = seq(0, 0.95, length.out = 20)
rzicmp(20, lambda = 1.5, nu = 0.2, p = 0.25)
dzicmp(c(0, 1, 2), lambda = 1.5, nu = 0.2, p = 0.25)
pzicmp(c(0, 1, 2), lambda = 1.5, nu = 0.2, p = 0.25)
qzicmp(qq, lambda = 1.5, nu = 0.2, p = 0.25)
```
As with `qcmp`, the `qzicmp` function is computed by the truncation method, and cannot accurately compute quantiles very close to 1. More specifically, from the CMP distribution, \eqref{eqn:cmp-quantile-limit} gives
<!-- -->
\begin{align*}
M &> F^{-}(1 - \epsilon \mid \lambda, \nu) \\
&= F^{-}\left( \frac{\phi_\epsilon - p}{1 - p} \mid \lambda, \nu \right) \\
&= F^{-}\left( \phi_\epsilon \mid \lambda, \nu, p \right)
\end{align*}
<!-- -->
where $\phi_\epsilon = (1 - \epsilon)(1-p) + p$ and the last equality uses \eqref{eqn:zicmp-quantiles}. This motivates a warning from `qzicmp` when the argument is larger than $\phi_\epsilon$.
```{r}
tryCatch({
qzicmp(0.9999999, lambda = 1.5, nu = 0.5, p = 0.5)
}, warning = print_warning)
```
Let us repeat the sanity check from Section \ref{sec:cmp-dist} to ensure that the empirical density values, cumulative probabilities, and quantiles line up with the ones computed via `dzicmp`, `pzicmp`, and `qzicmp`.
```{r, fig.width = 3, fig.height = 3, fig.align = "center", prompt = FALSE, fig.show = "hold"}
library(ggplot2)
n = 100000
lambda = 0.5
nu = 0.1
p = 0.5
x = rzicmp(n, lambda, nu, p)
xx = seq(-1, max(x)) ## Include -1 to ensure it gets probability zero
qq = seq(0, 0.99, length.out = 100)
fx = dzicmp(xx, lambda, nu, p)
px = pzicmp(xx, lambda, nu, p)
qx = qzicmp(qq, lambda, nu, p)
qx_emp = quantile(x, probs = qq)
```
```{r, fig.width = 3, fig.height = 3, prompt = FALSE, fig.cap = "Empirical density of draws (histogram) versus density computed via the dzicmp function (points)."}
ggplot() +
geom_bar(data = data.frame(x = x), aes(x = x, y = ..prop..), fill = "NA",
col = "black") +
geom_point(data = data.frame(x = xx[-1], fx = fx[-1]), aes(x, fx)) +
ylab("Density") +
theme_bw()
```
```{r, fig.width = 3, fig.height = 3, prompt = FALSE, fig.cap = "Empirical CDF of draws (solid line) versus CDF computed via the pzicmp function (points)."}
ggplot() +
stat_ecdf(data = data.frame(x = x), aes(x), geom = "step") +
geom_point(data = data.frame(x = xx, px = px), aes(x, px)) +
ylab("Probability") +
theme_bw()
```
```{r, fig.width = 3, fig.height = 3, prompt = FALSE, fig.cap = "Empirical quantiles of draws (`o`) versus quantiles computed via the qzicmp function (`+`)."}
ggplot() +
geom_point(data = data.frame(x = qq, qx_emp = qx_emp), aes(qq, qx_emp), pch = 1) +
geom_point(data = data.frame(x = qq, qx = qx), aes(qq, qx), pch = 3) +
xlab("Probability") +
ylab("Quantile") +
theme_bw()
```
## Expectation and Variance
The expected value and variance of $Y \sim \text{ZICMP}(\lambda, \nu, p)$ are apparent from the construction $Y = (1-S) T$ given earlier in this section. Namely,
<!-- -->
\begin{align*}
\E(Y) = (1-p) \E(T)
\quad \text{and} \quad
\Var(Y) = (1-p) \left\{ \Var(T) + p[\E(T)]^2 \right\}
\end{align*}
<!-- -->
may be obtained using formulas for iterated conditional expectations and variances. They are evaluated in `COMPoissonReg` using the `ezicmp` and `vzicmp` functions respectively. These functions make use of the `ecmp` and `vcmp` functions described in Section \ref{sec:cmp-ev} to compute $\E(T)$ and $\Var(T)$.
```{r}
ezicmp(lambda = 1.5, nu = 0.5, p = 0.1)
ezicmp(lambda = 1.5, nu = 0.5, p = c(0.1, 0.2, 0.5))
```
```{r}
vzicmp(lambda = 1.5, nu = 0.5, p = 0.1)
vzicmp(lambda = 1.5, nu = 0.5, p = c(0.1, 0.2, 0.5))
```
# Regression Modeling with CMP and ZICMP
\label{sec:reg}
Suppose there are $n$ subjects with outcomes $y_1, \ldots, y_n \in \mathbb{N}$ and covariates $\vec{x}_i \in \mathbb{R}^{d_1}$, $\vec{s}_i \in
\mathbb{R}^{d_2}$, and $\vec{w}_i \in \mathbb{R}^{d_3}$ for $i = 1, \ldots, n$. The `COMPoissonReg` package fits both CMP and ZICMP regression models.
The CMP regression model assumes that
<!-- -->
\begin{align*}
Y_i \indep \text{CMP}(\lambda_i, \nu_i), \quad i = 1, \ldots, n,
\end{align*}
<!-- -->
where $\log \lambda_i = \vec{x}_i^\top \vec{\beta}$ and $\log \nu_i = \vec{s}_i^\top \vec{\gamma}$. Writing $\btheta = (\vec{\beta}, \vec{\gamma})$, the likelihood is
<!-- -->
\begin{align}
L(\btheta) =
\prod_{i=1}^n \left[
\frac{\lambda_i^{y_i}}{(y_i!)^{\nu_i} Z(\lambda_i, \nu_i)}
\right].
\label{eqn:likelihood-cmp}
\end{align}
<!-- -->
The ZICMP regression model assumes that
<!-- -->
\begin{align*}
Y_i \indep \text{ZICMP}(\lambda_i, \nu_i, p_i), \quad i = 1, \ldots, n,
\end{align*}
<!-- -->
where $\log \lambda_i = \vec{x}_i^\top \vec{\beta}$, $\log \nu_i = \vec{s}_i^\top \vec{\gamma}$, and $\logit p_i = \vec{w}_i^\top \vec{\zeta}$. Writing $\btheta = (\vec{\beta}, \vec{\gamma}, \vec{\zeta})$, the likelihood is
<!-- -->
\begin{align}
L(\btheta) =
\prod_{i=1}^n \left[(1 - p_i)
\frac{\lambda_i^{y_i}}{(y_i!)^{\nu_i} Z(\lambda_i, \nu_i)}
+ p_i \ind(y_i = 0)
\right].
\label{eqn:likelihood-zicmp}
\end{align}
<!-- -->
We will write $d = d_1 + d_2 + d_3$ for the total dimension of $\btheta$. The $n \times d_1$ design matrix whose rows consist of $\vec{x}_i$ will be denoted $\vec{X}$. Similarly, we will write $\vec{S}$ and $\vec{W}$ as the $n \times d_2$ and $n \times d_3$ design matrices constructed from $\vec{s}_i$ and $\vec{w}_i$, respectively. The `glm.cmp` function provides a formula interface to fit models of both types: \eqref{eqn:likelihood-cmp} and \eqref{eqn:likelihood-zicmp}.
```{r, eval = FALSE, prompt = FALSE}
out = glm.cmp(formula.lambda, formula.nu = ~ 1, formula.p = NULL,
data = NULL, init = NULL, fixed = NULL, control = NULL, ...)
```
The interface contains three formulas: `formula.lambda` specifies the regression $\vec{x}_i^\top \vec{\beta}$ used for $\lambda_i$, while `formula.nu` and `formula.p` correspond to $\vec{s}_i^\top \vec{\gamma}$ for $\nu_i$ and $\vec{w}_i^\top \vec{\zeta}$ for $p_i$, respectively. ZICMP regression is utilized when `formula.p` is set to something other than its default `NULL` value; otherwise, CMP regression is assumed. The `data` argument is used to pass a `data.frame` explicitly rather than having the data be read from the local environment. The `init`, `fixed`, and `control` arguments and associated helper functions are described below.
The `init` argument represents an initial value for the optimizer. The following functions can be used to construct it.
```{r}
get.init(beta = c(1, 2, 3), gamma = c(-1, 1), zeta = c(-2, -1))
get.init.zero(d1 = 3, d2 = 2, d3 = 2)
```
The `fixed` argument is used to specify indices of the three coefficients which will remain fixed at their initial value during optimization.
```{r}
get.fixed(beta = c(1L, 2L), gamma = c(1L))
```
The specification above requests the first two elements of `beta` and the first element of `gamma` to be fixed. Notice that indices must be integers and that the default value is an empty integer vector which is interpreted as "no elements are fixed". The `fixed` argument can usually be disregarded but may be useful in some circumstances; an example is given in Section \ref{sec:zicmp-reg}.
Specifying the elements of `init` and `fixed` may be somewhat awkward with the formula interface, as they require knowledge of how formulas will be expanded into design matrices and coefficients. It can be helpful to produce the design matrices using R's `model.matrix` function.
```{r, eval=FALSE}
model.matrix(formula.lambda, data = data)
model.matrix(formula.nu, data = data)
model.matrix(formula.p, data = data)
```
The `control` argument has been introduced in Section \ref{sec:cmp}; regression modeling makes use of several additional arguments. `COMPoissonReg` uses `optim` to compute the maximum likelihood estimate (MLE) $\hat{\btheta}$ for $\btheta$ under the specified model. Several controls are provided to influence how `COMPoissonReg` invokes `optim`; here are their default values.
```{r}
control = getOption("COMPoissonReg.control")
control$optim.method
control$optim.control
```
The element `optim.method` is a string which is passed as the `method` argument to `optim`, while `optim.control` is a list passed as the `control` argument of `optim`. Note that, for the latter, if an entry is given for `fnscale`, it will be ignored and overwritten internally by `COMPoissonReg`.
The covariance of $\hat{\btheta}$ is estimated by $\hat{\vec{V}}(\hat{\btheta}) = -[\vec{H}(\hat{\btheta})]^{-1}$, where $\vec{H}(\btheta) = \partial^2 \log L(\btheta) / [\partial \btheta \partial \btheta^\top]$ is the Hessian of the log-likelihood computed by `optim`. The standard error for coefficient $\theta_j$ in $\btheta$ is then obtained as the square root of the $j$th diagonal of $\hat{\vec{V}}(\hat{\btheta})$.
We will now illustrate use of the regression tools using two examples whose data are included in the package. Note that these demonstrations are not intended to be complete regression analyses, and results may be slightly different than previously published analyses due to differences in the computations.
## CMP Regression
\label{sec:cmp-reg}
### Freight Dataset
\label{sec:cmp-reg-freight}
The `freight` dataset [@KutnerEtAl2003] was analyzed using CMP regression by @SellersShmueli2010 and found to exhibit underdispersion. The data describe $n = 10$ instances where 1,000 ampules were transported via air shipment. The outcome of interest is the variable `broken` which describes the number of broken ampules in each shipment. The covariate `transfers` describes the number of times the carton was transferred from one aircraft to another during the shipment.
Let us load and view the dataset.
```{r}
data(freight)
print(freight)
```
Before fitting a CMP regression, let us fit a Poisson regression model $Y_i \indep \text{Poisson}(\lambda_i)$ with
<!-- -->
\begin{align*}
\log \lambda_i = \beta_0 + \beta_1 \cdot \text{transfers}_i.
\end{align*}
<!-- -->
This can be carried out with the standard `glm` function.
```{r}
glm.out = glm(broken ~ transfers, data = freight, family = poisson)
summary(glm.out)
```
### CMP Regression
\label{sec:cmp-reg-initial}
Next, let us fit a similar CMP regression model with
<!-- -->
\begin{align*}
&\log \lambda_i = \beta_0 + \beta_1 \cdot \text{transfers}_i, \\
&\log \nu_i = \gamma_0,
\end{align*}
<!-- -->
using only an intercept for $\nu_i$.
```{r}
cmp.out = glm.cmp(broken ~ transfers, data = freight)
print(cmp.out)
```
The coefficients used in the $\lambda_i$ formula are prefixed with an `X:` label, while an `S:` label is used for coefficients of the $\nu_i$ formula. Notice that estimates for `X:` coefficients from the CMP fit are dissimilar to those from the Poisson fit; this may occur when the estimate of $\nu$ deviates from the value of 1. Similarly to the `glm` output, the output of `glm.cmp` displays several quantities for each coefficient $\theta_j$, $j = 1, \ldots, d$: a point estimate $\hat{\theta}_j$, an associated standard error $\widehat{\text{SE}}(\hat{\theta}_j)$, a z-value $z_j = \hat{\theta}_j / \widehat{\text{SE}}(\hat{\theta}_j)$, and a p-value $2\Phi(-|z_j|)$ for the test $H_0: \theta_j = 0$ versus $H_1: \theta_j \neq 0$. Here, $\Phi$ is the CDF of the standard normal distribution. Because an intercept-only formula was specified for $\nu_i$, $\hat{\nu} = \exp(\hat{\gamma})$ does not vary with $i$ and its estimate and associated standard error are added to the display. Here we see evidence of underdispersion with $\hat{\nu} > 1$. A test for equidispersion is displayed to determine whether there is a significant amount of over or underdispersion in the data. In particular, a likelihood ratio test is used to decide whether $H_0: \vec{\gamma} = \vec{0}$ versus $H_0: \vec{\gamma} \neq \vec{0}$. The test statistic is displayed along with the degrees of freedom and associated p-value. Here we have fairly strong evidence to reject the null hypothesis of equidispersion.
The AIC for the CMP model `cmp.out` shows some improvement over that of `glm.out`. Let us also consider a slope coefficient for the $\nu$ component using
<!-- -->
\begin{align*}
\log \nu_i = \gamma_0 + \gamma_1 \cdot \text{transfers}_i.
\end{align*}
<!-- -->
via the following call to `glm.cmp`.
```{r}
cmp2.out = glm.cmp(broken ~ transfers, formula.nu = ~ transfers, data = freight)
print(cmp2.out)
```
Model `cmp2.out` provides a slight improvement over `cmp.out` in AIC and BIC, but we may prefer `cmp.out` in the interest of simplicity.
### Adjustments to Optim
\label{sec:cmp-reg-optim}
To gain some insight into the optimization, we may wish to increase the trace level, which can be done as follows.
```{r}
control = get.control(optim.control = list(maxit = 5, trace = 3, REPORT = 1))
cmp3.out = glm.cmp(broken ~ transfers, data = freight, control = control)
```
Data from the local environment may be passed to the `glm.cmp` function without explicitly using the `data` argument.
```{r, results = 'hide'}
y = freight$broken
x = freight$transfers
glm.cmp(y ~ x)
```
### Offset Term
\label{sec:cmp-reg-offset}
In a count regression model, it may be desirable to include offset terms such as
<!-- -->
\begin{align*}
\log \lambda_i = \vec{x}_i^\top \vec{\beta} + \text{offx}_i, \quad
\log \nu_i = \vec{s}_i^\top \vec{\beta} + \text{offs}_i.
\end{align*}
<!-- -->
An `offset` term may be used in the formula interface to accomplish this.
```{r, results = 'hide'}
freight$offx = 13
freight$offs = 1
glm.cmp(broken ~ transfers + offset(offx), data = freight)
glm.cmp(broken ~ transfers + offset(offx), formula.nu = ~1 + offset(offs), data = freight)
```
For users who wish to bypass the formula interface and prepare the $\vec{X}$ and $\vec{S}$ design matrices manually, a "raw" interface to the regression functionality is also provided.
```{r, results = 'hide'}
y = freight$broken
X = model.matrix(~ transfers, data = freight)
S = model.matrix(~ 1, data = freight)
offs = get.offset(x = rep(13, nrow(freight)), s = rep(1, nrow(freight)))
cmp.raw.out = glm.cmp.raw(y, X, S, offset = offs)
```
### Accessor Functions
\label{sec:cmp-reg-accessors}
Several accessors are provided to extract results from the output object.
```{r}
logLik(cmp.out) ## Log-likelihood evaluated at MLE.
AIC(cmp.out) ## AIC evaluated at MLE.
BIC(cmp.out) ## BIC evaluated at MLE.
coef(cmp.out) ## Estimates of theta as a flat vector
coef(cmp.out, type = "list") ## Estimates of theta as a named list
vcov(cmp.out) ## Estimated covariance matrix of theta hat
sdev(cmp.out) ## Standard deviations from vcov(...) diagonals
sdev(cmp.out, type = "list") ## Standard deviations as a named list
```
The `predict` function computes several useful quantities evaluated at the estimate $\hat{\btheta}$. The argument default `type = "response"` produces a vector of estimates of the response $\hat{y}_i = \E(Y_i)$ for $i = 1, \ldots, n$ using the method described in Section \ref{sec:cmp-ev}. The argument `type = "link"` produces a `data.frame` with columns for the linked parameters $\lambda_i$ and $\nu_i$.
```{r}
predict(cmp.out)
predict(cmp.out, type = "link")
```
Note that the estimated `nu` values are equal for all observations because the model assumed only an intercept term for the dispersion component.
We can also use `predict` on new covariate values. Note that models fit with the formula interface expect the new data to be provided as a `data.frame` which is interpreted using the formula used to fit the model. If the raw interface was used to fit the model, use the `get.modelmatrix` function to specify design matrices to use for prediction.
```{r, fig.width = 3, fig.height = 3, fig.align = "center", prompt=FALSE}
# Prepare new data to fit by formula interface
new.df = data.frame(transfers = 0:10)
# Prepare new data to fit by raw interface
X = model.matrix(~ transfers, data = new.df)
S = model.matrix(~ 1, data = new.df)
new.data = get.modelmatrix(X = X, S = S)
# Pass new data to model from by formula interface
y.hat.new = predict(cmp.out, newdata = new.df)
# Pass new data to model from by raw interface
y.hat.new = predict(cmp.raw.out, newdata = new.data)
# Compute predictions for links
predict.out = predict(cmp.out, newdata = new.df, type = "link")
# Plot predictions
ggplot() +
geom_point(data = new.df, aes(transfers, y.hat.new)) +
xlab("Number of transfers") +
ylab("Predicted number broken") +
theme_bw()
```
```{r}
print(y.hat.new)
print(predict.out)
```
The `leverage` function computes the diagonal entries of a "hat" matrix which can be formulated in CMP regression. These can be used to diagnose influential observations. For details, see Section 3.6 of @SellersShmueli2010.
```{r}
leverage(cmp.out)
```
The `residuals` function provides either raw (the default) or quantile-based residuals [@DunnSmyth1996]. In a CMP regression setting, raw residuals $y_i - \hat{y}_i$ generally do not work well with traditional regression diagnostics, such as Q-Q plots. Quantile-based residuals often produce interpretable diagnostics; however, a random element is used in the computation of quantile residuals for discrete distributions. This aids interpretability but gives slightly different residual values each time they are computed. See @DunnSmyth1996 for details.
```{r}
res.raw = residuals(cmp.out)
res.qtl = residuals(cmp.out, type = "quantile")
```
Pearson residuals may be preferred over raw residuals for diagnostics; these can be obtained by standardizing raw residuals using leverage values and variance estimates.
```{r}
link.hat = predict(cmp.out, type = "link")
vv = vcmp(link.hat$lambda, link.hat$nu)
hh = leverage(cmp.out)
res.pearson = res.raw / sqrt(vv*(1-hh))
```
For each type of residual---raw, Pearson, and quantile---we now plot fitted values versus residuals and Q-Q plots.
```{r, fig.width = 3, fig.height = 3, fig.align = "center", prompt = FALSE, fig.show = "hold"}
plot.fit.res = function(y.hat, res) {
ggplot(data.frame(y = y.hat, res = res)) +
geom_point(aes(y, res)) +
xlab("Fitted Value") +
ylab("Residual Value") +
theme_bw() +
theme(plot.title = element_text(size = 10))
}
plot.qq.res = function(res) {
ggplot(data.frame(res = res), aes(sample = res)) +
stat_qq() +
stat_qq_line() +
theme_bw() +
theme(plot.title = element_text(size = 10))
}
y.hat = predict(cmp.out)
plot.fit.res(y.hat, res.raw) +
ggtitle("Fitted Values vs. Raw Residuals")
plot.qq.res(res.raw) +
ggtitle("Q-Q Plot of Raw Residuals")
plot.fit.res(y.hat, res.pearson) +
ggtitle("Fitted Values vs. Pearson Residuals")
plot.qq.res(res.pearson) +
ggtitle("Q-Q Plot of Pearson Residuals")
plot.fit.res(y.hat, res.qtl) +
ggtitle("Fitted Values vs. Quantile Residuals")
plot.qq.res(res.qtl) +
ggtitle("Q-Q Plot of Quantile Residuals")
```
In this example, with only `r nrow(freight)` observations, it is difficult to see an advantage of using quantile residuals; the benefit will be more apparent in Section \ref{sec:zicmp-reg}. One benefit of raw residuals is that they may be used to compute a mean-squared error.
```{r}
mean(res.raw^2)
```
To access the results of the equidispersion test shown in the output of `cmp.out`, we may use the `equitest` accessor function.
```{r}
equitest(cmp.out)
```
The `deviance` function computes the deviance quantities $D_i = -2 [\log L_i(\hat{\btheta}) - \log L_i(\tilde{\btheta}_i)]$ for $i = 1, \ldots, n$, where $L_i(\btheta)$ is the term of the likelihood corresponding to the $i$th observation, $\hat{\btheta}$ is the MLE computed under the full likelihood $L(\btheta) = \prod_{i=1}^n L_i(\btheta)$, and $\tilde{\btheta}_i$ is the maximizer of $L_i(\btheta)$.
```{r}
deviance(cmp.out)
```
The `parametric.bootstrap` function carries out a parametric bootstrap with $R$ repetitions. Using the fitted MLE $\hat{\btheta}$, bootstrap samples $\vec{y}^{(r)} = (y_1^{(r)}, \ldots, y_n^{(r)})$ are drawn from the likelihood $L(\hat{\btheta})$ for $r = 1, \ldots, R$. Estimate $\hat{\btheta}^{(r)}$ is fitted from bootstrap sample $\vec{y}^{(r)}$. An $R \times d$ matrix is returned whose $r$th row is $\hat{\btheta}^{(r)}$.
```{r}
cmp.boot = parametric.bootstrap(cmp.out, reps = 100)
head(cmp.boot)
```
We used $R = `r nrow(cmp.boot)`$ in the display above to keep vignette computations small, but a larger number may be desired in practice. Bootstrap repetitions can be used, for example, to compute 95% confidence intervals for each of the coefficients.
```{r}
t(apply(cmp.boot, 2, quantile, c(0.025,0.975)))
```
### Large Covariates
\label{sec:cmp-reg-large-covariates}
Large covariates can present numerical difficulties in fitting CMP regression. We will briefly demonstrate the difficulties and some possible workarounds. First let us generate a new dataset based on a large covariate in the regression for $\lambda_i$.
```{r, prompt=FALSE}
set.seed(1234)
n = 200
x = rnorm(n, 500, 10)
X = cbind(intercept = 1, slope = x)
S = matrix(1, n, 1)
beta_true = c(-0.05, 0.05)
gamma_true = 2
lambda_true = exp(X %*% beta_true)
nu_true = exp(S %*% gamma_true)
y = rcmp(n, lambda_true, nu_true)
```
Notice that the generated counts $y_1, \ldots, y_n$ are relatively small compared to the covariate $x_1, \ldots, x_n$.
```{r}
summary(x)
summary(y)
```
An initial attempt to fit the true data-generating model fails.
```{r}
tryCatch({
glm.cmp(y ~ x, formula.nu = ~ 1)
}, error = print_warning)
```
Internally, the linked rate parameter $\lambda_i = \exp(\beta_0 + \beta_1 x_i)$ may evaluate to `Inf` or become very close to zero as the optimizer moves $\beta_1$ away from zero in a positive or negative direction, respectively. Some possible ways to address this are as follows.
Standardize the covariate to have mean zero and variance one.
```{r}
glm.cmp(y ~ scale(x), formula.nu = ~ 1)
```
Use a logarithmic transformation on the covariate.
```{r}
glm.cmp(y ~ log(x), formula.nu = ~ 1)
```
Change optimization method or other `optim` arguments.
```{r}
control = get.control(optim.method = "BFGS", optim.control = list(maxit = 200))
suppressWarnings({
cmp.out = glm.cmp(y ~ x, formula.nu = ~ 1, control = control)
print(cmp.out)
})
```
In this case, standardization and logarithmic transformation produce a usable fit. Changing the optimization method to `BFGS` allows the optimization to finish, but there are further numerical problems in computing the Hessian for standard errors.
### Large Outcomes
\label{sec:cmp-reg-large-outcomes}
Now consider a generated dataset with large outcomes but a relatively small covariate. This situation can also present numerical difficulties.
```{r, prompt=FALSE}
set.seed(1234)
n = 200
x = runif(n, 1, 2)
X = cbind(intercept = 1, slope = x)
S = matrix(1, n, 1)
beta_true = c(1, 1)
gamma_true = -0.95
lambda_true = exp(X %*% beta_true)
nu_true = exp(S %*% gamma_true)
y = rcmp(n, lambda_true, nu_true)
```
```{r}
summary(x)
summary(y)
```
An initial attempt to fit the data-generating model fails.
```{r}
tryCatch({
glm.cmp(y ~ x, formula.nu = ~ 1)
}, error = print_warning)
```
Informative starting values help the optimizer to initially make progress. True data-generating parameters will not be available in a real data analysis situation but help to illustrate the idea.
```{r}
init = get.init(beta = beta_true, gamma = gamma_true)
glm.cmp(y ~ x, formula.nu = ~ 1, init = init)
```
Without choosing an initial value, changing the optimization method to `Nelder-Mead` and increasing the maximum number of iterations also helps the optimizer find a solution.
```{r}
control = get.control(optim.method = "Nelder-Mead", optim.control = list(maxit = 1000))
glm.cmp(y ~ x, formula.nu = ~ 1, control = control)
```
Note that this solution is different from the previous one; the log-likelihood of the previous one is slightly better.
## ZICMP Regression
\label{sec:zicmp-reg}
### Couple Dataset
\label{sec:zicmp-reg-couple}
The `couple` dataset [@LoeysEtAl2012] was analyzed with ZICMP regression in @SellersRaim2016 and found to exhibit overdispersion. The data concern separation trajectories of $n = 387$ couples. The variable `UPB` records the number of unwanted pursuit behavior perpetrations and is considered the outcome of interest. Included covariates are the binary variable `EDUCATION`, which is 1 if at least a bachelor's degree was attained, and a continuous variable `ANXIETY` which measures anxious attachment. A zero-inflated count model is considered for these data because 246 of the 387 records have an outcome of zero.
Let us load and view the first few records in the dataset.
```{r}
data(couple)
head(couple)
```
As a preliminary model, let us fit a standard Poisson model $Y_i \indep \text{Poisson}(\lambda_i)$ with
<!-- -->
\begin{align*}
\log \lambda_i = \beta_0 + \beta_1 \cdot \text{EDUCATION}_i + \beta_2 \cdot \text{ANXIETY}_i.
\end{align*}
<!-- -->
We may use the standard `glm` function.
```{r}
glm.out = glm(UPB ~ EDUCATION + ANXIETY, data = couple, family = poisson)
summary(glm.out)
```
### ZICMP Regression
\label{sec:zicmp-reg-initial}
Now consider a ZICMP regression with
<!-- -->
\begin{align*}
&\log \lambda_i = \beta_0 + \beta_1 \cdot \text{EDUCATION}_i + \beta_2 \cdot \text{ANXIETY}_i, \\
&\log \nu_i = \gamma_0, \\
&\logit p_i = \zeta_0 + \zeta_1 \cdot \text{EDUCATION}_i + \zeta_2 \cdot \text{ANXIETY}_i.
\end{align*}
<!-- -->
We use the `glm.cmp` function as follows.
```{r}
zicmp0.out = glm.cmp(UPB ~ EDUCATION + ANXIETY,
formula.nu = ~ 1,
formula.p = ~ EDUCATION + ANXIETY,
data = couple)
print(zicmp0.out)
```
There are now three sets of coefficients reported in the output: the `X:`, `S:`, and `W:` prefixes label estimates for the $\lambda_i$, $\nu_i$, and $p_i$ formulas respectively.
### Comments about Results
The AIC of the ZICMP model is drastically smaller than the Poisson model, indicating a greatly improved fit. However, there are signs of possible numerical issues. The estimate for $\gamma_0$ is a large negative number, but with an extremely large associated SE, which suggests that the effect may not be statistically significant. On the other hand, the estimate of $\nu$ is nearly zero with a small SE, which suggests that the dispersion parameter is indeed statistically significant. On the surface, this seems to be a contradiction.
The issue is that the Hessian of the log-likelihood becomes insensitive to small changes in $\gamma_0$ when $\lambda_i < 1$ and $\gamma_0$ is a large negative number. Let us first verify that the estimates for $\lambda_i$ are indeed smaller than 1.
```{r}
pred.out = predict(zicmp0.out, type = "link")
summary(pred.out$lambda)
```
To show the insensitivity of the Hessian, let us consider a simpler setting with $Y \sim \text{CMP}(\lambda, \nu)$, $\lambda = \exp\{-0.25\} \approx 0.7788$ fixed, and $\nu = \exp\{\gamma_0\}$. We then have log-density
<!-- -->
\begin{align*}
\log f(y \mid \gamma_0) = y \log \lambda - e^{\gamma_0} \log(y!) - \log Z(\lambda, e^{\gamma_0}),
\end{align*}
<!-- -->
with first derivative and second derivatives, respectively,
<!-- -->
\begin{align*}
&\frac{\partial}{\partial \gamma_0} \log f(y \mid \gamma_0)
= -e^{\gamma_0} \log(y!) - \frac{\partial}{\partial \gamma_0} \log Z(\lambda, e^{\gamma_0}), \\
&\frac{\partial^2}{\partial \gamma_0^2} \log f(y \mid \gamma_0)
= -e^{\gamma_0} \log(y!) - \frac{\partial^2}{\partial \gamma_0^2} \log Z(\lambda, e^{\gamma_0}).
\end{align*}
<!-- -->
For a given value of $y$, $-e^{\gamma_0} \log(y!)$ approaches zero as $\gamma_0$ decreases. Therefore, let us focus on the function
<!-- -->
\(
g(\gamma_0) = -\log Z(\lambda, e^{\gamma_0})
\)
<!-- -->
and its first and second derivatives. The following code illustrates their behavior.
```{r, prompt=FALSE}
library(numDeriv)
g = function(gamma0) {
-ncmp(lambda = exp(-0.25), nu = exp(gamma0), log = TRUE)
}
dat = data.frame(gamma0 = seq(0, -13), g = NA, d_g = NA, d2_g = NA)
for (j in 1:nrow(dat)) {
gamma0 = dat$gamma0[j]
dat$g[j] = g(gamma0)
dat$d_g[j] = numDeriv::grad(func = g, x = gamma0)
dat$d2_g[j] = numDeriv::hessian(func = g, x = gamma0)
}
```
Here is the result.
```{r}
print(dat)
```
Notice that $g(\gamma_0)$ approaches a limit as $\gamma_0 \rightarrow -\infty$, which coincides with the CMP distribution approaching a Geometric distribution. It may not be surprising that the first and second derivatives approach zero accordingly. This explains the large SE for $\gamma_0$ in the model `zicmp0.out`. With estimates tending to this region of the parameter space, it may be preferable to fix fix $\gamma_0$ at a value such as $-\infty$, which will be done in the next section.
### Fixed Coefficients
\label{sec:zicmp-reg-fixed}
Our attempt to fit the previous model strongly tended to the Zero-Inflated Geometric special case of ZICMP, but SEs computed via the Hessian become large in this region. In this section, we fix $\gamma_0$ at the extreme $-\infty$ and fit the remaining coefficients. Let us use the raw interface to do this.
```{r, prompt=FALSE}
init = coef(zicmp0.out, type = "list")
y = couple$UPB
X = model.matrix(~ EDUCATION + ANXIETY, data = couple)
S = model.matrix(~ 1, data = couple)
W = model.matrix(~ EDUCATION + ANXIETY, data = couple)
control = get.control(optim.method = "BFGS")
zicmp.out = glm.zicmp.raw(y, X, S, W,
init = get.init(beta = c(-1,0,0), gamma = -Inf, zeta = c(-1,0,0)),
fixed = get.fixed(gamma = 1L), control = control)
```
```{r}
print(zicmp.out)
```
Notice that an additional `Fixed` column has been added to the display, indicating that the coefficient `gamma` is fixed. Furthermore, its `Estimate` column is set to the initial value and the columns `SE`, `z-value`, and `p-value` are set to `NA`. This model achieves a similar log-likelihood value as our first attempt using `L-BFGS-B` but does not exhibit signs of numerical issues.
### Accessor Functions
\label{sec:zicmp-reg-accessors}
Here are several of the accessors provided to extract model outputs.
```{r}
logLik(zicmp.out) ## Log-likelihood evaluated at MLE
AIC(zicmp.out) ## AIC evaluated at MLE
BIC(zicmp.out) ## BIC evaluated at MLE
coef(zicmp.out) ## Estimates of theta as a flat vector
coef(zicmp.out, type = "list") ## Estimates of theta as a named list
vcov(zicmp.out) ## Estimated covariance matrix of theta hat
sdev(zicmp.out) ## Standard deviations from vcov(...) diagonals
sdev(zicmp.out, type = "list") ## Standard deviations as a named list
equitest(zicmp0.out) ## Likelihood ratio test for H_0: gamma = 0
tryCatch({ ## An error is thrown for model with fixed gamma
equitest(zicmp.out)
}, error = print_warning)
```
Because we fixed $\gamma = -\infty$ to obtain `zicmp0.out`, the `equitest` function throws an error instead of proceeding with an equidispersion test.
The `predict` function behaves similarly as in CMP regression; however, the `link` type here also includes a column with the estimated $p_i$.
```{r}
y.hat = predict(zicmp.out) ## Fitted values based on ecmp
link.hat = predict(zicmp.out, type = "link")
head(y.hat)
head(link.hat)
```
In this example, we can see the benefit of using quantile residuals rather than raw residuals for diagnostic plots. The functions `plot.fit.res` and `plot.qq.res` have been defined in Section \ref{sec:cmp-reg}.
```{r, fig.width = 3, fig.height = 3, fig.align = "center", prompt = FALSE, fig.show = "hold"}
res.raw = residuals(zicmp.out, type = "raw")
res.qtl = residuals(zicmp.out, type = "quantile")
plot.fit.res(y.hat, res.raw) +
ggtitle("Fitted Values vs. Raw Residuals")
plot.qq.res(res.raw) +
ggtitle("Q-Q Plot of Raw Residuals")
plot.fit.res(y.hat, res.qtl) +
ggtitle("Fitted Values vs. Quantile Residuals")
plot.qq.res(res.qtl) +
ggtitle("Q-Q Plot of Quantile Residuals")
```
Here is an example of computing fitted values for new covariate data.
```{r, prompt = FALSE}
new.df = data.frame(EDUCATION = round(1:20 / 20), ANXIETY = seq(-3,3, length.out = 20))
X.new = model.matrix(~ EDUCATION + ANXIETY, data = new.df)
S.new = model.matrix(~ 1, data = new.df)
W.new = model.matrix(~ EDUCATION + ANXIETY, data = new.df)
new.data = get.modelmatrix(X.new, S.new, W.new)
# For model fit using raw interface, use get.modelmatrix to prepare new design
# matrices, offsets, etc
y.hat.new = predict(zicmp.out, newdata = new.data)
# For models fit with the formula interface, pass a data.frame with the same
# structure as used in the fit.
y.hat.new = predict(zicmp0.out, newdata = new.df)
```
```{r}
print(y.hat.new)
```
As with CMP regression, a `parametric.bootstrap` function is provided for convenience to obtain a bootstrap sample $\hat{\btheta}^{(r)}$ of $\btheta$ based on the estimate $\hat{\btheta}$. Because it is too time consuming to run this example within the vignette, we show the code without output below. As in Section \ref{sec:cmp-reg}, we consider using the bootstrap samples to construct a 95% confidence interval for each of the coefficients.
```{r, eval = FALSE}
zicmp.boot = parametric.bootstrap(zicmp.out, reps = 100)
head(zicmp.boot)
apply(zicmp.boot, 2, quantile, c(0.025,0.975))
```
# Acknowledgements {.unlisted .unnumbered}
We acknowledge Thomas Lotze for significant contributions to the initial development of the `COMPoissonReg` package and for service as initial maintainer on CRAN. We thank Darcy Steeg Morris, Eric Slud, and Tommy Wright for reviewing the manuscript. We are grateful to the users of `COMPoissonReg` for their interest and for bringing to light some of the issues which have been considered in this work.
# References
|
/scratch/gouwar.j/cran-all/cranData/COMPoissonReg/vignettes/vignette.Rmd
|
# make cAUC kernel function
#' @importFrom utils write.table
#' @include deldup.R
.CAUCkernel <- function(...,
cnv,
nCore,
outFileKernel,
verbose) {
if (verbose) cat('calculating CONCUR kernel\n')
K <- .deldup(cnv = cnv, nCore = nCore, verbose = verbose)
if (length(x = outFileKernel) == 0L || is.null(x = outFileKernel)) {
return( K )
} else if (is.character(x = outFileKernel)) {
tryCatch(expr = utils::write.table(x = K, file = outFileKernel),
condition = function(e){
cat("unable to save kernel\n")
stop( e$message )
})
if (verbose) cat("\tsaved kernel in file", outFileKernel, "\n")
return( outFileKernel )
} else {
stop('outFileKernel must be NULL or a character')
}
}
|
/scratch/gouwar.j/cran-all/cranData/CONCUR/R/CAUCkernel.R
|
#' Pseudo Copy Number Variants Data
#'
#' This data set includes simulated CNV data in PLINK CNV data format.
#' The data are also available from the authors through the url
#' provided below. These data were generated following the simulation
#' study used to illustrate the method in the original manuscript also
#' referenced below; it has been reduced to include only 600 individuals.
#' These data are not meaningful and are intended for demonstration purposes only.
#'
#' @usage data(cnvData)
#'
#' @format cnvData is a data.frame containing 522 observations with 5 columns:
#' \describe{
#' \item{ID}{character patient identifier.}
#' \item{CHR}{CNV chromosome.}
#' \item{BP1}{starting location in base pairs.}
#' \item{BP2}{ending location in base pairs.}
#' \item{TYPE}{copy number (0,1,3,or 4).}
#' }
#'
#' @references Brucker, A., Lu, W., Marceau West, R., Yu, Q-Y., Hsiao, C. K.,
#' Hsiao, T-H., Lin, C-H., Magnusson, P. K. E., Holloway, S. T.,
#' Sullivan, P. F., Szatkiewicz, J. P., Lu, T-P., and
#' Tzeng, J-Y. Association testing using Copy Number Profile Curves (CONCUR)
#' enhances power in copy number variant analysis. <doi:10.1101/666875>.
#'
#' @references \url{https://www4.stat.ncsu.edu/~jytzeng/Software/CONCUR/}
#' @keywords datasets
"cnvData"
#' Pseudo Covariate Data
#'
#' This data set includes simulated covariate data.
#' These data were generated as draws from a Binom(1,0.5) distribution for the
#' 800 individuals in the example data provided with the package.
#' These data are not meaningful and are intended for demonstration purposes only.
#'
#' @usage data(cnvData)
#'
#' @format covData is a data.frame containing 400 observations with 2 columns
#' \describe{
#' \item{ID}{character patient identifier.}
#' \item{SEX}{binary indicator of M/F.}
#' }
#'
#' @references Brucker, A., Lu, W., Marceau West, R., Yu, Q-Y., Hsiao, C. K.,
#' Hsiao, T-H., Lin, C-H., Magnusson, P. K. E., Holloway, S. T.,
#' Sullivan, P. F., Szatkiewicz, J. P., Lu, T-P., and
#' Tzeng, J-Y. Association testing using Copy Number Profile Curves (CONCUR)
#' enhances power in copy number variant analysis. <doi:10.1101/666875>.
#'
#' @keywords datasets
"covData"
#' Pseudo Phenotype Data
#'
#' This data set includes simulated phenotype data.
#' These data include a binary phenotype and a normally distributed continuous
#' phenotype that are randomly generated independent of the CNV data.
#' These data are not meaningful and are intended for demonstration purposes only.
#'
#' @usage data(cnvData)
#'
#' @format phenoData is a data.frame containing 400 observations with 3 columns
#' \describe{
#' \item{ID}{character patient identifier.}
#' \item{PHEB}{binary phenotype.}
#' \item{PHEC}{continuous phenotype.}
#' }
#'
#' @references Brucker, A., Lu, W., Marceau West, R., Yu, Q-Y., Hsiao, C. K.,
#' Hsiao, T-H., Lin, C-H., Magnusson, P. K. E., Holloway, S. T.,
#' Sullivan, P. F., Szatkiewicz, J. P., Lu, T-P., and
#' Tzeng, J-Y. Association testing using Copy Number Profile Curves (CONCUR)
#' enhances power in copy number variant analysis. <doi:10.1101/666875>.
#'
#' @keywords datasets
"phenoData"
|
/scratch/gouwar.j/cran-all/cranData/CONCUR/R/cnvData.R
|
#' @importFrom parallel parSapply
.commonAUC <- function(..., segLength, dup, cluster) {
n <- ncol(x = dup)
k <- nrow(x = dup)
dup <- segLength*dup
.iteration <- function(i, dup) {
return(sapply(X = {i+1L}:n,
FUN = function(j, dupi, dup) {
sum(pmin(dupi, dup[,j]))
},
dupi = dup[,i],
dup = dup))
}
if (is.null(x = cluster)) {
vec <- unlist(x = sapply(X = 1L:{n-1L},
FUN = .iteration,
dup = dup))
} else {
vec <- parallel::parSapply(cl = cluster,
X = 1L:{n-1L},
FUN = .iteration,
dup = dup)
vec <- unlist(x = c(vec))
}
res <- matrix(data = 0.0, nrow = n, ncol = n)
res[lower.tri(x = res, diag = FALSE)] <- vec
res[upper.tri(x = res)] <- t(x = res)[upper.tri(x = res)]
diag(x = res) <- colSums(x = dup)
return( res )
}
|
/scratch/gouwar.j/cran-all/cranData/CONCUR/R/commonAUC.R
|
#' @importFrom CompQuadForm davies
#' @importFrom CompQuadForm liu
.compFunc <- function(..., X, verbose) {
ee <- eigen(x = X, symmetric = TRUE, only.values = TRUE)
lambda0 <- ee$values[abs(x = ee$values) >= 1e-10]
if (verbose) cat("using davies method\n")
p1 <- CompQuadForm::davies(q = 0.0, lambda = lambda0)$Qq
if (p1 > 1.0 | p1 < 1e-8 ) {
if (verbose) cat("result outside of expected range, using liu\n")
p1 <- CompQuadForm::liu(q = 0.0, lambda = lambda0)
}
return( p1 )
}
|
/scratch/gouwar.j/cran-all/cranData/CONCUR/R/compFunc.R
|
#' Copy Number Profile Curve-Based Association Test
#'
#' Implements a kernel-based association test for CNV aggregate analysis
#' in a certain genomic region (e.g., gene set, chromosome, or genome) that is
#' robust to the within-locus and across-locus etiologoical heterogeneity, and
#' bypass the need to define a "locus" unit for CNVs.
#'
#' The CNV data must adhere to the following conditions:
#' \itemize{
#' \item CNVs must be at least 1 unit long.
#' \item CNVs cannot end at the exact location another begins
#' }
#' Violations of these conditions typically occur when data are rounded to
#' a desired resolution.
#' For example
#'
#' \preformatted{
#' ID CHR BP1 BP2 TYPE
#' 1 13 10112087 10112414 3
#' }
#' becomes upon rounding to kilo
#' \preformatted{
#' ID CHR BP1 BP2 TYPE
#' 1 13 10112 10112 3 .
#' }
#' These cases should either be discarded or modified to be of length 1, e.g.,
#' \preformatted{
#' ID CHR BP1 BP2 TYPE
#' 1 13 10112 10113 3 .
#' }
#' As an example of condition 2
#' \preformatted{
#' ID CHR BP1 BP2 TYPE
#' 1 13 100768 101100 3
#' 1 13 101100 101299 1
#' }
#' should be modified to one of
#' \preformatted{
#' ID CHR BP1 BP2 TYPE
#' 1 13 100768 101100 3
#' 1 13 101101 101299 1
#' }
#' or
#' \preformatted{
#' ID CHR BP1 BP2 TYPE
#' 1 13 100768 101099 3
#' 1 13 101100 101299 1 .
#' }
#' Additionally,
#' \preformatted{
#' ID CHR BP1 BP2 TYPE
#' 1 13 100768 101100 3
#' 1 13 101100 101299 3
#' }
#' should be combined as
#' \preformatted{
#' ID CHR BP1 BP2 TYPE
#' 1 13 100768 101299 3 .
#' }
#'
#' @param cnv A character or data.frame object. If character, the
#' name of the data file containing the CNV data (with a header). If
#' data.frame, the CNV data. The data must contain the following columns:
#' "ID", "CHR", "BP1", "BP2", "TYPE", where "ID" is a unique patient id,
#' "CHR" is the CNV chromosome, "BP1" is the start location in base pairs
#' or kilo-base pairs,
#' "BP2" is the end location in base pairs or kilo-base pairs, and
#' "TYPE" is the CNV copy number.
#' @param X A character or data.frame object. If character, the
#' name of the data file containing the covariate data (with a header). If
#' data.frame, the covariate data. The data must contain a column titled
#' "ID" containing a unique patient id. This column must contain the
#' patient identifiers of the CNV data specified in
#' input cnv; however, it can contain patient identifiers not contained in cnv.
#' Further, inputs X and pheno must contain the same patient identifiers. Categorical
#' variables must be translated into design matrix format.
#' @param pheno A character or data.frame object. If character, the
#' name of the data file containing the phenotype data (with a header). If
#' data.frame, the phenotype data. The data must contain a column titled
#' "ID" containing a unique patient id. This column must contain the
#' patient identifiers of the CNV data specified in
#' input cnv; however, it can contain patient identifiers not contained in cnv.
#' Further, inputs X and pheno must contain the same patient identifiers.
#' @param phenoY A character object. The column name in input pheno containing
#' the phenotype of interest.
#' @param phenoType A character object. Must be one of of \{"bin", "cont"\} indicating
#' if input phenoY (i.e., the phenotype of interest) is binary or continuous.
#' @param ... Ignored. Included to require named inputs.
#' @param nCore An integer object. If nCore > 1, package parallel is used to
#' calculate the kernel. Though the methods of package CompQuadForm dominate
#' the time profile, setting nCore > 1L can improve computation times.
#' @param outFileKernel A character object or NULL. If a character, the
#' file in which the kernel is to be saved. If NULL, the kernel is returned
#' by the function.
#' @param verbose A logical object. If TRUE, progress information is printed
#' to the screen.
#'
#' @return A list containing the kernel (or its file name) and the p-value.
#'
#' @export
#'
#' @references Brucker, A., Lu, W., Marceau West, R., Yu, Q-Y., Hsiao, C. K.,
#' Hsiao, T-H., Lin, C-H., Magnusson, P. K. E., Holloway, S. T.,
#' Sullivan, P. F., Szatkiewicz, J. P., Lu, T-P., and
#' Tzeng, J-Y. Association testing using Copy Number Profile Curves (CONCUR)
#' enhances power in copy number variant analysis. <doi:10.1101/666875>.
#'
#' @include CAUCkernel.R pv_func.R dataChecks.R
#'
#' @examples
#'
#' data(cnvData)
#'
#' # limit data for examples
#' exCNV <- cnvData$ID %in% paste0("P", 1:150)
#' exCOV <- covData$ID %in% paste0("P", 1:150)
#' exPHE <- phenoData$ID %in% paste0("P", 1:150)
#'
#' # binary phenoType
#' results <- concur(cnv = cnvData[exCNV,],
#' X = covData[exCOV,],
#' pheno = phenoData[exPHE,],
#' phenoY = 'PHEB',
#' phenoType = 'bin',
#' nCore = 1L,
#' outFileKernel = NULL,
#' verbose = TRUE)
#'
#' # continuous phenoType
#' results <- concur(cnv = cnvData[exCNV,],
#' X = covData[exCOV,],
#' pheno = phenoData[exPHE,],
#' phenoY = 'PHEC',
#' phenoType = 'cont',
#' nCore = 1L,
#' outFileKernel = NULL,
#' verbose = TRUE)
#'
concur <- function(cnv,
X,
pheno,
phenoY,
phenoType,
...,
nCore = 1L,
outFileKernel = NULL,
verbose = TRUE) {
if (!is.character(x = phenoType)) stop("phenoType must be a character")
phenoType <- tolower(x = phenoType)
if (!{phenoType %in% c('bin','cont')}) {
stop("phenoType must be one of {bin, cont}")
}
if (verbose) cat("\tverifying pheno input\n")
phenoFile <- NULL
if (is.character(x = pheno)) {
phenoFile <- pheno
pheno <- tryCatch(expr = utils::read.table(file = pheno,
as.is = TRUE,
header = TRUE),
error = function(e){
cat("unable to read", pheno,"\n")
stop(e$message)
})
} else if (is.matrix(x = pheno)) {
pheno <- as.data.frame(x = pheno)
}
if (!is.data.frame(x = pheno)) {
stop('pheno must be a data.frame or a path to the data file')
}
if (!{"ID" %in% colnames(x = pheno)}) stop("ID must be a column of pheno")
if (!is.null(x = phenoFile)) {
rm(pheno)
pheno <- phenoFile
}
if (verbose) cat("processing copy number variant (cnv) data\n")
# read/process cnv data
if (is.character(x = cnv)) {
if (verbose) cat("\tcnv input provided as data file\n")
cnv <- tryCatch(expr = read.table(file = cnv,
as.is = TRUE,
header = TRUE),
error = function(e){
cat("unable to read", cnv,"\n")
stop(e$message)
})
} else if (is.matrix(x = cnv)) {
if (verbose) {
cat("\tcnv input provided as matrix - converting to data.frame\n")
}
cnv <- as.data.frame(x = cnv)
} else if (!is.data.frame(x = cnv)) {
stop('cnv must be a data.frame or a path to the data file')
}
# ensure minimum data is present in cnv
tst <- c("ID", "CHR", "BP1", "BP2", "TYPE") %in% colnames(x = cnv)
if (any(!tst)) stop("cnv does not appear to contain the correct data")
cnv <- cnv[,c("ID", "CHR", "BP1", "BP2", "TYPE")]
if (is.factor(x = cnv$ID)) cnv$ID <- as.character(x = levels(cnv$ID))[cnv$ID]
# remove duplicate records
dups <- duplicated(x = cnv)
if (verbose && {sum(dups) > 0}) {
cat("\t", sum(dups), "duplicate cnv records removed\n")
}
cnv <- cnv[!dups,]
cnv <- cnv[order(cnv$ID,cnv$CHR,cnv$BP1),]
tst <- .dataCheck(cnv = cnv)
kernel <- .CAUCkernel(cnv = cnv,
nCore = nCore,
outFileKernel = outFileKernel,
verbose = verbose)
rm(cnv)
return( .pv_func(pheno = pheno,
phenoY = phenoY,
X = X,
phenoType = phenoType,
kernel = kernel,
verbose = verbose) )
}
|
/scratch/gouwar.j/cran-all/cranData/CONCUR/R/concur.R
|
.createDF <- function(cnv){
m <- nrow(x = cnv)
# {2*m x 4}
id <- rep(x = 1L:m, each = 2L)
df1 <- data.frame('id' = cnv$ID,
'loc' = cnv$BP1,
'chr' = cnv$CHR,
'type' = cnv$TYPE)[id,]
df1[{1L:m}*2L,'loc'] <- cnv$BP2
df1[{1L:m}*2L,'type'] <- 2.0
rownames(x = df1) <- NULL
return( df1 )
}
|
/scratch/gouwar.j/cran-all/cranData/CONCUR/R/createDF.R
|
.dataCheck <- function(cnv) {
ids <- sort(x = unique(x = cnv$ID))
msg <- NULL
for (i in ids) {
idMatch <- cnv$ID == i
if (any(cnv[idMatch,"BP1"] %in% cnv[idMatch,"BP2"])) {
idData <- cnv[idMatch,]
chrs <- sort(x = unique(x = idData[,"CHR"]))
for (j in chrs) {
chrMatch <- idData[,"CHR"] == j
chrData <- idData[chrMatch,]
sameBP <- chrData[,"BP1"] == chrData[, "BP2"]
if (any(sameBP)) {
msg <- c(msg, paste("\nID", i, "CHR", j, "has same BP1 and BP2"))
chrData <- chrData[!sameBP,,drop=FALSE]
if (nrow(x = chrData) == 0L) next
}
overlap <- chrData[,"BP1"] %in% chrData[, "BP2"]
if (any(overlap)) {
msg <- c(msg,
paste("\nID", i, "CHR", j,
"CNV ends exactly where new CNV begins"))
}
}
}
}
if (!is.null(x = msg)) {
msg <- c(msg, "\nplease correct these data issues before proceeding")
stop(msg, call. = FALSE)
}
return( NULL )
}
|
/scratch/gouwar.j/cran-all/cranData/CONCUR/R/dataChecks.R
|
#' @importFrom dplyr left_join
#' @importFrom parallel makeCluster stopCluster
#' @include createDF.R uniqueCombinations.R popFunc.R segLength.R commonAUC.R
.deldup <- function(..., cnv, nCore, verbose) {
if (verbose) cat("\tobtaining duplication/deletion matrices\n")
m <- nrow(x = cnv)
# {2*m x 4}
df1 <- .createDF(cnv = cnv)
# unique location/chromosome combinations {r x 2}
uniqueLoc <- .uniqueCombinations(df = df1, verbose = verbose)
# unique patient ids
ids <- sort(x = unique(x = cnv$ID))
n <- length(x = ids)
r <- nrow(x = uniqueLoc)
dup <- matrix(data = 0.0, nrow = r, ncol = n)
del <- matrix(data = 0.0, nrow = r, ncol = n)
len <- .segLength(uniqueLoc = uniqueLoc, verbose = verbose)
tt <- numeric(length = r)
cnt <- integer(length = r)
for (i in 1L:length(x = ids)) {
# cnv data for patient i
cnvOfi <- df1[df1$id == ids[i],,drop = FALSE]
# chromosomes of patient i found in others of the population
common_chrs <- intersect(x = cnvOfi[,"chr"],
y = df1[df1$id != ids[i],"chr"])
if (length(x = common_chrs) == 0L) {
# if no shared chromosomes, use 2.0 as type
type <- rep(x = 2.0, times = r)
} else {
# transfer available type value(s) from cnvOfi to uniqueLoc
uniqueLoci <- dplyr::left_join(x = uniqueLoc,
y = cnvOfi,
by = c('loc','chr'))
type <- uniqueLoci$type
tst <- uniqueLoci$chr %in% common_chrs
# for chromosomes not shared with others in population, set to 2.0
type[!tst] <- 2.0
# for chromosomes shared with others in population, determine value
type[tst] <- .popFunc(cnv = cnv[cnv$ID == ids[i],,drop = FALSE],
common_chrs = common_chrs,
s1 = uniqueLoci[tst,,drop = FALSE])
}
dif <- type - 2.0
dup[,i] <- pmax( dif, 0.0)
del[,i] <- pmax(-dif, 0.0)
tst <- abs(x = dif) > 1e-8
tt[tst] <- tt[tst] + len[tst]
cnt <- cnt + tst
}
cnt[ cnt == 0L ] <- 1L
len <- tt / cnt
if (verbose) cat("\tobtaining kernel\n")
if (nCore > 1L) {
localCluster <- parallel::makeCluster(spec = nCore)
} else {
localCluster <- NULL
}
K <- .commonAUC(segLength = len, dup = dup, cluster = localCluster) +
.commonAUC(segLength = len, dup = del, cluster = localCluster)
if (nCore > 1L) {
parallel::stopCluster(cl = localCluster)
}
dimnames(x = K) <- list(ids, ids)
return( K )
}
|
/scratch/gouwar.j/cran-all/cranData/CONCUR/R/deldup.R
|
.popFunc <- function(..., cnv, common_chrs, s1) {
type <- s1$type
for (j in common_chrs) {
sisj <- s1$chr == j
cisj <- cnv$CHR == j
need <- s1$loc[sisj & is.na(x = type)]
for (l in need) {
tstRange <- {cnv$BP1 <= l} & {l <= cnv$BP2} & cisj
if (any(tstRange)) {
val <- sum(cnv$TYPE[tstRange])
} else {
val <- 2.0
}
type[sisj & {s1$loc == l}] <- val
}
}
return( type )
}
|
/scratch/gouwar.j/cran-all/cranData/CONCUR/R/popFunc.R
|
#' @importFrom utils read.table
#' @include test.bt.R test.qt.R
.pv_func <- function(...,
pheno,
phenoY,
phenoType,
X,
kernel,
verbose) {
if (verbose) cat("calculating p-value\n\tverifying phenoType input\n")
if (is.character(x = pheno)) {
pheno <- tryCatch(expr = utils::read.table(file = pheno,
as.is = TRUE,
header = TRUE),
error = function(e){
cat("unable to read", pheno,"\n")
stop(e$message)
})
} else if (is.matrix(x = pheno)) {
pheno <- as.data.frame(x = pheno)
}
outFileKernel <- NULL
if (is.character(x = kernel)) {
outFileKernel <- kernel
kernel <- tryCatch(expr = utils::read.table(file = kernel, header = TRUE),
condition = function(e){
cat("unable to read kernel\n")
stop( e$message )
})
kernel <- data.matrix(frame = kernel)
if (verbose) cat("\tread kernel from file", outFileKernel, "\n")
}
cnvID <- colnames(x = kernel)
if (is.factor(x = pheno$ID)) pheno$ID <- as.character(x = levels(pheno$ID))[pheno$ID]
# add diagonal elements for individuals with pheno data but no cnv data
inBoth <- pheno$ID %in% cnvID
if (!all(inBoth)) {
mm <- match(x = cnvID, table = pheno$ID)
if (any(is.na(x = mm))) stop("ID in cnv not present in pheno")
minNonZero <- min(x = diag(x = kernel))
nAdd <- sum(!inBoth)
cnames <- colnames(x = kernel)
kernel <- rbind(cbind(kernel,
matrix(data = 0.0,
nrow = nrow(x = kernel),
ncol = nAdd)),
cbind(matrix(data = 0.0,
nrow = nAdd,
ncol = ncol(x = kernel)),
diag(x = minNonZero, nrow = nAdd, ncol = nAdd)))
newnames <- c(cnames, pheno$ID[!inBoth])
dimnames(x = kernel) <- list(newnames, newnames)
if (verbose) {
cat("\textended kernel to include individuals with no cnv data\n")
}
ok <- order(colnames(x = kernel))
kernel <- kernel[ok,ok]
cnvID <- colnames(x = kernel)
if (!is.null(x = outFileKernel)) {
tryCatch(expr = utils::write.table(x = kernel, file = outFileKernel),
condition = function(e){
cat("unable to save kernel\n")
stop( e$message )
})
if (verbose) cat("\tsaved new kernel to file", outFileKernel, "\n")
}
}
if (!all(pheno$ID == cnvID)) {
mm <- match(x = cnvID, table = pheno$ID)
if (any(is.na(x = mm))) stop("ID in cnv not present in pheno")
if (verbose) cat("\treorderd pheno to align with cnv\n")
pheno <- pheno[mm,]
}
if (verbose) cat("\tverifying phenoY input\n")
phenoY <- tryCatch(expr = pheno[,phenoY],
condition = function(x){
cat("unable to identify", phenoY, "in pheno\n")
stop(x$message)
})
rm(pheno)
if (verbose) cat("\tverifying X input\n")
if (is.character(x = X)) {
X <- tryCatch(expr = utils::read.table(file = X,
as.is = TRUE,
header = TRUE),
error = function(e){
cat("unable to read", X,"\n")
stop(e$message)
})
} else if (is.matrix(x = X)) {
X <- as.data.frame(x = X)
} else if (!is.data.frame(x = X) & !is.null(x = X)) {
stop('X must be NULL, a data.frame, or a path to the data file')
}
if (!is.null(x = X)) {
if (!{"ID" %in% colnames(X)}) stop("ID must be a column of X data.frame")
}
if (is.factor(x = X$ID)) X$ID <- as.character(x = levels(X$ID))[X$ID]
if (!is.null(x = X)) {
if (!all(X$ID == cnvID)) {
mm <- match(cnvID, X$ID)
if (any(is.na(x = mm))) stop("ID in pheno not present in X")
if (verbose) cat("\treorderd X to align with cnv\n")
X <- X[mm,]
}
X <- X[,!{colnames(X) %in% "ID"},drop=FALSE]
}
if (phenoType == 'bin') {
pv <- .test.bt(y = phenoY,
K = kernel,
X = X,
verbose = verbose)
} else {
pv <- .test.qt(y = phenoY, K = kernel, X = X, verbose = verbose)
}
if (verbose) cat("p-value", pv, "\n")
if (!is.null(x = outFileKernel)) kernel <- outFileKernel
return( list("kernel" = kernel, "pValue" = pv) )
}
|
/scratch/gouwar.j/cran-all/cranData/CONCUR/R/pv_func.R
|
#' @importFrom utils head
#' @importFrom utils tail
.segLength <- function(uniqueLoc, verbose) {
dist <- diff(x = uniqueLoc$loc)*
{utils::tail(x = uniqueLoc$chr, n = -1L) ==
utils::head(x = uniqueLoc$chr, n = -1L)}
return( c(dist, 0.0) )
}
|
/scratch/gouwar.j/cran-all/cranData/CONCUR/R/segLength.R
|
# p-value function for binary
#' @importFrom stats glm
#' @importFrom stats model.matrix
#' @importFrom utils read.table
#' @include compFunc.R
# Function adapted from CKAT package
.test.bt <- function (..., y, K, X = NULL, verbose, nms) {
if (verbose) cat("\tbinary phenotype\n")
if (is.character(x = K)) {
if (verbose) cat("\treading kernel from file", K, "\n")
K <- utils::read.table(file = K, header = TRUE)
}
if (is.data.frame(x = K)) K <- data.matrix(frame = K)
n <- length(x = y)
if (is.null(x = X)) {
X1 <- matrix(data = rep(x = 1.0, times = length(x = y)), ncol = 1L)
} else {
X1 <- stats::model.matrix(~. , as.data.frame(X))
}
glmfit <- stats::glm(formula = y ~ X1-1, family = "binomial")
if (verbose) {
cat("\tglm fit\n")
print(glmfit)
}
mu <- glmfit$fitted.values
res.wk <- glmfit$residuals
res <- y - mu
w <- mu*{1.0 - mu}
sqrtw <- sqrt(x = w)
adj <- sum({sqrtw * res.wk}^2)
DX12 <- sqrtw * X1
qrX <- qr(x = DX12)
Q <- qr.Q(qr = qrX)
Q <- Q[, 1L:qrX$rank, drop=FALSE]
P0 <- diag(x = length(x = y)) - tcrossprod(x = Q)
DKD <- tcrossprod(x = sqrtw) * K
tQK <- t(x = Q) %*% DKD
QtQK <- Q %*% tQK
PKP1 <- DKD - QtQK - t(x = QtQK) + Q %*% (tQK %*% Q) %*% t(x = Q)
q1 <- as.numeric(x = res %*% K %*% res)
q1 <- q1 / adj
return( .compFunc(X = PKP1 - q1 * P0, verbose = verbose) )
}
|
/scratch/gouwar.j/cran-all/cranData/CONCUR/R/test.bt.R
|
#' @importFrom stats lm
#' @importFrom stats residuals
#' @importFrom stats model.matrix
#' @importFrom utils read.table
#' @include compFunc.R
# Function adapted from CKAT package
.test.qt <- function (..., y, K, X = NULL, verbose) {
if (verbose) cat("\tcontinuous phenotype\n")
if (is.character(x = K)) {
if (verbose) cat("\treading kernel from file", K, "\n")
K <- utils::read.table(file = K, header = TRUE)
}
if (is.data.frame(x = K)) K <- data.matrix(frame = K)
n <- length(x = y)
if (is.null(x = X)) {
X1 <- matrix(data = rep(x = 1.0, times = length(x = y)), ncol = 1L)
} else {
X1 <- stats::model.matrix(~. , as.data.frame(X))
}
px <- ncol(x = X1)
mod <- stats::lm(formula = y ~ X1-1)
if (verbose) {
cat("\tlm fit\n")
print(mod)
}
res <- stats::residuals(object = mod)
s2 <- sum(res^2)
D0 <- diag(x = length(x = y))
P0 <- D0 - X1 %*% solve(a = crossprod(x = X1), b = t(X1))
PKP <- P0 %*% K %*% P0
q <- as.numeric(x = res %*% K %*% res / s2)
return( .compFunc(X = PKP - q * P0, verbose = verbose) )
}
|
/scratch/gouwar.j/cran-all/cranData/CONCUR/R/test.qt.R
|
# unique location/chromosome combinations
#' @importFrom mgcv uniquecombs
.uniqueCombinations <- function(df, verbose) {
if (!all(c("loc", "chr") %in% colnames(df))) stop("incorrect data provided")
uniqueLoc <- mgcv::uniquecombs(x = df[,c('loc', 'chr')])
uniqueLoc <- uniqueLoc[order(uniqueLoc$chr, uniqueLoc$loc),]
r <- nrow(x = uniqueLoc)
if (verbose) {
cat("\tidentified", r, "unique chromosome/location combinations\n")
}
return( uniqueLoc )
}
|
/scratch/gouwar.j/cran-all/cranData/CONCUR/R/uniqueCombinations.R
|
#' Caculate the optimal subset lengths on the COR
#'
#' @param K is the number of subsets
#' @param nk is the length of subsets
#' @param alpha is the significance level
#' @param X is the observation matrix
#' @param y is the response vector
#'
#' @return seqL, seqN,lWMN
#' @export
#' @examples
#' p=6;n=1000;K=2;nk=200;alpha=0.05;sigma=1
#' e=rnorm(n,0,sigma); beta=c(sort(c(runif(p,0,1))));
#' data=c(rnorm(n*p,5,10));X=matrix(data, ncol=p);
#' y=X%*%beta+e;
#' COR(K=K,nk=nk,alpha=alpha,X=X,y=y)
COR=function(K=K,nk=nk,alpha=alpha,X=X,y=y){
n=nrow(X);p=ncol(X)
L=M=N=E=W=c(rep(1,K));I=diag(rep(1,nk));betam=matrix(rep(0,p*K), ncol=K)
R=matrix(rep(0,n*nk), ncol=n); Io=matrix(rep(0,nk*K), ncol=nk);
mr=matrix(rep(0,K*nk),ncol=nk)
for (i in 1:K){
mr[i,]=sample(1:n,nk,replace=T);
r=matrix(c(1:nk,mr[i,]),ncol=nk,byrow=T);
R[t(r)]=1
Io[i,]=r[2,]
X1=R%*%X;y1=R%*%y;
ux=solve(crossprod(X1))
sy=sqrt((t(y1)%*%(I-X1%*%solve(crossprod(X1))%*%t(X1))%*%y1)/(length(y1)-p))
L[i]= sy*sum(sqrt(diag(ux)))*(qt(1-alpha/2, length(y1)-p)-qt(alpha/2, length(y1)-p))
W[i]= sum(diag(t(ux)%*% ux))
M[i]= det(X1%*%t(X1))
N[i]=t(y1)%*% y1
E[i]=t(y1)%*%(I-X1%*%solve(crossprod(X1)) %*%t(X1))%*% y1/(length(y1)-p)
betam[,i]=solve(t(X1)%*%X1)%*%t(X1)%*%y1
}
seqL=which.min(L);seqN=which.min(N)
int=intersect(intersect(Io[which.min(W),],Io[which.min(M),]),Io[which.min(N),])
Xc=X[int,];yc= y[int]; I=diag(rep(1,length(int)))
t(yc)%*%(I-Xc%*%solve(crossprod(Xc))%*%t(Xc))%*% yc/(n-p)
minL=L[which.min(L)]
minM=M[which.min(M)]
minN=N[which.min(N)]
minE=E[which.min(E)]
lW=length(Io[which.min(W),])
lWM=length(intersect(Io[which.min(W),],Io[which.max(M),]))
lWMN=length(intersect(intersect(Io[which.min(W),],Io[which.max(M),]),Io[which.min(N),]))
return(list(seqL=seqL,seqN=seqN,lWMN=lWMN))
}
|
/scratch/gouwar.j/cran-all/cranData/COR/R/COR.R
|
#' Caculate the MSE values of the COR criterion in simulation
#'
#' @param K is the number of subsets
#' @param nk is the length of subsets
#' @param alpha is the significance level
#' @param X is the observation matrix
#' @param y is the response vector
#'
#' @return MSEx,MSEA,MSEc,MSEm,MSEa
#' @export
#' @examples
#' p=6;n=1000;K=2;nk=500;alpha=0.05;sigma=1
#' e=rnorm(n,0,sigma); beta=c(sort(c(runif(p,0,1))));
#' data=c(rnorm(n*p,5,10));X=matrix(data, ncol=p);
#' y=X%*%beta+e;
#' MSEcom(K=K,nk=nk,alpha=alpha,X=X,y=y)
MSEcom=function(K=K,nk=nk,alpha=alpha,X=X,y=y){
n=nrow(X);p=ncol(X)
beta=solve(t(X)%*%X)%*%t(X)%*%y;
L=M=N=E=W=c(rep(1,K));I=diag(rep(1,nk));betam=matrix(rep(0,p*K), ncol=K)
R=matrix(rep(0,n*nk), ncol=n); Io=matrix(rep(0,nk*K), ncol=nk);
mr=matrix(rep(0,K*nk),ncol=nk)
for (i in 1:K){
mr[i,]=sample(1:n,nk,replace=T);
r=matrix(c(1:nk,mr[i,]),ncol=nk,byrow=T);
R[t(r)]=1
Io[i,]=r[2,]
X1=R%*%X;y1=R%*%y;
ux=solve(crossprod(X1))
sy=sqrt((t(y1)%*%(I-X1%*%solve(crossprod(X1))%*%t(X1))%*%y1)/(length(y1)-p))
L[i]= sy*sum(sqrt(diag(ux)))*(qt(1-alpha/2, length(y1)-p)-qt(alpha/2, length(y1)-p))
W[i]= sum(diag(t(ux)%*% ux))
M[i]= det(X1%*%t(X1))
N[i]=t(y1)%*% y1
E[i]=t(y1)%*%(I-X1%*%solve(crossprod(X1)) %*%t(X1))%*% y1/(length(y1)-p)
betam[,i]=solve(t(X1)%*%X1)%*%t(X1)%*%y1
}
int=intersect(intersect(Io[which.min(W),],Io[which.min(M),]),Io[which.min(N),])
Xc=X[int,];yc= y[int]; I=diag(rep(1,length(int)))
#t(yc)%*%(I-Xc%*%solve(crossprod(Xc))%*%t(Xc))%*% yc/(n-p)
minL=L[which.min(L)]
minM=M[which.min(M)]
minN=N[which.min(N)]
minE=E[which.min(E)]
lW=length(Io[which.min(W),])
lWM=length(intersect(Io[which.min(W),],Io[which.max(M),]))
lWMN=length(intersect(intersect(Io[which.min(W),],Io[which.max(M),]),Io[which.min(N),]))
betac=solve(t(Xc)%*%Xc)%*%t(Xc)%*%yc;
Xx= X[Io[which.max(M),],];yx= y[Io[which.max(M),]]
betax=solve(t(Xx)%*%Xx)%*%t(Xx)%*%yx;
XA= X[Io[which.min(W),],];yA= y[Io[which.min(W),]]
betaA=solve(t(XA)%*%XA)%*%t(XA)%*%yA;
betamm=apply(betam,1, median);betaa=apply(betam,1, mean);
MSEx=sum(beta-betax)^2; MSEA=sum(beta-betaA)^2;
MSEc=sum(beta-betac)^2;MSEm=sum(beta-betamm)^2;
MSEa=sum(beta-betaa)^2;
return(list(MSEx=MSEx,MSEA=MSEA,MSEc=MSEc,MSEm=MSEm,MSEa=MSEa))
}
|
/scratch/gouwar.j/cran-all/cranData/COR/R/MSEcom.R
|
#' Caculate the MSE values of the COR criterion for redundant data in simulation
#'
#' @param K is the number of subsets
#' @param nk is the length of subsets
#' @param alpha is the significance level
#' @param X is the observation matrix
#' @param y is the response vector
#'
#' @return minE,Mcor,Mx,MA
#' @export
#' @examples
#' p=6;n=1000;K=2;nk=200;alpha=0.05;sigma=1
#' e=rnorm(n,0,sigma); beta=c(sort(c(runif(p,0,1))));
#' data=c(rnorm(n*p,5,10));X=matrix(data, ncol=p);
#' y=X%*%beta+e;
#' MSEver(K=K,nk=nk,alpha=alpha,X=X,y=y)
MSEver=function(K=K,nk=nk,alpha=alpha,X=X,y=y){
n=nrow(X);p=ncol(X)
L=M=N=E=W=c(rep(1,K));I=diag(rep(1,nk));betam=matrix(rep(0,p*K), ncol=K)
R=matrix(rep(0,n*nk), ncol=n); Io=matrix(rep(0,nk*K), ncol=nk);
mr=matrix(rep(0,K*nk),ncol=nk)
for (i in 1:K){
mr[i,]=sample(1:n,nk,replace=T);
r=matrix(c(1:nk,mr[i,]),ncol=nk,byrow=T);
R[t(r)]=1
Io[i,]=r[2,]
X1=R%*%X;y1=R%*%y;
ux=solve(crossprod(X1))
sy=sqrt((t(y1)%*%(I-X1%*%solve(crossprod(X1))%*%t(X1))%*%y1)/(length(y1)-p))
L[i]= sy*sum(sqrt(diag(ux)))*(qt(1-alpha/2, length(y1)-p)-qt(alpha/2, length(y1)-p))
W[i]= sum(diag(t(ux)%*% ux))
M[i]= det(X1%*%t(X1))
N[i]=t(y1)%*% y1
E[i]=t(y1)%*%(I-X1%*%solve(crossprod(X1)) %*%t(X1))%*% y1/(length(y1)-p)
betam[,i]=solve(t(X1)%*%X1)%*%t(X1)%*%y1
}
int=intersect(intersect(Io[which.min(W),],Io[which.min(M),]),Io[which.min(N),])
Xc=X[int,];yc= y[int]; I=diag(rep(1,length(int)))
minL=L[which.min(L)]
minM=M[which.min(M)]
minN=N[which.min(N)]
minE=E[which.min(E)]
lW=length(Io[which.min(W),])
lWM=length(intersect(Io[which.min(W),],Io[which.max(M),]))
lWMN=length(intersect(intersect(Io[which.min(W),],Io[which.max(M),]),Io[which.min(N),]))
I=diag(rep(1,length(int)))
Xcor= X[intersect(intersect(Io[which.min(W),],Io[which.max(M),]),Io[which.min(N),]),];
ycor= y[intersect(intersect(Io[which.min(W),],Io[which.max(M),]),Io[which.min(N),])]
I=diag(rep(1,length(ycor)));
Mcor=t(ycor)%*%(I-Xcor%*%solve(crossprod(Xcor))%*%t(Xcor))%*%ycor/(length(ycor)-p);
##MSE of the COR estimator
Xx= X[Io[which.max(M),],];yx= y[Io[which.max(M),]]
I=diag(rep(1,length(yx)));
Mx=t(yx)%*%(I-Xx%*%solve(crossprod(Xx))%*%t(Xx))%*% yx/(length(yx)-p);
XA= X[Io[which.min(W),],];yA= y[Io[which.min(W),]]
I=diag(rep(1,length(yA)));
MA=t(yA)%*%(I-XA%*%solve(crossprod(XA))%*%t(XA))%*% yA/(length(yA)-p);
return(list(minE=minE,Mcor=Mcor,Mx=Mx,MA=MA))
}
|
/scratch/gouwar.j/cran-all/cranData/COR/R/MSEver.R
|
#' Caculate the estimators of beta on the A-opt and D-opt
#'
#' @param K is the number of subsets
#' @param nk is the length of subsets
#' @param alpha is the significance level
#' @param X is the observation matrix
#' @param y is the response vector
#'
#' @return betaA, betaD
#' @export
#' @examples
#' p=6;n=1000;K=2;nk=200;alpha=0.05;sigma=1
#' e=rnorm(n,0,sigma); beta=c(sort(c(runif(p,0,1))));
#' data=c(rnorm(n*p,5,10));X=matrix(data, ncol=p);
#' y=X%*%beta+e;
#' beta_AD(K=K,nk=nk,alpha=alpha,X=X,y=y)
beta_AD=function(K=K,nk=nk,alpha=alpha,X=X,y=y){
n=nrow(X);p=ncol(X)
M=W=c(rep(1,K));
mr=matrix(rep(0,K*nk),ncol=nk)
R=matrix(rep(0,n*nk), ncol=n);Io=matrix(rep(0,nk*K), ncol=nk);
for (i in 1:K){
mr[i,]=sample(1:n,nk,replace=T);
r=matrix(c(1:nk,mr[i,]),ncol=nk,byrow=T);
R[t(r)]=1
Io[i,]=r[2,]
X1=R%*%X;y1=R%*%y;
ux=solve(crossprod(X1))
W[i]= sum(diag(t(ux)%*% ux))
M[i]= det(X1%*%t(X1))
}
XD= X[Io[which.max(M),],];yD= y[Io[which.max(M),]]
betaD=solve(crossprod(XD))%*%t(XD)%*% yD
XA= X[Io[which.min(W),],];yA= y[Io[which.min(W),]]
betaA=solve(crossprod(XA))%*%t(XA)%*% yA
return(list(betaA=betaA,betaD=betaD))
}
|
/scratch/gouwar.j/cran-all/cranData/COR/R/beta_AD.R
|
#' Caculate the estimator of beta on the COR
#'
#' @param K is the number of subsets
#' @param nk is the length of subsets
#' @param alpha is the significance level
#' @param X is the observation matrix
#' @param y is the response vector
#'
#' @return betaC
#' @export
#' @examples
#' p=6;n=1000;K=2;nk=200;alpha=0.05;sigma=1
#' e=rnorm(n,0,sigma); beta=c(sort(c(runif(p,0,1))));
#' data=c(rnorm(n*p,5,10));X=matrix(data, ncol=p);
#' y=X%*%beta+e;
#' beta_cor(K=K,nk=nk,alpha=alpha,X=X,y=y)
beta_cor=function(K=K,nk=nk,alpha=alpha,X=X,y=y){
n=nrow(X);p=ncol(X)
M=N=W=c(rep(1,K))
R=matrix(rep(0,n*nk), ncol=n); Io=matrix(rep(0,nk*K), ncol=nk);
mr=matrix(rep(0,K*nk),ncol=nk)
for (i in 1:K){
mr[i,]=sample(1:n,nk,replace=T);
r=matrix(c(1:nk,mr[i,]),ncol=nk,byrow=T);
R[t(r)]=1
Io[i,]=r[2,]
X1=R%*%X;y1=R%*%y;
ux=solve(crossprod(X1))
W[i]= sum(diag(t(ux)%*% ux))
M[i]= det(X1%*%t(X1))
N[i]=t(y1)%*% y1
}
int=intersect(intersect(Io[which.min(W),],Io[which.min(M),]),Io[which.min(N),])
lWMN=length(intersect(intersect(Io[which.min(W),],Io[which.max(M),]),Io[which.min(N),]))
Xcor= X[intersect(intersect(Io[which.min(W),],Io[which.max(M),]),Io[which.min(N),]),];
ycor= y[intersect(intersect(Io[which.min(W),],Io[which.max(M),]),Io[which.min(N),])]
betaC=solve(crossprod(Xcor))%*%t(Xcor)%*% ycor
return(list(betaC=betaC))
}
|
/scratch/gouwar.j/cran-all/cranData/COR/R/beta_cor.R
|
CORE <-
function(dataIn,keep=NULL,startcol="start",endcol="end",
chromcol="chrom",weightcol="weight",maxmark=1,minscore=0,pow=1,
assoc=c("I","J","P"),nshuffle=0,boundaries=NULL,seedme=sample(1e8,1),
shufflemethod=c("SIMPLE","RESCALE"),tiny=-1,
distrib=c("vanilla","Rparallel","Grid"),njobs=1,qmem=NA){
doshuffles<-"NO"
shufflemethod<-match.arg(shufflemethod)
assoc<-match.arg(assoc)
if(!(class(dataIn)%in%c("CORE","matrix","data.frame")))
stop("invalid class of argument dataIn")
if(inherits(dataIn,"CORE"))if(((!("assoc"%in%keep))&(dataIn$assoc!=assoc))|
((!("pow"%in%keep))&(dataIn$pow!=pow))){
cankeep<-c("maxmark","minscore","nshuffle","boundaries","seedme",
"shufflemethod","tiny")
for(item in intersect(keep,cankeep))assign(item,dataIn[[item]])
dataIn<-dataIn$input
startcol<-"start"
endcol<-"end"
chromcol<-"chrom"
weightcol<-"weight"
}
if(!inherits(dataIn,"CORE")){ #get cores from scratch
if(!(startcol%in%dimnames(dataIn)[[2]]))
stop("start column missing in input data")
if(!(endcol%in%dimnames(dataIn)[[2]]))
stop("end column missing in input data")
if(!(chromcol%in%dimnames(dataIn)[[2]])){
dataIn<-cbind(dataIn,rep(1,nrow(dataIn)))
dimnames(dataIn)[[2]][ncol(dataIn)]<-chromcol
}
if(!(weightcol%in%dimnames(dataIn)[[2]])){
dataIn<-cbind(dataIn,rep(1,nrow(dataIn)))
dimnames(dataIn)[[2]][ncol(dataIn)]<-weightcol
}
z<-as.matrix(dataIn[,c(chromcol,startcol,endcol,weightcol),drop=F])
#rm(dataIn)
dimnames(z)[[2]]<-c("chrom","start","end","weight")
if(!is.null(boundaries)){
if(!(startcol%in%dimnames(boundaries)[[2]]))
stop("start column missing in boundary table")
if(!(endcol%in%dimnames(boundaries)[[2]]))
stop("end column missing in boundary table")
if(!(chromcol%in%dimnames(boundaries)[[2]]))
stop("chrom column missing in boundary table")
boundaries<-as.matrix(boundaries[,c(chromcol,startcol,endcol),drop=F])
}
result<-switch(assoc,
I=ICOREiteration(z,maxmark,pow,minscore),
J=JCOREiteration(z,maxmark,pow,minscore),
P=PCOREiteration(z,maxmark,minscore)
)
returnme<-list(input=z,call=match.call(),minscore=minscore,maxmark=maxmark,
pow=pow,assoc=assoc,coreTable=result,seedme=seedme,boundaries=boundaries,
shufflemethod=shufflemethod,nshuffle=nshuffle,tiny=tiny)
if(nshuffle>0)doshuffles<-"FROMSCRATCH"
}
#If dataIn is a CORE, determine whether computation is to be continued
else{
assoc<-dataIn$assoc
pow<-dataIn$pow
cankeep<-c("maxmark","minscore",
"nshuffle","boundaries","seedme","shufflemethod","tiny")
for(item in intersect(keep,cankeep))assign(item,dataIn[[item]])
returnme<-list(input=dataIn$input,call=match.call(),minscore=minscore,
maxmark=maxmark,pow=dataIn$pow,assoc=dataIn$assoc,
shufflemethod=shufflemethod,seedme=seedme,nshuffle=nshuffle,tiny=tiny,
boundaries=boundaries)
if(dataIn$coreTable[nrow(dataIn$coreTable),"score"]>=minscore&
nrow(dataIn$coreTable)<maxmark){
z<-dataIn$input
if(assoc=="I"){
for(i in 1:nrow(dataIn$coreTable)){
fixus<-z[,"chrom"]==dataIn$coreTable[i,"chrom"]&
z[,"start"]<=dataIn$coreTable[i,"start"]&
z[,"end"]>=dataIn$coreTable[i,"end"]
z[fixus,"weight"]<-z[fixus,"weight"]*
(1-((dataIn$coreTable[i,"end"]-dataIn$coreTable[i,"start"]+1)/
(z[fixus,"end"]-z[fixus,"start"]+1))^dataIn$pow)
}
z<-z[z[,"weight"]>=tiny,,drop=F]
result<-ICOREiteration(z,maxmark-nrow(dataIn$coreTable),
dataIn$pow,minscore)
}
if(assoc=="J"){
for(i in 1:nrow(dataIn$coreTable)){
fixus<-z[,"chrom"]==dataIn$coreTable[i,"chrom"]&
pmax(z[,"start"],dataIn$coreTable[i,"start"])<=
pmin(z[,"end"],dataIn$coreTable[i,"end"])
z[fixus,"weight"]<-z[fixus,"weight"]*
(1-((pmin(dataIn$coreTable[i,"end"],z[fixus,"end"])-
pmax(dataIn$coreTable[i,"start"],z[fixus,"start"])+1)/
(pmax(dataIn$coreTable[i,"end"],z[fixus,"end"])-
pmin(dataIn$coreTable[i,"start"],z[fixus,"start"])+1))^
dataIn$pow)
}
z<-z[z[,"weight"]>=tiny,,drop=F]
result<-JCOREiteration(z,maxmark-nrow(dataIn$coreTable),
dataIn$pow,minscore)
}
if(assoc=="P"){
for(i in 1:nrow(dataIn$coreTable)){
fixus<-z[,"chrom"]==dataIn$coreTable[i,"chrom"]&
z[,"start"]<=dataIn$coreTable[i,"start"]&
z[,"end"]>=dataIn$coreTable[i,"end"]
z<-z[-fixus,,drop=F]
}
result<-PCOREiteration(z,maxmark-nrow(dataIn$coreTable),
minscore)
}
returnme$coreTable<-rbind(dataIn$coreTable,result)
}
else returnme$coreTable<-dataIn$coreTable[which(dataIn$coreTable[1:
min(maxmark,nrow(dataIn$coreTable)),"score"]>=minscore),,drop=F]
if(nshuffle>0){
if(dataIn$shufflemethod!=shufflemethod|dataIn$seedme!=seedme|
nrow(dataIn$coreTable)<nrow(returnme$coreTable)|dataIn$nshuffle==0)
doshuffles<-"FROMSCRATCH"
else if(dataIn$nshuffle<nshuffle)doshuffles<-"ADD"
else returnme$simscores<-
dataIn$simscores[1:nrow(returnme$coreTable),1:nshuffle,drop=F]
}
}
class(returnme)<-"CORE"
if(is.null(boundaries)){
y<-returnme$input[order(returnme$input[,"chrom"]),,drop=F]
boundaries<-cbind(unique(y[,"chrom"]),
tapply(X=y[,"start"],INDEX=y[,"chrom"],FUN=min),
tapply(X=y[,"end"],INDEX=y[,"chrom"],FUN=max))
}
dimnames(boundaries)[[2]]<-c("chrom","start","end")
if(assoc=="I") randfun<-ICORErandomized
if(assoc=="J") randfun<-JCORErandomized
if(assoc=="P") randfun<-PCORErandomized
distrib<-match.arg(distrib)
returnme<-Rparallel(randfun,distrib,doshuffles,nshuffle,dataIn,returnme,boundaries,njobs,qmem)
return(returnme)
}
|
/scratch/gouwar.j/cran-all/cranData/CORE/R/CORE.R
|
ICOREiteration <-
function(z,maxmark,pow=1,minscore=0,nprof=1){
chu<-unique(z[,"chrom"])
chc<-rep(0,length(chu))
for(i in 1:length(chu))chc[i]<-sum(z[z[,"chrom"]==chu[i],"weight"])
for(ich in 1:length(chu)){
za<-z[z[,"chrom"]==chu[rev(order(chc))][ich],c("start","end","weight"),drop=F]
za<-za[order(za[,"start"]),,drop=F]
spairs<-makepairs(za)
spairs<-spairs[order(spairs[,"start"]),,drop=F]
psched<-interval.schedule(spairs)
invl<-1/(za[,"end"]-za[,"start"]+1)
score<-rep(0,nrow(spairs))
for(sch in 1:max(psched)){
pspairs<-spairs[psched==sch,,drop=F]
ci<-containment.indicator(pspairs[,"start"],pspairs[,"end"],
za[,"start"],za[,"end"])
cigt<-(ci[,2]>=ci[,1])
if(sum(cigt)>0){
pci<-ci[cigt,,drop=F]
pinvl<-invl[cigt]
pwa<-za[cigt,"weight"]
score[psched==sch]<-score[psched==sch]+
(spairs[psched==sch,"end"]-
spairs[psched==sch,"start"]+1)^pow*
fill.values(pci,pwa*pinvl^pow,nrow(pspairs))
}
}
winloci<-matrix(ncol=4,nrow=maxmark,
dimnames=list(NULL,c("chrom","start","end","score")))
winloci[,"chrom"]<-chu[rev(order(chc))][ich]
for(i in 1:maxmark){
if(sum(score>=minscore)==0)break
spairs<-spairs[score>=minscore,,drop=F]
score<-score[score>=minscore]
psched<-interval.schedule(spairs)
iwin<-which.max(score)
winloci[i,c("start","end","score")]<-
c(spairs[iwin,"start"],spairs[iwin,"end"],score[iwin])
if(ich>1)if(((sum(accu[,"score"]>winloci[i,3])+i)>=maxmark))break
fixus<-za[,"start"]<=spairs[iwin,"start"]&za[,"end"]>=spairs[iwin,"end"]
if(sum(fixus)>0){
zaf<-za[fixus,,drop=F]
finvl<-invl[fixus]
neweight<-zaf[,"weight"]*
(1-((spairs[iwin,"end"]-spairs[iwin,"start"]+1)*finvl)^pow)
zaf<-cbind(zaf,neweight)
for(sch in 1:max(psched)){
pspairs<-spairs[psched==sch,,drop=F]
ci<-containment.indicator(pspairs[,"start"],pspairs[,"end"],
zaf[,"start"],zaf[,"end"])
cigt<-(ci[,2]>=ci[,1])
if(sum(cigt)>0){
pci<-ci[cigt,,drop=F]
pinvl<-finvl[cigt]
zas<-zaf[cigt,,drop=F]
score[psched==sch]<-score[psched==sch]+
(spairs[psched==sch,"end"]-
spairs[psched==sch,"start"]+1)^pow*
fill.values(pci,pinvl^pow*(zas[,"neweight"]-zas[,"weight"]),
nrow(pspairs))
}
}
za[fixus,"weight"]<-zaf[,"neweight"]
}
}
winloci<-winloci[!is.na(winloci[,"score"]),,drop=F]
if(ich==1)accu<-winloci
if(ich>1){
accu<-rbind(accu,winloci)
accu<-accu[order(accu[,"score"],decreasing=T),,
drop=F][1:min(maxmark,nrow(accu)),,drop=F]
}
}
accu[,"score"]<-accu[,"score"]/nprof
return(accu)
}
|
/scratch/gouwar.j/cran-all/cranData/CORE/R/ICOREiteration.R
|
ICORErandomized <-
function(procno=1,COREobj,boundaries,nprocs=1,rngoffset=0){
minscore<-max(COREobj$minscore,min(COREobj$coreTable[,"score"]))
set.seed(COREobj$seedme)
COREobj$nshuffle<-COREobj$nshuffle-rngoffset
myshuffles<-COREobj$nshuffle%/%nprocs
shuffleres<-COREobj$nshuffle%%nprocs
shuffleskip<-myshuffles*(procno-1)+min(shuffleres,procno-1)
if(procno<=shuffleres)myshuffles<-myshuffles+1
weightList<-vector(mode="list",length=nrow(COREobj$coreTable))
weight<-COREobj$input[,"weight"]
for(i in 1:nrow(COREobj$coreTable)){
za<-COREobj$input[COREobj$input[,"chrom"]==COREobj$coreTable[i,"chrom"],,drop=F]
cigt<-za[,"start"]<=COREobj$coreTable[i,"start"]&
za[,"end"]>=COREobj$coreTable[i,"end"]
weight[COREobj$input[,"chrom"]==COREobj$coreTable[i,"chrom"]][cigt]<-
weight[COREobj$input[,"chrom"]==COREobj$coreTable[i,"chrom"]][cigt]*
(1-((COREobj$coreTable[i,"end"]-COREobj$coreTable[i,"start"]+1)/
(za[cigt,"end"]-za[cigt,"start"]+1))^COREobj$pow)
weightList[[i]]<-matrix(ncol=2,
data=c(which(COREobj$input[,"chrom"]==COREobj$coreTable[i,"chrom"])[cigt],
weight[COREobj$input[,"chrom"]==COREobj$coreTable[i,"chrom"]][cigt]))
}
simscores<-matrix(ncol=myshuffles,nrow=nrow(COREobj$coreTable))
chrmax<-nrow(boundaries)
advanceRNG(randopt=COREobj$shufflemethod,nrand=shuffleskip+rngoffset,
nevents=nrow(COREobj$input))
for(shuffle in 1:myshuffles){
if(COREobj$shufflemethod=="SIMPLE")z<-
cbind(randomEventMoves(COREobj$input[,"end"]-COREobj$input[,"start"]+1,
boundaries),COREobj$input[,"weight"])
if(COREobj$shufflemethod=="RESCALE")z<-
cbind(randomRescaledEventMoves(COREobj$input[,c("start","end","chrom",
"weight")],boundaries),COREobj$input[,"weight"])
dimnames(z)[[2]]<-c("start","end","chrom","weight")
chu<-unique(z[,"chrom"])
chc<-rep(0,length(chu))
for(i in 1:length(chu))chc[i]<-sum(z[z[, "chrom"]==chu[i],"weight"])
scoremat<-matrix(nrow=nrow(COREobj$coreTable),ncol=length(chu),data=0)
for(ich in 1:length(chu)){
wza<-which(z[,"chrom"]==chu[rev(order(chc))][ich])
za<-z[wza,c("start","end","weight"),drop=F]
oza<-order(za[,"start"])
ooza<-order(oza)
za<-za[oza,,drop=F]
spairs<-makepairs(za)
spairs<-spairs[order(spairs[,"start"]),,drop=F]
psched<-interval.schedule(spairs)
invl<-1/(za[,"end"]-za[,"start"]+1)
score<-rep(0,nrow(spairs))
for(sch in 1:max(psched)){
pspairs<-spairs[psched==sch,,drop=F]
ci<-containment.indicator(pspairs[,"start"],pspairs[,"end"],
za[,"start"],za[,"end"])
cigt<-(ci[,2]>=ci[,1])
if(sum(cigt)>0){
pci<-ci[cigt,,drop=F]
pinvl<-invl[cigt]
pwa<-za[cigt,"weight"]
score[psched==sch]<-score[psched==sch]+
(spairs[psched==sch,"end"]-
spairs[psched==sch,"start"]+1)^COREobj$pow*
fill.values(pci,pwa*pinvl^COREobj$pow,nrow(pspairs))
}
}
for(i in 1:nrow(COREobj$coreTable)){
spairs<-spairs[score>=minscore,,drop=F]
if(nrow(spairs)==0){
scoremat[i:nrow(COREobj$coreTable),ich]<-0
break
}
scoremat[i,ich]<-max(score)
if(i==nrow(COREobj$coreTable))break
score<-score[score>=minscore]
psched<-interval.schedule(spairs)
whereIwas<-which(wza%in%weightList[[i]][,1])
if(length(whereIwas)>0){
whereIamNow<-ooza[whereIwas]
zaf<-cbind(za[whereIamNow,,drop=F],
weightList[[i]][weightList[[i]][,1]%in%wza,2])
dimnames(zaf)[[2]][ncol(zaf)]<-"neweight"
za[whereIamNow,"weight"]<-zaf[,"neweight"]
zaf<-zaf[order(zaf[,"start"]),,drop=F]
invl<-1/(zaf[,"end"]-zaf[,"start"]+1)
for(sch in 1:max(psched)){
pspairs<-spairs[psched==sch,,drop=F]
ci<-containment.indicator(pspairs[,"start"],pspairs[,"end"],
zaf[,"start"],zaf[,"end"])
cigt<-(ci[,2]>=ci[,1])
if(sum(cigt)>0){
pci<-ci[cigt,,drop=F]
pinvl<-invl[cigt]
pwa<-zaf[cigt,"neweight"]-zaf[cigt,"weight"]
score[psched==sch]<-score[psched==sch]+
(spairs[psched==sch,"end"]-
spairs[psched==sch,"start"]+1)^COREobj$pow*
fill.values(pci,pwa*pinvl^COREobj$pow,nrow(pspairs))
}
}
}
}
}
simscores[,shuffle]<-apply(scoremat,1,max)
}
return(simscores)
}
|
/scratch/gouwar.j/cran-all/cranData/CORE/R/ICORErandomized.R
|
JCOREiteration <-
function(z,maxmark,pow=1,minscore=0,nprof=1){
chu<-unique(z[,"chrom"])
chc<-rep(0,length(chu))
for(i in 1:length(chu))chc[i]<-sum(z[,"chrom"]==chu[i])
for(ich in 1:length(chu)){
za<-z[z[,"chrom"]==chu[rev(order(chc))][ich],c("start","end","weight"),drop=F]
za<-za[order(za[,"start"]),,drop=F]
spairs<-makepairs(za)
spairs<-spairs[order(spairs[,"start"]),,drop=F]
psched<-interval.schedule(spairs)
score<-rep(0,nrow(spairs))
for(sch in 1:max(psched)){
pspairs<-spairs[psched==sch,,drop=F]
ci<-overlap.indicator(pspairs[,"start"],pspairs[,"end"],
za[,"start"],za[,"end"])
cigt<-(ci[,2]>=ci[,1])
if(sum(cigt)>0){
pci<-ci[cigt,,drop=F]
zas<-za[cigt,,drop=F]
zaexp<-zas[rep(1:nrow(zas),times=pci[,2]-pci[,1]+1),,drop=F]
pind<-unlist(apply(pci,1,function(v){v[1]:v[2]}))
jac<-zaexp[,"weight"]*((pmin(zaexp[,"end"],pspairs[pind,"end"])-
pmax(zaexp[,"start"],pspairs[pind,"start"])+1)/
(pmax(zaexp[,"end"],pspairs[pind,"end"])-
pmin(zaexp[,"start"],pspairs[pind,"start"])+1))^pow
jacsums<-tapply(X=jac,FUN=sum,INDEX=as.factor(pind))
score[psched==sch][as.numeric(names(jacsums))]<-jacsums
}
}
winloci<-matrix(ncol=4,nrow=maxmark,
dimnames=list(NULL,c("chrom","start","end","score")))
winloci[,"chrom"]<-chu[rev(order(chc))][ich]
for(i in 1:maxmark){
if(sum(score>=minscore)==0)break
spairs<-spairs[score>=minscore,,drop=F]
score<-score[score>=minscore]
psched<-interval.schedule(spairs)
iwin<-which.max(score)
winloci[i,c("start","end","score")]<-
c(spairs[iwin,"start"],spairs[iwin,"end"],score[iwin])
if(ich>1)if(((sum(accu[,"score"]>winloci[i,3])+i)>=maxmark))break
fixus<-pmin(za[,"end"],spairs[iwin,"end"])>=
pmax(za[,"start"],spairs[iwin,"start"])
if(sum(fixus)>0){
zaf<-za[fixus,,drop=F]
neweight<-zaf[,"weight"]*(1-((pmin(zaf[,"end"],spairs[iwin,"end"])-
pmax(zaf[,"start"],spairs[iwin,"start"])+1)/
(pmax(zaf[,"end"],spairs[iwin,"end"])-
pmin(zaf[,"start"],spairs[iwin,"start"])+1))^pow)
zaf<-cbind(zaf,neweight)
for(sch in 1:max(psched)){
pspairs<-spairs[psched==sch,,drop=F]
ci<-overlap.indicator(pspairs[,"start"],pspairs[,"end"],
zaf[,"start"],zaf[,"end"])
cigt<-(ci[,2]>=ci[,1])
if(sum(cigt)>0){
pci<-ci[cigt,,drop=F]
zas<-zaf[cigt,,drop=F]
zaexp<-zas[rep(1:nrow(zas),times=pci[,2]-pci[,1]+1),,drop=F]
pind<-unlist(apply(pci,1,function(v){v[1]:v[2]}))
jac<-(zaexp[,"neweight"]-zaexp[,"weight"])*
((pmin(zaexp[,"end"],pspairs[pind,"end"])-
pmax(zaexp[,"start"],pspairs[pind,"start"])+1)/
(pmax(zaexp[,"end"],pspairs[pind,"end"])-
pmin(zaexp[,"start"],pspairs[pind,"start"])+1))^pow
jacsums<-tapply(X=jac,FUN=sum,INDEX=as.factor(pind))
score[psched==sch][as.numeric(names(jacsums))]<-
score[psched==sch][as.numeric(names(jacsums))]+jacsums
}
}
za[fixus,"weight"]<-zaf[,"neweight"]
}
}
winloci<-winloci[!is.na(winloci[,"score"]),,drop=F]
if(ich==1)accu<-winloci
if(ich>1){
accu<-rbind(accu,winloci)
accu<-accu[order(accu[,"score"],decreasing=T),,
drop=F][1:min(maxmark,nrow(accu)),,drop=F]
}
}
accu[,"score"]<-accu[,"score"]/nprof
return(accu)
}
|
/scratch/gouwar.j/cran-all/cranData/CORE/R/JCOREiteration.R
|
JCORErandomized <-
function(procno=1,COREobj,boundaries,nprocs=1,rngoffset=0){
minscore<-max(COREobj$minscore,min(COREobj$coreTable[,"score"]))
set.seed(COREobj$seedme)
COREobj$nshuffle<-COREobj$nshuffle-rngoffset
myshuffles<-COREobj$nshuffle%/%nprocs
shuffleres<-COREobj$nshuffle%%nprocs
shuffleskip<-myshuffles*(procno-1)+min(shuffleres,procno-1)
if(procno<=shuffleres)myshuffles<-myshuffles+1
weightList<-vector(mode="list",length=nrow(COREobj$coreTable))
weight<-COREobj$input[,"weight"]
for(i in 1:nrow(COREobj$coreTable)){
za<-COREobj$input[COREobj$input[,"chrom"]==COREobj$coreTable[i,"chrom"],,drop=F]
cigt<-pmin(za[,"end"],COREobj$coreTable[i,"end"])>=
pmax(za[,"start"],COREobj$coreTable[i,"start"])
zac<-za[cigt,,drop=F]
weight[COREobj$input[,"chrom"]==COREobj$coreTable[i,"chrom"]][cigt]<-
weight[COREobj$input[,"chrom"]==COREobj$coreTable[i,"chrom"]][cigt]*
(1-((pmin(zac[,"end"],COREobj$coreTable[i,"end"])-
pmax(zac[,"start"],COREobj$coreTable[i,"start"])+1)/
(pmax(zac[,"end"],COREobj$coreTable[i,"end"])-
pmin(zac[,"start"],COREobj$coreTable[i,"start"])+1))^COREobj$pow)
weightList[[i]]<-matrix(ncol=2,
data=c(which(COREobj$input[,"chrom"]==COREobj$coreTable[i,"chrom"])[cigt],
weight[COREobj$input[,"chrom"]==COREobj$coreTable[i,"chrom"]][cigt]))
}
simscores<-matrix(ncol=myshuffles,nrow=nrow(COREobj$coreTable))
chrmax<-nrow(boundaries)
advanceRNG(randopt=COREobj$shufflemethod,nrand=shuffleskip+rngoffset,
nevents=nrow(COREobj$input))
for(shuffle in 1:myshuffles){
if(COREobj$shufflemethod=="SIMPLE")z<-
cbind(randomEventMoves(COREobj$input[,"end"]-COREobj$input[,"start"]+1,boundaries),
COREobj$input[,"weight"])
if(COREobj$shufflemethod=="RESCALE")z<-
cbind(randomRescaledEventMoves(COREobj$input[,c("start","end","chrom","weight")],
boundaries),COREobj$input[,"weight"])
dimnames(z)[[2]]<-c("start","end","chrom","weight")
chu<-unique(z[,"chrom"])
chc<-rep(0,length(chu))
for(i in 1:length(chu))chc[i]<-sum(z[z[, "chrom"]==chu[i],"weight"])
scoremat<-matrix(nrow=nrow(COREobj$coreTable),ncol=length(chu),data=0)
for(ich in 1:length(chu)){
wza<-which(z[,"chrom"]==chu[rev(order(chc))][ich])
za<-z[wza,c("start","end","weight"),drop=F]
oza<-order(za[,"start"])
ooza<-order(oza)
za<-za[oza,,drop=F]
spairs<-makepairs(za)
spairs<-spairs[order(spairs[,"start"]),,drop=F]
psched<-interval.schedule(spairs)
score<-rep(0,nrow(spairs))
for(sch in 1:max(psched)){
pspairs<-spairs[psched==sch,,drop=F]
ci<-overlap.indicator(pspairs[,"start"],pspairs[,"end"],
za[,"start"],za[,"end"])
cigt<-(ci[,2]>=ci[,1])
if(sum(cigt)>0){
pci<-ci[cigt,,drop=F]
zas<-za[cigt,,drop=F]
zaexp<-zas[rep(1:nrow(zas),times=pci[,2]-pci[,1]+1),,drop=F]
pind<-unlist(apply(pci,1,function(v){v[1]:v[2]}))
jac<-zaexp[,"weight"]*((pmin(zaexp[,"end"],pspairs[pind,"end"])-
pmax(zaexp[,"start"],pspairs[pind,"start"])+1)/
(pmax(zaexp[,"end"],pspairs[pind,"end"])-
pmin(zaexp[,"start"],pspairs[pind,"start"])+1))^COREobj$pow
jacsums<-tapply(X=jac,FUN=sum,INDEX=as.factor(pind))
score[psched==sch][as.numeric(names(jacsums))]<-jacsums
}
}
for(i in 1:nrow(COREobj$coreTable)){
spairs<-spairs[score>=minscore,,drop=F]
if(nrow(spairs)==0){
scoremat[i:nrow(COREobj$coreTable),ich]<-0
break
}
scoremat[i,ich]<-max(score)
if(i==nrow(COREobj$coreTable))break
score<-score[score>=minscore]
psched<-interval.schedule(spairs)
whereIwas<-which(wza%in%weightList[[i]][,1])
if(length(whereIwas)>0){
whereIamNow<-ooza[whereIwas]
zaf<-cbind(za[whereIamNow,,drop=F],
weightList[[i]][weightList[[i]][,1]%in%wza,2])
dimnames(zaf)[[2]][ncol(zaf)]<-"neweight"
za[whereIamNow,"weight"]<-zaf[,"neweight"]
zaf<-zaf[order(zaf[,"start"]),,drop=F]
for(sch in 1:max(psched)){
pspairs<-spairs[psched==sch,,drop=F]
ci<-overlap.indicator(pspairs[,"start"],pspairs[,"end"],
zaf[,"start"],zaf[,"end"])
cigt<-(ci[,2]>=ci[,1])
if(sum(cigt)>0){
pci<-ci[cigt,,drop=F]
zas<-zaf[cigt,,drop=F]
zaexp<-zas[rep(1:nrow(zas),times=pci[,2]-pci[,1]+1),,drop=F]
pind<-unlist(apply(pci,1,function(v){v[1]:v[2]}))
jac<-(zaexp[,"neweight"]-zaexp[,"weight"])*
((pmin(zaexp[,"end"],pspairs[pind,"end"])-
pmax(zaexp[,"start"],pspairs[pind,"start"])+1)/
(pmax(zaexp[,"end"],pspairs[pind,"end"])-
pmin(zaexp[,"start"],pspairs[pind,"start"])+1))^COREobj$pow
jacsums<-tapply(X=jac,FUN=sum,INDEX=as.factor(pind))
score[psched==sch][as.numeric(names(jacsums))]<-
score[psched==sch][as.numeric(names(jacsums))]+jacsums
}
}
}
}
}
simscores[,shuffle]<-apply(scoremat,1,max)
}
return(simscores)
}
|
/scratch/gouwar.j/cran-all/cranData/CORE/R/JCORErandomized.R
|
PCOREiteration <-
function(z,maxmark,minscore=0,nprof=1){
chu<-unique(z[,"chrom"])
chc<-rep(0,length(chu))
for(i in 1:length(chu))chc[i]<-sum(z[,"chrom"]==chu[i])
for(ich in 1:length(chu)){
za<-z[z[,"chrom"]==chu[rev(order(chc))][ich],c("start","end","weight"),drop=F]
za<-za[order(za[,"start"]),,drop=F]
winloci<-matrix(ncol=4,nrow=maxmark,
dimnames=list(NULL,c("chrom","start","end","score")))
winloci[,"chrom"]<-chu[rev(order(chc))][ich]
for(i in 1:maxmark){
if(nrow(za)==0)break
y<-cbind(c(za[,"end"]+1,za[,"start"]),c(-za[,"weight"],za[,"weight"]))
y<-y[order(y[,1]),,drop=F]
cy2<-cumsum(y[,2])
score<-max(cy2)
if(score<minscore)break
stabstart<-y[which.max(cy2),1]
stabend<-min(y[y[,1]>stabstart&y[,2]<=0,1])-1
winloci[i,c("start","end","score")]<-c(stabstart,stabend,score)
if(ich>1)if(((sum(accu[,"score"]>winloci[i,"score"])+i)>=maxmark))break
za<-za[!(za[,"start"]<=stabstart&za[,"end"]>=stabstart),,drop=F]
}
winloci<-winloci[!is.na(winloci[,"score"]),,drop=F]
if(ich==1)accu<-winloci
if(ich>1){
accu<-rbind(accu,winloci)
accu<-accu[accu[,"score"]>=minscore,,drop=F]
accu<-accu[order(accu[,"score"],decreasing=T),,
drop=F][1:min(maxmark,nrow(accu)),,drop=F]
}
}
accu[,"score"]<-accu[,"score"]/nprof
return(accu)
}
|
/scratch/gouwar.j/cran-all/cranData/CORE/R/PCOREiteration.R
|
PCORErandomized <-
function(procno=1,COREobj,boundaries,nprocs=1,rngoffset=0){
minscore<-max(COREobj$minscore,min(COREobj$coreTable[,"score"]))
set.seed(COREobj$seedme)
COREobj$nshuffle<-COREobj$nshuffle-rngoffset
myshuffles<-COREobj$nshuffle%/%nprocs
shuffleres<-COREobj$nshuffle%%nprocs
shuffleskip<-myshuffles*(procno-1)+min(shuffleres,procno-1)
if(procno<=shuffleres)myshuffles<-myshuffles+1
weightList<-vector(mode="list",length=nrow(COREobj$coreTable))
weight<-COREobj$input[,"weight"]
for(i in 1:nrow(COREobj$coreTable)){
za<-COREobj$input[COREobj$input[,"chrom"]==COREobj$coreTable[i,"chrom"],,drop=F]
cigt<-za[,"start"]<=COREobj$coreTable[i,"start"]&
za[,"end"]>=COREobj$coreTable[i,"end"]
weight[COREobj$input[,"chrom"]==COREobj$coreTable[i,"chrom"]][cigt]<-0
weightList[[i]]<-matrix(ncol=2,
data=c(which(COREobj$input[,"chrom"]==COREobj$coreTable[i,"chrom"])[cigt],
weight[COREobj$input[,"chrom"]==COREobj$coreTable[i,"chrom"]][cigt]))
}
simscores<-matrix(ncol=myshuffles,nrow=nrow(COREobj$coreTable))
chrmax<-nrow(boundaries)
advanceRNG(randopt=COREobj$shufflemethod,nrand=shuffleskip+rngoffset,
nevents=nrow(COREobj$input))
for(shuffle in 1:myshuffles){
if(COREobj$shufflemethod=="SIMPLE")z<-
cbind(randomEventMoves(COREobj$input[,"end"]-COREobj$input[,"start"]+1,
boundaries),COREobj$input[,"weight"])
if(COREobj$shufflemethod=="RESCALE")z<-
cbind(randomRescaledEventMoves(COREobj$input[,c("start","end","chrom",
"weight")],boundaries),COREobj$input[,"weight"])
dimnames(z)[[2]]<-c("start","end","chrom","weight")
chu<-unique(z[,"chrom"])
chc<-rep(0,length(chu))
for(i in 1:length(chu))chc[i]<-sum(z[z[, "chrom"]==chu[i],"weight"])
scoremat<-matrix(nrow=nrow(COREobj$coreTable),ncol=length(chu),data=0)
for(ich in 1:length(chu)){
wza<-which(z[,"chrom"]==chu[rev(order(chc))][ich])
za<-z[wza,c("start","end","weight"),drop=F]
zaf<-za[za[,"weight"]>0,,drop=F]
y<-cbind(c(zaf[,"end"]+1,zaf[,"start"]),c(-zaf[,"weight"],zaf[,"weight"]))
y<-y[order(y[,1]),,drop=F]
cy2<-cumsum(y[,2])
mscore<-max(cy2)
for(i in 1:nrow(COREobj$coreTable)){
if(mscore<minscore){
scoremat[i:nrow(COREobj$coreTable),ich]<-0
break
}
scoremat[i,ich]<-mscore
if(i==nrow(COREobj$coreTable))break
whereIwas<-which(wza%in%weightList[[i]][,1])
if(length(whereIwas)>0){
za[whereIwas,"weight"]<-0
zaf<-za[za[,"weight"]>0,,drop=F]
y<-cbind(c(zaf[,"end"]+1,zaf[,"start"]),c(-zaf[,"weight"],zaf[,"weight"]))
y<-y[order(y[,1]),,drop=F]
cy2<-cumsum(y[,2])
mscore<-max(cy2)
}
}
}
simscores[,shuffle]<-apply(scoremat,1,max)
}
return(simscores)
}
|
/scratch/gouwar.j/cran-all/cranData/CORE/R/PCORErandomized.R
|
Rparallel<-function(randfun,distrib,doshuffles,nshuffle,dataIn,returnme,
boundaries,njobs,qmem){
if(distrib=="Rparallel"&doshuffles!="NO"){
ncores<-min(njobs,ifelse(doshuffles=="FROMSCRATCH",
nshuffle,nshuffle-dataIn$nshuffle))
cl<-parallel::makeCluster(ncores)
parallel::clusterEvalQ(cl=cl,expr=library(CORE))
}
if(distrib=="Grid"&doshuffles!="NO"){
ncores<-min(njobs,ifelse(doshuffles=="FROMSCRATCH",
nshuffle,nshuffle-dataIn$nshuffle))
timeid<-substring(Sys.time(),12,20)
system(paste("mkdir",timeid))
setwd(paste(getwd(),"/",timeid,sep=""))
WrRGrid(ncores)
nshuffle<-returnme$nshuffle
save(ncores,doshuffles,dataIn,randfun,boundaries,
returnme,file=paste(getwd(),"/tempMPI",sep=""))
sink(paste(getwd(),"/rjob.sh",sep=""))
cat("R CMD BATCH RGrid.R")
sink()
if(is.na(qmem)){
system(paste("qsub -cwd -V -sync y -l virtual_free=2G -t 1-",ncores,":1 rjob.sh",sep=""))
#system(paste("qsub -cwd -V -sync y -l m_mem_free=2G -t 1-",ncores,":1 rjob.sh",sep=""))
}
if(!is.na(qmem)){
system(paste("qsub -cwd -V -sync y ",qmem," -t 1-",ncores,":1 rjob.sh",sep=""))
}
load(paste(getwd(),"/jobid",sep=""))
myjobID<-get("job.id")
load(paste(getwd(),"/mygather.temp.",as.character(myjobID),"1",sep=""))
myresult<-get("x")
for(i in 2:ncores){
load(paste(getwd(),"/mygather.temp.",as.character(myjobID),as.character(i),sep=""))
myresult<-c(myresult,get("x"))
}
if(doshuffles=="FROMSCRATCH")mpidata<-myresult[1:(nshuffle*nrow(returnme$coreTable))]
if(doshuffles=="ADD")mpidata<-myresult[1:((nshuffle-dataIn$nshuffle)*nrow(returnme$coreTable))]
}
if(doshuffles=="FROMSCRATCH"){
returnme$simscores<-switch(distrib,
vanilla=randfun(COREobj=returnme,boundaries=boundaries),
Rparallel=matrix(nrow=nrow(returnme$coreTable),
data=unlist(parallel::parSapply(cl=cl,X=1:ncores,FUN=randfun,
COREobj=returnme,boundaries=boundaries,nprocs=ncores))),
Grid=matrix(nrow=nrow(returnme$coreTable),data=mpidata)
)
}
else if(doshuffles=="ADD"){
returnme$simscores<-
cbind(dataIn$simscores[1:nrow(returnme$coreTable),,drop=F],switch(distrib,
vanilla=randfun(COREobj=returnme,boundaries=boundaries,
rngoffset=dataIn$nshuffle),
Rparallel=matrix(nrow=nrow(returnme$coreTable),
data=unlist(parallel::parSapply(cl=cl,X=1:ncores,
FUN=randfun,COREobj=returnme,boundaries=boundaries,
rngoffset=dataIn$nshuffle,nprocs=ncores))),
Grid=matrix(nrow=nrow(returnme$coreTable),data=mpidata)
))
}
if("simscores"%in%names(returnme))returnme$p<-
(rowSums(returnme$simscores>returnme$coreTable[,"score"])+1)/
(ncol(returnme$simscores)+2)
if(exists("cl"))parallel::stopCluster(cl)
if(substring(distrib,1,3)=="Gri"){
returnme$nshuffle<-nshuffle
setwd("..")
system(paste("rm -rf",timeid))
#system(paste("rm mygather.temp.",as.character(myjobID),"*",sep=""))
#system("rm tempMPI")
#system("rm RGrid.*")
#system("rm jobid")
#system("rm rjob.sh*")
}
return(returnme)
}
|
/scratch/gouwar.j/cran-all/cranData/CORE/R/Rparallel.R
|
WrRGrid<-function(ncores){
sink(paste(getwd(),"/RGrid.R",sep=""))
cat(paste("task.id <-",'Sys.getenv("SGE_TASK_ID")'))
cat("\r\n")
cat(paste("job.id <-",'Sys.getenv("JOB_ID")'))
cat("\r\n")
cat(paste("load(paste(getwd(),",'"/tempMPI"',',sep=""))',sep=""))
cat("\r\n")
#cat("set.seed(task.id)")
#cat("\r\n")
#cat("x<-as.vector(randfun(procno=1,COREobj=returnme,
#boundaries=boundaries,nprocs=1,rngoffset=0))")
cat('if(doshuffles=="FROMSCRATCH"){')
cat("\r\n")
cat("x<-as.vector(randfun(procno=as.numeric(task.id),COREobj=returnme,
boundaries=boundaries,nprocs=ncores,rngoffset=0))")
cat("\r\n")
cat("}")
cat("\r\n")
cat('if(doshuffles=="ADD"){')
cat("\r\n")
cat("x<-as.vector(randfun(procno=as.numeric(task.id),COREobj=returnme,
boundaries=boundaries,nprocs=ncores,rngoffset=dataIn$nshuffle))")
cat("\r\n")
cat("}")
cat("\r\n")
cat(paste(paste("save(x,file=paste(getwd(),",'"/mygather.temp."',",as.character(job.id)",",as.character(task.id)",sep=""),',sep=""))',sep=""))
cat("\r\n")
cat(paste("save(job.id,file=paste(getwd(),",'"/jobid"',',sep=""))',sep=""))
cat("\r\n")
sink()
}
|
/scratch/gouwar.j/cran-all/cranData/CORE/R/WrRGrid.R
|
advanceRNG <-
function(randopt=c("SIMPLE","RESCALE"),nrand,nevents){
randopt<-match.arg(randopt)
switch(randopt,
SIMPLE=replicate(nrand,{a<-runif(2*nevents);rm(a)}),
RESCALE=replicate(nrand,{a<-sample(nevents);a<-runif(nevents);rm(a)})
)
return()
}
|
/scratch/gouwar.j/cran-all/cranData/CORE/R/advanceRNG.R
|
containment.indicator <-
function(vstart,vend,wstart,wend){
lw<-length(wstart)
lv<-length(vstart)
z<-cbind(c(vend,wend),c(1:lv,rep(0,lw)),c(rep(0,lv),1:lw))
z<-z[order(z[,1]),]
endbeforeend<-cummax(z[,2])[order(z[,3])][sort(z[,3])!=0]
z<-cbind(c(wstart,vstart),c(rep((lv+1),lw),1:lv),c(1:lw,rep(0,lv)))
z<-z[order(z[,1]),]
startafterstart<-rev(cummin(rev(z[,2])))[order(z[,3])][sort(z[,3])!=0]
return(cbind(startafterstart,endbeforeend))
}
|
/scratch/gouwar.j/cran-all/cranData/CORE/R/containment.indicator.R
|
envelope.indicator <-
function(vstart,vend,wstart,wend){
lw<-length(wstart)
lv<-length(vstart)
z<-cbind(c(vstart,wstart),c(1:lv,rep(0,lw)),c(rep(0,lv),1:lw))
z<-z[order(z[,1]),]
startbeforestart<-cummax(z[,2])[order(z[,3])][sort(z[,3])!=0]
z<-cbind(c(wend,vend),c(rep((lv+1),lw),1:lv),c(1:lw,rep(0,lv)))
z<-z[order(z[,1]),]
endafterend<-rev(cummin(rev(z[,2])))[order(z[,3])][sort(z[,3])!=0]
return(cbind(endafterend,startbeforestart))
}
|
/scratch/gouwar.j/cran-all/cranData/CORE/R/envelope.indicator.R
|
fill.values <-
function(fromto,values,ldest){
dest<-rep(0,ldest)
z<-cbind(c(fromto[,1],fromto[,2]+1),c(values,-values))
z<-z[order(z[,1]),]
z[,2]<-cumsum(z[,2])
z<-z[nrow(z)-rev(match(unique(rev(z[,1])),rev(z[,1])))+1,]
#if(z[nrow(z),1]>ldest)z<-z[-nrow(z),]
if(z[nrow(z),1]>ldest)z<-matrix(ncol=ncol(z),data=z[-nrow(z),])
dest[z[,1]]<-z[,2]
zz<-rep(0,length(dest))
zz[z[,1]]<-c(0,z[-nrow(z),2])
return(cumsum(dest-zz))
}
|
/scratch/gouwar.j/cran-all/cranData/CORE/R/fill.values.R
|
interval.schedule <-
function(v){
if(!("start"%in%dimnames(v)[[2]])|!("end"%in%dimnames(v)[[2]])|
length(dim(v))!=2)stop("interval.schedule: incorrect form of input\n")
lv<-nrow(v)
if(sum(sort(v[,"start"])==v[,"start"])!=lv)
stop("interval.schedule: input incorrectly ordered\n")
unsch<-lv
schedule<-0
vsched<-rep(0,lv)
while(unsch>0){
schedule<-schedule+1
vsched[cummax(v[,"end"]*(vsched==0))==v[,"end"]]<-schedule
unsch<-sum(vsched==0)
}
return(vsched)
}
|
/scratch/gouwar.j/cran-all/cranData/CORE/R/interval.schedule.R
|
makepairs <-
function(events){
if("chrom"%in%dimnames(events)[[2]]){
events<-events[order(events[,"chrom"]),,drop=F]
chromstarts<-tapply(X=events[,"start"],INDEX=events[,"chrom"],FUN=min)
chromends<-tapply(X=events[,"end"],INDEX=events[,"chrom"],FUN=max)
}
else{
chromstarts<-min(events[,"start"])
chromends<-max(events[,"end"])
}
ustarts<-sort(unique(events[,"start"])) #unique starts
uends<-sort(unique(events[,"end"])) #unique ends
#find for each unique start and end its chromosome (sch and ech)
z<-cbind(c(chromstarts,ustarts),c(rep(1,length(chromstarts)),
rep(0,length(ustarts))),c(rep(0,length(chromstarts)),1:length(ustarts)))
z<-z[order(z[,1]),]
sch<-cumsum(z[,2])[z[,3]!=0][order(z[z[,3]!=0,3])]
z<-cbind(c(chromstarts,uends),c(rep(1,length(chromstarts)),
rep(0,length(uends))),c(rep(0,length(chromstarts)),1:length(uends)))
z<-z[order(z[,1]),]
ech<-cumsum(z[,2])[z[,3]!=0][order(z[z[,3]!=0,3])]
#for each unique end find the leftmost unique start in its chromosome
sfirst<-match(1:length(chromstarts),sch)[ech]
#for each unique end find the last preceding unique start
z<-cbind(c(ustarts,uends),c(rep(1,length(ustarts)),rep(0,length(uends))))
z<-z[order(z[,1]),]
slast<-cumsum(z[,2])[z[,2]==0]
z<-cbind(rep(1:length(uends),times=(slast-sfirst+1)),
rep(0,sum(slast-sfirst+1)))
z[match(1:length(uends),z[,1]),2]<-match(1:length(uends),z[,1])
mypairs<-matrix(ncol=3,dimnames=list(NULL,c("start","end","id")),
data=c(ustarts[(1:nrow(z))-cummax(z[,2])+sfirst[z[,1]]],
uends[z[,1]],1:length(uends[z[,1]])))
return(mypairs)
}
|
/scratch/gouwar.j/cran-all/cranData/CORE/R/makepairs.R
|
overlap.indicator <-
function(vstart,vend,wstart,wend){
lw<-length(wstart)
lv<-length(vstart)
z<-cbind(c(wstart,vend),c(rep(0,lw),1:lv),c(1:lw,rep(0,lv)))
z<-z[order(z[,1]),]
endbefore<-cummax(z[,2])[order(z[,3])][sort(z[,3])!=0]
z<-cbind(c(vstart,wend),c(1:lv,rep((lv+1),lw)),c(rep(0,lv),1:lw))
z<-z[order(z[,1]),]
startafter<-rev(cummin(rev(z[,2])))[order(z[,3])][sort(z[,3])!=0]
return(cbind(endbefore+1,startafter-1))
}
|
/scratch/gouwar.j/cran-all/cranData/CORE/R/overlap.indicator.R
|
randomEventMoves <-
function(eventLengths,boundaries){
chrlengths<-boundaries[,"end"]-boundaries[,"start"]+1
ochl<-order(chrlengths,decreasing=T)
schl<-chrlengths[ochl]
cschl<-cumsum(schl)
sb<-boundaries[ochl,]
oe<-order(eventLengths,decreasing=T)
z<-cbind(c(chrlengths,eventLengths),
c(rep(1,length(chrlengths)),rep(0,length(eventLengths))))
z<-z[order(z[,1],decreasing=T),]
chrmax<-cumsum(z[,2])[z[,2]==0]
eventLengths<-z[z[,2]==0,1]
abslen<-floor(cschl[chrmax]*runif(length(chrmax)))
z<-cbind(c(cschl,abslen),c(rep(1,length(cschl)),rep(0,length(abslen))),
c(rep(0,length(cschl)),eventLengths))
oz<-order(z[,1])
oze<-order(z[z[,3]!=0,1])
z<-z[oz,]
z[,2]<-cumsum(z[,2])+1
z<-z[z[,3]!=0,,drop=F]
start<-sb[z[,2],"start"]+floor((schl[z[,2]]-z[,3]+1)*runif(nrow(z)))
end<-start+z[,3]-1
chrom<-ochl[z[,2]]
sec<-matrix(ncol=3,nrow=length(start),
dimnames=list(NULL,c("start","end","chrom")))
sec[oze,]<-cbind(start,end,chrom)
sec[oe,]<-sec
return(sec)
}
|
/scratch/gouwar.j/cran-all/cranData/CORE/R/randomEventMoves.R
|
randomRescaledEventMoves <-
function(events,boundaries,countOnly=F){
chrlengths<-boundaries[,"end"]-boundaries[,"start"]+1
chrfrac<-(events[,"end"]-events[,"start"]+1)/
chrlengths[match(events[,"chrom"],boundaries[,"chrom"])]
chrstart<-cumsum(chrlengths)-chrlengths+1
newchrom<-sample(unique(events[,"chrom"]),size=nrow(events),replace=T)
newstart<-chrstart[newchrom]+
floor((1-chrfrac)*runif(nrow(events))*
chrlengths[match(newchrom,boundaries[,"chrom"])])
if(countOnly)return()
newend<-newstart+
pmax(0,round(chrfrac*chrlengths[match(newchrom,boundaries[,"chrom"])])-1)
return(matrix(ncol=3,data=c(newstart,newend,newchrom),dimnames=list(NULL,
c("start","end","chrom"))))
}
|
/scratch/gouwar.j/cran-all/cranData/CORE/R/randomRescaledEventMoves.R
|
versionCore <- function()
{
tmp <- .C(C_versionCore, libVersion = paste(rep(".",times=256),collapse=""))
tmp$libVersion
}
#predict <- function(object, ...) UseMethod("predict", object)
#plot <- function(object, ...) UseMethod("plot", object)
destroyModels <-function(model=NULL)
{
if (is.null(model)) {
destroyCore()
initCore()
}
else {
if ( !("CoreModel" %in% class(model)) ){
stop("Only models of class CoreModel can be destroyed with this function. The parameter is of class ", class(model))
return(NULL) ;
}
modelID <- model$modelID
tmp <- .C(C_destroyOneCoreModel, as.integer(modelID) )
}
invisible(NULL)
}
cvCoreModel <- function(formula, data, model=c("rf","rfNear","tree","knn","knnKernel","bayes","regTree"), costMatrix=NULL,
folds=10, stratified=TRUE, returnModel=TRUE, ...)
{
# check formula or response index or reponse name
if (inherits(formula,"formula"))
className <- all.vars(formula)[1]
else if (is.numeric(formula)) {
if (formula == round(formula)) {# index of response variable
classIdx <- formula
className <- names(data)[classIdx]
}
else stop("The first argument must be a formula or prediction column name or prediction column index.")
}
else if (is.character(formula)) { # name of response variable
classIdx <- match(formula, names(data))
if (length(classIdx) != 1 || is.na(classIdx))
stop("The first argument must be a formula or prediction column name or prediction column index.")
className <- names(data[classIdx])
}
else stop("The first argument must be a formula or prediction column name or prediction column index.")
if (stratified)
foldIdx <- cvGenStratified(data[,className], k=folds)
else
foldIdx <- cvGen(nrow(data), k=folds)
evalCore<-list()
for (j in 1:folds) {
dTrain <- data[foldIdx!=j,]
dTest <- data[foldIdx==j,]
modelCore <- CoreModel(formula, dTrain, model, costMatrix, ...)
predCore <- predict(modelCore, dTest)
evalCore[[j]] <- modelEval(modelCore, correctClass=dTest[[className]],predictedClass=predCore$class, predictedProb=predCore$prob )
destroyModels(modelCore)
}
resList <- gatherFromList(evalCore)
avgs <- sapply(resList, mean)
stds <- sapply(resList, sd)
if (returnModel) {
modelCore <- CoreModel(formula, data, model, costMatrix, ...)
}
else {
modelCore <- list()
}
modelCore$avgs <- avgs
modelCore$stds <- stds
modelCore$evalList <- resList
modelCore
}
CoreModel <- function(formula, data, model=c("rf","rfNear","tree","knn","knnKernel","bayes","regTree"), costMatrix=NULL, ...)
{
# check formula or response index or reponse name
if (inherits(formula,"formula")) {
dat <- model.frame(formula, data=data, na.action=na.pass)
trms <- attr(dat,"terms")
attributes(trms) <- NULL
formulaExpanded <- as.formula(trms)
}
else {
if (is.numeric(formula)) {
if (formula == round(formula)) {# index of response variable
classIdx <- formula
className <- names(data)[classIdx]
}
else stop("The first argument must be a formula or prediction column name or prediction column index.")
}
else if (is.character(formula)) { # name of response variable
classIdx <- match(formula, names(data))
if (length(classIdx) != 1 || is.na(classIdx))
stop("The first argument must be a formula or prediction column name or prediction column index.")
className <- names(data[classIdx])
}
else stop("The first argument must be a formula or prediction column name or prediction column index.")
dat <- data.frame(data[, classIdx], data[, -classIdx, drop=FALSE])
names(dat)[1] <- className
# get formula explicitly to allow storage of all terms and their manipulation
frml <- paste(className, "~",paste(names(dat)[-1], sep="+",collapse="+"),sep="")
formulaExpanded <- as.formula(frml)
}
model <- match.arg(model) ;
isRegression <- model == "regTree"
if (isRegression && !is.null(costMatrix))
warning("For regression problems parameter costMatrix is ignored.");
if (!isRegression && !inherits(dat[[1]],"factor")) {
dat[[1]] <- factor(dat[[1]]);
cat("Changing dependent variable to factor with levels:",levels(dat[[1]]),"\n");
warning("Possibly this is an error caused by regression formula and classification model or vice versa.")
}
class.lev <- levels(dat[[1]]);
noClasses <- length(class.lev);
if (!isRegression) {
priorClassProb = table(dat[[1]])/nrow(dat)
avgTrainPrediction <- 0
}
else {
priorClassProb <- 0
avgTrainPrediction <- mean(dat[[1]])
}
if (!isRegression && is.null(costMatrix)) {
## create and fill uniform costs matrix
costMatrix <- 1 - diag(noClasses);
}
aux <- prepare.Data(dat, formulaExpanded, dependent=TRUE,numericAsOrdered=FALSE,skipNAcolumn=TRUE,skipEqualColumn=TRUE);
discnumvalues <- aux$discnumvalues;
discdata <- aux$discdata;
numdata <- aux$numdata;
discAttrNames <- dimnames(discdata)[[2]]
discValCompressed <- aux$disccharvalues
discValues <- aux$discValues
numAttrNames <- dimnames(numdata)[[2]]
skipNames <- dimnames(aux$skipmap)
discmap <- aux$discmap;
nummap <- aux$nummap;
skipmap <- aux$skipmap
options <- prepare.Options(...);
checkOptionsValues(options) ;
optRemain <- checkModelOptions(model, options) ;
if (length(optRemain) > 0) warning("Unused options:", paste(names(optRemain), collapse=", "));
options <- convert.Options(options)
options <- c(options, action=model)
tmp <- .C(C_buildCoreModel,
noInst = aux$noInst,
noDiscrete = ncol(discdata),
noDiscreteValues = as.integer(discnumvalues),
discreteData = as.integer(discdata), # vector of length noInst*noDiscrete, column-wise
noNumeric = ncol(numdata),
numericData = as.double(numdata), # vector of length noInst*noNumeric, column-wise
costs = as.double(costMatrix),
discAttrNames = as.character(discAttrNames),
discValNames = as.character(discValCompressed),
numAttrNames = as.character(numAttrNames),
numOptions = length(options),
optionsName = names(options),
optionsVal = options,
modelID = integer(1),
noClasses = integer(1),
NAOK=TRUE)
if (tmp$modelID == -1) {
return(NULL)
}
res <- list(modelID=tmp$modelID, class.lev=class.lev, model=model, formula=aux$formulaOut,
noClasses = tmp$noClasses, priorClassProb = priorClassProb,
avgTrainPrediction = avgTrainPrediction,
noNumeric = tmp$noNumeric, noDiscrete=tmp$noDiscrete, discAttrNames = discAttrNames,
discValNames = discValues, numAttrNames = numAttrNames,
discmap = discmap, nummap = nummap, skipmap = skipmap
)
class(res) <- "CoreModel"
res
}
predict.CoreModel <- function(object, newdata, ..., costMatrix=NULL, type=c("both","class","probability"))
{
model <- object
#rm(object)
type<-match.arg(type)
modelID <- model$modelID;
isRegression <- model$noClasses == 0
noClasses <- model$noClasses;
class.lev <- model$class.lev;
#terms <- delete.response(model$terms);
allVars <- all.vars(model$formula)
if (length(allVars)<=1) { # formula was striped to no variables probably due to equal or missing data
# thereofre predict with default classifier
if (model$model == "regTree") {
returnList <- rep(object$avgTrainPrediction, nrow(newdata))
}
else {
maxPrior <- nnet::which.is.max(object$priorClassProb)
pred <- rep(factor(class.lev[maxPrior],levels=class.lev), nrow(newdata))
prob <- matrix(object$priorClassProb, nrow=nrow(newdata), ncol=noClasses,dimnames=list(NULL,class.lev),byrow=TRUE);
if (type == "both")
returnList <- list(class=pred,probabilities=prob)
else if (type=="class")
returnList <- pred
else if (type == "probability")
returnList <- prob
}
return(returnList)
}
newFormula <- reformulate(allVars[-1])
#newdata <- as.data.frame(newdata)
#dat <- model.frame(model$formula, data=newdata, na.action=na.pass);
dat <- model.frame(newFormula, data=newdata, na.action=na.pass);
aux <- prepare.Data(dat, model$formula, dependent=FALSE,class.lev=class.lev, numericAsOrdered=FALSE,skipNAcolumn=FALSE, skipEqualColumn=FALSE);
#aux <- prepare.Data(dat[-1], model$formula, dependent=FALSE,class.lev, skipNAcolumn=FALSE, skipEqualColumn=FALSE);
noInst <- aux$noInst
discnumvalues <- aux$discnumvalues;
discdata <- aux$discdata;
numdata <- aux$numdata;
if (!isRegression && is.null(costMatrix))
costMatrix <- 1 - diag(noClasses); ## create and fill uniform costs matrix
options <- prepare.Options(...);
checkOptionsValues(options) ;
optRemain <- checkPredictOptions(model, options)
if (length(optRemain) > 0) warning("Unused options:", paste(names(optRemain), collapse=", "));
tmp <- .C(C_predictWithCoreModel,
as.integer(modelID),
noInst = noInst,
discreteData = as.integer(discdata), # vector of length noInst*noDiscrete, columnwise
numericData = as.double(numdata), # vector of length noInst*noNumeric, columnwise
costs = as.double(costMatrix),
predicted = integer(noInst),
prob = double(noInst*noClasses),
predictedReg = double(noInst),
numOptions = length(options),
optionsName = names(options),
optionsVal = options,
NAOK=TRUE)
if (model$model == "regTree") {
returnList <- tmp$predictedReg
}
else {
code <- tmp$predicted;
code[code==0] <- NA;
pred <- factor(class.lev[code],levels=class.lev);
prob <- matrix(tmp$prob, nrow=noInst, ncol=noClasses,dimnames=list(NULL,class.lev));
if (type == "both")
returnList <- list(class=pred,probabilities=prob)
else if (type=="class")
returnList <- pred
else if (type == "probability")
returnList <- prob
}
returnList
}
display<- function(x, format=c("screen","dot")) UseMethod("display", x)
display.CoreModel <- function(x, format=c("screen","dot")) {
format<-match.arg(format)
if (x$model %in% c("tree","knn","knnKernel","bayes","regTree")){
if (format=="screen")
treeStr <- .Call(C_printTree2R, as.integer(x$modelID))
else if (format == "dot")
treeStr <- .Call(C_printTreeDot2R, as.integer(x$modelID))
}
else {
warning("The model provided is not of appropriate type for this visualization.");
treeStr <- ""
}
cat(treeStr)
invisible(treeStr)
}
plot.CoreModel<-function(x, trainSet, rfGraphType=c("attrEval","outliers","scaling", "prototypes","attrEvalCluster"), clustering=NULL,...)
{
rfGraphType<-match.arg(rfGraphType)
# regression or decision tree
if (x$model == "regTree" || x$model == "tree"){
rmodel <- getRpartModel(x, trainSet) ;
#plot(rmodel) # ,compress=T,branch=0.5);
#text(rmodel) # , pretty=0);
rpart.plot(rmodel,roundint=FALSE,...)
}
else if (x$model == "rf" || x$model == "rfNear"){
if (rfGraphType == "attrEval") {
imp<-rfAttrEval(x);
plotRFStats(imp, plotLine=TRUE, myAxes=all.vars(x$formula)[-1]);
}
else if (rfGraphType == "attrEvalCluster") {
#importance by cluster
impc<-rfAttrEvalClustering(x, trainSet, clustering);
plotRFMulti(impc$imp, impc$levels, myAxes=all.vars(x$formula)[-1]);
}
else if (rfGraphType == "outliers"){
out<-rfOutliers(x, trainSet);
plotRFStats(abs(out), cluster=as.character(trainSet[[all.vars(x$formula)[1]]]));
}
else if (rfGraphType == "scaling"){
dis<-rfProximity(x, outProximity=F);
#get 4 most important components
space<-spaceScale(dis, 4);
# display 1. in 2. component
subDim<-c(space$points[,1], space$points[,2]);
dim(subDim)<-c(length(space$points[,1]),2);
className <- all.vars(x$formula)[1];
cluster<-trainSet[as.character(className)];
plotRFStats(subDim, t(cluster));
}
else if (rfGraphType == "prototypes"){
# 10 most typical cases for each class based on predicted class probability
best<-classPrototypes(x, trainSet, 10);
vnorm<-varNormalization(x, trainSet[best$prototypes,]);
plotRFNorm(vnorm, best$cluster, best$levels, 0.15, myHoriz=TRUE, myAxes=all.vars(x$formula)[-1]);
}
}
else {
warning("The model provided has no visualization.");
}
invisible(x)
}
attrEval <- function(formula, data, estimator, costMatrix = NULL, outputNumericSplits=FALSE, ...)
{
## find the index of estimator
isRegression <- FALSE ;
estDsc <- infoCore(what="attrEval");
estIndex <- match(estimator, estDsc, nomatch=-1);
if (estIndex == -1) {
estDscReg <- infoCore(what="attrEvalReg");
estIndex <- match(estimator, estDscReg, nomatch=-1);
if (estIndex == -1)
stop("Invalid estimator parameter")
else
isRegression = TRUE ;
}
# check formula or response index or reponse name
if (inherits(formula,"formula")) {
dat <- model.frame(formula, data=data, na.action=na.pass)
trms <- attr(dat,"terms")
attributes(trms) <- NULL
formulaExpanded <- as.formula(trms)
}
else {
if (is.numeric(formula)) {
if (formula == round(formula)) {# index of response variable
classIdx <- formula
className <- names(data)[classIdx]
}
else stop("The first argument must be a formula or prediction column name or prediction column index.")
}
else if (is.character(formula)) { # name of response variable
classIdx <- match(formula, names(data))
if (length(classIdx) != 1 || is.na(classIdx))
stop("The first argument must be a formula or prediction column name or prediction column index.")
className <- names(data[classIdx])
}
else stop("The first argument must be a formula or prediction column name or prediction column index.")
dat <- data.frame(data[, classIdx], data[, -classIdx, drop=FALSE])
names(dat)[1] <- className
# get formula explicitly to allow storage of all terms and their manipulation
frml <- paste(className, "~",paste(names(dat)[-1], sep="+",collapse="+"),sep="")
formulaExpanded <- as.formula(frml)
}
if (!isRegression && !inherits(dat[[1]],"factor")) {
dat[[1]] <- factor(dat[[1]]);
cat("Changing dependent variable to factor with levels:",levels(dat[[1]]),"\n");
warning("Possibly this is an error caused by regression formula and classification attribute estimator or vice versa.")
}
if (!isRegression && is.null(costMatrix)) {
class.lev <- levels(dat[[1]]);
noClasses <- length(class.lev)
# create and fill uniform costs matrix
costMatrix <- 1 - diag(noClasses);
}
aux <- prepare.Data(dat,formulaExpanded,dependent=TRUE,numericAsOrdered=FALSE,skipNAcolumn=TRUE,skipEqualColumn=FALSE);
discnumvalues <- aux$discnumvalues;
discdata <- aux$discdata;
discmap <- aux$discmap;
numdata <- aux$numdata;
nummap <- aux$nummap;
skipmap<-aux$skipmap
discAttrNames <- dimnames(discdata)[[2]]
discValCompressed <- aux$disccharvalues
discValues <- aux$discValues
numAttrNames <- dimnames(numdata)[[2]]
options <- prepare.Options(...);
#check solaris which cannot handle openMP code and force it to use a single thread
# versionStr <- paste(version,sep="",collapse="")
# if (grepl("sun",versionStr,fixed=T) || grepl("solaris",versionStr,fixed=TRUE)) {
# options[length(options)+1] <- as.character(1)
# names(options)[length(options)] <- "maxThreads"
# }
checkOptionsValues(options) ;
optRemain <- checkEstimatorOptions(estimator, options, isRegression) ;
if (length(optRemain) > 0) warning("Unused options:", paste(names(optRemain), collapse=", "));
if (isRegression) {
tmp <- .C(C_estimateCoreReg,
noInst= aux$noInst,
noDiscrete = ncol(discdata),
noDiscreteValues = as.integer(discnumvalues),
discreteData = as.integer(discdata), # vector of length noInst*noDiscrete, columnwise
noNumeric = ncol(numdata),
numericData = as.double(numdata), # vector of length noInst*noNumeric, columnwise
discAttrNames = as.character(discAttrNames),
discValNames = as.character(discValCompressed),
numAttrNames = as.character(numAttrNames),
numOptions = length(options),
optionsName = names(options),
optionsVal = options,
selEst = estIndex,
estDisc = double(ncol(discdata)),
estNum = double(ncol(numdata)),
splitPointNum = double(ncol(numdata)),
NAOK=TRUE);
# assumes length(estNum) == noNumeric, but estNum[1] for predictor is not used
if (nummap[1] != 1) stop("no dependent variable in prepared regression data");
}
else {
tmp <- .C(C_estimateCore,
noInst = aux$noInst,
noDiscrete = ncol(discdata),
noDiscreteValues = as.integer(discnumvalues),
discreteData = as.integer(discdata), # vector of length noInst*noDiscrete, columnwise
noNumeric = ncol(numdata),
numericData = as.double(numdata), # vector of length noInst*noNumeric, columnwise
costs = as.double(costMatrix),
discAttrNames = as.character(discAttrNames),
discValNames = as.character(discValCompressed),
numAttrNames = as.character(numAttrNames),
numOptions = length(options),
optionsName = names(options),
optionsVal = options,
selEst = estIndex,
estDisc = double(ncol(discdata)),
estNum = double(ncol(numdata)),
splitPointNum = double(ncol(numdata)),
NAOK=TRUE);
# assumes length(estDisc) == noDiscrete, but estDist[1] for class is not used
if (discmap[1] != 1) stop("no class in prepared data"); # for debugging only
}
est <- double(length(discmap) + length(nummap)+length(skipmap));
est[discmap] <- tmp$estDisc;
est[nummap] <- tmp$estNum;
names(est)[discmap] <- discAttrNames
names(est)[nummap] <- numAttrNames
if (outputNumericSplits) {
sp <- double(length(discmap) + length(nummap)+length(skipmap));
sp[nummap] <- tmp$splitPointNum
names(sp)[nummap] <- numAttrNames
return( list(attrEval=est[-c(1,skipmap)], splitPointNum=sp[nummap][-1]))
}
else { # output only feature evaluations
return(est[-c(1,skipmap)])
}
}
rfAttrEval <- function(model) {
if (! model$model %in% c("rf","rfNear") ) stop("Only random forest model can evaluate attributes with this function.");
modelID <- model$modelID
tmp <- .C(C_rfAttrEval,
modelID = as.integer(modelID),
est = double(model$noDiscrete+model$noNumeric))
est <- double(length(model$discmap) + length(model$nummap)+length(model$skipmap));
est[model$discmap] <- tmp$est[1:length(model$discmap)];
est[model$nummap] <- tmp$est[(length(model$discmap)+1) : (length(model$discmap) + length(model$nummap)) ];
names(est)[model$discmap] <- model$discAttrNames
names(est)[model$nummap] <- model$numAttrNames
est[-c(1,model$skipmap)];
}
rfOOB <- function(model) {
if (! model$model %in% c("rf","rfNear") )
stop("Only random forest models can output out of bag performance estimators. Current model is of type ", model$model);
modelID <- model$modelID
tmp <- .C(C_rfOOB,
modelID = as.integer(modelID),
oobAccuracy = double(1),
oobMargin = double(1),
oobCorrelation = double(1))
res<-list(accuracy=tmp$oobAccuracy, margin=tmp$oobMargin, correlation=tmp$oobCorrelation)
return(res)
}
ordEval <- function(formula, data, file=NULL, rndFile=NULL, variant=c("allNear","attrDist1","classDist1"), ...)
{
# check formula or response index or reponse name
if (inherits(formula,"formula")) {
dat <- model.frame(formula, data=data, na.action=na.pass)
trms <- attr(dat,"terms")
attributes(trms) <- NULL
formulaExpanded <- as.formula(trms)
}
else {
if (is.numeric(formula)) {
if (formula == round(formula)) {# index of response variable
classIdx <- formula
className <- names(data)[classIdx]
}
else stop("The first argument must be a formula or prediction column name or prediction column index.")
}
else if (is.character(formula)) { # name of response variable
classIdx <- match(formula, names(data))
if (length(classIdx) != 1 || is.na(classIdx))
stop("The first argument must be a formula or prediction column name or prediction column index.")
className <- names(data[classIdx])
}
else stop("The first argument must be a formula or prediction column name or prediction column index.")
dat <- data.frame(data[, classIdx], data[, -classIdx, drop=FALSE])
names(dat)[1] <- className
# get formula explicitly to allow storage of all terms and their manipulation
frml <- paste(className, "~",paste(names(dat)[-1], sep="+",collapse="+"),sep="")
formulaExpanded <- as.formula(frml)
}
variant <- match.arg(variant)
variantIdx=match(variant,eval(formals()$variant),nomatch=-1)
if (!inherits(dat[[1]],"factor")) {
dat[[1]] <- factor(dat[[1]]);
}
class.lev <- levels(dat[[1]]);
aux <- prepare.Data(dat, formulaExpanded,dependent=TRUE, numericAsOrdered=TRUE,skipNAcolumn=TRUE,skipEqualColumn=TRUE);
discnumvalues <- aux$discnumvalues;
discdata <- aux$discdata;
discmap <- aux$discmap;
discAttrNames <- dimnames(discdata)[[2]]
discValNames <- aux$disccharvalues
options <- prepare.Options(...);
checkOptionsValues(options) ;
optRemain <- checkOrdEvalOptions(options)
if (length(optRemain) > 0) warning("Unused options:", paste(names(optRemain), collapse=", "));
noAttr <- ncol(discdata) - 1
maxAttrValues <- max(discnumvalues[-1])+1
statNames<-getStatNames() ;
noStats <- length(statNames) ## we get 8 statistics about random normalizers
tmp <- .C(C_ordEvalCore,
noInst = aux$noInst,
noDiscrete = ncol(discdata),
noDiscreteValues = as.integer(discnumvalues),
discreteData = as.integer(discdata), # vector of length noInst*noDiscrete, columnwise
discAttrNames = as.character(discAttrNames),
discValNames = as.character(discValNames),
numOptions = length(options),
optionsName = names(options),
optionsVal = options,
reinfPos = double(noAttr * maxAttrValues),
reinfNeg = double(noAttr * maxAttrValues),
anchor = double(noAttr * maxAttrValues),
rndReinfPos = double(noAttr * maxAttrValues * noStats),
rndReinfNeg = double(noAttr * maxAttrValues * noStats),
rndAnchor = double(noAttr * maxAttrValues * noStats),
noAV = integer(noAttr * maxAttrValues),
file = as.character(file),
rndFile = as.character(rndFile),
variant = as.integer(variantIdx),
NAOK=TRUE)
attrNames <- names(dat)[-1]
attrMap <- (discmap[-1]) - 1
attrMapLen <- length(attrMap)
avNames <- c(1:(maxAttrValues-1),"all")
avMap <- 1:(maxAttrValues-1)
avMapLen <- length(avMap)
reinfPos <- matrix(tmp$reinfPos, nrow=noAttr, ncol=maxAttrValues,dimnames=list(attrNames,avNames));
reinfNeg <- matrix(tmp$reinfNeg, nrow=noAttr, ncol=maxAttrValues,dimnames=list(attrNames,avNames));
anchor <- matrix(tmp$anchor, nrow=noAttr, ncol=maxAttrValues,dimnames=list(attrNames,avNames));
noAV <- matrix(tmp$noAV, nrow=noAttr, ncol=maxAttrValues,dimnames=list(attrNames,avNames));
rndReinfPos <- array(tmp$rndReinfPos, dim=c(noAttr, maxAttrValues,noStats), dimnames=list(attrNames, avNames, statNames));
rndReinfNeg <- array(tmp$rndReinfNeg, dim=c(noAttr, maxAttrValues,noStats), dimnames=list(attrNames, avNames, statNames)) ;
rndAnchor <- array(tmp$rndAnchor, dim=c(noAttr, maxAttrValues,noStats), dimnames=list(attrNames, avNames, statNames));
rndReinfPosAttr=matrix(rndReinfPos[attrMap,maxAttrValues,],nrow=noAttr, ncol=noStats,dimnames=list(attrNames,statNames))
rndReinfNegAttr=matrix(rndReinfNeg[attrMap,maxAttrValues,],nrow=noAttr, ncol=noStats,dimnames=list(attrNames,statNames))
rndAnchorAttr=matrix(rndAnchor[attrMap,maxAttrValues,],nrow=noAttr, ncol=noStats,dimnames=list(attrNames,statNames))
res<-list(reinfPosAV=reinfPos[attrMap,avMap, drop=FALSE],
reinfNegAV=reinfNeg[attrMap,avMap, drop=FALSE],
anchorAV=anchor[attrMap,avMap, drop=FALSE],
noAV = noAV[attrMap,avMap, drop=FALSE],
reinfPosAttr=reinfPos[attrMap,maxAttrValues, drop=FALSE],
reinfNegAttr=reinfNeg[attrMap,maxAttrValues, drop=FALSE],
anchorAttr=anchor[attrMap,maxAttrValues, drop=FALSE],
noAVattr = noAV[attrMap,maxAttrValues, drop=FALSE],
rndReinfPosAV=rndReinfPos[attrMap,avMap, , drop=FALSE],
rndReinfNegAV=rndReinfNeg[attrMap,avMap, , drop=FALSE],
rndAnchorAV=rndAnchor[attrMap,avMap, , drop=FALSE],
rndReinfPosAttr=rndReinfPosAttr,
rndReinfNegAttr=rndReinfNegAttr,
rndAnchorAttr=rndAnchorAttr,
attrNames= attrNames,
valueNames=aux$discValues[-1],
noAttr=length(attrNames),
ordVal=maxAttrValues-1,
variant=variant,
file=file,
rndFile=rndFile,
formula=aux$formulaOut
);
class(res) <- "ordEval"
return(res)
}
plotInstEval<-function(oeInstFile, oeInstRndFile, noAttr, ...) {
inst<-read.table(oeInstFile,header=FALSE,sep=",",colClasses="character",strip.white=TRUE,na.strings=c("NA","?"))
instNorm<-read.table(oeInstRndFile,header=FALSE,sep=",",colClasses="character",strip.white=TRUE,na.strings=c("NA","?"))
#noAttr <- length(ordEvalData$noAVattr)
#ordVal <- ncol(ordEvalData$reinfPosAV)
statNames<-getStatNames() ;
noStats <- length(statNames)
ord<-list()
noInst <- nrow(inst)/(noAttr+1)
for (i in 1:noInst) {
className <- as.character(trimSpaces(inst[(i-1)*(noAttr+1)+1, 1]))
classValue <- as.character(trimSpaces(inst[(i-1)*(noAttr+1)+1, 2]))
attrName <- as.character(trimSpaces(inst[((i-1)*(noAttr+1)+2):(i*(noAttr+1)), 1]))
valueName <- as.character(trimSpaces(inst[((i-1)*(noAttr+1)+2):(i*(noAttr+1)), 2]))
reinfPos <- as.numeric(inst[((i-1)*(noAttr+1)+2):(i*(noAttr+1)), 3])
reinfNeg <- as.numeric(inst[((i-1)*(noAttr+1)+2):(i*(noAttr+1)), 4])
anchor <- as.numeric(inst[((i-1)*(noAttr+1)+2):(i*(noAttr+1)), 5])
rndReinfPos<-list()
rndReinfNeg<-list()
rndAnchor<-list()
for (iA in 1:noAttr){
rndReinfPos[[iA]]<-as.numeric(instNorm[(i-1)*(noAttr+1)+1+iA,2:(1+noStats)])
rndReinfNeg[[iA]]<-as.numeric(instNorm[(i-1)*(noAttr+1)+1+iA,(2+noStats):(1+2*noStats)])
rndAnchor[[iA]]<-as.numeric(instNorm[(i-1)*(noAttr+1)+1+iA,(2+2*noStats):(1+3*noStats)])
}
ord[[i]]<-list(className=className,classValue=classValue,attributeName=attrName,valueName=valueName,reinfPos=reinfPos,reinfNeg=reinfNeg,anchor=anchor,
rndReinfPos=rndReinfPos,rndReinfNeg=rndReinfNeg,rndAnchor=rndAnchor)
}
oeInst(ord, noAttr, ...)
}
plotOrdEval<-function(file, rndFile=NULL, ...){
# read data from files and transform the two tables to internal object as returned by ordEval
ord<-read.table(file,header=TRUE,sep=",",strip.white=TRUE)
if (!is.null(rndFile))
ordNorm<-read.table(rndFile,header=TRUE,sep=",",strip.white=TRUE)
## extract number of attributes and values from first column
name <- ord[,1]
dup <- duplicated(name)
for (i in 2:length(dup))
if (dup[i])
break ;
ordVal <- i-3
noAttr <- length(unique(name)) - ordVal
statNames<-getStatNames()
noStats <- length(statNames) ## we get 8 statistics about random normalizers
attrNames <- c()
avNames <- c(1:ordVal)
for (iA in 1:noAttr) {
attrNames[iA] <- as.character(ord[(iA-1)*(ordVal+1)+1,1])
}
reinfPosAV <- matrix(0, nrow=noAttr, ncol=ordVal,dimnames=list(attrNames,avNames));
reinfNegAV <- matrix(0, nrow=noAttr, ncol=ordVal,dimnames=list(attrNames,avNames));
anchorAV <- matrix(0, nrow=noAttr, ncol=ordVal,dimnames=list(attrNames,avNames));
noAV <- matrix(0, nrow=noAttr, ncol=ordVal,dimnames=list(attrNames,avNames));
reinfPosAttr <- array(0,dim=c(noAttr),dimnames=list(attrNames));
reinfNegAttr <- array(0, dim=c(noAttr),dimnames=list(attrNames));
anchorAttr <- array(0, dim=c(noAttr),dimnames=list(attrNames));
noAVattr <- array(0, dim=c(noAttr),dimnames=list(attrNames));
rndReinfPosAV <- array(0, dim=c(noAttr, ordVal,noStats), dimnames=list(attrNames, avNames, statNames));
rndReinfNegAV <- array(0, dim=c(noAttr, ordVal,noStats), dimnames=list(attrNames, avNames, statNames)) ;
rndAnchorAV <- array(0, dim=c(noAttr, ordVal,noStats), dimnames=list(attrNames, avNames, statNames));
rndReinfPosAttr <- array(0, dim=c(noAttr, noStats), dimnames=list(attrNames, statNames));
rndReinfNegAttr <- array(0, dim=c(noAttr, noStats), dimnames=list(attrNames, statNames)) ;
rndAnchorAttr <- array(0, dim=c(noAttr,noStats), dimnames=list(attrNames, statNames));
valueNames<-list()
for (iA in 1:noAttr) {
#attrNames[iA] <- ord[(iA-1)*(ordVal+1)+1,1]
valueNames[[iA]] <- as.character(ord[(2+(iA-1)*(ordVal+1)):(iA*(ordVal+1)),1])
noAV[iA,] <- ord[(2+(iA-1)*(ordVal+1)):(iA*(ordVal+1)),5]
for(i in 1:ordVal) {
reinfPosAV[iA,i] <- ord[(iA - 1) * (ordVal + 1) + i + 1, 2]
reinfNegAV[iA,i] <- ord[(iA - 1) * (ordVal + 1) + i + 1, 3]
anchorAV[iA,i] <- ord[(iA - 1) * (ordVal + 1) + i + 1, 4]
if (!is.null(rndFile)) {
rndReinfPosAV[iA,i,] <- as.numeric(ordNorm[(iA - 1) * (ordVal + 1) + i + 1, 2:(noStats+1)])
rndReinfNegAV[iA,i,] <- as.numeric(ordNorm[(iA - 1) * (ordVal + 1) + i + 1, (2+noStats):(1+2*noStats)])
rndAnchorAV[iA,i,] <- as.numeric(ordNorm[(iA - 1) * (ordVal + 1) + i + 1, (2+2*noStats):(1+3*noStats)])
}
}
i <- 0
noAVattr[iA] <- ord[(1+(iA-1)*(ordVal+1)),5]
reinfPosAttr[iA] <- ord[(iA - 1) * (ordVal + 1) + i + 1, 2]
reinfNegAttr[iA] <- ord[(iA - 1) * (ordVal + 1) + i + 1, 3]
anchorAttr[iA] <- ord[(iA - 1) * (ordVal + 1) + i + 1, 4]
if (!is.null(rndFile)) {
rndReinfPosAttr[iA,] <- as.numeric(ordNorm[(iA - 1) * (ordVal + 1) + i + 1, 2:(noStats+1)])
rndReinfNegAttr[iA,] <- as.numeric(ordNorm[(iA - 1) * (ordVal + 1) + i + 1, (2+noStats):(1+2*noStats)])
rndAnchorAttr[iA,] <- as.numeric(ordNorm[(iA - 1) * (ordVal + 1) + i + 1, (2+2*noStats):(1+3*noStats)])
}
}
oeObj <- list(reinfPosAV=reinfPosAV, reinfNegAV=reinfNegAV, anchorAV=anchorAV, noAV = noAV,
reinfPosAttr=reinfPosAttr, reinfNegAttr=reinfNegAttr, anchorAttr=anchorAttr, noAVattr = noAVattr,
rndReinfPosAV=rndReinfPosAV, rndReinfNegAV=rndReinfNegAV, rndAnchorAV=rndAnchorAV,
rndReinfPosAttr=rndReinfPosAttr, rndReinfNegAttr=rndReinfNegAttr, rndAnchorAttr=rndAnchorAttr,
attrNames= attrNames, valueNames=valueNames, noAttr=length(attrNames),ordVal=ordVal,variant=NULL,file=file, rndFile=rndFile
)
class(oeObj) <- "ordEval"
plot(oeObj,...) ## call of plot.ordEval
}
plot.ordEval<-function(x, graphType=c("avBar", "attrBar", "avSlope"), ...) {
graphType<-match.arg(graphType)
if (graphType=="avSlope")
avSlopeObject(x, ...)
else if (graphType=="avBar" )
avNormBarObject(x, ...)
else if (graphType=="attrBar")
attrNormBarObject(x, ...)
invisible(x)
}
printOrdEval<-function(x) {
object <- x
maxAttrChars <- max(nchar(c(object$attrNames,"Attribute")))
maxAVChars <- max(nchar(c(unlist(object$valueNames),"Value")))
header <- paste(sprintf("%*s %*s",maxAttrChars,"Attribute",maxAVChars,"Value"),
sprintf("%6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s","Down","Down_p","Down_l","Down_h","Up","Up_p","Up_l","Up_h","Anchor","Anch_p","Anch_l","Anch_h")
,sep=" ")
cat(header,"\n")
for (a in 1:object$noAttr) {
line <- paste(sprintf("%*s %*s",maxAttrChars,object$attrNames[a],maxAVChars,"all"),
sprintf("%6.4f %6.4f %6.4f %6.4f %6.4f %6.4f %6.4f %6.4f %6.4f %6.4f %6.4f %6.4f",
object$reinfPosAttr[a],
object$rndReinfPosAttr[a,"p-value"],
object$rndReinfPosAttr[a,"lowPercentile"],
object$rndReinfPosAttr[a,"highPercentile"],
object$reinfNegAttr[a],
object$rndReinfNegAttr[a,"p-value"],
object$rndReinfNegAttr[a,"lowPercentile"],
object$rndReinfNegAttr[a,"highPercentile"],
object$anchorAttr[a],
object$rndAnchorAttr[a,"p-value"],
object$rndAnchorAttr[a,"lowPercentile"],
object$rndAnchorAttr[a,"highPercentile"]),
sep=" ") ;
cat(line,"\n")
for (v in 1:object$ordVal){
line <- paste(sprintf("%*s %*s",maxAttrChars," ",maxAVChars,object$valueNames[[a]][v]),
sprintf("%6.4f %6.4f %6.4f %6.4f %6.4f %6.4f %6.4f %6.4f %6.4f %6.4f %6.4f %6.4f",
object$reinfPosAV[a,v],
object$rndReinfPosAV[a,v,"p-value"],
object$rndReinfPosAV[a,v,"lowPercentile"],
object$rndReinfPosAV[a,v,"highPercentile"],
object$reinfNegAV[a,v],
object$rndReinfNegAV[a,v,"p-value"],
object$rndReinfNegAV[a,v,"lowPercentile"],
object$rndReinfNegAV[a,v,"highPercentile"],
object$anchorAV[a,v],
object$rndAnchorAV[a,v,"p-value"],
object$rndAnchorAV[a,v,"lowPercentile"],
object$rndAnchorAV[a,v,"highPercentile"]),
sep=" ")
cat(line,"\n")
}
}
}
modelEval <- function(model=NULL, correctClass, predictedClass, predictedProb=NULL, costMatrix=NULL, priorClProb = NULL, avgTrainPrediction = NULL, beta=1) {
if (is.null(predictedClass) && is.null(predictedProb)) {
warning("Only one of the predictedClass and predictedProb parameters can be NULL")
return(NULL) ;
}
if (is.null(model)) {
if (!is.null(avgTrainPrediction)) {
return(modelEvaluationReg.Core(correctClass,predictedClass,avgTrainPrediction))
}
else {
if (is.null(predictedClass))
predictedClass <- levels(correctClass)[apply(predictedProb, 1, which.max)]
return(modelEvaluationClass.Core(correctClass,predictedClass,predictedProb,costMatrix,priorClProb,beta))
}
}
if ( !("CoreModel" %in% class(model)) ){
warning("Only models of type CoreModel can be evaluated with this type of call. Others shall supply NULL for parameter model, and provide value of avgTrainPrediction in case of regression.")
return(NULL) ;
}
if (model$model == "regTree") {
if (is.null(avgTrainPrediction))
avgTrainPrediction <- model$avgTrainPrediction
return(modelEvaluationReg.Core(correctClass,predictedClass,model$avgTrainPrediction))
}
else {
if (is.null(priorClProb))
priorClProb <- model$priorClassProb
if (is.null(predictedClass))
predictedClass <- model$class.lev[apply(predictedProb, 1, which.max)]
return(modelEvaluationClass.Core(correctClass,predictedClass,predictedProb,costMatrix,priorClProb,beta))
}
}
modelEvaluationClass.Core <- function(correctClass, predictedClass, predictedProb=NULL, costMatrix=NULL, priorClassProb=NULL, beta=1) {
# common vector of levels
if (inherits(correctClass,"factor")) {
levelsCorrect<-levels(correctClass)
} else {
levelsCorrect<-sort(unique(correctClass))
}
if (inherits(predictedClass,"factor")) {
levelsPredicted<-levels(predictedClass)
} else {
levelsPredicted<-sort(unique(predictedClass))
}
levelsBoth<-union(levelsCorrect,levelsPredicted)
# some data validity checks
correctClass<-factor(correctClass,levels=levelsBoth)
if (any(is.na(correctClass)))
stop("Correct class should not contain NA values.")
noClasses <- length(levelsBoth)
predictedClass<-factor(predictedClass,levels=levelsBoth)
if (any(is.na(predictedClass)))
stop("Predicted class should not contain NA values.")
noInst <- length(correctClass)
if (is.null(predictedProb)){
## create and fill the prediction matrix
predictedProb <- matrix(0, nrow=noInst, ncol=noClasses)
for (i in 1:noInst)
predictedProb[i, predictedClass[i]] <- 1
}
if (is.null(costMatrix)) {
## create and fill uniform costs matrix
costMatrix <- 1 - diag(noClasses)
}
if (is.null(priorClassProb))
priorClassProb <- table(correctClass)/noInst
tmp <- .C(C_modelEvaluate,
noInst = length(correctClass),
correctClass = as.integer(correctClass),
# predictedClass = as.integer(predictedClass), # computed from predictedProb and CostMatrix
predictedProb = as.double(predictedProb),
costMatrix = as.double(costMatrix),
noClasses = as.integer(noClasses),
priorClassProb = as.double(priorClassProb),
accuracy = double(1),
avgCost = double(1),
infScore = double(1),
auc = double(1),
predMatrix = integer(noClasses * noClasses),
sensitivity = double(1),
specificity = double(1),
brier = double(1),
kappa = double(1),
precision = double(1),
Gmean = double(1),
KS = double(1),
TPR = double(1),
FPR = double(1),
NAOK=TRUE)
recall = tmp$sensitivity
denominator <- (beta*beta * recall + tmp$precision)
if (denominator == 0)
Fmeasure <- 0
else
Fmeasure = (1+beta*beta)*recall*tmp$precision / denominator
predMx <- matrix(tmp$predMatrix, nrow = noClasses, ncol=noClasses, dimnames = list(levels(correctClass),levels(correctClass)))
list(accuracy = tmp$accuracy, averageCost = tmp$avgCost, informationScore = tmp$infScore,
AUC = tmp$auc, predictionMatrix = predMx, sensitivity = tmp$sensitivity,
specificity = tmp$specificity, brierScore = tmp$brier, kappa = tmp$kappa,
precision = tmp$precision, recall = tmp$sensitivity, Fmeasure = Fmeasure,
Gmean = tmp$Gmean, KS = tmp$KS, TPR = tmp$TPR, FPR = tmp$FPR)
}
modelEvaluationReg.Core <- function(correct, predicted, avgTrainPredicted) {
noInst <- length(correct) ;
tmp <- .C(C_modelEvaluateReg,
noInst = length(correct),
correct = as.double(correct),
predicted = as.double(predicted),
avgPredicted = as.double(avgTrainPredicted),
MSE = double(1),
RMSE = double(1),
MAE = double(1),
RMAE = double(1),
NAOK=TRUE)
list(MSE = tmp$MSE, RMSE = tmp$RMSE, MAE = tmp$MAE, RMAE = tmp$RMAE)
}
paramCoreIO <- function(model, fileName, io=c("read","write")) {
io = match.arg(io)
tmp <- .C(C_optionsInOut,
modelID = as.integer(model$modelID),
fileName=as.character(fileName),
io=as.character(io),
NAOK=FALSE)
invisible(tmp)
}
saveRF <- function(model, fileName) {
if (model$model != "rf") stop("Only random forest model can be saved at the moment.");
modelID <- model$modelID
tmp <- .C(C_saveRF,
modelID = as.integer(modelID),
fileName=as.character(fileName))
save(model,file=paste(fileName,".Rda",sep=""))
invisible(tmp)
}
loadRF <- function(fileName) {
# model="rf"
# # check formula or response index or reponse name
# if (inherits(formula,"formula")) {
# dat <- model.frame(formula, data=data, na.action=na.pass)
# trms <- attr(dat,"terms")
# attributes(trms) <- NULL
# formulaExpanded <- as.formula(trms)
# }
# else {
# if (is.numeric(formula)) {
# if (formula == round(formula)) {# index of response variable
# classIdx <- formula
# className <- names(data)[classIdx]
# }
# else stop("The first argument must be a formula or prediction column name or prediction column index.")
# }
# else if (is.character(formula)) { # name of response variable
# classIdx <- match(formula, names(data))
# if (length(classIdx) != 1 || is.na(classIdx))
# stop("The first argument must be a formula or prediction column name or prediction column index.")
# className <- names(data[classIdx])
# }
# else stop("The first argument must be a formula or prediction column name or prediction column index.")
#
# dat <- data.frame(data[, classIdx], data[, -classIdx, drop=FALSE])
# names(dat)[1] <- className
# # get formula explicitly to allow storage of all terms and their manipulation
# frml <- paste(className, "~",paste(names(dat)[-1], sep="+",collapse="+"),sep="")
# formulaExpanded <- as.formula(frml)
# }
# if (!inherits(dat[[1]],"factor")) {
# dat[[1]] <- factor(dat[[1]]);
# cat("Changing dependent variable to factor with levels:",levels(dat[[1]]),"\n");
# }
# class.lev <- levels(dat[[1]]);
# noClasses <- length(class.lev);
load(file=paste(fileName,".Rda",sep="")) #loads object with name model
tmp <- .C(C_readRF,
fileName=as.character(fileName),
modelID = integer(1))
if (tmp$modelID == -1) {
return(NULL)
}
model$modelID <- tmp$modelID
#res <- list(modelID=tmp$modelID, class.lev=class.lev, model=model, formula=formula, noClasses = noClasses)
#class(res) <- "CoreModel"
#res
model
}
getRFsizes <- function(model, type=c("size", "sumdepth")) {
if (model$model != "rf") stop("The model must be a random forest.");
type <- match.arg(type)
switch(type,
size=.Call(C_exportSizesRF, as.integer(model$modelID)),
sumdepth=.Call(C_exportSumOverLeavesRF, as.integer(model$modelID)))
}
getCoreModel <- function(model) {
if (model$model != "rf") stop("The model must be a random forest.");
.Call(C_exportModel, as.integer(model$modelID))
}
calibrate <- function(correctClass, predictedProb, class1=1, method = c("isoReg","binIsoReg","binning","mdlMerge"),
weight=NULL,noBins=10, assumeProbabilities=FALSE){
noClasses <- length(levels(correctClass)) ;
method <- match.arg(method)
methodIdx = match(method, eval(formals()$method), nomatch=-1)
if (assumeProbabilities==TRUE && any(predictedProb >1.0 | predictedProb<0))
stop("Predicted probabilities in predictedValues are expected to be in [0,1] range.")
noInst <- length(correctClass) ;
if (is.null(weight)) {
weight<-numeric(noInst)
weight[]<-1
}
# class1 can be either class name (factor) or its index
if (is.factor(class1))
class1idx<-match(class1, levels(correctClass),nomatch=-1)
else {
class1idx<-class1
class1<-factor(levels(correctClass)[class1idx],levels=levels(correctClass))
}
# convert true class to a vector of 0 and 1
tc<-integer(length(correctClass))
tc[]<-0
tc[correctClass==class1]<-1
tmp <- .C(C_calibrate,
methodIdx = as.integer(methodIdx),
noInst = as.integer(noInst),
correctClass = as.integer(tc),
predictedProb = as.double(predictedProb),
weight=as.double(weight),
noBins=as.integer(noBins),
noIntervals = integer(1),
interval = double(noInst),
calProb = double(noInst),
NAOK=TRUE)
if (assumeProbabilities == TRUE)
tmp$interval[tmp$noIntervals] <- 1 # set sentinel for probabilities
else tmp$interval[tmp$noIntervals] <- Inf
list(interval = tmp$interval[1:tmp$noIntervals], calProb = tmp$calProb[1:tmp$noIntervals])
}
applyCalibration <- function(predictedProb, calibration) {
if (is.null(calibration))
return(predictedProb)
calIntervals <- findInterval(predictedProb, calibration$interval)
calProbs <- calibration$calProb[calIntervals+1]
return(calProbs)
}
applyDiscretization <- function(data, boundsList, noDecimalsInValueName=2) {
if (is.null(boundsList))
return(data)
for (i in 1:length(boundsList)) {
noDecimals <- noDecimalsInValueName
attrName <- names(boundsList)[i]
if (length(boundsList[[i]]) == 1 && is.na(boundsList[[i]])) {
data[,attrName] <- factor(NA)
}
else {
discValues <- apply( outer(data[,attrName], boundsList[[i]], ">"), 1, sum)
data[,attrName] <- factor(discValues, levels=0:length(boundsList[[i]]))
repeat {
levels(data[,attrName]) <- intervalNames(boundsList[[i]], noDecimals)
if (length(unique(levels(data[,attrName])))==length(boundsList[[i]])+1)
break
else
noDecimals <- noDecimals +1
}
}
}
data
}
discretize <- function(formula, data, method=c("greedy", "equalFrequency", "equalWidth"), estimator=NULL,
discretizationLookahead=3,discretizationSample=0, maxBins=0, equalDiscBins=4, ...)
{
method <- match.arg(method)
methodIdx = match(method,as.vector(formals()$"method",mode="character")[-1],nomatch=-1)
isRegression <- NULL
# check formula or response index or reponse name
if (inherits(formula,"formula")) {
dat <- model.frame(formula, data=data, na.action=na.pass)
trms <- attr(dat,"terms")
attributes(trms) <- NULL
formulaExpanded <- as.formula(trms)
}
else {
if (is.numeric(formula)) {
if (formula == round(formula)) {# index of response variable
classIdx <- formula
className <- names(data)[classIdx]
}
else stop("The first argument must be a formula or prediction column name or prediction column index.")
}
else if (is.character(formula)) { # name of response variable
classIdx <- match(formula, names(data))
if (length(classIdx) != 1 || is.na(classIdx))
stop("The first argument must be a formula or prediction column name or prediction column index.")
className <- names(data[classIdx])
}
else stop("The first argument must be a formula or prediction column name or prediction column index.")
dat <- data.frame(data[, classIdx], data[, -classIdx, drop=FALSE])
names(dat)[1] <- className
# get formula explicitly to allow storage of all terms and their manipulation
frml <- paste(className, "~",paste(names(dat)[-1], sep="+",collapse="+"),sep="")
formulaExpanded <- as.formula(frml)
}
## find the index of estimator
if (is.null(estimator)) {
isRegression <- ! inherits(dat[[1]], "factor")
if (isRegression) # regression
estimator <- "RReliefFexpRank"
else
estimator <- "ReliefFexpRank"
}
estDsc <- infoCore(what="attrEval");
estIndex <- match(estimator, estDsc, nomatch=-1);
if (estIndex == -1) {
estDscReg <- infoCore(what="attrEvalReg");
estIndex <- match(estimator, estDscReg, nomatch=-1);
if (estIndex == -1)
stop("Invalid estimator parameter")
else isRegression <- TRUE
}
else isRegression <- FALSE
if (method == "greedy") {
if (!is.numeric(maxBins))
stop("The maximal number of bins shall be an integer or integer vector of length equal to the number of numeric attributes.")
if (any(maxBins<0 | maxBins==1))
stop("The maximal number of bins shall be 0 (don't care) or an integer >=2")
}
else { # if (method == "equalFrequency" || method="equalWidth" )
if (!is.numeric(equalDiscBins))
stop("The number of bins (equalDiscBins) shall be an integer or an integer vector of length equal to the number of numeric attributes.")
if (any(equalDiscBins<2))
stop("The number of bins (equalDiscBins) shall be an integer >=2")
}
#if (is.null(isRegression)) # in case of equal width or equal frequency discretization
# isRegression <- ! inherits(dat[[1]], "factor")
if (!isRegression && !inherits(dat[[1]],"factor")) {
dat[[1]] <- factor(dat[[1]]);
cat("Changing dependent variable to factor with levels:", levels(dat[[1]]),"\n");
warning("Possibly this is an error caused by regression formula and classification attribute estimator or vice versa.")
}
aux <- prepare.Data(dat,formulaExpanded,dependent=TRUE,numericAsOrdered=FALSE,orderedAsNumeric=FALSE, skipNAcolumn=TRUE,skipEqualColumn=TRUE);
discnumvalues <- aux$discnumvalues;
discdata <- aux$discdata;
discmap <- aux$discmap;
numdata <- aux$numdata;
nummap <- aux$nummap;
skipmap<-aux$skipmap
if (length(skipmap) > 0)
warning("The discretization for the following attributes was not computed due to inadequate data:", paste(names(dat)[aux$skipmap],collapse=", "))
discAttrNames <- dimnames(discdata)[[2]]
discValCompressed <- aux$disccharvalues
discValues <- aux$discValues
numAttrNames <- dimnames(numdata)[[2]]
bounds <- matrix(0, nrow=nrow(numdata),ncol=ncol(numdata))
options <- prepare.Options(...);
options[[length(options)+1]] <- discretizationLookahead
options[[length(options)+1]] <- discretizationSample
names(options)[(length(options)-1):length(options)] <- c("discretizationLookahead","discretizationSample")
checkOptionsValues(options) ;
if (isRegression) {
if (nummap[1] != 1) stop("No dependent variable in prepared regression data.");
attr2Disc <- ncol(numdata)-1
}
else {
if (discmap[1] != 1) stop("No class in prepared data.");
attr2Disc <- ncol(numdata)
}
if (attr2Disc==0)
return(NULL)
if (method == "greedy")
maxBins <- rep(maxBins, length.out=attr2Disc)
else
maxBins <- rep(equalDiscBins, length.out
=attr2Disc)
tmp <- .C(C_discretize,
methodIdx = as.integer(methodIdx),
isRegression = as.integer(isRegression),
noInst = aux$noInst,
noDiscrete = ncol(discdata),
noDiscreteValues = as.integer(discnumvalues),
discreteData = as.integer(discdata), # vector of length noInst*noDiscrete, columnwise
noNumeric = ncol(numdata),
numericData = as.double(numdata), # vector of length noInst*noNumeric, columnwise
discAttrNames = as.character(discAttrNames),
discValNames = as.character(discValCompressed),
numAttrNames = as.character(numAttrNames),
numOptions = length(options),
optionsName = names(options),
optionsVal = options,
selEst = estIndex,
maxBins = as.integer(maxBins),
noBounds = integer(ncol(bounds)),
bounds = as.double(bounds), # vector of length noInst*noNumeric, columnwise
NAOK=TRUE)
boundsMx <- matrix(tmp$bounds, nrow=nrow(bounds),ncol=ncol(bounds),byrow=FALSE)
outBounds <- list()
for (i in 1:ncol(boundsMx)) {
if (tmp$noBounds[i]>0)
outBounds[[i]] <- boundsMx[1:tmp$noBounds[i], i]
else
outBounds[[i]] <- NA
}
names(outBounds) <- numAttrNames
if (length(skipmap) > 0) {
for (i in 1:length(skipmap))
outBounds[[length(outBounds)+1]] <- NA
names(outBounds)[(length(outBounds)-length(skipmap)+1):length(outBounds)] <- names(dat)[aux$skipmap]
}
if (isRegression)
outBounds[[1]] <- NULL
return(outBounds)
}
noEqualRows <- function(data1, data2, tolerance=1e-5, countOnce=TRUE) {
if (ncol(data1) != ncol(data2))
stop("Only data sets with equal number of columns can be compared.")
d1 <- data.matrix(data1)
d2 <- data.matrix(data2)
replaceNA <- (max(d1,d2, na.rm=TRUE)+tolerance)*1.0001 # larger value than any existing
d1[is.na(d1)] <- replaceNA
d2[is.na(d2)] <- replaceNA
storage.mode(d1) <- "double"
storage.mode(d2) <- "double"
.Call(C_noEqualRows, d1, d2, as.integer(nrow(d1)), as.integer(nrow(d2)), as.integer(ncol(d1)),
as.double(tolerance), as.integer(countOnce))
}
|
/scratch/gouwar.j/cran-all/cranData/CORElearn/R/Rinterface.R
|
classDataGen <- function(noInst, t1=0.7, t2=0.9, t3=0.34, t4=0.32, p1=0.5, classNoise=0)
{
stopifnot(0 <= t1 & t1 <= 1)
stopifnot(0 <= t2 & t2 <= 1)
stopifnot(0 <= t3 & t3 <= 1)
stopifnot(0 <= t4 & t4 <= 1)
stopifnot(0 <= p1 & p1 <= 1)
prob <- c(t1, t1, t2, t2, t2, t2, t3, t3 * (2 - t3), t3, p1, classNoise)
mult <- c(3, 3, 6)
n <- length(prob) + length(mult) + 3
r <- matrix(runif(n*noInst), nrow=noInst, ncol=n, byrow=TRUE)
for (i in 1:length(prob)) {
r[,i] <- r[,i] < prob[i]
}
for (i in 11+(1:length(mult))) {
r[,i] <- ceiling(mult[i-11] * r[,i])
}
for (i in 11+(4:6)) {
r[,i] <- qnorm(r[,i])
}
a <- matrix(nrow=noInst, ncol=7)
# discrete attributes for class 1
a[,1] <- r[,1] * r[,3]
a[,2] <- (r[,1] * r[,4] + r[,2] * r[,5]) >= 1
a[,3] <- 1 + r[,2] * r[,6] * r[,11+1]
a[,4] <- r[,6+1]
a[,5] <- r[,6+2]
a[,6] <- 1 + r[,6+3] * r[,11+2]
a[,7] <- pmax(1, r[,11+3] - 2)
# continuous attribute specific for class 1
r[,11+5] <- r[,11+5] * t4
# case classes
cl <- 2 - r[,10]
ind <- which(cl == 2)
if (length(ind) >= 1) {
a[ind,1:6] <- a[ind,c(4:6,1:3),drop=FALSE]
r[ind,11+(4:5)] <- r[ind,11+(5:4),drop=FALSE]
}
# class noise
ind <- which(r[,11] == 1)
if (length(ind) >= 1) {
cl[ind] <- 3 - cl[ind]
}
# output
data.frame(
a1=factor(a[,1], levels=c(0,1)),
a2=factor(a[,2], levels=c(0,1)),
a3=factor(letters[a[,3]], levels=c("a","b","c","d")),
a4=factor(a[,4], levels=c(0,1)),
a5=factor(a[,5], levels=c(0,1)),
a6=factor(letters[a[,6]], levels=c("a","b","c","d")),
a7=factor(letters[a[,7]], levels=c("a","b","c","d")),
x1=r[,11+4],
x2=r[,11+5],
x3=r[,11+6],
class=factor(cl, levels=c(1,2)))
}
regDataGen <- function(noInst, t1=0.8, t2=0.5, noise=0.1)
{
stopifnot(0 <= t1 & t1 <= 1)
prob <- c(1/2, t1, t1, 1/2, 1/2, 1/2, 1/2)
n <- length(prob) + 9
r <- matrix(runif(n*noInst), nrow=noInst, ncol=n, byrow=TRUE)
for (i in 1:length(prob)) {
r[,i] <- r[,i] < prob[i]
}
for (i in 7+(1:9)) {
r[,i] <- qnorm(r[,i])
}
a <- matrix(nrow=noInst, ncol=7)
# discrete attributes for r[,1] = 0
a[,1] <- r[,2]
a[,2] <- 2*r[,3] + r[,4] + 1
a[,3] <- r[,5]
a[,4] <- 2*r[,6] + r[,7] + 1
# auxiliary continuous variables
x1 <- r[,7+1]
x2 <- r[,7+2]
x3 <- r[,7+3]
x4 <- 1/(1 + exp( - r[,7+4]))
x5 <- 1/(1 + exp( - r[,7+5]))
x6 <- 1/(1 + exp( - r[,7+6]))
# internal split
f <- numeric(noInst)
linear <- r[,1] == 0
if (any(linear)) {
f[linear] <- x4[linear] - 2*x5[linear] + 3*x6[linear]
x1[linear] <- t2 * x1[linear]
}
nonlinear <- !linear
if (any(nonlinear)) {
f[nonlinear] <- cos(4*pi*x4[nonlinear])*(2*x5[nonlinear]-3*x6[nonlinear])
a[nonlinear,1] <- 1 - a[nonlinear,1]
a[nonlinear,2] <- 5 - a[nonlinear,2]
x2[nonlinear] <- t2 * x2[nonlinear]
}
# output
data.frame(
a1=factor(a[,1], levels=c(0,1)),
a2=factor(letters[a[,2]], levels=c("a","b","c","d")),
a3=factor(a[,3], levels=c(0,1)),
a4=factor(letters[a[,4]], levels=c("a","b","c","d")),
x1=x1,
x2=x2,
x3=x3,
x4=1/(1 + exp( - r[,7+4] + noise * r[,7+7])),
x5=1/(1 + exp( - r[,7+5] + noise * r[,7+8])),
x6=1/(1 + exp( - r[,7+6] + noise * r[,7+9])),
response=f)
}
# generate ordinal data to be used for example with ordEval algorithm
ordDataGen<-function(noInst, classNoise=0) {
len <- noInst
maxValue <- 5
# generate performance, basic, random, and excitement attributes
P <- list()
P[[1]]<-distGen(len,c(0.2,0.2,0.2,0.2,0.2))
P[[2]]<-distGen(len,c(0.2,0.2,0.2,0.2,0.2))
B<-list()
for (i in 1:2) {
B[[i]] <- distGen(len,c(0.2,0.2,0.2,0.2,0.2))
}
E <- list()
for (i in 1:2)
E[[i]]<-distGen(len,c(0.2,0.2,0.2,0.2,0.2))
R<-list()
R[[1]] <- distGen(len,c(0.2,0.2,0.2,0.2,0.2))
rn<-rnorm(len)
R[[2]]<-1+as.integer(((rn-min(rn))/(max(rn)-min(rn)+1e-10))*5)
# generate response
C=vector(mode="integer",length=len)
C<- performanceWeak(P[[1]])+performanceStrong(P[[2]])+basicWeak(B[[1]])+basicStrong(B[[2]])+
excitementWeak(E[[1]])+excitementStrong(E[[2]])
Cnorm <- forceDist(C,c(0.2,0.2,0.20,0.2,0.2))
#class noise
if (classNoise > 0) {
ns<-runif(noInst,0,1)
Cn <- as.integer(runif(len,1, maxValue+1))
Cnorm[ns<classNoise]<-Cn # change to random variable
}
#output
out<-as.data.frame(cbind(Cnorm,P[[1]],P[[2]],B[[1]],B[[2]],E[[1]],E[[2]],R[[1]],R[[2]]))
names(out)<-c("class","Pweak","Pstrong","Bweak","Bstrong","Eweak","Estrong","Iuniform","Inormal")
out
}
basicStrong<-function(A) {
X<-vector(mode="integer",length=length(A))
X[A<=3] <- -4
X[A==4] <- -2
X
}
basicWeak<-function(A) {
X<-vector(mode="integer",length=length(A))
X[A<=2] <- -2
X
}
performanceWeak<-function(A){
X<-vector(mode="integer",length=length(A))
X[A==1] <- -3
X[A==2] <- -2
X[A==4] <- 2
X[A==5] <- 3
X
}
performanceStrong<-function(A){
X<-vector(mode="integer",length=length(A))
X[A==1] <- -5
X[A==2] <- -3
X[A==4] <- 3
X[A==5] <- 5
X
}
excitementWeak<-function(A){
X<-vector(mode="integer",length=length(A))
X[A<=4] <- 0
X[A==5] <- 1
X
}
excitementStrong<-function(A){
X<-vector(mode="integer",length=length(A))
X[A<=4] <- 0
X[A==5] <- 4
X
}
distGen<-function(len, probs){
sumP <- sum(probs)
normProbs <- probs / sumP
cumProbs <- c(normProbs[1])
for (i in 2: length(normProbs))
cumProbs[i] <- cumProbs[i-1]+normProbs[i]
rnd <- runif(len,0,1)
gen <- vector(mode="integer", length=len)
gen[rnd <= cumProbs[1]] <- 1
for (i in 2:length(cumProbs)){
gen[rnd > cumProbs[i-1] & rnd <= cumProbs[i]] <- i
}
gen
}
## ties are broken by first come gets lower score
forceDist <- function(A,probs) {
sumP <- sum(probs)
normProbs <- probs / sumP
cumProbs <- c(normProbs[1])
for (i in 2: length(normProbs))
cumProbs[i] <- cumProbs[i-1]+normProbs[i]
cumIdx <- as.integer(cumProbs*length(A))
cumIdx[length(cumIdx)] <- length(A) ## to avoid errors due to numerical rounding
oA<-order(A)
gen <- vector(mode="integer", length=length(A))
gen[oA[1:cumIdx[1]]] <- 1
for (i in 2:length(cumIdx)){
gen[oA[cumIdx[i-1]:cumIdx[i]]] <- i
}
gen
}
|
/scratch/gouwar.j/cran-all/cranData/CORElearn/R/dataGenerator.R
|
.onLoad <- function(lib, pkg) {
# .First.lib <- function(lib, pkg) {
#library.dynam("CORElearn", pkg, lib)
initCore(16384)
}
.onUnload <- function(libpath) {
#.Last.lib <- function(libpath) {
destroyCore()
#library.dynam.unload("CORElearn", libpath)
}
initCore <- function(maxModels=16384)
{
tmp <- .C(C_initCore, as.integer(maxModels)) ## maximal number of models
}
destroyCore <- function()
{
tmp <- .C(C_destroyCore)
}
|
/scratch/gouwar.j/cran-all/cranData/CORElearn/R/init.R
|
# ordEval.R
#
# visualization of ordEval algorithm and data preparation for it
#
# Author: rmarko
###############################################################################
oeInst<-function(ord, noAttr, graphTitle = "", normalization=TRUE, instSelection=NULL, bw=FALSE)
{
noStats = 8
noMethods = 3
noInst = length(ord)
xInit <- c(0, 0)
yInit <- c(1, noAttr+0.85)
ylabName = "" ## "attributes"
subtitleName <- ""#" impact"
boxHeight = 1.0
chExp = 1.0 ## char expansion for boxes
if (is.null(instSelection))
instSelection<-1:min(noInst,100)
if (bw) {
downColor <- gray(0.7)
downOverColor <- gray(0.9)
upColor <- gray(0.5)
upOverColor <- gray(0.3)
}
else {
downColor <- "blue"
downOverColor <- "lightblue"
upColor <- "red"
upOverColor <- "orange"
}
for (inst in instSelection){
plot(xInit, yInit, type = "n", xlim = c(-1.2, 1), ylim = c(0.9, noAttr+0.9), xlab="",
ylab = ylabName, axes = FALSE)
par(fig=c(0.2/2.2,1,0,1),new=TRUE) ## to make more space for labels on left
mtext(text="downward upward",side=1,line=2,adj=c(0.5,0.5))
lines(xInit,yInit)
## plot title
if (graphTitle=="")
graphTitle <- "Impact on instance"
subtitleName <- paste(" ", ord[[inst]]$className,"=",ord[[inst]]$classValue,sep="")
title(main=graphTitle, sub=subtitleName)
## x axis
axis(1, at = c(-1, -0.8, -0.6, -0.4, -0.2, 0, 0.2, 0.4, 0.6, 0.8, 1), labels = c(1, 0.8, 0.6, 0.4, 0.2, 0, 0.2, 0.4, 0.6, 0.8, 1),cex.axis=0.8)
## left y axis, attribute values
leftLabel <- c()
for(iA in 1:noAttr) {
leftLabel <- c(leftLabel, paste(ord[[inst]]$attributeName[[iA]],"=",ord[[inst]]$valueName[[iA]],sep=""))
}
axis(2, at = boxHeight/4+c(1:noAttr), labels = leftLabel, las = 1, cex.axis = 0.6)
for(iA in 1:noAttr) {
xDown <- - ord[[inst]]$reinfPos[iA]
xUp <- ord[[inst]]$reinfNeg[iA]
y <- iA
if (xDown<0) {
rect(xDown, y, 0.0, y+0.45*boxHeight, col=downColor)
}
if (xUp>0) {
rect(0.0, y, xUp, y+0.45*boxHeight, col=upColor)
}
if (normalization) {
## box and whiskers for random down
boxwhiskers(ord[[inst]]$rndReinfNeg[[iA]], y+0.50*boxHeight, y+0.70*boxHeight)
## box and whiskers for random up
boxwhiskers(-ord[[inst]]$rndReinfPos[[iA]], y+0.50*boxHeight, y+0.70*boxHeight)
}
}
}
invisible()
}
avNormBarObject<-function(oe, ciType=c("two.sided","upper","lower","none"), ciDisplay=c("box","color"),
ciDecorate = NULL,
graphTitle = NULL, ylabLeft = "attribute values", ylabRight="number of values" ,
xlabel="reinforcement", attrIdx=0, equalUpDown=FALSE, colors=c("green","lightgreen","blue","lightblue"))
{
ciType<-match.arg(ciType)
ciDisplay<-match.arg(ciDisplay)
if (is.null(colors)) { # black and white
downColor <- gray(0.7)
downOverColor <- gray(0.9)
upColor <- gray(0.5)
upOverColor <- gray(0.3)
}
else {
downColor <- colors[1]
downOverColor <- colors[2]
upColor <- colors[3]
upOverColor <- colors[4]
}
noStats <- length(getStatNames())
if (is.null(ylabLeft))
ylab <-""
else ylab <- ylabLeft
if (is.null(xlabel)) {
xlab <-""
subtitleName <- ""
}
else {
subtitleName <- xlabel
if (equalUpDown)
xlab <- "decrease to increase to"
else
xlab <- "downward upward"
}
# if equalUpDown=TRUE upward and downward reinforcements are shown on the same level
if (equalUpDown)
equalUD=1
else
equalUD=0
boxHeight <- 1.0
chExp <- 1.0 ## char expansion for boxes
## for(iA in c(1,3,5,7)) {
attrSelection <- 1:oe$noAttr
if (attrIdx!=0)
attrSelection<-c(attrIdx)
for(iA in attrSelection) {
noAttrValues <- length(oe$valueNames[[iA]])
x <- c(0, 0)
y <- c(1, noAttrValues)
par(xpd=NA,mar=c(5.5,7,5,7))
plot(x, y, type = "l", xlim = c(-1, 1), ylim = c(0.9, noAttrValues-equalUD+0.9), xlab = xlab,
ylab = ylab, axes = FALSE)
## plot title
if (is.null(graphTitle))
titleName <- paste("", gsub('_', ' ', oe$attrNames[iA]))
else if (graphTitle == "")
titleName <- ""
else
titleName <-paste(gsub('_', ' ', oe$attrNames[iA]),"\neffect on ", graphTitle)
title(main=titleName, sub=subtitleName)
## bottom x axis
axis(1, at = c(-1, -0.8, -0.6, -0.4, -0.2, 0, 0.2, 0.4, 0.6, 0.8, 1), labels = c(1, 0.8, 0.6, 0.4, 0.2, 0, 0.2, 0.4, 0.6, 0.8, 1))
# prepare left and right axis labels
if (equalUpDown) {
# left and right contain values
leftLabels <- oe$valueNames[[iA]][-length(oe$valueNames[[iA]])]
rightLabels <- oe$valueNames[[iA]][-1]
}
else {
# values on the left, number of values on the right-hand side
leftLabels <- oe$valueNames[[iA]]
rightLabels <- oe$noAV[iA,]
}
las <- 1 ## horizontal
cex.axis <- 1
maxAVchars <- max(nchar(leftLabels))
if (maxAVchars > 3) {
las <- 0
if (maxAVchars > 9)
cex.axis <- 0.5
}
## left y axis, attribute values
axis(2, at = boxHeight/4+c(1:(noAttrValues-equalUD)), labels = leftLabels, las = las, line = 1, cex.axis=cex.axis)
## right y axis, number of instances or attribute values
axis(4, at = boxHeight/4+c(1:(noAttrValues-equalUD)), line = -0.8, labels = rightLabels, las = las)
if (!is.null(ylabRight) && (equalUpDown==FALSE || (equalUpDown==TRUE && ylabRight!="number of values"))) {
mtext(ylabRight, side=4, line = 1.35,cex=1.0)
}
for(i in 1:(noAttrValues-equalUD)) {
xUp <- oe$reinfNegAV[iA,i]
statsUp <- oe$rndReinfNegAV[iA,i,]
xDown <- oe$reinfPosAV[iA,i+equalUD]
statsDown <- oe$rndReinfPosAV[iA,i+equalUD,]
y <- i
if (xDown>0) {
rect(-xDown, y, 0.0, y+0.45*boxHeight, col=downColor)
}
## box and whiskesrs
if (statsDown[["highPercentile"]] > 0) {
if (ciType=="lower")
statsDown[["highPercentile"]] <- 1
if (ciType=="upper")
statsDown[["lowPercentile"]] <- 0
if (ciDisplay == "box")
boxwhiskers(-statsDown, y+0.50*boxHeight, y+0.70*boxHeight, ciType)
else if (ciType != "none") {
# change the color within upper limit of confidence interval
rect(max(-xDown, -statsDown[["highPercentile"]]), y, 0.0, y+0.45*boxHeight, col=downOverColor)
}
if (!is.null(ciDecorate) && abs(statsDown[["highPercentile"]]) < abs(xDown) ) {
segments(-statsDown[["highPercentile"]], y - 0.10, -statsDown[["highPercentile"]], y + 0.55, lty="dashed")
draw.ellipse(x = -(statsDown[["highPercentile"]]+xDown)/2, y = y+0.225*boxHeight, a = max(0.05, 0.05+(xDown - statsDown[["highPercentile"]])/2), b= 0.05 + 0.225*boxHeight, border=ciDecorate, lwd=2)
}
}
if (xUp>0) {
rect(0.0, y, xUp, y+0.45*boxHeight, col=upColor)
}
## box and whiskesrs
if (statsUp[["highPercentile"]] > 0){
if (ciType=="lower")
statsUp[["highPercentile"]]<-1
if (ciType=="upper")
statsUp[["lowPercentile"]]<-0
if (ciDisplay == "box"){
boxwhiskers(statsUp, y+0.50*boxHeight, y+0.70*boxHeight,ciType)
}
else if (ciType != "none"){
# change the color within upper limit of confidence interval
rect(min(xUp, statsUp[["highPercentile"]]), y, 0.0, y+0.45*boxHeight, col=upOverColor)
}
if (!is.null(ciDecorate) && abs(statsUp[["highPercentile"]]) < abs(xUp) ) {
segments(statsUp[["highPercentile"]], y - 0.10, statsUp[["highPercentile"]], y + 0.55, lty="dashed")
draw.ellipse(x = (statsUp[["highPercentile"]]+xUp)/2, y = y+0.225*boxHeight, a = max(0.05, 0.05+(xUp - statsUp[["highPercentile"]])/2), b = 0.05 + 0.225*boxHeight, border=ciDecorate, lwd = 2)
}
}
}
par(lwd = 1)
}
invisible()
}
avSlopeObject<- function(oe, ciType=c("two.sided","upper","lower","none"),attrIdx=0, graphTitle=NULL, xlabel = "attribute values", colors=c("green","lightgreen","blue","lightblue"))
{
ciType<-match.arg(ciType)
noAttr <- oe$noAttr
ordVal <- oe$ordVal
if (is.null(colors)) {
downColor <- "black"
downOverColor <- gray(0.9)
upColor <- "black"
upOverColor <- gray(0.3)
}
else {
downColor <- colors[1]
downOverColor <- colors[2]
upColor <- colors[3]
upOverColor <- colors[4]
}
ciColor = gray(0.9)
yU<-matrix(nrow=noAttr,ncol=ordVal)
yUlow<-matrix(nrow=noAttr,ncol=ordVal)
yUhigh<-matrix(nrow=noAttr,ncol=ordVal)
xU <- c(1:ordVal)
yD<-matrix(nrow=noAttr,ncol=ordVal)
yDlow<-matrix(nrow=noAttr,ncol=ordVal)
yDhigh<-matrix(nrow=noAttr,ncol=ordVal)
xD <- c(1:ordVal)
for(iA in 1:noAttr) {
yU[iA,1] <- 0
for(i in 2:ordVal) {
ySlope <- oe$reinfPosAV[iA,i]
stats <- oe$rndReinfPosAV[iA,i,]
ySlopeLow <- stats[["lowPercentile"]]
ySlopeHigh <- stats[["highPercentile"]]
yU[iA,i] <- yU[iA,i-1] + ySlope
yUlow[iA,i] <- yU[iA,i-1] + ySlopeLow
yUhigh[iA,i] <- yU[iA,i-1] + ySlopeHigh
}
yD[iA,ordVal] <- 0
for(i in (ordVal-1):1) {
ySlope <- oe$reinfNegAV[iA,i]
stats <- oe$rndReinfNegAV[iA,i,]
ySlopeLow <- stats[["lowPercentile"]]
ySlopeHigh <- stats[["highPercentile"]]
yD[iA,i] <- yD[iA,i+1] - ySlope
yDlow[iA,i] <- yD[iA,i+1] - ySlopeLow
yDhigh[iA,i] <- yD[iA,i+1] - ySlopeHigh
}
}
yLimit <- c(min(yD,yU)-0.1, max(yU,yD) + 0.1)
attrSelection <- 1:noAttr
if (attrIdx!=0)
attrSelection<-c(attrIdx)
if (length(xlabel)==1) ## expand a single label to vector
xlabel[2:length(attrSelection)] <- xlabel[1]
for(iA in attrSelection) {
par(lwd = 1)
x <- c(0.8, ordVal+0.2)
y <- c(0, 0)
plot(x, y, xlim = c(1, ordVal), ylim = yLimit, xlab = "", ylab = "cumulative reinforcement", type = "n", axes = FALSE)
if (is.null(graphTitle))
titleName <- paste("", gsub('_', ' ', oe$attrNames[iA]))
else if (graphTitle == "")
titleName <- ""
else
titleName <-paste(gsub('_', ' ', oe$attrNames[iA]),"\neffect on ", graphTitle)
title(main=titleName)
text((ordVal+1)/2.0, yLimit[1]-0.1, label = xlabel[iA], adj=c(0.5,1),xpd=TRUE)
lines(x, y, lwd = 2)
x <- c(1:ordVal)
y <- rep(-0.09, ordVal)
av <- oe$valueNames[[iA]]
text(x, y, label = av, adj = c(0.5,1))
axis(2)
lines(xU, yU[iA,], type = "o", pch = 15, col = upColor)
lines(xD, yD[iA,], type = "o", pch = 15, col = downColor)
for(i in 1:(ordVal - 1)) {
if (ciType=="lower")
polygon(c(xU[i], xU[i+1],xU[i+1]), c(yU[iA,i], yUlow[iA,i+1],yU[iA,i]), col = ciColor, border=NA)
if (ciType=="upper")
polygon(c(xU[i], xU[i+1],xU[i+1]), c(yU[iA,i], yUhigh[iA,i+1],yU[iA,i]), col = ciColor, border=NA)
if (ciType=="two.sided")
polygon(c(xU[i], xU[i+1],xU[i+1]), c(yU[iA,i], yUhigh[iA,i+1],yUlow[iA,i+1]), col = ciColor, border=NA)
arrows(xU[i], yU[iA,i], xU[i+1], yU[iA,i+1], col = upColor, angle=9, length=0.12)
if (ciType=="lower")
polygon(c(xD[i+1], xD[i],xD[i]), c(yD[iA,i+1], yD[iA,i+1],yDlow[iA,i]), col = ciColor, border=NA)
if (ciType=="upper")
polygon(c(xD[i+1], xD[i],xD[i]), c(yD[iA,i+1], yD[iA,i+1],yDhigh[iA,i]), col = ciColor, border=NA)
if (ciType=="two.sided")
polygon(c(xD[i+1], xD[i],xD[i]), c(yD[iA,i+1], yDhigh[iA,i],yDlow[iA,i]), col = ciColor, border=NA)
arrows(xD[i + 1], yD[iA,i+1], xD[i], yD[iA,i], col = downColor, angle=9, length=0.12)
}
}
invisible()
}
attrNormBarObject<-function(oe, graphTitle = "OrdEval for all attributes",
ciType=c("two.sided","upper","lower","none"),ciDisplay=c("box","color"), ciDecorate=NULL, colors=c("green","lightgreen","blue","lightblue"))
{
ciType = match.arg(ciType)
ciDisplay=match.arg(ciDisplay)
noAttr <- oe$noAttr
ordVal <- oe$ordVal
noStats <- length(getStatNames())
if (is.null(colors)) {
downColor <- gray(0.7)
downOverColor <- gray(0.9)
upColor <- gray(0.5)
upOverColor <- gray(0.3)
}
else {
downColor <- colors[1]
downOverColor <- colors[2]
upColor <- colors[3]
upOverColor <- colors[4]
}
boxHeight <- 1.0
chExp <- 1.0 ## char expansion for boxes
x <- c(0, 0)
y <- c(1, noAttr+0.85)
ylabName <- "" ## "attributes"
par(xpd=NA,mgp=c(3,0.7,0),mar=c(5,12,4,1))
plot(x, y, type = "l", xlim = c(-1, 1), ylim = c(0.9, noAttr+0.9), xlab = "downward upward ",
ylab = ylabName, axes = FALSE)
## plot title
subtitleName <- "reinforcement"
title(main=graphTitle, sub=subtitleName)
## x axis
axis(1, at = c(-1, -0.8, -0.6, -0.4, -0.2, 0, 0.2, 0.4, 0.6, 0.8, 1), labels = c(1,0.8, 0.6, 0.4, 0.2, 0, 0.2, 0.4, 0.6, 0.8, 1))
## left y axis, attributes
attrSelection <- 1:noAttr
attrNames <- oe$attrNames
noAV <- oe$noAVattr
# for(iA in attrSelection) {
# if (nchar(attrNames[[iA]],type="chars") > 7) {
# attrNames[[iA]] <- paste(strsplit(attrNames[[iA]],split="_",fixed=TRUE)[[1]],sep="",collapse="\n")
# }
# }
cex.axisA = 1.0
maxCharsA = max(nchar(attrNames[attrSelection]))
if (maxCharsA > 15) {
cex.axisA = 0.9
if (maxCharsA > 20)
cex.axisA = 0.6
}
axis(2, at = boxHeight/4+c(1:noAttr), labels = attrNames, las = 1, line = 1, cex.axis = cex.axisA)
## right y axis, number of instances
#axis(4, at = boxHeight/4+c(1:noAttr), line = -0.8, labels = noAV, las = 1, cex.axis = 0.7)
#axisLabel4 <- "number of values"
#mtext(axisLabel4, side=4, line = 1.3)
for(iA in attrSelection) {
xUp <- oe$reinfNegAttr[iA]
xDown <- -oe$reinfPosAttr[iA]
y <- iA
if (xDown<0) {
rect(xDown, y, 0.0, y+0.45*boxHeight, col=downColor)
}
## box and whiskesrs for random down
if (oe$rndReinfNegAttr[iA,"highPercentile"] > 0) {
stats <- oe$rndReinfNegAttr[iA,]
if (ciType=="lower")
stats[["highPercentile"]]<-1
if (ciType=="upper")
stats[["lowPercentile"]]<-0
if (ciDisplay == "box"){
boxwhiskers(stats, y+0.50*boxHeight, y+0.70*boxHeight, ciType)
}
else if (ciType != "none") {
# change the color within upper limit of confidence interval
rect(max(xDown, -stats[["highPercentile"]]), y, 0.0, y+0.45*boxHeight, col=downOverColor)
}
if (!is.null(ciDecorate) && abs(stats[["highPercentile"]]) < abs(xDown) ) {
segments(-stats[["highPercentile"]], y - 0.10, -stats[["highPercentile"]], y + 0.55, lty="dashed")
draw.ellipse(x = (-stats[["highPercentile"]]+xDown)/2, y = y+0.225*boxHeight, a = max(0.05, 0.05+(-xDown - stats[["highPercentile"]])/2), b= 0.05 + 0.225*boxHeight, border=ciDecorate, lwd=2)
}
}
if (xUp>0) {
rect(0.0, y, xUp, y+0.45*boxHeight, col=upColor)
}
## box and whiskesrs for random up
if (oe$rndReinfPosAttr[iA,"highPercentile"] > 0){
stats <- oe$rndReinfPosAttr[iA,]
if (ciType=="lower")
stats[["highPercentile"]]<-1
if (ciType=="upper")
stats[["lowPercentile"]]<-0
if (ciDisplay == "box"){
boxwhiskers(-stats, y+0.50*boxHeight, y+0.70*boxHeight, ciType)
}
else if (ciType != "none") {
# change the color within upper limit of confidence interval
rect(min(xUp, stats[["highPercentile"]]), y, 0.0, y+0.45*boxHeight, col=upOverColor)
}
if (!is.null(ciDecorate) && abs(stats[["highPercentile"]]) < abs(xUp) ) {
segments(stats[["highPercentile"]], y - 0.10, stats[["highPercentile"]], y + 0.55, lty="dashed")
draw.ellipse(x = (stats[["highPercentile"]]+xUp)/2, y = y+0.225*boxHeight, a = max(0.05, 0.05+(xUp - stats[["highPercentile"]])/2), b = 0.05 + 0.225*boxHeight, border=ciDecorate, lwd = 2)
}
}
}
invisible()
}
boxwhiskers<-function(stats, y1, y2, ciType="two.sided")
{
# names(stats) contains: c("median", "Q1", "Q3", "lowPercentile", "highPercentile", "mean", "stdDev", "exp"))
if (ciType=="none")
return()
## quartile box
rect(stats[["Q1"]], y1, stats[["Q3"]], y2, col="lightgrey")
## median
segments(stats[["median"]], y1, stats[["median"]], y2, lwd=2)
##minimum line and whisker
midY = (y1+y2)/2.0
yLen = y2-y1
if (ciType != "upper") {
segments(stats[["Q1"]], midY, stats[["lowPercentile"]], midY,lty="solid")
segments(stats[["lowPercentile"]], y1+0.2*yLen, stats[["lowPercentile"]], y2-0.2*yLen)
}
else
segments(stats[["Q1"]], midY, stats[["lowPercentile"]], midY,lty="solid")
##maximum line and whisker
if (ciType != "lower") {
segments(stats[["Q3"]], midY, stats[["highPercentile"]], midY, lty="solid")
segments(stats[["highPercentile"]], y1+0.2*yLen, stats[["highPercentile"]], y2-0.2*yLen)
}
else
segments(stats[["Q3"]], midY, stats[["highPercentile"]], midY, lty="solid")
#if (length(stats)==8) ## also expeceted is present
# points(stats[[8]],midY,pch=20)
}
trimSpaces<-function(strng) {
s1=sub('^ +', '', strng) ## trailing spaces only
s2=sub(' +$', '', s1) ## trailing spaces only
s2
}
|
/scratch/gouwar.j/cran-all/cranData/CORElearn/R/ordEval.R
|
getRpartModel <- function(model, dataset) {
m <- getRpart(model);
ee <-list();
class(ee)<-"rpart";
if (model$model == "regTree")
ee$method <- "anova"
else ee$method<-"class";
ff<-m[[1]];
ff<-matrix(as.numeric(ff),m[[4]],m[[5]],TRUE);
dim(m[[3]])<-c(m[[4]],3);
attrDesc<-matrix(m[[12]],m[[8]],m[[13]], TRUE)
var<-m[[3]][,2];
suppressWarnings(idv<-as.integer(var))
var[!is.na(idv)]<-attrDesc[,2];
dim(var)<-c(m[[4]],1);
dfr<-data.frame(var,ff);
desc<-m[[3]][,3]!= ""
yvalue<-dfr[,9];
yvalue[desc]<-m[[3]][desc,3]
classLev<-length(model$class.lev);
#special case if class has levels -
#it is not a regression tree.
if(classLev > 0){
stat<-matrix(m[[14]], m[[4]],classLev,TRUE);
stat<-unlist(stat);
dim(stat)<-c(m[[4]],classLev);
stat1<-stat/dfr[,2];
dim(stat)<-c(1,m[[4]]*classLev);
dim(stat1)<-c(1,m[[4]]*classLev);
y2 <- c()
for (i in 1:length(yvalue)) {
if (yvalue[i] %in% model$class.lev)
y2[i] <- as.integer(factor(yvalue[i],levels=model$class.lev))
else y2[i] <- as.integer(yvalue[i])
}
dfryval2<-c(y2,stat,stat1);
# dfryval2<-c(yvalue,stat,stat1);
dim(dfryval2)<-c(m[[4]],2*classLev+1);
method <- "class"
}
else{
av<-mat.or.vec(m[[4]],1);
ps<-strsplit(yvalue, " ", fixed=TRUE);
isOldType<-length(unlist(ps)) == length(yvalue);
if(isOldType){
yvalue<-as.numeric(yvalue);
}
dfryval2<-c(yvalue);
dim(dfryval2)<-c(m[[4]],1);
}
dfr[,9]<-dfryval2;
ee$frame<-dfr;
attr(ee$frame, "row.names")<-as.integer(c(m[[3]][,1]));
attr(ee$frame, "names")<-m[[2]];
splits <-matrix(as.numeric(m[[6]]),m[[8]],m[[9]],TRUE);
splits<-splits[,2:dim(splits)[2]];
dim(splits)<-c(m[[8]],as.integer(m[[9]])-1);
splCol<-attrDesc[,2]
attr(splits, "dimnames")<-list(splCol, m[[7]]);
#replace count column with values from frame$n
attrIndex<-ee$frame$var!="<leaf>"
leftchild<-attr(ee$frame[attrIndex,],"row.names")*2
i<-1
while(i<=length(leftchild)){
selected<-attr(ee$frame,"row.names")==leftchild[i]
splits[i,1]<-ee$frame$n[selected]
i<-i+1;
}
ee$splits<-splits;
search <- all.vars(model$formula)[1]
b<-lapply(attr(dataset, 'names'),function(x){x!=search});
#we can use levels onto dataset,
#because rpart.labels matches attributes by name.
cs<-lapply(dataset,levels);
#cs is a list of attributes each of them has a list of values.
cs<-cs[unlist(b)]
d<-max(unlist(lapply(cs, length)));
d<-min(d, as.numeric(m[[11]]));
if(d > 0){
csplit<-matrix(as.numeric(m[[10]]),m[[8]],m[[11]],TRUE);
#csplit==3 is the default,
#if min is 1 there is at least one discrete attribute
if(min(csplit) == 1){
csplit<-csplit[,1:d];
dim(csplit)<-c(m[[8]],d);
ee$csplit<-csplit;
}
}
attr(ee, "xlevels")<-cs;
attr(ee, "ylevels")<-model$class.lev;
if(classLev > 0){
#function used for formatting discrete class
textFunction <- function (yval, dev, wt, ylevel,
digits, n, use.n){
nclass <- (ncol(yval) - 1L)/2
group <- yval[, 1L]
counts <- yval[, 1L + (1L:nclass)]
dimCounts<-dim(counts);
counts<-matrix(as.numeric(counts),dimCounts[1],dimCounts[2])
temp1 <- rpart.formatg(counts, digits)
if (nclass > 1) {
temp1 <- apply(matrix(temp1, ncol = nclass),
1, paste, collapse = "/")
}
if (use.n) {
out <- paste(format(group, justify = "left"),
"\n", temp1, sep = "")
}
else {
out <- format(group, justify = "left")
}
return(out);
}
}
else{
#function used for formatting continuous class
textFunction <- function (yval, dev, wt, ylevel,
digits, n, use.n){
if(!is.numeric(yval)){
ps<-strsplit(yval, "+", fixed=TRUE);
for(ij in 1:length(ps)){
yval[ij]<-paste(ps[[ij]], collapse = "+\n");
}
if (use.n) {
yval<-paste(yval, "\nn=", n, sep = "")
}
return(yval);
}
else{
if (use.n) {
paste(rpart.formatg(yval, digits),
"\nn=", n, sep = "")
}
else {
paste(rpart.formatg(yval, digits))
}
}
}
}
#rpartNamespace <- asNamespace("rpart");
#environment(textFunction) <- rpartNamespace;
ee$functions$text <- textFunction;
ee;
}
getRpart <- function(model) {
#regression tree
if(model$model == "regTree"){
.Call(C_exportModelRT, as.integer(model$modelID))
}
#classification tree
else if(model$model == "tree"){
.Call(C_exportModelT, as.integer(model$modelID))
}
else{
stop("The model must be a regresion or a decision tree.");
}
}
rfProximity <- function(model, outProximity=TRUE){
if (model$model == "rf"){
.Call(C_exportProximity, as.integer(model$modelID),as.integer(outProximity==FALSE))
}
else{
stop("The model must be a random forest.");
}
}
getVarImportanceCluster <-function(model, cluster){
modelID <- model$modelID;
tmp <- .C(C_exportVarImportanceCluster,
as.integer(modelID),
clusterData = as.integer(cluster),
var = double(model$noNumeric + model$noDiscrete-1),
NAOK=TRUE)
}
spaceScale <- function(pr, component){
space<-cmdscale(pr, component, add=TRUE);
}
rfClustering <- function(model, noClusters=4){
covMatrix<-rfProximity(model, outProximity=F);
cluster<-cluster::pam(covMatrix, noClusters, diss=TRUE)
}
rfOutliers <- function(model, dataset){
pr <- rfProximity(model, outProximity=TRUE);
search <- all.vars(model$formula)[1];
b<-lapply(attr(dataset, 'names'),function(x){x==search});
attrClass<-c(1:length(b))[b==TRUE]
caseIndex<-matrix(c(1:dim(pr)[2]), dim(pr)[1], dim(pr)[2], TRUE)
el<-caseIndex;
i<-1;
prSum<-matrix(0);
while(i<=dim(pr)[2]){
rclass<-dataset[el[i,], attrClass]==dataset[i,attrClass]
prSumMedian<-median(pr[i,rclass])
prSumSd<-sd(pr[i,rclass]);
prSum[i]<-sum(pr[i,rclass]^2)^(-1)-prSumMedian
if(is.na(prSumSd)){
prSum[i] = 10;
warning(paste("Element ",i," is the only one in his class. Setting output to 10."))
}
else if(is.numeric(prSumSd) && prSumSd>0){
prSum[i]<-prSum[i]/prSumSd
}
i<-i+1
}
outliers<-prSum
}
classPrototypes<-function(model, dataset, noPrototypes=10){
search <- all.vars(model$formula)[1];
b<-unlist(lapply(attr(dataset, 'names'),function(x){x==search}));
lev<-levels(dataset[, b]);
nclass<-length(lev);
n<-length(dataset[, 1]);
pre<-predict(model, dataset);
cluster<-NULL;
index<-matrix(1:n, n, nclass);
out<-matrix(0, nclass, noPrototypes);
for(j in 1:nclass){
p<-pre$probabilities;
maxelem<-NULL;
i<-1;
while(i<=n && length(p[p!=0])>0 && length(maxelem) < noPrototypes){
tmp<-index[p==max(p)];
ntmp<-length(tmp);
outPos<-list();
for(k in 1:ntmp){
outPos[length(outPos)+1]<-c(1:nclass)[lev==dataset[tmp[k],b]]
}
outPos<-unlist(outPos);
# if(length(outPos==j) > 0)
# {
maxelem<-c(maxelem, tmp[outPos==j]);
# }
p[tmp,]<-0;
i<-i+1;
}
nmaxelem<-length(maxelem);
if(nmaxelem>=noPrototypes) {
out[j, ]<-maxelem[1:noPrototypes];
}
else{
out[j, c(1:nmaxelem)]<-maxelem[1:nmaxelem];
}
cluster<-c(cluster, (c(1:noPrototypes)*0+j)[out[j,]>0]);
}
out<-t(out);
dim(out)<-c(1, noPrototypes*nclass);
o<-list();
o$prototypes<-as.numeric(out[out>0]);
o$clustering<-as.numeric(cluster);
o$levels<-as.character(lev);
impPredictedExample<-o;
}
# the data.frame set is converted to a form such that all the attributes have values between 0 and 1
# this is useful in visualization
varNormalization<-function(md, set){
#d-discrete, a-attribure, n-names
column<-length(set[1,]);
n<-length(set[,1]);
colPos<-matrix(FALSE, column, column);
dan<-md$discAttrNames;
nd<-length(dan);
ian<-0;
if(nd>0){
int<-vector("numeric",nd);
for(ian in 1:nd)
{
search<-dan[ian];
colPos[ian,]<-unlist(lapply(attr(set, 'names'),function(x){x==search}));
int[ian]<-1/(length(levels(set[, colPos[ian, ]]))-1);
}
}
nan<-md$numAttrNames;
nn<-length(nan);
if(nn > 0){
offset<-ian;
mi<-vector("numeric", nn);
sigma<-vector("numeric", nn);
maxnorm<-vector("numeric", nn);
moveup<-vector("numeric", nn);
for(ian in 1:nn){
search<-nan[ian];
tmp<-unlist(lapply(attr(set, 'names'),function(x){x==search}));
colPos[ian+offset,]<-tmp
mi[ian]<-as.numeric(mean(set[,tmp]));
sigma[ian]<-as.numeric(sd(set[,tmp]));
allcurval<-(set[, tmp]-mi[ian])/sigma[ian];
moveup[ian]<-min(c(0,allcurval));
maxnorm[ian]<-max(allcurval-moveup[ian])
}
}
out<-NULL;
classV<- all.vars(md$formula)[1];
classV<-unlist(lapply(attr(set, 'names'),function(x){x!=classV}));
for(ex in 1:n){
pos<-vector("numeric", column);
if(nd>0){
for(da in 1:nd) {
lev<-levels(set[, colPos[da, ]]);
val<-set[ex, colPos[da, ]];
if(is.na(val)){
index<-1;
}
else{
index<-c(1:length(lev))[lev==val];
}
pos[colPos[da, ]]<-(index-1)*int[da];
}
}
if(nn>0){
for(na in 1:nn){
normal<-((set[ex, colPos[na+offset, ]]-mi[na])/sigma[na]);
val<-(normal-moveup[na])/maxnorm[na];
pos[colPos[na+offset, ]]<-val;
}
}
out<-c(out,pos[classV]);
}
out<-matrix(out, n, column-1, TRUE);
}
getQuartils<-function(examples){
int<-0.25;
i<-1
minOld<-FALSE
while(i<5){
minIndex<-examples<i*int
aMedian<-median(examples[!minOld & minIndex]);
examples[!minOld & minIndex]<-aMedian;
minOld<- minOld | minIndex
i<-i+1
}
getQuartils<-examples;
}
rfAttrEvalClustering<-function(model, dataset, clustering=NULL){
search <- all.vars(model$formula)[1];
b<-lapply(attr(dataset, 'names'),function(x){x==search})
b<-unlist(b);
i<-1;
imp<-NULL;
lev<-NULL;
if(is.null(clustering)){
cl2<-as.numeric(dataset[,b]);
levSet<-dataset[,b];
}
else{
cl2<-clustering;
levSet<-cl2;
}
cl<-cl2;
stopme<-length(levels(levSet));
while(i<=stopme){
cl[cl!=i]<-0;
cl[cl==i]<-1;
lev<-c(lev, as.character((levSet[cl==1])[1]));
d<-getVarImportanceCluster(model, cl);
imp<-c(imp,d$var);
cl<-cl2;
i<-i+1;
}
temp<-list();
ncolumn<-length(b[b==FALSE]);
imptemp<-matrix(imp, stopme, ncolumn, TRUE);
colnames(imptemp) <- names(dataset)[b==FALSE]
rownames(imptemp) <- levels(levSet)
temp$imp<-imptemp
temp$levels<-lev;
rfAttrEvalClustering<-temp;
}
plotRFStats <- function(point, cluster=FALSE, plotLine=FALSE,
lOffset=0, myCount=7, myAxes=FALSE)
{
pointLen <- length(point);
if(is.null(dim(point)) || length(dim(point)) == 1)
{
tmpPoint <- point;
point<-matrix(0, pointLen, 2);
point[,1] <- c(1:pointLen);
point[,2] <- tmpPoint;
}
noVar <- pointLen;
ylim<-c(min(point[,2]), max(point[,2])+lOffset)
xlim<-c(min(point[,1]), max(point[,1]))
if(is.logical(myAxes)){
axesShow<-TRUE;
}
else{
axesShow<-FALSE;
}
plot(1, 1, xlim=xlim, ylim=ylim, type="n", ann=FALSE, frame=TRUE, axes=axesShow);
if(!is.logical(myAxes) && length(myAxes) > 0){
axis(2);
axis(1, at=1:noVar, labels=myAxes);
}
if(length(cluster) > 1 )
{
tmpCluster <- cluster;
clusterLevelNames <- list();
clusterLevels <- 0;
i <- 1;
while(length(tmpCluster) > 0 && i < 13)
{
clusterLevels[i] <- i;
clusterLevelNames[i]<-tmpCluster[1]
cluster[cluster==tmpCluster[1]]<-i;
tmpCluster <- tmpCluster[tmpCluster!=tmpCluster[1]];
i <- i+1;
}
clusterLevels <- clusterLevels[clusterLevels>0];
myPch <-0;
myColor <-0;
for(value in clusterLevels)
{
#mod
myPch[value]<-floor(value/myCount);
myColor[value]<- 1+value - myCount*floor(value/myCount);
points(point[cluster==value,], col=myColor[value], pch=myPch[value]);
if(plotLine == TRUE)
{
lines(point[cluster==value,], col=myColor[value], pch=myPch[value]);
}
}
prefix<-""
if(is.integer(clusterLevelNames[1]))
{
prefix<-"skupina "
}
clusterNames <- paste(prefix, clusterLevelNames, sep = "");
legend(xlim[1], ylim[2], clusterNames, cex=0.8, col=myColor, pch=myPch);
}
else
{
points(point);
if(plotLine == TRUE)
{
lines(point);
}
}
}
plotRFMulti<-function(point, legendNames=FALSE, lOffset=0,
myCount=7, myHoriz=FALSE, myAxes=FALSE)
{
noVar<-dim(point)[2]
noCluster<-dim(point)[1]
ylim<-c(min(point), max(point)+lOffset)
xlim<-c(1, noVar)
if(is.logical(myAxes)){
axesShow<-TRUE;
}
else{
axesShow<-FALSE;
}
plot(1, 1, xlim=xlim, ylim=ylim, type="n", ann=FALSE, frame=TRUE, axes=axesShow);
if(!is.logical(myAxes) && length(myAxes) > 0){
axis(2);
axis(1, at=1:noVar, labels=myAxes);
}
myPch <-0;
myColor <-0;
pPoint<- matrix(0.0, noVar, 2)
pPoint[,1]<-c(1:noVar)
for(i in c(1:noCluster))
{
myPch[i]<-floor(i/myCount);
myColor[i]<- 1+i - myCount*floor(i/myCount);
pPoint[,2]<-point[i,]
points(pPoint, col=myColor[i], pch=myPch[i]);
lines(pPoint, col=myColor[i], pch=myPch[i]);
}
prefix<-""
if(is.logical(legendNames) && legendNames==FALSE)
{
legendNames<-c(1:noCluster)
prefix <-"cluster"
}
clusterNames <- paste(prefix, legendNames, sep = "");
color<-c(1:noCluster);
legend(xlim[1], ylim[2], clusterNames, cex=0.8, col=myColor, pch=myPch, horiz=myHoriz);
}
plotRFNorm<-function(point, cluster, somnames, lOffset,
myHoriz=FALSE, myAxes=FALSE)
{
noVar<-dim(point)[2];
ylim<-c(min(point), max(point)+lOffset)
xlim<-c(1, noVar)
if(is.logical(myAxes)){
axesShow<-TRUE;
}
else{
axesShow<-FALSE;
}
plot(1, 1, xlim=xlim, ylim=ylim, type="n", ann=FALSE, frame=TRUE, axes=axesShow);
if(!is.logical(myAxes) && length(myAxes) > 0){
axis(2);
axis(1, at=1:noVar, labels=myAxes);
}
tmpCluster <- cluster;
clusterLevelNames <- list();
clusterLevels <- 0;
i <- 1;
while(length(tmpCluster) > 0 && i < 13){
clusterLevels[i] <- i;
clusterLevelNames[i]<-tmpCluster[1]
cluster[cluster==tmpCluster[1]]<-i;
tmpCluster <- tmpCluster[tmpCluster!=tmpCluster[1]];
i <- i+1;
}
clusterLevels <- clusterLevels[clusterLevels>0];
myPch <-0;
myColor <-0;
myCount<-7
nexamples<-length(point[,1]);
for(value in 1:nexamples){
#mod
myPch[cluster[value]]<-floor(cluster[value]/myCount);
myColor[cluster[value]]<- 1+cluster[value] - myCount*floor(cluster[value]/myCount);
points(point[value,], col=myColor[cluster[value]], pch=myPch[cluster[value]]);
lines(point[value,], col=myColor[cluster[value]], pch=myPch[cluster[value]]);}
legend(xlim[1], ylim[2], somnames, cex=0.8, col=myColor, pch=myPch, horiz=myHoriz);
}
# taken from rpart package, because it does not export it anymore
## format a set of numbers using C's "g" format
rpart.formatg <- function(x, digits = getOption("digits"),
format = paste0("%.", digits, "g"))
{
if (!is.numeric(x)) stop("'x' must be a numeric vector")
temp <- sprintf(format, x)
if (is.matrix(x)) matrix(temp, nrow = nrow(x)) else temp
}
|
/scratch/gouwar.j/cran-all/cranData/CORElearn/R/rfVisualize.R
|
comparePredict <- function(valueName, pred, continue)
{
stored <- c(
616,929,329,362,456,474,607,857,377,191,228,388,536,314,850,403,265,293,572,849,461,201,451,891,860,600,218,738,678,283,
698,748,620,382,852,59,361,822,522,637,846,535,549,199,155,678,779,262,861,611,719,264,840,820,926,472,738,827,829,806,445,
263,562,833,876,804,125,212,841,930,713,141,712,860,245,789,358,901,780,370,387,208,778,757,585,479,730,408,353,174,398,
935,151,808,716,795,917,884,305,833,828,540,118,417,913,694,799,850,252,446,372,800,663,572,282,557,251,428,676,126,919,631,
783,746,737,854,839,826,644,916,266,911,109,164,781,548,98,754,838,820,838,690,707,669,880,304,866,378,57,475,115,76,777,
948,812,484,471,106,875,833,767,358,938,791,171,852,911,138,85,233,806,76,272,681,311,337,222,817,421,406,419,857,273,119,
674,79,367,366,77,383,276,872,291,374,678,84,170,681,757,713)/1000
#stored[3*(10:30)] <- 0.6 # simulate an error
sdappr <- 0.095*(stored*(1 - stored))^0.61
out <- pred$probabilities[, 2]
err <- (out - stored)/sdappr
curr <- mean(err^2)
res <- curr < 2
if (!res) {
cat("Comparison FAILED for ", valueName, "\n", sep="")
cat("mean(error^2) =", curr, "\n")
ind <- order(err, decreasing=TRUE)[1:10]
print(cbind(ind, stored=stored[ind], obtained=out[ind], error=err[ind]))
if (continue) {
cat("comparison FAILED\n")
} else {
stop("comparison FAILED")
}
}
res
}
cmp.table <- function(a, b)
{
aa <- unclass(a)
bb <- unclass(b)
if (identical(dim(aa), dim(bb))) {
return(all(aa == bb))
} else {
return(FALSE)
}
}
compareMEval <- function(valueName, pred, cl, mEval, continue)
{
accuracy <- mean(pred$class == cl)
res1 <- accuracy == mEval$accuracy
aux.pred.mat <- table(cl, pred$class)
res2 <- cmp.table(mEval$predictionMatrix, aux.pred.mat)
res <- all(res1, res2)
if (!res) {
cat("Comparison FAILED for ", valueName, "\n", sep="")
cat("accuracy =", accuracy, "\n")
cat("mEval$accuracy =", mEval$accuracy, "\n")
cat("aux.pred.mat\n")
print(aux.pred.mat)
cat("mEval$predictionMatrix\n")
print(mEval$predictionMatrix)
if (continue) {
cat("comparison FAILED\n")
} else {
stop("comparison FAILED")
}
}
res
}
compareClass <- function(valueName, value1, value2, continue)
{
res <- identical(value1, value2)
if (!res) {
cat("Comparison FAILED for ", valueName, "\n", sep="")
print(table(value1, value2))
if (continue) {
cat("comparison FAILED\n")
} else {
stop("Comparison FAILED")
}
}
res
}
compareApprox <- function(valueName, value1, value2, tolerance, continue)
{
res <- max(abs(value1 - value2)) <= tolerance
if (!res) {
cat("Comparison FAILED for ", valueName, "\n", sep="")
cat(deparse(substitute(value1)), "\n")
print(value1)
cat("difference\n")
print(value2 - value1)
cat(deparse(substitute(value2)), "\n")
print(value2)
if (continue) {
cat("comparison FAILED\n")
} else {
stop("Comparison FAILED")
}
}
res
}
testCoreClass <- function(continue=TRUE)
{
ncases <- 200
RNGkind("Mersenne-Twister")
set.seed(12345)
train <- classDataGen(ncases)
test <- classDataGen(ncases)
model <- CoreModel(class ~ ., train, model="rf", minNodeWeightRF=5, minNodeWeightEst=2, rfNoTrees=50, maxThreads=1)
pred <- predict(model, test, rfPredictClass=FALSE)
destroyModels(model)
# consistency of predict output
res1 <- compareClass("testCoreClass/pred$class", pred$class=="2", pred$probabilities[, 2] >= 0.5, continue)
# compare with stored values
res2 <- comparePredict("testCoreClass/pred", pred, continue)
# modelEval test
mEval <- modelEval(model, test$class, pred$class, pred$prob)
res3 <- compareMEval("testCoreClass/modelEval", pred, test$class, mEval, continue)
all(res1, res2, res3)
}
testCoreAttrEval <- function(continue=TRUE)
{
ncases <- 200
RNGkind("Mersenne-Twister")
set.seed(0)
train <- classDataGen(ncases)
set.seed(0)
estReliefF1 <- attrEval(class ~ ., train, estimator="ReliefFexpRank", maxThreads=1)
set.seed(0)
estReliefF0 <- attrEval(class ~ ., train, estimator="ReliefFexpRank", maxThreads=1) # makes sense to use maxThreads=2 once g++-5.0 is available
resA1 <- compareApprox("testCoreAttrEval/estReliefF/threads", estReliefF0, estReliefF1, 1e-9, continue)
stored <- c(0.07413109,0.0852054,0.05018575,0.02656779,0.0652197,0.03082657,-0.008773201,0.1002774,0.08263487,-0.00481844)
resA2 <- compareApprox("testCoreAttrEval/estReliefF/stored", stored, estReliefF0, 1e-8, continue)
set.seed(0)
estMdl1 <- attrEval(class ~ ., train, estimator="MDL", minNodeWeightEst=5, maxThreads=1)
set.seed(0)
estMdl0 <- attrEval(class ~ ., train, estimator="MDL", minNodeWeightEst=5, maxThreads=1)
resB1 <- compareApprox("testCoreAttrEval/estMdl/threads", estMdl0, estMdl1, 1e-8, continue)
stored <- c(0.05068002,0.04873661,0.0236104,0.007925046,0.03501767,0.004991399,-0.02464427,0.1364728,0.1114686,0.00282483)
resB2 <- compareApprox("testCoreAttrEval/estMdl/stored", stored, estMdl0, 1e-7, continue)
all(resA1, resA2, resB1, resB2)
}
testCoreReg <- function(continue=TRUE)
{
ncases <- 200
RNGkind("Mersenne-Twister")
set.seed(0)
train <- regDataGen(ncases)
test<- regDataGen(ncases)
model <- CoreModel(response~., train, model="regTree", modelTypeReg=5, minNodeWeightEst=1, minNodeWeightTree=5)
pred <- predict(model, test)
destroyModels(model)
# Model evaluation
mEval <- modelEval(model, test[["response"]], pred)
stored <- c(0.7983146,0.7638713,0.5261048,0.7238808) # for ModelTypeReg=5, for modelTypeReg = 3: c(0.7326231, 0.7194654, 0.4705813, 0.6805264)
res1 <- compareApprox("testCoreReg/mEval", stored, c(mEval$MSE,mEval$RMSE,mEval$MAE,mEval$RMAE), 1e-6, continue)
# Attribute evaluation with RReliefFexpRank
estRReliefF <- attrEval(response~., train, estimator="RReliefFexpRank")
stored <- c(0.03235394,0.02799911,0.004161262,-0.05577854,-0.04937824,0.05685567,-0.03946655,0.001630726,0.05570145,0.1200363)
res2 <- compareApprox("testCoreReg/estReliefF", stored, estRReliefF, 1e-6, continue)
# Attribute evaluation with MSEofMean
stored <- c(-0.7192786,-0.686779,-0.7668486,-0.7625726,-0.7373601,-0.6837401,-0.753099,-0.7328526,-0.6412497,-0.6879387)
estMSE <- attrEval(response~., train, estimator="MSEofMean")
res3 <- compareApprox("testCoreReg/estMSE", stored, estMSE, 1e-6, continue)
all(res1, res2, res3)
}
testCoreOrdEval <- function(continue=TRUE)
{
ncases <- 200
RNGkind("Mersenne-Twister")
set.seed(0)
train <- ordDataGen(ncases)
estOrdEval <- ordEval(class~., train, ordEvalNoRandomNormalizers=0 )
stored <- c(0.5385996,0.6631206,0.344894,0.4327273,0.3623188,0.4223827,0.3440285,0.3432203)
res1 <- compareApprox("testCoreOrdEval/reinfPosAttr", stored, estOrdEval$reinfPosAttr, 1e-7, continue)
stored <- c(0.4653641,0.581854,0.3009524,0.3561151,0.2833333,0.3364486,0.2515213,0.2696177)
res2 <- compareApprox("testCoreOrdEval/reinfNegAttr", stored, estOrdEval$reinfNegAttr, 1e-7, continue)
stored <- c(0.4693182,0.525296,0.376569,0.4295302,0.3568282,0.4039517,0.3794926,0.4151309)
res3 <- compareApprox("testCoreOrdEval/anchorAttr", stored, estOrdEval$anchorAttr, 1e-7, continue)
all(res1, res2, res3)
}
#gener.Reg <- function(m,n)
#{
# x <- matrix(runif(m*n),nrow=m,ncol=n)
# data.frame(x,resp=rowSums(x))
#}
outputResult <- function(testName, status, failMessage, continue)
{
if (!all(status)) {
outMessage <- testName
if (failMessage != "") {
outMessage <- paste(outMessage, " (", failMessage, ")", sep="")
}
if (continue) {
cat("Test FAILED: ", outMessage, "\n", sep="")
} else {
stop("Test FAILED: ", outMessage)
}
}
}
singleTestNA <- function(t, x)
{
.C(C_testNA, as.integer(t), as.double(x), out=integer(2), NAOK=TRUE)$out
}
testCoreNA <- function(continue=TRUE)
{
a <- matrix(nrow=4, ncol=2)
a[1,] <- singleTestNA(0, NA) # pass NA to CORElearn
a[2,] <- singleTestNA(0, NaN) # pass NaN to CORElearn
a[3,] <- singleTestNA(1, 0) # use internal NAcont
a[4,] <- singleTestNA(2, 0) # generate NaN
ok <- a == rbind(c(1,0), c(0,1), c(1,0), c(0,1))
outputResult("testCoreNA", ok, "", continue)
all(ok)
}
testCoreRPORT <- function(continue=TRUE)
{
tmp <- .C(C_testRPORT, a=as.integer(2))
ok <- tmp$a == 1
outputResult("testCoreRPORT", ok, tmp$a, continue)
all(ok)
}
testCoreRand <- function(continue=TRUE)
{
n <- 10
runif(1)
state <- .Random.seed
x <- runif(n)
.Random.seed <<- state
y <- .C(C_testCoreRand, as.integer(n), a=double(n))$a
ok <- x == y
outputResult("testCoreRand", ok, paste(x[1], x[2], y[1], y[2]), continue)
all(ok)
}
asTxt <- function(ok)
{
if (all(ok)) {
"OK"
} else {
"FAIL"
}
}
allTests <- function(continue=TRUE, timed=FALSE)
{
t1 <- system.time(r1 <- testCoreClass(continue))
cat("testCoreClass() : ", asTxt(r1), "\n")
if (timed) cat("Elapsed", t1["elapsed"],"sec\n")
t2 <- system.time(r2 <- testCoreAttrEval(continue))
cat("testCoreAttrEval() : ", asTxt(r2), "\n")
if (timed) cat("Elapsed", t2["elapsed"],"sec\n")
t3 <- system.time(r3 <- testCoreReg(continue))
cat("testCoreReg() : ", asTxt(r3), "\n")
if (timed) cat("Elapsed", t3["elapsed"],"sec\n")
t4 <- system.time(r4 <- testCoreOrdEval(continue))
cat("testCoreOrdEval() : ", asTxt(r4), "\n")
if (timed) cat("Elapsed", t4["elapsed"],"sec\n")
if (timed) {
cat("system.time() summary\n")
print(rbind(t1, t2, t3, t4)[, 1:3])
}
r5 <- testCoreNA(continue)
cat("testCoreNA() : ", asTxt(r5), "\n")
r6 <- testCoreRPORT(continue)
cat("testCoreRPORT() : ", asTxt(r6), "\n")
r7 <- testCoreRand(continue)
cat("testCoreRand() : ", asTxt(r7), "\n")
result <- all(r1, r2, r3, r4, r5, r6, r7)
outputResult("allTests", result, "", continue=FALSE)
invisible(result)
}
testClassPseudoRandom <- function(s, k, m)
{
n <- length(s)
aux <- .C(C_testClassPseudoRandom,
n = as.integer(n),
s = as.integer(s),
k = as.integer(k),
m = as.integer(m),
x = double(k*m))
matrix(aux$x, nrow=k, ncol=m)
}
testTime <- function()
{
.C(C_testTime, x=double(1))$x
}
|
/scratch/gouwar.j/cran-all/cranData/CORElearn/R/testCore.R
|
prepare.Data <- function(data, formulaIn, dependent, class.lev=NULL, numericAsOrdered=FALSE, orderedAsNumeric=TRUE, skipNAcolumn=TRUE, skipEqualColumn=TRUE)
{
if (dependent) { # shall we fill in the column with the dependent data
if (inherits(data[[1]],"ordered")) {
data[[1]] <- factor(data[[1]],ordered=FALSE,levels=levels(data[[1]])); # protect against transformation to numeric
cat("Changing dependent variable to unordered factor.\n");
}
if (length(data[[1]][is.na(data[[1]])])>0)
warning("Instances with dependent variable equal to NA are left out.")
data <- data[!is.na(data[[1]]),]
}
else {
if (is.null(class.lev)) {# regression
predictionColumn <- double(length=nrow(data))
predictionColumn[] <- NA
data <- cbind(prediction=predictionColumn,data)
}
else {
predictionColumn <- double(length=nrow(data))
predictionColumn[] <- NA
data <- cbind(prediction=factor(predictionColumn,levels=class.lev),data);
}
}
col.names <- names(data)
discnumvalues <- integer(0);
disccharvalues <- c();
discValues <- list()
discdata <- matrix(nrow=nrow(data),ncol=0);
numdata <- matrix(nrow=nrow(data),ncol=0);
discmap <- integer(0);
nummap <- integer(0);
skipmap <- integer(0)
for (i in seq(along=data)) {
# check validity of columns
if (all(is.na(data[[i]]))) { #
if (i > 1) {
if (skipNAcolumn) {
skipmap <- c(skipmap,i)
warning(sprintf("Variable %s has all values equal to NA and has been skipped.",names(data)[i]))
formulaIn <- update.formula(formulaIn, paste(". ~ . - ",names(data)[i],sep=""))
next
}
else {
if (dependent)
warning(sprintf("Variable %s has all values equal to NA.",names(data)[i]))
}
}
}
else {
sc <- sort(data[[i]],na.last=NA)
if (nrow(data) > 1 && (length(sc) <= 1 || sc[1]==sc[length(sc)]) ) { # all equal
if (i>1) {
if (skipEqualColumn) {
skipmap <- c(skipmap,i)
warning(sprintf("Variable %s has all values equal and has beeen skipped.",names(data)[i]))
formulaIn <- update.formula(formulaIn, paste(". ~ . - ",names(data)[i],sep=""))
next
}
else {
if (dependent)
warning(sprintf("Variable %s has all values equal.",names(data)[i]))
}
}
else warning(sprintf("Dependent variable %s has all values equal.",names(data)[i]))
}
}
# this transformation is needed for ordEval algorithm
if (inherits(data[[i]],"character") || (!inherits(data[[i]],"factor") && numericAsOrdered==TRUE)) {
data[[i]] <- factor(data[[i]]);
}
if (inherits(data[[i]],"factor") &&
(numericAsOrdered==TRUE || !inherits(data[[i]],"ordered") || !orderedAsNumeric)) {
column <- as.integer(data[[i]]);
column[is.na(column)] <- as.integer(0);
column <- matrix(column,ncol=1,dimnames=list(NULL,col.names[i]))
discnumvalues <- c(discnumvalues,length(levels(data[[i]])));
disccharvalues <- c(disccharvalues,paste(levels(data[[i]]),collapse="\x1F"));
discdata <- cbind(discdata,column);
discmap <- c(discmap,i);
discValues[[length(discmap)]] <- levels(data[[i]])
} else {
column <- matrix(as.double(data[[i]]),ncol=1,dimnames=list(NULL,col.names[i]))
column[is.nan(column)] <- NA
column[is.infinite(column) & column > 0] <- .Machine$double.xmax
column[is.infinite(column) & column < 0] <- - .Machine$double.xmax
numdata <- cbind(numdata,column);
nummap <- c(nummap,i);
}
}
numdata[is.na(numdata)] <- as.double(NA)
if (length(discnumvalues) != ncol(discdata)) stop("internal problem 1 in prepare dataa"); # for debugging only
if (length(discmap) != ncol(discdata)) stop("internal problem 2 in prepare data"); # for debugging only
if (length(nummap) != ncol(numdata)) stop("internal problem 3 in prepare data"); # for debugging only
if (nrow(data) != nrow(numdata)) stop("internal problem 4 in prepare data"); # for debugging only
if (nrow(data) != nrow(discdata)) stop("internal problem 5 in prepare data"); # for debugging only
#if (ncol(data) != ncol(discdata) + ncol(numdata)) stop("internal problem 4 in prepare data"); # for debugging only
list(discnumvalues=discnumvalues,disccharvalues=disccharvalues,discdata=discdata,discmap=discmap,
numdata=numdata,nummap=nummap,discValues=discValues, noInst=nrow(data),skipmap=skipmap,
formulaOut=formulaIn);
}
get.formula <- function(class.name)
{
as.formula(paste(class.name,"~ ."));
}
infoCore<-function(what=c("attrEval","attrEvalReg")) {
what <- match.arg(what)
switch (what,
attrEval =
c("ReliefFequalK", "ReliefFexpRank", "ReliefFbestK", "Relief", "InfGain", "GainRatio", "MDL", "Gini",
"MyopicReliefF", "Accuracy", "ReliefFmerit", "ReliefFdistance", "ReliefFsqrDistance",
"DKM", "ReliefFexpC", "ReliefFavgC", "ReliefFpe", "ReliefFpa", "ReliefFsmp", "GainRatioCost", "DKMcost",
"ReliefKukar", "MDLsmp","ImpurityEuclid", "ImpurityHellinger",
"UniformDKM","UniformGini","UniformInf","UniformAccuracy",
"EqualDKM", "EqualGini","EqualInf", "EqualHellinger","DistHellinger","DistAUC", "DistAngle", "DistEuclid"
),
attrEvalReg = c("RReliefFequalK", "RReliefFexpRank", "RReliefFbestK","RReliefFwithMSE","MSEofMean","MSEofModel","MAEofModel",
"RReliefFdistance","RReliefFsqrDistance")
)
}
prepare.Options <- function(...)
{
scp <- getOption("scipen") #store the value
options(scipen=20) # change option
optionsList <- list(...);
for (i in seq(along=optionsList)) {
if (is.logical(optionsList[[i]])) {
optionsList[[i]] <- if (optionsList[[i]]) "Y" else "N";
} else if (is.numeric(optionsList[[i]])) {
optionsList[[i]] <- as.character(optionsList[[i]]);
} else if (!is.character(optionsList[[i]])) {
stop(paste("wrong type of option",names(optionsList)[i],"=",optionsList[[i]]));
}
}
options(scipen=scp) # restore the old value
opt <- unlist(optionsList)
# convert integer values of estimators to their character descriptors
for (est in c("selectionEstimator","constructionEstimator")) {
idx = match(est, names(opt),nomatch=-1)
if (idx > 0) {
warn.save <- getOption("warn")
options(warn=-1)
ival <- as.numeric(opt[idx])
options(warn=warn.save)
if (! is.na(ival) ) {
est = infoCore(what="attrEval")
opt[idx] <- est[ival]
}
}
}
if (is.null(opt)) {
opt <- character(0)
names(opt) <- character(0)
}
opt
}
convert.Options <- function(opt) {
# convert character description of estimators to their character descriptors
for (est in c("selectionEstimator","constructionEstimator")) {
idx = match(est, names(opt),nomatch=-1)
if (idx > 0) {
estIdx = match(opt[idx], infoCore(what="attrEval"), nomatch=-1)
opt[idx] <- estIdx
}
}
for (est in c("selectionEstimatorReg","constructionEstimatorReg")) {
idx = match(est, names(opt),nomatch=-1)
if (idx > 0) {
estIdx = match(opt[idx], infoCore(what="attrEvalReg"), nomatch=-1)
opt[idx] <- estIdx
}
}
opt
}
optionData <- function() {
estDsc <- infoCore(what="attrEval")
estList <- paste(estDsc,collapse=",")
estDscReg <- infoCore(what="attrEvalReg")
estListReg <- paste(estDscReg,collapse=",")
optAllList <- list(
## follows description of all parameters
## the format is list with 5 components, some may be empty "":
## 1. name of parameter
## 2. parameter type: logical, integer, numeric, character
## 3. default value
## 4. lower limit
## 5. upper limit
## Option data follow
## \section{Attribute/feature evaluation}
list("binaryEvaluation", "logical", FALSE, FALSE, TRUE),
list("binaryEvaluateNumericAttributes", "logical", TRUE, FALSE, TRUE),
list("multiclassEvaluation", "integer", 1, 1, 4),
list("attrEvaluationInstances", "integer", 0, 0, Inf),
list("minNodeWeightEst", "numeric", 2, 0, Inf),
list("ReliefIterations", "integer", 0, -2, Inf),
list("numAttrProportionEqual", "numeric", 0.04, 0, 1),
list("numAttrProportionDifferent", "numeric", 0.1, 0, 1),
list("kNearestEqual", "integer", 10, 0, Inf),
list("kNearestExpRank", "integer", 70, 0, Inf),
list("quotientExpRankDistance", "numeric", 20, 0, Inf),
## \section{Algorithm ordEval}
list("ordEvalNoRandomNormalizers", "integer", 0, 0, Inf),
list("ordEvalBootstrapNormalize", "logical", FALSE, FALSE, TRUE),
list("ordEvalNormalizingPercentile", "numeric", 0.025, 0, 0.5),
list("attrWeights", "character", "", "", ""),
## \section{Decision/regression tree construction}
list("selectionEstimator", "character", "MDL", estList, ""),
list("selectionEstimatorReg", "character", "RReliefFexpRank", estListReg, ""),
list("minReliefEstimate", "numeric", 0, -1, 1),
list("minInstanceWeight", "numeric", 0.05, 0, 1),
## \section{Stop tree building}
list("minNodeWeightTree", "numeric", 5, 0, Inf),
list("minNodeWeightRF", "numeric", 2, 0, Inf),
list("relMinNodeWeight", "numeric", 0, 0, 1),
list("majorClassProportion", "numeric", 1, 0, 1),
list("rootStdDevProportion", "numeric", 0, 0, 1),
list("minNonMajorityWeight", "numeric", 2, 0, Inf),
## \section{Models in the tree leaves}
list("modelType", "integer", 1, 1, 4),
list("modelTypeReg", "integer", 5, 1, 8),
list("kInNN", "integer", 10, 0, Inf),
list("nnKernelWidth", "numeric", 2, 0, Inf),
list("bayesDiscretization", "integer", 2, 1, 3),
list("discretizationIntervals", "integer", 4, 1, Inf),
## \section{Constructive induction aka. feature construction}
list("constructionMode", "integer", 15, 1, 15),
list("constructionDepth", "integer", 0, 0, Inf),
list("noCachedInNode", "integer", 5, 0, Inf),
list("constructionEstimator", "character", "MDL", estList, ""),
list("constructionEstimatorReg", "character", "RReliefFexpRank", estListReg, ""),
list("beamSize", "integer", 20, 1, Inf),
list("maxConstructSize", "integer", 3, 1, Inf),
## \section{Attribute discretization and binarization}
list("discretizationLookahead", "integer", 3, 0, Inf),
list("discretizationSample", "integer", 50, 0, Inf),
list("maxValues4Exhaustive", "integer", 7, 2, Inf),
list("maxValues4Exhaustive", "integer", 30, 2, Inf),
## \section{Tree pruning}
list("selectedPruner", "integer", 1, 0, 1),
list("selectedPrunerReg", "integer", 2, 0, 4),
list("mdlModelPrecision", "numeric", 0.1, 0, Inf),
list("mdlErrorPrecision", "numeric", 0.01, 0, Inf),
list("mEstPruning", "numeric", 2, 0, Inf),
list("alphaErrorComplexity", "numeric", 0, 0, Inf),
## \section{Prediction}
list("smoothingType", "integer", 0, 0, 4),
list("smoothingValue", "numeric", 0, 0, Inf),
## \section{Random forests}
list("rfNoTrees", "integer", 100, 1, Inf),
list("rfNoSelAttr", "integer", 0, -2, Inf),
list("rfMultipleEst", "logical", FALSE, FALSE, TRUE),
list("rfkNearestEqual", "integer", 30, 0, Inf),
list("rfPropWeightedTrees", "numeric", 0, 0, 1),
list("rfPredictClass", "logical", FALSE, FALSE, TRUE),
## \section{General tree ensembles}
list("rfSampleProp", "numeric", 0, 0, 1),
list("rfNoTerminals", "integer", 0, 0, Inf),
list("rfRegType", "integer", 2, 0, 2),
list("rfRegLambda", "numeric", 0, 0, Inf),
## \section{Read data directly from files}
list("domainName", "character", "", "", ""),
list("dataDirectory", "character", "", "", ""),
list("NAstring", "character", "?", "", ""),
## \section{Miscellaneous}
list("maxThreads", "integer", 0, 0, Inf)
)
optAll <- data.frame()
optLogical <- data.frame()
optInteger <- data.frame()
optNumeric <- data.frame()
optCharacter <- data.frame()
for (i in seq(along=optAllList)) {
optRow <- optAllList[[i]]
if (optRow[[2]] == "logical") {
optDsc <- list(default=optRow[[3]])
optLogical <- rbind(optLogical,data.frame(optDsc,stringsAsFactors=FALSE))
optIndex <- nrow(optLogical)
} else if (optRow[[2]] == "integer") {
optDsc <- list(default=optRow[[3]],lower=optRow[[4]],upper=optRow[[5]])
optInteger <- rbind(optInteger,data.frame(optDsc,stringsAsFactors=FALSE))
optIndex <- nrow(optInteger)
} else if (optRow[[2]] == "numeric") {
optDsc <- list(default=optRow[[3]],lower=optRow[[4]],upper=optRow[[5]])
optNumeric <- rbind(optNumeric,data.frame(optDsc,stringsAsFactors=FALSE))
optIndex <- nrow(optNumeric)
} else if (optRow[[2]]=="character") {
if (optRow[[4]] != "" && length(grep(",",optRow[[4]]))==0) {
warning(paste("Character option",optRow[[1]],"with a single value",optRow[[4]]))
}
optDsc <- list(default=optRow[[3]],type=optRow[[4]])
optCharacter <- rbind(optCharacter,data.frame(optDsc,stringsAsFactors=FALSE))
optIndex <- nrow(optCharacter)
} else {
warning(paste("Unknown type of option",optRow[[2]]))
}
optDsc <- list(name=optRow[[1]],type=optRow[[2]],index=optIndex);
optAll <- rbind(optAll,data.frame(optDsc,stringsAsFactors=FALSE))
}
list(All=optAll,Logical=optLogical,Integer=optInteger,Numeric=optNumeric,Character=optCharacter)
}
# prepare the structure in advance for speed
optData <- optionData()
getStatNames <- function() {
return(c("median", "Q1", "Q3", "lowPercentile", "highPercentile", "mean", "stdDev", "p-value","exp"))
}
checkOptionsValues <- function(options) {
optNames <- names(options)
occurs <- rep(FALSE,times=nrow(optData$All))
for (i in seq(along=options)) {
j <- match(optNames[i], optData$All$name, nomatch=-1)
if (j == -1) {
warning(paste("unrecognised option",optNames[i]),call.=FALSE)
}
else if (occurs[j]) {
warning(paste("option",optNames[i],"used more than once"),call.=FALSE)
}
else {
occurs[j] <- TRUE
if (optData$All$type[j] == "numeric") {
warn.save <- getOption("warn")
options(warn=-1)
nval <- as.numeric(options[i])
options(warn=warn.save)
if (is.na(nval)) {
warning(paste("option",optNames[i],"should be numeric"),call.=FALSE)
}
else {
k <- optData$All$index[j];
lower <- optData$Numeric$lower[k]
upper <- optData$Numeric$upper[k]
if (nval < lower || nval > upper) {
warning(sprintf("option %s should be in [%f, %f]", optNames[i], lower, upper), call.=FALSE)
}
}
}
else if (optData$All$type[j] == "logical") {
cval <- toupper(as.character(options[i]))
if ( ! cval %in% c("Y","N")) {
warning(paste("option",optNames[i],"should be TRUE, \"Y\", FALSE or \"N\""),call.=FALSE)
}
}
else if (optData$All$type[j] == "integer") {
warn.save <- getOption("warn")
options(warn=-1)
ival <- as.numeric(options[i])
options(warn=warn.save)
if (is.na(ival) || ival != trunc(ival)) {
warning(paste("option",optNames[i],"should be integer"),call.=FALSE)
}
else {
k <- optData$All$index[j];
lower <- optData$Integer$lower[k]
upper <- optData$Integer$upper[k]
if (ival < lower || ival > upper) {
warning(sprintf("option %s should be in [%d, %d]", optNames[i], lower, upper), call.=FALSE)
}
}
}
else if (optData$All$type[j] == "character") {
k <- optData$All$index[j];
if (optData$Character$type[k] != "") {
values <- strsplit(optData$Character$type[k],",")[[1]];
if ( ! options[i] %in% values) {
warning(paste("option",optNames[i],"should be one of:",optData$Character$type[k]),call.=FALSE)
}
}
}
}
}
}
checkEstimatorOptions <- function(estimator, options, isRegression) {
errMsg <- NULL ;
optNames <- names(options);
estDsc <- infoCore(what="attrEval");
estDscReg <- infoCore(what="attrEvalReg");
if (isRegression)
estimator <- match.arg(estimator, estDscReg)
else
estimator <- match.arg(estimator, estDsc);
## options allowed for all estimators,
commonOpts <- c("attrEvaluationInstances","minNodeWeightEst","binaryEvaluation","binaryEvaluateNumericAttributes","maxThreads")
for (i in 1:length(commonOpts)) {
idx <- match(commonOpts[i], optNames, nomatch=-1);
if (idx >0)
optNames = optNames[-idx];
}
# options controlling extension of two-class estimators to multiclass
twoClassOpts <- c("multiclassEvaluation")
for (i in 1:length(twoClassOpts)) {
idx <- match(twoClassOpts[i], optNames, nomatch=-1);
if (idx >0)
optNames = optNames[-idx];
}
## options allowed for Relief and its derivatives
ReliefEst <- c(estDsc[grep("^Relief", estDsc)], estDscReg[grep("^RRelief", estDscReg)])
if (estimator %in% ReliefEst) {
ReliefOpts = c("ReliefIterations","numAttrProportionEqual","numAttrProportionDifferent")
for (i in 1:length(ReliefOpts)) {
idx <- match(ReliefOpts[i], optNames, nomatch=-1);
if (idx >0)
optNames = optNames[-idx];
}
## more checks to follow
kNearEqualEst <- c("ReliefFequalK", "RReliefFequalK") ;
expRankEst <- c("ReliefFexpRank","ReliefFdistance","ReliefFsqrDistance","ReliefFexpC","ReliefFavgC",
"ReliefFpe","ReliefFpa","ReliefFsmp",
"RReliefFexpRank", "RReliefFwithMSE", "RReliefFdistance","RReliefFsqrDistance"
);
if (estimator %in% kNearEqualEst) {
kNearEqualOpts <- c("kNearestEqual");
for (i in 1:length(kNearEqualOpts)) {
idx <- match(kNearEqualOpts[i], optNames, nomatch=-1);
if (idx >0)
optNames <- optNames[-idx];
}
}
else if (estimator %in% expRankEst) {
expRankOpts <- c("kNearestExpRank","quotientExpRankDistance")
for (i in 1:length(expRankOpts)) {
idx <- match(expRankOpts[i], optNames, nomatch=-1);
if (idx >0)
optNames <- optNames[-idx];
}
}
}
options[optNames]
}
checkModelOptions <- function(model, options) {
optNames <- names(options);
## first check options by models, later handle special cases
discretizationOpts <- c("selectionEstimator","discretizationLookahead","discretizationSample","maxValues4Exhaustive","maxValues4Greedy")
discretizationOptsReg <- c("selectionEstimatorReg","discretizationLookahead","discretizationSample","maxValues4Exhaustive","maxValues4Greedy") # currently not used
bayesOpts <- c(discretizationOpts,"bayesDiscretization","discretizationIntervals")
knnOpts <- c("kInNN")
knnKernelOpts <- c("kInNN","nnKernelWidth")
miscOpts <-c("maxThreads")
treeModelOpts<-c("modelType",bayesOpts,knnOpts,knnKernelOpts,miscOpts)
treeModelOptsReg<-c("modelTypeReg",knnKernelOpts,miscOpts)
treeStopOpts <- c("minNodeWeightTree","minNodeWeightRF","relMinNodeWeight","majorClassProportion","minInstanceWeight","minNonMajorityWeight")
treeStopOptsReg <- c("minNodeWeightTree","minNodeWeightRF","relMinNodeWeight","minInstanceWeight","rootStdDevProportion")
treePruneOpts <- c("selectedPruner","mEstPruning","mdlModelPrecision","mdlErrorPrecision")
treePruneOptsReg <- c("selectedPrunerReg","mEstPruning","mdlModelPrecision","mdlErrorPrecision")
treeConstructOpts <- c("constructionEstimator","constructionMode","constructionDepth","beamSize","maxConstructSize","noCachedInNode")
treeConstructOptsReg <- c("constructionEstimatorReg","constructionMode","constructionDepth","beamSize","maxConstructSize","noCachedInNode")
treeOpts <- unique(c("selectionEstimator",treeModelOpts,treeStopOpts,treePruneOpts,treeConstructOpts))
rfOpts <- unique(c("selectionEstimator",treeStopOpts,discretizationOpts,miscOpts,"rfNoTrees", "rfNoSelAttr","rfMultipleEst","rfPropWeightedTrees","rfPredictClass","rfSampleProp","rfNoTerminals","rfRegType","rfRegLambda"))
rfNearOpts <- c(rfOpts,"rfkNearestEqual")
regOpts <- unique(c("selectionEstimatorReg",treeModelOptsReg,treeStopOptsReg,treePruneOptsReg,treeConstructOptsReg))
opts = switch(model, rf=rfOpts, rfNear=rfNearOpts, bayes=bayesOpts, knn=knnOpts, knnKernel=knnKernelOpts, tree=treeOpts, regTree=regOpts)
for (i in 1:length(opts)) {
idx <- match(opts[i], optNames, nomatch=-1);
if (idx >0)
optNames = optNames[-idx];
}
if (model %in% c("rf","rfNear","tree")) {
selEstIdx = match("selectionEstimator",names(options),nomatch=-1)
if (selEstIdx==-1)
selEst = optDefault("selectionEstimator")
else
selEst = options[selEstIdx]
optRemain <- checkEstimatorOptions(selEst, options[optNames], FALSE) ;
}
else if (model == "regTree") {
selEstIdx = match("selectionEstimatorReg",names(options),nomatch=-1)
if (selEstIdx==-1)
selEst = optDefault("selectionEstimatorReg")
else
selEst = options[selEstIdx]
optRemain <- checkEstimatorOptions(selEst, options[optNames], TRUE) ;
}
else {
optRemain <- options[optNames]
}
optRemain
}
checkPredictOptions <- function(model, options) {
optNames <- names(options);
## first check options by models, later handle special cases
miscOpts <-c("maxThreads")
bayesOpts <- c()
knnOpts <- c("kInNN")
knnKernelOpts <- c("kInNN","nnKernelWidth")
smoothingOpts <- c("smoothingType","smoothingValue")
treeOpts <- c(knnKernelOpts,smoothingOpts,miscOpts)
rfOpts <- c("rfPredictClass",smoothingOpts,miscOpts)
rfNearOpts <- c(rfOpts,"rfkNearestEqual")
regOpts <- treeOpts
opts = switch(model$model, rf=rfOpts, rfNear=rfNearOpts, bayes=bayesOpts, knn=knnOpts, knnKernel=knnKernelOpts, tree=treeOpts, regTree=regOpts)
for (i in seq(along=opts)) {
idx <- match(opts[i], optNames, nomatch=-1);
if (idx >0)
optNames = optNames[-idx];
}
options[optNames] ;
}
checkOrdEvalOptions <- function(options) {
optNames <- names(options);
miscOpts <-c("maxThreads")
ordEvalOpts <- c(miscOpts,"ordEvalNoRandomNormalizers","ordEvalBootstrapNormalize","ordEvalNormalizingPercentile","attrWeights") #,"ordEvalConfidenceInterval")
opts = ordEvalOpts
for (i in 1:length(opts)) {
idx <- match(opts[i], optNames, nomatch=-1);
if (idx >0)
optNames = optNames[-idx];
}
optRemain <- checkEstimatorOptions("ReliefFequalK", options[optNames], FALSE) ;
optRemain
}
checkDataOptions <- function(options) {
optNames <- names(options);
opts <- c("domainName","dataDirectory","NAstring")
for (i in 1:length(opts)) {
idx <- match(opts[i], optNames, nomatch=-1);
if (idx >0)
optNames = optNames[-idx];
}
options[optNames]
}
optDefault <- function(optName) {
optdat <- optData$All
allIdx = match(optName, optdat[,"name"])
type = optdat[allIdx,"type"]
tabIdx = optdat[allIdx,"index"]
switch (type, integer=optData$Integer[tabIdx,"default"],numeric=optData$Numeric[tabIdx,"default"],
logical=optData$Logical[tabIdx,"default"],character=optData$Character[tabIdx,"default"])
}
preparePlot<-function(fileName="Rplot", ...)
{
interactive=FALSE
fileType = unlist(strsplit(fileName,"\\."))
if (is.na(fileType[2]))
interactive = TRUE
else if (tolower(fileType[2])=="pdf")
pdf(file = fileName, paper="default", ...)
else if (tolower(fileType[2])=="ps" || tolower(fileType[2])=="eps")
postscript(file = fileName, paper="default", horizontal=FALSE, encoding="ISOLatin1.enc",...)
else if (tolower(fileType[2])=="emf" && .Platform$OS.type == "windows")
eval(call(paste("win","metafile",sep="."), filename=quote(fileName),quote(...)))
# next line would be better than previous but generates an unjustified note about violation of CRAN policy
# win.metafile(filename = fileName,...)
else if (tolower(fileType[2])=="jpg")
jpeg(filename = fileName, ...)
else if (tolower(fileType[2])=="tif")
tiff(filename = fileName, ...)
else if (tolower(fileType[2])=="bmp")
bmp(filename = fileName, ...)
else if (tolower(fileType[2])=="png")
png(filename = fileName, ...)
else if (tolower(fileType[2])=="tiff")
bitmap(file = fileName, type="tiff24nc", ...)
else interactive = TRUE
if(interactive && dev.cur() == 1) # opens the default screen device on this platform if no device is open
dev.new()
invisible()
}
# construct named inetrvals from the discretization bounds
intervalNames<-function(sortedBounds, noDecimalsInValueName=2) {
nms <- c(paste("<=", sprintf("%.*f",noDecimalsInValueName, sortedBounds[1]), sep=""))
i <- 2
while (i<=length(sortedBounds)) {
# nms <- c(nms, paste(">", sortedBounds[i-1], ", <=", sortedBounds[i], sep=""))
nms <- c(nms, paste("(", sprintf("%.*f",noDecimalsInValueName,sortedBounds[i-1]), ", ",sprintf("%.*f",noDecimalsInValueName,sortedBounds[i]), "]",sep=""))
i <- i+1
}
nms <- c(nms, paste(">", sprintf("%.*f",noDecimalsInValueName,sortedBounds[length(sortedBounds)]), sep=""))
nms
}
# finds middle points of discretization intervals
intervalMidPoint <- function(data, boundsList, midPointMethod=c("equalFrequency", "equalWidth")) {
method <- match.arg(midPointMethod)
if (is.null(boundsList))
return(NULL)
midPoints <- list()
for (i in 1:length(boundsList)) {
attrName <- names(boundsList)[i]
discValues <- apply( outer(data[,attrName], boundsList[[i]], ">"), 1, sum)
midPoints[[i]] <- vector(mode="numeric", length=length(boundsList[[i]])+1)
for (j in 0:length(boundsList[[i]])) {
values <- data[discValues==j,attrName]
if (method == "equalFrequency")
midPoints[[i]][j+1] <- median(values)
else if (method=="equalWidth")
midPoints[[i]][j+1] <- (min(values) + max(values)) / 2.0
}
}
names(midPoints) <- names(boundsList)
midPoints
}
reliabilityPlot<-function(probScore, trueProb, titleText="", boxing="equipotent", noBins=10, classValue = 1, printWeight=FALSE) {
# depending on boxing, select thresholds
boxing=match.arg(boxing,c("unique","equidistant","equipotent"))
if (boxing == "unique") {
uniqueScore = unique(probScore)
threshold = uniqueScore[order(uniqueScore)]
noBins = length(threshold)
}
else if (boxing=="equidistant"){
threshold=seq(min(probScore),max(probScore),length.out=noBins)[-1]
}
else if(boxing=="equipotent") {
idxs<-as.integer(seq(1,length(probScore),length.out=noBins))[-1]
idxs[length(idxs)]<-length(probScore)
threshold<-sort(probScore)[idxs]
}
# prepare classValue and trueProb to be of proper types
if (is.factor(classValue))
classValue <- as.numeric(classValue)
if (is.factor(trueProb))
trueProb <- as.numeric(as.numeric(trueProb)==classValue)
if (is.factor(probScore))
probScore <- as.numeric(as.numeric(probScore)==classValue)
# sort probability score and do likewise with true probabilities
sortedScoreIdx = order(probScore)
scoreSorted = probScore[sortedScoreIdx]
trueProbSorted= trueProb[sortedScoreIdx]
brier = 0.0
values = rep(0, length(threshold))
count = rep(0, length(threshold))
classCount = rep(0, length(threshold))
tIdx = 1
for (i in 1:length(scoreSorted)) {
if (scoreSorted[i] <= threshold[tIdx]) {
count[tIdx] = count[tIdx] + 1
}
else {
tIdx = tIdx+1
count[tIdx] = 1
}
if (trueProbSorted[i] == classValue)
classCount[tIdx] = classCount[tIdx] + 1
brier = brier + (trueProbSorted[i] - scoreSorted[i])^2
}
values = classCount/count
brier = brier / length(scoreSorted)
xlim = c(0,1)
ylim = c(0,1)
plot(xlim, ylim, type = "l", xlab="predicted probability", ylab="true probability" )
points(threshold, values, type="p", pch=20)
if (printWeight)
text(threshold, values, labels = count, pos = 3, offset = 0.2, cex = 0.7)
#subText = sprintf("Brier = %.4g", brier)
subText = ""
title(main=titleText, sub=subText)
}
#generate k-fold cross validation of n instances
cvGen<-function(n, k) {
v <- 1:k
vec <- array(1:k,dim=n)
sample(vec, size=n)
}
# generate stratified k-fold cross validation partition based on classes in classVal
cvGenStratified<-function(classVal,k) {
classVal<-factor(classVal)
levs = factor(levels(classVal), levels=levels(classVal))
classFreq <- table(classVal)
noClasses <- length(levs)
n <- length(classVal)
srt <- order(classVal)
vec <- array(1:k,dim=n)
cv <- array(0,dim=n)
cv[srt]<-vec
for (i in 1:noClasses)
cv[classVal==levs[i]] <- sample(cv[classVal==levs[i]], size=classFreq[i], replace=F)
cv
}
# collect instances with the same position in different sublists of lst
gatherFromList<-function(lst){
m <-list()
for (j in 1:length(lst[[1]])) {
m[[j]]<-vector(mode="numeric",length=length(lst))
names(m[[j]]) <- names(lst)
}
names(m)<-names(lst[[1]])
for (i in 1:length(lst)){
for (j in 1:length(lst[[i]])){
if (is.null(dim(lst[[i]][[j]])))
m[[j]][i] <- lst[[i]][[j]]
}
}
m
}
|
/scratch/gouwar.j/cran-all/cranData/CORElearn/R/util.R
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.