content
stringlengths
0
14.9M
filename
stringlengths
44
136
#' @useDynLib BLPestimatoR #' @importFrom Rcpp sourceCpp NULL #' Calculates derivatives of all shares with respect to all non-linear parameters in a given market. #' #' @param blp_data data object created by the function \code{BLP_data}, #' @param par_theta2 matrix with column and rownames providing a starting value for the optimization routine (see details), #' @param market character specifying the market in which derivatives are calculated, #' @param printLevel level of output information (default = 1) #' #' @return Returns a numeric matrix with derivatives. #' Cell in row i and col j is the derivative of share i with respect to parameter j. #' #' @details NA's in \code{par_theta2} entries indicate the exclusion from estimation, i.e. the coefficient is assumed to be zero. #' If only unobserved heterogeneity is used (no demographics), the column name of \code{par_theta2} must be "unobs_sd". #' With demographics the colnames must match the names of provided demographics (as in \code{demographic_draws}) and "unobs_sd". #' Row names of \code{par_theta2} must match random coefficients as specified in \code{model}. Constants must be named "(Intercept)". #' #' @examples #' K<-2 #number of random coefficients #' data <- simulate_BLP_dataset(nmkt = 25, nbrn = 20, #' Xlin = c("price", "x1", "x2", "x3", "x4", "x5"), #' Xexo = c("x1", "x2", "x3", "x4", "x5"), #' Xrandom = paste0("x",1:K),instruments = paste0("iv",1:10), #' true.parameters = list(Xlin.true.except.price = rep(0.2,5), #' Xlin.true.price = -0.2, #' Xrandom.true = rep(2,K), #' instrument.effects = rep(2,10), #' instrument.Xexo.effects = rep(1,5)), #' price.endogeneity = list( mean.xi = -2, #' mean.eita = 0, #' cov = cbind( c(1,0.7), c(0.7,1))), #' printlevel = 0, seed = 234234 ) #' #' #' model <- as.formula("shares ~ price + x1 + x2 + x3 + x4 + x5 | #' x1 + x2 + x3 + x4 + x5 | #' 0+ x1 + x2 | #' iv1 + iv2 + iv3 + iv4 + iv5 + iv6 + iv7 + iv8 +iv9 +iv10" ) #' #' blp_data <- BLP_data(model = model, market_identifier="cdid", #' product_id = "prod_id", #' productData = data, #' integration_method = "MLHS" , #' integration_accuracy = 40, #' integration_seed = 1) #' #' theta2 <- matrix(c(0.5,2), nrow=2) #' rownames(theta2) <- c("x1","x2") #' colnames(theta2) <- "unobs_sd" #' #' derivatives1 <- dstdtheta_wrap( blp_data=blp_data, #' par_theta2 = theta2, #' market = 2) #' @importFrom methods is #' #' @export dstdtheta_wrap <- function( blp_data, par_theta2, market, printLevel = 1 ){ nobs <- blp_data$parameters$nobs K <- blp_data$parameters$K original_market_id <- blp_data$parameters$market_id_char_in original_product_id <- blp_data$parameters$product_id market <- as.character(market) ## check inouts if( !is(blp_data,"blp_data")) stop("Input has wrong class. Call BLP_data() first.") if( printLevel > 0) cat("Mean utility (delta) is used as provided in the BLP_data() function.") if( missing(market)) stop("Specify a valid market.") if( (length(market) != 1) ) stop("Only one market can be specified.") if( !any(market %in% original_market_id) ) stop("Market is not available in provided dataset.") ## calculate share evaluations (all markets) current_delta <- blp_data$data$delta start_theta2 <- .prepare_theta2(par_theta2, final_col_names_par = c( "unobs_sd" , blp_data$parameters$demographic_names), final_row_names_par = colnames(blp_data$data$X_rand), K = blp_data$parameters$K, M = blp_data$parameters$total_demogr) theta2Mat<- .get.theta2.reshape(theta2.in = start_theta2$par_theta2, totalRC = blp_data$parameters$K, total.demogr.in = blp_data$parameters$total_demogr, indices.in = start_theta2$indices, fill = 0 ) # NA are replaced by zeros to simplify x * par in getExpMu expmu <- getExpMu( theta2Matrix = theta2Mat, qv = blp_data$integration$drawsRcMktShape, Xrandom = blp_data$data$X_rand, cdid = blp_data$parameters$market_id, demographics = blp_data$integration$drawsDemMktShape) sij <- getSij(expmu = expmu, expdelta = exp(current_delta), cdindex = blp_data$parameters$cdindex ) ## extract market information indicator_prod <- which( market == original_market_id) indicator_mkt <- which( market == unique(original_market_id)) X_rand_mkt <- blp_data$data$X_rand[indicator_prod,,drop=FALSE] drawsRcMktShape_mkt <- blp_data$integration$drawsRcMktShape[indicator_mkt,,drop=FALSE] if(blp_data$parameters$total_demogr > 0){ drawsDemMktShape_mkt <- blp_data$integration$drawsDemMktShape[indicator_mkt,,drop=FALSE] }else{ drawsDemMktShape_mkt<- matrix(NA) } sij_mkt <- sij[indicator_prod,,drop=FALSE] ## calculate dstdtheta_c in the given market out <- dstdtheta_c( sijt_arma = sij_mkt, indices = start_theta2$indices, xt_arma = blp_data$data$X_rand[indicator_prod,,drop=FALSE], qvt_arma = drawsRcMktShape_mkt, dt_arma = drawsDemMktShape_mkt, weights_arma = blp_data$integration$weights ) ## preparing output rownames(out) <- paste0("share_" ,original_product_id[indicator_prod]) names_par <- kronecker( start_theta2$final_col_names_par , start_theta2$final_row_names_par, paste, sep="*") relevantRcDem_index <- start_theta2$indices[,"row"] + max( start_theta2$indices[,"row"] ) * ( start_theta2$indices[,"col"] - 1 ) colnames(out) <- names_par[relevantRcDem_index] return(out) } #' Calculates derivatives of all shares with respect to all mean utilities in a given market. #' #' @param blp_data data object created by the function \code{BLP_data}, #' @param par_theta2 matrix with column and rownames providing a starting value for the optimization routine (see details), #' @param market character specifying the market in which derivatives are calculated, #' @param printLevel level of output information (default = 1) #' #' @return Returns a numeric matrix with derivatives. #' Cell in row i and col j is the derivative of share i with respect to mean utility j. #' #' @details NA's in \code{par_theta2} entries indicate the exclusion from estimation, i.e. the coefficient is assumed to be zero. #' If only unobserved heterogeneity is used (no demographics), the column name of \code{par_theta2} must be "unobs_sd". #' With demographics the colnames must match the names of provided demographics (as in \code{demographic_draws}) and "unobs_sd". #' Row names of \code{par_theta2} must match random coefficients as specified in \code{model}. Constants must be named "(Intercept)". #' #' @examples #' K<-2 #number of random coefficients #' data <- simulate_BLP_dataset(nmkt = 25, nbrn = 20, #' Xlin = c("price", "x1", "x2", "x3", "x4", "x5"), #' Xexo = c("x1", "x2", "x3", "x4", "x5"), #' Xrandom = paste0("x",1:K),instruments = paste0("iv",1:10), #' true.parameters = list(Xlin.true.except.price = rep(0.2,5), #' Xlin.true.price = -0.2, #' Xrandom.true = rep(2,K), #' instrument.effects = rep(2,10), #' instrument.Xexo.effects = rep(1,5)), #' price.endogeneity = list( mean.xi = -2, #' mean.eita = 0, #' cov = cbind( c(1,0.7), c(0.7,1))), #' printlevel = 0, seed = 234234 ) #' #' #' model <- as.formula("shares ~ price + x1 + x2 + x3 + x4 + x5 | #' x1 + x2 + x3 + x4 + x5 | #' 0+ x1 + x2 | #' iv1 + iv2 + iv3 + iv4 + iv5 + iv6 + iv7 + iv8 +iv9 +iv10" ) #' #' blp_data <- BLP_data(model = model, market_identifier="cdid", #' product_id = "prod_id", #' productData = data, #' integration_method = "MLHS" , #' integration_accuracy = 40, #' integration_seed = 1) #' #' theta2 <- matrix(c(0.5,2), nrow=2) #' rownames(theta2) <- c("x1","x2") #' colnames(theta2) <- "unobs_sd" #' #' derivatives2 <- dstddelta_wrap( blp_data=blp_data, #' par_theta2 = theta2, #' market = 2) #' @importFrom methods is #' #' @export dstddelta_wrap <- function( blp_data, par_theta2, market, printLevel = 1 ){ nobs <- blp_data$parameters$nobs K <- blp_data$parameters$K original_market_id <- blp_data$parameters$market_id_char_in original_product_id <- blp_data$parameters$product_id market <- as.character(market) ## check inouts if( !is(blp_data,"blp_data")) stop("Input has wrong class. Call BLP_data() first.") if( printLevel > 0) cat("Mean utility (delta) is used as provided in the BLP_data() function.") if( missing(market)) stop("Specify a valid market.") if( (length(market) != 1) ) stop("Only one market can be specified.") if( !any(market %in% original_market_id) ) stop("Market is not available in provided dataset.") ## calculate share evaluations (all markets) current_delta <- blp_data$data$delta start_theta2 <- .prepare_theta2(par_theta2, final_col_names_par = c( "unobs_sd" , blp_data$parameters$demographic_names), final_row_names_par = colnames(blp_data$data$X_rand), K = blp_data$parameters$K, M = blp_data$parameters$total_demogr) theta2Mat<- .get.theta2.reshape(theta2.in = start_theta2$par_theta2, totalRC = blp_data$parameters$K, total.demogr.in = blp_data$parameters$total_demogr, indices.in = start_theta2$indices, fill = 0 ) # NA are replaced by zeros to simplify x * par in getExpMu expmu <- getExpMu( theta2Matrix = theta2Mat, qv = blp_data$integration$drawsRcMktShape, Xrandom = blp_data$data$X_rand, cdid = blp_data$parameters$market_id, demographics = blp_data$integration$drawsDemMktShape) sij <- getSij(expmu = expmu, expdelta = exp(current_delta), cdindex = blp_data$parameters$cdindex ) ## extract market information indicator_prod <- which( market == original_market_id) indicator_mkt <- which( market == unique(original_market_id)) X_rand_mkt <- blp_data$data$X_rand[indicator_prod,,drop=FALSE] drawsRcMktShape_mkt <- blp_data$integration$drawsRcMktShape[indicator_mkt,,drop=FALSE] if(blp_data$parameters$total_demogr > 0){ drawsDemMktShape_mkt <- blp_data$integration$drawsDemMktShape[indicator_mkt,,drop=FALSE] }else{ drawsDemMktShape_mkt<- matrix(NA) } sij_mkt <- sij[indicator_prod,,drop=FALSE] ## calculate dstdtheta_c in the given market out <- dstddelta_c( sijt = sij_mkt, weights= blp_data$integration$weights ) colnames(out) <- paste0("meanUtility_" ,original_product_id[indicator_prod]) rownames(out) <- paste0("share_" ,original_product_id[indicator_prod]) return(out) } #' Calculating the GMM objective for a given set of non-linear parameters. #' #' @param blp_data data object created by the function \code{BLP_data}, #' @param par_theta2 matrix with column and rownames providing a starting value for the optimization routine (see details), #' @param printLevel level of output information ranges from 1 (no GMM results) to 4 (every norm in the contraction mapping) #' #' @return Returns a list with results from the GMM evaluation. #' \describe{ #' \item{\code{local_min}}{GMM point evaluation} #' \item{\code{gradient}}{GMM derivative with respect to non-linear parameters} #' \item{\code{delta}}{result of the contraction mapping} #' \item{\code{xi}}{residuals of GMM evaluation} } #' #' @details NA's in \code{par_theta2} entries indicate the exclusion from estimation, i.e. the coefficient is assumed to be zero. #' If only unobserved heterogeneity is used (no demographics), the column name of \code{par_theta2} must be "unobs_sd". #' With demographics the colnames must match the names of provided demographics (as in \code{demographic_draws}) and "unobs_sd". #' Row names of \code{par_theta2} must match random coefficients as specified in \code{model}. Constants must be named "(Intercept)". #' #' @examples #' K<-2 #number of random coefficients #' data <- simulate_BLP_dataset(nmkt = 25, nbrn = 20, #' Xlin = c("price", "x1", "x2", "x3", "x4", "x5"), #' Xexo = c("x1", "x2", "x3", "x4", "x5"), #' Xrandom = paste0("x",1:K),instruments = paste0("iv",1:10), #' true.parameters = list(Xlin.true.except.price = rep(0.2,5), #' Xlin.true.price = -0.2, #' Xrandom.true = rep(2,K), #' instrument.effects = rep(2,10), #' instrument.Xexo.effects = rep(1,5)), #' price.endogeneity = list( mean.xi = -2, #' mean.eita = 0, #' cov = cbind( c(1,0.7), c(0.7,1))), #' printlevel = 0, seed = 234234 ) #' #' #' model <- as.formula("shares ~ price + x1 + x2 + x3 + x4 + x5 | #' x1 + x2 + x3 + x4 + x5 | #' 0+ x1 + x2 | #' iv1 + iv2 + iv3 + iv4 + iv5 + iv6 + iv7 + iv8 +iv9 +iv10" ) #' #' blp_data <- BLP_data(model = model, market_identifier="cdid", #' product_id = "prod_id", #' productData = data, #' integration_method = "MLHS" , #' integration_accuracy = 40, #' integration_seed = 1) #' #' theta_guesses <- matrix(c(0.5,2), nrow=2) #' rownames(theta_guesses) <- c("x1","x2") #' colnames(theta_guesses) <- "unobs_sd" #' #' gmm <- gmm_obj_wrap( blp_data=blp_data, #' par_theta2 = theta_guesses, #' printLevel = 2) #' gmm$local_min #' #' @importFrom methods is #' #' @export gmm_obj_wrap <- function( blp_data, par_theta2, printLevel = 2){ nobs <- blp_data$parameters$nobs K <- blp_data$parameters$K ## BLP_data class if( !is(blp_data,"blp_data")) stop("Input has wrong class. Call BLP_data() first.") ## calc matrices Z <- blp_data$data$Z W <- try( solve((t(Z) %*% Z)) ) if (any(class(W) == "try-error")) stop("Problems with singular matrizes. This might be caused by (nearly) linear dependent regressors or weak instruments.") xzwz <- t(blp_data$data$X_lin) %*% Z %*% W %*% t(Z) xzwzx <- xzwz %*% blp_data$data$X_lin invxzwzx <- try( solve(xzwzx) ) if (any(class(invxzwzx) == "try-error")) stop("Problems with singular matrices. This might be caused by (nearly) linear dependent regressors or weak instruments.") blp_data$data$W <- W blp_data$data$xzwz <- xzwz blp_data$data$invxzwzx <- invxzwzx ## check and prepare par_theta2 start_theta2 <- .prepare_theta2(par_theta2, final_col_names_par = c( "unobs_sd" , blp_data$parameters$demographic_names), final_row_names_par = colnames(blp_data$data$X_rand), K = blp_data$parameters$K, M = blp_data$parameters$total_demogr) # global variables with blp_results: blp_results <- new.env( parent = emptyenv()) blp_results$deltaOld <- blp_data$data$delta blp_results$innerItAll <- c() blp_results$negShares<- FALSE blp_results$gradient <- rep(NA_real_, start_theta2$total_par ) innerItAll_out <- blp_results$innerItAll start <- Sys.time() finalTmp <- gmm_obj(par_theta2 = start_theta2$par_theta2,#### indices=start_theta2$indices, blp_results=blp_results, blp_data=blp_data, printLevel=printLevel) end <- Sys.time() delta_out<- blp_results$deltaOld theta_rc_out <- start_theta2$par_theta2 theta_lin_out <- blp_results$bet sij_out <- blp_results$sij local_min_out <- finalTmp gradient_out <- blp_results$gradient jacob_out <- blp_results$jacobian xi_out <- blp_results$xi if( printLevel > 0) print(local_min_out) out <- list("delta" = delta_out, "theta_rc" = theta_rc_out , "theta_lin" = theta_lin_out , "sij" = sij_out , "local_min" = local_min_out , "gradient" = gradient_out , "jacob" = jacob_out, "xi" = xi_out, "time" = end - start) names(out$delta) <- paste0( blp_data$parameters$product_id , "_" , blp_data$parameters$market_id_char_in ) rownames(out$sij) <- paste0( blp_data$parameters$product_id , "_" , blp_data$parameters$market_id_char_in ) colnames(out$sij) <- paste0("individual_", 1: blp_data$integration$amountDraws) names_par <- kronecker( start_theta2$final_col_names_par , start_theta2$final_row_names_par , paste, sep="*") relevantRcDem_index <- start_theta2$indices[,"row"] + max( start_theta2$indices[,"row"] ) * ( start_theta2$indices[,"col"] - 1 ) rownames(out$gradient) <- names_par[relevantRcDem_index] return( out ) } #' Performs a contration mapping for a given set of non-linear parameters. #' #' @param blp_data data object created by the function \code{BLP_data}, #' @param par_theta2 matrix with column and rownames providing a starting value for the optimization routine (see details), #' @param printLevel level of output information (default = 1) #' #' @return Returns an object of class "blp_cm" with results from the contraction mapping. #' \describe{ #' \item{\code{delta}}{resulting vector of mean utilities after the contraction mapping} #' \item{\code{counter}}{inner iterations needed to convergence} #' \item{\code{sij}}{market share integral evaluations for each product (in rows) for the final mean utility} } #' #' @details NA's in \code{par_theta2} entries indicate the exclusion from estimation, i.e. the coefficient is assumed to be zero. #' If only unobserved heterogeneity is used (no demographics), the column name of \code{par_theta2} must be "unobs_sd". #' With demographics the colnames must match the names of provided demographics (as in \code{demographic_draws}) and "unobs_sd". #' Row names of \code{par_theta2} must match random coefficients as specified in \code{model}. Constants must be named "(Intercept)". #' #' Starting guesses for the contraction mapping are provided with \code{BLP_data}. #' #' @examples #' K<-2 #number of random coefficients #' data <- simulate_BLP_dataset(nmkt = 25, nbrn = 20, #' Xlin = c("price", "x1", "x2", "x3", "x4", "x5"), #' Xexo = c("x1", "x2", "x3", "x4", "x5"), #' Xrandom = paste0("x",1:K),instruments = paste0("iv",1:10), #' true.parameters = list(Xlin.true.except.price = rep(0.2,5), #' Xlin.true.price = -0.2, #' Xrandom.true = rep(2,K), #' instrument.effects = rep(2,10), #' instrument.Xexo.effects = rep(1,5)), #' price.endogeneity = list( mean.xi = -2, #' mean.eita = 0, #' cov = cbind( c(1,0.7), c(0.7,1))), #' printlevel = 0, seed = 234234 ) #' #' #' model <- as.formula("shares ~ price + x1 + x2 + x3 + x4 + x5 | #' x1 + x2 + x3 + x4 + x5 | #' 0+ x1 + x2 | #' iv1 + iv2 + iv3 + iv4 + iv5 + iv6 + iv7 + iv8 +iv9 +iv10" ) #' #' blp_data <- BLP_data(model = model, market_identifier="cdid", #' product_id = "prod_id", #' productData = data, #' integration_method = "MLHS" , #' integration_accuracy = 40, #' integration_seed = 1) #' #' theta_guesses <- matrix(c(0.5,2), nrow=2) #' rownames(theta_guesses) <- c("x1","x2") #' colnames(theta_guesses) <- "unobs_sd" #' #' delta_eval <- getDelta_wrap( blp_data=blp_data, #' par_theta2 = theta_guesses, #' printLevel = 4) #' #' @export getDelta_wrap <- function(blp_data, par_theta2, printLevel = 1){ nobs <- blp_data$parameters$nobs K <- blp_data$parameters$K ## BLP_data class if( !is(blp_data,"blp_data")) stop("Input has wrong class. Call BLP_data() first.") ## check and prepare par_theta2 start_theta2 <- .prepare_theta2(par_theta2, final_col_names_par = c( "unobs_sd" , blp_data$parameters$demographic_names), final_row_names_par = colnames(blp_data$data$X_rand), K = blp_data$parameters$K, M = blp_data$parameters$total_demogr) deltaOld <- blp_data$data$delta theta2Mat<- .get.theta2.reshape(theta2.in = start_theta2$par_theta2, totalRC = blp_data$parameters$K, total.demogr.in = blp_data$parameters$total_demogr, indices.in = start_theta2$indices, fill = 0 ) # NA are replaced by zeros to simplify x * par in getExpMu #Call C++ function: tmp <- getDelta( theta2 = theta2Mat, cdid = blp_data$parameters$market_id, cdindex = blp_data$parameters$cdindex, innerCrit = blp_data$parameters$inner_tol, indices = start_theta2$indices, innerMaxit= blp_data$parameters$inner_maxit, Xrandom = blp_data$data$X_rand, obsshare = blp_data$data$shares, deltaOld = deltaOld, nodesDemMktShape = blp_data$integration$drawsDemMktShape , nodesRcMktShape = blp_data$integration$drawsRcMktShape, weights = blp_data$integration$weights, printLevel = printLevel) names(tmp$delta) <- paste0( blp_data$parameters$product_id , "_" , blp_data$parameters$market_id_char_in ) rownames(tmp$sij) <- paste0("share_", blp_data$parameters$product_id , "_" , blp_data$parameters$market_id_char_in ) colnames(tmp$sij) <- paste0("individual_", 1: blp_data$integration$amountDraws) out <- list( delta = tmp$delta, counter = tmp$counter, sij = tmp$sij, theta2 = start_theta2 ) class(out) <- "blp_cm" return(out) } #' Calculates information related to predicted shares for a given set of non-linear parameters and data. #' #' @param blp_data data object created by the function \code{BLP_data} (provides, among others, mean utilitys and integration draws), #' @param par_theta2 matrix with column and rownames providing the evaluation point (see details), #' @param printLevel level of output information (default = 1) #' #' @return Returns a list with information related to predicted shares. #' #' @examples #' K<-2 #number of random coefficients #' data <- simulate_BLP_dataset(nmkt = 25, nbrn = 20, #' Xlin = c("price", "x1", "x2", "x3", "x4", "x5"), #' Xexo = c("x1", "x2", "x3", "x4", "x5"), #' Xrandom = paste0("x",1:K),instruments = paste0("iv",1:10), #' true.parameters = list(Xlin.true.except.price = rep(0.2,5), #' Xlin.true.price = -0.2, #' Xrandom.true = rep(2,K), #' instrument.effects = rep(2,10), #' instrument.Xexo.effects = rep(1,5)), #' price.endogeneity = list( mean.xi = -2, #' mean.eita = 0, #' cov = cbind( c(1,0.7), c(0.7,1))), #' printlevel = 0, seed = 234234 ) #' #' model <- as.formula("shares ~ price + x1 + x2 + x3 + x4 + x5 | #' x1 + x2 + x3 + x4 + x5 | #' 0+ x1 + x2 | #' iv1 + iv2 + iv3 + iv4 + iv5 + iv6 + iv7 + iv8 +iv9 +iv10" ) #' #' blp_data <- BLP_data(model = model, market_identifier="cdid", #' product_id = "prod_id", #' productData = data, #' integration_method = "MLHS" , #' integration_accuracy = 40, #' integration_seed = 1) #' #' theta_guesses <- matrix(c(0.5,2), nrow=2) #' rownames(theta_guesses) <- c("x1","x2") #' colnames(theta_guesses) <- "unobs_sd" #' #' shares <- getShareInfo( blp_data=blp_data, #' par_theta2 = theta_guesses, #' printLevel = 4) #' #' @export getShareInfo <- function(blp_data, par_theta2, printLevel = 1){ nobs <- blp_data$parameters$nobs K <- blp_data$parameters$K ## BLP_data class if( !is(blp_data,"blp_data")) stop("Input has wrong class. Call BLP_data() first.") ## mean utility if( printLevel > 0){ cat("Mean utility (delta) is used as provided in the BLP_data() function.") } current_delta <- blp_data$data$delta ## check and prepare par_theta2 start_theta2 <- .prepare_theta2(par_theta2, final_col_names_par = c( "unobs_sd" , blp_data$parameters$demographic_names), final_row_names_par = colnames(blp_data$data$X_rand), K = blp_data$parameters$K, M = blp_data$parameters$total_demogr) theta2Mat<- .get.theta2.reshape(theta2.in = start_theta2$par_theta2, totalRC = blp_data$parameters$K, total.demogr.in = blp_data$parameters$total_demogr, indices.in = start_theta2$indices, fill = 0 ) # NA are replaced by zeros to simplify x * par in getExpMu colnames(theta2Mat) <- c( "unobs_sd" , blp_data$parameters$demographic_names) rownames(theta2Mat) <- colnames(blp_data$data$X_rand) # get the exp of individual part of utility: expMu <- getExpMu( theta2Matrix = theta2Mat, qv = blp_data$integration$drawsRcMktShape, Xrandom = blp_data$data$X_rand, cdid = blp_data$parameters$market_id, demographics = blp_data$integration$drawsDemMktShape ) ; rownames(expMu) <- paste0("expMu_", blp_data$parameters$product_id , "_" , blp_data$parameters$market_id_char_in ) colnames(expMu) <- paste0("individual_", 1: blp_data$integration$amountDraws) # calculate individual choice probabilities Sij <- getSij(expmu = expMu, expdelta = exp(current_delta), cdindex = blp_data$parameters$cdindex ) rownames(Sij) <- paste0("share_", blp_data$parameters$product_id , "_" , blp_data$parameters$market_id_char_in ) colnames(Sij) <- paste0("individual_", 1: blp_data$integration$amountDraws) # calc. aggregated choice probabilities, i.e. shares shares <- c( Sij %*% blp_data$integration$weights) names(shares) <- paste0( blp_data$parameters$product_id , "_" , blp_data$parameters$market_id_char_in ) out <- list( "shares"=shares, "sij" = Sij, "expMu" = expMu, "theta2" = theta2Mat) class(out) <- "shareInfo" return(out) } #' Calculating the Jacobian for a given set of non-linear parameters and mean utilities. #' #' @param blp_data data object created by the function \code{BLP_data}, #' @param par_theta2 matrix with column and rownames providing the evaluation point (see details), #' @param printLevel level of output information (default = 1) #' #' @return Returns a matrix with the jacobian (products in rows, parameters in columns). #' #' @details NA's in \code{par_theta2} entries indicate the exclusion from estimation, i.e. the coefficient is assumed to be zero. #' If only unobserved heterogeneity is used (no demographics), the column name of \code{par_theta2} must be "unobs_sd". #' With demographics the colnames must match the names of provided demographics (as in \code{demographic_draws}) and "unobs_sd". #' Row names of \code{par_theta2} must match random coefficients as specified in \code{model}. Constants must be named "(Intercept)". #' #' @examples #' K<-2 #number of random coefficients #' data <- simulate_BLP_dataset(nmkt = 25, nbrn = 20, #' Xlin = c("price", "x1", "x2", "x3", "x4", "x5"), #' Xexo = c("x1", "x2", "x3", "x4", "x5"), #' Xrandom = paste0("x",1:K),instruments = paste0("iv",1:10), #' true.parameters = list(Xlin.true.except.price = rep(0.2,5), #' Xlin.true.price = -0.2, #' Xrandom.true = rep(2,K), #' instrument.effects = rep(2,10), #' instrument.Xexo.effects = rep(1,5)), #' price.endogeneity = list( mean.xi = -2, #' mean.eita = 0, #' cov = cbind( c(1,0.7), c(0.7,1))), #' printlevel = 0, seed = 234234 ) #' #' #' model <- as.formula("shares ~ price + x1 + x2 + x3 + x4 + x5 | #' x1 + x2 + x3 + x4 + x5 | #' 0+ x1 + x2 | #' iv1 + iv2 + iv3 + iv4 + iv5 + iv6 + iv7 + iv8 +iv9 +iv10" ) #' #' blp_data <- BLP_data(model = model, market_identifier="cdid", #' product_id = "prod_id", #' productData = data, #' integration_method = "MLHS" , #' integration_accuracy = 40, #' integration_seed = 1) #' #' theta_guesses <- matrix(c(0.5,2), nrow=2) #' rownames(theta_guesses) <- c("x1","x2") #' colnames(theta_guesses) <- "unobs_sd" #' #' jacobian <- getJacobian_wrap(blp_data=blp_data, #' par_theta2 = theta_guesses, #' printLevel = 2) #' head(jacobian) #' @export getJacobian_wrap <- function( blp_data, par_theta2, printLevel = 1){ nobs <- blp_data$parameters$nobs K <- blp_data$parameters$K ## BLP_data class if( !is(blp_data,"blp_data")) stop("Input has wrong class. Call BLP_data() first.") ## mean utility if( printLevel > 0){ cat("Mean utility (delta) is used as provided in the BLP_data() function.") } current_delta <- blp_data$data$delta ## check and prepare par_theta2 start_theta2 <- .prepare_theta2(par_theta2, final_col_names_par = c( "unobs_sd" , blp_data$parameters$demographic_names), final_row_names_par = colnames(blp_data$data$X_rand), K = blp_data$parameters$K, M = blp_data$parameters$total_demogr) theta2Mat<- .get.theta2.reshape(theta2.in = start_theta2$par_theta2, totalRC = blp_data$parameters$K, total.demogr.in = blp_data$parameters$total_demogr, indices.in = start_theta2$indices, fill = 0 ) # NA are replaced by zeros to simplify x * par in getExpMu expmu <- getExpMu( theta2Matrix = theta2Mat, qv = blp_data$integration$drawsRcMktShape, Xrandom = blp_data$data$X_rand, cdid = blp_data$parameters$market_id, demographics = blp_data$integration$drawsDemMktShape) sij <- getSij(expmu = expmu, expdelta = exp(current_delta), cdindex = blp_data$parameters$cdindex ) jacobian <- jacob_c(sij = sij, indices = start_theta2$indices, blp_data = blp_data$data, blp_parameters = blp_data$parameters, blp_integration = blp_data$integration, printLevel = printLevel) rownames(jacobian) <- paste0( blp_data$parameters$product_id , "_" , blp_data$parameters$market_id_char_in ) names_par <- kronecker( start_theta2$final_col_names_par , start_theta2$final_row_names_par , paste, sep="*") relevantRcDem_index <- start_theta2$indices[,"row"] + max( start_theta2$indices[,"row"] ) * ( start_theta2$indices[,"col"] - 1 ) colnames(jacobian) <- names_par[relevantRcDem_index] return( jacobian ) }
/scratch/gouwar.j/cran-all/cranData/BLPestimatoR/R/wrappers.R
## ----setup, include = FALSE--------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----------------------------------------------------------------------------- library(BLPestimatoR) ## ----------------------------------------------------------------------------- nevos_model <- as.formula("share ~ price + productdummy | 0+ productdummy | price + sugar + mushy | 0+ IV1 + IV2 + IV3 + IV4 + IV5 + IV6 + IV7 + IV8 + IV9 + IV10 + IV11 + IV12 + IV13 + IV14 + IV15 + IV16 + IV17 + IV18 + IV19 + IV20") ## ----------------------------------------------------------------------------- head(productData_cereal) ## ----------------------------------------------------------------------------- demographicData_cereal$income[1:4, 1:5] demographicData_cereal$incomesq[1:4, 1:5] demographicData_cereal$age[1:4, 1:5] demographicData_cereal$child[1:4, 1:5] ## ----------------------------------------------------------------------------- originalDraws_cereal$constant[1:4, 1:5] # renaming constants: names(originalDraws_cereal)[1] <- "(Intercept)" originalDraws_cereal$price[1:4, 1:5] originalDraws_cereal$sugar[1:4, 1:5] originalDraws_cereal$mushy[1:4, 1:5] ## ----------------------------------------------------------------------------- productData_cereal$startingGuessesDelta <- c(log(w_guesses_cereal)) # include orig. draws in the product data cereal_data <- BLP_data( model = nevos_model, market_identifier = "cdid", par_delta = "startingGuessesDelta", product_identifier = "product_id", productData = productData_cereal, demographic_draws = demographicData_cereal, blp_inner_tol = 1e-6, blp_inner_maxit = 5000, integration_draws = originalDraws_cereal, integration_weights = rep(1 / 20, 20) ) ## ----------------------------------------------------------------------------- # before: theta_guesses_cereal theta_guesses_cereal[theta_guesses_cereal == 0] <- NA colnames(theta_guesses_cereal) <- c("unobs_sd", "income", "incomesq", "age", "child") rownames(theta_guesses_cereal) <- c("(Intercept)", "price", "sugar", "mushy") # correctly named: theta_guesses_cereal ## ----------------------------------------------------------------------------- cereal_est <- estimateBLP( blp_data = cereal_data, par_theta2 = theta_guesses_cereal, solver_method = "BFGS", solver_maxit = 1000, solver_reltol = 1e-6, standardError = "heteroskedastic", extremumCheck = FALSE, printLevel = 1 ) summary(cereal_est) ## ----------------------------------------------------------------------------- cereal_data2 <- BLP_data( model = nevos_model, market_identifier = "cdid", product_identifier = "product_id", productData = productData_cereal, integration_method = "MLHS", integration_accuracy = 20, integration_seed = 213 ) cereal_est2 <- estimateBLP(blp_data = cereal_data2, printLevel = 1) summary(cereal_est2) ## ----------------------------------------------------------------------------- # extract parameters from output theta1_price <- cereal_est$theta_lin["price", ] theta2 <- matrix(NA, nrow = 4, ncol = 5) colnames(theta2) <- c("unobs_sd", "income", "incomesq", "age", "child") rownames(theta2) <- c("(Intercept)", "price", "sugar", "mushy") for (i in 1:13) { theta2[cereal_est$indices[i, 1], cereal_est$indices[i, 2]] <- cereal_est$theta_rc[i] } delta_data <- data.frame( "product_id" = cereal_data$parameters$product_id, "cdid" = cereal_data$parameters$market_id_char_in, "startingGuessesDelta" = cereal_est$delta ) # always use update_BLP_data() to update data object to maintain consistent data cereal_data <- update_BLP_data( data_update = delta_data, blp_data = cereal_data ) shareObj <- getShareInfo( blp_data = cereal_data, par_theta2 = theta2, printLevel = 1 ) get_elasticities( blp_data = cereal_data, share_info = shareObj, theta_lin = theta1_price, variable = "price", products = c("cereal_1", "cereal_4"), market = "market_2" ) ## ----------------------------------------------------------------------------- delta_eval <- getDelta_wrap( blp_data = cereal_data, par_theta2 = theta_guesses_cereal, printLevel = 4 ) productData_cereal$startingGuessesDelta[1:6] delta_eval$delta[1:6] delta_eval$counter gmm <- gmm_obj_wrap( blp_data = cereal_data, par_theta2 = theta_guesses_cereal, printLevel = 2 ) gmm$local_min ## ----------------------------------------------------------------------------- shareObj <- getShareInfo( blp_data = cereal_data, par_theta2 = theta_guesses_cereal, printLevel = 4 ) shareObj$shares[1:6] ## ----------------------------------------------------------------------------- # market 2: derivatives1 <- dstdtheta_wrap( blp_data = cereal_data, par_theta2 = theta_guesses_cereal, market = "market_2" ) derivatives2 <- dstddelta_wrap( blp_data = cereal_data, par_theta2 = theta_guesses_cereal, market = "market_2" ) jac_mkt2 <- -solve(derivatives2) %*% derivatives1 jac_mkt2[1:5, 1:4] # all markets jacobian_nevo <- getJacobian_wrap( blp_data = cereal_data, par_theta2 = theta_guesses_cereal, printLevel = 2 ) jacobian_nevo[25:29, 1:4] # compare to jac_mkt2 ## ----------------------------------------------------------------------------- # add owner matix to productData own_pre <- dummies_cars colnames(own_pre) <- paste0("company", 1:26) productData_cars <- cbind(productData_cars, own_pre) # construct instruments nobs <- nrow(productData_cars) X <- data.frame( productData_cars$const, productData_cars$hpwt, productData_cars$air, productData_cars$mpg, productData_cars$space ) sum_other <- matrix(NA, nobs, ncol(X)) sum_rival <- matrix(NA, nobs, ncol(X)) sum_total <- matrix(NA, nobs, ncol(X)) for (i in 1:nobs) { other_ind <- productData_cars$firmid == productData_cars$firmid[i] & productData_cars$cdid == productData_cars$cdid[i] & productData_cars$id != productData_cars$id[i] rival_ind <- productData_cars$firmid != productData_cars$firmid[i] & productData_cars$cdid == productData_cars$cdid[i] total_ind <- productData_cars$cdid == productData_cars$cdid[i] sum_other[i, ] <- colSums(X[other_ind == 1, ]) sum_rival[i, ] <- colSums(X[rival_ind == 1, ]) sum_total[i, ] <- colSums(X[total_ind == 1, ]) } colnames(sum_other) <- paste0("IV", 1:5) colnames(sum_rival) <- paste0("IV", 6:10) productData_cars <- cbind(productData_cars, sum_other, sum_rival) head(productData_cars) # To show similarities between implementations of other authors, # the variable "const" is used, although constants are considered by default. blps_model <- as.formula("share ~ 0 + const + price + hpwt + air + mpg + space | 0 + const + hpwt + air + mpg + space | 0 + price + const + hpwt + air + mpg | 0 + IV1 + IV2 + IV3 + IV4 + IV5 + IV6 + IV7 + IV8 + IV9 + IV10") car_data <- BLP_data( model = blps_model, market_identifier = "cdid", product_identifier = "id", additional_variables = paste0("company", 1:26), # check reordering works productData = productData_cars, blp_inner_tol = 1e-9, blp_inner_maxit = 5000, integration_method = "MLHS", integration_accuracy = 50, integration_seed = 48 ) ## ----------------------------------------------------------------------------- set.seed(121) theta_guesses <- matrix(rnorm(5)) rownames(theta_guesses) <- c("price", "const", "hpwt", "air", "mpg") colnames(theta_guesses) <- "unobs_sd" car_est <- estimateBLP( blp_data = car_data, par_theta2 = theta_guesses, solver_method = "BFGS", solver_maxit = 1000, solver_reltol = 1e-6, extremumCheck = FALSE, printLevel = 0 ) summary(car_est) ## ----------------------------------------------------------------------------- ## Pre-Merger data own_pre <- as.matrix(car_data$data$additional_data[, paste0("company", 1:26)]) delta_pre <- car_est$delta theta1_price <- car_est$theta_lin["price", ] theta2_price <- car_est$theta_rc["unobs_sd*price"] theta2_all <- matrix(car_est$theta_rc) rownames(theta2_all) <- c("price", "const", "hpwt", "air", "mpg") colnames(theta2_all) <- "unobs_sd" ## update mean utility in data ( always use update_BLP_data() to update data object to maintain consistent data ) delta_data <- data.frame( "id" = car_data$parameters$product_id, "cdid" = car_data$parameters$market_id, "delta" = delta_pre ) car_data_updated <- update_BLP_data( data_update = delta_data, blp_data = car_data ) ## ----------------------------------------------------------------------------- ## calculate sij shareObj <- getShareInfo( blp_data = car_data_updated, par_theta2 = theta2_all, printLevel = 0 ) ## computation of marginal costs market_id <- car_data$parameters$market_id nmkt <- length(unique(market_id)) markups <- numeric(length(market_id)) sh <- shareObj$shares prices_pre <- car_data$data$X_rand[, "price"] for (i in 1:nmkt) { mkt_ind <- market_id == i share_i <- sh[ mkt_ind ] price_pre_i <- prices_pre[ mkt_ind ] scalar_i <- matrix(1 / share_i) %*% matrix(price_pre_i, nrow = 1) elasticities_i <- get_elasticities( blp_data = car_data_updated, share_info = shareObj, theta_lin = theta1_price, variable = "price", market = i, printLevel = 0 ) derivatives_i <- elasticities_i / scalar_i # partial derivatives of shares wrt price own_pre_i <- own_pre[ mkt_ind, ] own_prod_pre_i <- own_pre_i %*% t(own_pre_i) # if element (i,j) equals 1, that means that prod i and j are produced by same firm markups[mkt_ind] <- c(-solve(t(derivatives_i) * own_prod_pre_i) %*% share_i) } marg_cost <- prices_pre - markups ## ----------------------------------------------------------------------------- # Merger between company 16 and 19 (i.e. GM and Chrysler) prices_post <- numeric(2217) own_post <- cbind( own_pre[, 1:15], own_pre[, 16] + own_pre[, 19], own_pre[, 17:18], own_pre[, 20:26] ) ## ---- eval = FALSE------------------------------------------------------------ # foc_bertrand_mkt <- function(par, own_prod, blp_data, mkt, marg_cost, theta_lin, theta_rc) { # # argument par: candidate for post merger prices # # arguments own_prod, blp_data, mkt, marg_cost, theta_lin, theta_rc: see previous code blocks # # # post merger updates: update the BLP_data object for market i # tmp <- data.frame( # "id" = blp_data$parameters$product_id, # "cdid" = blp_data$parameters$market_id, # "delta" = blp_data$data$delta, # "price" = blp_data$data$X_rand[, "price"] # ) # # market_ind <- blp_data$parameters$market_id == mkt # delta_old <- blp_data$data$delta # prices_pre <- blp_data$data$X_rand[, "price"] # tmp$price[ market_ind ] <- par # tmp$delta[ market_ind ] <- delta_old[market_ind] - prices_pre[market_ind] * theta_lin + par * theta_lin # # # new_blp_data <- update_BLP_data( # blp_data = blp_data, # data_update = tmp # ) # # ShareObj <- getShareInfo( # blp_data = new_blp_data, # par_theta2 = theta_rc, # printLevel = 0 # ) # # implied_shares <- as.matrix(ShareObj$shares[market_ind]) # # elasticities_post_mkt <- get_elasticities( # blp_data = new_blp_data, # share_info = ShareObj, # theta_lin = theta_lin, # variable = "price", # market = mkt, # printLevel = 0 # ) # # scalar_mkt <- matrix(1 / implied_shares) %*% matrix(par, nrow = 1) # derivatives_mkt <- elasticities_post_mkt / scalar_mkt # # markups_post <- c(-solve(t(derivatives_mkt) * own_prod) %*% implied_shares) # differences <- par - marg_cost[market_ind] - markups_post # # return(differences) # } ## ----eval=FALSE--------------------------------------------------------------- # library(nleqslv) # to solve non linear first order conditions # for (i in 1:nmkt) { # mkt_ind <- market_id == i # own_post_i <- own_post[ mkt_ind, ] # own_prod_post_i <- own_post_i %*% t(own_post_i) # price_pre_i <- prices_pre[ mkt_ind ] # # solution <- nleqslv( # x = price_pre_i, foc_bertrand_mkt, # startingguesses: price_pre_i # own_prod = own_prod_post_i, # blp_data = car_data_updated, # mkt = i, # marg_cost = marg_cost, # theta_lin = theta1_price, # theta_rc = theta2_all # ) # # prices_post[ market_id == i ] <- solution$x # }
/scratch/gouwar.j/cran-all/cranData/BLPestimatoR/inst/doc/blp_intro.R
--- title: "BLPestimatoR - Package for Demand Estimation" author: "Daniel Brunner" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{BLPestimatoR - Package for Demand Estimation} %\VignetteEngine{knitr::knitr} %\VignetteEncoding{UTF-8} references: - id: BLP1995 title: Automobile Prices in Market Equilibrium author: - family: Berry given: Steven - family: Levinsohn given: James - family: Pakes given: Ariel container-title: Econometrica type: article-journal issued: year: 1995 - id: Brunner2017 title: Reliable Estimation of Random Coefficient Logit Demand Models author: - family: Brunner given: Daniel - family: Heiss given: Florian - family: Romahn given: Andre - family: Weiser given: Constantin container-title: DICE Discussion Paper No 267 type: article-journal issued: year: 2017 - id: KM2014 title: 'Estimation of Random-Coefficient Demand Models: Two Empiricists Perspective' author: - family: Knittel given: Christopher - family: Metaxoglou given: Konstantinos container-title: The Review of Economics and Statistics type: article-journal issued: year: 2014 - id: Nevo2001 title: A Practitioner's Guide to Estimation of Random-Coefficients Logit Models of Demand author: - family: Nevo given: Aviv container-title: Journal of Economics \& Management Strategy type: article-journal issued: year: 2001 --- ```{r setup, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` # Intro `BLPestimatoR` provides an efficient estimation algorithm to perform the demand estimation described in @BLP1995. The routine uses analytic gradients and offers a large number of optimization routines and implemented integration methods as discussed in @Brunner2017. This extended documentation demonstrates the steps of a typical demand estimation with the package: - prepare the data with `BLP_data` (includes the specification of a model and providing integration draws for observed or unobserved heterogeneity) - estimate the parameters with `estimate_BLP` - showing the results with `summary` - view the own- and crosspriceelasticities with `get_elasticities` - perform a hypothetical merger analysis For this purpose the well-known training datasets for the cereal market [@Nevo2001] and the car market [@BLP1995] are included in the package. Loading the package is therefore the very first step of the demand estimation: ```{r} library(BLPestimatoR) ``` # Data ## Model Since version 0.1.6 the model is provided in R’s formula syntax and consists of five parts. The variable to be explained is given by observed market shares. Explanatory variables are grouped into four (possibly overlapping) categories separated by `|`: - linear variables - exogenous variables - random coefficients - instruments The first part of this documentation starts with the cereal data example from @Nevo2001. Nevo's model can be translated into the following formula syntax: ```{r} nevos_model <- as.formula("share ~ price + productdummy | 0+ productdummy | price + sugar + mushy | 0+ IV1 + IV2 + IV3 + IV4 + IV5 + IV6 + IV7 + IV8 + IV9 + IV10 + IV11 + IV12 + IV13 + IV14 + IV15 + IV16 + IV17 + IV18 + IV19 + IV20") ``` The model is directly related to consumer $i$'s indirect utility from purchasing cereal $j$ in market $t$: $$u_{ijt}=\sum_{m=1}^M x^{(m)}_{jt} \beta_{i,m}+\xi_{jt}+\epsilon_{ijt} \;\; \text{with}$$ $$\beta_{i,m}= \bar{\beta}_m + \sum_{r=1}^R \gamma_{m,r} d_{i,r} + \sigma_m \nu_{i,m}$$ and - M = 4 random coefficients (`price`, `sugar`, `mushy` and an intercept) - R = 4 demographics (`income`, `incomesq`, `age`, `child`) - and the set of non-linear parameters to estimate: $$\theta_2 = \begin{pmatrix} \sigma_1 & \gamma_{1,1} & \cdots & \gamma_{1,R} \\ \sigma_2 & \gamma_{2,1} & \cdots & \gamma_{2,R} \\ \vdots & \vdots & \ddots & \vdots \\ \sigma_M & \gamma_{M,1} & \cdots & \gamma_{M,R} \end{pmatrix}$$ ## Dataframe Product related variables are collected in the dataframe `productData` with the following requirements: - missings are not allowed - character variables are automatically transformed to a set of dummy variables - a variable that describes market affiliation (`market_identifier`) A variable that uniquely identifies a product in a market (`product_identifier`) is optional, but enhances clarity (interpreting elasticities, for example, is much easier). `market_identifier` and `product_identifier` together uniquely identify an observation, which is used by the function `update_BLP_data` to update any variable in the data (in this case `product_identifier` is mandatory). In the cereal example, this gives the following dataframe: ```{r} head(productData_cereal) ``` ## Integration Draws The arguments related to the numerical integration problem are of particular importance when providing own integration draws and weights, which is most relevant for observed heterogeneity (for unobserved heterogeneity, the straightforward approach is the use of automatic integration). In the cereal data, both, observed and unobserved heterogeneity, is used for the random coefficients. Starting with observed heterogeneity, user provided draws are collected in a list. Each list entry must be named according to the name of a demographic. Each entry contains the following variables: - a variable `market_identifier` that matches each line to a market (same variable name as in `productData`) - integration draws for each market In the cereal example, observed heterogeneity is provided as follows (list names correspond to the demographics): ```{r} demographicData_cereal$income[1:4, 1:5] demographicData_cereal$incomesq[1:4, 1:5] demographicData_cereal$age[1:4, 1:5] demographicData_cereal$child[1:4, 1:5] ``` If demographic input (`demographicData`) is missing, the estimation routine considers only coefficients for unobserved heterogeneity. This can be done by already implemented integration methods via `integration_method` as shown in the estimation section. In Nevo's cereal example however, a specific set of 20 draws is given. For this situation, draws are also provided as a list (list names correspond to the formula's random coefficients and each list entry has a variable `market_identifier`): ```{r} originalDraws_cereal$constant[1:4, 1:5] # renaming constants: names(originalDraws_cereal)[1] <- "(Intercept)" originalDraws_cereal$price[1:4, 1:5] originalDraws_cereal$sugar[1:4, 1:5] originalDraws_cereal$mushy[1:4, 1:5] ``` As demonstrated above, list entries for draws of constants **must** be named `(Intercept)`. Other names of list entries must match the random coefficients specified in the formula. ## Calling BLP_data Calling `BLP_data` structures and prepares the data for estimation and creates the data object: ```{r } productData_cereal$startingGuessesDelta <- c(log(w_guesses_cereal)) # include orig. draws in the product data cereal_data <- BLP_data( model = nevos_model, market_identifier = "cdid", par_delta = "startingGuessesDelta", product_identifier = "product_id", productData = productData_cereal, demographic_draws = demographicData_cereal, blp_inner_tol = 1e-6, blp_inner_maxit = 5000, integration_draws = originalDraws_cereal, integration_weights = rep(1 / 20, 20) ) ``` The arguments in greater detail: - `model` provides the utility model as explained above - `market_identifier` gives the name of the variable in `productData` that matches each observation to a market - `product_identifier` gives the name of the variable in `productData` that matches each observation to a product (must be unique in a market) - `productData` is given as a dataframe and `demographicData` as a list as described above - `par_delta` gives the name of the variable in `productData` for mean utilities - `blp_inner_tol` , `blp_inner_maxit`: arguments related to be BLP algorithm include the convergence threshold and the maximum number of iterations in the contraction mapping - if integration draws are provided manually, `integration_draws` and `integration_weights` need to be specified - for automatic integration the user specifies `integration_method`, for example `integration_method= "MLHS"`, and the accuracy of the integration method by `integration_accuracy` (for stochastic integration methods this equals the number of draws) If you decide to update your data later, you can use the function `update_BLP_data`. # Estimation ## Starting guesses The provided set of starting guesses `par_theta2` is matched with formula input and demographic data: - rownames of `par_theta2` must match with the random coefficients specified in the formula (note: constants **must** be named `(Intercept)` ) - colnames of `par_theta2` must match with list entry names of `demographicData` and a column for unobserved heterogeneity (**must** be named `unobs_sd) - `NA`s in `par_theta2` indicate the exclusion from estimation, i.e. the coefficient is assumed to be zero. These requirements are demonstrated with a set of exemplary starting guesses: ```{r} # before: theta_guesses_cereal theta_guesses_cereal[theta_guesses_cereal == 0] <- NA colnames(theta_guesses_cereal) <- c("unobs_sd", "income", "incomesq", "age", "child") rownames(theta_guesses_cereal) <- c("(Intercept)", "price", "sugar", "mushy") # correctly named: theta_guesses_cereal ``` ## Calling estimateBLP The following code performs the demand estimation: ```{r} cereal_est <- estimateBLP( blp_data = cereal_data, par_theta2 = theta_guesses_cereal, solver_method = "BFGS", solver_maxit = 1000, solver_reltol = 1e-6, standardError = "heteroskedastic", extremumCheck = FALSE, printLevel = 1 ) summary(cereal_est) ``` The arguments in greater detail: - `par_theta2` gives initial values for non-linear parameters to be optimized over. Correct naming of columns and rows is important to allow correct matching. - `solver_method`, `solver_maxit` , `solver_reltol`: solver related arguments that specify the R internal optimization (`optim` function). Additional arguments can be passed to optim via `...` - `standardError` can be specified as `homoskedastic`, `heteroskedastic` or `cluster`. The latter requires the variable `group_structure` in `productData` giving the related cluster. - if `extremumCheck` is `TRUE`, numerical derivatives at the solver optimum are used to check, if a local minimum was found - `printLevel` controls for the amount of information that is provided during the estimation Many of these arguments have default values. In the following setting you see a minimum of necessary arguments with an automatic generation of integration draws and just unobserved heterogeneity. The summary output informs you about the most important default values. ```{r} cereal_data2 <- BLP_data( model = nevos_model, market_identifier = "cdid", product_identifier = "product_id", productData = productData_cereal, integration_method = "MLHS", integration_accuracy = 20, integration_seed = 213 ) cereal_est2 <- estimateBLP(blp_data = cereal_data2, printLevel = 1) summary(cereal_est2) ``` # Postestimation ## Standard Errors Standard errors can be computed with three options that control for the unobserved characteristic $\xi$, which consists of $N$ elements. $\Omega$ denotes the variance covariance matrix of $\xi$. - option `homoskedastic` requires the standard deviation $\sigma_i$ for each $\xi_i \;\forall i\in 1,\cdots,N$ to be identical: $$\Omega = \begin{pmatrix} \sigma & 0 & \dots & 0\\ 0 & \sigma & & 0\\ \vdots & & \ddots & 0\\ 0 & 0 & 0 & \sigma \\ \end{pmatrix}$$ - option `heteroskedastic` allows for individual standard deviations $\sigma_i$ for each $\xi_i$ : $$\Omega = \begin{pmatrix} \sigma_1 & 0 & \dots & 0\\ 0 & \sigma_2 & & 0\\ \vdots & & \ddots & 0\\ 0 & 0 & 0 & \sigma_N \\ \end{pmatrix}$$ - option `cluster` allows for cluster individual variance covariance matrices in each of $M$ cluster groups. For this option the argument `group_structure` needs to be specified in the function `BLP_data` to determine the cluster group. This gives the block-diagonal form with $\Sigma_m$ as the variance covariance matrix for all $\xi_i$ in cluster $m$: $$\Omega = \begin{pmatrix} \Sigma_1 & 0 & \dots & 0\\ 0 & \Sigma_2 & & 0\\ \vdots & & \ddots & 0\\ 0 & 0 & 0 & \Sigma_M \\ \end{pmatrix}$$ ## Elasticities The following code demonstrates the calculation of elasticities for the estimation object `cereal_est`. ```{r} # extract parameters from output theta1_price <- cereal_est$theta_lin["price", ] theta2 <- matrix(NA, nrow = 4, ncol = 5) colnames(theta2) <- c("unobs_sd", "income", "incomesq", "age", "child") rownames(theta2) <- c("(Intercept)", "price", "sugar", "mushy") for (i in 1:13) { theta2[cereal_est$indices[i, 1], cereal_est$indices[i, 2]] <- cereal_est$theta_rc[i] } delta_data <- data.frame( "product_id" = cereal_data$parameters$product_id, "cdid" = cereal_data$parameters$market_id_char_in, "startingGuessesDelta" = cereal_est$delta ) # always use update_BLP_data() to update data object to maintain consistent data cereal_data <- update_BLP_data( data_update = delta_data, blp_data = cereal_data ) shareObj <- getShareInfo( blp_data = cereal_data, par_theta2 = theta2, printLevel = 1 ) get_elasticities( blp_data = cereal_data, share_info = shareObj, theta_lin = theta1_price, variable = "price", products = c("cereal_1", "cereal_4"), market = "market_2" ) ``` The value of the elasticity matrix in row $j$ and column $i$ for a variable $x$, gives the effect of a change in product $i$'s characteristic $x$ on the share of product $j$. # Modular Examples Further analysis like incorporating a supply side or performing a merger simulation often requires access to building blocks of the BLP algorithm. The following wrappers insure correct data inputs and access the internal functions of the algorithm. In the following, you find an example of the contraction mapping and an evaluation of the GMM function at the starting guess: ```{r} delta_eval <- getDelta_wrap( blp_data = cereal_data, par_theta2 = theta_guesses_cereal, printLevel = 4 ) productData_cereal$startingGuessesDelta[1:6] delta_eval$delta[1:6] delta_eval$counter gmm <- gmm_obj_wrap( blp_data = cereal_data, par_theta2 = theta_guesses_cereal, printLevel = 2 ) gmm$local_min ``` Printed distances in the contraction mapping are maximum absolute distances between the current vector of mean utilities and the previous one. For any $\theta_2$, you can compute predicted shares: ```{r} shareObj <- getShareInfo( blp_data = cereal_data, par_theta2 = theta_guesses_cereal, printLevel = 4 ) shareObj$shares[1:6] ``` The object contains a list of outputs that are useful for further economic analysis. For example, the list element `sij` contains share probabilities for every individual and needs to be given to calculate elasticities. The gradient contains two important building blocks as explained in the appendix of @Nevo2001: - $\frac{\partial s_{ijt}}{\partial \theta_2}$ , i.e. the derivative of individual $i$'s share of product $j$ in market $t$ with respect to non-linear parameters - $\frac{\partial s_{ijt}}{\partial \delta}$ , i.e. the derivative of individual $i$'s share of product $j$ in market $t$ with respect to mean utilities Both are used to compute the jacobian and are easy to obtain with the package as the following example demonstrates: ```{r} # market 2: derivatives1 <- dstdtheta_wrap( blp_data = cereal_data, par_theta2 = theta_guesses_cereal, market = "market_2" ) derivatives2 <- dstddelta_wrap( blp_data = cereal_data, par_theta2 = theta_guesses_cereal, market = "market_2" ) jac_mkt2 <- -solve(derivatives2) %*% derivatives1 jac_mkt2[1:5, 1:4] # all markets jacobian_nevo <- getJacobian_wrap( blp_data = cereal_data, par_theta2 = theta_guesses_cereal, printLevel = 2 ) jacobian_nevo[25:29, 1:4] # compare to jac_mkt2 ``` # Another Example: Merger Analysis with BLP's car data Analyzing a hypothetical merger is demonstrated by the car data of @BLP1995. In this case, the preparation of product data comprises the computation of instruments as a function of product characteristics of competitors' products (for details, check @BLP1995). This example is based on data and documentation of @KM2014. ```{r} # add owner matix to productData own_pre <- dummies_cars colnames(own_pre) <- paste0("company", 1:26) productData_cars <- cbind(productData_cars, own_pre) # construct instruments nobs <- nrow(productData_cars) X <- data.frame( productData_cars$const, productData_cars$hpwt, productData_cars$air, productData_cars$mpg, productData_cars$space ) sum_other <- matrix(NA, nobs, ncol(X)) sum_rival <- matrix(NA, nobs, ncol(X)) sum_total <- matrix(NA, nobs, ncol(X)) for (i in 1:nobs) { other_ind <- productData_cars$firmid == productData_cars$firmid[i] & productData_cars$cdid == productData_cars$cdid[i] & productData_cars$id != productData_cars$id[i] rival_ind <- productData_cars$firmid != productData_cars$firmid[i] & productData_cars$cdid == productData_cars$cdid[i] total_ind <- productData_cars$cdid == productData_cars$cdid[i] sum_other[i, ] <- colSums(X[other_ind == 1, ]) sum_rival[i, ] <- colSums(X[rival_ind == 1, ]) sum_total[i, ] <- colSums(X[total_ind == 1, ]) } colnames(sum_other) <- paste0("IV", 1:5) colnames(sum_rival) <- paste0("IV", 6:10) productData_cars <- cbind(productData_cars, sum_other, sum_rival) head(productData_cars) # To show similarities between implementations of other authors, # the variable "const" is used, although constants are considered by default. blps_model <- as.formula("share ~ 0 + const + price + hpwt + air + mpg + space | 0 + const + hpwt + air + mpg + space | 0 + price + const + hpwt + air + mpg | 0 + IV1 + IV2 + IV3 + IV4 + IV5 + IV6 + IV7 + IV8 + IV9 + IV10") car_data <- BLP_data( model = blps_model, market_identifier = "cdid", product_identifier = "id", additional_variables = paste0("company", 1:26), # check reordering works productData = productData_cars, blp_inner_tol = 1e-9, blp_inner_maxit = 5000, integration_method = "MLHS", integration_accuracy = 50, integration_seed = 48 ) ``` In the next step, starting guesses for random coefficients are generated from a standard normal distribution. The estimation of the model works like before. ```{r} set.seed(121) theta_guesses <- matrix(rnorm(5)) rownames(theta_guesses) <- c("price", "const", "hpwt", "air", "mpg") colnames(theta_guesses) <- "unobs_sd" car_est <- estimateBLP( blp_data = car_data, par_theta2 = theta_guesses, solver_method = "BFGS", solver_maxit = 1000, solver_reltol = 1e-6, extremumCheck = FALSE, printLevel = 0 ) summary(car_est) ``` Next, all parameters that are required by the subsequent merger analysis are extracted. Note that all extracted data is based on the estimation object `car_est` or the data object `car_data` to maintain data consistency (for example, the order of data in `product_data_cars` might differ from `car_data`). Moreover, mean utilities are updated in `car_data` by the values in the estimation object `car_est`. ```{r} ## Pre-Merger data own_pre <- as.matrix(car_data$data$additional_data[, paste0("company", 1:26)]) delta_pre <- car_est$delta theta1_price <- car_est$theta_lin["price", ] theta2_price <- car_est$theta_rc["unobs_sd*price"] theta2_all <- matrix(car_est$theta_rc) rownames(theta2_all) <- c("price", "const", "hpwt", "air", "mpg") colnames(theta2_all) <- "unobs_sd" ## update mean utility in data ( always use update_BLP_data() to update data object to maintain consistent data ) delta_data <- data.frame( "id" = car_data$parameters$product_id, "cdid" = car_data$parameters$market_id, "delta" = delta_pre ) car_data_updated <- update_BLP_data( data_update = delta_data, blp_data = car_data ) ``` In the next step, an estimate for marginal costs $mc$ **before** the merger is computed. The following is based on the FOC of a Bertrand equilibrium with prices $p$ before the merger: $$ p^{pre} - \widehat{mc} = \Omega^{pre}(p^{pre})^{-1} \hat{s}(p^{pre}) $$ $\Omega^{pre}(p^{pre})^{-1}$ is defined marketwise as the inverse of $$ \Omega^{pre}(p^{pre}) = \pmatrix{ -\frac{\partial s_{1}}{\partial p_{1}} (p^{pre}) \cdot D_{1,1} & -\frac{\partial s_{2}}{\partial p_{1}} (p^{pre}) \cdot D_{1,2} & \cdots & -\frac{\partial s_{j}}{\partial p_{1}} (p^{pre}) \cdot D_{1,j} & \cdots & -\frac{\partial s_{J}}{\partial p_{1}} (p^{pre}) \cdot D_{1,J}\\ -\frac{\partial s_{1}}{\partial p_{2}} (p^{pre}) \cdot D_{2,1} & -\frac{\partial s_{2}}{\partial p_{2}} (p^{pre}) \cdot D_{2,2} & \cdots & -\frac{\partial s_{j}}{\partial p_{2}} (p^{pre}) \cdot D_{2,j} & \cdots & -\frac{\partial s_{J}}{\partial p_{2}} (p^{pre}) \cdot D_{2,J}\\ \vdots & \vdots & \ddots & \vdots & \ddots & \vdots\\ -\frac{\partial s_{1}}{\partial p_{k}} (p^{pre}) \cdot D_{k,1} & -\frac{\partial s_{2}}{\partial p_{k}} (p^{pre}) \cdot D_{k,2} & \cdots & -\frac{\partial s_{j}}{\partial p_{k}} (p^{pre}) \cdot D_{k,j} & \cdots & -\frac{\partial s_{J}}{\partial p_{k}} (p^{pre}) \cdot D_{k,J}\\ \vdots & \vdots & \ddots & \vdots & \ddots & \vdots\\ -\frac{\partial s_{1}}{\partial p_{J}} (p^{pre}) \cdot D_{J,1} & -\frac{\partial s_{2}}{\partial p_{J}} (p^{pre}) \cdot D_{J,2} & \cdots & -\frac{\partial s_{j}}{\partial p_{J}} (p^{pre}) \cdot D_{J,j}& \cdots & -\frac{\partial s_{J}}{\partial p_{J}} (p^{pre}) \cdot D_{J,J}\\ } $$ with $$D_{k,j} = \begin{cases} 1 & \text{if products k and j are produced by the same firm} \\ 0 & \text{otherwise} \\ \end{cases}$$ Partial derivatives $\frac{\partial s_j}{\partial p_k}$ can be calculated based on the elasticity $\eta_{jk} = \frac{\partial s_j }{\partial p_k }\frac{ p_k}{ s_j}$, so $$ \frac{\partial s_j}{\partial p_k} = \eta_{jk} \cdot \frac{ s_j}{ p_k} $$ In the following code chunk, these objects in a market `i` are labeled as follows: - `own_prod_pre_i` ($D_{k,j}$) - `elasticities_i` ($\eta_{jk}$) - `derivatives_i` ($\eta_{jk} \cdot \frac{ s_j}{ p_k}$) - `-solve(t(derivatives_i) * own_prod_pre_i)` ($\Omega^{pre}(p^{pre})^{-1}$) - `shareObj$shares` ($\hat{s}(p^{pre})$). ```{r} ## calculate sij shareObj <- getShareInfo( blp_data = car_data_updated, par_theta2 = theta2_all, printLevel = 0 ) ## computation of marginal costs market_id <- car_data$parameters$market_id nmkt <- length(unique(market_id)) markups <- numeric(length(market_id)) sh <- shareObj$shares prices_pre <- car_data$data$X_rand[, "price"] for (i in 1:nmkt) { mkt_ind <- market_id == i share_i <- sh[ mkt_ind ] price_pre_i <- prices_pre[ mkt_ind ] scalar_i <- matrix(1 / share_i) %*% matrix(price_pre_i, nrow = 1) elasticities_i <- get_elasticities( blp_data = car_data_updated, share_info = shareObj, theta_lin = theta1_price, variable = "price", market = i, printLevel = 0 ) derivatives_i <- elasticities_i / scalar_i # partial derivatives of shares wrt price own_pre_i <- own_pre[ mkt_ind, ] own_prod_pre_i <- own_pre_i %*% t(own_pre_i) # if element (i,j) equals 1, that means that prod i and j are produced by same firm markups[mkt_ind] <- c(-solve(t(derivatives_i) * own_prod_pre_i) %*% share_i) } marg_cost <- prices_pre - markups ``` The ownership matrix is adjusted to implement a hypothetical merger between Chrysler and GM: ```{r} # Merger between company 16 and 19 (i.e. GM and Chrysler) prices_post <- numeric(2217) own_post <- cbind( own_pre[, 1:15], own_pre[, 16] + own_pre[, 19], own_pre[, 17:18], own_pre[, 20:26] ) ``` To analyze the effect on prices the FOC of the new equilibrium must be solved: $$ p^{post} - \widehat{mc} = \Omega^{post}(p^{post})^{-1} \hat{s}(p^{post}) $$ The solution of this set of non-linear equations is obtained by the function `foc_bertrand_mkt` and the package `nleqslv`: ```{r, eval = FALSE} foc_bertrand_mkt <- function(par, own_prod, blp_data, mkt, marg_cost, theta_lin, theta_rc) { # argument par: candidate for post merger prices # arguments own_prod, blp_data, mkt, marg_cost, theta_lin, theta_rc: see previous code blocks # post merger updates: update the BLP_data object for market i tmp <- data.frame( "id" = blp_data$parameters$product_id, "cdid" = blp_data$parameters$market_id, "delta" = blp_data$data$delta, "price" = blp_data$data$X_rand[, "price"] ) market_ind <- blp_data$parameters$market_id == mkt delta_old <- blp_data$data$delta prices_pre <- blp_data$data$X_rand[, "price"] tmp$price[ market_ind ] <- par tmp$delta[ market_ind ] <- delta_old[market_ind] - prices_pre[market_ind] * theta_lin + par * theta_lin new_blp_data <- update_BLP_data( blp_data = blp_data, data_update = tmp ) ShareObj <- getShareInfo( blp_data = new_blp_data, par_theta2 = theta_rc, printLevel = 0 ) implied_shares <- as.matrix(ShareObj$shares[market_ind]) elasticities_post_mkt <- get_elasticities( blp_data = new_blp_data, share_info = ShareObj, theta_lin = theta_lin, variable = "price", market = mkt, printLevel = 0 ) scalar_mkt <- matrix(1 / implied_shares) %*% matrix(par, nrow = 1) derivatives_mkt <- elasticities_post_mkt / scalar_mkt markups_post <- c(-solve(t(derivatives_mkt) * own_prod) %*% implied_shares) differences <- par - marg_cost[market_ind] - markups_post return(differences) } ``` Finally, the function is used to compute the new equilibrium: ```{r,eval=FALSE} library(nleqslv) # to solve non linear first order conditions for (i in 1:nmkt) { mkt_ind <- market_id == i own_post_i <- own_post[ mkt_ind, ] own_prod_post_i <- own_post_i %*% t(own_post_i) price_pre_i <- prices_pre[ mkt_ind ] solution <- nleqslv( x = price_pre_i, foc_bertrand_mkt, # startingguesses: price_pre_i own_prod = own_prod_post_i, blp_data = car_data_updated, mkt = i, marg_cost = marg_cost, theta_lin = theta1_price, theta_rc = theta2_all ) prices_post[ market_id == i ] <- solution$x } ``` # References
/scratch/gouwar.j/cran-all/cranData/BLPestimatoR/inst/doc/blp_intro.Rmd
--- title: "BLPestimatoR - Package for Demand Estimation" author: "Daniel Brunner" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{BLPestimatoR - Package for Demand Estimation} %\VignetteEngine{knitr::knitr} %\VignetteEncoding{UTF-8} references: - id: BLP1995 title: Automobile Prices in Market Equilibrium author: - family: Berry given: Steven - family: Levinsohn given: James - family: Pakes given: Ariel container-title: Econometrica type: article-journal issued: year: 1995 - id: Brunner2017 title: Reliable Estimation of Random Coefficient Logit Demand Models author: - family: Brunner given: Daniel - family: Heiss given: Florian - family: Romahn given: Andre - family: Weiser given: Constantin container-title: DICE Discussion Paper No 267 type: article-journal issued: year: 2017 - id: KM2014 title: 'Estimation of Random-Coefficient Demand Models: Two Empiricists Perspective' author: - family: Knittel given: Christopher - family: Metaxoglou given: Konstantinos container-title: The Review of Economics and Statistics type: article-journal issued: year: 2014 - id: Nevo2001 title: A Practitioner's Guide to Estimation of Random-Coefficients Logit Models of Demand author: - family: Nevo given: Aviv container-title: Journal of Economics \& Management Strategy type: article-journal issued: year: 2001 --- ```{r setup, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` # Intro `BLPestimatoR` provides an efficient estimation algorithm to perform the demand estimation described in @BLP1995. The routine uses analytic gradients and offers a large number of optimization routines and implemented integration methods as discussed in @Brunner2017. This extended documentation demonstrates the steps of a typical demand estimation with the package: - prepare the data with `BLP_data` (includes the specification of a model and providing integration draws for observed or unobserved heterogeneity) - estimate the parameters with `estimate_BLP` - showing the results with `summary` - view the own- and crosspriceelasticities with `get_elasticities` - perform a hypothetical merger analysis For this purpose the well-known training datasets for the cereal market [@Nevo2001] and the car market [@BLP1995] are included in the package. Loading the package is therefore the very first step of the demand estimation: ```{r} library(BLPestimatoR) ``` # Data ## Model Since version 0.1.6 the model is provided in R’s formula syntax and consists of five parts. The variable to be explained is given by observed market shares. Explanatory variables are grouped into four (possibly overlapping) categories separated by `|`: - linear variables - exogenous variables - random coefficients - instruments The first part of this documentation starts with the cereal data example from @Nevo2001. Nevo's model can be translated into the following formula syntax: ```{r} nevos_model <- as.formula("share ~ price + productdummy | 0+ productdummy | price + sugar + mushy | 0+ IV1 + IV2 + IV3 + IV4 + IV5 + IV6 + IV7 + IV8 + IV9 + IV10 + IV11 + IV12 + IV13 + IV14 + IV15 + IV16 + IV17 + IV18 + IV19 + IV20") ``` The model is directly related to consumer $i$'s indirect utility from purchasing cereal $j$ in market $t$: $$u_{ijt}=\sum_{m=1}^M x^{(m)}_{jt} \beta_{i,m}+\xi_{jt}+\epsilon_{ijt} \;\; \text{with}$$ $$\beta_{i,m}= \bar{\beta}_m + \sum_{r=1}^R \gamma_{m,r} d_{i,r} + \sigma_m \nu_{i,m}$$ and - M = 4 random coefficients (`price`, `sugar`, `mushy` and an intercept) - R = 4 demographics (`income`, `incomesq`, `age`, `child`) - and the set of non-linear parameters to estimate: $$\theta_2 = \begin{pmatrix} \sigma_1 & \gamma_{1,1} & \cdots & \gamma_{1,R} \\ \sigma_2 & \gamma_{2,1} & \cdots & \gamma_{2,R} \\ \vdots & \vdots & \ddots & \vdots \\ \sigma_M & \gamma_{M,1} & \cdots & \gamma_{M,R} \end{pmatrix}$$ ## Dataframe Product related variables are collected in the dataframe `productData` with the following requirements: - missings are not allowed - character variables are automatically transformed to a set of dummy variables - a variable that describes market affiliation (`market_identifier`) A variable that uniquely identifies a product in a market (`product_identifier`) is optional, but enhances clarity (interpreting elasticities, for example, is much easier). `market_identifier` and `product_identifier` together uniquely identify an observation, which is used by the function `update_BLP_data` to update any variable in the data (in this case `product_identifier` is mandatory). In the cereal example, this gives the following dataframe: ```{r} head(productData_cereal) ``` ## Integration Draws The arguments related to the numerical integration problem are of particular importance when providing own integration draws and weights, which is most relevant for observed heterogeneity (for unobserved heterogeneity, the straightforward approach is the use of automatic integration). In the cereal data, both, observed and unobserved heterogeneity, is used for the random coefficients. Starting with observed heterogeneity, user provided draws are collected in a list. Each list entry must be named according to the name of a demographic. Each entry contains the following variables: - a variable `market_identifier` that matches each line to a market (same variable name as in `productData`) - integration draws for each market In the cereal example, observed heterogeneity is provided as follows (list names correspond to the demographics): ```{r} demographicData_cereal$income[1:4, 1:5] demographicData_cereal$incomesq[1:4, 1:5] demographicData_cereal$age[1:4, 1:5] demographicData_cereal$child[1:4, 1:5] ``` If demographic input (`demographicData`) is missing, the estimation routine considers only coefficients for unobserved heterogeneity. This can be done by already implemented integration methods via `integration_method` as shown in the estimation section. In Nevo's cereal example however, a specific set of 20 draws is given. For this situation, draws are also provided as a list (list names correspond to the formula's random coefficients and each list entry has a variable `market_identifier`): ```{r} originalDraws_cereal$constant[1:4, 1:5] # renaming constants: names(originalDraws_cereal)[1] <- "(Intercept)" originalDraws_cereal$price[1:4, 1:5] originalDraws_cereal$sugar[1:4, 1:5] originalDraws_cereal$mushy[1:4, 1:5] ``` As demonstrated above, list entries for draws of constants **must** be named `(Intercept)`. Other names of list entries must match the random coefficients specified in the formula. ## Calling BLP_data Calling `BLP_data` structures and prepares the data for estimation and creates the data object: ```{r } productData_cereal$startingGuessesDelta <- c(log(w_guesses_cereal)) # include orig. draws in the product data cereal_data <- BLP_data( model = nevos_model, market_identifier = "cdid", par_delta = "startingGuessesDelta", product_identifier = "product_id", productData = productData_cereal, demographic_draws = demographicData_cereal, blp_inner_tol = 1e-6, blp_inner_maxit = 5000, integration_draws = originalDraws_cereal, integration_weights = rep(1 / 20, 20) ) ``` The arguments in greater detail: - `model` provides the utility model as explained above - `market_identifier` gives the name of the variable in `productData` that matches each observation to a market - `product_identifier` gives the name of the variable in `productData` that matches each observation to a product (must be unique in a market) - `productData` is given as a dataframe and `demographicData` as a list as described above - `par_delta` gives the name of the variable in `productData` for mean utilities - `blp_inner_tol` , `blp_inner_maxit`: arguments related to be BLP algorithm include the convergence threshold and the maximum number of iterations in the contraction mapping - if integration draws are provided manually, `integration_draws` and `integration_weights` need to be specified - for automatic integration the user specifies `integration_method`, for example `integration_method= "MLHS"`, and the accuracy of the integration method by `integration_accuracy` (for stochastic integration methods this equals the number of draws) If you decide to update your data later, you can use the function `update_BLP_data`. # Estimation ## Starting guesses The provided set of starting guesses `par_theta2` is matched with formula input and demographic data: - rownames of `par_theta2` must match with the random coefficients specified in the formula (note: constants **must** be named `(Intercept)` ) - colnames of `par_theta2` must match with list entry names of `demographicData` and a column for unobserved heterogeneity (**must** be named `unobs_sd) - `NA`s in `par_theta2` indicate the exclusion from estimation, i.e. the coefficient is assumed to be zero. These requirements are demonstrated with a set of exemplary starting guesses: ```{r} # before: theta_guesses_cereal theta_guesses_cereal[theta_guesses_cereal == 0] <- NA colnames(theta_guesses_cereal) <- c("unobs_sd", "income", "incomesq", "age", "child") rownames(theta_guesses_cereal) <- c("(Intercept)", "price", "sugar", "mushy") # correctly named: theta_guesses_cereal ``` ## Calling estimateBLP The following code performs the demand estimation: ```{r} cereal_est <- estimateBLP( blp_data = cereal_data, par_theta2 = theta_guesses_cereal, solver_method = "BFGS", solver_maxit = 1000, solver_reltol = 1e-6, standardError = "heteroskedastic", extremumCheck = FALSE, printLevel = 1 ) summary(cereal_est) ``` The arguments in greater detail: - `par_theta2` gives initial values for non-linear parameters to be optimized over. Correct naming of columns and rows is important to allow correct matching. - `solver_method`, `solver_maxit` , `solver_reltol`: solver related arguments that specify the R internal optimization (`optim` function). Additional arguments can be passed to optim via `...` - `standardError` can be specified as `homoskedastic`, `heteroskedastic` or `cluster`. The latter requires the variable `group_structure` in `productData` giving the related cluster. - if `extremumCheck` is `TRUE`, numerical derivatives at the solver optimum are used to check, if a local minimum was found - `printLevel` controls for the amount of information that is provided during the estimation Many of these arguments have default values. In the following setting you see a minimum of necessary arguments with an automatic generation of integration draws and just unobserved heterogeneity. The summary output informs you about the most important default values. ```{r} cereal_data2 <- BLP_data( model = nevos_model, market_identifier = "cdid", product_identifier = "product_id", productData = productData_cereal, integration_method = "MLHS", integration_accuracy = 20, integration_seed = 213 ) cereal_est2 <- estimateBLP(blp_data = cereal_data2, printLevel = 1) summary(cereal_est2) ``` # Postestimation ## Standard Errors Standard errors can be computed with three options that control for the unobserved characteristic $\xi$, which consists of $N$ elements. $\Omega$ denotes the variance covariance matrix of $\xi$. - option `homoskedastic` requires the standard deviation $\sigma_i$ for each $\xi_i \;\forall i\in 1,\cdots,N$ to be identical: $$\Omega = \begin{pmatrix} \sigma & 0 & \dots & 0\\ 0 & \sigma & & 0\\ \vdots & & \ddots & 0\\ 0 & 0 & 0 & \sigma \\ \end{pmatrix}$$ - option `heteroskedastic` allows for individual standard deviations $\sigma_i$ for each $\xi_i$ : $$\Omega = \begin{pmatrix} \sigma_1 & 0 & \dots & 0\\ 0 & \sigma_2 & & 0\\ \vdots & & \ddots & 0\\ 0 & 0 & 0 & \sigma_N \\ \end{pmatrix}$$ - option `cluster` allows for cluster individual variance covariance matrices in each of $M$ cluster groups. For this option the argument `group_structure` needs to be specified in the function `BLP_data` to determine the cluster group. This gives the block-diagonal form with $\Sigma_m$ as the variance covariance matrix for all $\xi_i$ in cluster $m$: $$\Omega = \begin{pmatrix} \Sigma_1 & 0 & \dots & 0\\ 0 & \Sigma_2 & & 0\\ \vdots & & \ddots & 0\\ 0 & 0 & 0 & \Sigma_M \\ \end{pmatrix}$$ ## Elasticities The following code demonstrates the calculation of elasticities for the estimation object `cereal_est`. ```{r} # extract parameters from output theta1_price <- cereal_est$theta_lin["price", ] theta2 <- matrix(NA, nrow = 4, ncol = 5) colnames(theta2) <- c("unobs_sd", "income", "incomesq", "age", "child") rownames(theta2) <- c("(Intercept)", "price", "sugar", "mushy") for (i in 1:13) { theta2[cereal_est$indices[i, 1], cereal_est$indices[i, 2]] <- cereal_est$theta_rc[i] } delta_data <- data.frame( "product_id" = cereal_data$parameters$product_id, "cdid" = cereal_data$parameters$market_id_char_in, "startingGuessesDelta" = cereal_est$delta ) # always use update_BLP_data() to update data object to maintain consistent data cereal_data <- update_BLP_data( data_update = delta_data, blp_data = cereal_data ) shareObj <- getShareInfo( blp_data = cereal_data, par_theta2 = theta2, printLevel = 1 ) get_elasticities( blp_data = cereal_data, share_info = shareObj, theta_lin = theta1_price, variable = "price", products = c("cereal_1", "cereal_4"), market = "market_2" ) ``` The value of the elasticity matrix in row $j$ and column $i$ for a variable $x$, gives the effect of a change in product $i$'s characteristic $x$ on the share of product $j$. # Modular Examples Further analysis like incorporating a supply side or performing a merger simulation often requires access to building blocks of the BLP algorithm. The following wrappers insure correct data inputs and access the internal functions of the algorithm. In the following, you find an example of the contraction mapping and an evaluation of the GMM function at the starting guess: ```{r} delta_eval <- getDelta_wrap( blp_data = cereal_data, par_theta2 = theta_guesses_cereal, printLevel = 4 ) productData_cereal$startingGuessesDelta[1:6] delta_eval$delta[1:6] delta_eval$counter gmm <- gmm_obj_wrap( blp_data = cereal_data, par_theta2 = theta_guesses_cereal, printLevel = 2 ) gmm$local_min ``` Printed distances in the contraction mapping are maximum absolute distances between the current vector of mean utilities and the previous one. For any $\theta_2$, you can compute predicted shares: ```{r} shareObj <- getShareInfo( blp_data = cereal_data, par_theta2 = theta_guesses_cereal, printLevel = 4 ) shareObj$shares[1:6] ``` The object contains a list of outputs that are useful for further economic analysis. For example, the list element `sij` contains share probabilities for every individual and needs to be given to calculate elasticities. The gradient contains two important building blocks as explained in the appendix of @Nevo2001: - $\frac{\partial s_{ijt}}{\partial \theta_2}$ , i.e. the derivative of individual $i$'s share of product $j$ in market $t$ with respect to non-linear parameters - $\frac{\partial s_{ijt}}{\partial \delta}$ , i.e. the derivative of individual $i$'s share of product $j$ in market $t$ with respect to mean utilities Both are used to compute the jacobian and are easy to obtain with the package as the following example demonstrates: ```{r} # market 2: derivatives1 <- dstdtheta_wrap( blp_data = cereal_data, par_theta2 = theta_guesses_cereal, market = "market_2" ) derivatives2 <- dstddelta_wrap( blp_data = cereal_data, par_theta2 = theta_guesses_cereal, market = "market_2" ) jac_mkt2 <- -solve(derivatives2) %*% derivatives1 jac_mkt2[1:5, 1:4] # all markets jacobian_nevo <- getJacobian_wrap( blp_data = cereal_data, par_theta2 = theta_guesses_cereal, printLevel = 2 ) jacobian_nevo[25:29, 1:4] # compare to jac_mkt2 ``` # Another Example: Merger Analysis with BLP's car data Analyzing a hypothetical merger is demonstrated by the car data of @BLP1995. In this case, the preparation of product data comprises the computation of instruments as a function of product characteristics of competitors' products (for details, check @BLP1995). This example is based on data and documentation of @KM2014. ```{r} # add owner matix to productData own_pre <- dummies_cars colnames(own_pre) <- paste0("company", 1:26) productData_cars <- cbind(productData_cars, own_pre) # construct instruments nobs <- nrow(productData_cars) X <- data.frame( productData_cars$const, productData_cars$hpwt, productData_cars$air, productData_cars$mpg, productData_cars$space ) sum_other <- matrix(NA, nobs, ncol(X)) sum_rival <- matrix(NA, nobs, ncol(X)) sum_total <- matrix(NA, nobs, ncol(X)) for (i in 1:nobs) { other_ind <- productData_cars$firmid == productData_cars$firmid[i] & productData_cars$cdid == productData_cars$cdid[i] & productData_cars$id != productData_cars$id[i] rival_ind <- productData_cars$firmid != productData_cars$firmid[i] & productData_cars$cdid == productData_cars$cdid[i] total_ind <- productData_cars$cdid == productData_cars$cdid[i] sum_other[i, ] <- colSums(X[other_ind == 1, ]) sum_rival[i, ] <- colSums(X[rival_ind == 1, ]) sum_total[i, ] <- colSums(X[total_ind == 1, ]) } colnames(sum_other) <- paste0("IV", 1:5) colnames(sum_rival) <- paste0("IV", 6:10) productData_cars <- cbind(productData_cars, sum_other, sum_rival) head(productData_cars) # To show similarities between implementations of other authors, # the variable "const" is used, although constants are considered by default. blps_model <- as.formula("share ~ 0 + const + price + hpwt + air + mpg + space | 0 + const + hpwt + air + mpg + space | 0 + price + const + hpwt + air + mpg | 0 + IV1 + IV2 + IV3 + IV4 + IV5 + IV6 + IV7 + IV8 + IV9 + IV10") car_data <- BLP_data( model = blps_model, market_identifier = "cdid", product_identifier = "id", additional_variables = paste0("company", 1:26), # check reordering works productData = productData_cars, blp_inner_tol = 1e-9, blp_inner_maxit = 5000, integration_method = "MLHS", integration_accuracy = 50, integration_seed = 48 ) ``` In the next step, starting guesses for random coefficients are generated from a standard normal distribution. The estimation of the model works like before. ```{r} set.seed(121) theta_guesses <- matrix(rnorm(5)) rownames(theta_guesses) <- c("price", "const", "hpwt", "air", "mpg") colnames(theta_guesses) <- "unobs_sd" car_est <- estimateBLP( blp_data = car_data, par_theta2 = theta_guesses, solver_method = "BFGS", solver_maxit = 1000, solver_reltol = 1e-6, extremumCheck = FALSE, printLevel = 0 ) summary(car_est) ``` Next, all parameters that are required by the subsequent merger analysis are extracted. Note that all extracted data is based on the estimation object `car_est` or the data object `car_data` to maintain data consistency (for example, the order of data in `product_data_cars` might differ from `car_data`). Moreover, mean utilities are updated in `car_data` by the values in the estimation object `car_est`. ```{r} ## Pre-Merger data own_pre <- as.matrix(car_data$data$additional_data[, paste0("company", 1:26)]) delta_pre <- car_est$delta theta1_price <- car_est$theta_lin["price", ] theta2_price <- car_est$theta_rc["unobs_sd*price"] theta2_all <- matrix(car_est$theta_rc) rownames(theta2_all) <- c("price", "const", "hpwt", "air", "mpg") colnames(theta2_all) <- "unobs_sd" ## update mean utility in data ( always use update_BLP_data() to update data object to maintain consistent data ) delta_data <- data.frame( "id" = car_data$parameters$product_id, "cdid" = car_data$parameters$market_id, "delta" = delta_pre ) car_data_updated <- update_BLP_data( data_update = delta_data, blp_data = car_data ) ``` In the next step, an estimate for marginal costs $mc$ **before** the merger is computed. The following is based on the FOC of a Bertrand equilibrium with prices $p$ before the merger: $$ p^{pre} - \widehat{mc} = \Omega^{pre}(p^{pre})^{-1} \hat{s}(p^{pre}) $$ $\Omega^{pre}(p^{pre})^{-1}$ is defined marketwise as the inverse of $$ \Omega^{pre}(p^{pre}) = \pmatrix{ -\frac{\partial s_{1}}{\partial p_{1}} (p^{pre}) \cdot D_{1,1} & -\frac{\partial s_{2}}{\partial p_{1}} (p^{pre}) \cdot D_{1,2} & \cdots & -\frac{\partial s_{j}}{\partial p_{1}} (p^{pre}) \cdot D_{1,j} & \cdots & -\frac{\partial s_{J}}{\partial p_{1}} (p^{pre}) \cdot D_{1,J}\\ -\frac{\partial s_{1}}{\partial p_{2}} (p^{pre}) \cdot D_{2,1} & -\frac{\partial s_{2}}{\partial p_{2}} (p^{pre}) \cdot D_{2,2} & \cdots & -\frac{\partial s_{j}}{\partial p_{2}} (p^{pre}) \cdot D_{2,j} & \cdots & -\frac{\partial s_{J}}{\partial p_{2}} (p^{pre}) \cdot D_{2,J}\\ \vdots & \vdots & \ddots & \vdots & \ddots & \vdots\\ -\frac{\partial s_{1}}{\partial p_{k}} (p^{pre}) \cdot D_{k,1} & -\frac{\partial s_{2}}{\partial p_{k}} (p^{pre}) \cdot D_{k,2} & \cdots & -\frac{\partial s_{j}}{\partial p_{k}} (p^{pre}) \cdot D_{k,j} & \cdots & -\frac{\partial s_{J}}{\partial p_{k}} (p^{pre}) \cdot D_{k,J}\\ \vdots & \vdots & \ddots & \vdots & \ddots & \vdots\\ -\frac{\partial s_{1}}{\partial p_{J}} (p^{pre}) \cdot D_{J,1} & -\frac{\partial s_{2}}{\partial p_{J}} (p^{pre}) \cdot D_{J,2} & \cdots & -\frac{\partial s_{j}}{\partial p_{J}} (p^{pre}) \cdot D_{J,j}& \cdots & -\frac{\partial s_{J}}{\partial p_{J}} (p^{pre}) \cdot D_{J,J}\\ } $$ with $$D_{k,j} = \begin{cases} 1 & \text{if products k and j are produced by the same firm} \\ 0 & \text{otherwise} \\ \end{cases}$$ Partial derivatives $\frac{\partial s_j}{\partial p_k}$ can be calculated based on the elasticity $\eta_{jk} = \frac{\partial s_j }{\partial p_k }\frac{ p_k}{ s_j}$, so $$ \frac{\partial s_j}{\partial p_k} = \eta_{jk} \cdot \frac{ s_j}{ p_k} $$ In the following code chunk, these objects in a market `i` are labeled as follows: - `own_prod_pre_i` ($D_{k,j}$) - `elasticities_i` ($\eta_{jk}$) - `derivatives_i` ($\eta_{jk} \cdot \frac{ s_j}{ p_k}$) - `-solve(t(derivatives_i) * own_prod_pre_i)` ($\Omega^{pre}(p^{pre})^{-1}$) - `shareObj$shares` ($\hat{s}(p^{pre})$). ```{r} ## calculate sij shareObj <- getShareInfo( blp_data = car_data_updated, par_theta2 = theta2_all, printLevel = 0 ) ## computation of marginal costs market_id <- car_data$parameters$market_id nmkt <- length(unique(market_id)) markups <- numeric(length(market_id)) sh <- shareObj$shares prices_pre <- car_data$data$X_rand[, "price"] for (i in 1:nmkt) { mkt_ind <- market_id == i share_i <- sh[ mkt_ind ] price_pre_i <- prices_pre[ mkt_ind ] scalar_i <- matrix(1 / share_i) %*% matrix(price_pre_i, nrow = 1) elasticities_i <- get_elasticities( blp_data = car_data_updated, share_info = shareObj, theta_lin = theta1_price, variable = "price", market = i, printLevel = 0 ) derivatives_i <- elasticities_i / scalar_i # partial derivatives of shares wrt price own_pre_i <- own_pre[ mkt_ind, ] own_prod_pre_i <- own_pre_i %*% t(own_pre_i) # if element (i,j) equals 1, that means that prod i and j are produced by same firm markups[mkt_ind] <- c(-solve(t(derivatives_i) * own_prod_pre_i) %*% share_i) } marg_cost <- prices_pre - markups ``` The ownership matrix is adjusted to implement a hypothetical merger between Chrysler and GM: ```{r} # Merger between company 16 and 19 (i.e. GM and Chrysler) prices_post <- numeric(2217) own_post <- cbind( own_pre[, 1:15], own_pre[, 16] + own_pre[, 19], own_pre[, 17:18], own_pre[, 20:26] ) ``` To analyze the effect on prices the FOC of the new equilibrium must be solved: $$ p^{post} - \widehat{mc} = \Omega^{post}(p^{post})^{-1} \hat{s}(p^{post}) $$ The solution of this set of non-linear equations is obtained by the function `foc_bertrand_mkt` and the package `nleqslv`: ```{r, eval = FALSE} foc_bertrand_mkt <- function(par, own_prod, blp_data, mkt, marg_cost, theta_lin, theta_rc) { # argument par: candidate for post merger prices # arguments own_prod, blp_data, mkt, marg_cost, theta_lin, theta_rc: see previous code blocks # post merger updates: update the BLP_data object for market i tmp <- data.frame( "id" = blp_data$parameters$product_id, "cdid" = blp_data$parameters$market_id, "delta" = blp_data$data$delta, "price" = blp_data$data$X_rand[, "price"] ) market_ind <- blp_data$parameters$market_id == mkt delta_old <- blp_data$data$delta prices_pre <- blp_data$data$X_rand[, "price"] tmp$price[ market_ind ] <- par tmp$delta[ market_ind ] <- delta_old[market_ind] - prices_pre[market_ind] * theta_lin + par * theta_lin new_blp_data <- update_BLP_data( blp_data = blp_data, data_update = tmp ) ShareObj <- getShareInfo( blp_data = new_blp_data, par_theta2 = theta_rc, printLevel = 0 ) implied_shares <- as.matrix(ShareObj$shares[market_ind]) elasticities_post_mkt <- get_elasticities( blp_data = new_blp_data, share_info = ShareObj, theta_lin = theta_lin, variable = "price", market = mkt, printLevel = 0 ) scalar_mkt <- matrix(1 / implied_shares) %*% matrix(par, nrow = 1) derivatives_mkt <- elasticities_post_mkt / scalar_mkt markups_post <- c(-solve(t(derivatives_mkt) * own_prod) %*% implied_shares) differences <- par - marg_cost[market_ind] - markups_post return(differences) } ``` Finally, the function is used to compute the new equilibrium: ```{r,eval=FALSE} library(nleqslv) # to solve non linear first order conditions for (i in 1:nmkt) { mkt_ind <- market_id == i own_post_i <- own_post[ mkt_ind, ] own_prod_post_i <- own_post_i %*% t(own_post_i) price_pre_i <- prices_pre[ mkt_ind ] solution <- nleqslv( x = price_pre_i, foc_bertrand_mkt, # startingguesses: price_pre_i own_prod = own_prod_post_i, blp_data = car_data_updated, mkt = i, marg_cost = marg_cost, theta_lin = theta1_price, theta_rc = theta2_all ) prices_post[ market_id == i ] <- solution$x } ``` # References
/scratch/gouwar.j/cran-all/cranData/BLPestimatoR/vignettes/blp_intro.Rmd
#Random number generation from Inverse Gaussian Distribution #f(x;mu,lambda)=sqrt(lambda/(2*pi*x^3))*exp(-lambda*(x-mu)^2/(2*x*mu^2)), #x>0,mu>0,lambda>0 #n: number of observations. If 'length(n) > 1', the length is taken to be the number required. #mu: mean, it can be vector, each element must be bigger than 0 #lambda: shape parameter, it can be a vector each element must be bigger than 0 #Reference: #Michael, J., Schucany, W., & Haas, R. (1976). Generating Random Variates Using #Transformations with Multiple Roots. The American Statistician, 30(2), 88-90. #doi:10.2307/2683801 #Added January, 2, 2020 rinvGauss=function(n,mu,lambda) { #As in the case of normal distribution, check lengths if(length(n)>1) n<-length(n) #Check that mu and lambda are positive if(any(mu<=0)) stop("mu must be positive") if(any(lambda<0)) stop("lambda must be positive") #Check lengths and adjust them recycling if(length(mu)>1 && length(mu)!=n) mu<- rep(mu,length=n) if(length(lambda)>1 && length(lambda)!=n) lambda = rep(lambda,length=n) #Generate random sample from standard normal g<-rnorm(n,mean=0,sd=1) #Transform to a sample from chi-squared with 1 df v<-g*g #Compute roots, equation 5 in reference paper #see Fortran code below equation (6) w<-mu*v cte<-mu/(2*lambda) sol1<-mu+cte*(w-sqrt(w*(4*lambda+w))) sol2<-mu*mu/sol1 #Uniform random numbers (0,1) u<-runif(n) ifelse(u<mu/(mu+sol1),sol1,sol2) } BLR<-function (y, XF = NULL, XR = NULL, XL = NULL, GF = list(ID = NULL, A = NULL), prior = NULL, nIter = 1100, burnIn = 100, thin = 10, thin2 = 1e+10, saveAt = "", minAbsBeta = 1e-09, weights = NULL){ welcome() y <- as.numeric(y) n <- length(y) if (!is.null(XF)) { if (any(is.na(XF)) | nrow(XF) != n) stop("The number of rows in XF does not correspond with that of y or it contains missing values") } if (!is.null(XR)) { if (any(is.na(XR)) | nrow(XR) != n) stop("The number of rows in XR does not correspond with that of y or it contains missing values") } if (!is.null(XL)) { if (any(is.na(XL)) | nrow(XL) != n) stop("The number of rows in XL does not correspond with that of y or it contains missing values") } if (is.null(prior)) { cat("===============================================================\n") cat("No prior was provided, BLR is running with improper priors.\n") cat("===============================================================\n") prior = list(varE = list(S = 0, df = 0), varBR = list(S = 0,df = 0), varU = list(S = 0, df = 0), lambda = list(shape = 0, rate = 0, type = "random", value = 50)) } nSums <- 0 whichNa <- which(is.na(y)) nNa <- sum(is.na(y)) if (is.null(weights)) { weights <- rep(1, n) } sumW2 <- sum(weights^2) mu <- weighted.mean(x = y, w = weights, na.rm = TRUE) yStar <- y * weights yStar[whichNa] <- mu * weights[whichNa] e <- (yStar - weights * mu) varE <- var(e, na.rm = TRUE)/2 if (is.null(prior$varE)) { cat("==============================================================================\n") cat("No prior was provided for residual variance, BLR will use an improper prior.\n") prior$varE <- list(df = 0, S = 0) cat("==============================================================================\n") } post_mu <- 0 post_varE <- 0 post_logLik <- 0 post_yHat <- rep(0, n) post_yHat2 <- rep(0, n) hasRidge <- !is.null(XR) hasXF <- !is.null(XF) hasLasso <- !is.null(XL) hasGF <- !is.null(GF[[1]]) if (hasXF) { for (i in 1:n) { XF[i, ] <- weights[i] * XF[i, ] } SVD.XF <- svd(XF) SVD.XF$Vt <- t(SVD.XF$v) SVD.XF <- SVD.XF[-3] pF0 <- length(SVD.XF$d) pF <- ncol(XF) bF0 <- rep(0, pF0) bF <- rep(0, pF) namesBF <- colnames(XF) post_bF <- bF post_bF2 <- bF rm(XF) } if (hasLasso) { if (is.null(prior$lambda)) { cat("==============================================================================\n") cat("No prior was provided for lambda, BLR will use an improper prior.\n") cat("==============================================================================\n") prior$lambda <- list(shape = 0, rate = 0, value = 50, type = "random") } for (i in 1:n) { XL[i, ] <- weights[i] * XL[i, ] } pL <- ncol(XL) xL2 <- colSums(XL*XL) bL <- rep(0, pL) namesBL <- colnames(XL) tmp<-1/2/sum(xL2/n) tau2 <- rep(tmp, pL) lambda <- prior$lambda$value lambda2 <- lambda^2 post_lambda <- 0 post_bL <- rep(0, pL) post_bL2 <- post_bL post_tau2 <- rep(0, pL) XLstacked <- as.vector(XL) rm(XL) } else { lambda = NA } if (hasRidge) { if (is.null(prior$varBR)) { cat("==============================================================================\n") cat("No prior was provided for varBR, BLR will use an improper prior.\n") cat("==============================================================================\n") prior$varBR <- list(df = 0, S = 0) } for (i in 1:n) { XR[i, ] <- weights[i] * XR[i, ] } pR <- ncol(XR) xR2 <- colSums(XR * XR) bR <- rep(0, pR) namesBR <- colnames(XR) varBR <-varE/2/sum(xR2/n) post_bR <- rep(0, pR) post_bR2 <- post_bR post_varBR <- 0 XRstacked <- as.vector(XR) rm(XR) } if (hasGF) { if (is.null(prior$varU)) { cat("==============================================================================\n") cat("No prior was provided for varU, BLR will use an improper prior.\n") cat("==============================================================================\n") prior$varU <- list(df = 0, S = 0) } ID <- factor(GF$ID) pU <- nrow(GF$A) L <- t(chol(GF$A)) if (pU != nrow(table(ID))) { stop("L must have as many columns and rows as levels on ID\n") } Z <- model.matrix(~ID - 1) for (i in 1:n) { Z[i, ] <- weights[i] * Z[i, ] } Z <- Z %*% L z2 <- colSums(Z * Z) u <- rep(0, pU) namesU <- colnames(GF$A) varU <- varE/4/mean(diag(GF$A)) GF$A <- NULL post_U <- u post_U2 <- post_U post_varU <- 0 Zstacked <- as.vector(Z) rm(Z) } time <- proc.time()[3] for (i in 1:nIter) { if (hasXF) { sol <- (crossprod(SVD.XF$u, e) + bF0) tmp <- sol + rnorm(n = pF0, sd = sqrt(varE)) bF <- crossprod(SVD.XF$Vt, tmp/SVD.XF$d) e <- e + SVD.XF$u %*% (bF0 - tmp) bF0 <- tmp } if (hasRidge) { ans <- .Call("sample_beta", n, pR, XRstacked, xR2, bR, e, rep(varBR, pR), varE, minAbsBeta) bR <- ans[[1]] e <- ans[[2]] SS <- crossprod(bR) + prior$varBR$S df <- pR + prior$varBR$df varBR <- SS/rchisq(df = df, n = 1) } if (hasLasso) { varBj <- tau2 * varE ans <- .Call("sample_beta", n, pL, XLstacked, xL2, bL, e, varBj, varE, minAbsBeta) bL <- ans[[1]] e <- ans[[2]] nu <- sqrt(varE) * lambda/abs(bL) tmp<-NULL try(tmp <- rinvGauss(n = pL, mu = nu, lambda = lambda2)) if(!is.null(tmp)) { if(!any(is.na(sqrt(tmp)))) { tau2 <- 1/tmp }else{ cat("WARNING: tau2 was not updated due to numeric problems with beta\n"); } }else{ cat("WARNING: tau2 was not updated due to numeric problems with beta\n"); } if (prior$lambda$type == "random") { if (is.null(prior$lambda$rate)) { lambda <- metropLambda(tau2 = tau2, lambda = lambda, shape1 = prior$lambda$shape1, shape2 = prior$lambda$shape2, max = prior$lambda$max) lambda2 <- lambda^2 } else { rate <- sum(tau2)/2 + prior$lambda$rate shape <- pL + prior$lambda$shape lambda2 <- rgamma(rate = rate, shape = shape,n = 1) if(!is.na(lambda2)) { lambda <- sqrt(lambda2) }else{ cat("WARNING: lambda was not updated due to numeric problems with beta\n"); } } } } if (hasGF) { ans <- .Call("sample_beta", n, pU, Zstacked, z2, u, e, rep(varU, pU), varE, minAbsBeta) u <- ans[[1]] e <- ans[[2]] SS <- crossprod(u) + prior$varU$S df <- pU + prior$varU$df varU <- SS/rchisq(df = df, n = 1) } e <- e + weights * mu rhs <- sum(weights * e)/varE C <- sumW2/varE sol <- rhs/C mu <- rnorm(n = 1, sd = sqrt(1/C)) + sol e <- e - weights * mu SS <- crossprod(e) + prior$varE$S df <- n + prior$varE$df if (hasLasso) { if(!any(is.na(sqrt(tau2)))) { SS <- SS + as.numeric(crossprod(bL/sqrt(tau2))) }else{ cat("WARNING: SS was not updated due to numeric problems with beta\n"); } df <- df + pL } varE <- as.numeric(SS)/rchisq(n = 1, df = df) sdE <- sqrt(varE) yHat <- yStar - e if (nNa > 0) { e[whichNa] <- rnorm(n = nNa, sd = sdE) yStar[whichNa] <- yHat[whichNa] + e[whichNa] } if ((i%%thin == 0)) { tmp <- c(varE) fileName <- paste(saveAt, "varE", ".dat", sep = "") write(tmp, ncolumns = length(tmp), file = fileName, append = TRUE, sep = " ") if (hasXF) { tmp <- bF fileName <- paste(saveAt, "bF", ".dat", sep = "") write(tmp, ncolumns = length(tmp), file = fileName, append = TRUE, sep = " ") } if (hasLasso) { tmp <- lambda fileName <- paste(saveAt, "lambda", ".dat", sep = "") write(tmp, ncolumns = length(tmp), file = fileName, append = TRUE, sep = " ") } if (hasRidge) { tmp <- varBR fileName <- paste(saveAt, "varBR", ".dat", sep = "") write(tmp, ncolumns = length(tmp), file = fileName, append = TRUE, sep = " ") } if (hasGF) { tmp <- varU fileName <- paste(saveAt, "varU", ".dat", sep = "") write(tmp, ncolumns = length(tmp), file = fileName, append = TRUE, sep = " ") } if (i >= burnIn) { nSums <- nSums + 1 k <- (nSums - 1)/(nSums) tmpE <- e/weights tmpSD <- sqrt(varE)/weights if (nNa > 0) { tmpE <- tmpE[-whichNa] tmpSD <- tmpSD[-whichNa] } logLik <- sum(dnorm(tmpE, sd = tmpSD, log = TRUE)) post_logLik <- post_logLik * k + logLik/nSums post_mu <- post_mu * k + mu/nSums post_varE <- post_varE * k + varE/nSums post_yHat <- post_yHat * k + yHat/nSums post_yHat2 <- post_yHat2 * k + (yHat^2)/nSums if (hasXF) { post_bF <- post_bF * k + bF/nSums post_bF2 <- post_bF2 * k + (bF^2)/nSums } if (hasLasso) { post_lambda <- post_lambda * k + lambda/nSums post_bL <- post_bL * k + bL/nSums post_bL2 <- post_bL2 * k + (bL^2)/nSums post_tau2 <- post_tau2 * k + tau2/nSums } if (hasRidge) { post_bR <- post_bR * k + bR/nSums post_bR2 <- post_bR2 * k + (bR^2)/nSums post_varBR <- post_varBR * k + varBR/nSums } if (hasGF) { tmpU<-L%*%u post_U <- post_U * k + tmpU/nSums post_U2 <- post_U2 * k + (tmpU^2)/nSums post_varU <- post_varU * k + varU/nSums } } } if ((i%%thin2 == 0) & (i > burnIn)) { tmp <- post_yHat fileName <- paste(saveAt, "rmYHat", ".dat", sep = "") write(tmp, ncolumns = length(tmp), file = fileName, append = TRUE, sep = " ") if (hasLasso) { tmp <- post_bL fileName <- paste(saveAt, "rmBL", ".dat", sep = "") write(tmp, ncolumns = length(tmp), file = fileName, append = TRUE, sep = " ") } if (hasRidge) { tmp <- post_bR fileName <- paste(saveAt, "rmBR", ".dat", sep = "") write(tmp, ncolumns = length(tmp), file = fileName, append = TRUE, sep = " ") } if (hasGF) { tmp <- post_U fileName <- paste(saveAt, "rmU", ".dat", sep = "") write(tmp, ncolumns = length(tmp), file = fileName, append = TRUE, sep = " ") } } tmp <- proc.time()[3] cat(paste(c("Iter: ", "time/iter: ", "varE: ", "lambda: "), c(i, round(tmp - time, 3), round(varE, 3), round(lambda, 3)))) cat("\n") cat(paste("------------------------------------------------------------")) cat("\n") time <- tmp } tmp <- sqrt(post_yHat2 - (post_yHat^2)) out <- list(y = y, weights = weights, mu = post_mu, varE = post_varE, yHat = I(post_yHat/weights), SD.yHat = I(tmp/weights), whichNa = whichNa) names(out$yHat) <- names(y) names(out$SD.yHat) <- names(y) tmpE <- (yStar - post_yHat)/weights tmpSD <- sqrt(post_varE)/weights if (nNa > 0) { tmpE <- tmpE[-whichNa] tmpSD <- tmpSD[-whichNa] } out$fit <- list() out$fit$logLikAtPostMean <- sum(dnorm(tmpE, sd = tmpSD, log = TRUE)) out$fit$postMeanLogLik <- post_logLik out$fit$pD <- -2 * (post_logLik - out$fit$logLikAtPostMean) out$fit$DIC <- out$fit$pD - 2 * post_logLik if (hasXF) { out$bF <- as.vector(post_bF) out$SD.bF <- as.vector(sqrt(post_bF2 - post_bF^2)) names(out$bF) <- namesBF names(out$SD.bF) <- namesBF } if (hasLasso) { out$lambda <- post_lambda out$bL <- as.vector(post_bL) tmp <- as.vector(sqrt(post_bL2 - (post_bL^2))) out$SD.bL <- tmp out$tau2 <- post_tau2 names(out$bL) <- namesBL names(out$SD.bL) <- namesBL } if (hasRidge) { out$bR <- as.vector(post_bR) tmp <- as.vector(sqrt(post_bR2 - (post_bR^2))) out$SD.bR <- tmp out$varBR <- post_varBR names(out$bR) <- namesBR names(out$SD.bR) <- namesBR } if (hasGF) { out$u <- as.vector(post_U) tmp <- as.vector(sqrt(post_U2 - (post_U^2))) out$SD.u <- tmp out$varU <- post_varU names(out$u) <- namesU names(out$SD.u) <- namesU } out$prior <- prior out$nIter <- nIter out$burnIn <- burnIn out$thin <- thin return(out) } ################################################################################################## welcome<-function(){ cat("======== Bayesian Regression Coupled with LASSO ========\n") cat("# #\n") cat("# BLR v1.6 #\n") cat("# January, 2020 #\n") cat("# Contact: #\n") cat("# Gustavo de los Campos, [email protected] #\n") cat("# Paulino Perez-Rodriguez,[email protected] #\n") cat("# #\n") cat("=========================================================\n") } ################################################################################################## ################################################################################################## dScaledInvChisq<-function (x, df, S){ tmp <- dchisq(S/x, df = df)/(x^2) return(tmp) } ################################################################################################## dLambda<-function (rate, shape, lambda) { tmp <- dgamma(x = I(lambda^2), rate = rate, shape = shape) * 2 * lambda return(tmp) } metropLambda<-function (tau2, lambda, shape1 = 1.2, shape2 = 1.2, max = 200, ncp = 0) { lambda2 <- lambda^2 l2_new <- rgamma(rate = sum(tau2)/2, shape = length(tau2), n = 1) l_new <- sqrt(l2_new) logP_old <- sum(dexp(x = tau2, log = TRUE, rate = (lambda2/2))) + dbeta(x = lambda/max, log = TRUE, shape1 = shape1, shape2 = shape2) - dgamma(shape = sum(tau2)/2, rate = length(tau2), x = (2/lambda2), log = TRUE) logP_new <- sum(dexp(x = tau2, log = TRUE, rate = (l2_new/2))) + dbeta(x = l_new/max, log = TRUE, shape1 = shape1, shape2 = shape2) - dgamma(shape = sum(tau2)/2, rate = length(tau2), x = (2/l2_new), log = TRUE) accept <- (logP_new - logP_old) > log(runif(1)) if (accept) { lambda <- l_new } return(lambda) } ################################################################################################## .onAttach <- function(library, pkg) { Rv <- R.Version() if(!exists("getRversion", baseenv()) || (getRversion() < "3.1.2")) stop("This package requires R 3.1.2 or later") assign(".BLR.home", file.path(library, pkg), pos=match("package:BLR", search())) BLR.version <- "1.6 (2020-01-02)" assign(".BLR.version", BLR.version, pos=match("package:BLR", search())) if(interactive()) { packageStartupMessage(paste("Package 'BLR', ", BLR.version, ". ",sep=""),appendLF=TRUE) packageStartupMessage("Type 'help(BLR)' for summary information",appendLF=TRUE) } invisible() } ##################################################################################################
/scratch/gouwar.j/cran-all/cranData/BLR/R/BLRw_c.R
rm(list=ls()) library(BLR) data(wheat) #Loads the wheat dataset nIter<-1500 #For real data sets more samples are needed burnIn<-500 thin<-10 folds<-10 y<-Y[,1] priorBL<-list( varE=list(df=3,S=2.5), varU=list(df=3,S=0.63), lambda = list(shape=0.52,rate=1e-5,value=20,type='random') ) set.seed(123) #Set seed for the random number generator sets<-rep(1:10,60)[-1] sets<-sets[order(runif(nrow(A)))] COR.CV<-rep(NA,times=(folds+1)) names(COR.CV)<-c(paste('fold=',1:folds,sep=''),'Pooled') w<-rep(1/nrow(A),folds) ## weights for pooled correlations and MSE yHatCV<-numeric() for(fold in 1:folds) { yNa<-y whichNa<-which(sets==fold) yNa[whichNa]<-NA prefix<-paste('PM_BL','_fold_',fold,'_',sep='') fm<-BLR(y=yNa,XL=X,GF=list(ID=(1:nrow(A)),A=A),prior=priorBL, nIter=nIter,burnIn=burnIn,thin=thin) yHatCV[whichNa]<-fm$yHat[fm$whichNa] w[fold]<-w[fold]*length(fm$whichNa) COR.CV[fold]<-cor(fm$yHat[fm$whichNa],y[whichNa]) } COR.CV[11]<-mean(COR.CV[1:10]) COR.CV
/scratch/gouwar.j/cran-all/cranData/BLR/demo/cross_validation.R
rm(list=ls()) library(BLR) data(wheat) #Loads the wheat dataset y=Y[,1] ### Creates a testing set with 100 observations whichNa<-sample(1:length(y),size=100,replace=FALSE) yNa<-y yNa[whichNa]<-NA ### Runs the Gibbs sampler fm<-BLR(y=yNa,XL=X,GF=list(ID=1:nrow(A),A=A), prior=list(varE=list(df=3,S=0.25), varU=list(df=3,S=0.63), lambda=list(shape=0.52,rate=1e-4, type='random',value=30)), nIter=5500,burnIn=500,thin=1, saveAt="example_") MSE.tst<-mean((fm$yHat[whichNa]-y[whichNa])^2) MSE.tst MSE.trn<-mean((fm$yHat[-whichNa]-y[-whichNa])^2) MSE.trn COR.tst<-cor(fm$yHat[whichNa],y[whichNa]) COR.tst COR.trn<-cor(fm$yHat[-whichNa],y[-whichNa]) COR.trn plot(fm$yHat~y,xlab="Phenotype", ylab="Pred. Gen. Value" ,cex=.8) points(x=y[whichNa],y=fm$yHat[whichNa],col=2,cex=.8,pch=19) x11() plot(scan('example_varE.dat'),type="o", ylab=expression(paste(sigma[epsilon]^2)))
/scratch/gouwar.j/cran-all/cranData/BLR/demo/fit_bL.R
################################################################################################################# ##' Bartlett-Lewis Rectangular Pulse Model ##' ##' Model description (Rodriguez-Iturbe et al., 1987): ##' ##' The model is a combination of 2 poisson processes and simulates storms and cells. ##' During the given simulation time storms are generated in a poisson process with rate lambda. ##' Those storms are given a exponetially distributed duration with parameter gamma. ##' During its duration the storm generates in a second poisson process cells with rate beta. ##' The first cell has to be instantaneous at the time of the storm arrival. ##' The cell duration is exponentially distributed with parameter eta. For the whole lifetime ##' each cell is given a constant intensity which is exponentially distributed with parameter 1/mux. ##' ##' Aggregation: ##' ##' The intensities of all cells alive at time t are summed up for total precipitation at time t. ##' ##' Parameter estimation: ##' ##' The model parameters (lambda,gamma,beta,eta,mux) can be estimated from simulated or ##' observed precipitation time series using the method of moments. Certain moments, e.g. mean, variance ##' can be calculated from the time series at different aggregation levels. These moments ##' can also be calculated theoretically from model parameters. Both sets of statistics can be ##' compared in an objective function, similar to a squared error estimator. By numerical optimization ##' the model parameters can be tuned to match the time series characteristics. ############################################################################################################### ######################### ## Part 1: Simulation ## ######################### ##' \code{BL.sim} generates model realisations of storms and cells by using given model ##' parameters \code{lambda,gamma,beta,eta,mux} for a given simulation time \code{t.sim} ##' @title Simulating storms and cells ##' @param lambda value specifying the generation rate of storms [1/h] ##' @param gamma value specifying the storm duration [1/h] ##' @param beta value specifying the generation rate of cells [1/h] ##' @param eta value specifying the cell duration [1/h] ##' @param mux value specifying the cell intensity [mm/h] ##' @param t.sim value specifying the simulation time [h] ##' @return \code{BL.sim} returns storms; \code{data.frame} of all storms containing information about occurence time, end time and number of cells ##' @return \code{BL.sim} returns cells; \code{data.frame} of all cells containing information about occurence time, end time, intensity and storm index ##' @author Christoph Ritschel \email{christoph.ritschel@@met.fu-berlin.de} ##' @examples ##' lambda <- 4/240 ##' gamma <- 1/10 ##' beta <- 0.3 ##' eta <- 2 ##' mux <- 4 ##' t.sim <- 240 ##' simulation <- BL.sim(lambda,gamma,beta,eta,mux,t.sim) ##' @export BL.sim <- function(lambda=4/240,gamma=1/10,beta=0.3,eta=2,mux=4,t.sim=240) { ## initialize output list for cells cells <- NULL ################################################ ## 1st layer: storms ## generate n.s storms in a poisson process with rate parameter lambda for a given ## simulation time t.sim ## expectation is lambda times t.sim n.s <- rpois(1,lambda*t.sim) if(n.s > 0) { ## if there is at least one storm... ## storms are uniform distributed over the simulation time t.sim s.s <- runif(n.s,0,t.sim) ## storm duration is exponentially distributed with parameter gamma ## expectation of storm duration d.s is 1/gamma ## calculating end time of storms d.s <- rexp(n.s,gamma) e.s <- s.s+d.s ## write information about storms into a data.frame PL <- array(NA,dim=c(n.s)) ## place holder for number of cells storms.matrix <- cbind(s.s,e.s,PL) dimnames(storms.matrix)[[2]] <- c("start","end","n.cells") storms <- as.data.frame(storms.matrix) ######################################################## ### 2nd layer: cell generation for each storm for(i in 1:n.s) { # loop over storms ## generate n.c cells in a poisson process with rate parameter beta over storm duration ## there has to be at least one cell for each storm ## expectation of n.c is 1+beta/gamma n.c <- rpois(1,beta*d.s[i])+1 storms[i,3] <- n.c # write number of cells into storm dataframe ## cells are uniform distributed over storm duration ## constraint: first cell has to be active instantaneous at storm occurence time s.c <- c(s.s[i],runif(n.c-1,s.s[i],e.s[i])) ## cell duration is exponentially distributed with parameter eta ## expectation ov cell duration is 1/eta d.c <- rexp(n.c,eta) e.c <- s.c+d.c # end time of cells ## cell intensity is exponentially distributed with parameter 1/mux ## expectation of cell intensity is mux int <- rexp(n.c,1/mux) ## Schreibe Zellinformationen in einen data.frame cells.new <- cbind(s.c,e.c,int,i) cells <- rbind(cells,cells.new) # bind cells of each storm } # end loop over all storms dimnames(cells)[[2]] <- c("start","end","int","i") cells <- as.data.frame(cells) }else { ## if there is no storm cells <- NULL storms <- NULL } ## end if storms... ## return dataframes of storms and cells return(list("cells"=cells,"storms"=storms)) } # end of function BL.sim ######################################################################################## ##################################################################### ### accumulated time series ### ##################################################################### ##' \code{BL.stepfun} calculates a continous stepfunction of precipitation from ##' the \code{data.frame} \code{cells} ##' @title BLRPM continous stepfunction of precipitation ##' @param cells \code{data.frame} of all cells containing information about occurence time, end time, intensity and storm index ##' @return sfn returns stepfunction of precipitation ##' @usage BL.stepfun(cells) ##' @author Christoph Ritschel \email{christoph.ritschel@@met.fu-berlin.de} ##' @examples ##' lambda <- 4/240 ##' gamma <- 1/10 ##' beta <- 0.3 ##' eta <- 2 ##' mux <- 4 ##' t.sim <- 240 ##' simulation <- BL.sim(lambda,gamma,beta,eta,mux,t.sim) ##' stepfun <- BL.stepfun(simulation$cells) ##' @export BL.stepfun <- function(cells) { ## get start and end times of cells ## for start times associate positive cell intensities, for end times negative intensities t <- c(cells$start,cells$end) p <- c(cells$int,cells$int*(-1)) cells.new <- cbind(t,p) ### sort the cells for cumulative stepfunction cells.sort <- cells.new[sort.int(cells.new[,1],index.return=TRUE)$ix,] cells.sort[,2] <- cumsum(cells.sort[,2]) ## Cumsum ## stepfunction sfn <- stepfun(cells.sort[,1],c(0,cells.sort[,2])) ## return stepfunction return(sfn) } # End of Function BL.stepfun ####################################################################################### ##' \code{BL.acc} accumulates the BLRPM stepfunction for a given accumulation time \code{t.acc} ##' at a given accumulation level \code{acc.val}. An \code{offse} can be defined. The unit is typically hours. ##' @title Accumulation of a precipitation stepfunction ##' @param sfn stepfunction of precipitation ##' @param t.acc \code{value} specifiying the length of accumulated time series [h] ##' @param acc.val \code{value} specifying the accumulation level [h] ##' @param offset \code{value} specifying the offset of the accumulated time series [h] ##' @return p.acc \code{data.frame} ##' @author Christoph Ritschel \email{christoph.ritschel@@met.fu-berlin.de} ##' @examples ##' lambda <- 4/240 ##' gamma <- 1/10 ##' beta <- 0.3 ##' eta <- 2 ##' mux <- 4 ##' t.sim <- 240 ##' t.acc <- t.sim ##' acc.val <- 1 ##' offset <- 0 ##' ##' simulation <- BL.sim(lambda,gamma,beta,eta,mux,t.sim) ##' sfn <- BL.stepfun(simulation$cells) ##' ts <- BL.acc(sfn,t.acc,acc.val,offset) ##' @export BL.acc <- function(sfn,t.acc=240,acc.val=1,offset=0) { ## end of accumulation time series end <- t.acc+offset ## sequence of accumulation time steps bins <- seq(from=offset,to=end,by=acc.val) ## sorting knotpoints of sfn and bins kn <- sort(unique(c(knots(sfn),bins))) delta.kn <- diff(kn) ## calculate cumulative sums csum <- cumsum(sfn(kn[1:(length(kn)-1)])*delta.kn) ### add zero for start csum <- c(0,csum) ## precipitiation is difference between sums of each bins p <- c(diff(csum[is.element(kn,bins)])) ## write precipitation and time information in data.frame p.acc <- as.data.frame(cbind(bins[2:length(bins)],p)) names(p.acc) <- c("time","RR") ## return data.frame return(p.acc) } # End of Function BL.acc ########################################################################### ##' \code{TS.acc} accumulates a given time series \code{x} at a given accumulation level \code{acc.val}. Minimum value ##' for acc.val is 2 [unit time] ##' @title Accumulation of a time series ##' @param x \code{vector} of a time series ##' @param acc.val \code{value} specifying the accumulation level, minimum value is 2 ##' @return x.acc \code{TS.acc} returns a \code{vector} of an accumulated time series ##' @author Christoph Ritschel \email{christoph.ritschel@@met.fu-berlin.de} ##' @usage TS.acc(x,acc.val) ##' @examples ##' x <- rgamma(1000,1) ##' x.2 <- TS.acc(x,acc.val=2) ##' @export TS.acc <- function(x,acc.val=2) { ## check for input value of acc.val if(acc.val<1) cat(paste("Warning: accumulation value acc.val too small for accumulation of the time series \n")) l.new <- length(x)%/%acc.val ## calculate new length of accumulated time series l.rest <- length(x)%%acc.val ## calculate values left over if(l.rest==0) { x.acc <- apply(matrix(x,nrow=l.new,byrow=T),1,sum) }else{ x.acc <- apply(matrix(x[1:(length(x)-l.rest)],nrow=l.new,byrow=T),1,sum) cat(paste("Warning: ",l.rest,"time steps left and not used for accumulation \n")) } ## return accumulated time series return(x.acc) } # End of function TS.acc ##################################################################################### ################################################### ### calculate statistics of a given time series ### ################################################### ##' \code{TS.stats} calculates statistics of a given time series \code{x} at given accumulation ##' levels \code{acc.vals}. The calculated statistics are the mean of the first accumulation level, ##' the variance, auto-covariance lag-1 and the probability of zero rainfall of all given accumulation ##' levels of the time series. These statistics are needed for estimating the BLRPM parameters. ##' @title calculating statistics of a time series needed for parameter estimation ##' @param x \code{vector} of a time series ##' @param acc.vals \code{vector} of accumulation levels, first value should be 1 ##' @return stats \code{TS.stats} returns a \code{vector} of statistics calculated at given accumulation levels ##' @author Christoph Ritschel \email{christoph.ritschel@@met.fu-berlin.de} ##' @usage TS.stats(x,acc.vals) ##' @examples ##' time.series <- rgamma(1000,shape=1) ##' statistics <- TS.stats(time.series,acc.vals=c(1,3,12,24)) ##' @export TS.stats <- function(x,acc.vals=c(1,3,12,24)) { ## initialize array Statistik <- array(NA,dim=c(length(acc.vals),3)) ## loop over accumulation values for(i in 1:length(acc.vals)) { ts <- TS.acc(x,acc.vals[i]) if(i==1) stats.mean <- mean(ts,na.rm=T) # Mean for first level of aggregation Statistik[i,1] <- var(ts,na.rm=T) # Var for all agg levels Statistik[i,2] <- acf(ts,lag.max=1,type="covariance",plot=F)$acf[2,1,1] Statistik[i,3] <- length(ts[ts==0])/length(ts) # Prob.Zero 24h } stats <- c(stats.mean,as.vector(Statistik)) names(stats) <- c(paste("mean.",acc.vals[1],sep=""),paste("var.",acc.vals,sep=""), paste("cov.",acc.vals,sep=""),paste("pz.",acc.vals,sep="")) ## return vector of statistics return(stats) } # End of function TS.stats ######################################################################################### ##' \code{BLRPM.class} defines a new class for objects of type BLRPM containing the information about storms, ##' cells, stepfunction and the precipitation time series. ##' @title BLRPM class ##' @author Christoph Ritschel \email{christoph.ritschel@@met.fu-berlin.de} BLRPM.class <- R6Class("BLRPM", public = list( storms = NA, cells = NA, sfn = NA, RR = NA, time = NA, initialize = function(storms, cells, sfn, RR, time) { if (!missing(storms)) self$storms <- storms if (!missing(cells)) self$cells <- cells if (!missing(cells)) self$sfn <- sfn if (!missing(cells)) self$RR <- RR if (!missing(cells)) self$time <- time }) ) ########################################################################### ##' \code{BLRPM.sim} is the main function for simulating precipitation with the Bartlett-Lewis rectangular pulse model. ##' It generates storms and cells using the given five BLRPM parameters \code{lambda, gamma, beta, eta, mux} for a given ##' simulation time \code{t.sim}. The function \code{BLRPM.sim} then accumulates a precipitation time series of length ##' \code{t.akk} (typically the same as t.sim) with an accumulation time step \code{interval} from the generated ##' storms and cells. An \code{offset} can be used to delay the precipitation time series for initialization reasons. ##' \code{BLRPM.sim} returns a list of different variables and data.frames: \code{Storms, Cells, Stepfun, Precip, time}. ##' @title Simulating precipitation with the BLRPM ##' @param lambda \code{value} specifying the expected storm generation rate [1/units.time] ##' @param gamma \code{value} specifying the expected storm duration[1/units.time] ##' @param beta \code{value} specifying the expected cell generation rate [1/units.time] ##' @param eta \code{value} specifying the expected cell duration [1/units.time] ##' @param mux \code{value} specifying the expected cell intenstity [mm/unit.time] ##' @param t.sim \code{value} specifying the simulation length [units.time] ##' @param t.acc \code{value} specifying the length of the accumulated time series [units.time]. ##' Note: if longer than t.sim only zeros are added after t.sim. ##' @param interval \code{value} specifying the accumulation time step [units.time] ##' @param offset \code{value} specifying the offset of the accumulated time series with ##' respect to the start time of the simulation [units.time]. Note: negative values are not allowed. ##' @return $storms returns \code{data.frame} containing information about storms: start, end, number of cells ##' @return $cells returns \code{data.frame} containing information about cells: start, end, intensity, storm index ##' @return $sfn returns \code{stepfunction} used to accumulate precipitation time series ##' @return $RR returns \code{vector} of accumulated precipitation with time step \code{interval} [mm/interval] ##' @return $time returns \code{vector} of time steps [interval] ##' @usage BLRPM.sim(lambda,gamma,beta,eta,mux,t.sim,t.acc,interval,offset) ##' @examples ##' lambda <- 4/240 ##' gamma <- 1/10 ##' beta <- 0.3 ##' eta <- 2 ##' mux <- 4 ##' t.sim <- 240 ##' t.acc <- t.sim ##' interval <- 1 ##' offset <- 0 ##' simulation <- BLRPM.sim(lambda,gamma,beta,eta,mux,t.sim,t.acc=t.sim,interval,offset) ##' @author Christoph Ritschel \email{christoph.ritschel@@met.fu-berlin.de} ##' @export BLRPM.sim <- function(lambda=4/240,gamma=1/10,beta=0.3,eta=2,mux=4,t.sim=240,t.acc=t.sim,interval=1,offset=0) { ## if offset negative: warning and abort if(offset<0) {cat("Warning: offset value negative \n") }else{ t.sim <- t.sim+2*offset ## Simulate storms and cells with given parameters sim <- BL.sim(lambda,gamma,beta,eta,mux,t.sim) ## if at least one storm and one cell is generated ## accumulate precipitation time series if(!is.null(sim$cells)) { sfn <- BL.stepfun(sim$cells) p.acc <- BL.acc(sfn,t.acc,interval,offset) BLRPM <- BLRPM.class$new(sim$storms,sim$cells,sfn,p.acc$RR,p.acc$time) ## return data.frames and vectors return(BLRPM) }else cat("Warning: no storm generated. Possibly increase simulation time t.sim or storm generation parameter lambda \n") } } ## End of function BLRPM.sim ################################################################ ####################### ### part2: plotting ### ####################### ##' \code{plot.BLRPM} plots an object of \code{class} BLRPM returned by the function \code{BLRPM.sim} with an option to plot ##' either only the storms and cells or to additionally plot the stepfunction and the precipitation time series ##' in a multiframe plot. ##' @title Plotting of an object of \code{class} BLRPM ##' @param x \code{class} BLRPM object which is returned by function \code{BLRPM.sim} ##' @param OSC \code{logical} determing type of plot. OSC=True only storms and cells are plotted. OSC=FALSE storms, cells, ##' stepfunction and precipitation time series plotted. ##' @param start.time \code{numerical} value setting the starting time of a time window to be plotted. Default is NULL, therefore start time is 0 ##' @param end.time \code{numerical} value setting the end time of a time window to be plotted. Default is NULL, meaning the plot will ##' end with the last active cell ##' @param legend \code{logical} setting the option for legend to be plotted or not ##' @param c.axis \code{numerical} value for axis label size, default is 1.5 ##' @param c.lab \code{numerical} value for plot label size, default is 1.5 ##' @param c.legend \code{numerical} value for legend font size, default is 1.5 ##' @param ... Arguments to be passed to methods, such as \link{graphical parameters} (see \code{\link{par}}). ##' @seealso \code{\link{plot}} ##' @examples ##' lambda <- 4/240 ##' gamma <- 1/10 ##' beta <- 0.3 ##' eta <- 2 ##' mux <- 4 ##' t.sim <- 240 ##' t.acc <- t.sim ##' interval <- 1 ##' offset <- 0 ##' simulation <- BLRPM.sim(lambda,gamma,beta,eta,mux,t.sim,t.acc=t.sim,interval,offset) ##' plot(simulation,OSC=FALSE) ##' \donttest{ ##' plot(simulation,OSC=TRUE,start.time=1,end.time=24) ##' } ##' @author Christoph Ritschel \email{christoph.ritschel@@met.fu-berlin.de} ##' @import R6 ##' @method plot BLRPM ##' @export plot.BLRPM <- function(x,...,OSC=FALSE,start.time=NULL,end.time=NULL,legend=TRUE,c.axis=1.5,c.lab=1.5,c.legend=1.5) { ## variables cells <- x$cells storms <- x$storms sfn <- x$sfn RR <- x$RR time <- x$time ## IF only Cells and Storms are to be plotted... if(OSC) { par(oma=c(0,0,0,0)) par(mar=c(4.2,4,0.3,0)) max.int <- max(cells$int) #max.cell <- max(storms$Anzahl.Zellen/(storms$end-storms$start)) if(is.null(start.time)){ start <- 0 l <- max(cells$end) }else if(start.time < 0 | is.infinite(start.time)){ cat("Warning: start time can not be negative or infinite \n") }else{ start <- start.time l <- start.time + 2 } if(is.null(end.time)){ end <- max(cells$end) }else if(end.time < 0 | is.infinite(end.time)){ cat("Warning: end time can not be negative or infinite \n") }else{end <- end.time} ## Plot Window and Legend par(oma=c(0,0,0,0)) par(mar=c(5,5,1,1)) plot(NULL,ylim=c(-1/10*(max.int+4.5*max.int/10),max.int+4.5*max.int/10),xlim=c(start,end),cex.lab=c.lab,xlab="time [h]",ylab="cell intensity [mm/h]",axes=F) axis(2,cex.axis=c.axis) axis(1,cex.axis=c.axis) if(legend) { if(is.null(start.time)){ l <- max(cells$end) polygon(x=c(l/60,l/60,l/6.5,l/6.5),y=c(max.int*1,max.int*1.35,max.int*1.35,max.int*1),col=rgb(1,1,1,0)) polygon(x=c(l/50,l/50,l/20,l/20),y=c(max.int*1.18,max.int*1.25,max.int*1.25,max.int*1.18),col=rgb(1,0,0,0.4),border=NA) polygon(x=c(l/50,l/50,l/20,l/20),y=c(max.int*1.08,max.int*1.15,max.int*1.15,max.int*1.08),col=rgb(0,0,1,0.4),border=NA) text(x=l/10,y=max.int*1.24,labels="storm",cex=c.legend) text(x=l/10,y=max.int*1.14,labels="cell",cex=c.legend) }else{ l <- start.time polygon(x=c(l,l,l+1.7*c.legend,l+1.7*c.legend),y=c(max.int*1.1,max.int*1.3,max.int*1.3,max.int*1.1),col=rgb(1,1,1,0)) polygon(x=c(l+1.3*c.legend,l+1.3*c.legend,l+1.6*c.legend,l+1.6*c.legend),y=c(max.int*1.23,max.int*1.28,max.int*1.28,max.int*1.23),col=rgb(1,0,0,0.4),border=NA) polygon(x=c(l+1.3*c.legend,l+1.3*c.legend,l+1.6*c.legend,l+1.6*c.legend),y=c(max.int*1.13,max.int*1.18,max.int*1.18,max.int*1.13),col=rgb(0,0,1,0.4),border=NA) text(x=l,y=max.int*1.25,labels="storm",cex=c.legend,pos=4) text(x=l,y=max.int*1.15,labels="cell",cex=c.legend,pos=4) } } for(j in 1:length(storms$start)) { # Plot Polygon of each Storm [Start-End] Start <- storms$start[j] Ende <- storms$end[j] #cell.factor <- storms$Anzahl.Zellen[j]/(Ende-Start)/max.cell #system(paste("echo",cell.factor)) polygon(x=c(Start,Start,Ende,Ende),y=c(0,-1/10*(max.int+1),-1/10*(max.int+1),0),col=rgb(1,0,0,0.4),border=NA) } for(k in 1:length(cells$start)) { # Plot Polygon of each Cell [Start-End] Start <- cells$start[k] Ende <- cells$end[k] int <- cells$int[k] polygon(x=c(Start,Start,Ende,Ende),y=c(0,0+int,0+int,0),col=rgb(0,0,1,0.5),border=NA) } ## else plot Storms and Cells, the cumulative stepfunction and the accumulated precipitation time series ## one plot window with 3 subfigures }else{ ## Multiple Plot (3 Plots in 1 Window) par(mfrow=c(3,1)) par(mar=c(1,4,1,2)) ## axes lim max.int <- max(cells$int) l <- max(time) if(is.null(start.time)){ start <- min(time) }else{start <- start.time} if(is.null(end.time)){ end <- max(time) }else{end <- end.time} ## initialize plot window plot(NULL,ylim=c(-1/10*(max.int+1),max.int+1),xlim=c(start,end),xlab="",ylab="cell intensity [mm/h]",axes=F) axis(2) ## legend polygon(x=c(l/60,l/60,l/10,l/10),y=c(max.int*0.82,max.int*1.13,max.int*1.13,max.int*0.82),col=rgb(1,1,1,0)) polygon(x=c(l/50,l/50,l/30,l/30),y=c(max.int*0.99,max.int*1.07,max.int*1.07,max.int*0.99),col=rgb(1,0,0,0.4),border=NA) polygon(x=c(l/50,l/50,l/30,l/30),y=c(max.int*0.86,max.int*0.95,max.int*0.95,max.int*0.86),col=rgb(0,0,1,0.4),border=NA) text(x=l/15,y=max.int*1.035,labels="storm",font=1) text(x=l/15,y=max.int*0.92,labels="cell",font=1) ## plot storms as polygons [Start-End] for(j in 1:length(storms$start)) { Start <- storms$start[j] Ende <- storms$end[j] polygon(x=c(Start,Start,Ende,Ende),y=c(0,-1/10*(max.int+1),-1/10*(max.int+1),0),col=rgb(1,0,0,0.4),border=NA) } ## plot cells as polygons [Start-End] for(k in 1:length(cells$start)) { Start <- cells$start[k] Ende <- cells$end[k] int <- cells$int[k] polygon(x=c(Start,Start,Ende,Ende),y=c(0,0+int,0+int,0),col=rgb(0,0,1,0.5),border=NA) } ## plot stepfunction par(mar=c(1,4,1,2)) plot(sfn,do.points=0,xlim=c(start,end),xlab="",ylab="cell intensity [mm/h]",axes=F,main="") axis(2) ## plot accumulated time series par(mar=c(5,4,1,2)) plot(start:end,RR[start:end],type="h",xlab="time [h]",ylab=paste("precipitation [mm/h]"),frame.plot=F) layout(1) } ## end if(OSC) } ## End of Function plot.BLRPM ##################################################################################################### #################################### ### part 3: parameter estimation ### #################################### ##' \code{Beta.fun} is a help function for \code{OF} ##' @title Beta function needed in objective function ##' @param a \code{value} specifying Parameter a ##' @param b \code{value} specifying Parameter b ##' @return beta returns value of \code{Beta.fun} for parameters a and b ##' @author Christoph Ritschel \email{christoph.ritschel@@met.fu-berlin.de} Beta.fun <- function(a,b) { Beta <- gamma(a)*gamma(b)/gamma(a+b) return(Beta) } ## End of function Beta.fun #################################################################### ##' \code{Delta.fun} is a help function for \code{OF} ##' @title Delta function needed in objective function ##' @param kappa \code{value} specifying Parameter kappa ##' @param MStrich \code{value} specifying dimension of error correction in objective function ##' @return Delta returns value of \code{Delta.fun} for kappa and MStrich ##' @author Christoph Ritschel \email{christoph.ritschel@@met.fu-berlin.de} Delta.fun <- function(kappa,MStrich) { MStrich.Reihe <- 0:MStrich SummeMstrich <- (kappa^MStrich.Reihe)/factorial(MStrich.Reihe) Delta <- exp(kappa)-sum(SummeMstrich) return(Delta) } # End of function Delta.fun ############################################################################ ##' \code{BLRPM.OF} is the objective function used for parameter estimation of the BLRPM parameters. ##' Given a set of BLRPM parameters \code{par} this function calculates a set of model statistics at ##' given accumulation time steps \code{acc.vals}. These model statistics are compared with given ##' time series statistics \code{stats} in the objective function. The user is able to define \code{weights} ##' for each statistic (has to be the same length as statistics input vector). Option for debugging is given. ##' A \code{scale} parameter defines a criterium for which different kinds of model statistics are calculated. ##' This criterium is mainly based on the timescale difference between storm duration parameter gamma and cell duration ##' parameter eta. ##' If \code{use.log} is true, the objective function needs logarithmic input parameters. The value ##' of \code{OF} defines the kind of objective function to be used: 1= quadratic 2= quadratic extended 3= absolute 4= absolute extended. ##' @title BLRPM objective function for parameter estimation ##' @param par \code{vector} specifying the five model parameters (lambda,gamma,beta,eta,mux) at which the objective function is to be calculated ##' @param stats \code{vector} specifying the time series statistics to which the model is compared to ##' @param acc.vals \code{vector} specifying the accumulation time steps at which the model statistics are calculated ##' @param weights \code{vector} specifying the weight of each statistic in the objective function. ##' Note: has to have the same length as \code{stats} ##' @param debug \code{logical} defining if debuggging of function has to be done ##' @param scale \code{value} spacifying the scale factor for comparisson between duration parameters gamma and eta ##' @param use.log \code{logical} defining if input parameters are logarithmic ##' @param OF \code{value} specifying the type of objective function. 1: quadratic, 2: quad extended, 3: absolute, 4: abs extended ##' @return Z returns value of objective function for input parameters and input statistics ##' @author Christoph Ritschel \email{christoph.ritschel@@met.fu-berlin.de} BLRPM.OF <- function(par,stats,acc.vals=c(1,3,12,24), weights=rep(1,length(stats)),debug=FALSE,scale=1, use.log=TRUE,OF=2) { ## Log-Paramter Input? ## logarithmierte Parameter verhindern negative Werte geschaetzter Parameter if(use.log){ lambda <- exp(par[1]) gamma <- exp(par[2]) beta <- exp(par[3]) eta <- exp(par[4]) mux <- exp(par[5]) }else{ lambda <- par[1] gamma <- par[2] beta <- par[3] eta <- par[4] mux <- par[5] } ## Debug erlaubt es die Ziefunktionsausgaben fuer jeden Zwischenschritt in eine ## Auslagerungsdatei zu schreiben und anschliessend zu kontrollieren if(debug==T){ debug.values <- c(lambda,gamma,beta,eta,mux) } ## Fallunterscheidung: kleine Werte fuer Gamma und Beta im Vergleich zu eta ## Fall 1: Bedingung erfuellt if( beta<(scale*eta) && gamma<(scale*eta)) { ## Benoetigte Werte fuer Prob.Zero mu.T <- 1/gamma*(1+(gamma*(beta+(gamma/2))/(eta^2))-((gamma*(5*gamma*beta+(beta^2)+2*(gamma^2)))/(4*eta^3))+ (gamma*(4*(beta^3)+31*(beta^2)*gamma+99*beta*(gamma^2)+36*(gamma^3)))/(72*(eta^4))) G.PStern <- 1/gamma*(1-((beta+gamma)/eta)+((3*beta*gamma+2*(gamma^2)+(beta^2))/(2*(eta^2)))) if(debug) debug.values <- c(debug.values,NA,NA,NA,NA) ## Bedingung nicht erfuellt } else { ## Grenzen fuer M und MStrich, je groesser, desto geringer der Fehler M.Reihe <- 1:10 M <- 10 MStrich.Reihe <- 0:10 MStrich <- 10 kappa <- beta/eta # Variablenumformulierung phi <- gamma/eta # Variablenumformulierung suppressWarnings( { beta.val1 <- beta(M.Reihe+1,phi) beta.val2 <- beta(MStrich.Reihe+phi,2) } ) SummeM <- (-1*(kappa^(M.Reihe-1))*(kappa-(M.Reihe^2)-M.Reihe))/factorial(M.Reihe*(M.Reihe+1))*beta.val1 SummeMstrich <- (kappa^MStrich.Reihe)/factorial(MStrich.Reihe)*beta.val2 ## Benoetigte Werte fuer Prob.Zero mu.T <- 1/eta*(1+phi*sum(SummeM)+1/phi) G.PStern <- 1/eta*exp(-kappa)*sum(SummeMstrich)+Delta.fun(kappa,MStrich)/((MStrich+phi+1)*(MStrich+phi+2)) if(debug) debug.values <- c(debug.values,kappa,phi,sum(beta.val1),sum(beta.val2)) } # Ende Fallunterscheidung ## Bestimmung restlicher Variablen: mu.c <- 1+beta/gamma ## AkkumulationsZeiten: h <- acc.vals stats.tmp <- array(NA,dim=c(length(acc.vals),3)) ## Schleife ueber alle Akkumulationszeiten for(i in 1:length(h)) { ## Mittel der Niederschlagsint first aggregation step if(i==1) stats.tmp.mean <- (lambda*h[i]*mux*mu.c)/eta ## Varianz der Niederschlagsint stats.tmp[i,1] <- 2*lambda*mu.c/eta*( (2*(mux^2)+(beta*(mux^2)/gamma))*h[i]/eta+( ((mux^2)*beta*eta*(1-exp(-gamma*h[i])))/((gamma^2)*((gamma^2)-(eta^2))))- ((2*(mux^2)+((beta*gamma*(mux^2))/(gamma^2-eta^2)))*((1-exp(-eta*h[i]))/(eta^2)))) ## Autokovarianz k <- 1 #lag stats.tmp[i,2] <- ((lambda*mu.c)/eta)*( ((2*(mux^2)+((beta*gamma*(mux^2))/((gamma^2)-(eta^2))))*((((1-exp(-1*eta*h[i]))^2)*exp(-1*eta*(k-1)*h[i]))/(eta^2)))-( ((mux^2)*beta*eta*((1-exp(-1*gamma*h[i]))^2)*exp(-1*gamma*(k-1)*h[i]))/((gamma^2)*((gamma^2)-(eta^2))))) ## Probability of zero Rainfall stats.tmp[i,3] <- max(1e-8,min(1,exp((-lambda*(h[i]+mu.T))+(lambda*G.PStern*(gamma+beta*exp(-(beta+gamma)*h[i])))/(beta+gamma)),na.rm=T),na.rm=T) } ## Ende Schleife ueber alle Akkumulationszeiten stats.model <- c(stats.tmp.mean,as.vector(stats.tmp)) ## O.Funktionsformulierung ## Unterscheidung durch OF ## OF = 1 : quadratisch ## OF = 2 : quadratisch erweitert ## OF = 3 : absolut ## OF = 4 : absolut erweitert # stats <- c(stats[1,1],stats[2,],stats[3,],stats[4,]) # stats.model <- c(stats.model[1,1],stats.model[2,],stats.model[3,],stats.model[4,]) # if(OF==1) ZF <- sum(Gewichte %*% (((1-stats.model/stats))^2)) # else if(OF==2) ZF <- sum(Gewichte * ((((1-stats.model/stats))^2)+(((1-stats/stats.model))^2))) # else if(OF==3) ZF <- sum(Gewichte * abs((stats.model/stats)-1)) # else if(OF==4) ZF <- sum(Gewichte * (abs((stats.model/stats)-1)+abs(stats/stats.model-1))) if(OF==1) Z <- weights %*% (((1-stats.model/stats))^2) else if(OF==2) Z <- weights %*% ((((1-stats.model/stats))^2)+(((1-stats/stats.model))^2)) else if(OF==3) Z <- weights %*% abs((stats.model/stats)-1) else if(OF==4) Z <- weights %*% (abs((stats.model/stats)-1)+abs(stats/stats.model-1)) ## Debug? if(debug) { options(digits.secs=6) debug.values <- c(debug.values,stats.model,Z,as.character(Sys.time())) write(debug.values,file="optim.log",append=TRUE,ncolumns=length(debug.values)) } ## Ausgabe des aktuellen Werts der O.Funktion return(Z) } ## End of function BLRPM.OF ############################################################################################################# ##' \code{BLRPM.est} estimates the five Bartlett-Lewis rectangular pulse model parameters \code{lambda,gamma,beta,eta,mux} ##' for a given time series \code{data}. At first the time series statistics at given accumulation levels \code{acc.vals} ##' are calculated. These statistics are given over to the parameter estimation algorithm together with ##' parameter starting values \code{par}. An objective function \code{O.Fun} can be specified, default is \code{BLRPM.OF}. ##' In addition the weights for different statistics and accumulation levels \code{weights.mean, weights.var, weights.cov, weights.pz} ##' can be specified. For the BLRPM objective function the user can select the measure of distance between observation ##' and model with \code{OF}: =1 quadratic, =2: quad extended, =3: absolute, =4: abs extended. ##' A \code{scale} parameter controls different cases in the objective function for differences in the scale of duration ##' parameters gamma and eta. ##' If a debugging is wished, \code{debug} can be set to \code{TRUE} and a log file is created in working directory. ##' Several \code{optim} parameters can be also defined. For specifics see \code{?optim}. ##' @title BLRPM Parameter Estimation function ##' @param RR \code{vector} of a precipitation time series ##' @param acc.vals \code{vector} of different accumulation levels at which statistics are to be calculated ##' @param pars.in \code{vector} specifying starting values of \code{lambda,gamma,beta,eta,mux} for optimization ##' @param O.Fun \code{objective function} to be used during optimization ##' @param weights.mean \code{value} for weight for mean value at first accumulation level ##' @param weights.var \code{vecotr} of weights for variances, has to have \code{length(acc.vals)} ##' @param weights.cov \code{vecotr} of weights for covariances, has to have \code{length(acc.vals)} ##' @param weights.pz \code{vecotr} of weights for probability of zero rainfall, has to have \code{length(acc.vals)} ##' @param OF \code{value} specifying the type of objective function. 1: quadratic, 2: quad symmetrized, 3: absolute, 4: abs symmetrized ##' Note: quadratic symmetrized proofed to be most effective and fastest ##' @param debug set \code{TRUE} if debugging is wished, default \code{FALSE}. Creates a log file in working directory ##' @param scale \code{value} specifying the scaling between gamma and eta in the objective function ##' @param method \code{character} defining the method to be used in \code{optim}, preferences are: "Nelder-Mead", "BFGS", "L-BFGS-B"e ##' @param lower \code{vector} specifying the lower boundary of parameters for "L-BFGS-B" method ##' @param upper \code{vector} specifying the upper boundary of parameters for "L-BFGS-B" method ##' @param use.log \code{logical}, set \code{TRUE} if logarithmic parameters during optimization should be used. Advantage: zero as lower boundary for parameters ##' @param maxit \code{value} specifying the maximum number of itereations durion optimization ##' @param ndeps \code{vector} specifying the change for each parameter during one interation step ##' @param trace \code{value} specifying output information of \code{optim} ##' @return $est returns \code{vector} of estimated parameters \code{lambda,gamma,beta,eta,mux} ##' @return $conv returns \code{value} of convergence of optimization, see \code{optim} for details ##' @return $mess returns \code{character} message about optimization if using "L-BFGS-B" method ##' @return $Z returns \code{value} of objective function for estimated parameters ##' @usage BLRPM.est(RR,acc.vals,pars.in,O.Fun, ##' weights.mean,weights.var,weights.cov,weights.pz,OF,debug, ##' scale,method,lower,upper,use.log,maxit,ndeps,trace) ##' @examples ##' t.sim=240 ##' ##' lambda <- 4/240 ##' gamma <- 1/10 ##' beta <- 0.3 ##' eta <- 2 ##' mux <- 4 ##' ##' pars <- c(lambda,gamma,beta,eta,mux) ##' ##' sim <- BLRPM.sim(lambda,gamma,beta,eta,mux,t.sim) ##' est <- BLRPM.est(sim$RR,pars.in=pars,method="BFGS",use.log=TRUE) ##' ##' @author Christoph Ritschel \email{christoph.ritschel@@met.fu-berlin.de} ##' @export BLRPM.est <- function(RR,acc.vals=c(1,3,12,24),pars.in=c(4/240,1/10,0.3,2,4),O.Fun=BLRPM.OF, weights.mean=100, weights.var=rep(1,length(acc.vals)), weights.cov=rep(1,length(acc.vals)), weights.pz=rep(1,length(acc.vals)), OF=2, debug=FALSE,scale=1e-3,method="BFGS",lower=-Inf,upper=Inf, use.log=TRUE,maxit=2000,ndeps=rep(1e-3,length(pars.in)),trace=0) { ### calculate time series statistics at given accumulation levels stats <- TS.stats(RR,acc.vals) ## logarithmieren der Parameter und der Grenzen? if(use.log){ pars.in <- log(pars.in) if(method=="L-BFGS-B"){ upper <- log(upper) lower <- log(lower) } } ## If Debug: initialisiere log Datei if(debug) { debug.names <- c("lambda","gamma","beta","eta","mux", "kappa","phi","beta1","beta2", "mean", "var.1h","var.3h","var.12h","var.24h", "acv.1h","acv.3h","acv.12h","acv.24h", "probzero.1h","probzero.3h","probzero.12h","probzero.24h", "ZF","Date","Time") write(debug.names,file="optim.log",append=FALSE,ncolumns=length(debug.names)) } ## Check Statistik auf NAs if(sum(is.na(stats)) == 0) { ## Untersuche Startwert der O.Funktion: Kann Optimierung gestartet werden... ZF <- O.Fun(pars.in,acc.vals=acc.vals,stats=stats,use.log=use.log,OF=OF) ## Wenn Ok, dann Starte Optim #if((!is.infinite(ZF)) & (!is.na(ZF)) & (ZF<15000)) { if((!is.infinite(ZF)) & (!is.na(ZF))) { ## Unterscheidung methodn -- Grenzen if(method=="L-BFGS-B") { fit.optim <- optim(par=pars.in,O.Fun,stats=stats,OF=OF, #stats=Teststatistik[run,], weights=c(weights.mean,weights.var,weights.cov,weights.pz), debug=debug,scale=scale,method=method,use.log=use.log,lower=lower,upper=upper, control=list(maxit=maxit,trace=trace,ndeps=ndeps)) est <- fit.optim$par conv <- fit.optim$convergence mess <- fit.optim$message Z <- fit.optim$value }else{ fit.optim <- optim(par=pars.in,O.Fun,stats=stats,OF=OF,acc.vals=acc.vals, #stats=Teststatistik[run,], weights=c(weights.mean,weights.var,weights.cov,weights.pz), debug=debug,scale=scale,method=method,use.log=use.log,#lower=lower,upper=upper, control=list(maxit=maxit,ndeps=ndeps,trace=trace)) est <- fit.optim$par conv <- fit.optim$convergence Z <- fit.optim$value } }else{ # fuer den Fall fehlerhafter O.Funktionswerte est <- rep(NA,length(pars.in)) conv <- NA mess <- NA Z <- NA } ## Ende Abfrage is.infinite... }else { ## Falls Statistiken Fehlerhaft... est <- rep(NA,length(pars.in)) conv <- NA mess <- NA Z <- NA } ## Ende Abfrage Statistik if(use.log) { ## logarithmierte Parameter exponieren est <- exp(est) } names(est) <- c("lambda","gamma","beta","eta","mux") ## Ausgabe if(method=="L-BFGS-B") { return(list("est"=est,"conv"=conv,"mess"=mess,"Z"=Z)) }else{ return(list("est"=est,"conv"=conv,"Z"=Z)) } } ## Ende Funktion BLRPM.est ###########################################################################################
/scratch/gouwar.j/cran-all/cranData/BLRPM/R/BLRPM.R
#' Start BLRShiny #' @title Launch 'BLRShiny' Interface #' @return Nothing #' @description BLRShiny() loads interactive user interface built using R 'shiny'. #' @details The interactive user interface is to provide an easy way for binary logistic regression analysis and downloading relevant plot. #' @keywords BLRShiny #' @examples #' if(interactive()){ #' library(rmarkdown) #' BLRShiny() #' } BLRShiny <- function() { rmarkdown::run(system.file("img", "BLRShiny.Rmd", package = "BLRShiny")) Sys.setenv("R_TESTS" = "") }
/scratch/gouwar.j/cran-all/cranData/BLRShiny/R/BLRShiny.R
--- title: "Binary Logistic Regression Modelling" output: html_document runtime: shiny --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE) library(dplyr) library(caret) library(e1071) library(rhandsontable) library(ggplot2) library(datasets) ``` ```{r,echo=FALSE} sidebarPanel( checkboxInput("ex","Uncheck for using your own file",value = TRUE), fileInput("file", "Upload the *.csv file with headers"), sliderInput("train_num", label = "Enter the proportion of training dataset:", min = 0.6, max = 1, value = 0.6, step = 0.01), sliderInput("cut_offprob", label = "Enter cutoff probability", min = 0, max = 1, value = 0.5, step = 0.01), uiOutput("vx"), uiOutput("vy"), tableOutput("convertd"), uiOutput("vxi"), downloadButton("downloadPlot", "Download LR Plot(Quantitative Predictors)") ) mainPanel( tabsetPanel(type = "tab", tabPanel("Model Summary", verbatimTextOutput("AD") ), tabPanel("Model Visualization", plotOutput("MV") ), tabPanel("Model Evaluation", verbatimTextOutput("ME") ), tabPanel("Model Deployment",verbatimTextOutput("MD")) ), h6("Edit the test data record"), rHandsontableOutput("testdata"), h6("", tags$img(src ="K.JPG", height= 400, width=400)) ) output$AD<-renderPrint({ if(input$ex == TRUE) { data("iris") data = iris[1:100,1:4] data$Species = "setosa" data[51:100,5] = "versicolor" data$Species =factor(data$Species) set.seed(1) gp= runif(nrow(data)) data = data[order(gp),] } else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} } ds =data ds = select(ds,input$variablex) mod = paste(input$variabley,"~.") indexdependent= grep(input$variabley, colnames(ds)) df = data.frame(model.matrix(~0 + get(input$variabley),data = ds)) df[,2]= NULL ds[,indexdependent] = df[,1] options(scipen = 999) prop = input$train_num set.seed(1) dataframe = ds train.rows = sample(row.names(dataframe),dim(dataframe)[1]*prop) dataframet = dataframe[train.rows,] valid.rows = setdiff(row.names(dataframe),train.rows) dataframev = dataframe[valid.rows,] model = glm(formula = as.formula(mod),data = dataframet,family = "binomial") print(summary(model)) }) output$MV<-renderPlot({ if(input$ex == TRUE) { data("iris") data = iris[1:100,1:4] data$Species = "setosa" data[51:100,5] = "versicolor" data$Species =factor(data$Species) set.seed(1) gp= runif(nrow(data)) data = data[order(gp),] } else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} } ds = data ds = select(ds,input$variablex) mod = paste(input$variabley,"~.") indexdependent= grep(input$variabley, colnames(ds)) df = data.frame(model.matrix(~0 + get(input$variabley),data = ds)) df[,2]= NULL ds[,indexdependent] = df[,1] options(scipen = 999) prop = input$train_num set.seed(1) dataframe = ds train.rows = sample(row.names(dataframe),dim(dataframe)[1]*prop) dataframet = dataframe[train.rows,] valid.rows = setdiff(row.names(dataframe),train.rows) dataframev = dataframe[valid.rows,] attach(dataframet) if(class(get(input$variablexi)) == "factor") { assocplot(table(get(input$variablexi),get(input$variabley)),col = c("green","red"),xlab = input$variablexi, ylab = input$variabley) } else { ggplot(dataframet,aes(get(input$variablexi),get(input$variabley)))+ geom_point() + geom_smooth(method ="glm",se = FALSE,method.args=list(family = "binomial")) + labs(title= "Logistic model visualization", x= input$variablexi,y=input$variabley) } }) output$ME<-renderPrint({ if(input$ex == TRUE) { data("iris") data = iris[1:100,1:4] data$Species = "setosa" data[51:100,5] = "versicolor" data$Species =factor(data$Species) set.seed(1) gp= runif(nrow(data)) data = data[order(gp),] } else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} } ds = data ds = select(ds,input$variablex) mod = paste(input$variabley,"~.") indexdependent= grep(input$variabley, colnames(ds)) df = data.frame(model.matrix(~0 + get(input$variabley),data = ds)) df[,2]= NULL ds[,indexdependent] = df[,1] options(scipen = 999) prop = input$train_num set.seed(1) dataframe = ds train.rows = sample(row.names(dataframe),dim(dataframe)[1]*prop) dataframet = dataframe[train.rows,] valid.rows = setdiff(row.names(dataframe),train.rows) dataframev = dataframe[valid.rows,] model = glm(formula = as.formula(mod),data = dataframet,family = "binomial") if(prop <1 ) { cat(sprintf("\nValidation data is used\n")) prediction = ifelse(predict(model,newdata = dataframev,type= "response") > input$cut_offprob,1,0) attach(dataframev) } else { cat(sprintf("\nTraining data is used\n")) prediction = ifelse(predict(model,newdata = dataframet,type= "response") > input$cut_offprob,1,0) attach(dataframet) } print(confusionMatrix(as.factor(prediction),as.factor(get(input$variabley)))) }) output$MD<-renderPrint({ if(input$ex == TRUE) { data("iris") data = iris[1:100,1:4] data$Species = "setosa" data[51:100,5] = "versicolor" data$Species =factor(data$Species) set.seed(1) gp= runif(nrow(data)) data = data[order(gp),] } else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} } ds = data ds = select(ds,input$variablex) mod = paste(input$variabley,"~.") indexdependent= grep(input$variabley, colnames(ds)) df = data.frame(model.matrix(~0 + get(input$variabley),data = ds)) df[,2]= NULL ds[,indexdependent] = df[,1] options(scipen = 999) prop = input$train_num set.seed(1) dataframe = ds train.rows = sample(row.names(dataframe),dim(dataframe)[1]*prop) dataframet = dataframe[train.rows,] valid.rows = setdiff(row.names(dataframe),train.rows) dataframev = dataframe[valid.rows,] model = glm(formula = as.formula(mod),data = dataframet,family = "binomial") # print(summary(model)) test_data = data.frame(hot_to_r(input$testdata)) prediction = ifelse(predict(model,newdata = test_data ,type= "response") > input$cut_offprob,1,0) test_data$predictedvalue = prediction print(test_data) }) output$vx <- renderUI({ if(input$ex == TRUE) { data("iris") data = iris[1:100,1:4] data$Species = "setosa" data[51:100,5] = "versicolor" data$Species =factor(data$Species) set.seed(1) gp= runif(nrow(data)) data = data[order(gp),] } else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} } checkboxGroupInput("variablex","Select the variables",choices = colnames(data),selected = colnames(data)) }) output$vxi <- renderUI({ if(input$ex == TRUE) { data("iris") data = iris[1:100,1:4] data$Species = "setosa" data[51:100,5] = "versicolor" data$Species =factor(data$Species) set.seed(1) gp= runif(nrow(data)) data = data[order(gp),] } else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} } ds = data ds = select(ds,input$variablex) ds = select(ds,c(-input$variabley)) selectInput("variablexi","Select the variable x for the plot",choices = colnames(ds),selected = "") }) output$vy <- renderUI({ if(input$ex == TRUE) { data("iris") data = iris[1:100,1:4] data$Species = "setosa" data[51:100,5] = "versicolor" data$Species =factor(data$Species) set.seed(1) gp= runif(nrow(data)) data = data[order(gp),] } else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} } ds = data ds = select(ds,input$variablex) ds = select_if(ds,is.factor) d = data.frame(count=t( data.frame((lapply(ds,nlevels))))) d$varname =row.names(d) d= filter(d,count==2) selectInput("variabley","Select the dependent variable",choices = d$varname ,selected = d$varname) }) output$testdata <- renderRHandsontable({ if(input$ex == TRUE) { data("iris") data = iris[1:100,1:4] data$Species = "setosa" data[51:100,5] = "versicolor" data$Species =factor(data$Species) set.seed(1) gp= runif(nrow(data)) data = data[order(gp),] } else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} } ds = data ds = select(ds,input$variablex) ds = select(ds,c(-input$variabley)) row.names(ds)= 1:NROW(ds) rhandsontable(data.frame(ds[1,])) }) output$convertd <- renderTable({ if(input$ex == TRUE) { data("iris") data = iris[1:100,1:4] data$Species = "setosa" data[51:100,5] = "versicolor" data$Species =factor(data$Species) set.seed(1) gp= runif(nrow(data)) data = data[order(gp),] } else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} } ds = data ds = select(ds,c(input$variabley)) df = ds ds = model.matrix(~0 + get(input$variabley),data = ds) dnew = data.frame(cbind(df,ds)) df = data.frame(dcat =unique(dnew)) row.names(df)= 1:NROW(df) df[,3]= NULL colnames(df)=c("ActualValues","BinaryValues") print(data.frame(df)) }) plotInput = function() { if(input$ex == TRUE) { data("iris") data = iris[1:100,1:4] data$Species = "setosa" data[51:100,5] = "versicolor" data$Species =factor(data$Species) set.seed(1) gp= runif(nrow(data)) data = data[order(gp),] } else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} } ds = data ds = select(ds,input$variablex) mod = paste(input$variabley,"~.") indexdependent= grep(input$variabley, colnames(ds)) df = data.frame(model.matrix(~0 + get(input$variabley),data = ds)) df[,2]= NULL ds[,indexdependent] = df[,1] options(scipen = 999) prop = input$train_num set.seed(1) dataframe = ds train.rows = sample(row.names(dataframe),dim(dataframe)[1]*prop) dataframet = dataframe[train.rows,] attach(dataframet) if(class(get(input$variablexi)) != "factor") { ggplot(dataframet,aes(get(input$variablexi),get(input$variabley)))+ geom_point() + geom_smooth(method ="glm",se = FALSE,method.args=list(family = "binomial")) + labs(title= "Logistic model visualization", x= input$variablexi,y=input$variabley) } } output$downloadPlot = downloadHandler( filename = 'LR.png', content = function(file) { device <- function(..., width, height) { grDevices::png(..., width = width, height = height, res = 300, units = "in") } ggsave(file, plot = plotInput(), device = device) dev.off })
/scratch/gouwar.j/cran-all/cranData/BLRShiny/inst/BLRShiny.Rmd
--- title: "Binary Logistic Regression Modelling" output: html_document runtime: shiny --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE) library(dplyr) library(caret) library(e1071) library(rhandsontable) library(ggplot2) library(datasets) ``` ```{r,echo=FALSE} sidebarPanel( checkboxInput("ex","Uncheck for using your own file",value = TRUE), fileInput("file", "Upload the *.csv file with headers"), sliderInput("train_num", label = "Enter the proportion of training dataset:", min = 0.6, max = 1, value = 0.6, step = 0.01), sliderInput("cut_offprob", label = "Enter cutoff probability", min = 0, max = 1, value = 0.5, step = 0.01), uiOutput("vx"), uiOutput("vy"), tableOutput("convertd"), uiOutput("vxi"), downloadButton("downloadPlot", "Download LR Plot(Quantitative Predictors)") ) mainPanel( tabsetPanel(type = "tab", tabPanel("Model Summary", verbatimTextOutput("AD") ), tabPanel("Model Visualization", plotOutput("MV") ), tabPanel("Model Evaluation", verbatimTextOutput("ME") ), tabPanel("Model Deployment",verbatimTextOutput("MD")) ), h6("Edit the test data record"), rHandsontableOutput("testdata"), h6("", tags$img(src ="K.JPG", height= 400, width=400)) ) output$AD<-renderPrint({ if(input$ex == TRUE) { data("iris") data = iris[1:100,1:4] data$Species = "setosa" data[51:100,5] = "versicolor" data$Species =factor(data$Species) set.seed(1) gp= runif(nrow(data)) data = data[order(gp),] } else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} } ds =data ds = select(ds,input$variablex) mod = paste(input$variabley,"~.") indexdependent= grep(input$variabley, colnames(ds)) df = data.frame(model.matrix(~0 + get(input$variabley),data = ds)) df[,2]= NULL ds[,indexdependent] = df[,1] options(scipen = 999) prop = input$train_num set.seed(1) dataframe = ds train.rows = sample(row.names(dataframe),dim(dataframe)[1]*prop) dataframet = dataframe[train.rows,] valid.rows = setdiff(row.names(dataframe),train.rows) dataframev = dataframe[valid.rows,] model = glm(formula = as.formula(mod),data = dataframet,family = "binomial") print(summary(model)) }) output$MV<-renderPlot({ if(input$ex == TRUE) { data("iris") data = iris[1:100,1:4] data$Species = "setosa" data[51:100,5] = "versicolor" data$Species =factor(data$Species) set.seed(1) gp= runif(nrow(data)) data = data[order(gp),] } else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} } ds = data ds = select(ds,input$variablex) mod = paste(input$variabley,"~.") indexdependent= grep(input$variabley, colnames(ds)) df = data.frame(model.matrix(~0 + get(input$variabley),data = ds)) df[,2]= NULL ds[,indexdependent] = df[,1] options(scipen = 999) prop = input$train_num set.seed(1) dataframe = ds train.rows = sample(row.names(dataframe),dim(dataframe)[1]*prop) dataframet = dataframe[train.rows,] valid.rows = setdiff(row.names(dataframe),train.rows) dataframev = dataframe[valid.rows,] attach(dataframet) if(class(get(input$variablexi)) == "factor") { assocplot(table(get(input$variablexi),get(input$variabley)),col = c("green","red"),xlab = input$variablexi, ylab = input$variabley) } else { ggplot(dataframet,aes(get(input$variablexi),get(input$variabley)))+ geom_point() + geom_smooth(method ="glm",se = FALSE,method.args=list(family = "binomial")) + labs(title= "Logistic model visualization", x= input$variablexi,y=input$variabley) } }) output$ME<-renderPrint({ if(input$ex == TRUE) { data("iris") data = iris[1:100,1:4] data$Species = "setosa" data[51:100,5] = "versicolor" data$Species =factor(data$Species) set.seed(1) gp= runif(nrow(data)) data = data[order(gp),] } else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} } ds = data ds = select(ds,input$variablex) mod = paste(input$variabley,"~.") indexdependent= grep(input$variabley, colnames(ds)) df = data.frame(model.matrix(~0 + get(input$variabley),data = ds)) df[,2]= NULL ds[,indexdependent] = df[,1] options(scipen = 999) prop = input$train_num set.seed(1) dataframe = ds train.rows = sample(row.names(dataframe),dim(dataframe)[1]*prop) dataframet = dataframe[train.rows,] valid.rows = setdiff(row.names(dataframe),train.rows) dataframev = dataframe[valid.rows,] model = glm(formula = as.formula(mod),data = dataframet,family = "binomial") if(prop <1 ) { cat(sprintf("\nValidation data is used\n")) prediction = ifelse(predict(model,newdata = dataframev,type= "response") > input$cut_offprob,1,0) attach(dataframev) } else { cat(sprintf("\nTraining data is used\n")) prediction = ifelse(predict(model,newdata = dataframet,type= "response") > input$cut_offprob,1,0) attach(dataframet) } print(confusionMatrix(as.factor(prediction),as.factor(get(input$variabley)))) }) output$MD<-renderPrint({ if(input$ex == TRUE) { data("iris") data = iris[1:100,1:4] data$Species = "setosa" data[51:100,5] = "versicolor" data$Species =factor(data$Species) set.seed(1) gp= runif(nrow(data)) data = data[order(gp),] } else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} } ds = data ds = select(ds,input$variablex) mod = paste(input$variabley,"~.") indexdependent= grep(input$variabley, colnames(ds)) df = data.frame(model.matrix(~0 + get(input$variabley),data = ds)) df[,2]= NULL ds[,indexdependent] = df[,1] options(scipen = 999) prop = input$train_num set.seed(1) dataframe = ds train.rows = sample(row.names(dataframe),dim(dataframe)[1]*prop) dataframet = dataframe[train.rows,] valid.rows = setdiff(row.names(dataframe),train.rows) dataframev = dataframe[valid.rows,] model = glm(formula = as.formula(mod),data = dataframet,family = "binomial") # print(summary(model)) test_data = data.frame(hot_to_r(input$testdata)) prediction = ifelse(predict(model,newdata = test_data ,type= "response") > input$cut_offprob,1,0) test_data$predictedvalue = prediction print(test_data) }) output$vx <- renderUI({ if(input$ex == TRUE) { data("iris") data = iris[1:100,1:4] data$Species = "setosa" data[51:100,5] = "versicolor" data$Species =factor(data$Species) set.seed(1) gp= runif(nrow(data)) data = data[order(gp),] } else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} } checkboxGroupInput("variablex","Select the variables",choices = colnames(data),selected = colnames(data)) }) output$vxi <- renderUI({ if(input$ex == TRUE) { data("iris") data = iris[1:100,1:4] data$Species = "setosa" data[51:100,5] = "versicolor" data$Species =factor(data$Species) set.seed(1) gp= runif(nrow(data)) data = data[order(gp),] } else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} } ds = data ds = select(ds,input$variablex) ds = select(ds,c(-input$variabley)) selectInput("variablexi","Select the variable x for the plot",choices = colnames(ds),selected = "") }) output$vy <- renderUI({ if(input$ex == TRUE) { data("iris") data = iris[1:100,1:4] data$Species = "setosa" data[51:100,5] = "versicolor" data$Species =factor(data$Species) set.seed(1) gp= runif(nrow(data)) data = data[order(gp),] } else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} } ds = data ds = select(ds,input$variablex) ds = select_if(ds,is.factor) d = data.frame(count=t( data.frame((lapply(ds,nlevels))))) d$varname =row.names(d) d= filter(d,count==2) selectInput("variabley","Select the dependent variable",choices = d$varname ,selected = d$varname) }) output$testdata <- renderRHandsontable({ if(input$ex == TRUE) { data("iris") data = iris[1:100,1:4] data$Species = "setosa" data[51:100,5] = "versicolor" data$Species =factor(data$Species) set.seed(1) gp= runif(nrow(data)) data = data[order(gp),] } else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} } ds = data ds = select(ds,input$variablex) ds = select(ds,c(-input$variabley)) row.names(ds)= 1:NROW(ds) rhandsontable(data.frame(ds[1,])) }) output$convertd <- renderTable({ if(input$ex == TRUE) { data("iris") data = iris[1:100,1:4] data$Species = "setosa" data[51:100,5] = "versicolor" data$Species =factor(data$Species) set.seed(1) gp= runif(nrow(data)) data = data[order(gp),] } else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} } ds = data ds = select(ds,c(input$variabley)) df = ds ds = model.matrix(~0 + get(input$variabley),data = ds) dnew = data.frame(cbind(df,ds)) df = data.frame(dcat =unique(dnew)) row.names(df)= 1:NROW(df) df[,3]= NULL colnames(df)=c("ActualValues","BinaryValues") print(data.frame(df)) }) plotInput = function() { if(input$ex == TRUE) { data("iris") data = iris[1:100,1:4] data$Species = "setosa" data[51:100,5] = "versicolor" data$Species =factor(data$Species) set.seed(1) gp= runif(nrow(data)) data = data[order(gp),] } else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} } ds = data ds = select(ds,input$variablex) mod = paste(input$variabley,"~.") indexdependent= grep(input$variabley, colnames(ds)) df = data.frame(model.matrix(~0 + get(input$variabley),data = ds)) df[,2]= NULL ds[,indexdependent] = df[,1] options(scipen = 999) prop = input$train_num set.seed(1) dataframe = ds train.rows = sample(row.names(dataframe),dim(dataframe)[1]*prop) dataframet = dataframe[train.rows,] attach(dataframet) if(class(get(input$variablexi)) != "factor") { ggplot(dataframet,aes(get(input$variablexi),get(input$variabley)))+ geom_point() + geom_smooth(method ="glm",se = FALSE,method.args=list(family = "binomial")) + labs(title= "Logistic model visualization", x= input$variablexi,y=input$variabley) } } output$downloadPlot = downloadHandler( filename = 'LR.png', content = function(file) { device <- function(..., width, height) { grDevices::png(..., width = width, height = height, res = 300, units = "in") } ggsave(file, plot = plotInput(), device = device) dev.off })
/scratch/gouwar.j/cran-all/cranData/BLRShiny/inst/img/BLRShiny.Rmd
#' Start BLRShiny2 #' @title Launch 'BLRShiny2' Interface #' @return Nothing #' @description BLRShiny2() loads interactive user interface built using R 'shiny'. #' @details The interactive user interface is to provide an easy way for binary logistic regression analysis and downloading relevant plot. #' @keywords BLRShiny2 #' @examples #' if(interactive()){ #' library(rmarkdown) #' BLRShiny2() #' } BLRShiny2 <- function() { rmarkdown::run(system.file("img", "BLRShiny2.Rmd", package = "BLRShiny2")) Sys.setenv("R_TESTS" = "") }
/scratch/gouwar.j/cran-all/cranData/BLRShiny2/R/BLRShiny2.R
--- title: "Binary Logistic Regression Modelling" output: html_document runtime: shiny --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE) library(dplyr) library(caret) library(e1071) library(rhandsontable) library(ggplot2) library(datasets) ``` ```{r,echo=FALSE} options(shiny.maxRequestSize = 100 * 1024^2) sidebarPanel( checkboxInput("ex","Uncheck for using your own file",value = TRUE), fileInput("file", "Upload the *.csv file with headers"), sliderInput("train_num", label = "Enter the proportion of training dataset:", min = 0.6, max = 1, value = 0.6, step = 0.01), sliderInput("cut_offprob", label = "Enter cutoff probability", min = 0, max = 1, value = 0.5, step = 0.01), uiOutput("vx"), uiOutput("vy"), tableOutput("convertd"), uiOutput("vxi"), downloadButton("downloadPlot", "Download LR Plot(Quantitative Predictors)") ) mainPanel( tabsetPanel(type = "tab", tabPanel("Model Summary", verbatimTextOutput("AD") ), tabPanel("Model Visualization", plotOutput("MV") ), tabPanel("Model Evaluation", verbatimTextOutput("ME") ), tabPanel("Model Deployment",verbatimTextOutput("MD")) ), h6("Edit the test data record"), rHandsontableOutput("testdata"), h6("", tags$img(src ="K.JPG", height= 400, width=400)) ) output$AD<-renderPrint({ if(input$ex == TRUE) { data("iris") data = iris[1:100,1:4] data$Species = "setosa" data[51:100,5] = "versicolor" data$Species =factor(data$Species) set.seed(1) gp= runif(nrow(data)) data = data[order(gp),] } else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} quantdata = select_if(data,is.numeric) qualdata = select_if(data,is.character) qualdata = data.frame(lapply(qualdata,as.factor)) data = data.frame(cbind(quantdata,qualdata)) } ds =data ds = select(ds,input$variablex) mod = paste(input$variabley,"~.") indexdependent= grep(input$variabley, colnames(ds)) df = data.frame(model.matrix(~0 + get(input$variabley),data = ds)) df[,2]= NULL ds[,indexdependent] = df[,1] options(scipen = 999) prop = input$train_num set.seed(1) dataframe = ds train.rows = sample(row.names(dataframe),dim(dataframe)[1]*prop) dataframet = dataframe[train.rows,] valid.rows = setdiff(row.names(dataframe),train.rows) dataframev = dataframe[valid.rows,] model = glm(formula = as.formula(mod),data = dataframet,family = "binomial") print(summary(model)) }) output$MV<-renderPlot({ if(input$ex == TRUE) { data("iris") data = iris[1:100,1:4] data$Species = "setosa" data[51:100,5] = "versicolor" data$Species =factor(data$Species) set.seed(1) gp= runif(nrow(data)) data = data[order(gp),] } else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} quantdata = select_if(data,is.numeric) qualdata = select_if(data,is.character) qualdata = data.frame(lapply(qualdata,as.factor)) data = data.frame(cbind(quantdata,qualdata)) } ds = data ds = select(ds,input$variablex) mod = paste(input$variabley,"~.") indexdependent= grep(input$variabley, colnames(ds)) df = data.frame(model.matrix(~0 + get(input$variabley),data = ds)) df[,2]= NULL ds[,indexdependent] = df[,1] options(scipen = 999) prop = input$train_num set.seed(1) dataframe = ds train.rows = sample(row.names(dataframe),dim(dataframe)[1]*prop) dataframet = dataframe[train.rows,] valid.rows = setdiff(row.names(dataframe),train.rows) dataframev = dataframe[valid.rows,] attach(dataframet) if(class(get(input$variablexi)) == "factor") { assocplot(table(get(input$variablexi),get(input$variabley)),col = c("green","red"),xlab = input$variablexi, ylab = input$variabley) } else { ggplot(dataframet,aes(get(input$variablexi),get(input$variabley)))+ geom_point() + geom_smooth(method ="glm",se = FALSE,method.args=list(family = "binomial")) + labs(title= "Logistic model visualization", x= input$variablexi,y=input$variabley) } }) output$ME<-renderPrint({ if(input$ex == TRUE) { data("iris") data = iris[1:100,1:4] data$Species = "setosa" data[51:100,5] = "versicolor" data$Species =factor(data$Species) set.seed(1) gp= runif(nrow(data)) data = data[order(gp),] } else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} quantdata = select_if(data,is.numeric) qualdata = select_if(data,is.character) qualdata = data.frame(lapply(qualdata,as.factor)) data = data.frame(cbind(quantdata,qualdata)) } ds = data ds = select(ds,input$variablex) mod = paste(input$variabley,"~.") indexdependent= grep(input$variabley, colnames(ds)) df = data.frame(model.matrix(~0 + get(input$variabley),data = ds)) df[,2]= NULL ds[,indexdependent] = df[,1] options(scipen = 999) prop = input$train_num set.seed(1) dataframe = ds train.rows = sample(row.names(dataframe),dim(dataframe)[1]*prop) dataframet = dataframe[train.rows,] valid.rows = setdiff(row.names(dataframe),train.rows) dataframev = dataframe[valid.rows,] model = glm(formula = as.formula(mod),data = dataframet,family = "binomial") if(prop <1 ) { cat(sprintf("\nValidation data is used\n")) prediction = ifelse(predict(model,newdata = dataframev,type= "response") > input$cut_offprob,1,0) attach(dataframev) } else { cat(sprintf("\nTraining data is used\n")) prediction = ifelse(predict(model,newdata = dataframet,type= "response") > input$cut_offprob,1,0) attach(dataframet) } print(confusionMatrix(as.factor(prediction),as.factor(get(input$variabley)))) }) output$MD<-renderPrint({ if(input$ex == TRUE) { data("iris") data = iris[1:100,1:4] data$Species = "setosa" data[51:100,5] = "versicolor" data$Species =factor(data$Species) set.seed(1) gp= runif(nrow(data)) data = data[order(gp),] } else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} quantdata = select_if(data,is.numeric) qualdata = select_if(data,is.character) qualdata = data.frame(lapply(qualdata,as.factor)) data = data.frame(cbind(quantdata,qualdata)) } ds = data ds = select(ds,input$variablex) mod = paste(input$variabley,"~.") indexdependent= grep(input$variabley, colnames(ds)) df = data.frame(model.matrix(~0 + get(input$variabley),data = ds)) df[,2]= NULL ds[,indexdependent] = df[,1] options(scipen = 999) prop = input$train_num set.seed(1) dataframe = ds train.rows = sample(row.names(dataframe),dim(dataframe)[1]*prop) dataframet = dataframe[train.rows,] valid.rows = setdiff(row.names(dataframe),train.rows) dataframev = dataframe[valid.rows,] model = glm(formula = as.formula(mod),data = dataframet,family = "binomial") # print(summary(model)) test_data = data.frame(hot_to_r(input$testdata)) prediction = ifelse(predict(model,newdata = test_data ,type= "response") > input$cut_offprob,1,0) test_data$predictedvalue = prediction print(test_data) }) output$vx <- renderUI({ if(input$ex == TRUE) { data("iris") data = iris[1:100,1:4] data$Species = "setosa" data[51:100,5] = "versicolor" data$Species =factor(data$Species) set.seed(1) gp= runif(nrow(data)) data = data[order(gp),] } else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} quantdata = select_if(data,is.numeric) qualdata = select_if(data,is.character) qualdata = data.frame(lapply(qualdata,as.factor)) data = data.frame(cbind(quantdata,qualdata)) } checkboxGroupInput("variablex","Select the variables",choices = colnames(data),selected = colnames(data)) }) output$vxi <- renderUI({ if(input$ex == TRUE) { data("iris") data = iris[1:100,1:4] data$Species = "setosa" data[51:100,5] = "versicolor" data$Species =factor(data$Species) set.seed(1) gp= runif(nrow(data)) data = data[order(gp),] } else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} quantdata = select_if(data,is.numeric) qualdata = select_if(data,is.character) qualdata = data.frame(lapply(qualdata,as.factor)) data = data.frame(cbind(quantdata,qualdata)) } ds = data ds = select(ds,input$variablex) ds = select(ds,c(-input$variabley)) selectInput("variablexi","Select the variable x for the plot",choices = colnames(ds),selected = "") }) output$vy <- renderUI({ if(input$ex == TRUE) { data("iris") data = iris[1:100,1:4] data$Species = "setosa" data[51:100,5] = "versicolor" data$Species =factor(data$Species) set.seed(1) gp= runif(nrow(data)) data = data[order(gp),] } else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} quantdata = select_if(data,is.numeric) qualdata = select_if(data,is.character) qualdata = data.frame(lapply(qualdata,as.factor)) data = data.frame(cbind(quantdata,qualdata)) } ds = data ds = select(ds,input$variablex) ds = select_if(ds,is.factor) d = data.frame(count=t( data.frame((lapply(ds,nlevels))))) d$varname =row.names(d) d= filter(d,count==2) selectInput("variabley","Select the dependent variable",choices = d$varname ,selected = d$varname) }) output$testdata <- renderRHandsontable({ if(input$ex == TRUE) { data("iris") data = iris[1:100,1:4] data$Species = "setosa" data[51:100,5] = "versicolor" data$Species =factor(data$Species) set.seed(1) gp= runif(nrow(data)) data = data[order(gp),] } else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} quantdata = select_if(data,is.numeric) qualdata = select_if(data,is.character) qualdata = data.frame(lapply(qualdata,as.factor)) data = data.frame(cbind(quantdata,qualdata)) } ds = data ds = select(ds,input$variablex) ds = select(ds,c(-input$variabley)) row.names(ds)= 1:NROW(ds) rhandsontable(data.frame(ds[1,])) }) output$convertd <- renderTable({ if(input$ex == TRUE) { data("iris") data = iris[1:100,1:4] data$Species = "setosa" data[51:100,5] = "versicolor" data$Species =factor(data$Species) set.seed(1) gp= runif(nrow(data)) data = data[order(gp),] } else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} quantdata = select_if(data,is.numeric) qualdata = select_if(data,is.character) qualdata = data.frame(lapply(qualdata,as.factor)) data = data.frame(cbind(quantdata,qualdata)) } ds = data ds = select(ds,c(input$variabley)) df = ds ds = model.matrix(~0 + get(input$variabley),data = ds) dnew = data.frame(cbind(df,ds)) df = data.frame(dcat =unique(dnew)) row.names(df)= 1:NROW(df) df[,3]= NULL colnames(df)=c("ActualValues","BinaryValues") print(data.frame(df)) }) plotInput = function() { if(input$ex == TRUE) { data("iris") data = iris[1:100,1:4] data$Species = "setosa" data[51:100,5] = "versicolor" data$Species =factor(data$Species) set.seed(1) gp= runif(nrow(data)) data = data[order(gp),] } else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} quantdata = select_if(data,is.numeric) qualdata = select_if(data,is.character) qualdata = data.frame(lapply(qualdata,as.factor)) data = data.frame(cbind(quantdata,qualdata)) } ds = data ds = select(ds,input$variablex) mod = paste(input$variabley,"~.") indexdependent= grep(input$variabley, colnames(ds)) df = data.frame(model.matrix(~0 + get(input$variabley),data = ds)) df[,2]= NULL ds[,indexdependent] = df[,1] options(scipen = 999) prop = input$train_num set.seed(1) dataframe = ds train.rows = sample(row.names(dataframe),dim(dataframe)[1]*prop) dataframet = dataframe[train.rows,] attach(dataframet) if(class(get(input$variablexi)) != "factor") { ggplot(dataframet,aes(get(input$variablexi),get(input$variabley)))+ geom_point() + geom_smooth(method ="glm",se = FALSE,method.args=list(family = "binomial")) + labs(title= "Logistic model visualization", x= input$variablexi,y=input$variabley) } } output$downloadPlot = downloadHandler( filename = 'LR.png', content = function(file) { device <- function(..., width, height) { grDevices::png(..., width = width, height = height, res = 300, units = "in") } ggsave(file, plot = plotInput(), device = device) dev.off })
/scratch/gouwar.j/cran-all/cranData/BLRShiny2/inst/BLRShiny2.Rmd
--- title: "Binary Logistic Regression Modelling" output: html_document runtime: shiny --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE) library(dplyr) library(caret) library(e1071) library(rhandsontable) library(ggplot2) library(datasets) ``` ```{r,echo=FALSE} options(shiny.maxRequestSize = 100 * 1024^2) sidebarPanel( checkboxInput("ex","Uncheck for using your own file",value = TRUE), fileInput("file", "Upload the *.csv file with headers"), sliderInput("train_num", label = "Enter the proportion of training dataset:", min = 0.6, max = 1, value = 0.6, step = 0.01), sliderInput("cut_offprob", label = "Enter cutoff probability", min = 0, max = 1, value = 0.5, step = 0.01), uiOutput("vx"), uiOutput("vy"), tableOutput("convertd"), uiOutput("vxi"), downloadButton("downloadPlot", "Download LR Plot(Quantitative Predictors)") ) mainPanel( tabsetPanel(type = "tab", tabPanel("Model Summary", verbatimTextOutput("AD") ), tabPanel("Model Visualization", plotOutput("MV") ), tabPanel("Model Evaluation", verbatimTextOutput("ME") ), tabPanel("Model Deployment",verbatimTextOutput("MD")) ), h6("Edit the test data record"), rHandsontableOutput("testdata"), h6("", tags$img(src ="K.JPG", height= 400, width=400)) ) output$AD<-renderPrint({ if(input$ex == TRUE) { data("iris") data = iris[1:100,1:4] data$Species = "setosa" data[51:100,5] = "versicolor" data$Species =factor(data$Species) set.seed(1) gp= runif(nrow(data)) data = data[order(gp),] } else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} quantdata = select_if(data,is.numeric) qualdata = select_if(data,is.character) qualdata = data.frame(lapply(qualdata,as.factor)) data = data.frame(cbind(quantdata,qualdata)) } ds =data ds = select(ds,input$variablex) mod = paste(input$variabley,"~.") indexdependent= grep(input$variabley, colnames(ds)) df = data.frame(model.matrix(~0 + get(input$variabley),data = ds)) df[,2]= NULL ds[,indexdependent] = df[,1] options(scipen = 999) prop = input$train_num set.seed(1) dataframe = ds train.rows = sample(row.names(dataframe),dim(dataframe)[1]*prop) dataframet = dataframe[train.rows,] valid.rows = setdiff(row.names(dataframe),train.rows) dataframev = dataframe[valid.rows,] model = glm(formula = as.formula(mod),data = dataframet,family = "binomial") print(summary(model)) }) output$MV<-renderPlot({ if(input$ex == TRUE) { data("iris") data = iris[1:100,1:4] data$Species = "setosa" data[51:100,5] = "versicolor" data$Species =factor(data$Species) set.seed(1) gp= runif(nrow(data)) data = data[order(gp),] } else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} quantdata = select_if(data,is.numeric) qualdata = select_if(data,is.character) qualdata = data.frame(lapply(qualdata,as.factor)) data = data.frame(cbind(quantdata,qualdata)) } ds = data ds = select(ds,input$variablex) mod = paste(input$variabley,"~.") indexdependent= grep(input$variabley, colnames(ds)) df = data.frame(model.matrix(~0 + get(input$variabley),data = ds)) df[,2]= NULL ds[,indexdependent] = df[,1] options(scipen = 999) prop = input$train_num set.seed(1) dataframe = ds train.rows = sample(row.names(dataframe),dim(dataframe)[1]*prop) dataframet = dataframe[train.rows,] valid.rows = setdiff(row.names(dataframe),train.rows) dataframev = dataframe[valid.rows,] attach(dataframet) if(class(get(input$variablexi)) == "factor") { assocplot(table(get(input$variablexi),get(input$variabley)),col = c("green","red"),xlab = input$variablexi, ylab = input$variabley) } else { ggplot(dataframet,aes(get(input$variablexi),get(input$variabley)))+ geom_point() + geom_smooth(method ="glm",se = FALSE,method.args=list(family = "binomial")) + labs(title= "Logistic model visualization", x= input$variablexi,y=input$variabley) } }) output$ME<-renderPrint({ if(input$ex == TRUE) { data("iris") data = iris[1:100,1:4] data$Species = "setosa" data[51:100,5] = "versicolor" data$Species =factor(data$Species) set.seed(1) gp= runif(nrow(data)) data = data[order(gp),] } else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} quantdata = select_if(data,is.numeric) qualdata = select_if(data,is.character) qualdata = data.frame(lapply(qualdata,as.factor)) data = data.frame(cbind(quantdata,qualdata)) } ds = data ds = select(ds,input$variablex) mod = paste(input$variabley,"~.") indexdependent= grep(input$variabley, colnames(ds)) df = data.frame(model.matrix(~0 + get(input$variabley),data = ds)) df[,2]= NULL ds[,indexdependent] = df[,1] options(scipen = 999) prop = input$train_num set.seed(1) dataframe = ds train.rows = sample(row.names(dataframe),dim(dataframe)[1]*prop) dataframet = dataframe[train.rows,] valid.rows = setdiff(row.names(dataframe),train.rows) dataframev = dataframe[valid.rows,] model = glm(formula = as.formula(mod),data = dataframet,family = "binomial") if(prop <1 ) { cat(sprintf("\nValidation data is used\n")) prediction = ifelse(predict(model,newdata = dataframev,type= "response") > input$cut_offprob,1,0) attach(dataframev) } else { cat(sprintf("\nTraining data is used\n")) prediction = ifelse(predict(model,newdata = dataframet,type= "response") > input$cut_offprob,1,0) attach(dataframet) } print(confusionMatrix(as.factor(prediction),as.factor(get(input$variabley)))) }) output$MD<-renderPrint({ if(input$ex == TRUE) { data("iris") data = iris[1:100,1:4] data$Species = "setosa" data[51:100,5] = "versicolor" data$Species =factor(data$Species) set.seed(1) gp= runif(nrow(data)) data = data[order(gp),] } else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} quantdata = select_if(data,is.numeric) qualdata = select_if(data,is.character) qualdata = data.frame(lapply(qualdata,as.factor)) data = data.frame(cbind(quantdata,qualdata)) } ds = data ds = select(ds,input$variablex) mod = paste(input$variabley,"~.") indexdependent= grep(input$variabley, colnames(ds)) df = data.frame(model.matrix(~0 + get(input$variabley),data = ds)) df[,2]= NULL ds[,indexdependent] = df[,1] options(scipen = 999) prop = input$train_num set.seed(1) dataframe = ds train.rows = sample(row.names(dataframe),dim(dataframe)[1]*prop) dataframet = dataframe[train.rows,] valid.rows = setdiff(row.names(dataframe),train.rows) dataframev = dataframe[valid.rows,] model = glm(formula = as.formula(mod),data = dataframet,family = "binomial") # print(summary(model)) test_data = data.frame(hot_to_r(input$testdata)) prediction = ifelse(predict(model,newdata = test_data ,type= "response") > input$cut_offprob,1,0) test_data$predictedvalue = prediction print(test_data) }) output$vx <- renderUI({ if(input$ex == TRUE) { data("iris") data = iris[1:100,1:4] data$Species = "setosa" data[51:100,5] = "versicolor" data$Species =factor(data$Species) set.seed(1) gp= runif(nrow(data)) data = data[order(gp),] } else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} quantdata = select_if(data,is.numeric) qualdata = select_if(data,is.character) qualdata = data.frame(lapply(qualdata,as.factor)) data = data.frame(cbind(quantdata,qualdata)) } checkboxGroupInput("variablex","Select the variables",choices = colnames(data),selected = colnames(data)) }) output$vxi <- renderUI({ if(input$ex == TRUE) { data("iris") data = iris[1:100,1:4] data$Species = "setosa" data[51:100,5] = "versicolor" data$Species =factor(data$Species) set.seed(1) gp= runif(nrow(data)) data = data[order(gp),] } else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} quantdata = select_if(data,is.numeric) qualdata = select_if(data,is.character) qualdata = data.frame(lapply(qualdata,as.factor)) data = data.frame(cbind(quantdata,qualdata)) } ds = data ds = select(ds,input$variablex) ds = select(ds,c(-input$variabley)) selectInput("variablexi","Select the variable x for the plot",choices = colnames(ds),selected = "") }) output$vy <- renderUI({ if(input$ex == TRUE) { data("iris") data = iris[1:100,1:4] data$Species = "setosa" data[51:100,5] = "versicolor" data$Species =factor(data$Species) set.seed(1) gp= runif(nrow(data)) data = data[order(gp),] } else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} quantdata = select_if(data,is.numeric) qualdata = select_if(data,is.character) qualdata = data.frame(lapply(qualdata,as.factor)) data = data.frame(cbind(quantdata,qualdata)) } ds = data ds = select(ds,input$variablex) ds = select_if(ds,is.factor) d = data.frame(count=t( data.frame((lapply(ds,nlevels))))) d$varname =row.names(d) d= filter(d,count==2) selectInput("variabley","Select the dependent variable",choices = d$varname ,selected = d$varname) }) output$testdata <- renderRHandsontable({ if(input$ex == TRUE) { data("iris") data = iris[1:100,1:4] data$Species = "setosa" data[51:100,5] = "versicolor" data$Species =factor(data$Species) set.seed(1) gp= runif(nrow(data)) data = data[order(gp),] } else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} quantdata = select_if(data,is.numeric) qualdata = select_if(data,is.character) qualdata = data.frame(lapply(qualdata,as.factor)) data = data.frame(cbind(quantdata,qualdata)) } ds = data ds = select(ds,input$variablex) ds = select(ds,c(-input$variabley)) row.names(ds)= 1:NROW(ds) rhandsontable(data.frame(ds[1,])) }) output$convertd <- renderTable({ if(input$ex == TRUE) { data("iris") data = iris[1:100,1:4] data$Species = "setosa" data[51:100,5] = "versicolor" data$Species =factor(data$Species) set.seed(1) gp= runif(nrow(data)) data = data[order(gp),] } else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} quantdata = select_if(data,is.numeric) qualdata = select_if(data,is.character) qualdata = data.frame(lapply(qualdata,as.factor)) data = data.frame(cbind(quantdata,qualdata)) } ds = data ds = select(ds,c(input$variabley)) df = ds ds = model.matrix(~0 + get(input$variabley),data = ds) dnew = data.frame(cbind(df,ds)) df = data.frame(dcat =unique(dnew)) row.names(df)= 1:NROW(df) df[,3]= NULL colnames(df)=c("ActualValues","BinaryValues") print(data.frame(df)) }) plotInput = function() { if(input$ex == TRUE) { data("iris") data = iris[1:100,1:4] data$Species = "setosa" data[51:100,5] = "versicolor" data$Species =factor(data$Species) set.seed(1) gp= runif(nrow(data)) data = data[order(gp),] } else{ file1 = input$file if(is.null(file1)){return()} data = read.table(file = file1$datapath,sep =",",header = TRUE) if(is.null(data())){return()} quantdata = select_if(data,is.numeric) qualdata = select_if(data,is.character) qualdata = data.frame(lapply(qualdata,as.factor)) data = data.frame(cbind(quantdata,qualdata)) } ds = data ds = select(ds,input$variablex) mod = paste(input$variabley,"~.") indexdependent= grep(input$variabley, colnames(ds)) df = data.frame(model.matrix(~0 + get(input$variabley),data = ds)) df[,2]= NULL ds[,indexdependent] = df[,1] options(scipen = 999) prop = input$train_num set.seed(1) dataframe = ds train.rows = sample(row.names(dataframe),dim(dataframe)[1]*prop) dataframet = dataframe[train.rows,] attach(dataframet) if(class(get(input$variablexi)) != "factor") { ggplot(dataframet,aes(get(input$variablexi),get(input$variabley)))+ geom_point() + geom_smooth(method ="glm",se = FALSE,method.args=list(family = "binomial")) + labs(title= "Logistic model visualization", x= input$variablexi,y=input$variabley) } } output$downloadPlot = downloadHandler( filename = 'LR.png', content = function(file) { device <- function(..., width, height) { grDevices::png(..., width = width, height = height, res = 300, units = "in") } ggsave(file, plot = plotInput(), device = device) dev.off })
/scratch/gouwar.j/cran-all/cranData/BLRShiny2/inst/img/BLRShiny2.Rmd
# Generated by using Rcpp::compileAttributes() -> do not edit by hand # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 #' @title Geodesic distance #' @description Evaluate geodesic distance (shortest path) between all pairs of nodes in the network. #' #' @param M Input adjacency matrix #' #' @return Matrix containing all the pairwise geodesic distances #' @examples dst(example_adjacency_matrix) #' @export dst <- function(M) { .Call(`_BLSM_dst`, M) } #' @title Distance between latent positions #' @description Compute the square root of the Euclidean distances between latent positions and #' return them with a negative sign. #' #' @param Z Latent positions matrix. The matrix size must be \code{(n,k)}, where \code{n} and \code{k} denote respectively #' the number of nodes in the network and the latent space dimensionality. #' @return Matrix containing the negative square root of the Euclidean distances between latent positions #' @examples pos = matrix(rnorm(20), ncol=2) #' lpz_dist(pos) #' @export lpz_dist <- function(Z) { .Call(`_BLSM_lpz_dist`, Z) } #' @title Network log-likelihood #' @description Compute the log-likelihood of the whole observed network based on the #' latent positions estimates and the model assumptions. See \link[BLSM]{BLSM} for more information. #' #' @param Y Adjacency matrix of the observed network #' @param lpz Matrix containing the negative square root of the Euclidean distances between latent positions #' (output of \link[BLSM]{lpz_dist}) #' @param alpha Model variable \eqn{\alpha} #' @param W BLSM Weights matrix of the observed network #' #' @return Log-likelihood of the observed network lpY <- function(Y, lpz, alpha, W) { .Call(`_BLSM_lpY`, Y, lpz, alpha, W) } #' @title Network (positive) log-likelihood #' @description Compute the (positive) log-likelihood of the whole observed network based on the #' latent positions estimates and the model assumptions. The inputs are slightly different from those of \link[BLSM]{lpY}, #' so the function basically applies some preprocessing before calling \link[BLSM]{lpY} and returning its value with the opposite sign. #' #' @param avZ Vector containing the \eqn{\alpha} value and the latent positions #' @param Y Adjacency matrix of the observed network #' @param W BLSM Weights matrix of the observed network #' #' @return Log-likelihood of the observed network mlpY <- function(avZ, Y, W) { .Call(`_BLSM_mlpY`, avZ, Y, W) } #' @title lpz_dist optimized for individual updates #' @description Compute the square root of the Euclidean distances between a specific coordinate in the latent space #' and all the others. The function follows almost the same approach as \link[BLSM]{lpz_dist}, but it is #' more suitable for the individual updates occurring during the simulation. #' #' @param Z Latent positions matrix #' @param node Specific node in the network corresponding to the latent coordinate which will be used as reference #' @param diag Diagonal from \code{t(Z)\%*\%Z} matrix, passed to speed up the process. #' @return Vector containing the negative square root of the Euclidean distances between latent positions lpz_distNODE <- function(Z, node, diag) { .Call(`_BLSM_lpz_distNODE`, Z, node, diag) } #' @title Network log-likelihood for individual updates #' @description Compute the log-likelihood of the whole observed network based on the #' latent positions estimates and the model assumptions. The function follows almost the same approach as \link[BLSM]{lpY}, but it is #' more suitable for the individual updates occurring during the simulation. #' @param Y Adjacency matrix of the observed network #' @param Z Latent positions matrix #' @param alpha Model variable \eqn{\alpha} #' @param node Specific node in the network corresponding to the latent coordinate which will be used as reference #' @param diag Diagonal from \code{t(Z)\%*\%Z} matrix, passed to speed up the process. #' @param W BLSM Weights matrix of the observed network #' #' @return Log-likelihood of the observed network lpYNODE <- function(Y, Z, alpha, node, diag, W) { .Call(`_BLSM_lpYNODE`, Y, Z, alpha, node, diag, W) } #' @title Update step for the latent positions #' @description Accept/reject the proposals for the latent positions #' #' @param Y Adjacency matrix of the observed network #' @param Z Latent positions matrix #' @param W BLSM Weights matrix of the observed network #' @param alpha Model variable \eqn{\alpha} #' @param zdelta Standard deviation of the Gaussian proposal for latent positions #' @param mu_z Mean of the Gaussian prior distribution for latent positions #' @param sd_z Standard deviation of the Gaussian prior distribution for latent positions #' #' @return Updated latent positions matrix Z_up <- function(Y, Z, W, alpha, zdelta, mu_z, sd_z) { .Call(`_BLSM_Z_up`, Y, Z, W, alpha, zdelta, mu_z, sd_z) } #' @title Update step for the \eqn{\alpha} variable #' @description Accept/reject the proposal for the \eqn{\alpha} model variable #' #' @param Y Adjacency matrix of the observed network #' @param lpz Matrix containing the negative square root of the Euclidean distances between latent positions #' @param W BLSM Weights matrix of the observed network #' @param alpha Model variable \eqn{\alpha} #' @param adelta The uniform proposal for \eqn{\alpha} is defined on the \eqn{[-adelta,+adelta]} interval #' @param a_a Shape parameter of the Gamma prior distribution for \eqn{\alpha}. The value is usually set to 1, so the prior is actually an exponential distribution. #' @param a_b Rate parameter of the Gamma prior distribution for \eqn{\alpha}. #' #' @return Updated value of the \eqn{\alpha} variable alpha_up <- function(Y, lpz, W, alpha, adelta, a_a, a_b) { .Call(`_BLSM_alpha_up`, Y, lpz, W, alpha, adelta, a_a, a_b) }
/scratch/gouwar.j/cran-all/cranData/BLSM/R/RcppExports.R
blsm_colors = function(n) { hues = seq(15, 375, length = n + 1) hcl(h = hues, l = 65, c = 100)[1:n] } proc_crr=function(Z,Z0){ #' @title Procrustean corresponding positions #' @description Given a set of starting coordinates, the function returns the Procrustean Transform of the initial points that minimizes #' the sum of squared positional difference from a set of reference coordinates. The (Euclidean) distances between a candidate #' configuration and the reference are evaluated by considering the couples of corresponding points. #' #' The reference configuration must be centered at the origin. #' #' @param Z set of initial coordinates to be transformed #' @param Z0 set of reference coordinates centered at the origin #' #' @return Set of coordinates minimizing the distance between the initial configuration and the reference one #' @examples #' # Create configuration and center it at the origin #' pos_ref = matrix(runif(20), ncol=2) #' pos_ref = t(t(pos_ref)-colMeans(pos_ref)) #' #' # Create a new configuration by adding a perturbation to the previous one #' pos = pos_ref + matrix(rnorm(20, mean=1, sd=0.1), ncol=2) #' #' # Compute the Procrustean Transform and inspect the results #' proc_pos = proc_crr(pos, pos_ref) #' plot(pos_ref, col="blue", pch=20, xlim=c(-1,3), ylim=c(-1,3)) #' points(pos, col="red", pch=20) #' points(proc_pos, col="purple", pch=20) #' @export Z=t(t(Z)-colMeans(Z)) A=t(Z)%*%(Z0%*%t(Z0))%*%Z eA=eigen(A,symmetric=T) Ahalf=eA$vec%*%diag(sqrt(eA$val))%*%t(eA$vec) t(t(Z0)%*%Z%*%solve(Ahalf)%*%t(Z)) } estimate_latent_positions = function (Y,W, procrustean = TRUE, k=2, alpha=2, nscan=8*10^5, burn_in=5*10^5, odens=10^3, zdelta=1, z_norm_prior_mu=0, z_norm_prior_sd=10, adelta=.3, a_exp_prior_a=1, a_exp_prior_b=1, dynamic_plot = FALSE, dynamic_circles = FALSE, ...){ #' @title BLSM simulation #' @description Core function of the BLSM package: run a simulation to obtain the positions of the network nodes #' in the latent space for each sampled iteration. #' #' The positions are simulated accordingly to the model assumptions, please refer to \link[BLSM]{BLSM} for further information. #' The output of the function can be used to retrieve and compare specific iterations, observe their evolution or simply compute #' the average positions (more details in the descriptions and examples below). #' #' @param Y Adjacency matrix of the network #' @param W (Optional) BLSM Weight matrix of the network #' @param k Space dimensionality #' @param procrustean Boolean to include/exclude (\code{TRUE/FALSE}) the Procrustean Transform step in the algorithm. Set \code{TRUE} by default. #' @param alpha Starting value of the \eqn{\alpha} variable #' @param nscan Number of iterations #' @param burn_in Burn-in value (starting iterations to be discarded) #' @param odens Thinning: only 1 iteration every \code{odens} will be sampled and stored in the output #' @param zdelta Standard deviation of the Gaussian proposal for latent positions #' @param z_norm_prior_mu Mean of the Gaussian prior distribution for latent positions #' @param z_norm_prior_sd Standard deviation of the Gaussian prior distribution for latent positions #' @param adelta The uniform proposal for \eqn{\alpha} is defined on the \eqn{[-adelta,+adelta]} interval #' @param a_exp_prior_a Shape parameter of the Gamma prior distribution for \eqn{\alpha}. As the value is usually set to 1 the prior is an exponential distribution. #' @param a_exp_prior_b Rate parameter of the Gamma prior distribution for \eqn{\alpha}. #' @param dynamic_plot Boolean to plot dynamically the simulated positions (one update every \code{odens} iterations) #' @param dynamic_circles Boolean to add circles of radius \eqn{\alpha} to the dynamic plots #' @param \dots Additional parameters that can be passed to \link[BLSM]{plot_latent_positions} #' #' @return Returns a "BLSM object" (\code{blsm_obj}), i.e. a list containing: #' \itemize{ #' \item \code{Alpha }{\eqn{\alpha} values from the sampled iterations} #' \item \code{Likelihood }{Log-likelihood values from the sampled iterations} #' \item \code{Iterations }{Latent space coordinates from the sampled iterations. Latent positions are stored in a #' 3D array whose dimensions are given by (1: number of nodes, 2: space dimensionality, 3: number of iterations). #' In the non-Procrustean framework the latent distances are given instead of the positions: another 3D array is returned, whose dimensions #' are given by (1: number of nodes, 2: number of nodes, 3: number of iterations). The command needed in order to get the average values over the iterations for #' either the positions or the distances is \code{rowMeans(blsm_obj$Iterations, dims=2)} (see example below).} #' \item \code{StartingPositions }{Latent space coordinates right after the initialization step. In the non-Procrustean framework starting distances are given instead.} #' \item \code{Matrix }{Original matrices of the network (adjacency and BLSM weights)} #' \item \code{Parameters }{List of parameters specified during the call to \link[BLSM]{estimate_latent_positions}} #' } #' #' @examples #' \dontshow{ #' blsm_obj_test_1 = estimate_latent_positions(example_adjacency_matrix, burn_in = 10^3, nscan = 3*10^3, odens=100) #' blsm_obj_test_2 = estimate_latent_positions(example_adjacency_matrix, procrustean=FALSE, burn_in = 10^3, nscan = 3*10^3, odens=100) #' } #' \dontrun{ #' # Procrustean version followed by clustering #' blsm_obj = estimate_latent_positions(example_adjacency_matrix, #' burn_in = 3*10^4, nscan = 10^5, dynamic_plot = TRUE) #' #' avg_latent_positions = rowMeans(blsm_obj$Iterations, dims=2) #' h_cl = hclust(dist(avg_latent_positions), method="complete") #' n = 3 #' latent_space_clusters = cutree(h_cl, k=n) #' print(latent_space_clusters) #' plot(avg_latent_positions, col=rainbow(n)[latent_space_clusters], pch=20) #' #' # Non-Procrustean version followed by clustering #' blsm_obj_2 = estimate_latent_positions(example_adjacency_matrix, procrustean=FALSE, #' burn_in = 3*10^4, nscan = 10^5) #' avg_latent_distances = rowMeans(blsm_obj_2$Iterations, dims=2) #' h_cl = hclust(as.dist(avg_latent_distances), method="complete") #' n = 3 #' latent_space_clusters_2 = cutree(h_cl, k=n) #' print(latent_space_clusters_2) #' #' # Weighted network #' blsm_obj_3 = estimate_latent_positions(example_adjacency_matrix, example_weights_matrix, #' burn_in = 10^5, nscan = 2*10^5, dynamic_plot = TRUE) #' } #' @export if (missing(W)) { W=Y*0+1 } if (k==3 & dynamic_plot){ if (!requireNamespace("rgl", quietly = TRUE)) { message("rgl package needed for the 3D plot. Please install it or set dynamic_plot=FALSE.") return(NULL) } } params = list(procrustean=procrustean, k=k, alpha=alpha, nscan=nscan, burn_in=burn_in, odens=odens, zdelta=zdelta, z_norm_prior_mu=z_norm_prior_mu, z_norm_prior_sd=z_norm_prior_sd, adelta=adelta, a_exp_prior_a=a_exp_prior_a, a_exp_prior_b=a_exp_prior_b) nscan = nscan + burn_in it_cont = 1 create_window_flag=dynamic_plot rem = which(rowMeans(Y)==0) if(length(rem)>0) { Y=Y[-rem,-rem] W=W[-rem,-rem] } n=dim(Y)[1] my_colors=blsm_colors(n) cc=(Y>0)+0 D=dst(cc) Z=cmdscale(D, k) Z=t(t(Z)-colMeans(Z)) tmp_opt=c(alpha,c(Z)) tmp_opt=optim(tmp_opt,mlpY,Y=Y,W=W, method="SANN")$par tmp_opt=optim(tmp_opt,mlpY,Y=Y,W=W,method="Nelder-Mead")$par tmp_opt=tmp_opt*2/(tmp_opt[1]) alpha=tmp_opt[1] Z_Proc = matrix(tmp_opt[-1],nrow=n,ncol=k) Z_Proc=t(t(Z_Proc)-colMeans(Z_Proc)) Z = Z_Proc lpz = lpz_dist(Z) acc_a=0 acc_z=0 Alpha=alpha Lik=lpY(Y,lpz,alpha, W) inputs = list(Adjacency = Y, Weight = W) if (procrustean){ blsm_obj=list(Alpha=rep(NA, (nscan-burn_in)/odens), Likelihood=rep(NA, (nscan-burn_in)/odens), Iterations=array(NA,dim=c(n,k,(nscan-burn_in)/odens)), StartingPositions=Z, Matrix=inputs, Parameters=params) avg_Z_est = Z for(ns in 1:nscan){ tmp = Z_up(Y,Z,W,alpha,zdelta, z_norm_prior_mu, z_norm_prior_sd) if(any(tmp!=Z)){ acc_z=acc_z+sum(tmp!=Z)/(2*n*odens) tryCatch({ Z = proc_crr(tmp,Z_Proc) }, error = function(e) { message("The matrix used to compute the Procrustean transformation is singular. \nIf changing the parameters doesn't solve the issue, please try to lower the space dimensionality.") return(blsm_obj) graphics.off() } ) } lpz = lpz_dist(Z) tmp = alpha_up(Y,lpz,W,alpha,adelta,a_exp_prior_a,a_exp_prior_b) if(tmp!=alpha) { acc_a=acc_a+1/odens alpha=tmp } if (ns%%odens==0){ if(ns>burn_in){ if (create_window_flag){ if (k==2) { dev.new(noRStudioGD = TRUE) } else if (k==3) { rgl::par3d(windowRect = c(50, 50, 800, 800)) } else { message("Error: plot cannot be displayed since space dimensionality is bigger than 3.") dynamic_plot=FALSE } create_window_flag = FALSE } blsm_obj$Alpha[it_cont]= alpha lik = lpY(Y, lpz, alpha, W) blsm_obj$Likelihood[it_cont] = lik cat(ns-burn_in,acc_a,acc_z,alpha,lik,"\n") acc_z=acc_a=0 blsm_obj$Iterations[,,it_cont] = Z avg_Z_est= ((it_cont-1)*avg_Z_est+Z)/it_cont it_cont = it_cont+1 if (dynamic_plot){ plot_latent_positions(blsm_obj, circles_2D = dynamic_circles, ...) } } else { if (ns==odens){ cat("\nBeginning burn-in period...\n\n") } lik = lpY(Y, lpz, alpha, W) cat(ns,acc_a,acc_z,alpha,lik,"\n") acc_z=acc_a=0 if (ns==burn_in){ cat("\nBurn-in period ended.\n\nBeginning simulation...\n\n") } } } } } else { blsm_obj=list(Alpha=rep(NA, (nscan-burn_in)/odens), Likelihood=rep(NA, (nscan-burn_in)/odens), Iterations=array(NA,dim=c(n,n,(nscan-burn_in)/odens)), StartingDistances=-lpz_dist(Z), Matrix=inputs, Parameters=params) for(ns in 1:nscan){ Z_tmp=Z_up(Y,Z,W,alpha,zdelta, z_norm_prior_mu, z_norm_prior_sd) if(any(Z_tmp!=Z)){ acc_z=acc_z+sum(Z_tmp!=Z)/(2*n*odens) Z=Z_tmp } lpz = lpz_dist(Z) tmp=alpha_up(Y,lpz,W,alpha,adelta,a_exp_prior_a,a_exp_prior_b) if(tmp!=alpha) { acc_a=acc_a+1/odens alpha=tmp } if (ns%%odens==0){ if( ns > burn_in){ blsm_obj$Alpha[it_cont]= alpha lik = lpY(Y, lpz, alpha, W) blsm_obj$Likelihood[it_cont] = lik cat(ns-burn_in,acc_a,acc_z,alpha,lik,"\n") acc_z=acc_a=0 blsm_obj$Iterations[,,it_cont] = -lpz it_cont = it_cont + 1 } else { if (ns==odens){ cat("\nBeginning burn-in period...\n\n") } lik = lpY(Y, lpz, alpha, W) cat(ns,acc_a,acc_z,alpha,lik,"\n") acc_z=acc_a=0 if (ns==burn_in){ cat("\nBurn-in period ended.\n\nBeginning simulation...\n\n") } } } } } cat("\nThe simulation ended successfully.") return(blsm_obj) } plot_traceplots_acf = function (blsm_obj, chosen_node=1, coordinate=1, chosen_pair=c(1,2)){ #' @title BLSM traceplots and ACF #' @description Traceplots and autocorrelation functions for the \eqn{\alpha} variable and a selected node (or pair of nodes in the non-Procrustean framework). #' #' @param blsm_obj BLSM object obtained through \link[BLSM]{estimate_latent_positions} #' @param chosen_node Specified node for traceplot and autocorrelation function (Procrustean framework) #' @param coordinate Specified coordinate dimension from the n-dimensional latent space #' @param chosen_pair Specified pair of nodes for traceplot and autocorrelation function (non-Procrustean framework) #' #' @examples #' plot_traceplots_acf(example_blsm_obj, chosen_node=3, coordinate=1) #' #'\dontrun{ #' # Run the simulation without Procrustean step #' blsm_obj = estimate_latent_positions(example_adjacency_matrix, procrustean = FALSE, #' burn_in = 3*10^4, nscan = 10^5) #' #' # Plot #' plot_traceplots_acf(blsm_obj, chosen_pair=c(2,5)) #'} #' @export par(mfrow=c(2,2)) if (blsm_obj$Parameters$procrustean==TRUE){ plot(blsm_obj$Alpha,type="l",main="Alpha") plot(blsm_obj$Iterations[chosen_node,coordinate,],type="l", ylab=paste0("Coordinate ",coordinate), main = paste0("Node ",chosen_node)) plot(acf(blsm_obj$Alpha,plot=F),main="Alpha") plot(acf(blsm_obj$Iterations[chosen_node,coordinate,],plot=F),main= paste0("Node ",chosen_node)) } else { plot(blsm_obj$Alpha,type="l",main="Alpha") plot(blsm_obj$Iterations[chosen_pair[1],chosen_pair[2],],type="l", ylab="Euclidean distance", main = paste0("Nodes ",chosen_pair[1], " and ", chosen_pair[2])) plot(acf(blsm_obj$Alpha,plot=F),main="Alpha") plot(acf(blsm_obj$Iterations[chosen_pair[1],chosen_pair[2],],plot=F),main= paste0("Nodes ",chosen_pair[1], " and ", chosen_pair[2])) } } plot_latent_positions = function(blsm_obj, colors, points_size=0.1, labels_point_size=5, labels_point_color="yellow", labels_text_size=1, labels_text_color="blue", circles_2D = FALSE){ #' @title Base BLSM plot function #' @description Plot latent positions from a Procrustean simulation. #' #' @param blsm_obj BLSM object obtained through \link[BLSM]{estimate_latent_positions} #' @param colors (Optional) Colors of the simulated coordinate points in the latent space. Internal default colors are used if the argument is missing. #' @param points_size Size of the coordinate points #' @param labels_point_size Size of the label points #' @param labels_point_color Color of the label points #' @param labels_text_size Text size in the label points #' @param labels_text_color Text color in the label points #' @param circles_2D Plot circles of radius \eqn{\alpha} (see the model's main variables) centered around the label points #' #' @examples #' plot_latent_positions(example_blsm_obj, labels_point_color = "black", labels_text_color = "black") #' #' plot_latent_positions(example_blsm_obj, circles_2D = TRUE) #' @export n=dim(blsm_obj$Iterations)[1] if (missing(colors)) { colors=blsm_colors(n) } if (blsm_obj$Parameters$procrustean==TRUE){ if (dim(blsm_obj$Iterations)[2]==2){ par(mfrow=c(1,1)) par(mar=c(1,1,1,1)) par(mgp=c(2,1,0)) dev.hold() plot(blsm_obj$Iterations[,1,],blsm_obj$Iterations[,2,],pch=20,cex=points_size, col=colors, xlab="", ylab="", xaxt="n", yaxt="n") avg_Z_est=rowMeans(blsm_obj$Iterations, dims=2, na.rm = TRUE) for(i in 1:(n-1)){ for(j in (i+1):n){ lines(avg_Z_est[c(i,j),1], avg_Z_est[c(i,j),2] ,lty=blsm_obj$Matrix$Adjacency[i,j], col="blue", lwd =2) } } points(avg_Z_est[,1],avg_Z_est[,2],xaxt="n",yaxt="n",xlab="",ylab="", col=labels_point_color,pch=20,cex=labels_point_size) text(avg_Z_est[,1],avg_Z_est[,2],labels(blsm_obj$Matrix$Adjacency)[[1]],col=labels_text_color,cex=labels_text_size) if (circles_2D){symbols(avg_Z_est, circles = rep(mean(blsm_obj$Alpha),n), add=TRUE, fg = colors, inches=F)} dev.flush() } else if (dim(blsm_obj$Iterations)[2]==3){ if (!requireNamespace("rgl", quietly = TRUE)) { message("rgl package needed for the 3D plot. Please install it.") return(NULL) } par(mfrow=c(1,1)) par(mar=c(3,3,1,1)) par(mgp=c(2,1,0)) rgl::plot3d(blsm_obj$Iterations[,1,],blsm_obj$Iterations[,2,],blsm_obj$Iterations[,3,], size=points_size*10, col=colors, xlab = "", ylab="", zlab="") rgl::rgl.viewpoint(theta=30, phi=10, fov=30) if (missing(avg_Z_est)){ avg_Z_est=rowMeans(blsm_obj$Iterations, dims=2, na.rm = TRUE) } for(i in 1:(n-1)){ for(j in (i+1):n){ if (blsm_obj$Matrix$Adjacency[i,j]){ rgl::lines3d( avg_Z_est[c(i,j),], col="blue", lwd =2) } } } rgl::points3d(avg_Z_est[,1],avg_Z_est[,2],avg_Z_est[,3],xaxt="n",yaxt="n",xlab="",ylab="", col=labels_point_color,size=labels_point_size*10) rgl::text3d(avg_Z_est[,1],avg_Z_est[,2],avg_Z_est[,3],labels(blsm_obj$Matrix$Adjacency)[[1]],col=labels_text_color,cex=labels_text_size) } else { message("Error: plot cannot be displayed since space dimensionality is bigger than 3.") } } else { message("Sampled latent positions are not available in the non-Procrustean framework.\n") } }
/scratch/gouwar.j/cran-all/cranData/BLSM/R/blsm.R
#' Example Adjacency Matrix #' #' Adjacency matrix of a 10 nodes random network for testing purposes #' #' @name example_adjacency_matrix #' @docType data #' @format A binary adjacency matrix representing links between nodes. "example_adjacency_matrix" #' Example Weights Matrix #' #' "BLSM weights" matrix of a 10 nodes random network for testing purposes #' #' @name example_weights_matrix #' @docType data #' @format A matrix containing positive weights for all pairs of nodes. #' #' Given a couple of nodes, a weight expresses the importance of the distance between the #' coordinates associated to the two nodes in the latent space in terms of the overall likelihood of the graph. #' For this reason, even missing links must have a coefficient, otherwise the relative positioning of disconnected nodes #' would have no effect at all on the graph likelihood. #' #' The exact probability equation is described in \link[BLSM]{BLSM}, as well as the notation used. #' #' A few examples: #' \itemize{ #' \item for unweighted networks, the "BLSM weights" matrix has all the values set to 1. #' \item if two nodes share a strong connection, then #' the weight coefficient should be greater than 1 so that their positions in the latent space will be closer than they would be in an unweighted framework. #' \item if two nodes share a weak connection, a coefficient smaller than 1 will allow the latent coordinates to be pretty far from each other even though the nodes are connected. #' } "example_weights_matrix" #' Example BLSM object #' #' BLSM object obtained by applying the Procrustean version of the latent space model to the unweighted network #' whose adjacency matrix is \link[BLSM]{example_adjacency_matrix}. Further details concerning the #' simulation are contained in the BLSM object itself. #' #' @name example_blsm_obj #' @docType data #' @format BLSM object (\code{blsm_obj}), i.e. a list containing: #' \itemize{ #' \item \code{Alpha }{\eqn{\alpha} values from the sampled iterations} #' \item \code{Likelihood }{Log-likelihood values from the sampled iterations} #' \item \code{Iterations }{Latent space coordinates from the sampled iterations. Latent positions are stored in a #' 3D array whose dimensions are given by (1: number of nodes, 2: space dimensionality, 3: number of iterations). #' In the non-Procrustean framework the latent distances are given instead of the positions: another 3D array is returned, whose dimensions #' are given by (1: number of nodes, 2: number of nodes, 3: number of iterations). The command needed in order to get the average values over the iterations for #' either the positions or the distances is \code{rowMeans(blsm_obj$Iterations, dims=2)} (see example below).} #' \item \code{StartingPositions }{Latent space coordinates right after the initialization step. In the non-Procrustean framework starting distances are given instead.} #' \item \code{Matrix }{Original matrices of the network (adjacency and BLSM weights)} #' \item \code{Parameters }{List of parameters specified during the call to \link[BLSM]{estimate_latent_positions}} #' } "example_blsm_obj"
/scratch/gouwar.j/cran-all/cranData/BLSM/R/data.R
#' @name BLSM #' @title Bayesian Latent Space Model #' @docType package #' #' @description R package allowing the computation of a Bayesian Latent Space Model for complex networks. #' #' Latent Space Models are characterized by the presence of unobservable variables (latent coordinates) that are used to #' compute the likelihood of the observed networks. Their goal is to map the observed network in the latent #' space by meeting specific probabilistic requirements, so that the estimated latent coordinates can then be used to #' describe and characterize the original graph. #' #' In the BSLM package framework, given a network characterized by its adjacency \eqn{Y} matrix, the model assigns a binary random #' variable to each tie: \eqn{Y_ij} is related to the tie between nodes \eqn{i} and \eqn{j} and its value is 1 #' if the tie exists, 0 otherwise. #' #' The model assumes the independence of \eqn{Y_ij | x_i,x_j, \alpha}, where \eqn{x_i} and \eqn{x_j} are the coordinates #' of the nodes in the multidimensional latent space and \eqn{\alpha} is an additional parameter such that #' \eqn{logit(P(Y_ij = 1)) = \alpha - ||x_i -x_j||}. #' #' The latent space coordinates are estimated by following a MCMC procedure that is based on the overall likelihood induced by the above equation. #' #' Due to the symmetry of the distance, the model leads to more intuitive outputs for undirected networks, #' but the functions can also deal with directed graphs. #' #' If the network is weighted, i.e. to each tie is associated a positive coefficient, the model's probability equation #' becomes \eqn{logit(P(Y_ij = 1)) = \alpha - W_ij||x_i -x_j||}, where \eqn{W_ij} denotes the weight related to link existing between \eqn{x_i} and \eqn{x_j}. #' This means that even non existing links should have a weight, therefore the matrix used in the computation isn't the original weights matrix but #' actually a specific "BLSM weights" matrix that contains positive coefficients for all the possible pairs of nodes. #' When dealing with weighted networks, please be careful to pass a "BLSM weights" matrix as input #' #' (please refer to \link[BLSM]{example_weights_matrix} for more detailed information and a valid example). #' #' The output of the model allows the user to inspect the MCMC simulation, create insightful graphical representations or #' apply clustering techniques to better describe the latent space. #' See \link[BLSM]{estimate_latent_positions} or \link[BLSM]{plot_latent_positions} for further information. #' #' @import Rcpp #' @importFrom Rcpp evalCpp #' @importFrom grDevices dev.flush dev.hold dev.new graphics.off hcl #' @importFrom graphics lines par plot points symbols text #' @importFrom stats acf cmdscale optim #' #' @useDynLib BLSM, .registration = TRUE #' #' @references P. D. Hoff, A. E. Raftery, M. S. Handcock, Latent Space Approaches to Social Network Analysis, #' Journal of the American Statistical Association, Vol. 97, No. 460, (2002), pp. 1090-1098. #' #' A. Donizetti, A Latent Space Model Approach for Clustering Complex Network Data, #' Master's Thesis, Politecnico di Milano, (2017). #' NULL
/scratch/gouwar.j/cran-all/cranData/BLSM/R/package.R
"bic.glm" <- function (x, ...) UseMethod("bic.glm") "bic.glm.data.frame" <- function (x, y, glm.family, wt = rep(1, nrow(x)), strict = FALSE, prior.param = c(rep(0.5, ncol(x))), OR = 20, maxCol = 30, OR.fix = 2, nbest = 150, dispersion = NULL, factor.type = TRUE, factor.prior.adjust = FALSE, occam.window = TRUE, call = NULL, ...) { namx <- names(x) if (is.null(colnames(x))) colnames(x) <- 1:ncol(x) # this will add a suffix ".x" to the names to prevent duplicate names when # there are factors LAST <- sapply(colnames(x), function(z) substring(z,nchar(z))) m <- match(LAST,as.character(0:9),nomatch=0) pad <- "" if (any(m != 0)) { pad <- ".x" colnames(x) <- paste( colnames(x), ".x", sep = "") } facx <- sapply(x,is.factor) leaps.glm <- function(info, coef, names.arg, nbest = nbest) { names.arg <- names.arg if (is.null(names.arg)) names.arg <- c(as.character(1:9), LETTERS, letters)[1:ncol(info)] if (length(names.arg) < ncol(info)) stop("Too few names") bIb <- coef %*% info %*% coef kx <- ncol(info) maxreg <- nbest * kx if (kx < 3) stop("Too few independent variables") imeth <- 1 df <- kx + 1 Ib <- info %*% coef rr <- cbind(info, Ib) rr <- rbind(rr, c(Ib, bIb)) it <- 0 n.cols <- kx + 1 nv <- kx + 1 nf <- 0 no <- 1e+05 ib <- 1 mb <- nbest nd <- n.cols nc <- 4 * n.cols rt <- matrix(rep(0, times = nd * nc), ncol = nc) rt[, 1:n.cols] <- rr iw <- c(1:(kx + 1), rep(0, times = 4 * nd)) nw <- length(iw) rw <- rep(0, times = 2 * mb * kx + 7 * nd) nr <- length(rw) t1 <- 2 s2 <- -1 ne <- 0 iv <- 0 nret <- mb * kx Subss <- rep(0, times = nret) RSS <- Subss ans <- .Fortran("fwleaps", as.integer(nv), as.integer(it), as.integer(kx), as.integer(nf), as.integer(no), as.integer(1), as.double(2), as.integer(mb), as.double(rt), as.integer(nd), as.integer(nc), as.integer(iw), as.integer(nw), as.double(rw), as.integer(nr), as.double(t1), as.double(s2), as.integer(ne), as.integer(iv), as.double(Subss), as.double(RSS), as.integer(nret), PACKAGE = "BMA") regid <- ans[[21]]/2 r2 <- ans[[20]] nreg <- sum(regid > 0) regid <- regid[1:nreg] r2 <- r2[1:nreg] which <- matrix(TRUE, nreg, kx) z <- regid which <- matrix(as.logical((rep.int(z, kx)%/%rep.int(2^((kx - 1):0), rep.int(length(z), kx)))%%2), byrow = FALSE, ncol = kx) size <- which %*% rep(1, kx) label <- character(nreg) sep <- if (all(nchar(names.arg) == 1)) "" else "," for (i in 1:nreg) label[i] <- paste(names.arg[which[i, ]], collapse = sep) ans <- list(r2 = r2, size = size, label = label, which = which) return(ans) } factor.names <- function(x) { out <- list() for (i in 1:ncol(x)) if (is.factor(x[, i])) out[[i]] <- levels(x[, i]) else out <- c(out, list(NULL)) attributes(out)$names <- names(x) return(out) } create.assign <- function(xx) { asgn <- list() asgn[[1]] <- 1 cnt <- 2 for (i in 1:ncol(xx)) { if (!is.factor(xx[, i])) size <- 1 else size <- length(levels(xx[, i])) - 1 asgn[[i + 1]] <- cnt:(cnt + size - 1) cnt <- cnt + size } names(asgn) <- c("(Intercept)", attributes(xx)$names) return(asgn) } dropcols <- function(x, y, glm.family, wt, maxCols = 30) { vnames <- attributes(x)$names nvar <- length(vnames) isfac <- rep(FALSE, times = nvar) for (i in 1:nvar) isfac[i] <- is.factor(x[, i]) nlevels <- rep(NA, times = nvar) for (i in 1:nvar) if (isfac[i]) nlevels[i] <- length(levels(x[, i])) any.dropped <- FALSE mm <- model.matrix(terms.formula(~., data = x), data = x) designx <- attributes(mm)$assign n.designx <- length(designx) designx.levels <- rep(1, times = n.designx) for (i in 2:n.designx) if (isfac[designx[i]]) designx.levels[i] <- sum(designx[1:i] == designx[i]) + 1 x.df <- data.frame(x = x) glm.out <- glm(y ~ ., family = glm.family, weights = wt, data = x.df) glm.assign <- create.assign(x) while (length(glm.out$coefficients) > maxCol) { any.dropped <- TRUE dropglm <- drop1(glm.out, test = "Chisq") lrtname <- if(!is.null(dropglm$LRT)) "LRT" else "scaled dev." dropped <- which.max(dropglm[[lrtname]][-1]) + 1 if (length(dropped) == 0) stop("dropped == 0") x.df <- x.df[, -(dropped - 1)] designx.levels <- designx.levels[-dropped] designx <- designx[-dropped] glm.out <- glm(y ~ ., family = glm.family, weights = wt, data = x.df) } remaining.vars <- unique(designx[-1]) new.nvar <- length(remaining.vars) dropped.vars <- vnames[-remaining.vars] dropped.levels <- NULL ncol.glm <- ncol(x.df) - 1 xx <- data.frame(matrix(rep(NA, times = new.nvar * nrow(x.df)), ncol = new.nvar)) colnames(xx) <- sapply(colnames(x.df), function(s) substring(s,3,nchar(s))) x.df <- x.df[-(ncol.glm + 1)] new.names = rep(NA, times = new.nvar) for (i in 1:new.nvar) { cvar <- remaining.vars[i] lvls <- designx.levels[cvar == designx] if (isfac[cvar]) { if (length(lvls) != length(levels(x[, cvar]))) { newvar <- (as.matrix(x.df[, cvar == designx[-1]]) %*% cbind(lvls - 1)) + 1 xx[, i] <- factor(levels(x[, cvar])[newvar]) new.names[i] <- vnames[cvar] removed.levels <- levels(x[, cvar])[-c(1, lvls)] dropped.levels <- c(dropped.levels, paste(vnames[cvar], "_", removed.levels, sep = "")) } else { xx[, i] <- factor(x[, cvar]) new.names[i] <- vnames[cvar] } } else { xx[, i] <- x[, cvar] new.names[i] <- vnames[cvar] } } dropped <- c(dropped.vars, dropped.levels) return(list(mm = xx, any.dropped = any.dropped, dropped = dropped, var.names = new.names, remaining.vars = remaining.vars)) } if (is.null(call)) cl <- match.call() else cl <- call options(contrasts = c("contr.treatment", "contr.treatment")) prior.weight.denom <- 0.5^ncol(x) x <- as.data.frame(x) LEVELS <- lapply(x, levels) names.arg <- names(x) if (is.null(names.arg)) names.arg <- paste("X", 1:ncol(x), sep = "") x2 <- na.omit(x) used <- match(row.names(x), row.names(x2)) omitted <- seq(nrow(x))[is.na(used)] if (length(omitted) > 0) { wt <- wt[-omitted] x <- x2 y <- y[-omitted] warning(paste("There were ", length(omitted), "records deleted due to NA's")) } leaps.x <- x output.names <- names(x) fn <- factor.names(x) factors <- !all(unlist(lapply(fn, is.null))) x.df <- data.frame(x = x) glm.out <- glm(y ~ ., family = glm.family, weights = wt, data = x.df) glm.assign <- create.assign(x) fac.levels <- unlist(lapply(glm.assign, length)[-1]) varNames <- names.arg if (factors) { # factors get turned into vectors in leaps.x using coefficient values cdf <- cbind.data.frame(y = y, x) ncoly <- if (is.null(dim(y))) 1 else ncol(y) mm <- model.matrix(formula(cdf), data = cdf)[, -(1:ncoly), drop = FALSE] varNames <- colnames(mm) mmm <- data.frame(matrix(mm, nrow = nrow(mm), byrow = FALSE)) names(mmm) <- dimnames(mm)[[2]] output.names <- names(mmm) if (factor.type) { for (i in 1:length(names(x))) { if (!is.null(fn[[i]])) { nx <- names(x)[i] coefs <- glm.out$coef[glm.assign[[i + 1]]] old.vals <- x[, i] new.vals <- c(0, coefs) new.vec <- as.vector(new.vals[match(old.vals, fn[[i]])]) leaps.x[, nx] <- new.vec } } } else { new.prior <- NULL for (i in 1:length(names(x))) { addprior <- prior.param[i] if (!is.null(fn[[i]])) { k <- length(fn[[i]]) if (factor.prior.adjust) addprior <- rep(1 - (1 - prior.param[i])^(1/(k - 1)), k - 1) else addprior <- rep(prior.param[i], k - 1) } new.prior <- c(new.prior, addprior) } prior.param <- new.prior x <- leaps.x <- mmm } } xx <- data.frame() xx <- dropcols(leaps.x, y, glm.family, wt, maxCol) var.names <- xx$var.names remaining <- xx$remaining.vars leaps.x <- xx$mm reduced <- xx$any.dropped dropped <- 0 if (reduced) { dropped <- pmatch(xx$dropped, varNames, nomatch = 0) varNames <- varNames[-dropped] prior.param <- prior.param[-dropped] } nvar <- length(x[1, ]) x <- x[, remaining, drop = FALSE] x <- data.frame(x) fac.levels <- fac.levels[remaining] output.names <- list() for (i in 1:length(var.names)) { if (is.factor(x[, i])) output.names[[i]] <- levels(x[, i]) else output.names[[i]] <- NA } xnames <- names(x) names(leaps.x) <- var.names x.df <- data.frame(x = leaps.x) glm.out <- glm(y ~ ., family = glm.family, weights = wt, data = x.df, x = TRUE) # glm.assign <- create.assign(leaps.x) glm.assign <- create.assign(x) if (factor.type == FALSE) fac.levels <- unlist(lapply(glm.assign, length)[-1]) famname <- glm.out$family["family"]$family linkinv <- glm.out$family["linkinv"]$linkinv if (is.null(dispersion)) { if (famname == "poisson" | famname == "binomial") dispersion <- FALSE else dispersion <- TRUE } nobs <- length(y) resid <- resid(glm.out, "pearson") rdf <- glm.out$df.resid is.wt <- !all(wt == rep(1, nrow(x))) if (is.wt) { resid <- resid * sqrt(wt) excl <- wt == 0 if (any(excl)) { warning(paste(sum(excl), "rows with zero wts not counted")) resid <- resid[!excl] } } phihat <- sum(resid^2)/rdf if (dispersion) disp <- phihat else disp <- 1 coef <- glm.out$coef[-1] p <- glm.out$rank R <- glm.out$R rinv <- diag(p) rinv <- backsolve(R, rinv) rowlen <- drop(((rinv^2) %*% rep(1, p))^0.5) sigx <- rowlen %o% sqrt(disp) correl <- rinv %*% t(rinv) * outer(1/rowlen, 1/rowlen) cov <- correl * sigx %*% t(sigx) info <- solve(cov[-1, -1]) if (ncol(x) > 2) { a <- leaps.glm(info, coef, names.arg = names(leaps.x), nbest = nbest) a$r2 <- pmin(pmax(0, a$r2), 0.999) a$r2 <- c(0, a$r2) a$size <- c(0, a$size) a$label <- c("NULL", a$label) a$which <- rbind(rep(FALSE, ncol(x)), a$which) nmod <- length(a$size) prior.mat <- matrix(rep(prior.param, nmod), nmod, ncol(x), byrow = TRUE) prior <- apply(a$which*prior.mat + (!a$which)*(1 - prior.mat), 1, prod) bIb <- as.numeric(coef %*% info %*% coef) lrt <- bIb - (a$r2 * bIb) bic <- lrt + (a$size) * log(nobs) - 2 * log(prior) occam <- bic - min(bic) < 2 * OR.fix * log(OR) size <- a$size[occam] label <- a$label[occam] which <- a$which[occam, , drop = FALSE] bic <- bic[occam] prior <- prior[occam] } else { nmod <- switch(ncol(x), 2, 4) bic <- label <- rep(0, nmod) which <- matrix(c(FALSE, TRUE, FALSE, TRUE, FALSE, FALSE, TRUE, TRUE)[1:(nmod*nmod/2)], nmod, nmod/2) size <- c(0, 1, 1, 2)[1:nmod] sep <- if (all(nchar(names.arg) == 1)) "" else "," prior.mat <- matrix(rep(prior.param, nmod), nmod, ncol(x), byrow = TRUE) prior <- apply(which * prior.mat + (!which) * (1 - prior.mat), 1, prod) for (k in 1:nmod) { if (k == 1) label[k] <- "NULL" else label[k] <- paste(names.arg[which[k, ]], collapse = sep) } } nmod <- length(label) model.fits <- as.list(rep(0, nmod)) dev <- rep(0, nmod) df <- rep(0, nmod) xdf <- x.df colnames(xdf) <- sapply(colnames(xdf), function(s) substring(s,3,nchar(s))) for (k in 1:nmod) { if (sum(which[k, ]) == 0) { glm.out <- glm(y ~ 1, family = glm.family, weights = wt) } else { x.df <- data.frame(x = x[, which[k, ], drop = F]) if (ncol(x.df) != 1) { colnames(x.df) <- sapply(colnames(x.df), function(s) substring(s,3,nchar(s))) } glm.out <- glm(y ~ ., data = x.df, family = glm.family, weights = wt) } dev[k] <- glm.out$deviance df[k] <- glm.out$df.residual model.fits[[k]] <- matrix(0, nrow = length(glm.out$coef), ncol = 2, dimnames = list(names(glm.out$coef), NULL)) model.fits[[k]][, 1] <- glm.out$coef coef <- glm.out$coef p <- glm.out$rank R <- glm.out$R rinv <- diag(p) rinv <- backsolve(R, rinv) rowlen <- drop(((rinv^2) %*% rep(1, p))^0.5) sigx <- rowlen %o% sqrt(disp) correl <- rinv %*% t(rinv) * outer(1/rowlen, 1/rowlen) cov <- correl * sigx %*% t(sigx) model.fits[[k]][, 2] <- sqrt(diag(cov)) } bic <- dev/disp - df * log(nobs) - 2 * log(prior) if (occam.window) occam <- bic - min(bic) < 2 * log(OR) else occam = rep(TRUE, length(bic)) dev <- dev[occam] df <- df[occam] size <- size[occam] label <- label[occam] which <- which[occam, , drop = FALSE] bic <- bic[occam] prior <- prior[occam] model.fits <- model.fits[occam] postprob <- exp(-0.5 * (bic - min(bic)))/sum(exp(-0.5 * (bic - min(bic)))) order.bic <- order(bic, size, label) dev <- dev[order.bic] df <- df[order.bic] size <- size[order.bic] label <- label[order.bic] which <- which[order.bic, , drop = FALSE] bic <- bic[order.bic] prior <- prior[order.bic] postprob <- postprob[order.bic] model.fits <- model.fits[order.bic] nmod <- length(bic) if (strict & (nmod != 1)) { occam <- rep(TRUE, nmod) for (k in (2:nmod)) for (j in (1:(k - 1))) { which.diff <- which[k, ] - which[j, ] if (all(which.diff >= 0)) occam[k] <- FALSE } dev <- dev[occam] df <- df[occam] size <- size[occam] label <- label[occam] which <- which[occam, , drop = FALSE] bic <- bic[occam] prior <- prior[occam] postprob <- postprob[occam] postprob <- postprob/sum(postprob) model.fits <- model.fits[occam] } bic <- bic + 2 * log(prior) probne0 <- round(100 * t(which) %*% as.matrix(postprob), 1) nmod <- length(bic) nvar <- max(unlist(glm.assign)) Ebi <- SDbi <- rep(0, nvar) EbiMk <- sebiMk <- matrix(rep(0, nmod * nvar), nrow = nmod) for (i in (1:ncol(x))) { whereisit <- glm.assign[[i + 1]] if (any(which[, i])) for (k in (1:nmod)) if (which[k, i] == TRUE) { spot <- sum(which[k, (1:i)]) posMk <- (c(0, cumsum(fac.levels[which[k, ]])) + 1)[spot] posMk <- posMk:(posMk + fac.levels[i] - 1) + 1 EbiMk[k, whereisit] <- model.fits[[k]][posMk, 1] sebiMk[k, whereisit] <- model.fits[[k]][posMk, 2] } } for (k in 1:nmod) { EbiMk[k, 1] <- model.fits[[k]][1, 1] sebiMk[k, 1] <- model.fits[[k]][1, 2] } Ebi <- postprob %*% EbiMk Ebimat <- matrix(rep(Ebi, nmod), nrow = nmod, byrow = TRUE) SDbi <- sqrt(postprob %*% (sebiMk^2) + postprob %*% ((EbiMk - Ebimat)^2)) CEbi <- CSDbi <- rep(0, nvar) for (i in (1:ncol(x))) { sel <- which[, i] if (sum(sel) > 0) { cpp <- rbind(postprob[sel]/sum(postprob[sel])) CEbi[glm.assign[[i + 1]]] <- as.numeric(cpp %*% EbiMk[sel, glm.assign[[i + 1]]]) CSDbi[glm.assign[[i + 1]]] <- sqrt(cpp %*% (sebiMk[sel, glm.assign[[i + 1]]]^2) + cpp %*% ((EbiMk[sel, glm.assign[[i + 1]]] - CEbi[glm.assign[[i + 1]]])^2)) } } CSDbi[1] <- SDbi[1] CEbi[1] <- Ebi[1] names(output.names) <- var.names postmean <- as.vector(Ebi) varNames <- gsub("`", "", varNames) if (pad != "") varNames <- sapply( varNames, function(z) substring(z,1,nchar(z)-2)) names(varNames) <- NULL colnames(EbiMk) <- names(postmean) <- c("(Intercept)", varNames) names(probne0) <- if (factor.type) { if (!all(dropped == 0)) namx[-dropped] else namx } else varNames result <- list(postprob = postprob, label = label, deviance = dev, size = size, bic = bic, prior.param = prior.param, prior.model.weights = prior/prior.weight.denom, family = famname, linkinv = linkinv, levels = LEVELS, disp = disp, which = which, probne0 = c(probne0), postmean = postmean, postsd = as.vector(SDbi), condpostmean = CEbi, condpostsd = CSDbi, mle = EbiMk, se = sebiMk, namesx = var.names, reduced = reduced, dropped = dropped, call = cl, n.models = length(postprob), n.vars = length(probne0), nests = length(Ebi), output.names = output.names, assign = glm.assign, factor.type = factor.type, design = leaps.x, x = x, y = y) class(result) <- "bic.glm" result } "bic.glm.formula" <- function (f, data, glm.family, wt = rep(1, nrow(data)), strict = FALSE, prior.param = c(rep(0.5, ncol(x))), OR = 20, maxCol = 30, OR.fix = 2, nbest = 150, dispersion = NULL, factor.type = TRUE, factor.prior.adjust = FALSE, occam.window = TRUE, na.action = na.omit, ...) { cl <- match.call() tms <- terms(f, data = data) fmatrix <- attr(tms, "factors") tms.order <- attr(tms, "order") tms.labels <- attr(tms, "term.labels") attr(data, "na.action") <- na.action if(!is.null(na.action)) data <- na.action(data) mm <- model.matrix(tms, data = data) ############################################################################ ## change to facilitate predict 10/2011 CF # tms.labels <- colnames(mm) # if (tms.labels[1] == "(Intercept)") tms.labels <- tms.labels[-1] ############################################################################ assn <- attr(mm, "assign") nterms <- max(assn) datalist <- eval(attr(tms, "variables"), envir = data) nvar <- nrow(fmatrix) - 1 isvarfac <- rep(NA, times = nvar) for (i in 1:nvar) isvarfac[i] <- is.factor(datalist[[i + 1]]) istermfac <- rep(NA, times = nterms) for (i in 1:nterms) { cterms <- fmatrix[-1, i] == 1 istermfac[i] <- sum(isvarfac[cterms] == FALSE) == 0 } resp.name <- all.vars(f)[1] moddata <- data.frame(rep(NA, times = dim(mm)[1])) cnames <- NULL for (i in 1:nterms) { if (istermfac[i]) { if (tms.order[i] == 1) { moddata <- cbind(moddata, datalist[[i + 1]]) cnames <- c(cnames, tms.labels[i]) } else { sel <- assn == i nlev <- sum(sel) newfac.index <- (mm[, sel] %*% cbind(1:nlev)) + 1 facnames <- c("ref", colnames(mm)[sel]) newfac <- facnames[newfac.index] newfac <- factor(newfac) moddata <- cbind(moddata, newfac) cnames <- c(cnames, paste(tms.labels[i], "..", sep = "")) } } else { sel <- assn == i moddata <- cbind(moddata, mm[, sel]) cnames <- c(cnames, colnames(mm)[sel]) } } moddata <- moddata[, -1, drop = FALSE] ######################################################################## ## deleted to facilitate predict 10/2011 CF ## cnames <- gsub(":", ".", cnames) ## moddata <- moddata ## colnames(moddata) <- c(cnames) colnames(moddata) <- cnames ######################################################################## # caution: at least x needs to be assigned before the call (don't know why) CF y <- datalist[[1]] if (!is.null(dim(y))) { ncases <- apply( y, 1, sum) index <- rep( 1:nrow(y), ncases) x <- moddata[index,] wt <- wt[index] y <- as.vector(apply( y, 1, function(n) c(rep(0,n[1]),rep(1,n[2])))) } else { x <- moddata } result <- bic.glm(x=x, y=y, glm.family, wt = wt, strict = FALSE, prior.param = prior.param, OR = OR, maxCol = maxCol, OR.fix = OR.fix, nbest = nbest, dispersion = dispersion, factor.type = factor.type, factor.prior.adjust = factor.prior.adjust, occam.window = occam.window, call = cl) ######################################################################## ## added to facilitate predict 10/2011 CF ######################################################################## result$formula <- f result$x <- moddata result$y <- datalist[[1]] result$na.action <- length(attr(data, "na.action")) result } bic.glm.matrix <- function (x, y, glm.family, wt = rep(1, nrow(x)), strict = FALSE, prior.param = c(rep(0.5, ncol(x))), OR = 20, maxCol = 30, OR.fix = 2, nbest = 150, dispersion = NULL, factor.type = TRUE, factor.prior.adjust = FALSE, occam.window = TRUE, call = NULL, ...) { leaps.glm <- function(info, coef, names.arg, nbest = nbest) { names.arg <- names.arg if (is.null(names.arg)) names.arg <- c(as.character(1:9), LETTERS, letters)[1:ncol(info)] if (length(names.arg) < ncol(info)) stop("Too few names") bIb <- coef %*% info %*% coef kx <- ncol(info) maxreg <- nbest * kx if (kx < 3) stop("Too few independent variables") imeth <- 1 df <- kx + 1 Ib <- info %*% coef rr <- cbind(info, Ib) rr <- rbind(rr, c(Ib, bIb)) it <- 0 n.cols <- kx + 1 nv <- kx + 1 nf <- 0 no <- 1e+05 ib <- 1 mb <- nbest nd <- n.cols nc <- 4 * n.cols rt <- matrix(rep(0, times = nd * nc), ncol = nc) rt[, 1:n.cols] <- rr iw <- c(1:(kx + 1), rep(0, times = 4 * nd)) nw <- length(iw) rw <- rep(0, times = 2 * mb * kx + 7 * nd) nr <- length(rw) t1 <- 2 s2 <- -1 ne <- 0 iv <- 0 nret <- mb * kx Subss <- rep(0, times = nret) RSS <- Subss ans <- .Fortran("fwleaps", as.integer(nv), as.integer(it), as.integer(kx), as.integer(nf), as.integer(no), as.integer(1), as.double(2), as.integer(mb), as.double(rt), as.integer(nd), as.integer(nc), as.integer(iw), as.integer(nw), as.double(rw), as.integer(nr), as.double(t1), as.double(s2), as.integer(ne), as.integer(iv), as.double(Subss), as.double(RSS), as.integer(nret), PACKAGE = "BMA") regid <- ans[[21]]/2 r2 <- ans[[20]] nreg <- sum(regid > 0) regid <- regid[1:nreg] r2 <- r2[1:nreg] which <- matrix(TRUE, nreg, kx) z <- regid which <- matrix(as.logical((rep.int(z, kx)%/%rep.int(2^((kx - 1):0), rep.int(length(z), kx)))%%2), byrow = FALSE, ncol = kx) size <- which %*% rep(1, kx) label <- character(nreg) sep <- if (all(nchar(names.arg) == 1)) "" else "," for (i in 1:nreg) label[i] <- paste(names.arg[which[i, ]], collapse = sep) ans <- list(r2 = r2, size = size, label = label, which = which) return(ans) } factor.names <- function(x) { out <- list() for (i in 1:ncol(x)) if (is.factor(x[, i])) out[[i]] <- levels(x[, i]) else out <- c(out, list(NULL)) attributes(out)$names <- names(x) return(out) } create.assign <- function(xx) { asgn <- list() asgn[[1]] <- 1 cnt <- 2 for (i in 1:ncol(xx)) { if (!is.factor(xx[, i])) size <- 1 else size <- length(levels(xx[, i])) - 1 asgn[[i + 1]] <- cnt:(cnt + size - 1) cnt <- cnt + size } names(asgn) <- c("(Intercept)", attributes(xx)$names) return(asgn) } dropcols <- function(x, y, glm.family, wt, maxCols = 30) { vnames <- attributes(x)$names nvar <- length(vnames) isfac <- rep(FALSE, times = nvar) for (i in 1:nvar) isfac[i] <- is.factor(x[, i]) nlevels <- rep(NA, times = nvar) for (i in 1:nvar) if (isfac[i]) nlevels[i] <- length(levels(x[, i])) any.dropped <- FALSE mm <- model.matrix(terms.formula(~., data = x), data = x) designx <- attributes(mm)$assign n.designx <- length(designx) designx.levels <- rep(1, times = n.designx) for (i in 2:n.designx) if (isfac[designx[i]]) designx.levels[i] <- sum(designx[1:i] == designx[i]) + 1 x.df <- data.frame(x = x) glm.out <- glm(y ~ ., family = glm.family, weights = wt, data = x.df) glm.assign <- create.assign(x) while (length(glm.out$coefficients) > maxCol) { any.dropped <- TRUE dropglm <- drop1(glm.out, test = "Chisq") # dropped <- which.max(dropglm$"Pr(Chi)"[-1]) + 1 dropped <- which.max(dropglm$LRT[-1]) + 1 if (length(dropped) == 0) stop("dropped == 0") x.df <- x.df[, -(dropped - 1)] designx.levels <- designx.levels[-dropped] designx <- designx[-dropped] glm.out <- glm(y ~ ., family = glm.family, weights = wt, data = x.df) } remaining.vars <- unique(designx[-1]) new.nvar <- length(remaining.vars) dropped.vars <- vnames[-remaining.vars] dropped.levels <- NULL ncol.glm <- ncol(x.df) - 1 x.df <- x.df[-(ncol.glm + 1)] xx <- data.frame(matrix(rep(NA, times = new.nvar * nrow(x.df)), ncol = new.nvar)) new.names = rep(NA, times = new.nvar) for (i in 1:new.nvar) { cvar <- remaining.vars[i] lvls <- designx.levels[cvar == designx] if (isfac[cvar]) { if (length(lvls) != length(levels(x[, cvar]))) { newvar <- (as.matrix(x.df[, cvar == designx[-1]]) %*% cbind(lvls - 1)) + 1 xx[, i] <- factor(levels(x[, cvar])[newvar]) new.names[i] <- vnames[cvar] removed.levels <- levels(x[, cvar])[-c(1, lvls)] dropped.levels <- c(dropped.levels, paste(vnames[cvar], "_", removed.levels, sep = "")) } else { xx[, i] <- factor(x[, cvar]) new.names[i] <- vnames[cvar] } } else { xx[, i] <- x[, cvar] new.names[i] <- vnames[cvar] } } dropped <- c(dropped.vars, dropped.levels) return(list(mm = xx, any.dropped = any.dropped, dropped = dropped, var.names = new.names, remaining.vars = remaining.vars)) } if (is.null(call)) cl <- match.call() else cl <- call options(contrasts = c("contr.treatment", "contr.treatment")) prior.weight.denom <- 0.5^ncol(x) x <- data.frame(x) names.arg <- names(x) if (is.null(names.arg)) names.arg <- paste("X", 1:ncol(x), sep = "") x2 <- na.omit(x) used <- match(row.names(x), row.names(x2)) omitted <- seq(nrow(x))[is.na(used)] if (length(omitted) > 0) { wt <- wt[-omitted] x <- x2 y <- y[-omitted] warning(paste("There were ", length(omitted), "records deleted due to NA's")) } leaps.x <- x output.names <- names(x) fn <- factor.names(x) factors <- !all(unlist(lapply(fn, is.null))) x.df <- data.frame(x = x) glm.out <- glm(y ~ ., family = glm.family, weights = wt, data = x.df) glm.assign <- create.assign(x) fac.levels <- unlist(lapply(glm.assign, length)[-1]) if (factors) { cdf <- cbind.data.frame(y = y, x) mm <- model.matrix(formula(cdf), data = cdf)[, -1, drop = FALSE] mmm <- data.frame(matrix(mm, nrow = nrow(mm), byrow = FALSE)) names(mmm) <- dimnames(mm)[[2]] output.names <- names(mmm) if (factor.type) { for (i in 1:length(names(x))) { if (!is.null(fn[[i]])) { nx <- names(x)[i] coefs <- glm.out$coef[glm.assign[[i + 1]]] old.vals <- x[, i] new.vals <- c(0, coefs) new.vec <- as.vector(new.vals[match(old.vals, fn[[i]])]) leaps.x[, nx] <- new.vec } } } else { new.prior <- NULL for (i in 1:length(names(x))) { addprior <- prior.param[i] if (!is.null(fn[[i]])) { k <- length(fn[[i]]) if (factor.prior.adjust) addprior <- rep(1 - (1 - prior.param[i])^(1/(k - 1)), k - 1) else addprior <- rep(prior.param[i], k - 1) } new.prior <- c(new.prior, addprior) } prior.param <- new.prior x <- leaps.x <- mmm } } xx <- data.frame() xx <- dropcols(leaps.x, y, glm.family, wt, maxCol) var.names <- xx$var.names remaining <- xx$remaining.vars leaps.x <- xx$mm reduced <- xx$any.dropped # dropped <- NULL # if (reduced) # dropped <- xx$dropped dropped <- 0 varNames <- var.names if (reduced) { dropped <- match(xx$dropped,varNames,nomatch=0) varNames <- varNames[-dropped] } nvar <- length(x[1, ]) x <- x[, remaining, drop = FALSE] x <- data.frame(x) fac.levels <- fac.levels[remaining] output.names <- list() for (i in 1:length(var.names)) { if (is.factor(x[, i])) output.names[[i]] <- levels(x[, i]) else output.names[[i]] <- NA } xnames <- names(x) names(leaps.x) <- var.names x.df <- data.frame(x = leaps.x) glm.out <- glm(y ~ ., family = glm.family, weights = wt, data = x.df, x = TRUE) glm.assign <- create.assign(leaps.x) if (factor.type == FALSE) fac.levels <- unlist(lapply(glm.assign, length)[-1]) famname <- glm.out$family["family"]$family linkinv <- glm.out$family["linkinv"]$linkinv if (is.null(dispersion)) { if (famname == "poisson" | famname == "binomial") dispersion <- FALSE else dispersion <- TRUE } nobs <- length(y) resid <- resid(glm.out, "pearson") rdf <- glm.out$df.resid is.wt <- !all(wt == rep(1, nrow(x))) if (is.wt) { resid <- resid * sqrt(wt) excl <- wt == 0 if (any(excl)) { warning(paste(sum(excl), "rows with zero wts not counted")) resid <- resid[!excl] } } phihat <- sum(resid^2)/rdf if (dispersion) disp <- phihat else disp <- 1 coef <- glm.out$coef[-1] p <- glm.out$rank R <- glm.out$R rinv <- diag(p) rinv <- backsolve(R, rinv) rowlen <- drop(((rinv^2) %*% rep(1, p))^0.5) sigx <- rowlen %o% sqrt(disp) correl <- rinv %*% t(rinv) * outer(1/rowlen, 1/rowlen) cov <- correl * sigx %*% t(sigx) info <- solve(cov[-1, -1]) if (ncol(x) > 2) { a <- leaps.glm(info, coef, names.arg = names(leaps.x), nbest = nbest) a$r2 <- pmin(pmax(0, a$r2), 0.999) a$r2 <- c(0, a$r2) a$size <- c(0, a$size) a$label <- c("NULL", a$label) a$which <- rbind(rep(FALSE, ncol(x)), a$which) nmod <- length(a$size) prior.mat <- matrix(rep(prior.param, nmod), nmod, ncol(leaps.x), byrow = TRUE) prior <- apply(a$which * prior.mat + (!a$which) * (1 - prior.mat), 1, prod) bIb <- as.numeric(coef %*% info %*% coef) lrt <- bIb - (a$r2 * bIb) bic <- lrt + (a$size) * log(nobs) - 2 * log(prior) occam <- bic - min(bic) < 2 * OR.fix * log(OR) size <- a$size[occam] label <- a$label[occam] which <- a$which[occam, , drop = FALSE] bic <- bic[occam] prior <- prior[occam] } else { nmod <- switch(ncol(x), 2, 4) bic <- label <- rep(0, nmod) which <- matrix(c(FALSE, TRUE, FALSE, TRUE, FALSE, FALSE, TRUE, TRUE)[1:(nmod*nmod/2)], nmod, nmod/2) size <- c(0, 1, 1, 2)[1:nmod] sep <- if (all(nchar(names.arg) == 1)) "" else "," prior.mat <- matrix(rep(prior.param, nmod), nmod, ncol(x), byrow = TRUE) prior <- apply(which * prior.mat + (!which) * (1 - prior.mat), 1, prod) for (k in 1:nmod) { if (k == 1) label[k] <- "NULL" else label[k] <- paste(names.arg[which[k, ]], collapse = sep) } } nmod <- length(label) model.fits <- as.list(rep(0, nmod)) dev <- rep(0, nmod) df <- rep(0, nmod) for (k in 1:nmod) { if (sum(which[k, ]) == 0) { glm.out <- glm(y ~ 1, family = glm.family, weights = wt) } else { x.df <- data.frame(x = x[, which[k, ]]) glm.out <- glm(y ~ ., data = x.df, family = glm.family, weights = wt) } dev[k] <- glm.out$deviance df[k] <- glm.out$df.residual model.fits[[k]] <- matrix(0, nrow = length(glm.out$coef), ncol = 2) model.fits[[k]][, 1] <- glm.out$coef coef <- glm.out$coef p <- glm.out$rank R <- glm.out$R rinv <- diag(p) rinv <- backsolve(R, rinv) rowlen <- drop(((rinv^2) %*% rep(1, p))^0.5) sigx <- rowlen %o% sqrt(disp) correl <- rinv %*% t(rinv) * outer(1/rowlen, 1/rowlen) cov <- correl * sigx %*% t(sigx) model.fits[[k]][, 2] <- sqrt(diag(cov)) } bic <- dev/disp - df * log(nobs) - 2 * log(prior) if (occam.window) occam <- bic - min(bic) < 2 * log(OR) else occam = rep(TRUE, length(bic)) dev <- dev[occam] df <- df[occam] size <- size[occam] label <- label[occam] which <- which[occam, , drop = FALSE] bic <- bic[occam] prior <- prior[occam] model.fits <- model.fits[occam] postprob <- exp(-0.5 * (bic - min(bic)))/sum(exp(-0.5 * (bic - min(bic)))) order.bic <- order(bic, size, label) dev <- dev[order.bic] df <- df[order.bic] size <- size[order.bic] label <- label[order.bic] which <- which[order.bic, , drop = FALSE] bic <- bic[order.bic] prior <- prior[order.bic] postprob <- postprob[order.bic] model.fits <- model.fits[order.bic] nmod <- length(bic) if (strict & (nmod != 1)) { occam <- rep(TRUE, nmod) for (k in (2:nmod)) for (j in (1:(k - 1))) { which.diff <- which[k, ] - which[j, ] if (all(which.diff >= 0)) occam[k] <- FALSE } dev <- dev[occam] df <- df[occam] size <- size[occam] label <- label[occam] which <- which[occam, , drop = FALSE] bic <- bic[occam] prior <- prior[occam] postprob <- postprob[occam] postprob <- postprob/sum(postprob) model.fits <- model.fits[occam] } bic <- bic + 2 * log(prior) probne0 <- round(100 * t(which) %*% as.matrix(postprob), 1) nmod <- length(bic) nvar <- max(unlist(glm.assign)) Ebi <- rep(0, nvar) SDbi <- rep(0, nvar) EbiMk <- matrix(rep(0, nmod * nvar), nrow = nmod) sebiMk <- matrix(rep(0, nmod * nvar), nrow = nmod) for (i in (1:ncol(x))) { whereisit <- glm.assign[[i + 1]] if (any(which[, i])) for (k in (1:nmod)) if (which[k, i] == TRUE) { spot <- sum(which[k, (1:i)]) posMk <- (c(0, cumsum(fac.levels[which[k, ]])) + 1)[spot] posMk <- posMk:(posMk + fac.levels[i] - 1) + 1 EbiMk[k, whereisit] <- model.fits[[k]][posMk, 1] sebiMk[k, whereisit] <- model.fits[[k]][posMk, 2] } } for (k in 1:nmod) { EbiMk[k, 1] <- model.fits[[k]][1, 1] sebiMk[k, 1] <- model.fits[[k]][1, 2] } Ebi <- postprob %*% EbiMk Ebimat <- matrix(rep(Ebi, nmod), nrow = nmod, byrow = TRUE) SDbi <- sqrt(postprob %*% (sebiMk^2) + postprob %*% ((EbiMk - Ebimat)^2)) CSDbi <- rep(0, nvar) CEbi <- CSDbi for (i in (1:ncol(x))) { sel <- which[, i] if (sum(sel) > 0) { cpp <- rbind(postprob[sel]/sum(postprob[sel])) CEbi[glm.assign[[i + 1]]] <- as.numeric(cpp %*% EbiMk[sel, glm.assign[[i + 1]]]) CSDbi[glm.assign[[i + 1]]] <- sqrt(cpp %*% (sebiMk[sel, glm.assign[[i + 1]]]^2) + cpp %*% ((EbiMk[sel, glm.assign[[i + 1]]] - CEbi[glm.assign[[i + 1]]])^2)) } } CSDbi[1] <- SDbi[1] CEbi[1] <- Ebi[1] names(output.names) <- var.names result <- list(postprob = postprob, label = label, deviance = dev, size = size, bic = bic, prior.param = prior.param, prior.model.weights = prior/prior.weight.denom, family = famname, linkinv = linkinv, disp = disp, which = which, probne0 = c(probne0), postmean = as.vector(Ebi), postsd = as.vector(SDbi), condpostmean = CEbi, condpostsd = CSDbi, mle = EbiMk, se = sebiMk, namesx = var.names, reduced = reduced, dropped = dropped, call = cl, n.models = length(postprob), n.vars = length(probne0), nests = length(Ebi), output.names = output.names, assign = glm.assign, factor.type = factor.type, design = leaps.x, x = x, y = y) class(result) <- "bic.glm" result }
/scratch/gouwar.j/cran-all/cranData/BMA/R/bic.glm.R
"bic.surv" <- function (x, ...) UseMethod("bic.surv") "bic.surv.data.frame" <- function (x, surv.t, cens, strict = FALSE, OR = 20, maxCol = 30, prior.param = c(rep(0.5, ncol(x))), OR.fix = 2, nbest = 150, factor.type = TRUE, factor.prior.adjust = FALSE, call = NULL, ...) { leaps.bs <- function(info, coef, names.arg, nbest = nbest) { names.arg <- names.arg if (is.null(names.arg)) names.arg <- c(as.character(1:9), LETTERS, letters)[1:ncol(info)] if (length(names.arg) < ncol(info)) stop("Too few names") bIb <- coef %*% info %*% coef kx <- ncol(info) maxreg <- nbest * kx if (kx < 3) stop("Too few independent variables") imeth <- 1 df <- kx + 1 Ib <- info %*% coef rr <- cbind(info, Ib) rr <- rbind(rr, c(Ib, bIb)) it <- 0 n.cols <- kx + 1 nv <- kx + 1 nf <- 0 no <- 1e+05 ib <- 1 mb <- nbest nd <- n.cols nc <- 4 * n.cols rt <- matrix(rep(0, times = nd * nc), ncol = nc) rt[, 1:n.cols] <- rr iw <- c(1:(kx + 1), rep(0, times = 4 * nd)) nw <- length(iw) rw <- rep(0, times = 2 * mb * kx + 7 * nd) nr <- length(rw) t1 <- 2 s2 <- -1 ne <- 0 iv <- 0 nret <- mb * kx Subss <- rep(0, times = nret) RSS <- Subss ans <- .Fortran("fwleaps", as.integer(nv), as.integer(it), as.integer(kx), as.integer(nf), as.integer(no), as.integer(1), as.double(2), as.integer(mb), as.double(rt), as.integer(nd), as.integer(nc), as.integer(iw), as.integer(nw), as.double(rw), as.integer(nr), as.double(t1), as.double(s2), as.integer(ne), as.integer(iv), as.double(Subss), as.double(RSS), as.integer(nret), PACKAGE = "BMA") regid <- ans[[21]]/2 r2 <- ans[[20]] nreg <- sum(regid > 0) regid <- regid[1:nreg] r2 <- r2[1:nreg] which <- matrix(TRUE, nreg, kx) z <- regid which <- matrix(as.logical((rep.int(z, kx)%/%rep.int(2^((kx - 1):0), rep.int(length(z), kx)))%%2), byrow = FALSE, ncol = kx) size <- which %*% rep(1, kx) label <- character(nreg) sep <- if (all(nchar(names.arg) == 1)) "" else "," for (i in 1:nreg) label[i] <- paste(names.arg[which[i, ]], collapse = sep) ans <- list(r2 = r2, size = size, label = label, which = which) return(ans) } factor.names <- function(x) { out <- list() for (i in 1:ncol(x)) if (is.factor(x[, i])) out[[i]] <- levels(x[, i]) else out <- c(out, list(NULL)) attributes(out)$names <- names(x) return(out) } dropcols <- function(x, surv.t, cens, maxCols = 30) { vnames <- attributes(x)$names nvar <- length(vnames) isfac <- rep(FALSE, times = nvar) for (i in 1:nvar) isfac[i] <- is.factor(x[, i]) nlevels <- rep(NA, times = nvar) for (i in 1:nvar) if (isfac[i]) nlevels[i] <- length(levels(x[, i])) any.dropped <- FALSE mm <- model.matrix(terms.formula(~., data = x), data = x) designx <- attributes(mm)$assign n.designx <- length(designx) designx.levels <- rep(1, times = n.designx) for (i in 2:n.designx) if (isfac[designx[i]]) designx.levels[i] <- sum(designx[1:i] == designx[i]) + 1 x.coxph <- data.frame(mm[, -1], surv.t = surv.t, cens = cens) cox.out <- coxph(Surv(surv.t, cens) ~ ., data = x.coxph, method = "breslow", iter.max = 30) while (length(cox.out$coefficients) > maxCol) { any.dropped <- TRUE dropcox <- drop1(cox.out, test = "Chisq") dropped <- which.max(dropcox$"Pr(>Chi)"[-1]) + 1 x.coxph <- x.coxph[, -(dropped - 1)] designx.levels <- designx.levels[-dropped] designx <- designx[-dropped] cox.out <- coxph(Surv(surv.t, cens) ~ ., data = x.coxph, method = "breslow", iter.max = 30) } remaining.vars <- unique(designx[-1]) new.nvar <- length(remaining.vars) dropped.vars <- vnames[-remaining.vars] dropped.levels <- NULL ncol.cox <- ncol(x.coxph) - 2 x.coxph <- x.coxph[-((ncol.cox + 1):(ncol.cox + 2))] xx <- data.frame(matrix(rep(NA, times = new.nvar * nrow(x.coxph)), ncol = new.nvar)) new.names = rep(NA, times = new.nvar) for (i in 1:new.nvar) { cvar <- remaining.vars[i] lvls <- designx.levels[cvar == designx] if (isfac[cvar]) { if (length(lvls) != length(levels(x[, cvar]))) { newvar <- (as.matrix(x.coxph[, cvar == designx[-1]]) %*% cbind(lvls - 1)) + 1 xx[, i] <- factor(levels(x[, cvar])[newvar]) new.names[i] <- vnames[cvar] removed.levels <- levels(x[, cvar])[-c(1, lvls)] dropped.levels <- c(dropped.levels, paste(vnames[cvar], "_", removed.levels, sep = "")) } else { xx[, i] <- factor(x[, cvar]) new.names[i] <- vnames[cvar] } } else { xx[, i] <- x[, cvar] new.names[i] <- vnames[cvar] } } dropped <- c(dropped.vars, dropped.levels) return(list(mm = xx, any.dropped = any.dropped, dropped = dropped, var.names = new.names, remaining.vars = remaining.vars)) } if (is.null(call)) cl <- match.call() else cl <- call options(contrasts = c("contr.treatment", "contr.treatment")) x <- data.frame(x) prior.weight.denom <- 0.5^ncol(x) x.omit <- na.omit(x) used <- match(row.names(x), row.names(x.omit)) omitted <- seq(nrow(x))[is.na(used)] if (length(omitted) > 0) { x <- x.omit surv.t <- surv.t[-omitted] cens <- cens[-omitted] warning(paste("There were ", length(omitted), "records deleted due to NA's")) } leaps.x <- x fac.levels <- rep(1, times = ncol(x)) fn <- factor.names(x) factors <- !all(unlist(lapply(fn, is.null))) if (factors) { cdf <- cbind.data.frame(y = surv.t, x) mm <- model.matrix(formula(cdf), data = cdf)[, -1, drop = FALSE] mmm <- data.frame(matrix(mm, nrow = nrow(mm), byrow = FALSE)) names(mmm) <- dimnames(mm)[[2]] output.names <- names(mmm) x.coxph <- data.frame(surv.t = surv.t, cens = cens, x) cox.out <- coxph(Surv(surv.t, cens) ~ ., data = x.coxph, method = "breslow") cox.assign <- cox.out$assign for (tempi in length(cox.assign):1) { cox.assign[[tempi + 1]] <- cox.assign[[tempi]] names(cox.assign)[tempi + 1] <- names(cox.assign)[tempi] } cox.assign[[1]] <- numeric(0) names(cox.assign)[1] <- "(Intercept)" fac.levels <- unlist(lapply(cox.assign, length)[-1]) if (factor.type) { for (i in 1:length(names(x))) { if (!is.null(fn[[i]])) { nx <- names(x)[i] coefs <- cox.out$coef[cox.out$assign[[i]]] old.vals <- x[, i] new.vals <- c(0, coefs) new.vec <- as.vector(new.vals[match(old.vals, fn[[i]])]) leaps.x[, nx] <- new.vec } } } else { fac.levels <- rep(1, times = ncol(mmm)) new.prior <- NULL for (i in 1:length(names(x))) { addprior <- prior.param[i] if (!is.null(fn[[i]])) { k <- length(fn[[i]]) if (factor.prior.adjust) addprior <- rep(1 - (1 - prior.param[i])^(1/(k - 1)), k - 1) else addprior <- rep(prior.param[i], k - 1) } new.prior <- c(new.prior, addprior) } prior.param <- new.prior x <- leaps.x <- mmm } } xx <- data.frame() xx <- dropcols(leaps.x, surv.t, cens, maxCol) var.names <- xx$var.names remaining <- xx$remaining.vars leaps.x <- xx$mm reduced <- xx$any.dropped dropped <- NULL if (reduced) dropped <- xx$dropped nvar <- length(x[1, ]) x <- x[, remaining, drop = FALSE] x <- data.frame(x) fac.levels <- fac.levels[remaining] output.names <- list() for (i in 1:length(var.names)) { if (is.factor(x[, i])) output.names[[i]] <- levels(x[, i]) else output.names[[i]] <- NA } xnames <- names(x) names(leaps.x) <- var.names x.coxph <- data.frame(surv.t = surv.t, cens = cens, leaps.x) cox.out <- coxph(Surv(surv.t, cens) ~ ., data = x.coxph, method = "breslow") x.coxph.fac <- data.frame(surv.t = surv.t, cens = cens, x) cox.assign <- coxph(Surv(surv.t, cens) ~ ., data = x.coxph.fac, method = "breslow")$assign for (tempi in length(cox.assign):1) { cox.assign[[tempi + 1]] <- cox.assign[[tempi]] names(cox.assign)[tempi + 1] <- names(cox.assign)[tempi] } cox.assign[[1]] <- numeric(0) names(cox.assign)[1] <- "(Intercept)" n <- sum(cens) if (ncol(leaps.x) >= 3) { coef <- cox.out$coef info <- solve(cox.out$var) a <- leaps.bs(info, coef, names.arg = xnames, nbest = nbest) a$r2 <- c(0, a$r2) a$size <- c(0, a$size) a$label <- c("NULL", a$label) a$which <- rbind(rep(FALSE, ncol(x)), a$which) nmod <- length(a$size) prior.mat <- matrix(rep(prior.param, nmod), nmod, ncol(x), byrow = TRUE) prior <- apply(a$which * prior.mat + (!a$which) * (1 - prior.mat), 1, prod) bIb <- as.numeric(coef %*% info %*% coef) lrt <- bIb - (a$r2 * bIb) bic <- lrt + (a$size) * log(n) - 2 * log(prior) occam <- bic - min(bic) < 2 * OR.fix * log(OR) size <- a$size[occam] label <- a$label[occam] which <- a$which[occam, , drop = FALSE] bic <- bic[occam] prior <- prior[occam] } else { nmod <- switch(ncol(x), 2, 4) bic <- label <- rep(0, nmod) which <- matrix(c(FALSE, TRUE, FALSE, TRUE, FALSE, FALSE, TRUE, TRUE)[1:(nmod*nmod/2)], nmod, nmod/2) size <- c(0, 1, 1, 2)[1:nmod] sep <- "," prior.mat <- matrix(rep(prior.param, nmod), nmod, ncol(x), byrow = TRUE) prior <- apply(which * prior.mat + (!which) * (1 - prior.mat), 1, prod) for (k in 1:nmod) { if (k == 1) label[k] <- "NULL" else label[k] <- paste(names(leaps.x)[which[k, ]], collapse = sep) } } model.fits <- as.list(rep(0, length(label))) loglik <- rep(0, length(label)) size <- rep(0, length(label)) loglik.null <- cox.out$loglik[1] for (k in (1:length(label))) { if (sum(which[k, ]) != 0) { x.coxph <- data.frame(x[, which[k, ], drop = FALSE], surv.t = surv.t, cens = cens) cox.out <- coxph(Surv(surv.t, cens) ~ ., data = x.coxph, iter.max = 30, method = "breslow") loglik[k] <- cox.out$loglik[2] size[k] <- length(cox.out$coef) model.fits[[k]] <- matrix(rep(0, 2 * length(cox.out$coef)), ncol = 2) model.fits[[k]][, 1] <- cox.out$coef model.fits[[k]][, 2] <- sqrt(diag(cox.out$var)) } else { loglik[k] <- loglik.null } } bic <- size * log(n) - 2 * (loglik - loglik.null) - 2 * log(prior) occam <- bic - min(bic) < 2 * log(OR) size <- size[occam] label <- label[occam] which <- which[occam, , drop = FALSE] bic <- bic[occam] prior <- prior[occam] model.fits <- model.fits[occam] postprob <- (exp(-0.5 * (bic - min(bic))))/sum(exp(-0.5 * (bic - min(bic)))) order.bic <- order(bic, size, label) size <- size[order.bic] label <- label[order.bic] which <- which[order.bic, , drop = FALSE] bic <- bic[order.bic] prior <- prior[order.bic] postprob <- postprob[order.bic] model.fits <- model.fits[order.bic] nmod <- length(size) if (strict & (nmod != 1)) { occam <- rep(TRUE, nmod) for (k in (2:nmod)) { for (j in (1:(k - 1))) { which.diff <- which[k, ] - which[j, ] if (all(which.diff >= 0)) occam[k] <- FALSE } } size <- size[occam] label <- label[occam] which <- which[occam, , drop = FALSE] bic <- bic[occam] prior <- prior[occam] postprob <- postprob[occam]/(sum(postprob[occam])) model.fits <- model.fits[occam] } bic <- bic + 2 * log(prior) probne0 <- round(100 * t(which) %*% as.matrix(postprob), 1) nmod <- length(bic) nvar <- max(unlist(cox.assign)) Ebi <- rep(0, nvar) SDbi <- rep(0, nvar) EbiMk <- matrix(rep(0, nmod * nvar), nrow = nmod) sebiMk <- matrix(rep(0, nmod * nvar), nrow = nmod) for (i in (1:ncol(x))) { whereisit <- cox.assign[[i + 1]] if (any(which[, i])) for (k in (1:nmod)) if (which[k, i] == TRUE) { spot <- sum(which[k, (1:i)]) posMk <- (c(0, cumsum(fac.levels[which[k, ]])) + 1)[spot] posMk <- posMk:(posMk + fac.levels[i] - 1) EbiMk[k, whereisit] <- model.fits[[k]][posMk, 1] sebiMk[k, whereisit] <- model.fits[[k]][posMk, 2] } } Ebi <- postprob %*% EbiMk Ebimat <- matrix(rep(Ebi, nmod), nrow = nmod, byrow = TRUE) SDbi <- sqrt(postprob %*% (sebiMk^2) + postprob %*% ((EbiMk - Ebimat)^2)) CSDbi <- rep(0, nvar) CEbi <- CSDbi for (i in (1:ncol(x))) { sel <- which[, i] if (sum(sel) > 0) { cpp <- rbind(postprob[sel]/sum(postprob[sel])) CEbi[cox.assign[[i + 1]]] <- as.numeric(cpp %*% EbiMk[sel, cox.assign[[i + 1]]]) CSDbi[cox.assign[[i + 1]]] <- sqrt(cpp %*% (sebiMk[sel, cox.assign[[i + 1]]]^2) + cpp %*% ((EbiMk[sel, cox.assign[[i + 1]]] - CEbi[cox.assign[[i + 1]]])^2)) } } names(output.names) <- var.names result <- list(postprob = postprob, label = label, size = size, bic = bic, prior.param = prior.param, prior.model.weights = prior/prior.weight.denom, which = which, probne0 = c(probne0), postmean = as.vector(Ebi), postsd = as.vector(SDbi), condpostmean = CEbi, condpostsd = CSDbi, mle = EbiMk, se = sebiMk, namesx = var.names, reduced = reduced, dropped = dropped, call = cl, n.models = length(postprob), n.vars = length(probne0), nests = length(Ebi), output.names = output.names, assign = cox.assign, factor.type = factor.type) class(result) <- "bic.surv" result } "bic.surv.formula" <- function (f, data, strict = FALSE, OR = 20, maxCol = 30, prior.param = c(rep(0.5, ncol(x))), OR.fix = 2, nbest = 150, factor.type = TRUE, factor.prior.adjust = FALSE, call = NULL, ...) { cl <- match.call() tms <- terms(f, data = data) fmatrix <- attr(tms, "factors") tms.order <- attr(tms, "order") tms.labels <- attr(tms, "term.labels") mm <- model.matrix(tms, data = data) assn <- attr(mm, "assign") nterms <- max(assn) datalist <- eval(attr(tms, "variables"), envir = data) nvar <- nrow(fmatrix) - 1 isvarfac <- rep(NA, times = nvar) for (i in 1:nvar) isvarfac[i] <- is.factor(datalist[[i + 1]]) istermfac <- rep(NA, times = nterms) for (i in 1:nterms) { cterms <- fmatrix[-1, i] == 1 istermfac[i] <- sum(isvarfac[cterms] == FALSE) == 0 } surv.t.name <- all.vars(f)[1] cens.name <- all.vars(f)[2] moddata <- data.frame(rep(NA, times = dim(mm)[1])) cnames <- NULL for (i in 1:nterms) { if (istermfac[i]) { if (tms.order[i] == 1) { moddata <- cbind(moddata, datalist[[i + 1]]) cnames <- c(cnames, tms.labels[i]) } else { sel <- assn == i nlev <- sum(sel) newfac.index <- (mm[, sel] %*% cbind(1:nlev)) + 1 facnames <- c("ref", colnames(mm)[sel]) newfac <- facnames[newfac.index] newfac <- factor(newfac) moddata <- cbind(moddata, newfac) cnames <- c(cnames, paste(tms.labels[i], "..", sep = "")) } } else { sel <- assn == i moddata <- cbind(moddata, mm[, sel]) cnames <- c(cnames, colnames(mm)[sel]) } } moddata <- moddata[, -1] cnames <- gsub(":", ".", cnames) moddata <- cbind(moddata, datalist[[1]][, 1], datalist[[1]][, 2]) colnames(moddata) <- c(cnames, surv.t.name, cens.name) nv <- ncol(moddata) - 2 surv.t <- moddata[, nv + 1] cens <- moddata[, nv + 2] x <- moddata[, 1:nv] bic.surv(x, surv.t, cens, strict = FALSE, OR = OR, maxCol = maxCol, prior.param = prior.param, OR.fix = OR.fix, nbest = nbest, factor.type = factor.type, factor.prior.adjust = factor.prior.adjust, call = cl) } "bic.surv.matrix" <- function (x, surv.t, cens, strict = FALSE, OR = 20, maxCol = 30, prior.param = c(rep(0.5, ncol(x))), OR.fix = 2, nbest = 150, factor.type = TRUE, factor.prior.adjust = FALSE, call = NULL, ...) { leaps.bs <- function(info, coef, names.arg, nbest = nbest) { names.arg <- names.arg if (is.null(names.arg)) names.arg <- c(as.character(1:9), LETTERS, letters)[1:ncol(info)] if (length(names.arg) < ncol(info)) stop("Too few names") bIb <- coef %*% info %*% coef kx <- ncol(info) maxreg <- nbest * kx if (kx < 3) stop("Too few independent variables") imeth <- 1 df <- kx + 1 Ib <- info %*% coef rr <- cbind(info, Ib) rr <- rbind(rr, c(Ib, bIb)) it <- 0 n.cols <- kx + 1 nv <- kx + 1 nf <- 0 no <- 1e+05 ib <- 1 mb <- nbest nd <- n.cols nc <- 4 * n.cols rt <- matrix(rep(0, times = nd * nc), ncol = nc) rt[, 1:n.cols] <- rr iw <- c(1:(kx + 1), rep(0, times = 4 * nd)) nw <- length(iw) rw <- rep(0, times = 2 * mb * kx + 7 * nd) nr <- length(rw) t1 <- 2 s2 <- -1 ne <- 0 iv <- 0 nret <- mb * kx Subss <- rep(0, times = nret) RSS <- Subss ans <- .Fortran("fwleaps", as.integer(nv), as.integer(it), as.integer(kx), as.integer(nf), as.integer(no), as.integer(1), as.double(2), as.integer(mb), as.double(rt), as.integer(nd), as.integer(nc), as.integer(iw), as.integer(nw), as.double(rw), as.integer(nr), as.double(t1), as.double(s2), as.integer(ne), as.integer(iv), as.double(Subss), as.double(RSS), as.integer(nret), PACKAGE = "BMA") regid <- ans[[21]]/2 r2 <- ans[[20]] nreg <- sum(regid > 0) regid <- regid[1:nreg] r2 <- r2[1:nreg] which <- matrix(TRUE, nreg, kx) z <- regid which <- matrix(as.logical((rep.int(z, kx)%/%rep.int(2^((kx - 1):0), rep.int(length(z), kx)))%%2), byrow = FALSE, ncol = kx) size <- which %*% rep(1, kx) label <- character(nreg) sep <- if (all(nchar(names.arg) == 1)) "" else "," for (i in 1:nreg) label[i] <- paste(names.arg[which[i, ]], collapse = sep) ans <- list(r2 = r2, size = size, label = label, which = which) return(ans) } factor.names <- function(x) { out <- list() for (i in 1:ncol(x)) if (is.factor(x[, i])) out[[i]] <- levels(x[, i]) else out <- c(out, list(NULL)) attributes(out)$names <- names(x) return(out) } dropcols <- function(x, surv.t, cens, maxCols = 30) { vnames <- attributes(x)$names nvar <- length(vnames) isfac <- rep(FALSE, times = nvar) for (i in 1:nvar) isfac[i] <- is.factor(x[, i]) nlevels <- rep(NA, times = nvar) for (i in 1:nvar) if (isfac[i]) nlevels[i] <- length(levels(x[, i])) any.dropped <- FALSE mm <- model.matrix(terms.formula(~., data = x), data = x) designx <- attributes(mm)$assign n.designx <- length(designx) designx.levels <- rep(1, times = n.designx) for (i in 2:n.designx) if (isfac[designx[i]]) designx.levels[i] <- sum(designx[1:i] == designx[i]) + 1 x.coxph <- data.frame(mm[, -1], surv.t = surv.t, cens = cens) cox.out <- coxph(Surv(surv.t, cens) ~ ., data = x.coxph, method = "breslow", iter.max = 30) while (length(cox.out$coefficients) > maxCol) { any.dropped <- TRUE dropcox <- drop1(cox.out, test = "Chisq") dropped <- which.max(dropcox$"Pr(>Chi)"[-1]) + 1 x.coxph <- x.coxph[, -(dropped - 1)] designx.levels <- designx.levels[-dropped] designx <- designx[-dropped] cox.out <- coxph(Surv(surv.t, cens) ~ ., data = x.coxph, method = "breslow", iter.max = 30) } remaining.vars <- unique(designx[-1]) new.nvar <- length(remaining.vars) dropped.vars <- vnames[-remaining.vars] dropped.levels <- NULL ncol.cox <- ncol(x.coxph) - 2 x.coxph <- x.coxph[-((ncol.cox + 1):(ncol.cox + 2))] xx <- data.frame(matrix(rep(NA, times = new.nvar * nrow(x.coxph)), ncol = new.nvar)) new.names = rep(NA, times = new.nvar) for (i in 1:new.nvar) { cvar <- remaining.vars[i] lvls <- designx.levels[cvar == designx] if (isfac[cvar]) { if (length(lvls) != length(levels(x[, cvar]))) { newvar <- (as.matrix(x.coxph[, cvar == designx[-1]]) %*% cbind(lvls - 1)) + 1 xx[, i] <- factor(levels(x[, cvar])[newvar]) new.names[i] <- vnames[cvar] removed.levels <- levels(x[, cvar])[-c(1, lvls)] dropped.levels <- c(dropped.levels, paste(vnames[cvar], "_", removed.levels, sep = "")) } else { xx[, i] <- factor(x[, cvar]) new.names[i] <- vnames[cvar] } } else { xx[, i] <- x[, cvar] new.names[i] <- vnames[cvar] } } dropped <- c(dropped.vars, dropped.levels) return(list(mm = xx, any.dropped = any.dropped, dropped = dropped, var.names = new.names, remaining.vars = remaining.vars)) } if (is.null(call)) cl <- match.call() else cl <- call options(contrasts = c("contr.treatment", "contr.treatment")) x <- data.frame(x) prior.weight.denom <- 0.5^ncol(x) x.omit <- na.omit(x) used <- match(row.names(x), row.names(x.omit)) omitted <- seq(nrow(x))[is.na(used)] if (length(omitted) > 0) { x <- x.omit surv.t <- surv.t[-omitted] cens <- cens[-omitted] warning(paste("There were ", length(omitted), "records deleted due to NA's")) } leaps.x <- x fac.levels <- rep(1, times = ncol(x)) fn <- factor.names(x) factors <- !all(unlist(lapply(fn, is.null))) if (factors) { cdf <- cbind.data.frame(y = surv.t, x) mm <- model.matrix(formula(cdf), data = cdf)[, -1, drop = FALSE] mmm <- data.frame(matrix(mm, nrow = nrow(mm), byrow = FALSE)) names(mmm) <- dimnames(mm)[[2]] output.names <- names(mmm) x.coxph <- data.frame(surv.t = surv.t, cens = cens, x) cox.out <- coxph(Surv(surv.t, cens) ~ ., data = x.coxph, method = "breslow") cox.assign <- cox.out$assign for (tempi in length(cox.assign):1) { cox.assign[[tempi + 1]] <- cox.assign[[tempi]] names(cox.assign)[tempi + 1] <- names(cox.assign)[tempi] } cox.assign[[1]] <- numeric(0) names(cox.assign)[1] <- "(Intercept)" fac.levels <- unlist(lapply(cox.assign, length)[-1]) if (factor.type) { for (i in 1:length(names(x))) { if (!is.null(fn[[i]])) { nx <- names(x)[i] coefs <- cox.out$coef[cox.out$assign[[i]]] old.vals <- x[, i] new.vals <- c(0, coefs) new.vec <- as.vector(new.vals[match(old.vals, fn[[i]])]) leaps.x[, nx] <- new.vec } } } else { fac.levels <- rep(1, times = ncol(mmm)) new.prior <- NULL for (i in 1:length(names(x))) { addprior <- prior.param[i] if (!is.null(fn[[i]])) { k <- length(fn[[i]]) if (factor.prior.adjust) addprior <- rep(1 - (1 - prior.param[i])^(1/(k - 1)), k - 1) else addprior <- rep(prior.param[i], k - 1) } new.prior <- c(new.prior, addprior) } prior.param <- new.prior x <- leaps.x <- mmm } } xx <- data.frame() xx <- dropcols(leaps.x, surv.t, cens, maxCol) var.names <- xx$var.names remaining <- xx$remaining.vars leaps.x <- xx$mm reduced <- xx$any.dropped dropped <- NULL if (reduced) dropped <- xx$dropped nvar <- length(x[1, ]) x <- x[, remaining, drop = FALSE] x <- data.frame(x) fac.levels <- fac.levels[remaining] output.names <- list() for (i in 1:length(var.names)) { if (is.factor(x[, i])) output.names[[i]] <- levels(x[, i]) else output.names[[i]] <- NA } xnames <- names(x) names(leaps.x) <- var.names x.coxph <- data.frame(surv.t = surv.t, cens = cens, leaps.x) cox.out <- coxph(Surv(surv.t, cens) ~ ., data = x.coxph, method = "breslow") x.coxph.fac <- data.frame(surv.t = surv.t, cens = cens, x) cox.assign <- coxph(Surv(surv.t, cens) ~ ., data = x.coxph.fac, method = "breslow")$assign for (tempi in length(cox.assign):1) { cox.assign[[tempi + 1]] <- cox.assign[[tempi]] names(cox.assign)[tempi + 1] <- names(cox.assign)[tempi] } cox.assign[[1]] <- numeric(0) names(cox.assign)[1] <- "(Intercept)" n <- sum(cens) if (ncol(leaps.x) >= 3) { coef <- cox.out$coef info <- solve(cox.out$var) a <- leaps.bs(info, coef, names.arg = xnames, nbest = nbest) a$r2 <- c(0, a$r2) a$size <- c(0, a$size) a$label <- c("NULL", a$label) a$which <- rbind(rep(FALSE, ncol(x)), a$which) nmod <- length(a$size) prior.mat <- matrix(rep(prior.param, nmod), nmod, ncol(x), byrow = TRUE) prior <- apply(a$which * prior.mat + (!a$which) * (1 - prior.mat), 1, prod) bIb <- as.numeric(coef %*% info %*% coef) lrt <- bIb - (a$r2 * bIb) bic <- lrt + (a$size) * log(n) - 2 * log(prior) occam <- bic - min(bic) < 2 * OR.fix * log(OR) size <- a$size[occam] label <- a$label[occam] which <- a$which[occam, , drop = FALSE] bic <- bic[occam] prior <- prior[occam] } else { nmod <- switch(ncol(x), 2, 4) bic <- label <- rep(0, nmod) which <- matrix(c(FALSE, TRUE, FALSE, TRUE, FALSE, FALSE, TRUE, TRUE)[1:(nmod*nmod/2)], nmod, nmod/2) size <- c(0, 1, 1, 2)[1:nmod] sep <- "," prior.mat <- matrix(rep(prior.param, nmod), nmod, ncol(x), byrow = TRUE) prior <- apply(which * prior.mat + (!which) * (1 - prior.mat), 1, prod) for (k in 1:nmod) { if (k == 1) label[k] <- "NULL" else label[k] <- paste(names(leaps.x)[which[k, ]], collapse = sep) } } model.fits <- as.list(rep(0, length(label))) loglik <- rep(0, length(label)) size <- rep(0, length(label)) loglik.null <- cox.out$loglik[1] for (k in (1:length(label))) { if (sum(which[k, ]) != 0) { x.coxph <- data.frame(x[, which[k, ], drop = FALSE], surv.t = surv.t, cens = cens) cox.out <- coxph(Surv(surv.t, cens) ~ ., data = x.coxph, iter.max = 30, method = "breslow") loglik[k] <- cox.out$loglik[2] size[k] <- length(cox.out$coef) model.fits[[k]] <- matrix(rep(0, 2 * length(cox.out$coef)), ncol = 2) model.fits[[k]][, 1] <- cox.out$coef model.fits[[k]][, 2] <- sqrt(diag(cox.out$var)) } else { loglik[k] <- loglik.null } } bic <- size * log(n) - 2 * (loglik - loglik.null) - 2 * log(prior) occam <- bic - min(bic) < 2 * log(OR) size <- size[occam] label <- label[occam] which <- which[occam, , drop = FALSE] bic <- bic[occam] prior <- prior[occam] model.fits <- model.fits[occam] postprob <- (exp(-0.5 * (bic - min(bic))))/sum(exp(-0.5 * (bic - min(bic)))) order.bic <- order(bic, size, label) size <- size[order.bic] label <- label[order.bic] which <- which[order.bic, , drop = FALSE] bic <- bic[order.bic] prior <- prior[order.bic] postprob <- postprob[order.bic] model.fits <- model.fits[order.bic] nmod <- length(size) if (strict & (nmod != 1)) { occam <- rep(TRUE, nmod) for (k in (2:nmod)) { for (j in (1:(k - 1))) { which.diff <- which[k, ] - which[j, ] if (all(which.diff >= 0)) occam[k] <- FALSE } } size <- size[occam] label <- label[occam] which <- which[occam, , drop = FALSE] bic <- bic[occam] prior <- prior[occam] postprob <- postprob[occam]/(sum(postprob[occam])) model.fits <- model.fits[occam] } bic <- bic + 2 * log(prior) probne0 <- round(100 * t(which) %*% as.matrix(postprob), 1) nmod <- length(bic) nvar <- max(unlist(cox.assign)) Ebi <- rep(0, nvar) SDbi <- rep(0, nvar) EbiMk <- matrix(rep(0, nmod * nvar), nrow = nmod) sebiMk <- matrix(rep(0, nmod * nvar), nrow = nmod) for (i in (1:ncol(x))) { whereisit <- cox.assign[[i + 1]] if (any(which[, i])) for (k in (1:nmod)) if (which[k, i] == TRUE) { spot <- sum(which[k, (1:i)]) posMk <- (c(0, cumsum(fac.levels[which[k, ]])) + 1)[spot] posMk <- posMk:(posMk + fac.levels[i] - 1) EbiMk[k, whereisit] <- model.fits[[k]][posMk, 1] sebiMk[k, whereisit] <- model.fits[[k]][posMk, 2] } } Ebi <- postprob %*% EbiMk Ebimat <- matrix(rep(Ebi, nmod), nrow = nmod, byrow = TRUE) SDbi <- sqrt(postprob %*% (sebiMk^2) + postprob %*% ((EbiMk - Ebimat)^2)) CSDbi <- rep(0, nvar) CEbi <- CSDbi for (i in (1:ncol(x))) { sel <- which[, i] if (sum(sel) > 0) { cpp <- rbind(postprob[sel]/sum(postprob[sel])) CEbi[cox.assign[[i + 1]]] <- as.numeric(cpp %*% EbiMk[sel, cox.assign[[i + 1]]]) CSDbi[cox.assign[[i + 1]]] <- sqrt(cpp %*% (sebiMk[sel, cox.assign[[i + 1]]]^2) + cpp %*% ((EbiMk[sel, cox.assign[[i + 1]]] - CEbi[cox.assign[[i + 1]]])^2)) } } names(output.names) <- var.names result <- list(postprob = postprob, label = label, size = size, bic = bic, prior.param = prior.param, prior.model.weights = prior/prior.weight.denom, which = which, probne0 = c(probne0), postmean = as.vector(Ebi), postsd = as.vector(SDbi), condpostmean = CEbi, condpostsd = CSDbi, mle = EbiMk, se = sebiMk, namesx = var.names, reduced = reduced, dropped = dropped, call = cl, n.models = length(postprob), n.vars = length(probne0), nests = length(Ebi), output.names = output.names, assign = cox.assign, factor.type = factor.type) class(result) <- "bic.surv" result }
/scratch/gouwar.j/cran-all/cranData/BMA/R/bic.surv.R
bicreg <- function (x, y, wt = rep(1, length(y)), strict = FALSE, OR = 20, maxCol = 31, drop.factor.levels = TRUE, nbest = 150) { dropcols <- function(x, y, wt, maxCols = 31) { x1.ldf <- data.frame(x, y = y) temp.wt <- wt lm.out <- lm(y ~ ., data = x1.ldf, weights = temp.wt) form.vars <- all.vars(formula(lm.out))[-1] any.dropped <- FALSE dropped.which <- NULL while (length(lm.out$coefficients) > maxCol) { any.dropped <- TRUE droplm <- drop1(lm.out, test = "none") dropped <- row.names(droplm)[which.min(droplm$RSS[-1]) + 1] dropped.index <- match(dropped, form.vars) form.vars <- form.vars[-dropped.index] formla <- formula(paste("y", "~", paste(form.vars, collapse = " + "), sep = " ")) lm.out <- lm(formla, data = x1.ldf, weights = temp.wt) dropped.which <- c(dropped.which, dropped) } new.var.names <- names(lm.out$coefficients) return(list(mm = model.matrix(lm.out)[, -1, drop = FALSE], any.dropped = any.dropped, dropped = dropped.which, var.names = new.var.names)) } cl <- match.call() x <- data.frame(x) if (is.null(dimnames(x))) dimnames(x) <- list(NULL, paste("X", 1:ncol(x), sep = "")) y <- as.numeric(y) options(contrasts = c("contr.treatment", "contr.treatment")) xnames <- input.names <- dimnames(x)[[2]] x2 <- na.omit(data.frame(x)) used <- match(row.names(data.frame(x)), row.names(x2)) omitted <- seq(nrow(x))[is.na(used)] if (length(omitted) > 0) { wt <- wt[-omitted] x <- x2 y <- y[-omitted] warning(paste("There were ", length(omitted), "records deleted due to NA's")) } if (drop.factor.levels) { cdf <- cbind.data.frame(y = y, x) mm <- model.matrix(formula(cdf), data = cdf)[, -1, drop = FALSE] x <- mm } xx <- dropcols(x, y, wt, maxCol) xnames <- xx$var.names[-1] x <- xx$mm reduced <- xx$any.dropped dropped <- NULL if (reduced) dropped <- xx$dropped nvar <- length(x[1, ]) if (nvar > 2) { # a <- leaps(x, y, wt = wt, method = "r2", names = dimnames(x)[[2]], # strictly.compatible = FALSE, nbest = nbest) a <- regsubsets(x, y, weights = wt, nbest = nbest, nvmax = ncol(x), method = "exhaustive", really.big = TRUE) a <- summary(a) size <- apply(a$which,1,sum) names(size) <- NULL size <- c(1,size) a$which <- a$which[,-1, drop=FALSE] a$r2 <- a$rsq a$r2 <- pmin(pmax(0, a$r2), 0.999) x.lm <- cbind.data.frame(y = y, as.data.frame(x[, a$which[2, , drop = FALSE]]), w = wt) lm.fix <- lm(y ~ . - w, weights = wt, data = x.lm) r2.fix <- summary(lm.fix)$r.sq N <- ncol(x) magic <- N * log(1 - a$r2[2]) - N * log(1 - r2.fix) a$r2 <- 1 - (1 - a$r2) * exp(-magic/N) r2 <- round(c(0, a$r2) * 100, 3) which <- rbind(rep(FALSE, ncol(x)), a$which) templabs <- t(matrix(rep(colnames(which), times = nrow(which)), ncol = nrow(which))) templabs[!which] <- "" label <- apply(templabs, 1, paste, collapse = "") label[1] <- "NULL" } else { r2 <- bic <- NULL nmod <- switch(ncol(x), 2, 4) bic <- label <- rep(0, nmod) model.fits <- as.list(rep(0, nmod)) which <- matrix(c(FALSE, TRUE, FALSE, TRUE, FALSE, FALSE, TRUE, TRUE)[1:(nmod*nmod/2)], nmod, nmod/2) size <- c(1, 2, 2, 3)[1:nmod] sep <- if (all(nchar(dimnames(x)[[2]]) == 1)) "" else "," for (k in 1:nmod) { if (k == 1) { label[k] <- "NULL" lm1 <- lm(y ~ 1, weights = wt) } else { label[k] <- paste(dimnames(x)[[2]][which[k, ]], collapse = sep) x.lm <- cbind.data.frame(y = y, x = x[, which[k, , drop = FALSE]], wt = wt) lm1 <- lm(y ~ . - wt, data = x.lm, weights = wt) } r2[k] <- summary(lm1)$r.sq * 100 } } n <- length(y) if (any((1 - r2/100) <= 0)) { #stop("a model is perfectly correlated with the response") # Change (12/10/2020) suggested by Luca Scrucca warning("a model is perfectly correlated with the response") r2[(1 - r2/100) <= 0] <- (1 - .Machine$double.eps)*100 } bic <- n * log(1 - r2/100) + (size - 1) * log(n) occam <- bic - min(bic) < 2 * log(OR) r2 <- r2[occam] size <- size[occam] label <- label[occam] which <- which[occam, , drop = FALSE] bic <- bic[occam] mbic <- bic - max(bic) postprob <- exp(-0.5 * mbic)/sum(exp(-0.5 * mbic)) postprob[is.na(postprob)] <- 1 order.bic <- order(bic, size, label) r2 <- r2[order.bic] size <- size[order.bic] label <- label[order.bic] which <- which[order.bic, , drop = FALSE] bic <- bic[order.bic] postprob <- postprob[order.bic] if (strict) { nmod <- length(bic) if (nmod > 1) { occam <- rep(TRUE, nmod) for (k in (2:nmod)) { for (j in (1:(k - 1))) { which.diff <- which[k, ] - which[j, ] if (all(which.diff >= 0)) occam[k] <- FALSE } } r2 <- r2[occam] size <- size[occam] label <- label[occam] nmod <- sum(occam) which <- which[occam, , drop = FALSE] bic <- bic[occam] postprob <- postprob[occam] postprob <- postprob/sum(postprob) } } probne0 <- round(100 * t(which) %*% as.matrix(postprob), 1) nmod <- length(bic) model.fits <- as.list(rep(0, nmod)) for (k in (1:nmod)) { if (sum(which[k, ]) != 0) { model.fits[[k]] <- ls.print(lsfit(x[, which[k, ], drop = FALSE], y, wt = wt), print.it = FALSE)$coef.table[[1]] } else model.fits[[k]] <- ls.print(lsfit(rep(1, length(y)), y, wt = wt, intercept = FALSE), print.it = FALSE)$coef.table[[1]] } Ebi <- rep(0, (nvar + 1)) SDbi <- rep(0, (nvar + 1)) CEbi <- Ebi CSDbi <- SDbi EbiMk <- matrix(rep(0, nmod * (nvar + 1)), nrow = nmod) sebiMk <- matrix(rep(0, nmod * (nvar + 1)), nrow = nmod) for (i in 1:(nvar + 1)) { if ((i == 1) || (sum(which[, (i - 1)] != 0))) { for (k in (1:nmod)) { if ((i == 1) || (which[k, (i - 1)] == TRUE)) { if (i == 1) pos <- 1 else pos <- 1 + sum(which[k, (1:(i - 1))]) EbiMk[k, i] <- model.fits[[k]][pos, 1] sebiMk[k, i] <- model.fits[[k]][pos, 2] } } Ebi[i] <- as.numeric(sum(postprob * EbiMk[, i])) SDbi[i] <- sqrt(postprob %*% (sebiMk[, i]^2) + postprob %*% ((EbiMk[, i] - Ebi[i])^2)) if (i == 1) { CEbi[i] <- Ebi[i] CSDbi[i] <- SDbi[i] } else { sel <- which[, i - 1] cpp <- postprob[sel]/sum(postprob[sel]) CEbi[i] <- as.numeric(sum(cpp * EbiMk[sel, i])) CSDbi[i] <- sqrt(cpp %*% (sebiMk[sel, i]^2) + cpp %*% ((EbiMk[sel, i] - CEbi[i])^2)) } } } dimnames(which) <- list(NULL, colnames(x)) dimnames(EbiMk) <- dimnames(sebiMk) <- list(NULL, c("(Intercept)", colnames(x))) postmean <- apply(postprob * EbiMk, 2, sum) fittedValues <- function(coef, x) cbind(1, x) %*% coef residualVariance <- function(coef, x, y) sum((y - fittedValues(coef, x))^2)/(length(y) - length(coef)) resvar <- apply(EbiMk, 1, residualVariance, x = x, y = y) result <- list(postprob = postprob, namesx = xnames, label = label, r2 = r2, bic = bic, size = (size - 1), which = which, probne0 = c(probne0), postmean = postmean, residvar = resvar, postsd = SDbi, condpostmean = CEbi, condpostsd = CSDbi, ols = EbiMk, mle = EbiMk, se = sebiMk, reduced = reduced, dropped = dropped, input.names = input.names, call = cl, n.models = length(postprob), n.vars = length(probne0)) class(result) <- "bicreg" result }
/scratch/gouwar.j/cran-all/cranData/BMA/R/bicreg.R
"as.bic.glm" <- function (g, ...) { UseMethod("as.bic.glm") } summary.glib <- function (object, n.models = 5, digits = max(3, getOption("digits") - 3), conditional = FALSE, index.phi=1, ...) { x<- object gobj<- as.bic.glm.glib(x, index.phi=index.phi) summary(gobj, n.models=n.models, digits=digits, conditional=conditional, display.dropped=FALSE, ...) } "as.bic.glm.glib" <- function (g, index.phi = 1, ...) { probne0 <- 100*(1-g$posterior$prob0[, index.phi]) postmean <- c(NA, g$posterior$mean[, index.phi]) postsd <- c(NA, g$posterior$sd[, index.phi]) size <- g$bf$npar bic <- g$bf$twologB10[, index.phi] postprob <- g$bf$postprob[, index.phi] deviance <- g$bf$deviance disp <- 1 which <- g$models == 1 family <- g$inputs$error if (!is.null(g$glm.out)) reduced <- g$glm.out$reduced else reduced <- FALSE if (!is.null(g$glm.out)) dropped <- g$glm.out$dropped else dropped <- NULL call <- g$call n.models <- length(postprob) n.vars <- length(probne0) factor.type <- FALSE x <- g$inputs$x y <- g$input$y if (is.null(colnames(x))) colnames(x)<- paste("X",1:ncol(x),sep="") nms <- c("Intercept", colnames(x)) assign <- list() for (i in 1:length(nms)) assign[[i]] <- i names(assign) <- nms output.names <- list() for (i in 2:length(nms)) output.names[[i - 1]] <- NA names(output.names) <- nms[-1] namesx <- colnames(x) if (!is.null(g$glm.out)) design <- g$glm.out$design else design <- NULL label <- rep(NA, times = n.models) for (i in 1:n.models) label[i] <- paste(namesx[which[i, ]], sep = "", collapse = ",") mle <- matrix(rep(0, times = n.models * (n.vars + 1)), ncol = n.vars + 1) for (i in 1:n.models) mle[i, c(FALSE, which[i, ])] <- rbind(g$posterior.bymodel$mean[[i]][-1, index.phi]) se <- matrix(rep(0, times = n.models * (n.vars + 1)), ncol = n.vars + 1) for (i in 1:n.models) se[i, c(FALSE, which[i, ])] <- rbind(g$posterior.bymodel$sd[[i]][-1, index.phi]) mle[,1]<- rep(NA, times=nrow(mle)) se[,1]<- rep(NA, times=nrow(se)) nests <- n.vars + 1 CSDbi <- rep(NA, n.vars) CEbi <- CSDbi for (i in (1:ncol(x))) { sel <- which[, i] if (sum(sel) > 0) { cpp <- rbind(postprob[sel]/sum(postprob[sel])) CEbi[i + 1] <- as.numeric(cpp %*% mle[sel, i + 1]) CSDbi[i + 1] <- sqrt(cpp %*% (se[sel, i + 1]^2) + cpp %*% ((mle[sel, i + 1] - CEbi[i + 1])^2)) } } prior.model.weights <- NULL prior.param <- NULL result <- list(postprob = postprob, label = label, deviance = deviance, size = size, bic = bic, prior.param = prior.param, prior.model.weights = prior.model.weights, family = family, disp = disp, which = which, probne0 = probne0, postmean = postmean, postsd = postsd, condpostmean = CEbi, condpostsd = CSDbi, mle = mle, se = se, namesx = namesx, reduced = reduced, dropped = dropped, call = call, n.models = n.models, n.vars = n.vars, nests = nests, output.names = output.names, assign = assign, factor.type = factor.type, design = design, x = x, y = y) class(result) <- "bic.glm" return(result) } "glib" <- function (x, ...) UseMethod("glib") "glib.data.frame" <- function (x, y, n = rep(1, nrow(x)), error = "poisson", link = "log", scale = 1, models = NULL, phi = c(1, 1.65, 5), psi = 1, nu = 0, pmw = rep(1, nrow(models)), glimest = TRUE, glimvar = FALSE, output.priorvar = FALSE, post.bymodel = TRUE, output.postvar = FALSE, priormean = NULL, priorvar = NULL, nbest = 150, call = NULL, ...) { glim.pmean.A <- function(x, y, n, error, link, scale, nu, prior.spec) { glimot <- glim(x, y, n, error = error, link = link, scale = scale) coef <- matrix(glimot$coef, ncol = 1) eta <- cbind(rep(1, nrow(x)), x) %*% coef if (link == "identity") { mu <- eta deta <- rep(1,length(mu)) } if (link == "log") { mu <- exp(eta) deta <- 1/mu } if (link == "logit") { mu <- exp(eta)/(1 + exp(eta)) * n deta <- 1/mu + 1/(n - mu) } if (link == "probit") { mu <- pnorm(eta) deta <- 1/(dnorm(qnorm(mu))) } if (link == "sqrt") { mu <- eta^2 deta <- 1/(2 * sqrt(mu)) } if (link == "inverse") { mu <- 1/eta deta <- -1/(mu^2) } if (link == "loglog") { mu <- 1 - exp(-exp(eta)) deta <- -1/((1 - mu) * log(1 - mu)) } if (error == "gaussian") v <- rep(1,length(mu)) if (error == "poisson") v <- mu if (error == "binomial") v <- mu * (1 - mu/n) if (error == "gamma") v <- mu^2 if (error == "inverse gaussian") v <- mu^3 w <- 1/(deta^2 * v) w <- w/sum(w) z <- eta + (y - mu) * deta xbar <- t(x) %*% w xbar2 <- t(x^2) %*% w s2x <- xbar2 - xbar^2 zbar <- as.numeric(weighted.mean(z, w)) zbar2 <- t(z^2) %*% w if (error == "binomial") s2z <- weighted.mean((eta - zbar)^2 + 2 * (eta - zbar) * (deta * n) * (y - mu)/n + (deta * n)^2 * (y - 2 * mu * y/n + mu^2/n)/n, w) else s2z <- zbar2 - zbar^2 varz <- as.numeric(s2z) if (is.numeric(prior.spec$mean)) pmean <- prior.spec$mean else pmean <- c((zbar + nu * sqrt(varz)), rep(0, ncol(x))) if (is.numeric(prior.spec$var)) A <- prior.spec$var else { A <- diag(as.vector(1/sqrt(s2x)), nrow = length(s2x)) A <- rbind(t(-xbar/sqrt(s2x)), A) Acol1 <- c(1, rep(0, ncol(x))) A <- sqrt(varz) * cbind(Acol1, A) } list(x = x, y = y, n = n, error = error, link = link, pmean = pmean, A = A, scale = glimot$scale, varz = varz) } glim.pvar <- function(model, phi = 1, psi = 1, Aot, prior.spec) { model1 <- c(1, model + 1) if (is.numeric(prior.spec$var)) { sigma <- Aot$A sig11 <- sigma[model1, model1] sig12 <- sigma[model1, -model1] sig21 <- sigma[-model1, model1] sig22 <- sigma[-model1, -model1] if (length(model1) != ncol(Aot$A)) pvar <- sig11 - sig12 %*% solve(sig22) %*% sig21 else pvar <- sigma } else { A <- Aot$A[model1, model1] p <- length(model) B <- diag(c(psi^2, rep(phi^2, p)), nrow = (p + 1)) pvar <- A %*% B %*% t(A) } list(model = model, phi = phi, psi = psi, pvar = pvar) } E1 <- function(model, phi, psi, glimot1, Aot, prior.spec) { V <- glimot1$var A <- solve(V) logdetV <- sum(log(eigen(V)$values)) pmean <- matrix(Aot$pmean[c(1, model + 1)], ncol = 1) thetahat <- matrix(glimot1$coef, ncol = 1) nphi <- length(phi) app1 <- rep(0, nphi) npar <- length(model) + 1 pvar <- array(rep(0, npar * npar * nphi), dim = c(npar, npar, nphi)) for (j in (1:nphi)) { phij <- phi[j] pvar[, , j] <- glim.pvar(model, phij, psi, Aot, prior.spec)$pvar B <- solve(pvar[, , j]) C <- solve(A + B) I <- diag(rep(1, length(model) + 1), nrow = length(model) + 1) logdetW <- sum(log(eigen(pvar[, , j])$values)) F1 <- B %*% C %*% A %*% C %*% B + (t(I - C %*% B)) %*% B %*% (I - C %*% B) app1[j] <- -(t(thetahat - pmean)) %*% F1 %*% (thetahat - pmean) - logdetW - sum(log(eigen(A + B)$values)) } list(model = model, phi = phi, psi = psi, app1 = app1) } E0 <- function(psi = 1, glimot0, Aot) { V <- glimot0$var A <- 1/V logV <- log(V) pmean <- Aot$pmean[1] thetahat <- glimot0$coef pvar <- Aot$varz * psi^2 B <- 1/pvar C <- 1/(A + B) logW <- log(pvar) F0 <- B^2 * C^2 * A + (1 - C * B)^2 * B app1 <- F0 * (thetahat - pmean)^2 - logW - log(A + B) list(psi = psi, app1 = app1) } glim <- function(x, y, n, error = "gaussian", link = "identity", wt = 1, resid = "none", init, intercept = TRUE, scale, offset = 0, sequence, eps = 1e-04, iter.max = 10) { error.int <- charmatch(error, c("gaussian", "poisson", "binomial", "gamma", "inverse gaussian")) if (is.na(error.int)) stop("Invalid error type") else if (error.int == 0) stop("Ambiguous error type") resid.int <- charmatch(resid, c("none", "pearson", "deviance")) if (is.na(resid.int)) stop("Invalid residual type") if (!missing(scale) && !is.numeric(scale)) { temp <- charmatch(scale, c("pearson", "deviance")) if (is.na(temp)) stop("Invalid scale option") } if (!(is.numeric(y) && is.numeric(x))) stop("Invalid y or x values") x <- as.matrix(x) nn <- nrow(x) nvar <- ncol(x) if (length(y) != nn) stop("Dimensions of x and y don't match") if (!missing(wt)) { if (length(wt) != nn) stop("Dimensions of x and wt don't match") if (any(wt < 0, na.rm = TRUE)) stop("Weights must be >=0") } if (!missing(offset) && (length(offset) != nn)) stop("Dimensions of x and offset don't match") if (error.int != 3) n <- rep(1, nn) else { if (missing(n)) stop("Binomial fits require the n vector") else if (length(n) != nn) stop("Length of n vector is incorrect") n <- as.vector(n) } nomiss <- !(is.na(y) | is.na(wt) | is.na(n) | is.na(x %*% rep(1, nvar))) if (sum(nomiss) < nn) { warning(paste(nn - sum(nomiss), "deleted due to missing values")) x <- x[nomiss, , drop = FALSE] y <- y[nomiss] n <- n[nomiss] if (!missing(wt)) wt <- wt[nomiss] if (!missing(offset)) offset <- offset[nomiss] nn <- sum(nomiss) } if (missing(sequence)) sequence <- c(0, nvar) else { if (max(sequence) > nvar) stop("Invalid sequence argument") if (min(sequence) < 0) stop("Invalid sequence argument") if (any(diff(sequence) <= 0)) stop("Invalid sequence argument") } xn <- dimnames(x)[[2]] if (is.null(xn)) xn <- paste("X", 1:nvar, sep = "") if (intercept) { x <- cbind(rep(1, nn), x) xn <- c("Intercept", xn) nvar <- nvar + 1 sequence <- sequence + 1 } dimnames(x) <- list(dimnames(x)[[1]], xn) if (error.int != 1 && any(y < 0)) stop("Illegal y values") if (error.int >= 4 && any(y == 0)) stop("Illegal y values") if (error.int == 3) { y <- y/n if (any(y > 1)) stop("Illegal y values") } var <- switch(error.int, function(x, n) rep(1, length(x)), function(x, n) x, function(x, n) (x - x^2)/n, function(x, n) x^2, function(x, n) x^3) dev <- switch(error.int, function(y, mu, n) (y - mu)^2, function(y, mu, n) 2 * (y * log(ifelse(y == 0, 1, y/mu)) - (y - mu)), function(y, mu, n) 2 * n * (y * log(ifelse(y == 0, 1, y/mu)) + (1 - y) * log(ifelse(y == 1, 1, (1 - y)/(1 - mu)))), function(y, mu, n) -2 * (log(y/mu) - (y - mu)/mu), function(y, mu, n) (y - mu)^2/(y * mu^2)) link.int <- charmatch(link, c("identity", "log", "logit", "sqrt", "inverse", "probit", "loglog")) if (is.na(link.int)) stop("Invalid link type") else if (link.int == 0) stop("Ambiguous link type") f <- switch(link.int, function(x) x, function(x) exp(x), function(x) { z <- exp(ifelse(x > 80, 80, ifelse(x < -80, -80, x))) z/(z + 1) }, function(x) x^2, function(x) 1/x, pnorm, function(x) { z <- exp(ifelse(x > 80, 80, ifelse(x < -80, -80, x))) 1 - exp(-z) }) deriv <- switch(link.int, function(x) rep(1, length(x)), function(x) exp(x), function(x) exp(x)/((1 + exp(x))^2), function(x) 2 * x, function(x) -1/(x^2), function(x) 0.3989422 * exp(-0.5 * x^2), function(x) exp(x) * exp(-exp(x))) temp.y <- switch(error.int, y, y + 0.01, (n * y + 0.5)/(n + 1), y, y) eta <- switch(link.int, temp.y, log(temp.y), log(temp.y/(1 - temp.y)), sqrt(temp.y), 1/temp.y, qnorm(temp.y), log(-log(1 - temp.y))) if (sum(is.na(eta)) > 0) stop("Illegal value(s) of y for this link function") if (missing(init)) { tempd <- deriv(eta) z <- eta + (y - temp.y)/tempd - offset w <- sqrt((wt * tempd^2)/var(temp.y, n)) fit <- qr(x * w) if (fit$rank < nvar) stop("X matrix is not full rank") init.qr <- fit$qr[1:nvar, ] init.qty <- qr.qty(fit, z * w)[1:nvar] } else if (length(init) != nvar) stop("Argument 'init' is the wrong length") if (!missing(wt)) nn <- sum(wt > 0) deviance <- df <- NULL if (sequence[1] == 1 && intercept && missing(offset)) { if (missing(wt)) yhat <- sum(y * n)/sum(n) else yhat <- sum((y * n * wt)[wt > 0])/sum((n * wt)[wt > 0]) if ((yhat > 1) && (link.int == 3 || link.int == 6 || link.int == 7)) yhat <- 1 if ((yhat < 0) && link.int > 1 && link.int != 5) yhat <- 0 deviance <- sum(dev(y, yhat, n) * wt) df <- nn - 1 sequence <- sequence[-1] } else if (sequence[1] == 0) { deviance <- sum(dev(y, f(offset), n) * wt) df <- nn sequence <- sequence[-1] } for (modnum in sequence) { model <- 1:modnum if (missing(init)) coef <- backsolve(init.qr, init.qty, k = modnum)[model] else coef <- init[model] tempx <- as.matrix(x[, model, drop = FALSE]) nvar <- length(model) eta <- c(tempx %*% coef) yhat <- f(eta + offset) olddev <- sum(dev(y, yhat, n) * wt) fini <- FALSE for (i in seq(length = iter.max)) { tempd <- deriv(eta + offset) if (any(is.na(yhat) | is.na(tempd))) { warning("Coef vector is diverging, iteration truncated") break } leave.in <- ((1 + tempd) != 1) z <- eta + (y - yhat)/tempd w <- sqrt((wt * tempd^2)/var(yhat, n)) fit <- qr(tempx[leave.in, ] * w[leave.in]) if (fit$rank < nvar) { warning(paste("Weighted X matrix no longer of full rank at iteration", i)) break } coef <- qr.coef(fit, (z * w)[leave.in]) eta <- c(tempx %*% coef) yhat <- f(eta + offset) newdev <- sum(dev(y, yhat, n) * wt) if (abs((olddev - newdev)/(newdev + 1)) < eps) { fini <- TRUE break } else olddev <- newdev } if (fini == FALSE) warning(paste("Model with", length(model), "variables did not converge")) deviance <- c(deviance, newdev) df <- c(df, nn - nvar) } coef <- as.vector(coef) names(coef) <- dimnames(tempx)[[2]] if (missing(scale)) scale <- switch(error.int, newdev/(nn - nvar), 1, 1, newdev/(nn - nvar), newdev/(nn - nvar)) else if (!is.numeric(scale)) scale <- switch(charmatch(scale, c("pearson", "deviance")), sum((y - yhat)^2/(var(yhat) * (nn - nvar))), newdev/(nn - nvar)) varmat <- backsolve(fit$qr[1:nvar,,drop=F], diag(nvar)) varmat <- varmat %*% t(varmat) temp <- list(coef = coef, var = varmat * scale, deviance = deviance, df = df, scale = scale, intercept = intercept) if (resid.int == 1) temp else if (resid.int == 3) { resid <- rep(NA, length(nomiss)) resid.good <- sqrt(dev(y, yhat, n)) resid[nomiss] <- ifelse(y > yhat, resid.good, -resid.good) c(temp, list(resid = resid)) } else { resid <- rep(NA, length(nomiss)) resid[nomiss] <- (y - yhat)/ifelse(y == yhat, 1, sqrt(var(yhat, n))) c(temp, list(resid = resid)) } } ##### start of function proper ##### if (is.null(call)) cl <- match.call() else cl <- call glm.out <- NULL prior.spec <- list(mean = FALSE, var = FALSE) if (!is.null(priormean)) prior.spec$mean <- priormean if (!is.null(priorvar)) { prior.spec$var <- priorvar phi <- 1 } if (is.null(models)) { glm.out <- bic.glm(x = x, y = y, glm.family = error, nbest = nbest, dispersion = NULL, occam.window = FALSE, strict = FALSE, factor.type = FALSE, ...) tempdata <- data.frame(glm.out$x, glm.out$y) mm <- model.matrix(terms(y ~ ., data = tempdata), data = tempdata) df <- data.frame(mm) df <- df[, -ncol(df)] df <- df[, -1] xin <- as.matrix(df) y <- glm.out$y models <- glm.out$which + 0 whichin <- matrix(rep(NA, times = nrow(models) * ncol(xin)), ncol = ncol(xin)) for (i in 1:ncol(models)) whichin[, glm.out$assign[[i + 1]] - 1] <- models[, i] models <- whichin family <- glm.out$family if (is.character(family)) family <- get(family, mode = "function", envir = parent.frame()) if (is.function(family)) family <- family() error <- family$family link <- family$link x<- xin } if (is.data.frame(x)) { nx <- names(x) x <- as.matrix(x) dimnames(x) <- list(NULL, nx) } glimot0 <- glim(rep(1, length(y)), y, n, error = error, link = link, intercept = FALSE, scale = scale) Aot <- glim.pmean.A(x, y, n, error, link, scale, nu, prior.spec) scale <- Aot$scale pmean1 <- Aot$pmean E0ot <- E0(psi, glimot0, Aot) nmodel <- nrow(models) nphi <- length(phi) chi2 <- rep(0, nmodel) npar <- rep(0, nmodel) app1 <- matrix(rep(0, nmodel * nphi), ncol = nphi, nrow = nmodel) glim.coef <- as.list(rep(0, nmodel)) glim.var <- as.list(rep(0, nmodel)) glim.se <- as.list(rep(0, nmodel)) prior.var <- as.list(rep(0, nmodel)) postbym.mean <- as.list(rep(0, nmodel)) postbym.sd <- as.list(rep(0, nmodel)) postbym.var <- as.list(rep(0, nmodel)) deviance <- rep(0, nmodel) df <- rep(0, nmodel) prob0 <- matrix(rep(0, (ncol(x) * nphi)), ncol = nphi) post.mean <- matrix(rep(0, (ncol(x) * nphi)), ncol = nphi) post.sd <- matrix(rep(0, (ncol(x) * nphi)), ncol = nphi) postprob <- matrix(rep(0, (nmodel * nphi)), ncol = nphi) prior.var <- as.list(rep(0, nmodel)) for (i in (1:nmodel)) { if (sum(models[i, ]) == 0) { npar[i] <- 1 prior.var[[i]] <- array(rep(0, nphi), dim = c(1, 1, nphi)) for (j in (1:nphi)) prior.var[[i]][, , j] <- psi^2 * Aot$varz } else { model <- (1:ncol(x))[models[i, ] == 1] npar[i] <- length(model) + 1 prior.var[[i]] <- array(rep(0, npar[i] * npar[i] * nphi), dim = c(npar[i], npar[i], nphi)) for (j in (1:nphi)) prior.var[[i]][, , j] <- glim.pvar(model, phi[j], psi, Aot, prior.spec)$pvar } } for (i in (1:nmodel)) { if (sum(models[i, ]) == 0) { chi2[i] <- 0 npar[i] <- 1 app1[i, ] <- 0 betahat <- glimot0$coef glim.coef[[i]] <- betahat V <- glimot0$var glim.var[[i]] <- V glim.se[[i]] <- sqrt(diag(glim.var[[i]])) deviance[i] <- glimot0$deviance[2] df[i] <- glimot0$df[2] } else { model <- (1:ncol(x))[models[i, ] == 1] glimot1 <- glim(x[, model], y, n = n, error = error, link = link, scale = scale) chi2[i] <- (glimot0$deviance[2] - glimot1$deviance[2])/scale npar[i] <- sum(models[i, ]) + 1 E1ot <- E1(model, phi, psi, glimot1, Aot, prior.spec) app1[i, ] <- chi2[i] + (E1ot$app1 - E0ot$app1) betahat <- glimot1$coef glim.coef[[i]] <- betahat V <- glimot1$var glim.var[[i]] <- V glim.se[[i]] <- sqrt(diag(glim.var[[i]])) deviance[i] <- glimot1$deviance[2] df[i] <- glimot1$df[2] } if (sum(models[i, ]) == 0) { prior.mean <- pmean1[1] postbym.mean[[i]] <- matrix(rep(0, nphi), ncol = nphi) postbym.sd[[i]] <- matrix(rep(0, nphi), ncol = nphi) postbym.var[[i]] <- array(rep(0, nphi), dim = c(1, 1, nphi)) for (j in (1:nphi)) { W <- prior.var[[i]][, , j] postbym.mean[[i]][, j] <- betahat - V %*% solve(V + W) %*% (betahat - prior.mean) postbym.var[[i]][, , j] <- solve(solve(V) + solve(W)) postbym.sd[[i]][, j] <- sqrt(postbym.var[[i]][, , j]) } } else { prior.mean <- c(pmean1[1], rep(0, (npar[i] - 1))) postbym.mean[[i]] <- matrix(rep(0, (npar[i] * nphi)), ncol = nphi) postbym.sd[[i]] <- matrix(rep(0, (npar[i] * nphi)), ncol = nphi) postbym.var[[i]] <- array(rep(0, (npar[i] * npar[i] * nphi)), dim = c(npar[i], npar[i], nphi)) for (j in (1:nphi)) { W <- prior.var[[i]][, , j] postbym.mean[[i]][, j] <- betahat - V %*% solve(V + W) %*% (betahat - prior.mean) postbym.var[[i]][, , j] <- solve(solve(V) + solve(W)) postbym.sd[[i]][, j] <- sqrt(diag(postbym.var[[i]][, , j])) } } } for (j in (1:nphi)) { tmp <- app1[, j] - max(app1[, j]) tmp <- pmw * exp(0.5 * tmp) postprob[, j] <- tmp/sum(tmp) for (k in (1:ncol(x))) prob0[k, j] <- sum(postprob[models[, k] == 0, j]) for (k in (1:ncol(x))) { tmp1 <- 0 tmp2 <- 0 for (i in (1:nmodel)) { if (models[i, k] == 1) { pos <- sum(models[i, (1:k)]) + 1 tmp1 <- tmp1 + postbym.mean[[i]][pos, j] * postprob[i, j] tmp2 <- tmp2 + (postbym.sd[[i]][pos, j]^2 + postbym.mean[[i]][pos, j]^2) * postprob[i, j] } post.mean[k, j] <- tmp1/(1 - prob0[k, j]) post.sd[k, j] <- sqrt(tmp2/(1 - prob0[k, j]) - post.mean[k, j]^2) } } } inputs <- list(x = x, y = y, n = n, error = error, link = link, models = models, phi = phi, psi = psi, nu = nu) if (is.numeric(prior.spec$var)) inputs <- inputs[-(7:8)] if (is.numeric(prior.spec$mean)) inputs <- inputs[-match("nu", names(inputs))] bf <- list(twologB10 = app1, postprob = postprob, deviance = deviance, df = df, chi2 = chi2, npar = npar, scale = scale) posterior <- list(prob0 = prob0, mean = post.mean, sd = post.sd) if (glimest == TRUE) { if (glimvar == FALSE) glim.est <- list(coef = glim.coef, se = glim.se) if (glimvar == TRUE) glim.est <- list(coef = glim.coef, se = glim.se, var = glim.var) } if (glimest == FALSE) glim.est <- NULL if (post.bymodel == TRUE) { if (output.postvar == FALSE) posterior.bymodel <- list(mean = postbym.mean, sd = postbym.sd) if (output.postvar == TRUE) posterior.bymodel <- list(mean = postbym.mean, sd = postbym.sd, var = postbym.var) } if (post.bymodel == FALSE) posterior.bymodel <- NULL if (output.priorvar == FALSE) prior <- list(mean = pmean1) if (output.priorvar == TRUE) prior <- list(mean = pmean1, var = prior.var) result <- list(inputs = inputs, bf = bf, posterior = posterior, glim.est = glim.est, posterior.bymodel = posterior.bymodel, prior = prior, x = x, models = models, glm.out = glm.out, call = cl) class(result) <- "glib" result } glib.matrix <- glib.data.frame glib.bic.glm<- function (x, scale = 1, phi = 1, psi = 1, nu = 0, glimest = TRUE, glimvar = FALSE, output.priorvar = FALSE, post.bymodel = TRUE, output.postvar = FALSE, priormean = NULL, priorvar = NULL, call = NULL, ...) { if (is.null(call)) cl <- match.call() else cl <- call tempdata <- data.frame(x$x, x$y) mm <- model.matrix(terms(y ~ ., data = tempdata), data = tempdata) df <- data.frame(mm) df <- df[, -ncol(df)] df <- df[, -1] xin <- as.matrix(df) y <- x$y models <- x$which + 0 whichin <- matrix(rep(NA, times = nrow(models) * ncol(xin)), ncol = ncol(xin)) for (i in 1:ncol(models)) whichin[, x$assign[[i + 1]] - 1] <- models[, i] models <- whichin family <- x$family if (is.character(family)) family <- get(family, mode = "function", envir = parent.frame()) if (is.function(family)) family <- family() error <- family$family link <- family$link glib(x = xin, y = y, error = error, link = link, models = models, scale = scale, phi = phi, psi = psi, nu = nu, glimest = glimest, glimvar = glimvar, output.priorvar = output.priorvar, post.bymodel = post.bymodel, output.postvar = output.postvar, priormean = priormean, priorvar = priorvar, call = cl, ...) }
/scratch/gouwar.j/cran-all/cranData/BMA/R/glib.R
iBMA.bicreg<- function(x, ...) UseMethod("iBMA.bicreg") iBMA.bicreg.data.frame<- function(x, Y, wt = rep(1, nrow(X)), thresProbne0 = 5, maxNvar = 30, nIter=100, verbose = FALSE, sorted = FALSE, ...) { printCGen<- function(printYN) { printYN<- printYN return(function(x) if (printYN) cat(paste(paste(x,sep="", collapse = " "),"\n", sep=""))) } sortX<- function(Y, X, wt) { r2vec<- rep(NA, times = ncol(X)) for (i in 1:ncol(X)) r2vec[i]<- summary(lm(Y~X[,i], weights = wt))$r.squared initial.order<- order(abs(r2vec),decreasing = TRUE) sortedX<- X[, initial.order] return(list(sortedX = sortedX, initial.order = initial.order)) } cl <- match.call() printC<- printCGen(verbose) X<- x ### sort variables prior to running iterations if required if (!sorted) { printC("sorting X") sorted<- sortX(Y,X, wt = wt) sortedX<- sorted$sortedX initial.order<- sorted$initial.order } else { sortedX<- X initial.order<- 1:ncol(sortedX) } #### Iteration Initiation nVar<- ncol(sortedX) # make sure that we do not try to use more columns at a time than are present maxNvar <- min (maxNvar, nVar) stopVar <- 0 nextVar <- maxNvar + 1 current.probne0<- rep(0, maxNvar) maxProbne0<- rep(0, times = nVar) nTimes<- rep(0, times = nVar) currIter <- 0 first.in.model<- rep(NA, times = nVar) new.vars<- 1:maxNvar first.in.model[new.vars]<- currIter + 1 iter.dropped<- rep(NA, times = nVar) currentSet<- NULL current_state<- list(Y = Y, sortedX = sortedX, wt = wt, call = cl, initial.order = initial.order, thresProbne0 = thresProbne0, maxNvar = maxNvar, nIter = nIter, verbose = verbose, nVar = nVar, currentSet = currentSet, new.vars= new.vars, stopVar = stopVar, nextVar = nextVar, current.probne0 = current.probne0, maxProbne0 = maxProbne0, nTimes = nTimes, currIter = currIter, first.in.model = first.in.model, iter.dropped = iter.dropped) class(current_state)<- "iBMA.intermediate.bicreg" result<- iBMA.bicreg.iBMA.intermediate.bicreg(current_state, ...) result } ### this function does a set number of iterations of iBMA, returning an intermediate result unless it is finished, ### in which case it returns a final result iBMA.bicreg.iBMA.intermediate.bicreg<- function (x, nIter = NULL, verbose = NULL, ...) { printCGen<- function(printYN) { printYN<- printYN return(function(x) if (printYN) cat(paste(paste(x,sep="", collapse = " "),"\n", sep=""))) } cs<- x # check if nIter has been redefined if (!is.null(nIter)) cs$nIter<- nIter if (!is.null(verbose)) cs$verbose<- verbose printC<- printCGen(cs$verbose) finalIter<- cs$currIter + cs$nIter ### iterate until a final result is produced (cs$stopVar == 1) or nIter more iterations have been done while (cs$stopVar == 0 && cs$currIter < finalIter) { # add in the new variables nextSet<- c(cs$currentSet, cs$new.vars) cs$currIter<- cs$currIter + 1 printC(paste("starting iteration ",cs$currIter," nextVar =",cs$nextVar)) printC("applying bicreg now") #browser() currentX<- cs$sortedX[,nextSet] colnames(currentX)<- colnames(cs$sortedX)[nextSet] ret.bicreg <- bicreg (x = currentX, y = cs$Y, wt = cs$wt, maxCol = cs$maxNvar + 1, ...) printC(ret.bicreg$probne0) cs$maxProbne0[nextSet]<- pmax(ret.bicreg$probne0, cs$maxProbne0[nextSet]) cs$nTimes[nextSet]<- cs$nTimes[nextSet] + 1 cs$rmVector <- ret.bicreg$probne0 < cs$thresProbne0 # adaptive threshold if (any(cs$rmVector) == FALSE) { # no var to swap in!!, increase threshold currMin <- min (ret.bicreg$probne0) printC (paste("no var to swap! Min probne0 = ", currMin, sep="")) newThresProbne0 <- currMin + 1 printC(paste("new probne0 threshold = ", newThresProbne0, sep="")) cs$rmVector <- ret.bicreg$probne0 < newThresProbne0 # check that we do not drop everything! if (all(cs$rmVector)) cs$rmVector<- c(rep(FALSE, times = length(cs$rmVector)-1), TRUE) } # drop the bad ones... cs$iter.dropped[nextSet[cs$rmVector]]<- cs$currIter cs$currentSet<- nextSet[!cs$rmVector] # now if there are more variables to examine add the new set of variables to the current set if ( cs$nextVar <= cs$nVar) { # set up new X printC ("generating next set of variables") lastVar<- sum(cs$rmVector) + cs$nextVar - 1 # add in bulk if we are not close to the end of the variables, if (lastVar <= cs$nVar) { cs$new.vars<- cs$nextVar:lastVar cs$first.in.model[cs$new.vars]<- cs$currIter + 1 cs$nextVar <- lastVar + 1 } # add in one by one until no variables left else { cs$new.vars<- NULL for (i in length(cs$rmVector):1) { if (cs$rmVector[i] == TRUE && cs$nextVar <= cs$nVar) { cs$new.vars<- c(cs$new.vars, cs$nextVar) cs$first.in.model[cs$nextVar]<- cs$currIter + 1 cs$nextVar <- cs$nextVar + 1 } } } } else { # exhausted all data cs$stopVar <- 1 cs$new.vars = NULL } } # if we have finished (all variables) do some wrap-up and generate output values if (cs$stopVar == 1) { printC("finished iterating") currentX<- cs$sortedX[,cs$currentSet] colnames(currentX)<- colnames(cs$sortedX)[cs$currentSet] ret.bicreg <- bicreg (x = currentX, y = cs$Y, wt = cs$wt, maxCol = cs$maxNvar + 1, ...) output<- cs output$bma<- ret.bicreg output$selected<- cs$currentSet output$nIterations<- cs$currIter class(output)<- "iBMA.bicreg" } else { output<- cs class(output)<- "iBMA.intermediate.bicreg" } output } iBMA.bicreg.matrix<- iBMA.bicreg.data.frame
/scratch/gouwar.j/cran-all/cranData/BMA/R/iBMA.bicreg.R
iBMA.glm<-function(x, ...) UseMethod("iBMA.glm") iBMA.glm.data.frame <- function (x, Y, wt = rep(1, nrow(X)), thresProbne0 = 5, glm.family, maxNvar = 30, nIter = 100, verbose = FALSE, sorted = FALSE, factor.type = TRUE, ...) { printCGen <- function(printYN) { printYN <- printYN return(function(x) if (printYN) cat(paste(paste(x, sep = "", collapse = " "), "\n", sep = ""))) } # CF: solution to namespace lock https://gist.github.com/wch/3280369 #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: inc <- ' /* This is taken from envir.c in the R 2.15.1 source https://github.com/SurajGupta/r-source/blob/master/src/main/envir.c */ #define FRAME_LOCK_MASK (1<<14) #define FRAME_IS_LOCKED(e) (ENVFLAGS(e) & FRAME_LOCK_MASK) #define UNLOCK_FRAME(e) SET_ENVFLAGS(e, ENVFLAGS(e) & (~ FRAME_LOCK_MASK)) ' src <- ' if (TYPEOF(env) == NILSXP) error("use of NULL environment is defunct"); if (TYPEOF(env) != ENVSXP) error("not an environment"); UNLOCK_FRAME(env); // Return TRUE if unlocked; FALSE otherwise SEXP result = PROTECT( Rf_allocVector(LGLSXP, 1) ); LOGICAL(result)[0] = FRAME_IS_LOCKED(env) == 0; UNPROTECT(1); return result; ' unlockEnvironment <- cfunction(signature(env = "environment"), includes = inc, body = src) nsEnv <- asNamespace('BMA') unlockEnvironment(nsEnv) nsEnv$glob <- function() { utils::globalVariables(parent.env(environment())) } environment(nsEnv$glob) <- nsEnv pkgEnv <- as.environment('package:BMA') unlockEnvironment(pkgEnv) pkgEnv$glob <- nsEnv$glob exportEnv <- nsEnv$.__NAMESPACE__.$exports exportEnv$glob <- c(glob="glob") #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: utils::globalVariables(c("nastyHack_glm.family", "nastyHack_x.df")) sortX <- function(Y, X, glm.family, wt) { fitvec <- rep(NA, times = ncol(X)) nastyHack_glm.family <- glm.family nastyHack_x.df <- data.frame(X) glm.out <- glm(Y ~ 1, family = nastyHack_glm.family, weights = wt, data = nastyHack_x.df) scp <- formula(paste("~", paste(colnames(X), sep = "", collapse = " + "))) addglm <- add1(glm.out, scope = scp, test = "Chisq", data = nastyHack_x.df) fitvec <- addglm[-1, grep("^P.*Chi", names(addglm))] initial.order <- order(fitvec, decreasing = FALSE) sortedX <- X[, initial.order] return(list(sortedX = sortedX, initial.order = initial.order)) } X <- x cl <- match.call() printC <- printCGen(verbose) if (factor.type == FALSE) { x.df <- data.frame(X) X <- model.matrix(terms.formula(~., data = x.df), data = x.df)[, -1] } if (!sorted) { printC("sorting X") sorted <- sortX(Y, X, glm.family, wt = wt) sortedX <- sorted$sortedX initial.order <- sorted$initial.order } else { sortedX <- X initial.order <- 1:ncol(sortedX) } nVar <- ncol(sortedX) maxNvar <- min(maxNvar, nVar) stopVar <- 0 nextVar <- maxNvar + 1 current.probne0 <- rep(0, maxNvar) maxProbne0 <- rep(0, times = nVar) nTimes <- rep(0, times = nVar) currIter <- 0 first.in.model <- rep(NA, times = nVar) new.vars <- 1:maxNvar first.in.model[new.vars] <- currIter + 1 iter.dropped <- rep(NA, times = nVar) currentSet <- NULL current_state <- list(Y = Y, sortedX = sortedX, wt = wt, call = cl, initial.order = initial.order, thresProbne0 = thresProbne0, maxNvar = maxNvar, glm.family = glm.family, nIter = nIter, verbose = verbose, nVar = nVar, currentSet = currentSet, new.vars = new.vars, stopVar = stopVar, nextVar = nextVar, current.probne0 = current.probne0, maxProbne0 = maxProbne0, nTimes = nTimes, currIter = currIter, first.in.model = first.in.model, iter.dropped = iter.dropped) class(current_state) <- "iBMA.intermediate.glm" result <- iBMA.glm.iBMA.intermediate.glm(current_state, ...) result } ### this function does a set number of iterations of iBMA, returning an intermediate result unless it is finished, ### in which case it returns a final result iBMA.glm.iBMA.intermediate.glm<- function (x, nIter = NULL, verbose = NULL, ...) { printCGen<- function(printYN) { printYN<- printYN return(function(x) if (printYN) cat(paste(paste(x,sep="", collapse = " "),"\n", sep=""))) } cs<- x # check if nIter has been redefined if (!is.null(nIter)) cs$nIter<- nIter if (!is.null(verbose)) cs$verbose<- verbose printC<- printCGen(cs$verbose) finalIter<- cs$currIter + cs$nIter ### iterate until a final result is produced (cs$stopVar == 1) or nIter more iterations have been done while (cs$stopVar == 0 && cs$currIter < finalIter) { # add in the new variables nextSet<- c(cs$currentSet, cs$new.vars) cs$currIter<- cs$currIter + 1 printC(paste("\n\n starting iteration ",cs$currIter," nextVar =",cs$nextVar)) printC("applying bic.glm now") currentX<- cs$sortedX[,nextSet] colnames(currentX)<- colnames(cs$sortedX)[nextSet] ret.bic.glm <- bic.glm (x = currentX, y = cs$Y, glm.family= cs$glm.family, maxCol = cs$maxNvar + 1, ...) printC(ret.bic.glm$probne0) cs$maxProbne0[nextSet]<- pmax(ret.bic.glm$probne0, cs$maxProbne0[nextSet]) cs$nTimes[nextSet]<- cs$nTimes[nextSet] + 1 cs$rmVector <- ret.bic.glm$probne0 < cs$thresProbne0 # adaptive threshold if (any(cs$rmVector) == FALSE) { # no var to swap in!!, increase threshold currMin <- min (ret.bic.glm$probne0) printC (paste("no var to swap! Min probne0 = ", currMin, sep="")) newThresProbne0 <- currMin + 1 printC(paste("new probne0 threshold = ", newThresProbne0, sep="")) cs$rmVector <- ret.bic.glm$probne0 < newThresProbne0 # check that we do not drop everything! if (all(cs$rmVector)) cs$rmVector<- c(rep(FALSE, times = length(cs$rmVector)-1), TRUE) } # drop the bad ones... cs$iter.dropped[nextSet[cs$rmVector]]<- cs$currIter cs$currentSet<- nextSet[!cs$rmVector] # now if there are more variables to examine add the new set of variables to the current set if ( cs$nextVar <= cs$nVar) { # set up new X printC ("generating next set of variables") lastVar<- sum(cs$rmVector) + cs$nextVar - 1 # add in bulk if we are not close to the end of the variables, if (lastVar <= cs$nVar) { cs$new.vars<- cs$nextVar:lastVar cs$first.in.model[cs$new.vars]<- cs$currIter + 1 cs$nextVar <- lastVar + 1 } # add in one by one until no variables left else { cs$new.vars<- NULL for (i in length(cs$rmVector):1) { if (cs$rmVector[i] == TRUE && cs$nextVar <= cs$nVar) { cs$new.vars<- c(cs$new.vars, cs$nextVar) cs$first.in.model[cs$nextVar]<- cs$currIter + 1 cs$nextVar <- cs$nextVar + 1 } } } } else { # exhausted all data cs$stopVar <- 1 cs$new.vars = NULL } } # if we have finished (all variables) do some wrap-up and generate output values if (cs$stopVar == 1) { printC("finished iterating") currentX<- cs$sortedX[,cs$currentSet] colnames(currentX)<- colnames(cs$sortedX)[cs$currentSet] ret.bic.glm <- bic.glm (x = currentX, y = cs$Y, glm.family= cs$glm.family, maxCol = cs$maxNvar + 1, ...) output<- cs output$bma<- ret.bic.glm output$selected<- cs$currentSet output$nIterations<- cs$currIter class(output)<- "iBMA.glm" } else { output<- cs class(output)<- "iBMA.intermediate.glm" } output } iBMA.glm.matrix<- iBMA.glm.data.frame
/scratch/gouwar.j/cran-all/cranData/BMA/R/iBMA.glm.R
iBMA.surv<- function(x, ...) UseMethod("iBMA.surv") iBMA.surv.data.frame <- function (x, surv.t, cens, wt = rep(1, nrow(X)), thresProbne0 = 5, maxNvar = 30, nIter = 100, verbose = FALSE, sorted = FALSE, factor.type = TRUE, ...) { printCGen <- function(printYN) { printYN <- printYN return(function(x) if (printYN) cat(paste(paste(x, sep = "", collapse = " "), "\n", sep = ""))) } # CF namespcae unlock from https://gist.github.com/wch/3280369 #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: inc <- ' /* This is taken from envir.c in the R 2.15.1 source https://github.com/SurajGupta/r-source/blob/master/src/main/envir.c */ #define FRAME_LOCK_MASK (1<<14) #define FRAME_IS_LOCKED(e) (ENVFLAGS(e) & FRAME_LOCK_MASK) #define UNLOCK_FRAME(e) SET_ENVFLAGS(e, ENVFLAGS(e) & (~ FRAME_LOCK_MASK)) ' src <- ' if (TYPEOF(env) == NILSXP) error("use of NULL environment is defunct"); if (TYPEOF(env) != ENVSXP) error("not an environment"); UNLOCK_FRAME(env); // Return TRUE if unlocked; FALSE otherwise SEXP result = PROTECT( Rf_allocVector(LGLSXP, 1) ); LOGICAL(result)[0] = FRAME_IS_LOCKED(env) == 0; UNPROTECT(1); return result; ' unlockEnvironment <- cfunction(signature(env = "environment"), includes = inc, body = src) nsEnv <- asNamespace('BMA') unlockEnvironment(nsEnv) nsEnv$glob <- function() { utils::globalVariables(parent.env(environment())) } environment(nsEnv$glob) <- nsEnv pkgEnv <- as.environment('package:BMA') unlockEnvironment(pkgEnv) pkgEnv$glob <- nsEnv$glob exportEnv <- nsEnv$.__NAMESPACE__.$exports exportEnv$glob <- c(glob="glob") #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: utils::globalVariables("nastyHack_x.df") sortX <- function(surv.t, cens, X, wt) { fitvec <- rep(NA, times = ncol(X)) nastyHack_x.df <- data.frame(X) scp <- formula(paste("~", paste(colnames(X), sep = "", collapse = " + "))) cox.out <- coxph(Surv(surv.t, cens) ~ 1, weights = wt, method = "breslow", iter.max = 30, data = nastyHack_x.df) addcox <- add1(cox.out, scope = scp, test = "Chisq", data = nastyHack_x.df) fitvec <- addcox[-1, grep("^P.*Chi", names(addcox))] initial.order <- order(fitvec, decreasing = FALSE) sortedX <- X[, initial.order] return(list(sortedX = sortedX, initial.order = initial.order)) } X <- x cl <- match.call() printC <- printCGen(verbose) if (factor.type == FALSE) { x.df <- data.frame(X) X <- model.matrix(terms.formula(~., data = x.df), data = x.df)[, -1] } if (!sorted) { printC("sorting X") sorted <- sortX(surv.t, cens, X, wt = wt) sortedX <- sorted$sortedX initial.order <- sorted$initial.order } else { sortedX <- X initial.order <- 1:ncol(sortedX) } nVar <- ncol(sortedX) maxNvar <- min(maxNvar, nVar) stopVar <- 0 nextVar <- maxNvar + 1 current.probne0 <- rep(0, maxNvar) maxProbne0 <- rep(0, times = nVar) nTimes <- rep(0, times = nVar) currIter <- 0 first.in.model <- rep(NA, times = nVar) new.vars <- 1:maxNvar first.in.model[new.vars] <- currIter + 1 iter.dropped <- rep(NA, times = nVar) currentSet <- NULL current_state <- list(surv.t = surv.t, cens = cens, sortedX = sortedX, wt = wt, call = cl, initial.order = initial.order, thresProbne0 = thresProbne0, maxNvar = maxNvar, nIter = nIter, verbose = verbose, nVar = nVar, currentSet = currentSet, new.vars = new.vars, stopVar = stopVar, nextVar = nextVar, current.probne0 = current.probne0, maxProbne0 = maxProbne0, nTimes = nTimes, currIter = currIter, first.in.model = first.in.model, iter.dropped = iter.dropped) class(current_state) <- "iBMA.intermediate.surv" result <- iBMA.surv.iBMA.intermediate.surv(current_state, ...) result } ### this function does a set number of iterations of iBMA, returning an intermediate result unless it is finished, ### in which case it returns a final result iBMA.surv.iBMA.intermediate.surv<- function (x, nIter = NULL, verbose = NULL, ...) { printCGen<- function(printYN) { printYN<- printYN return(function(x) if (printYN) cat(paste(paste(x,sep="", collapse = " "),"\n", sep=""))) } cs<- x # check if nIter has been redefined if (!is.null(nIter)) cs$nIter<- nIter if (!is.null(verbose)) cs$verbose<- verbose printC<- printCGen(cs$verbose) finalIter<- cs$currIter + cs$nIter ### iterate until a final result is produced (cs$stopVar == 1) or nIter more iterations have been done while (cs$stopVar == 0 && cs$currIter < finalIter) { # add in the new variables nextSet<- c(cs$currentSet, cs$new.vars) cs$currIter<- cs$currIter + 1 printC(paste("\n\n starting iteration ",cs$currIter," nextVar =",cs$nextVar)) printC("applying bic.surv now") currentX<- cs$sortedX[,nextSet] colnames(currentX)<- colnames(cs$sortedX)[nextSet] ret.bic<- bic.surv(x = currentX, surv.t = cs$surv.t, cens = cs$cens, maxCol = cs$maxNvar + 1, ...) printC(ret.bic$probne0) cs$maxProbne0[nextSet]<- pmax(ret.bic$probne0, cs$maxProbne0[nextSet]) cs$nTimes[nextSet]<- cs$nTimes[nextSet] + 1 cs$rmVector <- ret.bic$probne0 < cs$thresProbne0 # adaptive threshold if (any(cs$rmVector) == FALSE) { # no var to swap in!!, increase threshold currMin <- min (ret.bic$probne0) printC (paste("no var to swap! Min probne0 = ", currMin, sep="")) newThresProbne0 <- currMin + 1 printC(paste("new probne0 threshold = ", newThresProbne0, sep="")) cs$rmVector <- ret.bic$probne0 < newThresProbne0 # check that we do not drop everything! if (all(cs$rmVector)) cs$rmVector<- c(rep(FALSE, times = length(cs$rmVector)-1), TRUE) } # drop the bad ones... cs$iter.dropped[nextSet[cs$rmVector]]<- cs$currIter cs$currentSet<- nextSet[!cs$rmVector] # now if there are more variables to examine add the new set of variables to the current set if ( cs$nextVar <= cs$nVar) { # set up new X printC ("generating next set of variables") lastVar<- sum(cs$rmVector) + cs$nextVar - 1 # add in bulk if we are not close to the end of the variables, if (lastVar <= cs$nVar) { cs$new.vars<- cs$nextVar:lastVar cs$first.in.model[cs$new.vars]<- cs$currIter + 1 cs$nextVar <- lastVar + 1 } # add in one by one until no variables left else { cs$new.vars<- NULL for (i in length(cs$rmVector):1) { if (cs$rmVector[i] == TRUE && cs$nextVar <= cs$nVar) { cs$new.vars<- c(cs$new.vars, cs$nextVar) cs$first.in.model[cs$nextVar]<- cs$currIter + 1 cs$nextVar <- cs$nextVar + 1 } } } } else { # exhausted all data cs$stopVar <- 1 cs$new.vars = NULL } } # if we have finished (all variables) do some wrap-up and generate output values if (cs$stopVar == 1) { printC("finished iterating") currentX<- cs$sortedX[,cs$currentSet] colnames(currentX)<- colnames(cs$sortedX)[cs$currentSet] ret.bic<- bic.surv(x = currentX, surv.t = cs$surv.t, cens = cs$cens, maxCol = cs$maxNvar + 1, ...) output<- cs output$bma<- ret.bic output$selected<- cs$currentSet output$nIterations<- cs$currIter class(output)<- "iBMA.surv" } else { output<- cs class(output)<- "iBMA.intermediate.surv" } output } iBMA.surv.matrix<- iBMA.surv.data.frame
/scratch/gouwar.j/cran-all/cranData/BMA/R/iBMA.surv.R
imageplot.bma <- function (bma.out, color = c("red", "blue", "#FFFFD5"), order = c("input", "probne0", "mds"), ...) { clr <- color if (length(color) == 1) { if (color == "default") clr <- c("#FF0000", "#FF0000", "#FFFFD5") if (color == "blackandwhite") clr <- c("black", "black", "white") } keep.mar <- par(mar = c(5, 6, 4, 2) + 0.1) nmodel <- nrow(bma.out$which) which <- bma.out$which probne0 <- bma.out$probne0 if (inherits(bma.out, "bic.surv")) mle <- bma.out$mle else mle <- bma.out$mle[, -1, drop = FALSE] nvar <- ncol(mle) rownms <- bma.out$namesx if (ifelse(!is.null(bma.out$factor.type), bma.out$factor.type, FALSE)) { which <- matrix(NA, ncol = nvar, nrow = nmodel) probne0 <- rep(NA, times = nvar) rownms <- rep(NA, times = nvar) assign <- bma.out$assign offset <- 1 if (inherits(bma.out, "bic.surv")) offset <- 0 assign[[1]] <- NULL for (i in 1:length(assign)) { probne0[assign[[i]] - offset] <- bma.out$probne0[i] which[, assign[[i]] - offset] <- bma.out$which[, i] nm <- names(bma.out$output.names)[i] if (!is.na(bma.out$output.names[[i]][1])) nm <- paste(nm, bma.out$output.names[[i]][-1], sep = ".") rownms[assign[[i]] - offset] <- nm } } ordr.type <- match.arg(order) if (ordr.type == "probne0") ordr <- order(-probne0) else if (ordr.type == "mds") { postprob.rep <- matrix(bma.out$postprob, ncol = nvar, nrow = nmodel) k11 <- t(which + 0) %*% ((which + 0) * postprob.rep) k00 <- t(1 - which) %*% ((1 - which) * postprob.rep) k01 <- t(which + 0) %*% ((1 - which) * postprob.rep) k10 <- t(1 - which) %*% ((0 + which) * postprob.rep) ktau <- 4 * (k00 * k11 - k01 * k10) dissm <- 1 - abs(ktau) diag(dissm) <- 0 ordr <- order(as.vector(cmdscale(dissm, k = 1))) } else ordr <- 1:nvar ordr <- rev(ordr) postprob <- bma.out$postprob which <- which[, ordr, drop = FALSE] mle <- mle[, ordr, drop = FALSE] rownms <- rownms[ordr] color.matrix <- (which) * (2 - (mle > 0)) + 3 * (!which) par(las = 1) image(c(0, cumsum(postprob)), 1:nvar, color.matrix, col = clr, xlab = "Model #", ylab = "", xaxt = "n", yaxt = "n", xlim = c(0, 1), main = "Models selected by BMA", ...) xat <- (cumsum(postprob) + c(0, cumsum(postprob[-nmodel])))/2 axis(1, at = xat, labels = 1:nmodel, ...) axis(2, at = 1:nvar, labels = rownms, ...) par(mar = keep.mar) }
/scratch/gouwar.j/cran-all/cranData/BMA/R/imageplot.bma.R
For.MC3.REG<- function(i, g, Ys, Xs, PI, K, nu, lambda, phi, outs.list) { if (g$flag == 1) { if (sum(g$M0.var) != 0) g$M0.1 <- sum(2^((0:(length(g$M0.var) - 1))[g$M0.var])) + 1 else g$M0.1 <- 1 if (sum(g$M0.out) != 0) g$M0.2 <- sum(2^((0:(length(g$M0.out) - 1))[g$M0.out])) + 1 else g$M0.2 <- 1 } M1 <- MC3.REG.choose(g$M0.var, g$M0.out) if (sum(M1$var) != 0) M1.1 <- sum( 2^( (0:(length(g$M0.var) - 1)) [M1$var]) ) + 1 else M1.1 <- 1 if (sum(M1$out) != 0) M1.2<- sum(2^((0:(length(g$M0.out) - 1))[M1$out])) + 1 else M1.2 <- 1 if (sum(g$big.list[, 1] == M1.1 & g$big.list[, 2] == M1.2) == 0) { if (M1.1 == 1) #null model { if (g$outcnt != 0) a <- (dim(Ys)[1] - sum(M1$out)) * log(1 - PI) + sum(M1$out) * log(PI) + MC3.REG.logpost(Ys, Xs, 0, 0, outs.list[M1$out], K, nu, lambda, phi) else a <- MC3.REG.logpost(Ys, Xs, 0, 0, outs.list[M1$out], K, nu, lambda, phi) } else { if (g$outcnt != 0) a <- (dim(Ys)[1] - sum(M1$out)) * log(1 - PI) + sum(M1$out) * log(PI) + MC3.REG.logpost(Ys, Xs, M1$var, sum(M1$var), outs.list[M1$out], K, nu, lambda, phi) else a <- MC3.REG.logpost(Ys, Xs, M1$var, sum(M1$var), outs.list[M1$out], K, nu, lambda, phi) } g$big.list<- rbind(g$big.list, c(M1.1, M1.2, a, 0)) } BF <- exp(g$big.list[g$big.list[, 1] == M1.1 & g$big.list[, 2] == M1.2, 3] - g$big.list[g$big.list[, 1] == g$M0.1 & g$big.list[, 2] == g$M0.2, 3]) #print("") #print("g = ") #print(g) if (BF >= 1) g$flag <- 1 else g$flag <- rbinom(1, 1, BF) if (g$flag == 1) { g$M0.var <- M1$var g$M0.out <- M1$out g$M0.1 <- M1.1 g$M0.2 <- M1.2 } g$big.list[g$big.list[, 1] == g$M0.1 & g$big.list[, 2] == g$M0.2, 4]<- g$big.list[g$big.list[, 1] == g$M0.1 & g$big.list[, 2] == g$M0.2, 4] + 1 return(g) } MC3.REG<- function(all.y, all.x, num.its, M0.var = NULL, M0.out = NULL, outs.list = NULL, outliers = TRUE, PI=.1*(length(all.y) <50) + .02*(length(all.y) >= 50), K=7, nu= NULL, lambda= NULL, phi= NULL) { cl <- match.call() all.x<- data.frame(all.x) if (is.null(M0.var)) M0.var<- rep(TRUE, ncol(all.x)) if ((sum(M0.var) == 0)) stop("\nInput error: M0.var cannot be null model") if (outliers) { if (is.null(outs.list)) { outs.list<- out.ltsreg(all.x, all.y, 2) } if (length(outs.list) == 0) outliers<- FALSE } if (outliers) { if (is.null(M0.out)) { M0.out<- rep(TRUE, length(outs.list)) } } if ((length(M0.out) != length(outs.list)) || (length(M0.var) != dim(all.x)[2])) stop("\nInput error: M0.*** is not the right length") # calculate R for full model to determine defaults for nu, lambda and phi if (is.null(nu) | is.null(lambda) | is.null(phi) ) { r2<- summary(lm(all.y ~ ., data = data.frame(all.y, all.x)))$r.squared if (r2 < 0.9) { new.nu<- 2.58 new.lambda<- 0.28 new.phi<- 2.85 } else { new.nu<- 0.2 new.lambda<- 0.1684 new.phi<- 9.20 } if (is.null(nu)) nu<- new.nu if (is.null(lambda)) lambda<- new.lambda if (is.null(phi)) phi<- new.phi } var.names<- colnames(all.x) var.numbers<- 1:ncol(all.x) outlier.numbers<- outs.list Ys <- scale(all.y) Xs <- scale(all.x) #global variables g<- list() g$flag<- 1 g$M0.var<- M0.var g$M0.out<- M0.out if (is.null(outs.list)) g$outcnt<- 0 else g$outcnt<- sum(outs.list) g$big.list<- matrix(0, 1, 4) g$big.list[1, 1]<- sum(2^((0:(length(g$M0.var) - 1))[g$M0.var])) + 1 if (sum(g$M0.out) != 0) g$big.list[1, 2]<- sum(2^((0:(length(g$M0.out) - 1))[g$M0.out])) + 1 else g$big.list[1, 2]<- 1 if (g$outcnt != 0) g$big.list[1, 3] <- (dim(Ys)[1] - sum(g$M0.out)) * log(1 - PI) + sum(g$M0.out) * log(PI) + MC3.REG.logpost(Ys, Xs, g$M0.var, sum(g$M0.var), outs.list[g$M0.out], K, nu, lambda, phi) else g$big.list[1, 3] <- MC3.REG.logpost(Ys, Xs, g$M0.var, sum(g$M0.var), outs.list[g$M0.out], K, nu, lambda, phi) for(i in 1:num.its) g<- For.MC3.REG(i, g, Ys, Xs, PI, K, nu, lambda, phi, outs.list) var.matrix<- matrix(as.logical(rep(g$big.list[, 1] - 1, rep(length(g$M0.var), length(g$big.list[, 1])))%/%2^(0:(length(g$M0.var) - 1))%%2), ncol = length(g$M0.var), byrow = TRUE) n.var <- length(g$M0.var) ndx <- 1:n.var Xn <- rep("X", n.var) labs <- paste(Xn, ndx, sep = "") colnames(var.matrix) <- var.names # transform to avoid numerical problems if g$big.list[, 3] is highly negative gstar <- g$big.list[, 3] - max(g$big.list[, 3]) postprob<- exp(gstar)/(sum(exp(gstar))) visits <- g$big.list[, 4] if (length(outs.list) != 0) { out.matrix <- matrix(as.logical(rep(g$big.list[, 2] - 1, rep(length(outs.list), length(g$big.list[, 2])))%/%2^(0:(length(outs.list) - 1))%%2), ncol = length(outs.list), byrow = TRUE) colnames(out.matrix) <- outs.list } else out.matrix<- NULL ordr<- order(-postprob) result<- list(post.prob = postprob[ordr], variables = var.matrix[ordr, ,drop=FALSE], outliers = out.matrix[ordr, ,drop=FALSE], visit.count = visits[ordr], var.numbers = var.numbers, outlier.numbers = outlier.numbers, var.names = var.names, n.models = length(postprob[ordr]), PI = PI, K=K, nu=nu, lambda=lambda, phi=phi, call = cl) class(result)<- "mc3" return(result) } MC3.REG.choose<-function(M0.var,M0.out) { var <- M0.var in.or.out <- sample(c(1:length(M0.var), rep(0, length(M0.out))), 1) if (in.or.out == 0) { out <- M0.out in.or.out2 <- sample(1:length(M0.out), 1) out[in.or.out2] <- !M0.out[in.or.out2] } else { var[in.or.out] <- !M0.var[in.or.out] out <- M0.out } return(list(var=var, out=out)) } MC3.REG.logpost<- function(Y, X, model.vect, p, i, K, nu, lambda, phi) { #print(list(Y=Y,X=X,model.vect=model.vect, p=p, i=i, K=K, nu=nu, lambda=lambda,phi=phi)) n <- dim(Y)[1] ones <- rep(1, n) A <- cbind(ones, X[, model.vect]) V <- diag(c(1, rep(phi^2, p))) ones[i] <- K^2 det <- diag(ones) + A %*% V %*% t(A) divs <- prod(eigen(det, TRUE, TRUE)$values)^0.5 denom <- (t(Y) %*% solve(det, Y)) + (nu * lambda) lgamma((n + nu)/2) + log(nu * lambda) * (nu/2) - log(pi) * (n/2) - lgamma(nu/2) - log(divs) - ((nu + n)/2) * log(denom) } out.ltsreg <- function(x,y,delta) { # require(rrcov) abc<-ltsReg(x,y)$residuals (1:length(y))[abs(as.vector(abc)/mad(abc))>=delta] } print.mc3<- function(x, digits = max(3, getOption("digits") - 3), n.models = nrow(x$variables), ...) { mc3.out<- x n.best<- n.models cat("\nCall:\n", deparse(mc3.out$call), "\n\n", sep = "") cat(paste("Model parameters: PI = ", mc3.out$PI, " K = ", mc3.out$K, " nu = ",mc3.out$nu, " lambda = ",mc3.out$lambda, " phi = ", mc3.out$phi, "\n",sep="")) cat("\nModels visited: \n") n.var<- ncol(mc3.out$variables) n.out<- ncol(mc3.out$outliers) n.rows<- n.best pretty.var<- apply(mc3.out$variables+0,1,paste,sep=" ",collapse=" ") if (!is.null(mc3.out$outliers)) { pretty.out<- apply(mc3.out$outliers+0, 1,paste,sep=" ",collapse=" ") pretty<- format(data.frame(posterior=mc3.out$post.prob, n.visits= mc3.out$visit.count, variables = pretty.var, outliers = pretty.out, row.names=NULL), digits=digits, row.names=NULL) } else pretty<- format(data.frame(posterior=mc3.out$post.prob, n.visits= mc3.out$visit.count, variables = pretty.var, row.names=NULL), digits=digits, row.names=NULL) print.default(pretty[1:n.rows,], ...) } summary.mc3<- function(object, n.models = 5, digits = max(3, getOption("digits") - 3), ...) { mc3.out<- object cat("\nCall:\n", deparse(mc3.out$call), "\n\n", sep = "") cat(paste("Model parameters: PI = ", mc3.out$PI, " K = ", mc3.out$K, " nu = ",mc3.out$nu, " lambda = ",mc3.out$lambda, " phi = ", mc3.out$phi, "\n",sep="")) n.models <- min(n.models, mc3.out$n.models) sel <- 1:n.models cat("\n ", mc3.out$n.models, " models were selected") cat("\n Best ", n.models, " models (cumulative posterior probability = ", round(sum(mc3.out$post.prob[sel]), digits), "): \n\n") # calculate marginal posteriors var.probs<- mc3.out$post.prob %*% mc3.out$variables if (!is.null(mc3.out$outliers)) { out.probs<- mc3.out$post.prob %*% mc3.out$outliers marginals<- rbind(format(t(var.probs),digits=digits),"",format(t(out.probs),digits=digits)) probs<- format(mc3.out$post.prob[sel], digits=digits) # vars<- format(t(mc3.out$variables[sel, ,drop=FALSE]) + 0,digits=1) # outs<- format(t(mc3.out$outliers[sel,]) + 0,digits=1) vars<- matrix(".", ncol=ncol(mc3.out$variables[sel, ,drop=FALSE]), nrow=nrow(mc3.out$variables[sel, ,drop=FALSE])) vars[mc3.out$variables[sel, ,drop=FALSE]]<- "x" vars<- t(vars) outs<- matrix(".", ncol=ncol(mc3.out$outliers[sel, ,drop=FALSE]), nrow=nrow(mc3.out$outliers[sel, ,drop=FALSE])) outs[mc3.out$outliers[sel, ,drop=FALSE]]<- "x" outs<- t(outs) decpos <- nchar(unlist(strsplit(probs[1], "\\."))[1]) offset2 <- paste(rep(" ", times = decpos ), sep = "", collapse = "") # now loop through vars and outs pasting offset, since R 'paste' does not do vectors :-( for (i in 1:ncol(vars)) vars[,i]<- paste(offset2,vars[,i],sep="") for (i in 1:ncol(outs)) outs[,i]<- paste(offset2,outs[,i],sep="") seprow<- rep("",times=ncol(vars)) rght<- rbind(seprow,vars,seprow,outs,seprow,probs) lft<- rbind("",marginals,"","") all<- cbind(lft,rght) colnames(all)<- c("prob", paste("model ",1:n.models,sep="")) rownames(all)<- c("variables", paste(" ",mc3.out$var.names), "outliers", paste(" ", mc3.out$outlier.numbers), "","post prob") } else { marginals<- rbind(format(t(var.probs),digits=digits)) probs<- format(mc3.out$post.prob[sel], digits=digits) # vars<- format(t(mc3.out$variables[sel,]) + 0,digits=1) vars<- matrix(".", ncol=ncol(mc3.out$variables[sel,]), nrow=nrow(mc3.out$variables[sel,])) vars[mc3.out$variables[sel,]]<- "x" vars<- t(vars) decpos <- nchar(unlist(strsplit(probs[1], "\\."))[1]) offset2 <- paste(rep(" ", times = decpos ), sep = "", collapse = "") # now loop through vars and outs pasting offset, since R 'paste' does not do vectors :-( for (i in 1:ncol(vars)) vars[,i]<- paste(offset2,vars[,i],sep="") seprow<- rep("",times=ncol(vars)) rght<- rbind(seprow,vars,seprow,probs) lft<- rbind("",marginals,"","") all<- cbind(lft,rght) colnames(all)<- c("prob", paste("model ",1:n.models,sep="")) rownames(all)<- c("variables", paste(" ",mc3.out$var.names), "","post prob") } print.default(all, print.gap = 2, quote = FALSE, ...) } "[.mc3"<- function(x, ... ) { as.data.frame.mc3(x)[...] } as.data.frame.mc3<- function(x, ...) { y<- list() y$post.prob<- x$post.prob y$variables<- x$variables + 0 y$outliers<- x$outliers + 0 y$visit.count<- x$visit.count colnames(y$variables)<- x$var.names colnames(y$outliers)<- x$outlier.numbers outliers<- !is.null(dim(y$outliers)) if (outliers) yy<- data.frame(y$post.prob, y$visit.count, y$variables, y$outliers, ...) else yy<- data.frame(y$post.prob, y$visit.count, y$variables, ...) nms<- c("post.prob","visit.count",x$var.names) if (outliers) nms<- c(nms, x$outlier.numbers) names(yy)<- nms return(yy) }
/scratch/gouwar.j/cran-all/cranData/BMA/R/mc3.R
orderplot<- function (x, ...) UseMethod("orderplot") orderplot.iBMA.surv<- function(x, ...) { cs<- x nvar<- cs$nVar plot(c(0,cs$currIter + 1), c(0, nvar + 1), type='n', main = 'orderplot for iBMA', ylab = 'variable', xlab = 'iteration', ...) for (i in 1:nvar) { if (i %in% cs$selected) { lines(c(cs$first.in.model[i], cs$currIter + 1 ), c(i,i), col = 'blue') points(cs$currIter+1, i, col = 'blue', pch= 19) } else { if (is.na(cs$first.in.model[i]) | (i %in% cs$new.vars)) points(0, i, pch = 21, col = 'darkgreen') else { if (is.na(cs$iter.dropped[i])) lines(c(cs$first.in.model[i], cs$currIter+1), c(i,i), col = 'blue') else { if (cs$first.in.model[i] == (cs$iter.dropped[i] )) points(cs$first.in.model[i], i, pch = '-', col = 'black') else lines(c(cs$first.in.model[i], cs$iter.dropped[i]), c(i,i), col = 'black') } } } } invisible(x) } orderplot.iBMA.intermediate.glm<- orderplot.iBMA.intermediate.bicreg<- orderplot.iBMA.intermediate.surv<- orderplot.iBMA.glm<- orderplot.iBMA.bicreg<- orderplot.iBMA.surv
/scratch/gouwar.j/cran-all/cranData/BMA/R/orderplot.iBMA.R
"plot.bic.glm" <- function (x, e = 1e-04, mfrow = NULL, include = 1:length(x$namesx), ...) { plotvar <- function(prob0, mixprobs, means, sds, Emean, Esd, name, e = 1e-04, nsteps = 500, ...) { if (prob0 == 1) { xlower <- -0 xupper <- 0 xmax <- 1 } else { qmin <- qnorm(e/2, Emean, Esd) qmax = qnorm(1 - e/2, Emean, Esd) xlower <- min(qmin, 0) xupper <- max(0, qmax) } xx <- seq(xlower, xupper, length.out = nsteps) yy <- rep(0, times = length(xx)) maxyy <- 1 if (prob0 < 1) { for (j in 1:length(means)) yy <- yy + mixprobs[j] * dnorm(xx, means[j], sds[j]) maxyy <- max(yy) } ymax <- max(prob0, 1 - prob0) plot(c(xlower, xupper), c(0, ymax), type = "n", xlab = "", ylab = "", main = name, ...) lines(c(0, 0), c(0, prob0), lty = 1, lwd = 3, ...) lines(xx, (1 - prob0) * yy/maxyy, lty = 1, lwd = 1, ...) } vars <- unlist(x$assign[include + 1]) nvar <- length(vars) probs <- NULL for (i in include) probs <- c(probs, rep(x$probne0[i], times = length(x$assign[[i + 1]]))) nms <- NULL for (i in include) { if (is.na(x$output.names[i][1])) nms <- c(nms, names(x$output.names[i])) else nms <- c(nms, paste(names(x$output.names[i]), unlist(x$output.names[i])[-1], sep = ".")) } wwhich <- NULL for (i in include) { wwhich <- cbind(wwhich, matrix(rep(x$which[, i], times = length(x$assign[[i + 1]])), ncol = length(x$assign[[i + 1]]))) } if (!is.null(mfrow)) { lo <- mfrow losize <- lo[1] * lo[2] } else { layoutsizes <- c(1, 4, 9) layouts <- rbind(c(1, 1), c(2, 2), c(3, 3)) layout <- max((1:length(layoutsizes))[layoutsizes <= nvar]) losize <- layoutsizes[layout] lo <- layouts[layout, ] } keep.mfrow = par()$mfrow par(mfrow = c(1, 1)) par(ask = FALSE) par(mfrow = lo) ngroups <- ceiling(nvar/losize) for (k in 1:ngroups) { for (ii in ((k - 1) * losize + 1):min(k * losize, nvar)) { i <- vars[ii] - 1 prob0 <- 1 - probs[i]/100 sel <- wwhich[, i] mixprobs <- x$postprob[sel]/(1 - prob0) means <- x$mle[sel, i + 1] sds <- x$se[sel, i + 1] Emean <- x$condpostmean[i + 1] Esd <- x$condpostsd[i + 1] name <- nms[i] plotvar(prob0, mixprobs, means, sds, Emean, Esd, name, e = e, ...) } par(ask = TRUE) } par(mfrow=keep.mfrow) par(ask = FALSE) } "plot.bicreg" <- function (x, e = 1e-04, mfrow = NULL, include = 1:x$n.vars, include.intercept = TRUE, ...) { plotvar <- function(prob0, mixprobs, means, sds, Emean, Esd, name, e = 1e-04, nsteps = 500, ...) { if (prob0 == 1) { xlower <- -0 xupper <- 0 xmax <- 1 } else { qmin <- qnorm(e/2, Emean, Esd) qmax = qnorm(1 - e/2, Emean, Esd) xlower <- min(qmin, 0) xupper <- max(0, qmax) } xx <- seq(xlower, xupper, length.out = nsteps) yy <- rep(0, times = length(xx)) maxyy <- 1 if (prob0 < 1) { for (j in 1:length(means)) yy <- yy + mixprobs[j] * dnorm(xx, means[j], sds[j]) maxyy <- max(yy) } ymax <- max(prob0, 1 - prob0) plot(c(xlower, xupper), c(0, ymax), type = "n", xlab = "", ylab = "", main = name, ...) lines(c(0, 0), c(0, prob0), lty = 1, lwd = 3, ...) lines(xx, (1 - prob0) * yy/maxyy, lty = 1, lwd = 1, ...) } nvar <- length(include) + include.intercept vars <- include + 1 if (include.intercept) vars <- c(1, vars) if (!is.null(mfrow)) { lo <- mfrow losize <- lo[1] * lo[2] } else { layoutsizes <- c(1, 4, 9) layouts <- rbind(c(1, 1), c(2, 2), c(3, 3)) layout <- max((1:length(layoutsizes))[layoutsizes <= nvar]) losize <- layoutsizes[layout] lo <- layouts[layout, ] } keep.mfrow = par()$mfrow par(mfrow = c(1, 1)) par(ask = FALSE) par(mfrow = lo) ngroups <- ceiling(nvar/losize) for (k in 1:ngroups) { for (ii in ((k - 1) * losize + 1):min(k * losize, nvar)) { i <- vars[ii] if (i != 1) { prob0 <- 1 - x$probne0[i - 1]/100 sel <- x$which[, i - 1] } else { prob0 <- 0 sel <- rep(TRUE, times = x$n.models) } mixprobs <- x$postprob[sel]/(1 - prob0) means <- x$ols[sel, i] sds <- x$se[sel, i] Emean <- x$condpostmean[i] Esd <- x$condpostsd[i] if (i == 1) name <- "Intercept" else name <- x$namesx[i - 1] plotvar(prob0, mixprobs, means, sds, Emean, Esd, name, e = e, ...) } par(ask = TRUE) } par(mfrow=keep.mfrow) par(ask = FALSE) } "plot.bic.surv" <- function (x, e = 1e-04, mfrow = NULL, include = 1:length(x$namesx), ...) { plotvar <- function(prob0, mixprobs, means, sds, Emean, Esd, name, e = 1e-04, nsteps = 500, ...) { if (prob0 == 1) { xlower <- -0 xupper <- 0 xmax <- 1 } else { qmin <- qnorm(e/2, Emean, Esd) qmax = qnorm(1 - e/2, Emean, Esd) xlower <- min(qmin, 0) xupper <- max(0, qmax) } xx <- seq(xlower, xupper, length.out = nsteps) yy <- rep(0, times = length(xx)) maxyy <- 1 if (prob0 < 1) { for (j in 1:length(means)) yy <- yy + mixprobs[j] * dnorm(xx, means[j], sds[j]) maxyy <- max(yy) } ymax <- max(prob0, 1 - prob0) plot(c(xlower, xupper), c(0, ymax), type = "n", xlab = "", ylab = "", main = name, ...) lines(c(0, 0), c(0, prob0), lty = 1, lwd = 3, ...) lines(xx, (1 - prob0) * yy/maxyy, lty = 1, lwd = 1, ...) } vars <- unlist(x$assign[include + 1]) nvar <- length(vars) probs <- NULL for (i in include) probs <- c(probs, rep(x$probne0[i], times = length(x$assign[[i + 1]]))) nms <- NULL for (i in include) { if (is.na(x$output.names[i][1])) nms <- c(nms, names(x$output.names[i])) else nms <- c(nms, paste(names(x$output.names[i]), unlist(x$output.names[i])[-1], sep = ".")) } wwhich <- NULL for (i in include) { wwhich <- cbind(wwhich, matrix(rep(x$which[, i], times = length(x$assign[[i + 1]])), ncol = length(x$assign[[i + 1]]))) } if (!is.null(mfrow)) { lo <- mfrow losize <- lo[1] * lo[2] } else { layoutsizes <- c(1, 4, 9) layouts <- rbind(c(1, 1), c(2, 2), c(3, 3)) layout <- max((1:length(layoutsizes))[layoutsizes <= nvar]) losize <- layoutsizes[layout] lo <- layouts[layout, ] } keep.mfrow = par()$mfrow par(mfrow = c(1, 1)) par(ask = FALSE) par(mfrow = lo) ngroups <- ceiling(nvar/losize) for (k in 1:ngroups) { for (ii in ((k - 1) * losize + 1):min(k * losize, nvar)) { i <- vars[ii] prob0 <- 1 - probs[i]/100 sel <- wwhich[, i] mixprobs <- x$postprob[sel]/(1 - prob0) means <- x$mle[sel, i] sds <- x$se[sel, i] Emean <- x$condpostmean[i] Esd <- x$condpostsd[i] name <- nms[i] plotvar(prob0, mixprobs, means, sds, Emean, Esd, name, e = e, ...) } par(ask = TRUE) } par(mfrow=keep.mfrow) par(ask = FALSE) }
/scratch/gouwar.j/cran-all/cranData/BMA/R/plot.bic.R
predict.bic.glm <- function(object,newdata,...){ # CF October 2011 dropcols <- function(x, y, wt, maxCols = 31) { # CF: copied from bicreg (undocumented) x1.ldf <- data.frame(x, y = y) temp.wt <- wt lm.out <- lm(y ~ ., data = x1.ldf, weights = temp.wt) form.vars <- all.vars(formula(lm.out))[-1] any.dropped <- FALSE dropped.which <- NULL while (length(lm.out$coefficients) > maxCol) { any.dropped <- TRUE droplm <- drop1(lm.out, test = "none") dropped <- row.names(droplm)[which.min(droplm$RSS[-1]) + 1] dropped.index <- match(dropped, form.vars) form.vars <- form.vars[-dropped.index] formla <- formula(paste("y", "~", paste(form.vars, collapse = " + "), sep = " ")) lm.out <- lm(formla, data = x1.ldf, weights = temp.wt) dropped.which <- c(dropped.which, dropped) } new.var.names <- names(lm.out$coefficients) return(list(mm = model.matrix(lm.out)[, -1, drop = FALSE], any.dropped = any.dropped, dropped = dropped.which, var.names = new.var.names)) } get.names <- function(x,maxCol) { # CF: written from portion of bicreg code x <- data.frame(x) # dummy y, wt y <- rep(0,nrow(x)) wt <- rep(1,nrow(x)) if (is.null(dimnames(x))) dimnames(x) <- list(NULL, paste("X", 1:ncol(x), sep = "")) # y <- as.numeric(y) options(contrasts = c("contr.treatment", "contr.treatment")) xnames <- dimnames(x)[[2]] x2 <- na.omit(data.frame(x)) used <- match(row.names(data.frame(x)), row.names(x2)) omitted <- seq(nrow(x))[is.na(used)] if (length(omitted) > 0) { stop("NAs in newdata") # wt <- wt[-omitted] x <- x2 # y <- y[-omitted] warning(paste("There were ", length(omitted), "records deleted due to N\ A'\ s")) } # if (drop.factor.levels) { # cdf <- cbind.data.frame(y = y, x) # mm <- model.matrix(formula(cdf), data = cdf)[, -1, drop = FALSE] # x <- mm # } xx <- dropcols(x, y, wt, maxCol) xnames <- xx$var.names[-1] x <- xx$mm reduced <- xx$any.dropped dropped <- NULL if (reduced) dropped <- xx$dropped xnames } linkinvBinom <- function(x) { ## written by CF dimx <- dim(x) x <- as.vector(x) fine <- is.finite(x) pos <- x >= 0 small <- -x < log(.Machine$double.eps) index <- fine & pos & small x[index] <- 1 index <- fine & pos & !small xindex <- exp(-x[index]) x[index] <- 1/(1+xindex) small <- x < log(.Machine$double.xmin) index <- fine & !pos & small x[index] <- 0 index <- fine & !pos & !small xindex <- exp(x[index]) x[index] <- xindex/(1+xindex) if (!is.null(dimx)) array(x,dimx) else x } factor.names <- function(x) { # CF: copied from bic.glm.data.frame (undocumented) out <- list() for (i in 1:ncol(x)) if (is.factor(x[, i])) out[[i]] <- levels(x[, i]) else out <- c(out, list(NULL)) attributes(out)$names <- names(x) return(out) } create.assign <- function(x) { # CF: copied from bic.glm.data.frame (undocumented) asgn <- list() asgn[[1]] <- 1 cnt <- 2 for (i in 1:ncol(x)) { if (!is.factor(x[, i])) size <- 1 else size <- length(levels(x[, i])) - 1 asgn[[i + 1]] <- cnt:(cnt + size - 1) cnt <- cnt + size } names(asgn) <- c("(Intercept)", attributes(x)$names) return(asgn) } callList <- as.list(object$call) callFunc <- callList[[1]] defaults <- as.list(args(deparse(substitute(callFunc)))) factor.type <- callList$factor.type factor.type <- if (is.null(factor.type)) defaults$factor.type else eval(factor.type) if (as.character(callFunc) == "bic.glm.matrix") factor.type <- FALSE maxCol <- callList$maxCol maxCol <- if (is.null(maxCol)) defaults$maxCol else eval(maxCol) newdata <- data.frame(newdata) if (!is.null(object$formula)) { newdata <- model.matrix(object$formula, data = newdata)[,-1] } else { y <- rnorm(nrow(newdata)) newdata <- model.matrix(formula(cbind.data.frame(y = y, newdata)), data = newdata)[,-1] } nam <- colnames(object$mle)[-1] mvars <- match( nam, colnames(newdata), nomatch = 0) if (any(mvars == 0)) stop("newdata is missing variables") newdata <- newdata[,nam, drop = FALSE] nObs <- nrow(newdata) newnam <- get.names( newdata, maxCol) fn <- factor.names(newdata) new.assign <- create.assign(newdata) fac.levels <- unlist(lapply(new.assign, length)[-1]) fac.ident <- sapply(newdata,is.factor) nModels <- length(object$postprob) linpred <- matrix(NA,nModels,nObs) dimnames(object$which) <- list(dimnames(object$which)[[1]], object$xnames) colnames(object$mle) <- c("(Intercept)",newnam) if (any(fac.ident)) { nVars <- length(fac.ident) for (k in 1:nModels) { linpred[k,] <- object$mle[k,1] IN <- object$mle[k,] != 0 j <- 1 for (i in 1:nVars) { if (fac.ident[i]) { L <- (1:as.numeric(fac.levels[i])) if (any(IN[L])) { # works for factor.type = FALSE if all levels are in some model coefs <- object$mle[k,j+L] if (!all(coefs == 0)) { fac.vals <- newdata[, i] m <- match(fac.vals,fn[[i]],nomatch=NA) if (any(is.na(m))) stop("NA") linpred[k,] <- linpred[k,] + as.vector(c(0,coefs)[m]) } } j <- j + length(L) } else { j <- j + 1 linpred[k,] <- linpred[k,]+ object$mle[k,j]*newdata[,i] } } } } else { linpred <- tcrossprod(object$mle, cbind(1, as.matrix(newdata))) } rhs <- as.function(object$linkinv)(linpred) # rhs = predictions for individual models # if (show) print(rhs[,1:min(9,ncol(rhs))]) pred <- apply(object$postprob * rhs, 2, sum) names(pred) <- rownames(newdata) pred }
/scratch/gouwar.j/cran-all/cranData/BMA/R/predict.bic.glm.R
predict.bicreg <- function( object, newdata, quantiles = c(.1,.5,.9), ...){ # CF August 2011 - January 2012 cdfBMAnormal <- function (x, WEIGHTS, MEAN, SD, offset = 0) { # # copyright 2006-present, University of Washington. All rights reserved. # for terms of use, see the LICENSE file # sum(WEIGHTS*pnorm(x, mean = MEAN, sd = SD)) - offset } quantBMAnormal <- function(alpha, WEIGHTS, MEAN, SD) { # # copyright 2006-present, University of Washington. All rights reserved. # for terms of use, see the LICENSE file # lower <- min(MEAN-6*SD) upper <- max(MEAN+6*SD) if (cdfBMAnormal(lower, WEIGHTS, MEAN, SD, 0) > alpha) return(NA) if (cdfBMAnormal(upper, WEIGHTS, MEAN, SD, 0) < alpha) return(NA) uniroot(cdfBMAnormal, lower = lower, upper = upper, WEIGHTS=WEIGHTS, MEAN=MEAN, SD=SD, offset = alpha)$root } dropcols <- function(x, y, wt, maxCols = 31) { # CF: copied from bicreg (undocumented) x1.ldf <- data.frame(x, y = y) temp.wt <- wt lm.out <- lm(y ~ ., data = x1.ldf, weights = temp.wt) form.vars <- all.vars(formula(lm.out))[-1] any.dropped <- FALSE dropped.which <- NULL while (length(lm.out$coefficients) > maxCol) { any.dropped <- TRUE droplm <- drop1(lm.out, test = "none") dropped <- row.names(droplm)[which.min(droplm$RSS[-1]) + 1] dropped.index <- match(dropped, form.vars) form.vars <- form.vars[-dropped.index] formla <- formula(paste("y", "~", paste(form.vars, collapse = " + "), sep = " ")) lm.out <- lm(formla, data = x1.ldf, weights = temp.wt) dropped.which <- c(dropped.which, dropped) } new.var.names <- names(lm.out$coefficients) return(list(mm = model.matrix(lm.out)[, -1, drop = FALSE], any.dropped = any.dropped, dropped = dropped.which, var.names = new.var.names)) } get.names <- function(x,maxCol,drop.factor.levels) { # CF: written from portion of bicreg code x <- data.frame(x) # dummy y, wt y <- rep(0,nrow(x)) wt <- rep(1,nrow(x)) if (is.null(dimnames(x))) dimnames(x) <- list(NULL, paste("X", 1:ncol(x), sep = "")) # y <- as.numeric(y) options(contrasts = c("contr.treatment", "contr.treatment")) xnames <- dimnames(x)[[2]] x2 <- na.omit(data.frame(x)) used <- match(row.names(data.frame(x)), row.names(x2)) omitted <- seq(nrow(x))[is.na(used)] if (length(omitted) > 0) { stop("NAs in newdata") # wt <- wt[-omitted] x <- x2 # y <- y[-omitted] warning(paste("There were ", length(omitted), "records deleted due to NA'\ s")) } if (drop.factor.levels) { cdf <- cbind.data.frame(y = y, x) mm <- model.matrix(formula(cdf), data = cdf)[, -1, drop = FALSE] x <- mm } xx <- dropcols(x, y, wt, maxCol) xnames <- xx$var.names[-1] x <- xx$mm reduced <- xx$any.dropped dropped <- NULL if (reduced) dropped <- xx$dropped xnames } factor.names <- function(x) { # CF: copied from bic.glm.data.frame (undocumented) out <- list() for (i in 1:ncol(x)) if (is.factor(x[, i])) out[[i]] <- levels(x[, i]) else out <- c(out, list(NULL)) attributes(out)$names <- names(x) return(out) } create.assign <- function(x) { # CF: copied from bic.glm.data.frame (undocumented) asgn <- list() asgn[[1]] <- 1 cnt <- 2 for (i in 1:ncol(x)) { if (!is.factor(x[, i])) size <- 1 else size <- length(levels(x[, i])) - 1 asgn[[i + 1]] <- cnt:(cnt + size - 1) cnt <- cnt + size } names(asgn) <- c("(Intercept)", attributes(x)$names) return(asgn) } newdata <- as.data.frame(newdata[,object$input.names]) nObs <- nrow(newdata) callList <- as.list(object$call) callFunc <- callList[[1]] defaults <- as.list(args(deparse(substitute(callFunc)))) drop.factor.levels <- callList$drop.factor.levels if (is.null(drop.factor.levels)) { if (as.character(callFunc) == "bic.glm.matrix") { drop.factor.levels <- FALSE } else { drop.factor.levels <- defaults$drop.factor.levels } } else drop.factor.levels <- eval(drop.factor.levels) maxCol <- callList$maxCol maxCol <- if (is.null(maxCol)) defaults$maxCol else eval(maxCol) newnam <- get.names( newdata, maxCol, drop.factor.levels) # object$namesx should be a subset of newnam # caution: newdata may not have the same column order as the original mvars <- match( object$namesx, newnam, nomatch = 0) if (any(mvars == 0)) stop("newdata is missing variables") if (!all(1:length(mvars) == sort(mvars))) stop("newdata has extra variables") fn <- factor.names(newdata) new.assign <- create.assign(newdata) fac.levels <- unlist(lapply(new.assign, length)[-1]) fac.ident <- sapply(newdata,is.factor) nModels <- length(object$postprob) nVars <- length(fac.levels) # some may not be used nModels <- length(object$postprob) linpred <- matrix(NA,nModels,nObs) dimnames(object$which) <- list(rownames(object$which), object$xnames) dimnames(object$mle) <- list(rownames(object$mle), c("(Intercept)",object$namesx)) nVars <- length(fac.ident) for (k in 1:nModels) { linpred[k,] <- object$mle[k,1] IN <- object$mle[k,] != 0 j <- 1 for (i in 1:nVars) { if (fac.ident[i]) { if (IN[j+1]) { coefs <- object$mle[k,j+(1:as.numeric(fac.levels[i]))] if (!all(coefs == 0)) { fac.vals <- newdata[, i] m <- match(fac.vals,fn[[i]],nomatch=NA) if (any(is.na(m))) stop("NA") linpred[k,] <- linpred[k,] + as.vector(c(0,coefs)[m]) } } j <- j + as.numeric(fac.levels[i]) } else { j <- j + 1 linpred[k,] <- linpred[k,]+ object$mle[k,j]*newdata[,i] } } } # print(linpred) # predictions for the individual models predmean <- apply(object$postprob * linpred, 2, sum) objVARterm <- sum(object$postprob * object$residvar) # 2019/11/21 Fix by Anupreet Porwal (original (predmean - linpred) # combined by columns instead by rows) predSD <- sqrt(objVARterm + apply((predmean - t(linpred))^2, 1, weighted.mean, w = object$postprob)) # apply(object$postprob * (predmean - linpred)^2, 2, sum)) predInt <- matrix( NA, nrow(newdata), length(quantiles)) rownames(predInt) <- names(predmean) <- names(predSD) <- rownames(newdata) colnames(predInt) <- quantiles for (i in 1:nrow(newdata)) { predInt[i,] <- sapply( quantiles, quantBMAnormal, WEIGHTS=object$postprob, MEAN=linpred[,i], SD=predSD[i]) } list(mean = predmean, sd = predSD, quantiles = predInt) }
/scratch/gouwar.j/cran-all/cranData/BMA/R/predict.bicreg.R
print.bic.glm <- function (x, digits = max(3, getOption("digits") - 3), ...) { cat("\nCall:\n", deparse(x$call), "\n\n", sep = "") cat("\n Posterior probabilities(%): \n") out <- x$probne0 # names(out) <- if (x$factor.type) x$namesx else colnames(x$mle)[-1] print(out, ...) cat("\n Coefficient posterior expected values: \n") out <- x$postmean # names(out) <- c("(Intercept)", colnames(x$mle)[-1]) fout <- format(out, digits = digits) fout[is.na(out)] <- "" print.default(fout, print.gap = 2, quote = FALSE, ...) invisible(x) }
/scratch/gouwar.j/cran-all/cranData/BMA/R/print.bic.glm.R
print.iBMA.intermediate.surv<- function(x, ...) { cs<- x cat("\n iBMA intermediate result \n") cat("\nCall:\n", deparse(cs$call), "\n\n", sep = "") examined<- 1:(cs$nextVar - 1) examined<- examined[!(examined %in% cs$new.vars)] all.names<- colnames(cs$sortedX)[examined] current<- cs$currentSet nextSet<- c(cs$currentSet, cs$new.vars) cat("completed ", cs$currIter, " iteration", c("", "s")[1 + (cs$currIter != 1)], "\n",sep="") cat("number of iterations in last call = ", cs$nIter, "\n",sep="") cat("last variable examined = ", ifelse(sum(examined) > 0, max(examined), ""), "\n",sep="") cat("variables currently selected = ", ifelse(sum(current) > 0, paste(current, collapse = " ", sep = ""), ""), "\n",sep="") cat("next set to examine = ", paste(nextSet, collapse = " ", sep = ""), "\n\n\n", sep = "") invisible(x) } summary.iBMA.intermediate.surv<- function(object, ...) { cs<- object cat("\n iBMA intermediate result \n") cat("\nCall:\n", deparse(cs$call), "\n\n", sep = "") ### if show_models show them here before statistics, (don't want user to think it is finished because ### iteration statistics are scrolled off screen examined<- 1:(cs$nextVar - 1) examined<- examined[!(examined %in% cs$new.vars)] all.names<- colnames(cs$sortedX)[examined] current<- cs$currentSet nextSet<- c(cs$currentSet, cs$new.vars) dropped<- examined[!(examined %in% current)] status<- rep("currently selected", times = length(examined)) status[dropped]<- "dropped" tble<- cbind( all.names, status, cs$maxProbne0[examined], cs$nTimes[examined]) colnames(tble)<- c("variable", "status", "max Probne0", "n times in model") row.names(tble)<- examined ### basic statistics cat("completed ", cs$currIter, " iteration", c("", "s")[1 + (cs$currIter != 1)], "\n",sep="") cat("number of iterations in last call = ", cs$nIter, "\n",sep="") cat("last variable examined = ", ifelse(sum(examined) > 0, max(examined), ""), "\n",sep="") cat("variables currently selected = ", ifelse(sum(current) > 0, paste(current, collapse = " ", sep = ""), ""), "\n",sep="") cat("next set to examine = ", paste(nextSet, collapse = " ", sep = ""), "\n\n\n", sep = "") cat("statistics for variables examined so far \n ") ### details print.default(tble, print.gap = 2, quote = FALSE, ...) invisible(object) } print.iBMA.surv<- function(x, ...) { cs<- x cat("\n iBMA final result \n") cat("\nCall:\n", deparse(cs$call), "\n\n", sep = "") examined<- 1:(cs$nextVar - 1) examined<- examined[!(examined %in% cs$last.added)] all.names<- colnames(cs$sortedX)[examined] current<- cs$currentSet cat("completed ", cs$currIter, " iteration", c("", "s")[1 + (cs$currIter != 1)], "\n",sep="") cat("number of iterations in last call = ", cs$nIter, "\n",sep="") cat("selected variables = ", ifelse(sum(current) > 0, paste(current, collapse = " ", sep = ""), ""), "\n",sep="") cat("\n\n Results of running BMA on final set of selected variables:\n\n") print(x$bma, ...) invisible(x) } summary.iBMA.surv<- function(object, ...) { cs<- object cat("\n iBMA.glm intermediate result \n") cat("\nCall:\n", deparse(cs$call), "\n\n", sep = "") ### if show_models show them here before statistics, (don't want user to think it is finished because ### iteration statistics are scrolled off screen all.names<- colnames(cs$sortedX) status<- rep("dropped", times = cs$nVar) status[cs$selected]<- "selected" tble<- cbind( all.names, status, cs$maxProbne0, cs$nTimes) colnames(tble)<- c("variable", "status", "max Probne0", "n times in model") row.names(tble)<- 1:cs$nVar ### basic statistics cat("completed ", cs$currIter, " iteration", c("", "s")[1 + (cs$currIter != 1)], "\n",sep="") cat("number of iterations in last call = ", cs$nIter, "\n",sep="") cat("variables selected = ", ifelse(sum(cs$selected) > 0, paste(cs$selected, collapse = " ", sep = ""), ""), "\n",sep="") cat("statistics for variables examined so far \n ") ### details print.default(tble, print.gap = 2, quote = FALSE, ...) cat("\n\n Results of running BMA on final set of selected variables:\n\n") summary(object$bma, ...) invisible(object) } print.iBMA.intermediate.bicreg<- print.iBMA.intermediate.glm<- print.iBMA.intermediate.surv print.iBMA.bicreg<- print.iBMA.glm<- print.iBMA.surv summary.iBMA.intermediate.bicreg<- summary.iBMA.intermediate.glm<- summary.iBMA.intermediate.surv summary.iBMA.bicreg<- summary.iBMA.glm<- summary.iBMA.surv
/scratch/gouwar.j/cran-all/cranData/BMA/R/print.iBMA.R
# old (CF) "print.bic.glm" <- function (x, digits = max(3, getOption("digits") - 3), ...) { cat("\nCall:\n", deparse(x$call), "\n\n", sep = "") cat("\n Posterior probabilities(%): \n") out <- x$probne0 names(out) <- x$namesx print(out, ...) cat("\n Coefficient posterior expected values: \n") out <- x$postmean outnames<- c(NA, x$output.names) names(outnames)[1]<- "Intercept" nms <- NULL for (i in 1:length(outnames)) { if (is.na(outnames[i][1])) nms <- c(nms, names(outnames[i])) else nms <- c(nms, paste(names(outnames[i]), unlist(outnames[i])[-1], sep = ".")) } names(out) <- nms fout<- format(out, digits=digits) fout[is.na(out)]<- "" print.default(fout, print.gap = 2, quote = FALSE, ...) invisible(x) } print.bic.glm <- function (x, digits = max(3, getOption("digits") - 3), ...) { cat("\nCall:\n", deparse(x$call), "\n\n", sep = "") cat("\n Posterior probabilities(%): \n") out <- x$probne0 # names(out) <- if (x$factor.type) x$namesx else colnames(x$mle)[-1] print(out, ...) cat("\n Coefficient posterior expected values: \n") out <- x$postmean # names(out) <- c("(Intercept)", colnames(x$mle)[-1]) fout <- format(out, digits = digits) fout[is.na(out)] <- "" print.default(fout, print.gap = 2, quote = FALSE, ...) invisible(x) } "print.bicreg" <- function (x, digits = max(3, getOption("digits") - 3), ...) { cat("\nCall:\n", deparse(x$call), "\n\n", sep = "") cat("\n Posterior probabilities(%): \n") out <- x$probne0 names(out) <- x$namesx print(out, ...) cat("\n Coefficient posterior expected values: \n") out <- x$postmean names(out) <- c("(Intercept)", x$namesx) print.default(format(out, digits = digits), print.gap = 2, quote = FALSE, ...) invisible(x) } "print.bic.surv" <- function (x, digits = max(3, getOption("digits") - 3), ...) { cat("\nCall:\n", deparse(x$call), "\n\n", sep = "") cat("\n Posterior probabilities(%): \n") out <- x$probne0 names(out) <- x$namesx print(out, ...) cat("\n Coefficient posterior expected values: \n") out <- x$postmean nms <- NULL for (i in 1:length(x$output.names)) { if (is.na(x$output.names[i][1])) nms <- c(nms, names(x$output.names[i])) else nms <- c(nms, paste(names(x$output.names[i]), unlist(x$output.names[i])[-1], sep = ".")) } names(out) <- nms print.default(format(out, digits = digits), print.gap = 2, quote = FALSE, ...) invisible(x) } "summary.bic.glm" <- function (object, n.models = 5, digits = max(3, getOption("digits") - 3), conditional = FALSE, display.dropped = FALSE, ...) { x<- object cat("\nCall:\n", deparse(x$call), "\n\n", sep = "") if (display.dropped & x$reduced) { cat("\nThe following variables were dropped prior to averaging:\n") cat(x$dropped) cat("\n") } n.models <- min(n.models, x$n.models) sel <- 1:n.models cat("\n ", length(x$postprob), " models were selected") cat("\n Best ", n.models, " models (cumulative posterior probability = ", round(sum(x$postprob[sel]), digits), "): \n\n") x$namesx<- c("Intercept", x$namesx) nms <- length(x$namesx) ncx <- length(unlist(x$assign)) nvar <- rep(0, times = n.models) for (i in 1:(nms-1)) nvar <- nvar + as.numeric(as.vector(rbind(rep(1, length(x$assign[[i+1]]))) %*% (t(x$mle[sel, x$assign[[i+1]], drop = FALSE] != 0)) > 0)) modelposts <- format(round(x$postprob[sel], 3), digits = 3) coeffs <- t(x$mle[sel, , drop = FALSE]) cfbic <- rbind(x$bic[sel], coeffs) cfbicf <- format(cfbic, digits = digits) coeffsf <- cfbicf[-1, , drop = FALSE] bic <- cfbicf[1, , drop = FALSE] postmeans <- format(x$postmean, digits = digits) postsds <- format(x$postsd, digits = digits) postmeans[is.na(x$postmean)]<- "" postsds[is.na(x$postsd)]<- "" if (conditional) { cpostmeans <- format(x$condpostmean, digits = digits) cpostsds <- format(x$condpostsd, digits = digits) cpostmeans[is.na(x$condpostmean)]<- "" cpostsds[is.na(x$condpostsd)]<- "" } varposts <- format(round(x$probne0, 1), digits = 3) strlength <- nchar(coeffsf[1, 1]) decpos <- nchar(unlist(strsplit(coeffsf[2, 1], "\\."))[1]) offset <- paste(rep(" ", times = decpos - 1), sep = "", collapse = "") offset2 <- paste(rep(" ", times = decpos + 1), sep = "", collapse = "") modelposts <- paste(offset, modelposts, sep = "") nvar <- paste(offset2, nvar, sep = "") dotoffset <- round(max(nchar(coeffsf))/2) zerocoefstring <- paste(paste(rep(" ", times = dotoffset), collapse = "", sep = ""), ".", sep = "") coeffsf[coeffs == 0] <- zerocoefstring coeffsf[is.na(coeffs)]<- "" avp <- NULL outnames<- c(NA, x$output.names) names(outnames)[1]<- "Intercept" varposts<- c("100",varposts) for (i in 1:nms) { avp <- rbind(avp, varposts[i]) if (!is.na(outnames[[i]][1])) avp <- rbind(avp, cbind(rep("", times = length(x$assign[[i]])))) } top <- cbind(postmeans, postsds) if (conditional) top <- cbind(top, cpostmeans, cpostsds) top <- cbind(top, coeffsf) atop <- NULL for (i in 1:nms) { if (!is.na(outnames[[i]][1])) atop <- rbind(atop, rbind(rep("", times = ncol(top)))) atop <- rbind(atop, top[x$assign[[i ]], ]) } top <- cbind(avp, atop) linesep <- rep("", times = ncol(top)) offset <- c("", "", "") if (conditional) offset <- c(offset, c("", "")) bottom <- rbind(c(offset, nvar), c(offset, bic), c(offset, modelposts)) out <- rbind(top, linesep, bottom) vnames <- NULL for (i in 1:nms) { vnames <- c(vnames, names(outnames[i])) blnk <- paste(rep(" ", times = nchar(names(outnames[i]))), collapse = "") if (!is.na(outnames[i][1])) vnames <- c(vnames, paste(blnk, unlist(outnames[i])[-1], sep = ".")) } row.names(out) <- c(vnames, "", "nVar", "BIC", "post prob") colnms <- c("p!=0", " EV", "SD") if (conditional) colnms <- c(colnms, "cond EV", "cond SD") colnms <- c(colnms, paste("model ", 1:n.models, sep = "")) dimnames(out)[[2]] <- colnms print.default(out, print.gap = 2, quote = FALSE, ...) if(!is.null(object$na.action) && object$na.action > 0) cat("\n ", object$na.action, " observations deleted due to missingness.\n") } "summary.bicreg" <- function (object, n.models = 5, digits = max(3, getOption("digits") - 3), conditional = FALSE, display.dropped = FALSE, ...) { x<- object cat("\nCall:\n", deparse(x$call), "\n\n", sep = "") if (display.dropped & x$reduced) { cat("\nThe following variables were dropped prior to averaging:\n") cat(x$dropped) cat("\n") } n.models <- min(n.models, x$n.models) sel <- 1:n.models cat("\n ", length(x$postprob), " models were selected") cat("\n Best ", n.models, " models (cumulative posterior probability = ", round(sum(x$postprob[sel]), digits), "): \n\n") nms <- length(x$namesx) + 1 r2 <- format(round(x$r2[sel]/100, 3), digits = 3) nvar <- rbind(rep(1, length(x$namesx) + 1)) %*% t(x$ols[sel, ] != 0) - 1 modelposts <- format(round(x$postprob[sel], 3), digits = 3) coeffs <- t(x$ols[sel, ]) cfbic <- rbind(x$bic[sel], coeffs) cfbicf <- format(cfbic, digits = digits) coeffsf <- cfbicf[-1, ] bic <- cfbicf[1, ] dotoffset <- round(max(nchar(coeffsf))/2) zerocoefstring <- paste(paste(rep(" ", times = dotoffset), collapse = "", sep = ""), ".", sep = "") coeffsf[coeffs == 0] <- zerocoefstring postmeans <- format(x$postmean, digits = digits) postsds <- format(x$postsd, digits = digits) if (conditional) { cpostmeans <- format(x$condpostmean, digits = digits) cpostsds <- format(x$condpostsd, digits = digits) } varposts <- format(round(c(100, x$probne0), 1), digits = 3) strlength <- nchar(coeffsf[1, 1]) decpos <- nchar(unlist(strsplit(coeffsf[1, 1], "\\."))[1]) offset <- paste(rep(" ", times = decpos - 1), sep = "", collapse = "") offset2 <- paste(rep(" ", times = decpos + 1), sep = "", collapse = "") r2 <- paste(offset, r2, sep = "") modelposts <- paste(offset, modelposts, sep = "") nvar <- paste(offset2, nvar, sep = "") top <- cbind(varposts, postmeans, postsds) if (conditional) top <- cbind(top, cpostmeans, cpostsds) top <- cbind(top, coeffsf) linesep <- rep("", times = ncol(top)) offset <- c("", "", "") if (conditional) offset <- c(offset, c("", "")) bottom <- rbind(c(offset, nvar), c(offset, r2), c(offset, bic), c(offset, modelposts)) out <- rbind(top, linesep, bottom) row.names(out) <- c("Intercept", x$namesx, "", "nVar", "r2", "BIC", "post prob") colnms <- c("p!=0", " EV", "SD") if (conditional) colnms <- c(colnms, "cond EV", "cond SD") colnms <- c(colnms, paste("model ", 1:n.models, sep = "")) dimnames(out)[[2]] <- colnms print.default(out, print.gap = 2, quote = FALSE, ...) } "summary.bic.surv" <- function (object, n.models = 5, digits = max(3, getOption("digits") - 3), conditional = FALSE, display.dropped = FALSE, ...) { x<- object cat("\nCall:\n", deparse(x$call), "\n\n", sep = "") if (display.dropped & x$reduced) { cat("\nThe following variables were dropped prior to averaging:\n") cat(x$dropped) cat("\n") } n.models <- min(n.models, x$n.models) sel <- 1:n.models cat("\n ", length(x$postprob), " models were selected") cat("\n Best ", n.models, " models (cumulative posterior probability = ", round(sum(x$postprob[sel]), digits), "): \n\n") nms <- length(x$namesx) ncx <- length(unlist(x$assign)[-1]) nvar <- rep(0, times = n.models) for (i in 1:nms) nvar <- nvar + as.numeric(as.vector(rbind(rep(1, length(x$assign[[i + 1]]))) %*% (t(x$mle[sel, x$assign[[i + 1]], drop = FALSE] != 0)) > 0)) modelposts <- format(round(x$postprob[sel], 3), digits = 3) coeffs <- t(x$mle[sel, , drop = FALSE]) cfbic <- rbind(x$bic[sel], coeffs) cfbicf <- format(cfbic, digits = digits) coeffsf <- cfbicf[-1, , drop = FALSE] bic <- cfbicf[1, , drop = FALSE] postmeans <- format(x$postmean, digits = digits) postsds <- format(x$postsd, digits = digits) if (conditional) { cpostmeans <- format(x$condpostmean, digits = digits) cpostsds <- format(x$condpostsd, digits = digits) } varposts <- format(round(x$probne0, 1), digits = 3) strlength <- nchar(coeffsf[1, 1]) decpos <- nchar(unlist(strsplit(coeffsf[1, 1], "\\."))[1]) offset <- paste(rep(" ", times = decpos - 1), sep = "", collapse = "") offset2 <- paste(rep(" ", times = decpos + 1), sep = "", collapse = "") modelposts <- paste(offset, modelposts, sep = "") nvar <- paste(offset2, nvar, sep = "") dotoffset <- round(max(nchar(coeffsf))/2) zerocoefstring <- paste(paste(rep(" ", times = dotoffset), collapse = "", sep = ""), ".", sep = "") coeffsf[coeffs == 0] <- zerocoefstring avp <- NULL for (i in 1:nms) { avp <- rbind(avp, varposts[i]) if (!is.na(x$output.names[[i]][1])) avp <- rbind(avp, cbind(rep("", times = length(x$assign[[i + 1]])))) } top <- cbind(postmeans, postsds) if (conditional) top <- cbind(top, cpostmeans, cpostsds) top <- cbind(top, coeffsf) atop <- NULL for (i in 1:nms) { if (!is.na(x$output.names[[i]][1])) atop <- rbind(atop, rbind(rep("", times = ncol(top)))) atop <- rbind(atop, top[x$assign[[i + 1]], ]) } top <- cbind(avp, atop) linesep <- rep("", times = ncol(top)) offset <- c("", "", "") if (conditional) offset <- c(offset, c("", "")) bottom <- rbind(c(offset, nvar), c(offset, bic), c(offset, modelposts)) out <- rbind(top, linesep, bottom) vnames <- NULL for (i in 1:nms) { vnames <- c(vnames, names(x$output.names[i])) blnk <- paste(rep(" ", times = nchar(names(x$output.names[i]))), collapse = "") if (!is.na(x$output.names[i][1])) vnames <- c(vnames, paste(blnk, unlist(x$output.names[i])[-1], sep = ".")) } row.names(out) <- c(vnames, "", "nVar", "BIC", "post prob") colnms <- c("p!=0", " EV", "SD") if (conditional) colnms <- c(colnms, "cond EV", "cond SD") colnms <- c(colnms, paste("model ", 1:n.models, sep = "")) dimnames(out)[[2]] <- colnms print.default(out, print.gap = 2, quote = FALSE, ...) } # duplicate (CF ?) `summary.bicreg` <- function (object, n.models = 5, digits = max(3, getOption("digits") - 3), conditional = FALSE, display.dropped = FALSE, ...) { x <- object cat("\nCall:\n", deparse(x$call), "\n\n", sep = "") if (display.dropped & x$reduced) { cat("\nThe following variables were dropped prior to averaging:\n") cat(x$dropped) cat("\n") } n.models <- min(n.models, x$n.models) sel <- 1:n.models cat("\n ", length(x$postprob), " models were selected") cat("\n Best ", n.models, " models (cumulative posterior probability = ", round(sum(x$postprob[sel]), digits), "): \n\n") nms <- length(x$namesx) + 1 r2 <- format(round(x$r2[sel]/100, 3), digits = 3) nvar <- rbind(rep(1, length(x$namesx) + 1)) %*% t(x$ols[sel, , drop = FALSE ] != 0) - 1 modelposts <- format(round(x$postprob[sel], 3), digits = 3) coeffs <- t(x$ols[sel, ,drop=FALSE]) cfbic <- rbind(x$bic[sel], coeffs) cfbicf <- format(cfbic, digits = digits) coeffsf <- cfbicf[-1, ,drop=FALSE] bic <- cfbicf[1, ] dotoffset <- round(max(nchar(coeffsf))/2) zerocoefstring <- paste(paste(rep(" ", times = dotoffset), collapse = "", sep = ""), ".", sep = "") coeffsf[coeffs == 0] <- zerocoefstring postmeans <- format(x$postmean, digits = digits) postsds <- format(x$postsd, digits = digits) if (conditional) { cpostmeans <- format(x$condpostmean, digits = digits) cpostsds <- format(x$condpostsd, digits = digits) } varposts <- format(round(c(100, x$probne0), 1), digits = 3) strlength <- nchar(coeffsf[1, 1]) decpos <- nchar(unlist(strsplit(coeffsf[1, 1], "\\."))[1]) offset <- paste(rep(" ", times = decpos - 1), sep = "", collapse = "") offset2 <- paste(rep(" ", times = decpos + 1), sep = "", collapse = "") r2 <- paste(offset, r2, sep = "") modelposts <- paste(offset, modelposts, sep = "") nvar <- paste(offset2, nvar, sep = "") top <- cbind(varposts, postmeans, postsds) if (conditional) top <- cbind(top, cpostmeans, cpostsds) top <- cbind(top, coeffsf) linesep <- rep("", times = ncol(top)) offset <- c("", "", "") if (conditional) offset <- c(offset, c("", "")) bottom <- rbind(c(offset, nvar), c(offset, r2), c(offset, bic), c(offset, modelposts)) out <- rbind(top, linesep, bottom) row.names(out) <- c("Intercept", x$namesx, "", "nVar", "r2", "BIC", "post prob") colnms <- c("p!=0", " EV", "SD") if (conditional) colnms <- c(colnms, "cond EV", "cond SD") colnms <- c(colnms, paste("model ", 1:n.models, sep = "")) dimnames(out)[[2]] <- colnms print.default(out, print.gap = 2, quote = FALSE, ...) }
/scratch/gouwar.j/cran-all/cranData/BMA/R/summary.bic.R
##'Toolkit for Bayesian estimation of the dependence structure ##' in Multivariate Extreme Value parametric models, with possible use of Bayesian model Averaging techniques ##' Includes a Generic MCMC sampler. Estimation of the marginal ##' distributions is a prerequisite, \emph{e.g.} using one of the ##' packages ##' \code{ismev}, \code{evd}, \code{evdbayes} or \code{POT}. This package handles data sets which are assumed ##' to be marginally unit-Frechet distributed. ##' @name BMAmevt-package ##' @aliases BMAmevt ##' @docType package ##' @title Bayesian Model Averaging for Multivariate Extremes ##' @author Anne Sabourin ##' @seealso \code{evdbayes} NULL ##' Five-dimensional air quality dataset recorded in Leeds(U.K.), during five winter seasons. ##' ##' Contains 590 daily maxima of five air pollutants ##' (respectively PM10, N0, NO2, 03, S02) recorded in Leeds (U.K.) ##' during five winter seasons (1994-1998, November-February included). Contains NA's. ##' @name winterdat ##' @docType data ##' @format A \eqn{590*5} matrix. ##' @source \url{https://uk-air.defra.gov.uk/} ##' @keywords datasets NULL ##' Multivariate data set with margins following unit Frechet ##' distribution. ##' ##' Five-variate dataset which margins follow unit-Frechet distributions, ##' obtained from \code{\link{winterdat}} by probability integral ##' transform. ##' Marginal estimation was performed by maximum likelihood estimation of a Generalized Pareto distribution over marginal thresholds corresponding to \eqn{0.7} quantiles, following ##' Cooley \emph{et.al.} (see reference below). The \dQuote{non extreme} part of the marginal distributions was approximated by the empirical distribution function. ##' @name frechetdat ##' @docType data ##' @format A \eqn{601*5} - matrix: ##' @references COOLEY, D., DAVIS, R. and NAVEAU, P. (2010). The pairwise beta distribution: A flexible parametric multivariate model for extremes. \emph{Journal of Multivariate Analysis 101, 2103-2117}. ##' @keywords datasets NULL ##' Tri-variate \sQuote{angular} data set approximately distributed according to a multivariate extremes angular distribution ##' ##' The data set is constructed from coordinates (columns) \eqn{1,2,3} of \code{\link{frechetdat}}. ##' It contains 100 angular points corresponding to the tri-variate vectors \eqn{V=(X,Y,Z)} with largest \eqn{L^1}{L1} norm (\eqn{||V||=X+Y+Z}). The angular points are obtained by \sQuote{normalizing}: \emph{e.g.}, ##' \eqn{x=X/||V||}. Thus, ##' each row in \code{Leeds} is a point on the two-dimensional simplex : \eqn{x+y+z=1}. ##' @name Leeds ##' @docType data ##' @format A \eqn{100*3} - matrix. ##' @references COOLEY, D., DAVIS, R. and NAVEAU, P. (2010). The pairwise beta distribution: A flexible parametric multivariate model for extremes. \emph{Journal of Multivariate Analysis 101, 2103-2117} ##' ##' RESNICK, S. (1987). Extreme values, regular variation, and point processes, \emph{Applied Probability. A, vol. 4, ##' Series of the Applied Probability Trust. Springer-Verlag, New York}. ##' @keywords datasets NULL ##' Multivariate data set with margins following unit Frechet distribution. ##' ##' The data set contains 590 (transformed) daily maxima of five air pollutants recorded in Leeds (U.K.) during five winter seasons (1994-1998). Contains NA's. Marginal transformation to unit Frechet was performed by Cooley \emph{et.al.} (see reference below).##' \eqn{x=X/||V||}. Thus, ##' @name Leeds.frechet ##' @docType data ##' @format A \eqn{590*5} - matrix: ##' @references COOLEY, D., DAVIS, R. and NAVEAU, P. (2010). The pairwise beta distribution: A flexible parametric multivariate model for extremes. \emph{Journal of Multivariate Analysis 101, 2103-2117} ##' @keywords datasets NULL ##' Default hyper-parameters for the Pairwise Beta model. ##' ##' The log-transformed dependence parameters are a priori independent, Gausian. This list contains the means and standard deviation for the prior distributions. ##' @name pb.Hpar ##' @docType data ##' @format A list of four parameters: \describe{ ##' ##' \item{ mean.alpha}{ ##' Mean of the log-transformed global dependence parameter. Default to \eqn{0} ) ##' } ##' ##' \item{sd.alpha}{Standard deviation of the log-transformed global dependence parameter. Default to \eqn{3}. ##' } ##'\item{mean.beta}{ ##' Mean of each of the log-transformed pairwise dependence parameters. ##' Default to \eqn{0} ) ##' } ##' \item{sd.beta}{Standard deviation of each of the log-transformed ##' pairwise dependence parameters. Default to \eqn{3}. ##' } ##' } ##' @keywords dataset NULL ##' Default MCMC tuning parameter for the Pairwise Beta model. ##' ##' The proposal for the log-transformed parameters are Gaussian, centered at the current value. ##' @name pb.MCpar ##' @docType data ##' @format A list made of a single element: \code{sd}, ##' the standard deviation of the normal proposition kernel (on the log-transformed parameter). Default to ##' \eqn{0.35}. ##' @keywords dataset NULL ##' Default hyper-parameters for the NL model. ##' ##' The logit-transformed parameters for the NL model are \emph{a priori} ##' Gaussian. The list has the same format as \code{\link{pb.Hpar}}. ##' @name nl.Hpar ##' @docType data ##' @format A list of four parameters: \describe{ ##' \item{mean.alpha, sd.alpha}{% ##' Mean and standard deviation of the normal prior distribution for the logit-transformed global dependence parameter \eqn{alpha} . ##' Default to \eqn{0, 3}. ##' } ##' \item{mean.beta, sd.beta}{Idem for the pairwise dependence parameters. ##' } ##' } ##' @keywords dataset NULL ##' Default MCMC tuning parameter for the Nested Asymmetric logistic model. ##' ##' The proposals (on the logit-scale) are Gaussian, centered aroud the current value. ##' @name nl.MCpar ##' @docType data ##' @format A list made of a single element: \code{sd}. The standard deviation of the normal proposition kernel centered at the (logit-transformed) ##' current state. Default to \eqn{0.35}. ##' @keywords dataset NULL ##' Example of valid Dirichlet mixture parameter for tri-variate extremes. ##' ##' The Dirichlet mixture density has three components, the center of mass of the three columns of \code{Mu}, with weights \code{wei} is \eqn{(1/3,1/3,1/3)}: the centroid of the two dimensional unit simplex. ##' @name dm.expar.D3k3 ##' @docType data ##' @format A list made of \describe{ ##' \item{Mu}{ A \eqn{3*3} matrix, which rows sum to one, such that the ##' center of mass of the three column vectors (weighted with \code{wei}) is the centroid of the simplex: each column is the center of a Dirichlet mixture component. } ##' \item{wei}{A vector of length three, summing to one: the mixture weights} ##' \item{lnu}{ A vector of length three: the logarithm of the concentration parameters. } ##' } ##' @keywords dataset NULL ##' @importFrom grDevices dev.new gray ##' NULL
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/BMAmevt-package.R
##' Approximation of a model marginal likelihood by Laplace method. ##' ##' The posterior mode is either supplied, or approximated by numerical optimization. For an introduction about Laplace's method, see \emph{e.g.} ##' Kass and Raftery, 1995 and the references therein. ##' @title Laplace approximation of a model marginal likelihood by Laplace approximation. ##' @param mode The parameter vector (on the \dQuote{unlinked} scale, \emph{i.e.} before transformation to the real line) ##' which maximizes the posterior density, or \code{NULL}. ##' @param npar The size of the parameter vector. Default to four. ##' @param likelihood The likelihood function, \emph{e.g.} \code{\link{dpairbeta}} or \code{\link{dnestlog}} ##' @param prior The prior density (takes an \dQuote{unlinked} parameter as argument and returns the density of the \code{linked} parameter) ##' @param Hpar The prior hyper parameter list. ##' @param data The angular dataset ##' @param link The link function, from the \dQuote{classical} or \dQuote{unlinked} parametrization onto the real line. (\emph{e.g.} \code{log} for the PB model, an \code{logit} for the NL model) ##' @param unlink The inverse link function (\emph{e.g.} \code{exp} for the PB model and \code{invlogit} for the NL model) ##' @param method The optimization method to be used. Default to \code{"L-BFGS-B"}. ##' @return A list made of \describe{ ##'\item{mode}{the parameter (on the unlinked scale) deemed to maximize the posterior density. This is equal to the argument if the latter is not null.} ##' \item{value}{The value of the posterior, evaluated at \code{mode}.} ##' \item{laplace.llh}{The logarithm of the estimated marginal likelihood} ##' \item{invHess}{The inverse of the estimated hessian matrix at \code{mode}} ##' } ##' @references KASS, R.E. and RAFTERY, A.E. (1995). Bayes Factors. ##' \emph{Journal of the American Statistical Association, ##' Vol. 90, No.430} ##' @export laplace.evt <- function(mode=NULL, npar=4, likelihood, prior, Hpar, data, link, unlink, method="L-BFGS-B") { if(!is.null(mode)) npar <- length(mode) post.fun <- function(par) { likelihood(x=data, par=unlink(par), log=TRUE, vectorial=FALSE)+ prior(type="d",par=unlink(par),log=TRUE, Hpar=Hpar) } if(is.null(mode)){ count <- 0 converged <- FALSE init <- rep(0,npar) while(!converged & count<20 ){ count <- count+1 opt.test <-tryCatch( optim(par=init, fn=post.fun, gr = NULL, method=method, control = list(fnscale=-1), hessian = FALSE), error=function(e) return( list(convergence=100)) ) if(as.integer(opt.test$convergence)>0) init <- rnorm(npar,mean=0,sd=1) else{ converged <- TRUE opt <- opt.test } } if(as.integer(opt.test$convergence)){ res <- list(mode=unlink(rep(0,npar)), value=-Inf, laplace.llh = -Inf, invHess = 1) return(res) } linkedmode <- opt$par mode <- unlink(opt$par) } else linkedmode <- link(mode) fmode <- post.fun(linkedmode) hessian.mode <- tryCatch(optimHess(par=linkedmode, fn=post.fun, gr = NULL), error=function(e) return( 1)) hessian.mode <- 0.5*(hessian.mode+t(hessian.mode)) ## Inverse.test <- try(VarCov.t <- -solve(hessian.mode), ##-chol2inv(chol(hessian.mode)),#, ## silent = TRUE) Inverse.test <- try(invHess.t <- chol2inv(chol(-hessian.mode)),#, silent = TRUE) if (!inherits(Inverse.test, "try-error")) { invHess <- Inverse.test diag(invHess) <- ifelse(diag(invHess) <= 0, .Machine$double.eps, diag(invHess)) } else { cat("\nWARNING: singular Hessian matrix in Laplace.evt\n ") cat("\nNB: Identity matrix is used instead of the inverse , consider the result with care.\n") invHess <- diag(npar) } laplace.llh <- NA # options(warn = -1) laplace.test <- try(LML0 <- npar/2 * log(2 * pi) + 0.5 * log(det(invHess)) + as.vector(fmode), silent = TRUE) if (is.finite(laplace.test[1])) laplace.llh <- laplace.test[1] # options(warn = 0) res <- list(mode=mode, value=fmode, laplace.llh = laplace.llh, invHess = invHess, hessian=hessian.mode) return(res) }
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/Laplace.evt.r
##' @rdname MCpriorIntFun.pb ##' @inheritParams MCpriorIntFun ##' @export MCpriorIntFun.nl <- function(Nsim=200, FUN=function(par,...){par}, store=TRUE, Hpar = get("nl.Hpar"), show.progress = floor(seq(1, Nsim, length.out = 20 ) ) , Nsim.min=Nsim, precision=0, ...) { mcres <- MCpriorIntFun(Nsim=Nsim, prior=prior.nl, Hpar=Hpar, dimData=3, FUN=FUN, show.progress=show.progress, Nsim.min=Nsim.min, precision=precision, ...) return(mcres) } ## ############ intialize ############ ## start.time=proc.time() ## not.finite=0 ## param = rparameter.prior.nl(n=1, Hpar=Hpar) ## temp.res=FUN(param,...) ## dim.res=dim(temp.res) ## if(is.null(dim.res) || (sum(dim.res!=1) ==1) ) ## { ## emp.mean=rep(0,length(temp.res)) ## } ## else ## { ## store=FALSE ## emp.mean=array(0,dim=dim.res) ## } ## emp.variance= emp.mean ## emp.variance.unNorm=emp.variance ## if(store) ## { ## stored.vals=matrix(0,nrow=Nsim,ncol=length(emp.mean)) ## } ## ################ start MC ######## ## n=1 ## while((n<=Nsim) && ## ( (n<=Nsim.min) || ## (max( sqrt(emp.variance/(n-1))/emp.mean) > precision) ) ## ) ## { ## ## show progression ## if(any(n==be.patient) || n==50 || n==100 || n==200) ## { ## cat(paste((n-1), "iterations done", "\n", sep = " " )) ## } ## flag=TRUE ## count = 0 ## while(flag & (count<=10)) ## { ## param = rparameter.prior.nl(n=1, Hpar=Hpar) ## temp.res=FUN(param,...) ## flag = (any(sapply(as.vector(temp.res), ## function(x){ ! is.finite(x) } ) ) ) ## if(flag) ## { ## not.finite = not.finite+1 ## } ## count = count+1 ## } ## if(flag) ## stop("more than 10 non finite values produced") ## cur.res=temp.res ##FUN(alpha=param[1],beta=param[2:(ll+1)],...) ## new.emp.mean=emp.mean+1/n*(cur.res-emp.mean) ## emp.variance.unNorm=emp.variance.unNorm + ## (cur.res-new.emp.mean)* (cur.res- emp.mean) ## emp.variance = emp.variance.unNorm/(n-1) ## emp.mean = new.emp.mean ## if(store) ## { ## stored.vals[n,]= as.vector(cur.res) ## } ## n=n+1 ## } ## ######### end MC ######### ## end.time = proc.time() ## elapsed=end.time-start.time ## print(elapsed) ## if(store) ## { ## return(list( stored.vals=stored.vals[1:(n-1), ], ## elapsed=elapsed, ## nsim = n-1, ## emp.mean=emp.mean, ## emp.stdev=sqrt(emp.variance), ## est.error=sqrt(emp.variance/(n-1)), ## not.finite = not.finite)) ## } ## else ## { ## return(list( nsim=n-1, ## elapsed=elapsed, ## emp.mean=emp.mean, ## emp.stdev=sqrt(emp.variance), ## est.error=sqrt(emp.variance/(n-1)), ## not.finite = not.finite )) ## } ## }
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/MCpriorIntFun.nl.r
##' Wrappers for \code{\link{MCpriorIntFun}} with argument ##' \code{prior=prior.pb} or \code{prior=prior.nl} ##' ##' @title Generic Monte-Carlo integration under the prior distribution in the PB and NL models. ##' @inheritParams MCpriorIntFun ##' @param Hpar Hyper-parameters for the PB prior (in \code{MCpriorIntFun.pb}) or the NL prior (\code{MCpriorIntFun.nl}). See ##' \code{\link{pb.Hpar}} and \code{\link{nl.Hpar}} for the required ##' formats. ##' @param dimData Only for the PB model: The dimension of model's \emph{sample} space. The PB parameter space is of dimension \code{choose(dimData,2)+1}. The NL model implemented here is restricted to three-dimensional sample spaces. ##' @return The list returned by function ##' \code{\link{MCpriorIntFun}}. ##' @seealso \code{\link{MCpriorIntFun}} ##' @export MCpriorIntFun.pb <- function(Nsim=200, Hpar = get("pb.Hpar"), dimData=3, FUN=function(par, ... ){ as.vector(par) }, store=TRUE, show.progress = floor(seq(1, Nsim, length.out = 20 ) ), Nsim.min=Nsim, precision = 0, ...) { mcres <- MCpriorIntFun(Nsim=Nsim, prior=prior.pb, Hpar=Hpar, dimData=dimData, FUN=FUN, show.progress=show.progress, Nsim.min=Nsim.min, precision=precision, ...) return(mcres) } ## ############ intialize ############ ## start.time=proc.time() ## not.finite=0 ## param = rparameter.prior.pb(n=1,p=p, Hpar=Hpar) ## temp.res=FUN(param,...) ## dim.res=dim(temp.res) ## if(is.null(dim.res) || (sum(dim.res!=1) ==1) ) ## { ## emp.mean=rep(0,length(temp.res)) ## } ## else ## { ## store=FALSE ## emp.mean=array(0,dim=dim.res) ## } ## emp.variance= emp.mean ## emp.variance.unNorm=emp.variance ## if(store) ## { ## stored.vals=matrix(0,nrow=Nsim,ncol=length(emp.mean)) ## } ## ################ start MC ######## ## n=1 ## while((n<=Nsim) && ## ( (n<=Nsim.min) || ## (max( sqrt(emp.variance/(n-1))/emp.mean) > precision) ) ## ) ## { ## ## show progression ## if(any(n==be.patient) || n==50 || n==100 || n==200) ## { ## cat(paste((n-1), "iterations done", "\n", sep = " " )) ## } ## flag=TRUE ## count = 0 ## while(flag & (count<=10)) ## { ## param = rparameter.prior.pb(n=1,p=p, Hpar=Hpar) ## temp.res=FUN(param,...) ## flag = (any(sapply(as.vector(temp.res), ## function(x){ ! is.finite(x) } ) ) ) ## if(flag) ## { ## not.finite = not.finite+1 ## } ## count = count+1 ## } ## if(flag) ## stop("more than 10 non finite values produced") ## cur.res=temp.res ##FUN(alpha=param[1],beta=param[2:(ll+1)],...) ## new.emp.mean=emp.mean+1/n*(cur.res-emp.mean) ## emp.variance.unNorm=emp.variance.unNorm + ## (cur.res-new.emp.mean)* (cur.res- emp.mean) ## emp.variance = emp.variance.unNorm/(n-1) ## emp.mean = new.emp.mean ## if(store) ## { ## stored.vals[n,]= as.vector(cur.res) ## } ## n=n+1 ## } ## ######### end MC ######### ## end.time = proc.time() ## elapsed=end.time-start.time ## print(elapsed) ## if(store) ## { ## return(list( stored.vals=stored.vals[1:(n-1), ], ## elapsed=elapsed, ## nsim = n-1, ## emp.mean=emp.mean, ## emp.stdev=sqrt(emp.variance), ## est.error=sqrt(emp.variance/(n-1)), ## not.finite = not.finite)) ## } ## else ## { ## return(list( nsim=n-1, ## elapsed=elapsed, ## emp.mean=emp.mean, ## emp.stdev=sqrt(emp.variance), ## est.error=sqrt(emp.variance/(n-1)), ## not.finite = not.finite )) ## } ## }
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/MCpriorIntFun.pb.r
##' Simple Monte-Carlo sampler approximating the integral of \code{FUN} with respect to the prior distribution. ##' ##' The algorithm exits after \eqn{n} iterations, ##' based on the following stopping rule : ##' \eqn{n} is the minimum number of iteration, greater than ##' \code{Nsim.min}, such that the relative ##' error is less than the specified \code{precision}. ##' \deqn{ max (est.esterr(n)/ |est.mean(n)| ) \le \epsilon ,} where ##' \eqn{est.mean(n)} is the estimated mean of \code{FUN} at time ##' \eqn{n}, \eqn{est.err(n)} is the estimated standard ##' deviation of the estimate: ##' \eqn{est.err(n) = \sqrt{est.var(n)/(nsim-1)} }. ##' The empirical variance is computed component-wise and the maximum ##' over the parameters' components is considered. ##' ##' The algorithm exits in any case after \code{Nsim} iterations, if the above condition is not fulfilled before this time. ##' @title Generic Monte-Carlo integration of a function under the prior distribution ##' @param Nsim Maximum number of iterations ##' @inheritParams posteriorMCMC ##' @param dimData The dimension of the model's \emph{sample} space, ##' on which the parameter's dimension may depend. ##' Passed to \code{prior} inside \code{MCintegrateFun} ##' @param FUN A function to be integrated. It may return a vector or an array. ##' @param store Should the successive evaluations of \code{FUN} be stored ? ##' @param show.progress same as in \code{\link{posteriorMCMC}} ##' @param Nsim.min The minimum number of iterations to be performed. ##' @param precision The desired relative precision \eqn{\epsilon}. ##' See \bold{Details} below. ##' @param ... Additional arguments to be passed to \code{FUN}. ##' @return A list made of ##' \itemize{ ##' \item \code{stored.vals} : A matrix with \code{nsim} rows and ##' \code{length(FUN(par))} columns. ##' \item \code{elapsed} : The time elapsed during the computation. ##' \item \code{nsim} : The number of iterations performed ##' \item \code{emp.mean} : The desired integral estimate: the empirical mean. ##' \item \code{emp.stdev} : The empirical standard deviation of the sample. ##' \item \code{est.error} : The estimated standard deviation of the estimate (\emph{i.e.} \eqn{emp.stdev/\sqrt(nsim)}). ##' \item \code{not.finite} : The number of non-finite values obtained (and discarded) when evaluating \code{FUN(par,...)} ##' } ##' @author Anne Sabourin ##' @export MCpriorIntFun <- function(Nsim=200, prior, Hpar, dimData, FUN=function(par,...){as.vector(par)}, store=TRUE, show.progress = floor(seq(1, Nsim, length.out = 20 ) ), Nsim.min=Nsim, precision = 0, ...) { ############ intialize ############ start.time=proc.time() not.finite=0 param = prior(type = "r", n=1, Hpar=Hpar, dimData=dimData) temp.res=FUN(param,...) dim.res=dim(temp.res) if(is.null(dim.res) || (sum(dim.res!=1) ==1) ) { emp.mean=rep(0,length(temp.res)) } else { store=FALSE emp.mean=array(0,dim=dim.res) } emp.variance= emp.mean emp.variance.unNorm=emp.variance if(store) { stored.vals=matrix(0,nrow=Nsim,ncol=length(emp.mean)) } ################ start MC ######## nsim=1 while((nsim<=Nsim) && ( (nsim<=Nsim.min) || (max( sqrt(emp.variance/(nsim-1)) / abs(emp.mean) ) > precision) ) ) { ## show progression if(any(nsim==show.progress)) { cat(paste((nsim-1), "iterations done", "\n", sep = " " )) } flag=TRUE count = 0 while(flag & (count<=50)) { param = prior(type = "r", n=1, Hpar=Hpar, dimData=dimData) temp.res=FUN(param,...) flag = (any(sapply(as.vector(temp.res), function(x){ ! is.finite(x) } ) ) ) if(flag) { not.finite = not.finite+1 } count = count+1 } if(flag) stop("more than 50 non finite values produced in a row") cur.res=temp.res new.emp.mean=emp.mean+1/nsim*(cur.res-emp.mean) emp.variance.unNorm=emp.variance.unNorm + (cur.res-new.emp.mean)* (cur.res- emp.mean) emp.variance = emp.variance.unNorm/(nsim-1) emp.mean = new.emp.mean if(store) { stored.vals[nsim,]= as.vector(cur.res) } nsim=nsim+1 } ######### end MC ######### end.time = proc.time() elapsed=end.time-start.time print(elapsed) if(store) { returned.vals=stored.vals[1:(nsim-1),] } else { returned.vals=0 } return(list( stored.vals= returned.vals, elapsed=elapsed, nsim = nsim-1, emp.mean=emp.mean, emp.stdev=sqrt(emp.variance), est.error=sqrt(emp.variance/(nsim-1)), not.finite = not.finite)) }
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/MCpriorIntFun.r
##' Adds graphical elements to the current plot (on the two-dimensional simplex). ##' ##' Generic graphical tool for obtaining nice plots of the two-dimensional simplex ##' @title Adds graphical elements to a plot of the two dimensional simplex. ##' @inheritParams discretize ##' @param lab1 Character string: label for first component. ##' @param lab2 Character string: label for second component. ##' @param lab3 Character string: label for third component. ##' @param col.polygon The background color outside the simplex. ##' @param axes logical. Should axes be added ? ##' @examples ##' plot.new() ##' add.frame() ##' plot.new() ##' mult.x=sqrt(2); mult.y=sqrt(3/2) ##' plot.window( xlim=c(0,mult.x),ylim=c(0,mult.y), asp=1,bty ="n") ##' add.frame(equi=TRUE) ##' @keywords aplot ##' @export add.frame add.frame <- function(equi=FALSE, lab1="w1",lab2="w2",lab3="w3",npoints=60, col.polygon="black", axes=TRUE) { mult.x <- sqrt(2)*equi+1*(!equi) mult.y <- sqrt(3/2)*equi+1*(!equi) mult.dens <- 1/sqrt(3)*equi+1*(!equi) if(!equi) { ## polygon(x=c(0,1,1,0),y=c(1,0,1,1), ## col=col.polygon, border=gray(0.5)) polygon(x=c(-1/npoints, 1+1/npoints,1+1/npoints, -1/npoints), y=c(1+1/npoints,-1/npoints,1+1/npoints,1+1/npoints), col=col.polygon, border="black") polygon(x=c(-1/npoints, -1/npoints,0, 0, -1/npoints), y=c(1+1/npoints,-1/npoints,-1/npoints,1+1/npoints,1+1/npoints), col=col.polygon, border="black") polygon(x=c(-1/npoints, 1+1/npoints,1+1/npoints, -1/npoints, -1/npoints), y=c(-1/npoints,-1/npoints,0,0,-1/npoints), col=col.polygon, border="black") if(axes) { axis(1, at = c(0, 0.25,0.5, 0.75, 1),tcl=NA,cex.lab=0.5, line=0.1,mgp=c(2,0.5,0),cex.axis=0.8) axis(2, at = c(0,0.25,0.5, 0.75, 1),tcl=NA,cex.lab=0.5, line=0.1,mgp=c(2,0.5,0), cex.axis=0.8) } mtext(lab1, side = 1, line = 2, outer = FALSE,adj=0.8) mtext(lab2, side = 2, line = 2, outer = FALSE,adj=0.8) } else { polygon(x=c(-1/npoints,0,sqrt(2)/2,sqrt(2),sqrt(2)+1/npoints, sqrt(2)+1/npoints,sqrt(2)/2,-1/npoints,-1/npoints), y=c(0,0,sqrt(3/2),0 ,0, sqrt(3/2)+1/npoints,sqrt(3/2)+1/npoints, sqrt(3/2)+1/npoints,0), col=col.polygon, border="black", lwd=0.3) polygon(x=c(-1/npoints,sqrt(2)+1/npoints,sqrt(2)+1/npoints , -1/npoints,-1/npoints), y=c(-1/npoints,-1/npoints,0 , 0,-1/npoints), col=col.polygon, border="black", lwd=0.3) if(axes) { axis(1,at=round(mult.x*c(0, 0.25, 0.5, 0.75, 1),2),tcl=NA, padj=1,cex.axis=0.8,mgp=c(1.5, 0.2, 0), line=0 ) } mtext(lab3,cex=1.2, side = 1, line = 2.5, outer = FALSE,adj=0) mtext(lab1,cex=1.2, side = 1, line = 2.5, outer = FALSE,adj=1) mtext(lab2,cex=1.2, side = 3, line = 0, outer = FALSE,adj=0.5) } }
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/add.frame.r
##' Builds an angular data set, retaining the points with largest radial component. ##' ##' The data set \code{frechetDat} is assumed to be marginally unit Frechet distributed. ##' @title Angular data set generation from unit Frechet data. ##' @param coordinates Index vector of the columns in \code{frechetDat} to be retained to construct the angular data set. ##' @param frechetDat The data set. A matrix: each row is a multivariate record. May contain \var{NA}'s. ##' @param n The number of desired observations in the final angular data set. Should be less than \code{nrow(frechetDat)} ##' @param displ logical. Should the angular data set be plotted ? ##' @param invisible logical. Should the result be returned as invisible ? ##' @param add logical. Only used when \code{displ==TRUE}. Should the points be added to the current plot ? ##' @param ... Additional graphical parameters and arguments to be passed to function \code{\link[graphics]{plot.window}} and \code{\link[graphics]{points}}. ##' @return The angular data set: A \code{n*length(coordinates)} matrix, containing values between zero and one, which rows sum to one: Each row is thus a point on the unit simplex of dimension \code{length(coordinates)-1}. Returned as invisible if \code{invisible==TRUE}. ##' @import stats ##' @export ##' @include add.frame.r ##' @include transf.to.equi.r ##' @inheritParams add.frame ##' @examples \dontrun{cons.angular.dat()} ##' @keywords datagen manip multivariate cons.angular.dat <- function(coordinates=c(1,2,3), frechetDat=get("frechetdat"), n=100, displ=TRUE, invisible=TRUE, add=FALSE, lab1="w1", lab2="w2",lab3="w3",npoints=60, col.polygon="white", ...) { ff <- na.omit(frechetDat[, coordinates]) rr <- apply(ff, 1, sum) ww <- ff/rr ssl <- sort.list(rr, decreasing = T) sortW <- ww[ssl,] dat <- sortW[1:n,] if(displ) ###&(length(coordinates)==3)) { Points=(apply((dat[,1:2]),1,transf.to.equi)) mult.x=sqrt(2) mult.y=sqrt(3/2) if(!add) { plot.new() plot.window( xlim=c(0,mult.x),ylim=c(0,mult.y), asp=1, bty ="n",adj=1, ...) } points(Points[1,],Points[2,],... ) if(!add) { ## add.frame(equi=TRUE, ## lab1=lab1, lab2=lab2,lab3=lab3,npoints=npoints, ## col.polygon=col.polygon) ## if(!equi) ## { ## segments(0,0,0,1) ## segments(0,0,1,0) ## segments(0,1,1,0) ## axis(1, at = c(0, 0.25,0.5, 0.75, 1),tcl=NA,cex.lab=0.5, ## line=0.1,mgp=c(2,0.5,0),cex.axis=0.8) ## axis(2, at = c(0,0.25,0.5, 0.75, 1),tcl=NA,cex.lab=0.5, ## line=0.1,mgp=c(2,0.5,0), cex.axis=0.8) ## mtext(lab1, side = 1, line = 2, outer = FALSE,adj=0.8) ## mtext(lab2, side = 2, line = 2, outer = FALSE,adj=0.8) ## } ## else ## { segments(0,0,sqrt(2),0) segments(0,0,sqrt(2)/2,sqrt(3/2)) segments(sqrt(2)/2,sqrt(3/2),sqrt(2),0) axis(1,at=round(mult.x*c(0, 0.25, 0.5, 0.75, 1),2),tcl=NA, padj=1,cex.axis=0.8,mgp=c(1.5, 0.2, 0), line=0 ) mtext(lab3,cex=1.2, side = 1, line = 2.5, outer = FALSE,adj=0) mtext(lab1,cex=1.2, side = 1, line = 2.5, outer = FALSE,adj=1) mtext(lab2,cex=1.2, side = 3, line = 0, outer = FALSE,adj=0.5) ## } } } if(invisible) { return( invisible(dat)) } return(dat) }
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/cons.angular.dat.r
##' Only valid in the tri-variate case ##' ##' @title Plots the Dirichlet mixture density on a discretization grid ##' @inheritParams ddirimix ##' @inheritParams discretize ##' @param marginal logical. If \code{TRUE}, the angular density corresponds to the marginal intensity measure, over coordinates \code{coord}. Otherwise, it is only the projection of the full dimensional angular measure (hence the moments constraints is not satisfied anymore). ##' @param coord A vector of size 3: ##' the indices of the coordinates upon which the marginalization is to be done. ##' @param invisible Logical: should the result be returned as invisible ? ##' @param displ Logical: should a plot be issued ? ##' @param ... Additional arguments to be passed to ##' \code{\link{dgridplot}}. ##' @return The discretized density ##' @export ddirimix.grid <- function(par= get("dm.expar.D3k3"), wei=par$wei, Mu= par$Mu, lnu=par$lnu, npoints=30, eps=10^(-3), equi=TRUE, marginal=TRUE, coord=c(1,2,3), invisible=TRUE, displ=TRUE, ... ) { if(length(coord) != 3) {warning("coord is not of length 3; density projection/marginalisation will be made on the two-dimensional face corresponding to the first two indices.") if(length(coord)<3) stop("at least 3 coordinates should be specified") coord <- coord[1:3] } if(!marginal & (nrow(Mu)>3)) { equi <- FALSE mu3 <- 1 - Mu[coord[1],] - Mu[coord[2],] Mu <- rbind(matrix(Mu[coord[1:2],], nrow=2), matrix(mu3, nrow = 1)) } else { MMu <- matrix(Mu[coord,], nrow=3) p <- nrow(Mu) multconst <- apply(MMu,2,sum) if(length(multconst)>1) Mu <- MMu %*% diag(1/multconst) else Mu <- MMu/multconst lnu <- lnu + log(multconst) wei <- p/3*wei*multconst } discr <- discretize(npoints=npoints,eps=eps,equi=equi) X.grid <- discr $X Y.grid <- discr $ Y nu <- exp(lnu) k <- length(lnu) Density <- double(npoints*npoints) C.out <- .C("ddirimix_grid", as.double(X.grid), as.double(Y.grid), as.integer(npoints), as.double(Mu), as.integer(k), as.double(wei), as.double(nu), as.integer(equi), result=Density) Density <- matrix(C.out $ result, ncol = npoints, byrow=F) mult.dens <- 1/sqrt(3) * equi + 1 * (!equi) if(displ) { dgridplot(density=mult.dens*Density, ## npoints=npoints, eps=eps, equi=equi, ...) } if(!invisible) return(mult.dens*Density) else invisible(mult.dens*Density) } ## proj.fun=function(u,v) ## { ## if(!equi) ## { ## if(u<0 || v<0 || u+v>1) ## return ( 0 ) ## else ## return(ddirimix(x=c(u,v,1-(u+v)), ## wei=wei, ## Mu=Mu, ## lnu=lnu, ## log=FALSE, ## vectorial=FALSE)) ## } ## else ## { ## W=transf.to.rect(c(u,v)) ## if(W[1]<0 || W[2]<0 || sum(W)>1) ## return(0) ## else ## return (ddirimix(x=c(W[1],W[2],1-sum(W)), ## wei=wei, ## Mu=Mu, ## lnu=lnu, ## log=FALSE, ## vectorial=FALSE)) ## } ## } ## discr <- discretize(npoints=npoints,eps=eps,equi=equi) ## X_grid <- discr$X ## Y_grid <- discr$Y ## wrapper <- ## function(x, y, my.fun,...) #internal ## { ## sapply(seq_along(x), FUN = function(i) my.fun(x[i], y[i],...)) ## } ## Density= outer(X_grid,Y_grid, ## FUN=wrapper, ## my.fun=proj.fun) ## @param project Logical: if \code{TRUE} (default) the density is the projection of a higher dimensional dirichlet mixture. Otherwise, it is the angular density on S3 associated to the exponent measure( i.e. intensity of the limiting point process) obtained by marginalisation of the initial exponent measure. In such a case, integration is done over all directions but the one indicated by \code{coord}.
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/ddirimix.grid.r
##' Plots a univariate Dirichlet mixture (in other words, a Beta mixture) angular density for extreme bi-variate data. ##' ##' @title Univariate projection or marginalization of a Dirichlet mixture density on on \code{[0,1]} ##' @inheritParams ddirimix ##' @param coord A vector of size 2: ##' the indices of the coordinates upon which the marginalization or projection is to be done if the dimension of the sample space is greater than two. ##' @param marginal logical. If \code{TRUE}, the angular density corresponds to the marginal intensity measure of the extreme Poisson process, over coordinates \code{coord}. Otherwise, it is only the projection of the full dimensional angular measure (hence the moments constraints is not satisfied anymore). ##' @param npoints number of points on the 1D discretization grid. ##' @param eps the minimum value ( = 1- the maximum value) of the grid points. ##' @param invisible Logical: should the result be returned as invisible ? ##' @param displ Logical: should a plot be issued ? ##' @param add Logical: should the density be added to the currently active plot ? ##' @param ... Additional arguments to be passed to \code{plot} ##' @return The discretized density on \code{[eps, 1-eps]} (included in [0,1]) ##' @export ddirimix.grid1D <- function(par= get("dm.expar.D2k4"), wei=par$wei, Mu= par$Mu, lnu=par$lnu, npoints=30, eps=10^(-3), coord=c(1,2), marginal=TRUE, invisible=TRUE, displ=TRUE, add=FALSE, ... ) { dim <- nrow(Mu) if(dim>2) { if(length(coord) !=2) { warning("coord is not of length 2") coord <- coord[1:2] } MMu <- matrix(Mu[coord,], nrow=length(coord)) if(marginal) { multconst <- apply(MMu,2,sum) if(length(multconst)>1) Mu <- MMu %*% diag(1/multconst) else Mu <- MMu/multconst lnu <- lnu + log(multconst) wei <- dim/2*wei*multconst } else { Mu <- MMu } } if(eps<0 | eps>=1) warning("eps should be in [0,1)") X.grid <- seq(eps,1-eps, length.out=npoints) nu <- exp(lnu) k <- length(lnu) Density <- double(npoints) C.out <- .C("ddirimix_grid1D", as.double(X.grid), as.integer(npoints), as.double(Mu), as.integer(k), as.double(wei), as.double(nu), result=Density) if(displ){ if(!add) plot(X.grid, C.out$result, type="l", ...) else lines(X.grid, C.out$result, type="l", ...) } if(!invisible) return( C.out$result) else invisible( C.out$result) }
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/ddirimix.grid1D.r
##' Likelihood function (spectral density on the simplex) ##' and angular data sampler in the Dirichlet mixture model. ##' ##' The spectral probability measure defined on the simplex ##' characterizes the ##' dependence structure of multivariate extreme value models. ##' The parameter list for a mixture ##' with \eqn{k} components, is made of ##' \describe{ ##' \item{Mu}{ The density kernel centers ##' \eqn{\mu_{i,m}, 1\le i \le p, 1\le m \le k}{\mu[1:p,1:k]} : ##' A \eqn{p*k} matrix, ##' which columns sum to one, and such that \code{Mu \%*\% wei=1}, ##' for the moments constraint to be satisfied. ##' Each column is a Dirichlet kernel center. ##' } ##' \item{wei}{ The weights vector for the kernel densities: ##' A vector of \eqn{k} positive numbers summing to one.} ##' \item{lnu}{The logarithms of the shape parameters ##' \eqn{nu_m, 1\le m \le k}{\nu[1:k] } for the density kernels: ##' a vector of size \eqn{k}.} ##' } ##' The moments constraint imposes that the barycenter of the columns in ##' \code{Mu}, with weights \code{wei}, be the center of the simplex. ##' @title Angular density/likelihood function in the Dirichlet Mixture ##' model. ##' @param x An angular data set which may be reduced to a single point: ##' A \eqn{n*p} matrix or a vector of length \code{p}, where ##' \eqn{p} is the dimension of the sample space and \eqn{n} is ##' the sample size. ##' Each row is a point on the simplex, so that each row sum to one. ##' The error tolerance is set to \code{1e-8} ##' in this package. ##' @param par The parameter list for the Dirichlet mixture model. ##' @param wei Optional. If present, overrides the value of ##' \code{par$wei}. ##' @param Mu Optional. If present, overrides the value of ##' \code{par$Mu}. ##' @param lnu Optional. If present, overrides the value of ##' \code{par$lnu}. ##' @param log Logical: should the density or the likelihood be returned on the log-scale ? ##' @param vectorial Logical: Should a vector of size \eqn{n} or a single value be returned ? ##' @return \code{ddirimix} returns the likelihood as a single number if ##' \code{vectorial ==FALSE}, or as a vector of size ##' \code{nrow(x)} containing the likelihood of each angular data point. ##' If \code{log == TRUE}, the log-likelihood is returned instead. ##' \code{rdirimix} returns a matrix with \code{n} points and ##' \code{p=nrow(Mu)} columns. ##' @export ddirimix <- function(x=c(0.1,0.2,0.7), par, #= get("dm.expar.D3k3"), wei=par$wei, Mu= par$Mu, lnu=par$lnu, log=FALSE, vectorial=FALSE) { k=length(lnu) nu=exp(lnu) if(is.vector(x)) { x=matrix(x,nrow=1) } n=nrow(x) p=ncol(x) if(vectorial) {result=as.double(rep(0,n))} else { result=as.double(0) } C.out=.C("d_dirimix",as.double(t(x)),as.integer(n),as.integer(k), as.integer(p),as.double(wei),as.double(Mu),as.double(nu), as.integer(log),as.integer(vectorial),is_error=as.integer(0), result=result) if (C.out$is_error==1) { stop("in ddirimix: some data or kernels not on the simplex or misbehaved memory allocation.") } if(C.out$is_error==2) stop("in d_dm_dat: misbehaved memory allocation") return(C.out$result) }
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/ddirimix.r
##' Plots contours or gray-scale level sets of a spectral density on the two-dimensional simplex. ##' ##' The function interprets the \code{density} matrix as ##' \code{\link[graphics]{contour}} does, \emph{i.e.} as a table of ##' \code{f(X[i], Y[j])} values, with column 1 at the bottom, ##' where \code{X} and \code{Y} are ##' returned by \code{\link{discretize}} and \code{f} is the ##' density function. ##' @title Image and/or Contour plots of spectral densities in trivariate extreme value models ##' @inheritParams discretize ##' @inheritParams add.frame ##' @param density A \code{npoints*npoints} matrix containing the ##' density's values scattered on the discretization grid defined by ##' \code{npoints, equi, eps} (see \code{\link{discretize}}). ##' @param add Logical. Should the contours be added to a currently active plot ? ##' @param breaks Set of breakpoints for the gray scale colors. ##' See \code{\link[graphics]{image}} ##' @param levels Levels to which plot the contour lines. See \code{\link[graphics]{contour}} ##' @param labcex \code{cex} for contour labeling. ##' See \code{\link[graphics]{contour}}. ##' @param col.lines The color to be used for the contour lines. ##' @param background Logical. Should a the background be filled ##' inside the simplex \emph{via} a call to ##' \code{\link[graphics]{image}} ? ##' @param ... Additional graphical parameters and arguments to be passed ##' to \code{\link[graphics]{contour}} and \code{\link[graphics]{image}}. ##' @examples ##' wrapper <- function(x, y, my.fun,...) ##' { ##' sapply(seq_along(x), FUN = function(i) my.fun(x[i], y[i],...)) ##' } ##' ##' grid <- discretize(npoints=40,eps=1e-3,equi=FALSE) ##' ##' Density <- outer(grid$X,grid$Y,FUN=wrapper, ##' my.fun=function(x,y){10*((x/2)^2+y^2)*((x+y)<1)}) ##' ##' dgridplot(density= Density,npoints=40, equi=FALSE) ##' @import graphics ##' @export dgridplot <- function(density= matrix(5*sin(1/73*(1:(40*40)))^2, ncol=40, nrow=40), # npoints=40, eps=10^(-3), equi=TRUE, add=FALSE, breaks=seq(-0.01,5.1,length.out=1000), levels=seq(0,6, length.out=13), col.lines="black", labcex= 0.8, background=FALSE, col.polygon=gray(0.5), lab1="w1", lab2="w2", lab3="w3", ...) { npoints <- dim(density)[1] discr=discretize(npoints=npoints,eps=eps,equi=equi) X_grid=discr$X Y_grid=discr$Y mult.dens=1## 1/sqrt(3)*equi+1*(!equi) mult.x=sqrt(2)*equi+1*(!equi) mult.y=sqrt(3/2)*equi+1*(!equi) if(add) { contour(x = X_grid, y = Y_grid, density *mult.dens, levels=levels, labels = NULL,col=col.lines,labcex=labcex, add=TRUE, ...) add.frame (equi=equi, lab1="",lab2="",lab3="",npoints=npoints, col.polygon=col.polygon) } else { plot.new() plot.window( xlim=c(0,mult.x),ylim=c(0,mult.y), asp=1, bty ="n",adj=1, mgp=c(1.5, 0.4, 0) ) if(background) { image(X_grid,Y_grid,density*mult.dens,breaks=breaks, # col=gray((1:(length(breaks)-1))/(length(breaks)-1)), xlab="", ylab="", cex.axis=0.8, xaxt ="n", yaxt = "n", bty ="n",adj=1,asp=1, ...) contour(x = X_grid, y = Y_grid, density*mult.dens , levels=levels, labels = NULL, labcex=labcex, col=col.lines, add=TRUE,...) } else { contour(x = X_grid, y = Y_grid, density*mult.dens , xlab="", ylab="", xaxt ="n", yaxt = "n", bty ="n", adj=1,asp=1, levels=levels, labels = NULL, labcex=labcex, col=col.lines, add=FALSE,...) } add.frame(equi=equi, lab1=lab1, lab2=lab2,lab3=lab3,npoints=npoints, col.polygon=col.polygon) } }
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/dgridplot.R
##' @rdname diagnose.PBNLpostsample ##' @export diagnose <- function(obj, ...) UseMethod("diagnose") ## @param Hpar The hyper parameter list. ## @param dat The angular dataset ## @param model one of the character strings \code{"pairbeta"} or\code{"nestlog"} ## @param save Logical. Should the result be saved ? ## @param name.save The name under whic hthe result is to be saved. ## @param save.directory The directory where the result is to be saved, without trailing slash. ##' The method issues several convergence diagnostics, in the particular case when the PB or the NL model is used. The code may be easily modified for other angular models. ##' ##' @title Diagnostics for the MCMC output in the PB and NL models. ##' @param obj an object of class \code{postsample}: posterior sample, as produced by ##' \code{\link{posteriorMCMC.pb}} or \code{\link{posteriorMCMC.nl}} ##' @param true.par The true parameter. If \code{NULL}, it is considered as unknown. ##' @inheritParams discretize ##' @inheritParams add.frame ##' @inheritParams posterior.predictive3D ##' @param autocor.max The maximum accepted auto-correlation for two successive parameters in the thinned sample. ##' @param default.thin The default thinning interval if the above condition cannot be satisfied. ##' @param predictive Logical. Should the predictive density be plotted ? ##' @param xlim.density The \code{xlim} interval for the density plots, ##' on the transformed scale. ##' @param ylim.density the \code{ylim} intervals for the density plots. ##' @param plot Logical. Should plots be issued ? ##' @param save Logical: should the result be saved ? Only used if the posterior sample has been saved itself (\emph{i.e.} if it contains \code{save=TRUE} in its arguments list) ##' @param ... Additional parameters to be passed to the functions ##' \code{\link{posterior.predictive.pb}} or \code{\link{posterior.predictive.nl}}. ##' @return A list made of \describe{ ##' \item{predictive}{The posterior predictive, or \code{0} if \code{predictive=FALSE} } ##' \item{effective.size}{the effective sample size of each component} ##' \item{heidelTest}{The first part of the Heidelberger and Welch test (stationarity test). The first row indicates \dQuote{success} (1) or ##' rejection(0), the second line shows the number of iterations to be discarded, the third line is the p-value of the test statistic.} ##' \item{gewekeTest}{The test statistics from the Geweke stationarity test.} ##' \item{gewekeScore}{The p-values for the above test statistics} ##' \item{thin}{The thinning interval retained} ##' \item{correl.max.thin}{The maximum auto-correlation for a lag equal to \code{thin} } ##' \item{linked.est.mean}{The posterior mean of the transformed parameter (on the real line)} ##' \item{linked.est.sd}{The standard deviation of the transformed parameters} ##' \item{est.mean}{The posterior mean of the original parameters, as they appears in the expression of the likelihood} ##' \item{sample.sd}{the posterior standard deviation of the original parameters} ##' } ##' @export diagnose.PBNLpostsample <- function(obj, true.par=NULL, from =NULL, to=NULL, autocor.max = 0.2, default.thin=50, xlim.density=c(-4,4), ylim.density=NULL, plot=TRUE, predictive=FALSE, save=TRUE, ## npoints=60, eps=10^(-3), ## equi=TRUE, ## save=FALSE, ## name.save="pb.predictive", ## save.directory = "~", ... ) { dat <- obj$arguments$dat Hpar <- obj$arguments$Hpar model <- obj$arguments$name.model likelihood <- obj$arguments$likelihood prior <- obj$arguments$prior save.directory <- obj$arguments$save.directory if(model=="pairbeta") { link <- function(x){log(x)} unlink <- function(x){exp(x)} } if(model=="nestlog") { link <- logit invlink <- invlogit } orig.vals <- obj$stored.vals vals <- link(obj$stored.vals) Nbin <- obj$arguments$Nbin Nsim <- obj$arguments$Nsim if(is.null(to)) to <- Nsim if(is.null(from)) from <- Nbin+1 vals <- vals[(from-Nbin):(to-Nbin),] orig.vals <- orig.vals[(from-Nbin):(to-Nbin),] autocorr <- acf(vals, lag.max=default.thin, plot=plot) $ acf goodLags <- which( apply(abs(autocorr),1,max) < autocor.max) if(length(goodLags) == 0) thin <- default.thin else thin <- min(goodLags) correl.thin <- max(autocorr[thin,,]) effective.size <- effectiveSize(vals) heidelTest <- apply(vals,2,heidel.diag, eps=0.1, pvalue= 0.05)[1:3,] gewekeTest <- geweke.diag(vals) gewekeScore <- pnorm(abs(gewekeTest[[1]]), lower.tail=FALSE) linked.cum.mean <- apply(vals,2,cumsum)/(1:(to-from+1)) cum.mean <- apply(orig.vals,2,cumsum)/(1:(to-from+1)) linked.est.mean <- linked.cum.mean[to-from+1,] est.mean <- cum.mean[to-from+1,] linked.est.sd <- apply(vals,2,sd) est.sd <- apply(orig.vals,2,sd) ## est.error.effsize = apply(orig.vals,2,function(X){ ## summary(mcmc(X) )$statistics[4]}) if(plot) { for(i in 1:ncol(vals)) { dev.new() plot(from:to, linked.cum.mean[,i],type ="l", main=paste("par", toString(i), sep=" "), ylab="") } dprior.Talpha <- function(x) { return( dnorm(x, mean=Hpar$mean.alpha, sd=Hpar$sd.alpha) ) } dprior.Tbeta <- function(x) { return( dnorm(x, mean=Hpar$mean.beta, sd=Hpar$sd.beta) ) } XX <- seq(xlim.density[1], xlim.density[2], length.out=100) YY.alpha <- sapply(XX,dprior.Talpha) YY.beta <- sapply(XX,dprior.Tbeta) dev.new() par(mfrow = c(ceiling(sqrt(ncol(vals))), ceiling(ncol(vals) / ceiling(sqrt(ncol(vals))) ))) for(i in 1:ncol(vals)) { plot(density(vals[,(i)]), main="", xlab="",ylab="", ylim=ylim.density, xlim=xlim.density ,# add=FALSE, lty=1,lwd=2,col="black") if(i==1){ lines(XX,YY.alpha,lty=3, lwd=1.5, col="black") title(main=switch(model, pairbeta="log(alpha)", nestlog="logit(alpha)")) } else{ lines(XX,YY.beta,lty=3, lwd=1.5, col="black") title(main=switch(model, pairbeta= paste("log(beta[",i-1,"])", sep=""), nestlog=paste("logit(beta[",i-1,"])", sep="")) ) } if( ! is.null(true.par) ) abline(v=link(true.par[i]),col="black",lwd=2, lty=2) } } time.idx = (from-Nbin):(to-Nbin) kept.idx = time.idx[ (time.idx %% thin == 0) ] if(predictive) { if(plot) dev.new() if(model == "pairbeta") predictiveDens <- posterior.predictive.pb( post.sample = obj, thin = thin, displ=plot, ... ) if(model=="nestlog") predictiveDens <- posterior.predictive.nl( post.sample = obj, thin = thin, displ=plot, ... ) } if(predictive) { pred.return <- predictiveDens } else { pred.return <- NA } result.list <- list(predictive = pred.return, size.subsample.predictive= length(kept.idx ), effective.size=effective.size, heidelTest = heidelTest, gewekeTest=gewekeTest, gewekeScore=gewekeScore, thin = thin, from=from,to=to, thinnedCorrel=autocorr[thin,,], correl.max.thin=correl.thin, linked.est.mean=linked.est.mean, linked.sample.sd=linked.est.sd, est.mean=est.mean, sample.sd=est.sd, model=model ) class(result.list) <- "PBNLdiagnostic" if(save) save <- obj$arguments$save if(save) { name.save <- paste(obj$arguments$name.save, ".diagnose", sep="") assign(name.save, result.list) save(result.list, list = name.save, file=paste( obj$arguments$save.directory, "/", name.save, ".rda", sep = "")) loglist <- c(list(true.par=true.par, autocor.max =autocor.max, default.thin=default.thin), list(...) ) name.log=paste(name.save, ".log", sep="") assign(name.log, loglist ) save(loglist,list=name.log, file=paste( obj$arguments$save.directory, "/", name.log, ".rda", sep = "")) } return(result.list) } ##' @export print.PBNLdiagnostic <- function(x,...) { cat("\n Model:", x$model, "\n", sep="\t") cat("\nPredictive angular density:", ifelse(is.matrix(x$predictive), "not shown", "not computed"), "\n", sep=" ") cat("Thinning:", x$thin, "\t from:",x$from, "\tto:", x$to, "\n", sep=" ") cat("Corresponding autocorrelations \n", diag(x$thinnedCorrel),"\n", sep="\t") cat("Size of thinned sample used for the predictive:", x$size.subsample.predictive, "\n", sep=" ") cat("Effective sizes (marginally):\n", x$effective.size,"\n", sep="\t") cat("Heidelberger and Welches (stationarity part) p-values:\n", x$heidelTest[3,], "\n", sep="\t") cat("Geweke p-values:\n", x$gewekeScore,"\n", sep="\t") cat("posterior mean (transformed sample, defined on the real line)\n", x$linked.est.mean, sep="\t") cat("\nposterior standard deviation (idem)\n", x$linked.sample.sd,"\n", sep="\t") cat("posterior mean (original parametrization)\n", x$est.mean,"\n", sep="\t") cat("posterior standard deviation (idem)\n", x$sample.sd,"\n", sep="\t") } ## posterior.predictive.compare <- ## function( pb.postsample = pb.postsample.Leeds, ## nl.postsample=nl.postsample.Leeds, ## dat=Leeds, ## true.par=NULL, ## npoints=60, eps=10^(-3), ## equi=TRUE, ## from =NULL,## 10e+3, ## to=NULL,##20e+3, ## autocor.max = 0.2, ## default.thin=10, ## ... ## ) ## { ## dev.new() ## par(mai=c(0.7,0.1,0.7,0.1), mar=c(3.5,3,2,3)) ## if(equi) ## { ## Points = (apply((dat[, 1:2]), 1, transf.to.equi)) ## } ## else ## { ## Points=t(dat) ## } ## pb.predictive <- ## postsample.diagnose(model= "pairbeta", ##"nestlog" ## postsample = pb.postsample, ## dat=dat, ## true.par=true.par, ## npoints=npoints, eps=10^(-3), ## equi=equi, ## from =from, ## 10e+3, ## to=to, ##20e+3, ## autocor.max = autocor.max, ## default.thin=default.thin, ## predictive=TRUE, ## plot=FALSE, ## save=FALSE, ... ## ) ## points(Points[1, ], Points[2, ], col=gray(0.5), ## cex=0.8, pch=21, bg="white") ## dev.new() ## par(mai=c(0.7,0.1,0.7,0.1), mar=c(3.5,3,2,3)) ## nl.predictive <- ## postsample.diagnose(model= "nestlog", ## postsample = nl.postsample, ## dat=dat, ## true.par=true.par, ## npoints=npoints, eps=10^(-3), ## equi=equi, ## from =from, ## 10e+3, ## to=to, ##20e+3, ## autocor.max = autocor.max, ## default.thin=default.thin, ## predictive=TRUE, ## plot=FALSE, ## save=FALSE,... ## ) ## points(Points[1, ], Points[2, ], ## col=gray(0.5), cex=0.8, pch=21, bg="white") ## }
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/diagnose.r
##' Builds a discretization grid covering the two-dimensional unit simplex, with specified number of points and minimal distance from the boundary. ##' ##' The \code{npoints*npoints} grid covers either ##' the equilateral representation of ##' the simplex, or the right angled one. ##' In any case, the grid is ##' \emph{rectangular}: some nodes lie outside the triangle. ##' Density computations on such a grid should handle the case when ##' the point passed as argument is outside the simplex (typically, ##' the function should return zero in such a case). ##' @title Discretization grid builder. ##' @param npoints The number of grid nodes on the squared grid containing the desired triangle. ##' @param eps Positive number: minimum ##' distance from any node inside the simplex to the simplex boundary ##' @param equi logical. Is the simplex represented as an equilateral triangle (if \code{TRUE}) or a right triangle (if \code{FALSE}) ? ##' @note In case \code{equi==TRUE}, \code{epsilon} is the minimum ##' distance from any node inside the simplex to the simplex boundary, ##' \emph{after transformation} to the right-angled representation. ##' @return A list containing two elements: \code{X} and \code{Y}, vectors of size \code{npoints}, the Cartesian coordinates of the grid nodes. ##' @export discretize <- function(npoints=40,eps=1e-3,equi=FALSE) { if(!equi) { X=seq(eps,1-2*eps,length.out=npoints) Y=X return(list(X=X,Y=Y)) } else { ## X=sqrt(2)*seq(eps,1-eps,length.out=npoints) ### Y=sqrt(3/2)*seq(eps/3,1-2*eps/3,length.out=npoints) ##Y=seq(sqrt(2/3)*eps, sqrt(3/2) -sqrt(2)*eps, length.out=npoints) X=seq(eps*3/sqrt(2), sqrt(2)-eps*3/sqrt(2), length.out=npoints) Y=seq(sqrt(3/2)*eps,sqrt(3/2)-eps*3/sqrt(2) ,length.out=npoints) return(list(X=X,Y=Y)) } }
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/discretize.R
##' @inheritParams dnestlog ##' @rdname dpairbeta.grid ##' @export dnestlog.grid <- function(par, npoints=50,eps=1e-3, equi = TRUE, displ=TRUE, invisible=TRUE, ... ) { discr <- discretize(npoints=npoints,eps=eps,equi=equi) X.grid <- discr $X Y.grid <- discr $ Y if((length(par) !=4) || any(par>1) || any(par<0) ) stop("misspecified parameters") C.out <- .C("d_trinestlog_grid", as.double(X.grid), as.double(Y.grid), as.integer(length(X.grid)), as.double(par[1]), as.double(par[2:4]), as.integer(equi), result = as.double(rep(0,npoints*npoints)) ) density <- matrix(C.out $ result, ncol = npoints, byrow=F) mult.dens <- 1/sqrt(3)*equi + 1*(!equi) density <- mult.dens*density if(displ) { dgridplot(density=density, ## npoints=npoints, eps=eps, equi=equi, ...) } if(invisible) {invisible(density)} else return(density) } ## dnestlog.grid <- ## function(par=c(0.5,0.5), ## npoints=30,eps=10^(-3), equi = FALSE, ## displ=FALSE, invisible=FALSE, ## ... ) ## { ## discr <- discretize(npoints=npoints,eps=eps,equi=equi) ## X.grid <- discr $X ## Y.grid <- discr $ Y ## C.out <- .C("d_nestlog_grid", as.double(X.grid), ## as.double(Y.grid), ## as.integer(length(X.grid)), ## as.double(par[1]), ## as.double(par[2]), as.integer(equi), ## result = as.double(rep(0,npoints*npoints)) ) ## density <- matrix(C.out $ result, ncol = npoints, byrow=F) ## mult.dens <- 1/sqrt(3)*equi + 1*(!equi) ## density <- mult.dens*density ## if(displ) ## { ## dgridplot(density=density, ## npoints=npoints, ## eps=eps, ## equi=equi, ## ...) ## } ## if(invisible) ## {invisible(density)} ## else ## return(density) ## }
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/dnestlog.grid.r
##' @export ##' @rdname dpairbeta dnestlog <- function(x=rbind(c(0.1,0.3,0.6),c(0.3,0.3,0.4)) , par=c(0.5,0.5,0.2,0.3), log=FALSE, vectorial = TRUE) { xvect = as.double(as.vector(t(x) )) if(is.vector(x)) { dim = as.integer( length(x) ) n = as.integer(1) } else { dim = as.integer(ncol(x) ) n = as.integer(nrow(x) ) } if((length(par) !=4) || any(par>1) || any(par<0) ) stop("misspecified parameters") alpha <- as.double(par[1]) beta <- as.double(par[2:4]) if(vectorial) { result=double(n) } else { result=double(1) } C.out = .C("d_trinestlog", x=xvect, pnx = n, alpha = alpha, beta = beta, take_logs = as.integer(log), return_vector = as.integer(vectorial), result = result) ##browser() return(C.out$result) } ## dtrinestlog(x=rbind(c(0.1,0.3,0.6),c(0.3,0.4,0.3)) , ## par=c(0.5,0.1,0.4,0.8), ## log=FALSE, vectorial = T)
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/dnestlog.r
##' The two functions compute respectively the NL and PB spectral ##' densities, in the three-dimensional case, on a discretization grid. ##' A plot is issued (optional). ##' ##' @title PB and NL spectral densities on the two-dimensional simplex ##' @inheritParams discretize ##' @inheritParams dpairbeta ##' @param displ logical. Should a plot be produced ? ##' @param invisible logical. If \code{TRUE}, the result is returned as \code{invisible}. ##' @param ... Additional arguments to be passed to \code{\link{dgridplot}} ##' @return A \code{npoints*npoints} matrix containing the ##' considered density's values on the grid. ##' The row (resp. column) indices increase ##' with the first (resp. second) coordinate on the simplex. ##' @note If \code{equi==TRUE}, the density is relative to the Hausdorff ##' measure on the simplex itself: the values obtained with ##' \code{equi = FALSE} are thus divided by ##' \eqn{\sqrt 3}. ##' @export ##' @examples ##' ##' dpairbeta.grid(par=c( 0.8, 8, 5, 2), ##' npoints=70, eps = 1e-3, equi = TRUE, displ = TRUE, invisible=TRUE) ##' ##' ## or ... ##' ##' Dens <- dpairbeta.grid(par=c(0.8, 8, 5, 2), ##' npoints=70, eps = 1e-3, equi = TRUE, displ = FALSE) ##' Grid=discretize(npoints=70,eps=1e-3,equi=TRUE) ##' dev.new() ##' image(Grid$X, Grid$Y, Dens) ##' contour(Grid$X, Grid$Y, Dens, add=TRUE) ##' add.frame(equi=TRUE, npoints=70, axes=FALSE) ##' ##' dpairbeta.grid <- function(par, npoints=50, eps = 1e-3, equi = TRUE, displ = TRUE, invisible=TRUE, ...) { discr <- discretize(npoints=npoints,eps=eps,equi=equi) X.grid <- discr $X Y.grid <- discr $ Y if(length(par) != 4 | any(par<0)) stop("misspecified parameter") C.out <- .C("d_pairbeta_grid", as.double(X.grid), as.double(Y.grid), as.integer(length(X.grid)), as.double(par[1]), as.double(par[-1]), as.integer(equi), result = as.double(rep(0,npoints*npoints)) ) density <- matrix(C.out $ result, ncol = npoints, byrow=F) mult.dens <- 1/sqrt(3)*equi + 1*(!equi) density <- mult.dens*density if(displ) { dgridplot(density=density, ## npoints=npoints, eps=eps, equi=equi, ...) } if(invisible) {invisible(density)} else return(density) }
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/dpairbeta.grid.r
#' Likelihood function (spectral density) and random generator in the Pairwise Beta and NL models. #' #' Applies to angular data sets. The density is given with respect to the Lebesgue measure on \eqn{R^{p-1}}{R^(p-1)}, where \code{p} is the number of columns in \code{x} (or the length of \code{x}, if the latter is a single point). #' @title Pairwise Beta (PB) and Nested Asymmetric Logistic (NL) distributions #' @inheritParams prior.pb #' @param x An angular data set (may be reduced to a single point). #' A \code{npoints*dimData} matrix #' (or a vector of length(\code{dimData}). #' For the NL model, \code{dimData} is always \eqn{3}. #' Each row is a point on the simplex, so that the sum of each rows #' should equal \eqn{1} (the error tolerance is set to \code{1e-8} #' in this package). #' @param par The parameter for the Pairwise Beta or the Nested Logistic density. #' \itemize{ #' \item In the Pairwise Beta model, \code{par} is of length #' \code{choose(p,2)+1}. The first element is the global dependence #' parameter, the subsequent ones are the pairwise dependence #' parameters, in lexicographic order (\emph{e.g.} #' \eqn{\beta_{12}, \beta_{13}, \beta_{23}}). #' \item In the NL model, \code{par} is a vector of length four with components between zero and one. The first one is the global dependence parameter, the three subsequent ones are the pairwise dependence parameters, again in lexicographic order. #' } #' @param log Logical. Should the density be returned on the log scale ? #' @param vectorial Logical. #' Should a vector or a single value be returned ? #' @return The value returned by the likelihood function is imposed (see #' \emph{e.g.} \code{\link{posteriorMCMC}}. #' In contrast, the random variable have unconstrained output format. #' \itemize{ #' \item \code{dpairbeta} returns the likelihood as a single number if \code{vectorial ==FALSE}, or as a vector of size \code{nrow(x)} containing the likelihood of each angular data point. If \code{log == TRUE}, the log-likelihood is returned instead. #' \code{rpairbeta} returns a matrix with \code{n} #' rows and \code{dimData} columns. #' \item \code{dnestlog} returns the likelihood as a single number if \code{vectorial ==FALSE}, or as a vector of size \code{nrow(x)} containing the likelihood of each angular data point. If \code{log == TRUE}, the log-likelihood is returned instead. #' \code{rnestlog} returns a matrix with \code{n} rows and \code{dimData} columns if \code{return.points==FALSE} (the default). Otherwise, #' a list is returned, with two elements: #' \itemize{ #' \item \code{Angles}: The angular data set #' \item \code{Points}: The full tri-variate data set above #' \code{threshold} (\emph{i.e.} \code{Angles} #' multiplied by the radial components) #' } #' } #' @export #' @keywords distribution models multivariate dpairbeta<- function(x , par=c(1,rep(2,choose(4,2)+1)), log = FALSE, vectorial = TRUE) { ##evaluates the pairwise beta density for a matrix of points ##(one row = one point) ##on the simplex with given pairwise parameters for a ##given dimension p , with respect to the lebesgue measure #####!!!!!!!!on the projected unit simplex ##### xvect = as.double(as.vector(t(x) )) if(is.vector(x)) { p = as.integer( length(x) ) n = as.integer(1) } else { p = as.integer(ncol(x) ) n = as.integer(nrow(x) ) } if(length(par) <2) stop("misspecified parameter") alpha= as.double (par[1]) beta = as.double (par[-1]) if(vectorial) { result=as.double(rep(0,n)) } else { result=as.double(0) } C.out = .C("d_pairbeta", xvect, n , p , alpha, beta, as.integer(log), as.integer(vectorial),is_error =as.integer(0),result = result) ##browser() if (C.out$is_error==1) { ## browser() stop("in d_pairbeta: misbehaved memory allocation") } return(C.out$result) }
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/dpairbeta.r
emp_density<- function(dat, L=NULL) ##density with respect to Lebesgue ##on the projected unit simplex {ndat=nrow(dat) if(is.null(L)) { L=max(5,floor(sqrt(ndat/100))) #200 } Mdat=L*dat[,c(1,2)] Dens=matrix(0,nrow=L,ncol=L) for(k in (1:ndat)) { i0=ceiling(Mdat[k,1]) j0=ceiling(Mdat[k,2]) Dens[i0,j0]= Dens[i0,j0]+1#*((i0+j0)<=L) } return(list(l_grid=L, emp_dens=Dens/(ndat)*L^2 )) }
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/emp_density.r
##' simple MC integration on the simplex. ##' ##' @title Probability of joint threshold exceedance, in the Dirichlet Mixture model, given a DM parameter. ##' @param N The number of MC iterations to be performed ##' @param par the DM parameter, as a list ##' @param thres the multivariate threshold ##' @param plot logical: should convergence diagnostic plots be issued ? ##' @param add logical: should the plot be added to a current one ? ##' @return a list made of \describe{ ##' \item{mean}{the mean estimate from the MC sample} ##' \item{esterr}{the estimated standard deviation of the estimator} ##' \item{estsd}{The estimated standard deviation of the MC sample} ##' } ##' @export excessProb.condit.dm <- function(N=100, par=get("dm.expar.D3k3"), thres=rep(100,3), plot=FALSE, add=FALSE) { res <- rep(0,N) for(i in 1:N) { w <- as.vector(rdirimix(n=1,par=par)) res[i] <-nrow(par$Mu )*min(w/thres) } cummean <- cumsum(res)/(1:N) estsd <- sqrt(cumsum((res-cummean)^2)/(1:N) ) esterr <- estsd/sqrt(1:N) if(plot) { ymax=max(cummean+1.1*estsd) ymin=min(cummean-1.1*estsd) if(!add) plot(1:N, cummean, ylim=c(ymin,ymax), type="l",col="blue") else lines(1:N, cummean,col="blue") lines(1:N, cummean+esterr, col="blue", lty=2) lines(1:N, cummean-esterr,col="blue", lty=2) ## lines(1:N, cummean+estsd, col="red") ## lines(1:N, cummean-estsd,col="red") } return(list(mean=cummean[N], esterr=esterr[N], estsd=estsd[N])) }
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/excessProb.condit.dm.r
##' The exponent function \eqn{V} for a max-stable variable \eqn{M} is such that \eqn{P(M<x) = exp(-V(x))} ##' ##' @title Exponent function in the NL model. ##' @param par The parameter for the NL distribution, ##' respectively of length two or four. ##' @param x A vector of three extended positive real numbers ##' @return the value of \eqn{V(x)} for \eqn{x=thres}. ##' @export expfunction.nl <- function(par=c(0.3,0.4,0.5,0.6), x=10*rep(1,3)) { alpha=par[1] beta=par[-1] U1 = ( x[1]^(-1./(alpha*beta[1]) ) + x[2]^(-1./(alpha*beta[1]) ) )^(beta[1]) U2 = ( x[1]^(-1./(alpha*beta[2]) ) + x[3]^(-1./(alpha*beta[2]) ) )^(beta[2]) U3 = ( x[2]^(-1./(alpha*beta[3]) ) + x[3]^(-1./(alpha*beta[3]) ) )^(beta[3]) return( 2^(-alpha) * ( (U1 + U2 + U3)^(alpha) ) ) } ##' @title Probability of joint threshold excess in the NL model ##' @param par The Nested logistic parameter: of length four. ##' @param thres a positive vector of size three. ##' @return The approximate probability of joint excess, valid when at least one coordinate of \code{thres} is large ##' @export excessProb.condit.nl <- function(par=c(0.3,0.4,0.5,0.6), thres=rep(100,3)) { expfun <- expfunction.nl zeros <- which(thres==0) if(length(zeros)==3) stop(" x must have at least one positive element") if(length(zeros)==2) { x <- thres x[zeros] <- Inf return(expfun(par=par, x=x)) } if(length(zeros)==1) { nonzeros <- c(1,2,3)[-zeros] x <- thres x[zeros] <- Inf T0 <- expfun(par=par,x=x) x1 <- x x1[nonzeros[1]] <- Inf T1 <- expfun(par=par, x=x1) x2 <- x x2[nonzeros[2]] <- Inf T2 <- expfun(par=par, x=x2) return( T1 + T2 - T0 ) } T1 <- expfun(par=par,x=thres) T2 <- expfun(par=par,x=c(thres[1], Inf,Inf)) + expfun(par=par,x=c(Inf, thres[2],Inf)) + expfun(par=par,x=c(Inf, Inf, thres[3])) T3 <- expfun(par=par,x=c(thres[1], thres[2],Inf)) + expfun(par=par,x=c(thres[1], Inf, thres[3])) + expfun(par=par,x=c(Inf, thres[2], thres[3])) return( T1 + T2 - T3 ) } ##' @title Posterior distribution the probability of joint threshold excess, in the NL model. ##' @param post.sample The posterior sample, as returned by \code{posteriorMCMC} ##' @inheritParams posteriorMean ##' @inheritParams excessProb.condit.nl ##' @param known.par logical. Is the true parameter known ? ##' @param true.par The true parameter, only used if \code{known.par=TRUE} ##' @return A list made of \describe{ ##' \item{whole}{The output of \code{posteriorMean} called with \code{FUN=excessProb.condit.nl}.} ##' \item{mean}{The posterior mean of the excess probability} ##' \item{esterr}{The standard deviation of the mean estimator} ##' \item{estsd}{The standard deviation of the excess probability, ##' in the posterior sample. } ##' \item{lowquant}{The lower 0.1 quantile of the empirical posterior distribution of the excess probability } ##' \item{upquant}{The upper 0.1 quantile of the empirical posterior distribution of the excess probability } ##' \item{true}{\code{NULL} if \code{known.par=FALSE}, otherwise the excess probability in the true model.} ##' } ##' @export excessProb.nl <- function(post.sample, ## model="trinestlog", from=NULL,to=NULL, thin=100, thres=rep(100,3), known.par= FALSE, true.par, displ=FALSE) { reslist <- posteriorMean(post.sample=post.sample, from=from,to=to, thin=thin, FUN=excessProb.condit.nl, displ=FALSE, thres=thres ) #browser() res <- as.vector(reslist$values) N <- length(res) cummean <- cumsum(res)/(1:N) estsd <- sqrt(cumsum((res-cummean)^2)/(1:N)) esterr <- estsd/sqrt(1:N) ymax= max(res) ## max(cummean+1.1*estsd) ymin=min(res) ##min(cummean-1.1*estsd) if(displ){ plot(1:N, cummean, ylim=range(res), type="l", lwd=2) ##mean estimate polygon(c(1:N, N:1), c(cummean+qnorm(0.9)*estsd, rev(cummean-qnorm(0.9)*estsd)), col=gray(0.8)) lines(1:N, cummean, lwd=2) ##mean estimate } if(known.par) { true <- excessProb.condit.nl ( par=true.par, thres=thres ) if(displ) abline(h=true, col="red", lwd=2 ) } else true <- NULL sorted <- ##sort.int(res[,1], decreasing=TRUE, index.return=TRUE) sort(res, decreasing=TRUE) upquant <- sorted[ceiling(10/100*N)] lowquant <- sorted[floor(90/100*N)] if(displ){ abline(h=lowquant, col="blue", lwd=2) abline(h=upquant, col="blue", lwd=2) if(known.par) { legend("topright", legend=c("true", "posterior mean", "posterior 0.1/0.9 quantiles", "posterior 0.1/0.9 Gaussian quantiles" ), lwd=c(2,2,2,3), col=c("red", "black", "blue", gray(0.5)) ) } else { legend("topright", legend=c( "posterior mean", "posterior 0.1/0.9 quantiles", "posterior 0.1/0.9 Gaussian quantiles" ), lwd=c(2,2,4), col=c( "black", "blue", gray(0.5)) ) } } return(list(whole=res, mean=cummean[N], esterr=esterr[N], estsd=estsd[N], lowquant= lowquant, upquant=upquant, true=true)) }
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/excessProb.nl.r
##' Simple MC integration on the simplex for joint excess probability, ##' in the PB model. ##' ##' @title Estimates the probability of joint excess, given a PB parameter. ##' @param par the DM parameter, as a list ##' @param thres the multivariate threshold ##' @param precision The desired relative precision of the estimate. ##' @param Nmin The number of MC iterations to be performed ##' @param displ logical: should convergence diagnostic plots be issued ? ##' @param add logical: should the plot be added to a current one ? ##' @return a list made of \describe{ ##' \item{mean}{The mean estimate from the MC sample} ##' \item{esterr}{The estimated standard deviation of the estimator} ##' \item{estsd}{The estimated standard deviation of the MC sample} ##' } ##' @export ##' @keywords internal excessProb.condit.pb <- function(par=c(0.8,1,2,3), thres=rep(500,5), precision=0.1, Nmin=200, displ=FALSE, add=FALSE) { ## res <- rep(0,N) ## ws <- rpairbeta(N,par=par,dimData=3) ## res <- apply(ws, 1, function(w){3*min(w/thres)}) i <- 0 cond <- FALSE mean <- 0 Mi <- 0 si <- 0 res <- double(2*Nmin) while((i<Nmin) || !cond) { i <- i+1 if(i>length(res)) res <- c(res,double(i)) repeat{ w <- as.vector(rpairbeta(n=1,par=par, dimData=3)) if(all(w>0)) break } xi <- 3*min(w/thres) res[i] <- xi delta <- xi-mean mean <- mean + delta/i Mi <- Mi + delta*(xi-mean) si <- Mi/i cond <- (si/(i*mean^2) < precision^2 || i> 1e+5) } N <- i res <- res[1:N] if(displ) { cummean <- cumsum(res)/(1:N) estsd <- sqrt(cumsum((res-cummean)^2)/(1:N) ) esterr <- estsd/sqrt(1:N) ymax=max(cummean+1.1*estsd) ymin=min(cummean-1.1*estsd) if(!add) plot(1:N, cummean, ylim=c(ymin,ymax), type="l",col="blue") else lines(1:N, cummean,col="blue") lines(1:N, cummean+esterr, col="blue", lty=2) lines(1:N, cummean-esterr,col="blue", lty=2) ## lines(1:N, cummean+estsd, col="red") ## lines(1:N, cummean-estsd,col="red") } ## return(list(mean=cummean[N], esterr=esterr[N], estsd=estsd[N])) return(c(mean, sqrt(si/N) , sqrt(si))) } ## tt <- excessProb.condit.pb(par=c(0.8,1,2,3), ## thres=rep(500,3), ## precision=0.01, Nmin=200, ## displ=T, add=FALSE) #NULL ##' Double Monte-Carlo integration. ##' ##' @title Estimates the probability of joint excess (Frechet margins) ##' @inheritParams posteriorMean ##' @param Nmin.intern The minimum number of MC iteration in the internal loop (excess probability, conditional to a parameter). ##' @param precision The desired precision for the internal MC estimate ##' @param post.sample The posterior sample. ##' @param thres A multivariate threshold ##' @param known.par Logical ##' @param true.par The true parameter from which the data are issued. ##' @return A list made of \describe{ ##' \item{whole}{ A vector of estimated excess probabilities, one for each element of the thinned posterior sample.} ##' \item{mean}{the estimated threshold excess probability: mean estimate.} ##' \item{esterr}{The estimated standard deviation of the mean estimate ##' (where the Monte-Carlo error is neglected)} ##' \item{estsd}{The estimated standard deviation of the posterior sample (where the Monte-Carlo error is neglected)} ##' \item{lowquants}{The three lower \eqn{0.1} quantiles of, respectively, the conditional mean estimates and of the upper and lower bounds of the Gaussian (centered) \eqn{80} \% confidence intervals around the conditional estimates. } ##' \item{upquants}{The three upper \eqn{0.9} quantiles} ##' \item{true.est}{the mean estimate conditional to the true parameter: ##' a vector of size three: the mean estimate , and the latter +/- the standard deviation of the estimate} ##' } ##' @export excessProb.pb <- function( post.sample, Nmin.intern=100, precision=0.05, from=NULL,to=NULL, thin=100, displ=FALSE, thres=rep(500,5), known.par= FALSE, true.par) { reslist <- posteriorMean(post.sample=post.sample, from=from,to=to, thin=thin, FUN=excessProb.condit.pb, Nmin=Nmin.intern, precision=precision, thres=thres, displ=FALSE) res <- reslist$values N <- ncol(res) cummean <- cumsum(res[1,])/(1:N) estsd <- sqrt(cumsum((res[1,]-cummean)^2)/(1:N)) esterr <- estsd/sqrt(1:N) ymax= max(res[1,]) ## max(cummean+1.1*estsd) ymin=min(res[1,]) ##min(cummean-1.1*estsd) if(displ) { plot(1:N, cummean, ylim=range(res[1,]), type="l", lwd=2) ##mean estimate polygon(c(1:N, N:1), c(cummean+qnorm(0.9)*estsd, rev(cummean-qnorm(0.9)*estsd)), col=gray(0.8)) lines(1:N, cummean, lwd=2) ##mean estimate } if(known.par) { true.est <- excessProb.condit.pb ( par=true.par,Nmin=Nmin.intern, precision=precision, thres=thres, displ=FALSE) if(displ){ polygon(c(1,N,N,1), c(true.est[1]+ qnorm(0.9)*true.est[2], true.est[1]+ qnorm(0.9)*true.est[2], true.est[1] - qnorm(0.9)*true.est[2], true.est[1] - qnorm(0.9)*true.est[2]), density=10, col="red") abline(h=true.est[1], col="red", lwd=2 ) } } else{ true.est <- NULL } sorted <- ##sort.int(res[,1], decreasing=TRUE, index.return=TRUE) sort(res[1,], decreasing=TRUE) sortedUp <- sort(res[1,] + qnorm(0.9)* res[2,], decreasing=TRUE) sortedLow <- sort(res[1,] - qnorm(0.9)* res[2,], decreasing=TRUE) upquant <- sorted[ceiling(10/100*N)] ##sorted$x[ceiling(10/100*N)] upquantUp <- sortedUp[ceiling(10/100*N)] upquantLow <- sortedLow[ceiling(10/100*N)] ## polygon(c(1,N,N,1), c(upquantUp,upquantUp,upquantLow,upquantLow), ## col="blue", density=10) if(displ){ abline(h=upquant, col="blue", lwd=2) } lowquant <- sorted[floor(90/100*N)] lowquantUp <- sortedUp[floor(90/100*N)] lowquantLow <- sortedLow[floor(90/100*N)] ## polygon(c(1,N,N,1), c(lowquantUp,lowquantUp,lowquantLow,lowquantLow), ## col="blue", density=10) if(displ){ abline(h=lowquant, col="blue", lwd=2) lines(1:N, cummean, lwd=2) ##mean estimate if(known.par) { legend("topright", legend=c("true", "posterior mean", "posterior 0.1/0.9 quantiles", "posterior 0.1/0.9 Gaussian quantiles" ), lwd=c(2,2,2,3), col=c("red", "black", "blue", gray(0.5)) ) } else { legend("topright", legend=c( "posterior mean", "posterior 0.1/0.9 quantiles", "posterior 0.1/0.9 Gaussian quantiles" ), lwd=c(2,2,4), col=c( "black", "blue", gray(0.5)) ) } } return(list(whole=res, mean=cummean[N], esterr=esterr[N], estsd=estsd[N], lowquants=c( lowquant,lowquantLow,lowquantUp), upquants=c(upquant,upquantLow,upquantUp), true.est=true.est)) }
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/excessProb.pb.r
##' Logarithm of the acceptance probability ##' ##' \code{lAccept.ratio} is a functional: \code{likelihood,proposal,prior} are user defined functions. Should not be called directly, but through the MCMC sampler \code{\link{posteriorMCMC}} generating the posterior. ##' @title Acceptance probability in the MCMC algorithm. ##' @param cur.par The current parameter in the Markov chain ##' @param prop.par The candidate parameter ##' @return The log-acceptance probability. ##' @inheritParams posteriorMCMC ##' @export ##' @keywords internal lAccept.ratio <- function(cur.par, prop.par, llh.cur, lprior.cur, dat, likelihood, proposal, prior, Hpar, MCpar ) { p <- ncol(dat) ## old.ll=likelihood(x=dat, par=cur.par, ## log=TRUE, vectorial=FALSE) new.ll <- likelihood(x=dat, par=prop.par, log=TRUE, vectorial=FALSE) ## old.lprior = prior(type = "d", par = cur.par, ## Hpar = Hpar, log = TRUE, dimData=p) new.lprior <- prior(type="d", par = prop.par, Hpar = Hpar, log = TRUE, dimData=p) proposal.oldToProp <- proposal (type = "d", cur.par = cur.par, prop.par=prop.par, MCpar=MCpar, log=TRUE) proposal.propToOld <- proposal (type = "d", cur.par = prop.par, prop.par=cur.par, MCpar=MCpar, log=TRUE) return(list(lrho=new.ll - llh.cur + new.lprior-lprior.cur + proposal.propToOld - proposal.oldToProp, llh= new.ll, lprior=new.lprior) ) }
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/lAccept.ratio.r
##' @export ##' @rdname marginal.lkl.pb marginal.lkl.nl <- function(dat, Nsim=10e+3, displ=TRUE, Hpar = get("nl.Hpar"), Nsim.min=Nsim, precision=0, show.progress = floor(seq(1, Nsim, length.out = 20 ) ) ) { marginal.lkl(dat=dat, likelihood=dnestlog, prior=prior.nl, Nsim=Nsim, displ=displ, Hpar=Hpar, Nsim.min=Nsim.min, precision=precision, show.progress=show.progress ) }
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/marginal.lkl.nl.r
##' Wrappers for \code{\link{marginal.lkl}}, in the specific cases of the PB and NL models, ##' with parameter \code{likelihood} set to \code{dpairbeta} or ##' \code{dnestlog}, and \code{prior} set to \code{prior.pb} or ##' \code{prior.nl}. See \code{\link{MCpriorIntFun}} for more details. ##' ##' @title Marginal likelihoods of the PB and NL models. ##' @inheritParams marginal.lkl ##' @inheritParams MCpriorIntFun ##' @inheritParams posteriorMCMC ##' @return The list returned by ##' \code{\link{marginal.lkl}}, \emph{i.e.}, the one returned by \code{\link{MCpriorIntFun}} ##' @export ##' @seealso \code{\link{marginal.lkl}}, \code{\link{MCpriorIntFun}} . ##' @examples ##' \dontrun{ ##' ##' marginal.lkl.pb(dat=Leeds , ##' Nsim=20e+3 , ##' displ=TRUE, Hpar = get("pb.Hpar") , ##' ) ##' ##' marginal.lkl.nl(dat=Leeds , ##' Nsim=10e+3 , ##' displ=TRUE, Hpar = get("nl.Hpar") , ##' ) ##' } marginal.lkl.pb <- function(dat , Nsim=10e+3 , displ=TRUE, Hpar = get("pb.Hpar") , Nsim.min=Nsim, precision=0, show.progress = floor(seq(1, Nsim, length.out = 20 ) ) ) { marginal.lkl(dat=dat, likelihood=dpairbeta, prior=prior.pb, Nsim=Nsim, displ=displ, Hpar=Hpar, Nsim.min=Nsim.min, precision=precision, show.progress=show.progress ) }
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/marginal.lkl.pb.r
##' Estimates the marginal likelihood of a model, proceeding by simple Monte-Carlo integration under the prior distribution. ##' ##' The function is a wrapper calling \code{\link{MCpriorIntFun}} with parameter \code{FUN} set to \code{likelihood}. ##' @title Marginal model likelihood ##' @inheritParams posteriorMCMC ##' @inheritParams MCpriorIntFun ##' @param dat The angular data set relative to which the marginal model likelihood is to be computed ##' @param likelihood The likelihood function of the model. ##' See \code{\link{posteriorMCMC}} for the required format. ##' @param displ logical. If \code{TRUE}, a plot is produced, showing the temporal evolution of the cumulative mean, with approximate confidence intervals of \eqn{+/-2} estimated standard errors. ##' @param precision the desired relative precision. See ##' \code{\link{MCpriorIntFun}}. ##' @return The list returned by \code{\link{MCpriorIntFun}}. The estimate is the list's element named \code{emp.mean}. ##' @note The estimated standard deviations of the estimates produced by this function should be handled with care:For "larger" models than the Pairwise Beta or the NL models, ##' the likelihood may have ##' infinite second moment under the prior distribution. In such a case, ##' it is recommended to resort to more sophisticated integration methods, ##' \emph{e.g.} by sampling from a mixture of the prior and the ##' posterior distributions. See the reference below for more details. ##' @export ##' @references KASS, R. and RAFTERY, A. (1995). Bayes factors. \emph{Journal of the american statistical association , 773-795}. ##' @examples ##' \dontrun{ ##' lklNL= marginal.lkl(dat=Leeds, ##' likelihood=dnestlog, ##' prior=prior.nl, ##' Nsim=20e+3, ##' displ=TRUE, ##' Hpar=nl.Hpar, ##' ) ##'} ##' ##' @seealso \code{\link{marginal.lkl.pb}}, \code{\link{marginal.lkl.nl}} for direct use with the implemented models. marginal.lkl <- function(dat, likelihood, prior, Nsim=300 , displ=TRUE, Hpar, Nsim.min=Nsim, precision=0, show.progress = floor(seq(1, Nsim, length.out = 20 ) ) ) { intern.fun=function(param) { likelihood(x=dat, par=param, log = FALSE, vectorial = FALSE) } ## MC integration mc.res=MCpriorIntFun(Nsim=Nsim, prior=prior, Hpar=Hpar, dimData= ncol(dat), FUN=intern.fun, store=TRUE, Nsim.min=Nsim.min, precision=precision, show.progress=show.progress ) ## end. ## check convergence of marginal likelihood estimators if(displ ) { nsim = mc.res$nsim est.mean = cumsum(mc.res$stored.vals) / (1:nsim) est.err = sqrt( cumsum( (mc.res$stored.vals- est.mean)^2) )/ (1:nsim) dev.new() par(mfrow=c(1,1)) plot(1:nsim,est.mean,type="l", xlab="mc iterations", ylab="likelihood", main="", lwd=1.5, ylim=c(min( est.mean, (est.mean-2*est.err)[floor(nsim/2):nsim]), max(est.mean, (est.mean+2*est.err)[floor(nsim/2):nsim]) ) ) lines(1:nsim, est.mean + 2* est.err,col=gray(0.5)) lines(1:nsim , est.mean - 2* est.err,col=gray(0.5)) ## legend("topright", ## legend=c("mean likelihood","2*stand. deviation of mean estimator"), ## #"2*stand.dev. of successive points" ## lwd=c(1,1), ## col=c("black","gold") , ## cex=0.7) } return(mc.res) }
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/marginal.lkl.r
##' @title Maximum likelihood optimization ##' @param data The angular data to be used for inference ##' @param model A list made of \describe{ ##'\item{likelihood}{The likelihood function, see \code{\link{dpairbeta}} ##' for a template}. ##' \item{npar}{The length of the parameter vector} ##' } ##' @param init NULL or a real vector of size \code{model$npar} giving the initial values for \code{link{par}}. ##' @param maxit maximum number of iterations to be performed by ##' function \code{optim} ##' @param method The method to be used by \code{optim} ##' @param hess logical: should an approximation of the hessian be performed ? ##' @param link the link function from the natural marginal parameter spaces to the real line. ##' @param unlink the inverse link function. If \code{x} is any real number, then \code{unlink(x)} should be in the admissible range for the likelihood function and the prior function. ##' @return The list returned by \code{optim} and the AIC and BIC criteria ##' @export maxLikelihood <- function(data, model,## = list(likelihood, npar), init = NULL, maxit = 500, method="L-BFGS-B", hess = T, link, unlink) ## @param prior The prior function (see \code{\link{prior.pb}} for a template) for generating initial parameters in case the initial value results in non finite log-likelihood. ## @param Hpar the prior hyper parameters { p = dim(data)[2] ndat=dim(data)[1] npar <- model$npar lhood <- function(vec) { -model$likelihood(x=data, par=unlink(vec), log=TRUE, vectorial=FALSE) } if(is.null(init)) init <- rep(0,model$npar) count <- 0 converged <- FALSE while(!converged & count<20 ) { count <- count+1 opt.test <- tryCatch(optim(init, lhood, method = method, control =list(maxit = maxit,trace=0 ), hessian = hess), error=function(e){ # print(e) return(list(convergence=100)) }) if(as.integer(opt.test$convergence)>0){ init <- rnorm(model$npar,mean=0,sd=1) } else{ converged <- TRUE opt <- opt.test } } rtn <- list() if(as.integer(opt.test$convergence)>0){ rtn$message <- "optimisation failed" rtn$counts <- count rtn$convergence <- 100 rtn$linkedpar <- rep(0,npar) rtn$par <- unlink(rep(0,npar)) rtn$value <- -Inf rtn$aic <- rtn$aicc <- rtn$bic <- Inf rtn$linkedHessian <- 1 return(rtn) } rtn$message <- opt$message rtn$counts <- opt$counts rtn$convergence <- opt$convergence rtn$linkedpar <- opt$par rtn$par <- unlink(opt$par) rtn$value <- opt$value rtn$aic <- 2* (opt$value + npar) rtn$aicc <- 2* (opt$value + npar) +2*npar*(npar+1)/(ndat-npar-1 ) rtn$bic <- 2*opt$value + npar*log(ndat) if(!hess) { rtn$linkedHessian <- 1 } else { rtn$linkedHessian <- opt$hessian tryCatch(expr={ asympt.variance <- chol2inv(chol(opt$hessian)) rtn$asympt.variance <- asympt.variance rtn$linked.esterr <- sqrt(diag(asympt.variance))}, error=function(e){## rtn$asympt.variance <- NULL ## rtn$linked.esterr <- NULL return(rtn)} ) } return(rtn) } ##?optim
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/maxLikelihood.r
##'@inheritParams posterior.predictive3D ##' @export ##' @rdname posterior.predictive.pb posterior.predictive.nl <- function( post.sample , ## from pb.post.sample from = post.sample$Nbin+1, to = post.sample$Nsim, thin = 50, npoints=40,eps=1e-3, equi = T, displ=T, ... ) { predictive <- posterior.predictive3D(post.sample = post.sample , densityGrid =dnestlog.grid, from = from, to = to, thin = thin, npoints=npoints, eps=eps, equi = equi, displ=displ, ... ) return(predictive) } ## discr=discretize(npoints=npoints,eps=eps,equi=equi) ## X.grid = discr $X ## Y.grid = discr $ Y ## Nsim = post.sample$Nsim ## Nbin = post.sample $ Nbin ## time.idx = (from-Nbin):(to-Nbin) ## kept.idx = time.idx[ (time.idx %% thin == 0) ] ## mat.postSample = post.sample $ stored.vals[kept.idx,] ## density.grid.fun=function(v) ## { ## C.out = .C("d_nestlog_grid", as.double(X.grid), ## as.double(Y.grid), ## as.integer(npoints), ## as.double(v[1]), ## as.double(v[-1]), ## as.integer(equi), ## result = as.double(rep(0,npoints*npoints)) ) ## return(C.out $ result) ## } ## ## bindvect.res = apply(mat.postSample,1,density.grid.fun) ## ## mean.vect.res = apply(bindvect.res, 1,mean) ## mean.vect.res=rep(0, npoints*npoints) ## for(i in 1:length(kept.idx)) ## { ## mean.vect.res=mean.vect.res+ ## density.grid.fun(mat.postSample[i,]) ## } ## mean.vect.res=mean.vect.res/length(kept.idx) ## pred.density = matrix(mean.vect.res, ncol =npoints, byrow=T) ## if(displ) ## {plot.dens(Density = pred.density, ## npoints = npoints, ## eps = eps, ## equi = equi, ## ... ) ## } ## return(pred.density) ## }
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/posterior.predictive.nl.r
##' Wrappers for \code{\link{posterior.predictive3D}} in the PB and NL models. ##' ##' The posterior predictive density is approximated by averaging the densities corresponding to the parameters stored in \code{post.sample}. See ##' \code{\link{posterior.predictive3D}} for details. ##' @title Posterior predictive densities in the three dimensional ##' PB, NL and NL3 models ##' @inheritParams posterior.predictive3D ##' @return A \code{npoints*npoints} matrix: the posterior predictive density. ##' @seealso \code{\link{posterior.predictive3D}}, \code{\link{posteriorMCMC.pb}}. ##' @export posterior.predictive.pb= function( post.sample , from = post.sample$Nbin+1, to = post.sample$Nsim, thin = 50, npoints=40,eps=10^(-3), equi = T, displ=T, ... ) { predictive <- posterior.predictive3D(post.sample = post.sample , densityGrid =dpairbeta.grid, from = from, to = to, thin = thin, npoints=npoints, eps=eps, equi = equi, displ=displ, ... ) return(predictive) }
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/posterior.predictive.pb.r
##' Computes an approximation of the predictive density based on a posterior parameters sample. Only allowed in the three-dimensional case. ##' ##' The posterior predictive density is approximated by averaging the ##' densities produced by the function ##' \code{densityGrid(par, npoints, eps, equi, displ,invisible, ...)} for ##' \code{par} in a subset of the parameters sample stored in ##' \code{post.sample}. The arguments of \code{densityGrid} must be ##' \itemize{ ##' \item \code{par}: A vector containing the parameters. ##' \item \code{npoints, eps, equi}: Discretization parameters ##' to be passed to \code{\link{dgridplot}}. ##' \item \code{displ}: logical. Should a plot be produced ? ##' \item \code{invisible}: logical. Should the result be returned as \code{invisible} ? ##' \item \code{...} additional arguments to be passed to ##' \code{\link{dgridplot}} ##' } ##' Only a sub-sample is used: one out of \code{thin} parameters is used ##' (thinning). Further, only the parameters produced between time ##' \code{from} and time \code{to} (included) are kept. ##' @title Posterior predictive density on the simplex, for three-dimensional extreme value models. ##' @param post.sample A posterior sample as returned by \code{\link{posteriorMCMC}} ##' @param densityGrid A function returning a \code{npoints*npoints} ##' matrix, representing a discretized version of the spectral density ##' on the two dimensional simplex. ##' The function should be compatible with \code{\link{dgridplot}}. ##' In particular, it must use \code{\link{discretize}} to produce ##' the discretization grid. It must be of type \cr ##' \code{function(par, npoints, eps, equi, displ,invisible, ##' ... )}. ##' See \bold{Details} below. ##' @param from Integer or \code{NULL}. If \code{NULL}, the default value is used. Otherwise, should be greater than \code{post.sample$Nbin}. Indicates the index where the averaging process should start. Default to \code{post.sample$Nbin +1} ##' @param to Integer or \code{NULL}. If \code{NULL}, the default ##' value is used. Otherwise, must be lower than \code{Nsim+1}. ##' Indicates where the averaging process should stop. ##' Default to \code{post.sample$Nsim}. ##' @param thin Thinning interval. ##' @inheritParams dgridplot ##' @inheritParams discretize ##' @param displ logical. Should a plot be produced ? ##' @note The computational burden may be high: it is proportional to ##' \code{npoints^2}. Therefore, the function assigned to ##' \code{densityGridplot} should be ##' optimized, typically by calling \code{.C} with an internal, ##' user defined \code{C} function. ##' @return A \code{npoints*npoints} matrix: the posterior predictive density. ##' @seealso \code{\link{dgridplot}}, \code{\link{posteriorMCMC}}. ##' @author Anne Sabourin ##' @include dgridplot.R ##' @export posterior.predictive3D <- function(post.sample , densityGrid, from = post.sample$Nbin+1, to = post.sample$Nsim, thin = 40, npoints=40,eps=10^(-3), equi = T, displ=T, ... ) { Nsim = post.sample$Nsim Nbin = post.sample $ Nbin if(is.null(from)) { from=Nbin+1 } if(is.null(to)) { to=Nsim } if((from<=Nbin) | (to>Nsim) ) {stop(" argument \"from\" or \"to\" out of range")} time.idx = (from-Nbin):(to-Nbin) kept.idx = time.idx[ (time.idx %% thin == 0) ] mat.postSample = post.sample $ stored.vals[kept.idx,] mean.res=matrix(0,ncol=npoints, nrow=npoints) for(i in 1:length(kept.idx)) { mean.res=mean.res+ densityGrid(par=mat.postSample[i,], npoints=npoints, eps=eps,equi=equi, displ=FALSE,invisible=FALSE) } mean.res=mean.res/length(kept.idx) if(displ) {dgridplot(density = mean.res, ##pred.density, # npoints = npoints, eps = eps, equi = equi, ... ) } return(mean.res) }
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/posterior.predictive3D.r
##' Builds an empirical distribution defined as a sum of weighted Dirac masses from posterior samples in individual models. ##' ##' @title Posterior distribution in the average model ##' @param postweights a vector of positive real numbers, summing to one: the posterior weights (in the same order as the elements of \code{post.distrs}) of the individual models. ##' @param post.distrs A list of same length as \code{postweights}. ##' Each element must be a vector which will be used as a posterior sample. ##' @return A matrix with two rows and as many columns as the sum of the lengths of the elements of \code{post.distrs}. The second line contains the weighted posterior sample in the BMA; the first line contains the weights to be assigned to each corresponding value on the second one. ##' @export posteriorDistr.bma <- function(postweights=c(0.5,0.5), post.distrs=list() ) { M <- length(post.distrs) sizes <- double(M) sortedlist <- list() for (i in 1:M) { sizes[i] <- length(post.distrs[[i]]) sortedlist[[i]] <- sort(post.distrs[[i]], decreasing=FALSE) } Nunion <- sum(sizes) sortedsample <- double(Nunion) sortedweights <- double(Nunion) for(i in 1:Nunion) { candidates <- sapply(1:M, function(m){sortedlist[[m]][1]} ) minind <- which.min(candidates) sortedsample[i] <- candidates[minind] sortedweights[i] <- postweights[minind]/sizes[minind] sortedlist[[minind]] <- sortedlist[[minind]][-1] } res <- rbind(sortedsample,sortedweights) rownames(res) <- c("values","weights") return(res) } ## posteriorMean.bma <- function(postweights=c(0.5,0.5), ## post.samples=list(), ## FUN=function(par,model,...){NULL}, ## models=c("pairbeta", "trinestlog"), ## N=1e+3, ## froms=c(NULL,NULL), ## tos=c(NULL,NULL), ## ...) ## { ## if( (length(postweights) != length(models) )| (length(models) != length(froms) ) | (length(froms) != length(tos) ) ) ## {stop("arguments 'postweights', 'models', 'froms' and 'tos' should have same length. ") ## } ## Nmodel <- length(postweights) ## for( m in 1:Nmodel) ## { ## if(is.null(froms[m])) ## froms[m] <- post.samples[[m]]$Nbin +1 ## if(is.null(tos[m])) ## { ## tos[m] <- post.samples[[m]]$Nsim ## } ## if((froms[m]<= post.samples[[m]]$Nbin) | ## (tos[m]>post.samples[[m]]$Nsim) ) ## {stop(" argument \"from\" or \"to\" out of range")} ## } ## idx <- 1:N ## cumweights <- cumsum(postweights) ## intern.fun <- function(i) ## { ## u <- runif(1) ## m <- min((1:Nmodel)[u<cumweights]) ## ind.i <- sample(froms[m]:tos[m],1) ## par.i <- (post.samples[[m]])$stored.vals[ind.i,] ## res.i <- as.vector(FUN(par=par.i, model=models[m], ...) ) ## return(res.i) ## } ## values <- sapply(idx, ## intern.fun, ## simplify=TRUE ## ) ## if(is.vector(values)) ## values <- matrix(values,nrow=1) ## est.mean <- apply(values, 1, mean) ## est.sd <- apply(values,1,sd) ## if(displ) ## { ## for(i in 1:nrow(values)) ## { ## dev.new() ## ylim <- range(values[i,]) ## cummean <- cumsum(values[i,])/(1:N) ## plot(kept.idx, cummean, type="l" ) ## esterr <- sqrt(cumsum( (values[i,]- cummean)^2 ) )/1:N ## lines( cummean+2*esterr, col=gray(0.5) ) ## lines( cummean-2*esterr, col=gray(0.5) ) ## } ## } ## return(list(values=values, est.mean=est.mean, est.sd=est.sd)) ## }
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/posteriorDistr.bma.r
##' @export ##' @rdname posteriorMCMC.pb posteriorMCMC.nl <- function(Nsim, dat, Hpar, MCpar, ... ) { postsample <- posteriorMCMC(Nsim=Nsim, dat=dat, prior=prior.nl, proposal=proposal.nl, likelihood=dnestlog, Hpar=Hpar, MCpar=MCpar, name.model="nestlog", class="PBNLpostsample", ...) ## oldclasses <- class(postsample) ## class(postsample) <- c("PBNLpostsample", oldclasses) return(postsample) } ## posteriorMCMC.nl <- ## function(Nsim=500,Nbin=200, ## dat=get("Leeds"), ## par.start=NULL, ## Hpar=get("nl.Hpar"), ## MCpar = get("nl.MCpar"), ## show.progress = floor(seq(1, Nsim, length.out = 20 ) ), ## seed=1, kind ="Mersenne-Twister", ## name.save=NULL, ## save.directory="~", name.dat=NULL ## ) ## { ## post <- ## posteriorMCMC(Nsim=Nsim,Nbin=Nbin, ## dat=dat, ## par.start=par.start, ## prior=prior.nl, ## proposal=proposal.nl, ## likelihood=dnestlog, ## Hpar=Hpar, ## MCpar=MCpar, ## show.progress = show.progress, ## seed=seed, kind=kind, name.save=name.save, ## save.directory=save.directory, name.dat=name.dat, ## name.model="nestlog") ## return(post) ## }
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/posteriorMCMC.nl.r
##' The functions generate parameters samples approximating the posterior distribution in the PB model or the NL model. ##' ##' The two functions are wrappers simplifying the use of ##' \code{\link{posteriorMCMC}} for the two models implemented in this package. ##' @title MCMC posterior samplers for the pairwise beta and the negative logistic models. ##' @inheritParams posteriorMCMC ##' @param ... Additional arguments to be passed to ##' \code{\link{posteriorMCMC}} instead of their ##' default values (must not contain any of ##' \code{"prior", ##' "likelihood", "proposal", ##' "name.model"} or \code{"class"}). ##' @return an object with class attributes \code{"postsample"} and ##' \code{"PBNLpostsample"}: The posterior sample and some statistics ##' as returned by function \code{\link{posteriorMCMC}} ##' @seealso \code{\link{posteriorMCMC}} ##' @note For the Leeds data set, and for simulated data sets with ##' similar features, setting \code{Nsim=50e+3} and \code{Nbin=15e+3} ##' is enough (possibly too much), ##' with respect to the Heidelberger and Welch tests implemented in ##' \code{\link[coda]{heidel.diag}}. ##' @examples ##' \dontrun{ ##' data(Leeds) ##' data(pb.Hpar) ##' data(pb.MCpar) ##' data(nl.Hpar) ##' data(nl.MCpar) ##' pPB <- posteriorMCMC.pb(Nsim=5e+3, dat=Leeds, Hpar=pb.Hpar, ##' MCpar=pb.MCpar) ##' ##' dim(pPB[1]) ##' pPB[-(1:3)] ##' ##' pNL <- posteriorMCMC.nl(Nsim=5e+3, dat=Leeds, Hpar=nl.Hpar, ##' MCpar=nl.MCpar) ##' ##' dim(pNL[1]) ##' pNL[-(1:3)] ##' } ##' @export posteriorMCMC.pb <- function(Nsim, dat, Hpar, MCpar, ... ) { postsample <- posteriorMCMC(Nsim=Nsim, dat=dat, prior=prior.pb, proposal=proposal.pb, likelihood=dpairbeta, Hpar=Hpar, MCpar=MCpar, name.model="pairbeta", class="PBNLpostsample", ...) return(postsample) }
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/posteriorMCMC.pb.r
##' Generates a posterior parameters sample, and computes the posterior mean and component-wise variance on-line. ##' ##' @title MCMC sampler for parametric spectral measures ##' @param Nsim Total number of iterations to perform. ##' @param Nbin Length of the burn-in period. ##' @param par.start Starting point for the MCMC sampler. ##' @param dat An angular data set, \emph{e.g.}, constructed by ##' \code{\link{cons.angular.dat}}: A matrix which rows are the Cartesian coordinates of points on the unit simplex (summing to one). ##' @param likelihood The likelihood function. ##' Should be of type\cr ##' \code{function(x, par, log, vectorial)}, where \code{log} and ##' \code{vectorial} are logical flags indicating respectively if ##' the result is to be returned on the log-scale, and if the ##' value is a vector of length \code{nrow(x)} or a single number ##' (the likelihood, or the log-likelihood, for the data set \code{x}). ##' See \code{\link{dpairbeta}} or \code{\link{dnestlog}} ##' for templates. ##' @param proposal The proposal function: of type \cr ##' \code{function(type = c("r","d"), ##' cur.par, prop.par, MCpar, log) ##' }. ##' Should ##' return the (logarithm of) the proposal density for the move ##' \code{cur.par --> prop.par} if \code{type == "d"}. If ##' \code{type =="r"}, \code{proposal} must return a candidate ##' parameter, depending on \code{cur.par}, as a vector. ##' See \code{\link{proposal.pb}} or \code{\link{proposal.nl}} ##' for templates. ##' @param prior The prior distribution: of type \cr ##' \code{function(type=c("r","d"), ##' n ,par, Hpar, log, dimData ##' )}, ##' where \code{dimData} is the dimension of the sample ##' space (\emph{e.g.}, for ##' the two-dimensional simplex (triangle), \code{dimData=3}. ##' Should return either a matrix with \code{n} rows containing a ##' random parameter sample generated under the prior ##' (if \code{type == "d"}), or the density of the ##' parameter \code{par} (the logarithm of the density if ##' \code{log==TRUE}. ##' See \code{\link{prior.pb}} and \code{\link{prior.nl}} for templates. ##' @param Hpar A list containing Hyper-parameters to be passed to ##' \code{prior}. ##' @param MCpar A list containing MCMC tuning parameters to be ##' passed to \code{proposal}. ##' @param show.progress An vector of integers containing the times ##' (iteration numbers) at which a message showing progression ##' will be printed on the standard output. ##' @param seed The seed to be set \emph{via} ##' \code{\link[base]{set.seed}}. ##' @param kind The kind of random numbers generator. Default to ##' "Mersenne-Twister". See \code{\link[base]{set.seed}} for details. ##' @param save Logical. Should the result be saved ? ##' @param class Optional character string: additional class attribute to be assigned to the result. A predefined class \code{"PBNLpostsample"} exists, for which a method performing convergence diagnostics is defined (see \code{\link{diagnose}} ) ##' @param save.directory A character string giving the directory where the result is to be saved (without trailing slash). ##' @param name.save A character string giving the name under which ##' the result is to be saved. If \code{NULL} (default), ##' nothing is saved. Otherwise, the result is saved in file \cr ##' \code{paste(save.directory,"/", ##' name.save,".rda",sep="")}. ##' A "log" list is also saved, named \cr ##' \code{paste(name.save, ".log", sep="")}, in file \cr ##' \code{paste(save.directory,"/", name.log,".rda",sep="")}. ##' @param name.dat A character string naming the data set used for inference. Default to \code{""}. ##' @param name.model A character string naming the model. Default to \code{""}. ##' @return A list made of ##' \itemize{ ##' \item \code{stored.vals}: A \code{(Nsim-Nbin)*d} matrix, where ##' \code{d} ##' is the dimension of the parameter space. ##' \item \code{llh} A vector of size \code{(Nsim-Nbin)} containing the loglikelihoods evaluated at each parameter of the posterior sample. ##' \item \code{lprior} A vector of size \code{(Nsim-Nbin)} containing the logarithm of the prior densities evaluated at each parameter of the posterior sample. ##' \item \code{elapsed}: The time elapsed, as given by ##' \code{proc.time} between the start and the end of the run. ##' \item \code{Nsim}: The same as the passed argument ##' \item \code{Nbin}: idem. ##' \item\code{n.accept}: The total number of accepted proposals. ##' \item \code{n.accept.kept}: The number of accepted proposals after the burn-in period. ##' \item \code{emp.mean} The estimated posterior parameters mean ##' \item \code{emp.sd} The empirical posterior sample standard deviation.} ##' @export ##' @seealso \code{\link{posteriorMCMC.pb}}, ##' \code{\link{posteriorMCMC.pb}} for specific uses ##' in the PB and the NL models. ##' @examples ##' data(Leeds) ##' data(pb.Hpar) ##' data(pb.MCpar) ##' postsample1 <- posteriorMCMC(Nsim=1e+3,Nbin=500, ##' dat= Leeds, ##' prior = prior.pb, ##' proposal = proposal.pb, ##' likelihood = dpairbeta, ##' Hpar=pb.Hpar, ##' MCpar=pb.MCpar) ##' ##' dim(postsample1[[1]]) ##' postsample1[-1] ##' ##' \dontrun{ ##' ## a more realistic one: ##' ##' postsample2 <- posteriorMCMC(Nsim=50e+3,Nbin=15e+3, ##' dat= Leeds, ##' prior = prior.pb, ##' proposal = proposal.pb, ##' likelihood = dpairbeta, ##' Hpar=pb.Hpar, ##' MCpar=pb.MCpar) ##' dim(postsample2[[1]]) ##' postsample2[-1] ##' } ##' ##' @keywords htest multivariate posteriorMCMC <- function(prior = function(type=c("r","d"), n , par, Hpar, log, dimData){ NULL}, proposal = function(type = c("r","d"), cur.par, prop.par, MCpar, log){ NULL} , likelihood = function(x, par, log, vectorial){ NULL}, Nsim, dat, Hpar, MCpar, Nbin=0, par.start=NULL, show.progress = floor(seq(1, Nsim, length.out = 20 ) ), seed=NULL, kind ="Mersenne-Twister", save=FALSE, class=NULL, name.save=NULL, save.directory = "~", name.dat = "", name.model= "" ) { ## keep track of arguments argnames <-ls() arglist <- list() for(i in 1:length(argnames)) { arglist[[i]] <- get(argnames[i]) } names(arglist) <- argnames ####### initialize #### if(!is.null(seed)) set.seed(seed,kind=kind) start.time <- proc.time() p <- ncol(dat) if(is.null(par.start)) { repeat{ par.start <- prior (type = "r", n=1, Hpar = Hpar, dimData=p) condit <- likelihood(x=dat, par=par.start, log=TRUE, vectorial=FALSE) + prior(type = "d", par = par.start, Hpar = Hpar, log = TRUE, dimData=p) if(is.finite(condit)) break } } cur.par <- par.start llh.cur <- likelihood(x=dat, par=cur.par, log=TRUE, vectorial=FALSE) lprior.cur <- prior(type = "d", par = cur.par, Hpar = Hpar, log = TRUE, dimData=p) nsim=1 n.accept=0 n.accept.kept = 0 leng=length(cur.par) mean.res=rep(0,leng) emp.variance=rep(0,leng) emp.variance.unNorm=rep(0,leng) stored.vals <- matrix(0,nrow=Nsim-Nbin,ncol=leng) if(!is.null(names(par.start)) & length(names(par.start)) == leng){ colnames(stored.vals) <- names(par.start) } llh <- double(Nsim-Nbin) lprior <- double(Nsim-Nbin) stored.vals[1,]=cur.par lprior[1] <- lprior.cur llh[1] <- llh.cur ############## start MCMC ############# while(nsim<=Nsim) { ## show progression if(any(nsim==show.progress) ) { cat(paste("iter",nsim, ": n.accepted=", n.accept, "\n", sep = " " )) } ######### propose move prop.par <- proposal(type="r", cur.par = cur.par, prop.par=NULL, MCpar=MCpar) ###### acceptance probability ratio.list <- lAccept.ratio( cur.par = cur.par, prop.par=prop.par, llh.cur=llh.cur, lprior.cur=lprior.cur, dat=dat, likelihood=likelihood, proposal=proposal, prior=prior, Hpar=Hpar, MCpar=MCpar ) if( (is.finite(ratio.list$lrho) )&&( (ratio.list$lrho>0) || (log(runif(1))<=ratio.list$lrho) ) ) ##then: proposal accepted: move, ie. update { n.accept <- n.accept+1 if(nsim > Nbin) n.accept.kept = n.accept.kept +1 cur.par <- prop.par llh.cur <- ratio.list$llh lprior.cur <- ratio.list$lprior } ###### store results after burn-in if(nsim>Nbin) { n <- nsim-Nbin new.mean.res=mean.res+1/n*(cur.par-mean.res) emp.variance.unNorm <- emp.variance.unNorm + (cur.par-new.mean.res)* (cur.par- mean.res) mean.res <- new.mean.res if(nsim == Nsim) { emp.variance <- emp.variance.unNorm / (n-1) } stored.vals[n,] <- cur.par llh[n] <- llh.cur lprior[n] <- lprior.cur } nsim <- nsim+1 } ########### end MCMC ############### end.time <- proc.time() print(end.time-start.time) res <- list(stored.vals=stored.vals, llh=llh, lprior=lprior, arguments=arglist, elapsed =end.time-start.time, Nsim=Nsim, Nbin=Nbin, n.accept=n.accept, n.accept.kept = n.accept.kept, emp.mean=mean.res, emp.sd=sqrt(emp.variance) ##est.error=sqrt(emp.variance/n) ) class(res) <- c(class,"postsample") if( save && !is.null(name.save)) { loglist <- list(model=name.model, Nsim=Nsim,Nbin=Nbin,dat=name.dat, Hpar=Hpar,MCpar=MCpar, seed=seed, kind=kind) name.log <- paste(name.save, ".log", sep="") assign(name.save,res) assign(name.log,loglist) save(list=name.save, file=paste(save.directory,"/",name.save,".rda",sep="")) save(list=name.log, file=paste(save.directory,"/", name.log,".rda",sep="")) } return(res) } ##' @export print.postsample <- function(x, ...) { cat("\n A posterior sample of class (S3) \"postsample\" \n ") cat("Acceptance ratio after burn-in:", x$n.accept.kept/(x$Nsim- x$Nbin-1), "\n", sep=" ") cat("time elapsed:\n") print(x$elapsed) cat("List names: ") cat(names(x), sep=" , ") cat("\n") }
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/posteriorMCMC.r
##' Computes an approximation of the posterior mean of a parameter functional, based on a posterior parameters sample. ##' ##' Only a sub-sample is used: one out of \code{thin} parameters is used ##' (thinning). Further, only the parameters produced between time ##' \code{from} and time \code{to} (included) are kept. ##' @title Posterior predictive density on the simplex, for three-dimensional extreme value models. ##' @param post.sample A posterior sample as returned by \code{\link{posteriorMCMC}} ##' @param FUN a parameter functional returning a vector. ##' @param ... Additional parameters to be passed to \code{FUN}. ##' @param from Integer or \code{NULL}. If \code{NULL}, the default value is used. Otherwise, should be greater than \code{post.sample$Nbin}. Indicates the index where the averaging process should start. Default to \code{post.sample$Nbin +1} ##' @param to Integer or \code{NULL}. If \code{NULL}, the default ##' value is used. Otherwise, must be lower than \code{Nsim+1}. ##' Indicates where the averaging process should stop. ##' Default to \code{post.sample$Nsim}. ##' @param thin Thinning interval. ##' @param displ logical. Should a plot be produced ? ##' @return A list made of \describe{ ##' \item{values}{A matrix : each column is the result of \code{FUN} applied to a parameter from the posterior sample.} ##' \item{est.mean}{The posterior mean} ##' \item{est.sd}{The posterior standard deviation } ##' } ##' @seealso \code{\link{posteriorMCMC}}. ##' @export posteriorMean <- function(post.sample , FUN=function(par,...){par}, from = NULL, to = NULL, thin=50, displ=TRUE, ... ) { if(is.null(from)) from <- post.sample$Nbin +1 if(is.null(to)) { to <- post.sample$Nsim } if((from<= post.sample$Nbin) | (to>post.sample$Nsim) ) {stop(" argument \"from\" or \"to\" out of range")} time.idx <- (from-post.sample$Nbin):(to-post.sample$Nbin) kept.idx <- time.idx[ (time.idx %% thin == 0) ] # mat.postSample <- post.sample $ stored.vals[kept.idx,] values <- sapply(kept.idx, function(i){as.vector(FUN(par=post.sample $ stored.vals[i,], ...) )}, simplify=TRUE ) # mean.res=matrix(0,ncol=npoints, nrow=npoints) if(is.vector(values)) values <- matrix(values,nrow=1) est.mean <- apply(values, 1, mean) est.sd <- apply(values,1,sd) if(displ) { N <- length(kept.idx) shuffle <- sample(N,N,replace=F) for(i in 1:nrow(values)) { dev.new() ylim <- range(values[i,]) cummean <- cumsum(values[i,shuffle])/(1:N) plot(kept.idx, cummean, type="l" ) esterr <- sqrt(cumsum( (values[i,shuffle]- cummean)^2 ) )/1:N lines( cummean+2*esterr, col=gray(0.5) ) lines( cummean-2*esterr, col=gray(0.5) ) } } return(list(values=values, emp.mean=est.mean, emp.sd=est.sd)) }
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/posteriorMean.r
##' Approximates the models' posterior weights by simple Monte Carlo integration ##' ##' if \eqn{J} is the number of models, the posterior weights are given by ##'\deqn{postW(i) = priorW(i)*lkl(i)/ ##' (\sum_{j=1,\dots,J} priorW(j)*lkl(j)),} ##' where \eqn{lkl(i)} stands for the Monte-Carlo estimate of the ##' marginal likelihood of model \eqn{i} and \eqn{priorW(i)} is ##' the prior weight ##' defined in \code{priorweights[i]}. For more explanations, see the reference below ##' The confidence intervals are obtained by adding/subtracting two times the estimated standard errors of the marginal likelihood estimates. ##' The latter are only estimates, which interpretation may be misleading: ##' See the note in section \code{\link{marginal.lkl}} ##' @title Posterior model weights ##' @inheritParams marginal.lkl ##' @inheritParams posteriorMCMC ##' @param HparList A list containing the hyper Parameter for the priors in each model. (list of lists). ##' @param lklList A list containing the likelihood functions of each model ##' @param priorList A list containing the prior definitions of each model. ##' @param priorweights A vector of positive weights, summing to one: the prior marginal weights of each model. ##' @param Nsim The maximum number of iterations to be performed. ##' @param displ Logical. Should convergence monitoring plots be issued ? ##' @references HOETING, J., MADIGAN, D., RAFTERY, A. and VOLINSKY, C. (1999). Bayesian model averaging: A tutorial. \emph{Statistical science 14, 382-401}. ##' @return A matrix of \eqn{6} columns and \code{length(priorweights)} rows. The columns contain respectively the posterior model weights (in the same order as in \code{priorweights}), the lower and the upper bound of the confidence interval (see \bold{Details}), the marginal model weights, the estimated standard error of the marginal likelihood estimators, and the number of simulations performed. ##'@examples ##'data(pb.Hpar) ##' data(nl.Hpar) ##' set.seed(5) ##' mixDat=rbind(rpairbeta(n=10,dimData=3, par=c(0.68,3.1,0.5,0.5)), ##' rnestlog(n=10,par=c(0.68,0.78, 0.3,0.5))) ##' posteriorWeights (dat=mixDat, ##' HparList=list(get("pb.Hpar"),get("nl.Hpar")), ##' lklList=list(get("dpairbeta"), get("dnestlog")), ##' priorList=list(get("prior.pb"), get("prior.nl")), ##' priorweights=c(0.5,0.5), ##' Nsim=1e+3, ##' Nsim.min=5e+2, precision=0.1, ##' displ=FALSE) ##' \dontrun{posteriorWeights (dat=mixDat, ##' HparList=list(get("pb.Hpar"),get("nl.Hpar")), ##' lklList=list(get("dpairbeta"), get("dnestlog")), ##' priorList=list(get("prior.pb"), get("prior.nl")), ##' priorweights=c(0.5,0.5), ##' Nsim=20e+3, ##' Nsim.min=10e+3, precision=0.05, ##' displ=TRUE)} ##' @export posteriorWeights <- function(dat, HparList=list(get("pb.Hpar"),get("nl.Hpar")), lklList=list(get("dpairbeta"), get("dnestlog")), priorList=list(get("prior.pb"), get("prior.nl")), priorweights=c(0.5,0.5), Nsim=20e+3, Nsim.min=10e+3, precision=0.05, seed=1, kind = "Mersenne-Twister", show.progress=floor(seq(1,Nsim,length.out=10)), displ=FALSE) { if( (length(HparList) != length(lklList)) | (length(lklList) != length(priorweights) ) | (length(priorweights) != length(priorList) ) ) {stop("arguments 'HparList', 'lklList', 'priorList' and 'priorweights' should have same length. ")} Nmodel=length(HparList) output=matrix(0,ncol=6,nrow=Nmodel, dimnames=list(NULL, c("postW", "lowConf", "upConf", "margLkl", "estErr", "nsim")) ) for(i in 1:Nmodel) { set.seed(seed, kind =kind) lklMarg=marginal.lkl(dat=dat, likelihood=lklList[[i]], prior=priorList[[i]], Nsim=Nsim , displ= displ, Hpar=HparList[[i]], Nsim.min=Nsim.min, show.progress=show.progress, precision=precision ) if(displ) { mtext(paste("Model ", toString(i), " : marginal likelihood", sep=""), side=3, line=2) } output[i,4]=lklMarg$emp.mean output[i,5]=lklMarg$est.error output[i,6]=lklMarg$nsim } lklTot= sum(output[,4] * priorweights) lklTotSup=sum((output[,4]+ 2*output[,5]) * priorweights) lklTotInf=sum((output[,4]- 2*output[,5]) * priorweights) output[,1]=(output[,4]*priorweights)/lklTot output[,2] =priorweights* (output[,4]-2*output[,5] )/ (lklTotSup -4*priorweights*output[,5] ) output[,3] =priorweights* (output[,4]+2*output[,5] )/ (lklTotInf + 4*priorweights*output[,5] ) return(output) }
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/posteriorWeights.r
##' Bijective Transformation from \eqn{(0,1)} to the real line, defined ##' by ##' \eqn{logit(p) = log( p / (1-p) )}. ##' ##' @title Logit transformation ##' @return A real number ##' @param p A real number in \eqn{[0,1]} ##' @export logit <- function (p) { if ( any(p < 0) || any(p > 1) ) stop("p must be in [0,1].") return( log(p)-log( 1 - p) ) } ##' Inverse transformation of the \code{logit} function. ##' ##' @title Inverse logit transformation ##' @return A number between \eqn{0} and \eqn{1}. ##' @param x A real number ##' @export invlogit <- function (x) { return( 1/(1 + exp(-x)) ) } ##' Density and generating function of the prior distribution. ##' ##' The four parameters are independent, the logit-transformed parameters follow a normal distribution. ##' @title Prior parameter distribution for the NL model ##' @param type One of the character strings \code{"r"}, \code{"d"} ##' @param n The number of parameters to be generated. Only used ##' if \code{type == "r"}. ##' @param par A vector of length four, with component comprised ##' between \eqn{0} and \eqn{1} (both end points excluded for the first ##' element and \eqn{1} included for the others): ##' The parameter where ##' the density is to be taken. ##' Only used if \code{type=="d"}. ##' ##' In the NL model, ##' \code{par} is of length \eqn{4}. ##' The first element is the global dependence ##' parameter, the others are partial dependence parameter between pairs (12), (13), (23) respectively. ##' ##' In the NL model, ##' \code{par} is of length \eqn{4}. ##' The first element has the same interpretation as in the NL model, the subsequent ones are dependence parameters between ##' @param Hpar list of Hyper-parameters : see \code{\link{nl.Hpar}} for a template. ##' @param log logical. Should the density be returned on the log scale ? ##' Only used if \code{type=="d"} ##' @param dimData The dimension of the sample space, equal to \eqn{3}. ##' Only for compatibility with \emph{e.g.} \code{\link{posteriorMCMC}}. ##' @return Either a matrix with \code{n} rows containing a random parameter sample generated under the prior (if type == "d"), or the (log)-density of the parameter \code{par}. ##' @examples \dontrun{prior.nl(type="r", n=5 ,Hpar=get("nl.Hpar")) } ##' ##' \dontrun{prior.trinl(type="r", n=5 ,Hpar=get("nl.Hpar")) } ##' \dontrun{prior.pb(type="d", par=rep(0.5,2), Hpar=get("nl.Hpar")) } ##' @author Anne Sabourin ##' @export prior.nl <- function(type=c("r","d"), n ,par, Hpar, log, dimData=3) { if(type =="r") { stopifnot(dimData == 3) alpha <- invlogit(rnorm(n, mean=Hpar$mean.alpha, sd=Hpar$sd.alpha) ) beta <- invlogit(matrix(rnorm(3*n, mean=Hpar$mean.beta, sd=Hpar$sd.beta), ncol=3)) res <- c(alpha,beta ) names(res) <- c("alpha","beta12","beta13", "beta23") return(res) } if(type=="d") { lpar <- logit(par) ld1 <- dnorm(lpar[1], mean=Hpar$mean.alpha, sd=Hpar$sd.alpha, log=TRUE) ld2 <- dnorm(lpar[-1], mean=Hpar$mean.beta, sd=Hpar$sd.beta, log=TRUE) if(log) return(ld1 +sum(ld2)) else return( exp(ld1+sum(ld2))) } }
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/prior.nl.r
##' Density and generating function of the prior distribution. ##' ##' The parameters components are independent, log-normal. ##' @title Prior parameter distribution for the Pairwise Beta model ##' @param type One of the character strings \code{"r"}, \code{"d"} ##' @param n The number of parameters to be generated. Only used ##' if \code{type == "r"}. ##' @param par A vector with positive components: ##' The parameter where the density is to be taken. ##' Only used if \code{type=="d"}. In the Pairwise Beta model, ##' \code{par} is of length ##' \code{choose(p,2)+1}. The first element is the global dependence ##' parameter, the subsequent ones are the pairwise dependence ##' parameters, in lexicographic order (\emph{e.g.} ##' \eqn{\beta_{1,2}, \beta_{1,3}, \beta_{2,3}}. ##' @param Hpar list of Hyper-parameters : see \code{\link{pb.Hpar}} for a template. ##' @param log logical. Should the density be returned on the log scale ? ##' Only used if \code{type=="d"} ##' @param dimData The dimension of the sample space. (one more than the dimension of the simplex) ##' @return Either a matrix with \code{n} rows containing a random parameter sample generated under the prior (if type == "d"), or the (log)-density of the parameter \code{par}. ##' @examples \dontrun{prior.pb(type="r", n=5 ,Hpar=get("pb.Hpar"), dimData=3 ) } ##' \dontrun{prior.pb(type="d", par=rep(1,choose(4,2), Hpar=get("pb.Hpar"), dimData=4 ) } ##' @author Anne Sabourin ##' @export prior.pb <- function(type=c("r","d"), n ,par, Hpar, log, dimData ) { if(type =="r") { p <- dimData lengthPar <- choose(p,2)+1 ## res <- matrix(0,ncol=lengthPar, nrow = n) alpha <- exp(rnorm(n, mean=Hpar$mean.alpha, sd=Hpar$sd.alpha)) beta <-exp(rnorm((lengthPar-1)*n, mean=Hpar$mean.beta, sd=Hpar$sd.beta)) res <- c(alpha,beta) names(res) <- c("alpha", paste0("beta", apply(combn(p,2), 2, function(x){paste0(x, collapse = "")}))) return(res) } if(type =="d") { lpar <- log(par) ld1 <- dnorm(lpar[1], mean=Hpar$mean.alpha, sd=Hpar$sd.alpha, log=TRUE) ld2 <- dnorm(lpar[-1], mean=Hpar$mean.beta, sd=Hpar$sd.beta, log=TRUE) ## browser() if(log) return(ld1 +sum(ld2)) else return( exp(ld1+sum(ld2))) ## return(ifelse(log, sum(logdprior), exp(sum(logdprior))) ) } stop("wrong 'type' argument") }
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/prior.pb.r
##' Density of the proposal distribution \code{q(cur.par,prop.par)} and random generator for MCMC algorithm in the NL3 model. ##' ##' The two components of proposal parameter ##' \code{(alpha*, beta12*, beta13*, beta23*)} are generated independently, under a beta distribution with mode at the current parameter's value. ##' ##' Let \eqn{\epsilon =} \code{MCpar$eps.recentre}. To generate \code{alpha*}, given the current state \code{alpha(t)}, ##' let \eqn{m(t) = \epsilon /2 + (1-\epsilon) * \alpha(t)} be the mean ##' of the Beta proposal distribution and \eqn{\lambda = 2/\epsilon} ##' (a scaling constant). Then ##' \deqn{ ##' \alpha^* \sim \textrm{Beta}(\lambda m(t), (1-\lambda) m(t))}{% ##' \alpha * ~ Beta(\lambda m(t), \lambda(1-m(t))).} ##' The \code{betaij*}'s are generated similarly. ##' @title NL3 model: proposal distribution. ##' @param type One of the character strings \code{"r"} or \code{"d"}. ##' @param cur.par Current state of the chain. ##' @param prop.par Candidate parameter. ##' @param MCpar A list made of a single element: MCMC parameter. Re-centering parameter for the proposal distribution. ##' @param log Logical. Only used when \code{type =="d"}. Should the result be returned on the log-scale ? ##' @return Either the (log-)density of the proposal parameter \code{prop.par}, given \code{cur.par} (if \code{type == "d"}), or a proposal parameter (a vector), if \code{type =="r"}. ##' @export proposal.nl <- function(type = c("r", "d"), cur.par, prop.par, MCpar=get("nl.MCpar"), log=TRUE ) { sd <- rep(MCpar$sd,length(cur.par)) transfo <- function(x){logit(x)} invtransfo <- function(x){invlogit(x)} mean <- transfo(cur.par) if(type =="r") { return(invtransfo(rnorm(length(cur.par), mean=mean,sd=sd))) } if(type == "d") { vect.res=sapply(1:length(prop.par), function(j){dnorm(transfo(prop.par[j]), mean=mean[j], sd=sd[j], log=TRUE)}) return(ifelse(log,sum(vect.res),exp(sum(vect.res)) )) } stop("wrong type specification") ## mu.a=MCpar $ eps.recentre* rep(1/2,4) + ## (1-MCpar $ eps.recentre)* cur.par ## mu.b=1-mu.a ## Nu.ab=2/ MCpar $eps.recentre ## vect.a=mu.a*Nu.ab ## vect.b=mu.b*Nu.ab ## if(type=="r") ## { ## proposal=rbeta(4,shape1=vect.a,shape2=vect.b) ## return(proposal) ## } ## if(type =="d") ## { ## vect.res = sapply(1:4, ## function(j){dbeta(prop.par[j], ## shape1 = vect.a[j], shape2=vect.b[j], ## log=TRUE)} ) ## #browser() ## if(log) ## return(sum(vect.res)) ## else ## return(exp(sum(vect.res))) ## } } #nestlog.proposal()
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/proposal.nl.r
##' Density of the proposal distribution \code{q(cur.par,prop.par)} and random generator for MCMC algorithm in the PB model. ##' ##' The components \code{prop.par[i]} of the proposal parameter are generated independently, from the lognormal distribution: ##' ##' \code{prop.par = rlnorm(length(cur.par), meanlog=log(cur.par), ##' sdlog=rep(MCpar$sdlog,length(cur.par)))} ##' @title PB model: proposal distribution ##' @param type One of the character strings \code{"r"}, \code{"d"} ##' @param cur.par Current state of the chain ##' @param prop.par Candidate parameter ##' @param MCpar A list made of a single element: MCMC parameter for the standard deviation of the log-normal proposition, on the log scale. See \code{\link{pb.MCpar}} for the default value ##' @param log Logical. Only used when \code{type =="d"}. Should the result be returned on the log-scale ? ##' @return Either the (log-)density of the proposal \code{prop.par}, given \code{cur.par} (if \code{type == "d"}), or a proposal parameter (a vector), if \code{type =="r"}. ##' @examples \dontrun{ proposal.pb(type = "r", ##' cur.par = rep(1,4), MCpar=get("pb.MCpar")) ##' } ##' \dontrun{ proposal.pb(type = "d", cur.par = rep(1,4), ##' prop.par=rep(1.5,4), MCpar=get("pb.MCpar")) ##' } ##' @export proposal.pb <- function(type = c("r","d"), cur.par, prop.par, MCpar, log=TRUE) { sd <- rep(MCpar$sd,length(cur.par)) transfo <- function(x){log(x)} invtransfo <- function(x){exp(x)} mean <- transfo(cur.par) if(type =="r") { return(invtransfo(rnorm(length(cur.par), mean=mean,sd=sd))) } if(type == "d") { vect.res=sapply(1:length(prop.par), function(j){dnorm(transfo(prop.par[j]), mean=mean[j], sd=sd[j], log=TRUE)}) return(ifelse(log,sum(vect.res),exp(sum(vect.res)) )) } stop("wrong type specification") } ## new.lAlpha=rnorm(1,mean=log(par[1]), ## sd=sqrt(MCpar$sig_jumpA)) ## new.lBeta=rnorm( ## length(par)-1, ## mean=log(par[-1]), ## sd=sqrt(MCpar$sig_jumpB) ## ) ## return(exp(c(new.lALpha, new.lBeta))) ## }
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/proposal.pb.r
## rdirichlet.dm <- function (n, alpha) ## { ## l <- length(alpha) ## x <- matrix(rgamma(l * n, alpha), ncol = l, byrow = TRUE) ## sm <- apply(x, 1, sum) ## return(x/ sm ) ## } ##' @param n The number of angular points to be generated ##' @rdname ddirimix ##' @export rdirimix <- function(n=10, par=get("dm.expar.D3k3"), wei=par$wei, Mu=par$Mu , lnu=par$lnu ) { if(is.vector(Mu)) { Mu=matrix(Mu,ncol=1) } p=nrow(Mu) k=length(wei) ## dat=matrix(0,ncol=p, nrow=n) ## for(i in 1:n) ## { ## u=runif(1) ## cum_wei=cumsum(wei) ## lowers=cum_wei[cum_wei<u] ## m=length(lowers)+1 ## dat[i,]=rdirichlet(1,exp(lnu[m])*Mu[,m] ) ## } u <- runif(n) cum_wei <- cumsum(wei) ms <- sapply(1:n, function(i){ length(which(cum_wei<u[i])) + 1}) if(n>1){ # matpars <- matrix(Mu[,ms], ncol=length(ms))%*%diag(exp(lnu[ms])) matpars <-Mu[,ms]%*%diag(exp(lnu[ms])) } else matpars <- matrix(Mu[,ms]*exp(lnu[ms])) dat <- rdirichlet(n=n, alpha=matpars) return(dat ) }
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/rdirimix.r
##' The integral is approximated by a rectangular method, using the values stored in matrix \code{density}. ##' ##' Integration is made with respect to the Lebesgue measure on the projection of the simplex onto the plane \eqn{(x,y): x > 0, y > 0, x+y < 1}. ##' It is assumed that \code{density} has been constructed on a ##' grid obtained \emph{via} function \code{\link{discretize}}, ##' with argument \code{equi} set to \code{FALSE} and \code{npoints} ##' and \code{eps} equal to those passed to \code{rect.integrate}. ##' @title Density integration on the two-dimensional simplex ##' @inheritParams dgridplot ##' @inheritParams discretize ##' @return The value of the estimated integral of \code{density}. ##' @examples ##' wrapper <- function(x, y, my.fun,...) ##' { ##' sapply(seq_along(x), FUN = function(i) my.fun(x[i], y[i],...)) ##' } ##' ##' grid <- discretize(npoints=40,eps=1e-3,equi=FALSE) ##' ##' Density <- outer(grid$X,grid$Y,FUN=wrapper, ##' my.fun=function(x,y){10*((x/2)^2+y^2)}) ##' ##' rect.integrate(Density,npoints=40,eps=1e-3) ##' ##' @export rect.integrate <- function(density,npoints,eps) { wrapper <- function(x, y, my.fun,...) { sapply(seq_along(x), FUN = function(i) my.fun(x[i], y[i],...)) } grid <- discretize(npoints=npoints,eps=eps,equi=FALSE) mask <- outer(grid$X,grid$Y,FUN=wrapper, my.fun=function(x,y){(x+y)<1}) maskDensity=mask*density sum(maskDensity[2:npoints,2:npoints]) * ((1-3*eps)/(npoints-1))^2 + sum(maskDensity[1,(2:npoints)])* eps * (1-3*eps)/(npoints-1) + sum(maskDensity[(2:npoints),1]) * eps * (1-3*eps)/(npoints-1) + maskDensity[1,1]* eps^2 }
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/rect.integrate.r
## @param n The number of points on the simplex to be generated. ##' @rdname dpairbeta ##' @param threshold The radial threshold \eqn{r} above which the simulated points should be kept to build the angular dataset. Should be set to a high value, for the asymptotic approximation ##' \deqn{P(W \in B |\; ||X|| >r)\simeq H(B)}{P(W \in B | ||X|| >r) ~ H(B)} to hold. ##' @param return.points logical: should the censored vectorial dataset corresponding to the angular one be returned ? ##' @export rnestlog <- function(n=5,par=c(0.2,0.3,0.4,0.5), threshold=1000, return.points=FALSE) { Angles <- matrix(0,ncol=3,nrow=n) Points <- Angles alpha <- par[1] beta <- par[2:4] for(i in 1:n) { repeat { s <- rstable.posit(alpha=alpha) s12 <- rstable.posit(alpha=beta[1]) s13 <- rstable.posit(alpha=beta[2]) s23 <- rstable.posit(alpha=beta[3]) X1.12= (s12*(s/2)^(1/beta[1])/rexp(1) )^(alpha*beta[1]) X1.13= (s13*(s/2)^(1/beta[2])/rexp(1) )^(alpha*beta[2]) X1 = max(X1.12,X1.13) X2.12= (s12*(s/2)^(1/beta[1])/rexp(1) )^(alpha*beta[1]) X2.23= (s23*(s/2)^(1/beta[3])/rexp(1) )^(alpha*beta[3]) X2 = max(X2.12,X2.23) X3.13= (s13*(s/2)^(1/beta[2])/rexp(1) )^(alpha*beta[2]) X3.23= (s23*(s/2)^(1/beta[3])/rexp(1) )^(alpha*beta[3]) X3 = max(X3.13,X3.23) X=c(X1,X2,X3) if(sum(X)>threshold) { break } } Angle=X/sum(X) Angles[i,]=Angle Points[i,]=X } if(return.points) { return(list(Angles=Angles,Points=Points)) } else { return(Angles) } } ## rnestlog <- ## function(n=3, par=c(0.5,0.5), ## threshold=1000, return.points=FALSE) ## { ## Angles <- matrix(0,ncol=3,nrow=n) ## Points <- Angles ## alpha0 <- par[1] ## alpha12 <- par[2] ## for(i in 1:n) ## { ## repeat ## { ## s0=rstable.posit(alpha=alpha0) ## s12=rstable.posit(alpha=alpha12) ## E1=rexp(1) ## E2=rexp(1) ## E3=rexp(1) ## X1=(s0^(1/alpha12 )*s12 / E1)^(alpha0*alpha12) #^(1/alpha12 ) ## X2=(s0^(1/alpha12)*s12 / E2)^(alpha0*alpha12) #s0^(1/alpha12) ## X3= (s0/E3)^(alpha0) ## X=c(X1,X2,X3) ## if(sum(X)>threshold) ## { ## break ## } ## } ## Angle=X/sum(X) ## Angles[i,]=Angle ## Points[i,]=X ## } ## if(return.points) ## { ## return(list(Angles=Angles,Points=Points)) ## } ## else ## { ## return(Angles) ## } ## }
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/rnestlog.r
##' @title Dirichlet distribution: random generator ##' @param n Number of draws ##' @param alpha Dirichlet parameter: a vector of positive number ##' @return A matrix with \code{n} rows and \code{length(alpha)} columns ##' @export rdirichlet <- function (n=1, alpha) { if(is.vector(alpha)) l <- length(alpha) else l <- nrow(alpha) x <- matrix(rgamma(l * n, alpha), ncol = l, byrow = TRUE) sm <- apply(x, 1, sum) return(x/ sm ) } ##' @rdname dpairbeta ##' @param n The number of points on the simplex to be generated. ##' @param dimData the dimension of the sample space, which is \eqn{1 + } the dimension of the simplex. ##' @keywords datagen distribution ##' @export rpairbeta <- function(n=1, dimData=3, par= c(1, rep(1,3)) ) { p <- dimData if((choose(p,2)+1) != length(par) ) { stop("wrong parameter length")} alpha <- par[1] beta <- par[-1] res=matrix(0,ncol=p,nrow=n) #to store the result for(ll in 1:n) { ##choose uniformly a pair i,j: i_0=floor(runif(1,1,(p+1))) jStar=floor(runif(1,1,p)) if(jStar<i_0){j_0=jStar}else{j_0=jStar+1} i=min(i_0,j_0);j=max(i_0,j_0) place=(i-1)*(p-(i)/2)+j-i ##place=\sum_{k=0}^{i_1}(p-k) + (j-i) ##simulate r=wi+wj and theta=wi/r r_ij=rbeta(1,2*alpha+1,(p-2)*alpha) #2*alpha+1 theta=rbeta(1,beta[place],beta[place]) wi=theta*r_ij wj=(1-theta)*r_ij #simulate the remaining components (p-2), uniformly on the simplex : "sum(w_i)=1-r" remains=(1-r_ij)*rdirichlet(1,rep(1,(p-2))) #put together r,theta and the remainings W0=append(as.vector(remains),wi,after=(i-1)) W=append(W0,wj,after=(j-1)) #keep the result... res[ll,]= W } return(res) } ## @importFrom MCMCpack rdirichlet
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/rpairbeta.r
##' Random variable generator ##' ##' An alpha-stable random variable \eqn{S} with index \eqn{\alpha} is ##' defined by its Laplace transform ##' \eqn{E(exp(tS))) = exp(-t^\alpha)}. The algorithm used here is directly derived from Stephenson (2003). ##' @title Positive alpha-stable distribution. ##' @param alpha The parameter of the alpha-stable random variable ##' @return A realization of the alpha-stable random variable. ##' @references STEPHENSON, A. (2003). Simulating multivariate extreme value distributions of logistic type. \emph{Extremes 6, 49-59}. ##' @export rstable.posit <- function(alpha=0.5) { if(alpha==1) { return(1) } else { U=runif(1,0,pi) W=rexp(1) A1=sin((1-alpha)*U)/W A2=sin(alpha*U) A3=(sin(U))^(1/alpha) return(A1^((1-alpha)/alpha) *A2 / A3 ) } } test.rstable.posit <- function() { X=c() for(i in 1:5000) { X=c(X,rstable.posit(alpha=2/3)) } hist(X,probability=TRUE) ##,xlim=c(0,10), break = 100) } ##test.rstable.posit()
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/rstable.posit.R
##' Computes the Kullback-Leibler divergence and the \eqn{L^2} distance between the "true" density (\code{true.dens}) and an estimated density (\code{est.dens}). ##' ##' The integration is made \emph{via} \code{\link{rect.integrate}}: The discretization grid corresponding to the two matrices must be constructed ##' with \code{discretize(npoints, eps, equi=FALSE)}. ##' @title Logarithmic score and \eqn{L^2} distance between two densities on the simplex (trivariate case). ##' @param true.dens A \code{npoints*npoints} matrix: The reference density, typically the distribution from which data was simulated. Must be a valid \code{density} argument to be passed to \code{dgridplot}, with \code{equi=FALSE}. ##' @param est.dens The estimated density: of the same type as \code{true.dens}. ##' @param npoints Number of grid points used to construct the density matrices (see \code{\link{discretize}}). ##' @param eps Minimum distance from a grid point to the simplex boundary (see \code{\link{discretize}}). ##' @return A list made of ##' \itemize{ ##' \item \code{check.true}: The result of the rectangular integration of ##' \code{true.dens}. It should be equal to one. If not, re size the grid. ##' \item \code{check.true}: ##' Idem, replacing \code{true.dens} with \code{est.dens}. ##' \item \code{L2score}: The estimated \eqn{L^2} distance. ##' \item \code{KLscore}: The estimated Kullback-Leibler divergence between the two re-normalized densities, using \code{check.true} and \code{check.est} as normalizing constants (this ensures that the divergence is always positive). ##' } ##' @examples ##' dens1=dpairbeta.grid(par=c(0.8,2,5,8),npoints=150,eps=1e-3, ##' equi=FALSE) ##' dens2=dnestlog.grid(par=c(0.5,0.8,0.4,0.6),npoints=150,eps=1e-3, equi=FALSE) ##' ##' scores3D(true.dens=dens1, ##' est.dens=dens2, ##' npoints=150, eps=1e-4) ##' ##' @export scores3D=function(true.dens, est.dens, npoints, eps) { check.true=rect.integrate(density=true.dens, npoints=npoints,eps=eps) check.est=rect.integrate(density=est.dens, npoints=npoints,eps=eps) L2.score= ( rect.integrate(density= (est.dens - true.dens)^2, npoints=npoints,eps=eps) )^(1/2) KL.score= rect.integrate( density= (log(true.dens+(true.dens==0))-log(check.true)- log(est.dens+ (est.dens==0))+ log(check.est) ) * true.dens/check.true, npoints=npoints,eps=eps) return(list(check.true=check.true, check.est=check.est, L2score=L2.score, KLscore=KL.score)) } ## dens1=dpairbeta.grid(par=c(0.8,2,5,8),npoints=150,eps=1e-3, equi=FALSE) ## dens2=dnestlog.grid(par=c(0.5,0.8),npoints=150,eps=1e-3, equi=FALSE) ## scores3D(true.dens=dens1, ## est.dens=dens2, ## npoints=150, eps=1e-4) ## graphics.off() ## dens1=dpairbeta.grid(par=c(0.8,2,5,8),npoints=100,eps=1e-3, equi=T,displ=T) ## dev.new() ## dens2=dnestlog.grid(par=c(0.5,0.8),npoints=100,eps=1e-3, equi=T,displ=T) ## dens2=dpairbeta.grid(par=c(0.8,5,10,2),npoints=100,eps=1e-3, equi=T,displ=T)
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/scores3D.r
##' Switching coordinates system between equilateral and right-angled ##' representation of the two-dimensional simplex. ##' ##' If \code{transf.to.rect}, is called, \code{vect} must belongs to the triangle \eqn{[(0,0), (\sqrt(2), 0), (\sqrt(2)/2,\sqrt(3/2) ) ]}{[(0,0), (\sqrt(2), 0), (\sqrt(2)/2,\sqrt(3/2) ) ]} ##' and the result lies in \eqn{([(0,0), (1,0), (0,1)]}{([(0,0), (1,0), (0,1)]}. \code{transf.to.equi} is the reciprocal. ##' @title Linear coordinate transformations ##' @param vect a bi-variate vector, giving the first two coordinates of the angular point to be transformed. ##' @return The vector obtained by linear transformation. ##' @author Anne Sabourin ##' @aliases transf.to.rect ##' @examples \dontrun{ transf.to.equi(c(sqrt(2)/2, sqrt(3/8) ) )} ##' @export transf.to.equi transf.to.equi <- function(vect) { M=rbind( c(sqrt(2), sqrt(2)/2) , c(0 , sqrt(3/2)) ) return(as.vector( M%*%t(t(vect)) ) ) }
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/transf.to.equi.r
##' @rdname transf.to.equi ##' @export transf.to.rect transf.to.rect=function(vect) { M=1/sqrt(3)*rbind(c(sqrt(3/2),-sqrt(2)/2), c(0,sqrt(2)) ) return(as.vector( M%*%t(t(vect)) ) ) }
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/transf.to.rect.r
wrapper <- function(x, y, FUN,...) #internal { sapply(seq_along(x), FUN = function(i) FUN(x[i], y[i],...)) }
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/wrapper.R
##' @import utils ##' @import coda ##' @useDynLib BMAmevt, .registration=TRUE .onAttach=function(libname,pkgname = "BMAmevt") { ## data(frechetdat) ## data(Leeds) ## data(nl.Hpar) ## data(nl.MCpar) ## data(pb.Hpar) ## data(pb.MCpar) ## data(dm.expar.D3k3) ## data(winterdat) set.seed(1) } .onUnload = function(libpath) { library.dynam.unload(chname="BMAmevt",libpath) }
/scratch/gouwar.j/cran-all/cranData/BMAmevt/R/zzz.r
#' bmrb_download #' #' Main function that download the BMRB file from www.bmrb.wisc.edu repo. It will download file in nmr-star3.1 format. #' @import utils #' @param id_list (Required) A list of file ids that are corresponding to the requested files. #' @param output_dir (Required) Location were downloaded file will be saved. #' @param base_url Default location is http://www.bmrb.wisc.edu/ftp/pub/bmrb/entry_lists/nmr-star3.1/. #' @param verbose Boolean parameter. If set to be "True", the downloader will output detailed results in the console. #' @return Save file in the output_dir location #' @examples #' download_dir = tempdir() #' bmrb_download(965, download_dir) #' @export bmrb_download bmrb_download <- function(id_list, output_dir, base_url = "http://www.bmrb.wisc.edu/ftp/pub/bmrb/entry_lists/nmr-star3.1", verbose=TRUE) { prefix <- "bmr" extension <- "str" if(!dir.exists(file.path(output_dir))) {dir.create(file.path(output_dir))} id_list <- gsub('([[:alpha:]]+)', '', id_list) print("Downloading...") for (bmrb_id in id_list) { file_url = paste0(base_url, "/", prefix, bmrb_id, ".", extension) file_name = paste0(output_dir, "/", prefix, bmrb_id, ".", extension) if (file.exists(file_name)) { print(paste0("Skipping ", prefix, bmrb_id, ", it's already exists.")) } else{ tryCatch( { if (verbose) { cat(paste0("Processing: ", prefix, bmrb_id)) } utils::download.file(file_url, file_name, quiet = TRUE) print(paste0("Downloaded: ", prefix, bmrb_id)) }, error = function(e) { print(e) } ) } } }
/scratch/gouwar.j/cran-all/cranData/BMRBr/R/bmrb_download.R
#' collect_ids #' #' Function will parse all the files of BMRB nmr-star 3.1 repo and return all the available files that are available for downloading. #' @import xml2 rvest #' @param base_url (optional) The BMRB entry list page for nmr-star3.1, http://www.bmrb.wisc.edu/ftp/pub/bmrb/entry_lists/nmr-star3.1/. #' @param to_list (optional) whether to output as a list of ids. #' @return BMRB_files. This could be a list of ids if output, if 'to_list' is set to be True, otherwise, it will return a html table. #' @examples #' # collect_ids(to_list=TRUE) # It will take more than 5 sec #' @export collect_ids collect_ids <- function(base_url="http://www.bmrb.wisc.edu/ftp/pub/bmrb/entry_lists/nmr-star3.1/", to_list = FALSE) { print("Parsing data, it might take a while ...\n") webpage<- xml2::read_html(base_url) table <-rvest::html_nodes(webpage, "table") table <- rvest::html_table(table, fill = TRUE)[[1]] BMRB_files <- table[-c(1,2), -c(1,5)] if (to_list) { return(BMRB_files["Name"]) }else{ return(BMRB_files) } }
/scratch/gouwar.j/cran-all/cranData/BMRBr/R/collect_ids.R
## ---- results='asis'----------------------------------------------------- library(BMRBr) ## ----eval=FALSE---------------------------------------------------------- # bmrb_download(965, "/Users/download") ## ----eval=FALSE---------------------------------------------------------- # bmrb_download(c(965, 966, 967), "/Users/download") ## ----eval=FALSE---------------------------------------------------------- # bmrb_list = collect_ids(to_list = TRUE) # bmrb_list[c(1:10),] ## ----eval=FALSE---------------------------------------------------------- # bmrb_table = collect_ids(to_list = FALSE) # bmrb_table[c(1:10),]
/scratch/gouwar.j/cran-all/cranData/BMRBr/inst/doc/vignette.R
--- title: "BMRBr Package" author: "Xi Chen" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{BMRBr Package} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- BMRB File Downloader is a R package that allows user to download individual files from Biological Magnetic Resonance Data Bank (BMRB) by provide their file IDs. ## Usage: ### 1. Load the BMRBr package: ```{r, results='asis'} library(BMRBr) ``` ### 2. Download file from BMRB repo: ```{r,eval=FALSE} bmrb_download(965, "/Users/download") ``` ```{r,eval=FALSE} bmrb_download(c(965, 966, 967), "/Users/download") ``` ### 3. Get a list/table of files that are aviailable to downoad from BMRB repo: ```{r,eval=FALSE} bmrb_list = collect_ids(to_list = TRUE) bmrb_list[c(1:10),] ``` ```{r,eval=FALSE} bmrb_table = collect_ids(to_list = FALSE) bmrb_table[c(1:10),] ```
/scratch/gouwar.j/cran-all/cranData/BMRBr/inst/doc/vignette.Rmd
--- title: "BMRBr Package" author: "Xi Chen" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{BMRBr Package} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- BMRB File Downloader is a R package that allows user to download individual files from Biological Magnetic Resonance Data Bank (BMRB) by provide their file IDs. ## Usage: ### 1. Load the BMRBr package: ```{r, results='asis'} library(BMRBr) ``` ### 2. Download file from BMRB repo: ```{r,eval=FALSE} bmrb_download(965, "/Users/download") ``` ```{r,eval=FALSE} bmrb_download(c(965, 966, 967), "/Users/download") ``` ### 3. Get a list/table of files that are aviailable to downoad from BMRB repo: ```{r,eval=FALSE} bmrb_list = collect_ids(to_list = TRUE) bmrb_list[c(1:10),] ``` ```{r,eval=FALSE} bmrb_table = collect_ids(to_list = FALSE) bmrb_table[c(1:10),] ```
/scratch/gouwar.j/cran-all/cranData/BMRBr/vignettes/vignette.Rmd
#' @import pracma #' @importFrom fields image.plot #' @importFrom logOfGamma gammaln #' @importFrom multicool Stirling2 #' @importFrom MCMCpack rdirichlet #' @importFrom grDevices rgb terrain.colors #' @importFrom graphics abline axis barplot grid hist lines polygon text #' @importFrom stats IQR acf dgamma kmeans quantile rbeta rbinom rgamma rmultinom runif sd #' @importFrom utils combn NULL #' Bayesian Markov Renewal Mixed Models (BMRMMs) #' #' Provides inference results of both transition probabilities and duration times using BMRMMs. #' #' Users have the option to ignore duration times or model duration times as #' a discrete or continuous variable via defining `duration.distr`. #' #' `duration.distr` can be one of the following: \cr #' \itemize{ #' \item{`NULL`}{ duration times are ignored. This is the default setting.} #' \item{`list('mixgamma', shape, rate)`}{ duration times are modeled as a mixture gamma variable. `shape` and `rate` #' must be numeric vectors of the same length. The length indicates the number of mixture components.} #' \item{`list('mixDirichlet', unit)`}{ duration times are modeled as a new state with discretization `unit`. The duration #' state is then analyzed along with the original states. For example, if an duration time entry is 20 and `unit` is 5, #' then the model will add 4 consecutive new states. If an duration time entry is 23.33 and `unit` is 5, then the model #' will still add 4 consecutive new states as the blocks are calculated with the floor operation.} #' } #' #'@return An object of class `BMRMM` consisting of `results.trans` and `results.duration` if duration times are analyzed as a continuous variable. \cr #' #' The field `results.trans` is a data frame giving the inference results of transition probabilities. #'\tabular{ll}{ #' `covs` \tab covariates levels for each row of the data. \cr #' `dpreds` \tab maximum level for each related covariate. \cr #' `MCMCparams` \tab MCMC parameters including simsize, burnin and thinning factor. \cr #' `tp.exgns.post.mean`\tab posterior mean of transition probabilities for different combinations of covariates. \cr #' `tp.exgns.post.std` \tab posterior standard deviation of transition probabilities for different combinations of covariates. \cr #' `tp.anmls.post.mean` \tab posterior mean of transition probabilities for different individuals. \cr #' `tp.anmls.post.std` \tab posterior standard deviation of transition probabilities for different individuals. \cr #' `tp.all.post.mean` \tab posterior mean of transition probabilities for different combinations of covariates AND different individuals. \cr #' `tp.exgns.diffs.store` \tab difference in posterior mean of transition probabilities for every pair of covariate levels given levels of the other covariates. \cr #' `tp.exgns.all.itns` \tab population-level transition probabilities for every MCMC iteration. \cr #' `clusters` \tab number of clusters for each covariate for each MCMC iteration. \cr #' `type` \tab a string identifier for results, which is "Transition Probabilities". \cr #' `cov.labels` \tab a list of string vectors giving labels of covariate levels. \cr #' `state.labels` \tab a list of strings giving labels of states. \cr #'} #' The field `results.duration` is a data frame giving the inference results of duration times. #'\tabular{ll}{ #' `covs` \tab covariates related to duration times.\cr #' `dpreds` \tab maximum level for each related covariate.\cr #' `MCMCparams` \tab MCMC parameters: simsize, burnin and thinning factor.\cr #' `duration.times` \tab duration times from the data set.\cr #' `comp.assignment` \tab mixture component assignment for each data point in the last MCMC iteration.\cr #' `duration.exgns.store` \tab posterior mean of mixture probabilities for different combinations of covariates of each MCMC iteration.\cr #' `marginal.prob` \tab estimated marginal mixture probabilities for each MCMC iteration.\cr #' `shape.samples` \tab estimated shape parameters for gamma mixtures for each MCMC iteration.\cr #' `rate.samples` \tab estimated rate parameters for gamma mixtures for each MCMC iteration.\cr #' `clusters` \tab number of clusters for each covariate for each MCMC iteration.\cr #' `type` \tab a string identifier for results, which is "Duration Times".\cr #' `cov.labels` \tab a list of string vectors giving labels of covariate levels. \cr #'} #' #'@examples #' #' # In the examples, we use a shorted version of the foxp2 dataset, foxp2sm #' #' # ignores duration times and only models transition probabilities using all three covariates #' results <- BMRMM(foxp2sm, num.cov = 2, simsize = 50) #' #' # models duration times as a continuous variable with 3 gamma mixture components, #' results <- BMRMM(foxp2sm, num.cov = 2, simsize = 50, #' duration.distr = list('mixgamma', shape = rep(1,3), rate = rep(1,3))) #' #' # models duration times as a discrete state with discretization 0.025 and #' results <- BMRMM(foxp2sm, num.cov = 2, simsize = 50, #' duration.distr = list('mixDirichlet', unit = 0.025)) #' #' #' @author Yutong Wu, \email{yutong.wu@@utexas.edu} #' #' @param data a data frame containing -- individual ID, covariate values, previous state, current state, duration times (if applicable), in that order. #' @param num.cov total number of covariates provided in `data`. #' @param cov.labels a list of vectors giving names of the covariate levels. Default is a list of numerical vectors. #' @param state.labels a vector giving names of the states. Default is a numerical vector. #' @param random.effect `TRUE` if population-level effects are considered. Default is `TRUE`. #' @param fixed.effect `TRUE` if individual-level effects are considered. Default is `TRUE`. #' @param trans.cov.index a numeric vector indicating the indices of covariates that are used for transition probabilities. Default is all of the covariates. #' @param duration.cov.index a numeric vector indicating the indices of covariates that are used for duration times. Default is all of the covariates. #' @param duration.distr a list of arguments indicating the distribution of duration times. Default is `NULL`, which is ignoring duration times. #' @param duration.incl.prev.state `TRUE` if the previous state is included in the inference of duration times. Default is `TRUE`. #' @param simsize total number of MCMC iterations. Default is 10000. #' @param burnin number of burn-ins for the MCMC iterations. Default is `simsize`/2. #' #' @export BMRMM <- function(data,num.cov,cov.labels=NULL,state.labels=NULL,random.effect=TRUE,fixed.effect=TRUE, trans.cov.index=1:num.cov,duration.cov.index=1:num.cov,duration.distr=NULL, duration.incl.prev.state=TRUE,simsize=10000,burnin=simsize/2) { if(num.cov>5 || num.cov==0) { stop("The model takes 1 to 5 covariates.") } # data should be in format: id, cov 1, ..., cov p, prev state, cur state, (duration if applicable) num_col <- ncol(data) id <- data[,1] covs <- as.matrix(data[,2:(num.cov+1)]) prev_state <- data[,1+num.cov+1] cur_state <- data[,1+num.cov+2] # assign covariate labels if not given if(missing(cov.labels)){ cov.labels <- apply(covs, 2, function(x) 1:length(unique(x))) } # assign state labels if not given if(missing(state.labels)){ state.labels <- sort(union(unique(prev_state),unique(cur_state))) } # all covs are taken if none specified if (ncol(covs)==1) { trans_covs <- as.matrix(data[,2]) duration_covs <- as.matrix(data[,2]) colnames(trans_covs) <- colnames(data)[2] colnames(duration_covs) <- colnames(data)[2] } else { trans_covs <- covs[,trans.cov.index] duration_covs <- covs[,duration.cov.index] } trans.cov.labels <- cov.labels[trans.cov.index] duration.cov.labels <- cov.labels[duration.cov.index] if(duration.incl.prev.state) { duration_covs <- cbind(duration_covs,prev_state) duration.cov.labels$prev_state <- state.labels } # get results res_duration <- NULL if(missing(duration.distr)) { res_trans <- model_transition(data.frame(cbind(id,trans_covs,prev_state,cur_state)),random.effect,fixed.effect,simsize,burnin) } else if('mixDirichlet' %in% duration.distr) { if(is.null(duration.distr$unit)) { stop("Must specify 'unit' if 'duration.distr' contains 'mixDirichlet'.") } else { state.labels <- append(state.labels,'dur.state') duration <- data[,num_col] aug_data <- add_isi_as_state(data.frame(cbind(id,trans_covs,prev_state,cur_state,duration)),duration.distr$unit) res_trans <- model_transition(aug_data,random.effect,fixed.effect,simsize,burnin) } } else if('mixgamma' %in% duration.distr) { if (is.null(duration.distr$shape) || is.null(duration.distr$rate)) { stop("Must specify both 'shape' and 'rate' if 'duration.distr' contains 'mixgamma'.") } else if (length(duration.distr$shape) != length(duration.distr$rate)) { stop("'shape' and 'rate' must have the same dimension.") } else { duration <- data[,num_col] data_for_trans <- data.frame(cbind(id,trans_covs,prev_state,cur_state)) data_for_duration <- data.frame(cbind(id,duration_covs,duration)) res_trans <- model_transition(data_for_trans,random.effect,fixed.effect,simsize,burnin) res_duration <- model_cont_isi(data_for_duration,random.effect,fixed.effect,simsize=simsize,burnin=burnin,K=length(duration.distr$shape),duration.distr$shape,duration.distr$rate) } } else { stop("The argument 'duration.distr' must be a list containing either 'mixDirichlet' or 'mixgamma'.") } # return results res_trans$state.labels <- state.labels res_trans$cov.labels <- trans.cov.labels if (!is.null(res_duration)) { res_duration$cov.labels <- duration.cov.labels results <- list("results.trans"=res_trans,"results.duration"=res_duration) } else { results <- list("results.trans"=res_trans) } class(results) <- "BMRMM" return(results) }
/scratch/gouwar.j/cran-all/cranData/BMRMM/R/BMRMM.R
######################################## ### Treating ISI as a Discrete State ### ######################################## # This function is called when 'isi_type' is 'Discrete' # We divide each ISI by 'isi_unit' and treat each ISI as a block of a new state # For example, if ISI=20 and isi_unit=5, then we will add 4 consecutive "ISI" states ############# ### Input ### ############# # data <- original data set # isi_unit <- specifies the unit of each block of ISI ############## ### Output ### ############## # aug_data <- augmented data set by adding ISI as a new state add_isi_as_state <- function(data,isi_unit) { aug_data <- NULL isi_ind <- ncol(data) cur_state_ind <- ncol(data)-1 prev_state_ind <- ncol(data)-2 num_states <- length(unique(c(data[,prev_state_ind],data[,cur_state_ind]))) new_state <- num_states+1 for(row in 1:nrow(data)) { num_block <- floor(data[row,isi_ind]/isi_unit) if(row != nrow(data) && data[row,1]==data[row+1,1] && num_block>0) { attr <- as.numeric(data[row,1:(prev_state_ind-1)]) prev_state <- data[row,prev_state_ind] cur_state <- data[row,cur_state_ind] aug_data <- rbind(aug_data,c(attr,prev_state,new_state)) if(num_block>1) { aug_data <- rbind(aug_data,c(attr,new_state,new_state)) } aug_data <- rbind(aug_data,c(attr,new_state,cur_state)) } else { aug_data <- rbind(aug_data,as.numeric(data[row,1:cur_state_ind])) } } aug_data <- data.frame(aug_data) colnames(aug_data) <- colnames(data)[1:cur_state_ind] return(aug_data) }
/scratch/gouwar.j/cran-all/cranData/BMRMM/R/add_isi_as_state.R
#' Simulated FoxP2 Data Set. #' #' A simulated data set of the original FoxP2 data set, which contains #' the sequences of syllables sung by male mice of #' different genotypes under various social contexts. #' #' @format A data frame with 17391 rows and 6 variables: #' \describe{ #' \item{Id}{Mouse Id} #' \item{Genotype}{Genotype of the mouse, 1 = FoxP2 knocked out, 2 = wild type} #' \item{Context}{Social context for the mouse, 1 = U (urine sample placed in the cage), 2 = L (living female mouse placed in the cage), 3 = A (an anesthetized female placed on the lid of the cage)} #' \item{Prev_State}{The previous syllable, \{1,2,3,4\} = \{d,m,s,u\}} #' \item{Cur_State}{The current syllable, \{1,2,3,4\} = \{d,m,s,u\}} #' \item{Transformed_ISI}{Modified inter-syllable interval times, log(original ISI + 1)} #' } #' #' @references #' Chabout, J., Sarkar, A., Patel, S. R., Radden, T., Dunson, D. B., Fisher, S. E., & Jarvis, E. D. (2016). A Foxp2 mutation implicated in human speech deficits alters sequencing of ultrasonic vocalizations in adult male mice. Frontiers in behavioral neuroscience, 10, 197. \cr #' #' Wu, Y., Jarvis E. D., & Sarkar, A. (2023). Bayesian semiparametric Markov renewal mixed models for vocalization syntax. Biostatistics, To appear. #' "foxp2"
/scratch/gouwar.j/cran-all/cranData/BMRMM/R/foxp2.R
#' Shortened Simulated FoxP2 Data Set. #' #' A shortened version of the `foxp2` data set for demonstrating R examples. #' See details of the `foxp2` data set by calling ?foxp2. #' "foxp2sm"
/scratch/gouwar.j/cran-all/cranData/BMRMM/R/foxp2sm.R
################################################################### ### This function is used when ISI is modeled as mixture gammas ### ################################################################### model_cont_isi <- function(data,random_effect,fixed_effect,simsize,burnin,K,init_alpha,init_beta) { ################## process data ################## # construct isi data and covs for simulation num_row <- nrow(data) covs <- data[,2:(ncol(data)-1)] # load covariate values mouseId <- data[,1] isi <- data[,ncol(data)] ################## initialization ################## p <- ncol(covs) # number of covariates covs.max <- as.vector(apply(covs,2,function(x) length(unique(x)))) # size of each covariate all_combs <- unique(covs) num_combs <- nrow(all_combs) z.max <- K # number of mixture components if (!fixed_effect){ M00 <- ones(1,p) } else { M00 <- covs.max } M <- matrix(0,nrow=simsize,ncol=p) # record # of clusters for each iteration clusts <- kmeans(isi,z.max)$cluster # get clusters via kmeans z.vec <- clusts # initialization for mixture component assignment lambda00 <- sapply(1:z.max,function(k) sum(clusts==k)/length(clusts)) # lambda_{isi,00} lambdaalpha0 <- 1 # alpha_{isi,00} lambda0 <- lambda00 # lambda_{isi,0} lambdaalpha <- 0.01 # alpha_{isi,0} lambda <- array(lambda00,dim=c(z.max,1)) # lambda_{g1,g2,g3} # individual probability and lambda if (random_effect && fixed_effect) { piv <- matrix(0.8,nrow=max(mouseId),ncol=z.max) # probability for population-level effect } else if (random_effect) { piv <- matrix(0,nrow=max(mouseId),ncol=z.max) # probability for population-level effect } else { piv <- matrix(1,nrow=max(mouseId),ncol=z.max) # probability for population-level effect } v <- sample(0:1,num_row,prob=c(piv[1,1],1-piv[1,1]),replace=TRUE) # v=0: exgns, v=1: anmls lambda_mice <- matrix(rep(lambda00,each=max(mouseId)),nrow=max(mouseId)) lambda_mice_alpha <- 0.01 # alpha_{isi}^{0} # record all marginal prob for gamma mixture of each iteration prob_mat <- matrix(0,nrow=simsize,ncol=z.max) # mixture gamma parameters Alpha <- matrix(1,nrow=simsize,ncol=z.max) # shape Beta <- matrix(1,nrow=simsize,ncol=z.max) # rate # cluster assignment for each covariate if (!fixed_effect) { z.cov <- ones(num_row,p) # initially, each covariate forms one cluster G <- matrix(1,nrow=p,ncol=max(covs.max)) } else { z.cov <- as.matrix(covs) # initially, each covariate forms its own cluster G <- matrix(0,nrow=p,ncol=max(covs.max)) for(pp in 1:p){ G[pp,1:covs.max[pp]] <- 1:covs.max[pp] } } # pM: prior probabilities of # of clusters for each covariate phi <- 0.25 pM <- matrix(0,p,max(covs.max)) for(pp in 1:p){ pM[pp,1:covs.max[pp]] <- exp(-(pp*(1:covs.max[pp])*phi-1)) # prior probability for k_{pp}, #cluster for covariate pp pM[pp,] <- pM[pp,]/sum(pM[pp,]) } # cM: for each covariate, # of ways to partition each j elements into two empty groups, 1<=j<=(size of covariate) cM <- matrix(0,p,max(covs.max)) # There are (2^r-2)/2 ways to split r objects into two non-empty groups. for(pp in 1:p) # (Consider 2 cells and r objects, subtract 2 for the two cases in which cM[pp,1:covs.max[pp]] <- c(2^(1:covs.max[pp])-2)/2 # one cell receives all objects, divide by 2 for symmetry.) # MCMC storage lambda0_all <- matrix(0,nrow=simsize,ncol=z.max) # store lambda_{0} lambda_all <- array(0,dim=c(simsize,covs.max,z.max)) # store lambda_{g1,g2,g3} lambda_mice_all <- array(0,dim=c(simsize,max(mouseId),z.max)) # store lambda^{(i)} lambdaalpha_all <- rep(0,simsize) # store alpha_0 lambdaalpha_mice_all <- rep(0,simsize) # store alpha^0 ################## simulation ################## for(iii in 1:simsize) { M[iii,] <- M00 prob_mat[iii,] <- as.numeric(table(factor(z.vec,levels=1:z.max))/num_row) if(iii%%100==0) print(paste("Duration Times: Iteration ",iii)) # update # cluster for each covariate and cluster mapping if(iii > 1 && fixed_effect) { # update cluster indicator z M00 <- M[iii,] for(pp in 1:p){ M0 <- M00[pp] # current # of clusters of covariate pp if(M0==1) { # propose to split one cluster into 2 new <- rbinom(covs.max[pp]-1,size=1,prob=0.5) # propose new mapping for (d_{pp}-1) values at 1 while(sum(new)==0) new <- rbinom(covs.max[pp]-1,size=1,prob=0.5) GG <- G[pp,1:covs.max[pp]] + c(0,new) # keep the first one at 1, propose new cluster mappings for the other (d_{pp}-1) levels of x_{pp} zz <- z.cov # zz initiated at z zz[,pp] <- GG[covs[,pp]] # proposed new zz by mapping to new cluster configurations for the observed values of x_{pp} ind2 <- which(M00>1) # current set of relevant predictors if(length(ind2)==0) # if no predictor is currently important ind2 <- 1 MM <- M00 # MM initiated at {k_{1},...,k_{p}}, current values, with k_{pp}=1 by the if condition MM[pp] <- 2 # proposed new value of k_{pp}=2 ind1 <- which(MM>1) # proposed set of important predictors, now includes x_{pp}, since MM[pp]=2 logR <- isi_logml(zz[,ind1],z.vec,MM[ind1],pM[ind1,],lambdaalpha*lambda0,z.max)-isi_logml(z.cov[,ind2],z.vec,M00[ind2],pM[ind2,],lambdaalpha*lambda0,z.max) logR <- logR+log(0.5)+log(cM[pp,covs.max[pp]]) # computational reasons if(log(runif(1))<logR) { G[pp,1:covs.max[pp]] <- GG M00 <- MM z.cov <- zz np <- length(ind2) } } if((M0>1)&&(M0<covs.max[pp])){ if(runif(1)<0.5) { # with prob 0.5 split one mapped value into two df <- sort(G[pp,1:covs.max[pp]]) # cluster mapping for covariate pp z0 <- unique(df) # z0 are unique cluster mappings, mm contains their positions mm <- which(!duplicated(df)) # mm contains the positions when they first appear on {1,...,n} gn <- c(diff(mm),size(df,2)-mm[length(mm)]+1) # frequencies of z0 pgn <- cM[pp,gn]/sum(cM[pp,gn]) # see the definition of cM rr <- sum(rmultinom(1,size=1,prob=pgn)*(1:M0)) # rr is the state to split, gn[rr] is the frequency of rr new <- rmultinom(1,size=1,0.5*rep(1,gn[rr]-1)) # propose new mapping for (gn(rr)-1) values at rr while(sum(new)==0) new <- rmultinom(1,size=1,0.5*rep(1,gn[rr]-1)) GG <- G[pp,1:covs.max[pp]] GG[GG==rr] <- rr+(M0+1-rr)*c(0,new) # keep first value at rr, propose new mapping (M0+1) for the rest of (gn[rr]-1) values at rr zz <- z.cov # zz initiated at z.cov zz[,pp] <- GG[covs[,pp]] # proposed new zz by mapping to new cluster configurations for the observed values of x_{pp} MM <- M00 # MM initizted at current values {k_{1},...,k_{p}} MM[pp] <- M0+1 # proposed new value of k_{pp}, since one original mapped value is split into two ind1 <- which(MM>1) # proposed set of important predictors, now includes x_{pp}, since MM[pp]=2 ind2 <- which(M00>1) # current set of important predictors if(length(ind2)==0) # if no predictor is currently important ind2 <- 1 logR <- isi_logml(zz[,ind1],z.vec,MM[ind1],pM[ind1,],lambdaalpha*lambda0,z.max)-isi_logml(z.cov[,ind2],z.vec,M00[ind2],pM[ind2,],lambdaalpha*lambda0,z.max) if(M00[pp]<(covs.max[pp]-1)){ logR = logR-log(M0*(M0+1)/2)+log(sum(cM[pp,gn])) } else { logR = logR-log(covs.max[pp]*(covs.max[pp]-1)/2)-log(0.5) # note that by replacing -log(0.5) with +log(2), we get the same reasoning as above } if(log(runif(1))<logR) { G[pp,1:covs.max[pp]] <- GG M00 <- MM z.cov <- zz np <- length(ind2) } } else { # with prob 0.5 merge two mapped values into one znew <- sample(M0,2) lnew <- max(znew) snew <- min(znew) GG <- G[pp,1:covs.max[pp]] GG[GG==lnew] <- snew # replace all lnews by snews GG[GG==M0] <- lnew # replace the largest cluster mapping by lnew (lnew itself may equal M0, in which case GG remains unchanged by this move) zz <- z.cov # zz initiated at z.cov zz[,pp] <- GG[covs[,pp]] # proposed new zz by mapping to new cluster configurations for the observed values of x_{pp} MM <- M00 # MM initizted at current values {k_{1},...,k_{p}}, with k_{pp}=d_{pp} by the if condition MM[pp] <- M00[pp]-1 # proposed new value of k_{pp}, since two mappings are merged ind1 <- which(MM>1) # proposed set of important predictors, may not include x_{pp} if original k_(j) was at 2 ind2 <- which(M00>1) # current set of important predictors if(length(ind1)==0) ind1 <- 1 if(length(ind2)==0) ind2 <- 1 logR <- isi_logml(zz[,ind1],z.vec,MM[ind1],pM[ind1,],lambdaalpha*lambda0,z.max)-isi_logml(z.cov[,ind2],z.vec,M00[ind2],pM[ind2,],lambdaalpha*lambda0,z.max) if(M0>2) { df <- sort(GG) z0 <- unique(df) # z0 are unique cluster mappings, mm contains their positions mm <- which(!duplicated(df)) # mm contains the positions when they first appear on {1,...,n} gn <- c(diff(mm),size(df,2)-mm[length(mm)]+1) # frequencies of z0 logR <- logR-log(sum(cM[pp,gn]))+log(M00[pp]*(M00[pp]-1)/2) } else { logR <- logR-log(cM[pp,covs.max[pp]])-log(0.5) } if(log(runif(1))<logR) { G[pp,1:covs.max[pp]] <- GG M00 <- MM z.cov <- zz np <- length(ind2) } } } if(M0==covs.max[pp]){ znew <- sample(covs.max[pp],2) lnew <- max(znew) snew <- min(znew) GG <- G[pp,1:covs.max[pp]] GG[GG==lnew] <- snew # replace all lnews by snews GG[GG==M0] <- lnew # replace the largest cluster mapping d_{pp} by lnew (lnew itself can be d_{pp}, in which case GG remains unchanged by this move) zz <- z.cov # zz initiated at z.cov zz[,pp] <- GG[covs[,pp]] # proposed new z_{,pp} as per the proposed new cluster mappings of the levels of x_{pp} MM <- M00 # MM initizted at current values {k_{1},...,k_{p}}, with k_{pp}=d_{pp} by the if condition MM[pp] <- covs.max[pp]-1 # proposed new value of k_{pp}, since originally k_{pp}=d_{pp} and now two mappings are merged ind1 <- which(MM>1) # proposed set of important predictors, does not include x_{pp} when d_{pp}=2 if(length(ind1)==0) ind1 <- 1 ind2 <- which(M00>1) # current set of important predictors if(length(ind2)==0) ind2 <- 1 logR <- isi_logml(zz[,ind1],z.vec,MM[ind1],pM[ind1,],lambdaalpha*lambda0,z.max)-isi_logml(z.cov[,ind2],z.vec,M00[ind2],pM[ind2,],lambdaalpha*lambda0,z.max) logR <- logR+log(0.5)+log(covs.max[pp]*(covs.max[pp]-1)/2) if(log(runif(1))<logR) { G[pp,1:covs.max[pp]] <- GG M00 <- MM z.cov <- zz } } # end of #cluster appointment # start of cluster mapping if(M00[pp]>1){ # propose a new cluster index mapping zz <- z.cov # initiate zz at z.cov per <- sample(covs.max[pp],covs.max[pp]) GG <- G[pp,per] # proposed new cluster mappings of different levels of x_{pp} zz[,pp] <- GG[covs[,pp]] # proposed new z_{,pp} as per the proposed new cluster mappings of the levels of x_{pp} MM <- M00 ind1 <- which(M00>1) ind2 <- which(M00>1) logR <- isi_logml(zz[,ind1],z.vec,MM[ind1],pM[ind1,],lambdaalpha*lambda0,z.max)-isi_logml(z.cov[,ind2],z.vec,M00[ind2],pM[ind2,],lambdaalpha*lambda0,z.max) if(log(runif(1))<logR){ G[pp,1:covs.max[pp]] <- GG z.cov <- zz np <- length(ind2) } } if((M00[pp]==1)&&(length(unique(covs.max))==1)&&(iii<simsize/2)){ ind2 <- which(M00>1) # the current set of important predictors tempind <- sample(length(ind2),1) temp <- ind2[tempind] # choose one, x_{temp}, from the current set of important predictors zz <- z.cov # initiate zz at z.cov zz[,temp] <- rep(1,num_row) # propose removal of x_{temp} by setting z_{i,temp}=1 for all i per <- sample(covs.max[pp],covs.max[pp]) # permutation of {1,...,d_{pp} GG <- G[temp,per] # proposed new cluster mappings of different levels of x_{pp} obtained by permuting the cluster mappings of the levels of x_{temp} zz[,pp] <- GG[covs[,pp]] # proposed new z_{i,pp} as per the proposed new cluster mappings of the levels of x_{pp} MM <- M00 # MM initiated at current values {k_{1},...,k_{p}}, with k_{pp}=1 and k_{temp}>1 by the conditions MM[temp] <- 1 # proposed new value of k_{temp}, since x_{temp} is removed from the set of important predictors MM[pp] <- M00[temp] # propose k_{pp}=k_{temp} ind1 <- which(MM>1) # proposed set of important predictors, now this set excludes x_{temp} but includes x_{pp} logR <- isi_logml(zz[,ind1],z.vec,MM[ind1],pM[ind1,],lambdaalpha*lambda0,z.max)-isi_logml(z.cov[,ind2],z.vec,M00[ind2],pM[ind2,],lambdaalpha*lambda0,z.max) if(log(runif(1))<logR) { G[pp,1:covs.max[pp]] <- GG G[temp,] <- rep(1,covs.max[pp]) M00 <- MM z.cov <- zz np <- length(ind2) } } } } ind00 <- which(M00>1) # selected predictors whose #clusters>1 if(length(ind00)==0) ind00 <- 1 K00 <- M00[ind00] # k_{pp}'s for the selected predictors i.e. #clusters for selected predicators p00 <- length(ind00) # number of selected predictors z00 <- as.matrix(z.cov[,ind00]) clT <- array(0,dim=c(2,z.max,K00)) # dmax levels of the response y, K00={k_{1},..,k_{p00}} clustered levels of x_{1},...,x_{p00} df <- cbind(v+1,z.vec,z00) df <- df[do.call(order,as.data.frame(df)),] z0 <- unique(df) # z0 are the sorted unique combinations of (y,z_{pp,1},...,z_{pp,p00}), m <- which(!duplicated(df)) # m contains the positions when they first appear on {1,...,n} m <- c(diff(m),size(df,1)-m[length(m)]+1) # m contains their frequencies clT[z0] <- clT[z0]+m # add the differences in positions to cells of clT corresponding to the unique combinations -> gives the number of times (y,z_{1},...,z_{p00}) appears clTdata <- array(clT,dim=c(2,z.max,prod(K00))) # matrix representation of the tensor clT, with rows of the matrix corresponding to dimension 1 i.e. the levels of y clTdata_pop <- matrix(clTdata[1,,],nrow=z.max) clTdata_all <- matrix(clTdata[1,,]+clTdata[2,,],nrow=z.max) sz <- size(clTdata_all) # update lambda (lambda_{isi,g_1,g_2,g_3}) lambdamat <- matrix(0,z.max,sz[2]) for(jj in 1:sz[2]) { lambdamat[,jj] <- rgamma(z.max,clTdata_pop[,jj]+lambdaalpha*lambda0,1) lambdamat[,jj] <- lambdamat[,jj]/sum(lambdamat[,jj]) # normalization } lambda <- array(lambdamat,dim=c(z.max,K00)) # there is a probability vector of dim z.max for each uniq combination for(cc in 1:num_combs){ # store population effect exog_val <- as.numeric(all_combs[cc,]) cls <- G[cbind(1:p,exog_val)][ind00] for(zm in 1:z.max){ lambda_all[t(c(iii,exog_val,zm))] <- lambda[t(c(zm,cls))] } } if (random_effect || fixed_effect) { # update v (v=0: exgns; v=1: anmls) mouse_k_pairs <- cbind(mouseId,z.vec) prob_exgns <- piv[mouse_k_pairs]*lambda[cbind(z.vec,z00)] prob_anmls <- (1-piv[mouse_k_pairs])*lambda_mice[mouse_k_pairs] probs <- prob_exgns/(prob_exgns+prob_anmls) probs[is.na(probs)] <- 0 v <- sapply(probs,function(x) sample(0:1,1,prob=c(x,1-x))) # update piv (pi_{isi,0}^{i}) v0 <- cbind(v+1,z.vec,mouseId) v0 <- v0[do.call(order,as.data.frame(v0)),] v00 <- cbind(unique(v0),1) # unique combinations of {v,k,mouse_id} v0m <- which(!duplicated(v0)) v0m <- c(diff(v0m),size(v0,1)-v0m[length(v0m)]+1) # contain occurrences of each {v,k,id} vMat <- array(rep(0,2*z.max*max(mouseId)),dim=c(2,z.max,max(mouseId),1)) vMat[v00] <- v0m for(id in 1:max(mouseId)){ vMouseMat <- vMat[,,id,] piv[id,] <- 1-rbeta(z.max,vMouseMat[1,]+1,vMouseMat[2,]+1) } } # update lambda_mice (lambda^(i)) if (random_effect) { for(id in 1:max(mouseId)){ vMouseCt <- vMat[,,id,][2,] lambda_mice[id,] <- rdirichlet(1,lambda_mice_alpha*lambda0+vMouseCt) lambda_mice_all[iii,id,] <- lambda_mice[id,] } } # update lambda0 (lambda_{isi,0}) mmat <- matrix(0,z.max,sz[2]) for(ii in 1:z.max) { for(mm in 1:sz[2]) { if(clTdata_all[ii,mm]>0) { prob <- lambdaalpha*lambdamat[ii,mm]/(c(1:clTdata_all[ii,mm])-1+lambdaalpha*lambdamat[ii,mm]) prob[is.na(prob)] <- 10^(-5) # For numerical reasons mmat[ii,mm] <- mmat[ii,mm] + sum(rbinom(length(prob),size=rep(1,clTdata_all[ii,mm]),prob=prob)) } } } lambda0 <- rgamma(z.max,rowSums(mmat)+lambdaalpha0/z.max,1) lambda0[lambda0==0] <- 10^(-5) # For numerical reasons lambda0[is.na(lambda0)] <- 10^(-5) # For numerical reasons lambda0 <- lambda0/sum(lambda0) lambda0_all[iii,] <- lambda0 # update alpha_{isi,0} and alpha_{isi}^{(0)} lambdaalpha_all[iii] <- lambdaalpha lambdaalpha_mice_all[iii] <- lambda_mice_alpha lambdaalpha <- rgamma(1,shape=1+z.max-1,scale=1-digamma(1)+log(num_row)) lambda_mice_alpha <- rgamma(1,shape=1+z.max-1,scale=1-digamma(1)+log(num_row)) # update mixed gamma parameters alpha & beta # idea: use approximation for conjugate gamma prior if (iii > 50) { for(kk in 1:z.max) { df <- isi[which(z.vec==kk)] a_cur <- Alpha[iii-1,kk] b_cur <- Beta[iii-1,kk] a_new <- approx_gamma_shape(df,a_cur/b_cur,1,1) # mu=shape/rate b_new <- rgamma(1,shape=1+length(df)*a_new,rate=1+sum(df)) Alpha[iii,kk] <- a_new Beta[iii,kk] <- b_new } } else { Alpha[iii,] <- init_alpha Beta[iii,] <- init_beta } # update z_{tau,k} for(ii in 1:num_row) { z.covs.indices <- array(c(1:z.max,rep(z00[ii,],each=z.max)),dim=c(z.max,p00+1)) # ii's values of important covariates if(iii > burnin/2) { id <- mouseId[ii] prob <- (piv[id,]*lambda[z.covs.indices]+(1-piv[id,])*lambda_mice[id,])*dgamma(isi[ii],shape=Alpha[iii,],rate=Beta[iii,]) } else { prob <- lambda0*dgamma(isi[ii],shape=Alpha[iii,],rate=Beta[iii,]) } prob[is.nan(prob)] <- 0 prob[is.infinite(prob)] <- max(prob[is.finite(prob)]) # Numerical Stability if (sum(prob==0)==z.max){ prob <- rep(1/z.max,z.max) } else { prob <- prob/sum(prob) } z.vec[ii] <- sample(z.max,1,TRUE,prob) } } results <- list("covs"=covs, "dpreds"=covs.max, "MCMCparams"=list("simsize"=simsize,"burnin"=burnin,"N_Thin"=5), "duration.times"=isi, "comp.assignment"=z.vec, "duration.exgns.store"=lambda_all, "marginal.prob"=prob_mat, "shape.samples"=Alpha, "rate.samples"=Beta, "clusters"=M, "type"="Duration Times") return(results) } # end of function
/scratch/gouwar.j/cran-all/cranData/BMRMM/R/model_cont_isi.R
# Supporting functions for model_cont_isi.R ############## likelihood function ############## isi_logml <- function(z,y,M,pM,alpha,zmax) { # z:z_isi (only for important predictors), y:z_tau df <- cbind(y,z) for(pp in 1:size(df,2)) df <- sortrows(df,size(df,2)-pp+1) z0 <- unique(df) # z0 are the sorted unique combinations of (y,z_{pp,1},...,z_{pp,p0}), m <- which(!duplicated(df)) # m contains the positions (indices) when they first appear on {1,...,n} m <- c(diff(m),size(df,1)-m[length(m)]+1) C <- array(0,dim=c(zmax,M)) # d0=levels of the response y, M=number of clustered levels of x_{pp,1},...,x_{pp,p0} C[z0] <- C[z0]+m # add the differences in positions to cells of clT corresponding to the unique combinations -> gives the number of times (y,z_{pp,1},...,z_{pp,p0}) appears Cdata <- matrix(C,nrow=zmax) # matrix representation of the array C, with rows of the matrix corresponding to dimension 1 i.e. the levels of y, # col: # of unique combinations of (y,z_{pp,1},...,z_{pp,p0}) JJ <- size(Cdata) if(length(alpha)==1) loglik <- sum(sum(lgamma(Cdata+alpha)))-sum(lgamma(colSums(Cdata)+zmax*alpha))-JJ[2]*(zmax*lgamma(alpha)-lgamma(zmax*alpha)) else loglik <- sum(lgamma(Cdata+t(repmat(alpha,JJ[2],1))))-sum(lgamma(colSums(Cdata)+t(repmat(sum(alpha),JJ[2],1))))-JJ[2]*(sum(lgamma(alpha))-lgamma(sum(alpha))) # likelihood for k_pp, #clusters p <- size(pM,1) # number of important predictors if(p==1) pM <- as.matrix(t(pM)) for(j in 1:p) { d <- sum(pM[j,]>0) # number of possible number of clusters loglik <- loglik+log(pM[j,M[j]]/as.numeric(Stirling2(d,M[j]))) # S(n,k): # of ways to partition n objects into k groups } return(loglik) } ############## approximate gamma shape function ############## approx_gamma_shape <- function(dat,mu,a0,b0,it=10,e=10^(-8)) { R <- sum(log(dat)) S <- sum(dat) n <- length(dat) Q <- S/mu-R+n*log(mu)-n A <- a0+n/2 B <- b0+Q for(j in 1:it){ a <- A/B A <- a0-n*a+n*(a^2)*trigamma(a) B <- b0+(A-a0)/a-n*log(a)+n*digamma(a)+Q if(abs(a/(A/B)-1)<e){ return(rgamma(1,shape=A,rate=B)) } } return(rgamma(1,shape=A,rate=B)) }
/scratch/gouwar.j/cran-all/cranData/BMRMM/R/model_cont_isi_fn.R
################################################################################ ### This function is used when ISI is ignored or modeled as a discrete state ### ################################################################################ model_transition <- function(data,random_effect,fixed_effect,simsize,burnin) { ################## process data ################## # data columns should be: id, selected covariates, prev state, current state # construct isi data and covs for simulation num_row <- nrow(data) Xexgns <- as.matrix(data[,2:(ncol(data)-2)]) # load covariate values if(ncol(Xexgns)==1) { colnames(Xexgns) <- colnames(data)[2] } Id <- data[,1] Ytminus1 <- data[,ncol(data)-1] # previous state Yt <- data[,ncol(data)] # current state d0 <- length(unique(Yt)) # number of unique states sz <- dim(Xexgns) p <- sz[2] # number of covariates dpreds <- as.vector(apply(Xexgns,2,function(x) length(unique(x)))) # size of each covariate num_pairs <- rep(0,p) for(pp in 1:p) { num_pairs[pp] <- choose(dpreds[pp],2) } v0 <- cbind(Xexgns,Id) v0 <- v0[do.call(order,as.data.frame(v0)),] v00 <- unique(v0) # unique combination of (x_{s,1},x_{s,2},...,id) m00starts <- which(!duplicated(v0)) # start of each unique combination Ts <- c(diff(m00starts),nrow(v0)-m00starts[length(m00starts)]+1) # frequency of each combination ################ assign priors ################### pialpha <- ones(1,p) dmax <- max(dpreds) lambdaalpha_exgns <- 1 lambdaalpha_anmls <- 1 lambdaalpha0 <- 1 ################ MCMC sampler #################### N_MCMC <- simsize N_Thin <- 5 N_Store <- 0 if(is.null(burnin)) burnin <- floor(N_MCMC/2) np <- p Xnew <- Xexgns M <- repmat(dpreds,N_MCMC+1,1) G <- zeros(p,dmax) pi <- zeros(p,dmax) logmarginalprobs <- zeros(p,dmax) Ntot <- sz[1] z <- ones(Ntot,p) # covariate values for(j in 1:p) { G[j,1:dpreds[j]] <- 1:dpreds[j] z[,j] <- G[j,Xnew[,j]] pi[j,1:dpreds[j]] <- 1/dpreds[j] } GG <- G log0 <- zeros(N_MCMC,1) ind_all <- unique(sortrows(as.matrix(cbind(Xnew,Id)))) if (random_effect && fixed_effect) { piv <- 0.8*ones(max(Id),d0) # probability for population-level effect } else if (random_effect) { piv <- zeros(max(Id),d0) # probability for population-level effect } else { piv <- ones(max(Id),d0) # probability for population-level effect } v <- sample(0:1,Ntot,TRUE,prob=c(1-piv[1,1],piv[1,1])) # v=0:anmls; v=1:exgns ### estimate TP_All (transition probabilities for all) C <- array(0,dim=c(d0,d0,dpreds,max(Id))) v0 <- cbind(Yt,Ytminus1,Xnew,Id) v0 <- v0[do.call(order,as.data.frame(v0)),] v00 <- unique(v0) m00starts <- which(!duplicated(v0)) Ts <- c(diff(m00starts),nrow(v0)-m00starts[length(m00starts)]+1) C[as.matrix(v00)] <- C[as.matrix(v00)]+Ts T_All <- C TP_All <- array(0,dim=c(d0,d0,dpreds,max(Id))) for(i in 1:nrow(ind_all)) { attr <- ind_all[i,] for(j in 1:d0) { index <- cbind(1:d0,repmat(j,d0,1),repmat(attr,d0,1)) TP_All[index] <- C[index]/sum(C[index]) } } ### estimate TP_Anmls T_Anmls <- apply(T_All,c(1,2,length(size(T_All))),sum) TP_Anmls <- array(0,dim=c(d0,d0,max(Id))) for(i in 1:max(Id)) { for(j in 1:d0) { index <- cbind(1:d0,repmat(j,d0,1),repmat(i,d0,1)) TP_Anmls[index] <- T_Anmls[index]/sum(T_Anmls[index]) } } ### estimate TP_Exgns T_Exgns <- apply(T_All,1:(length(size(T_All))-1),sum) TP_Exgns <- array(0,dim=c(d0,d0,dpreds)) v0 <- unique(Xnew) for(i in 1:nrow(v0)) { attr <- as.numeric(v0[i,]) for(j in 1:d0) { index <- cbind(1:d0,repmat(j,d0,1),repmat(attr,d0,1)) TP_Exgns[index] <- T_Exgns[index]/sum(T_Exgns[index]) } } ### initialize lambda00 C <- as.matrix(table(Yt)) lambda00 <- C/sum(C) lambda00_mat_expanded <- repmat(lambda00,1,d0) ### initialize lambda0 C <- zeros(d0,d0) v0 <- cbind(Yt,Ytminus1) v0 <- v0[do.call(order,as.data.frame(v0)),] v00 <- unique(v0) m00starts <- which(!duplicated(v0)) Ts <- c(diff(m00starts),nrow(v0)-m00starts[length(m00starts)]+1) C[as.matrix(v00)] <- C[as.matrix(v00)]+Ts lambda0 <- C/repmat(colSums(C),d0,1) lambda0[,is.nan(colSums(lambda0))] <- ones(size(lambda0)[1],1)/size(lambda0)[1] lambda0_mat_expanded_exgns <- repmat(lambda0,1,prod(dpreds)) lambda0_mat_expanded_anmls <- repmat(lambda0,1,max(Id)) ### initialize lambda_exgns C <- array(0,dim=c(d0,d0,dpreds)) v0 <- cbind(Yt,Ytminus1,z) v0 <- v0[do.call(order,as.data.frame(v0)),] v00 <- unique(v0) m00starts <- which(!duplicated(v0)) Ts <- c(diff(m00starts),nrow(v0)-m00starts[length(m00starts)]+1) C[as.matrix(v00)] <- C[as.matrix(v00)]+Ts Cdata <- matrix(C,nrow=d0) lambda_exgns_mat <- zeros(d0,size(Cdata)[2]) for(i in 1:ncol(Cdata)) { lambda_exgns_mat[,i] <- rgamma(d0,shape=Cdata[,i]+lambdaalpha_exgns*lambda0_mat_expanded_exgns[,i],scale=1) lambda_exgns_mat[,i] <- lambda_exgns_mat[,i]/sum(lambda_exgns_mat[,i]) } lambda_exgns_mat[lambda_exgns_mat==0] <- min(min(lambda_exgns_mat[lambda_exgns_mat>0]),0.0001) lambda_exgns <- array(lambda_exgns_mat,dim=c(d0,d0,dpreds)) ### initialize lambda_anmls C <- array(0,dim=c(d0,d0,max(Id))) v0 <- cbind(Yt,Ytminus1,Id) v0 <- v0[do.call(order,as.data.frame(v0)),] v00 <- unique(v0) m00starts <- which(!duplicated(v0)) Ts <- c(diff(m00starts),nrow(v0)-m00starts[length(m00starts)]+1) C[as.matrix(v00)] <- C[as.matrix(v00)]+Ts Cdata <- matrix(C,nrow=d0) lambda_anmls_mat <- zeros(d0,size(Cdata)[2]) for(i in 1:ncol(Cdata)) { lambda_anmls_mat[,i] <- rgamma(d0,shape=Cdata[,i]+lambdaalpha_anmls*lambda0_mat_expanded_anmls[,i],scale=1) lambda_anmls_mat[,i] <- lambda_anmls_mat[,i]/sum(lambda_anmls_mat[,i]) } lambda_anmls_mat[lambda_anmls_mat==0] <- min(min(lambda_anmls_mat[lambda_anmls_mat>0]),0.0001) lambda_anmls <- array(lambda_anmls_mat,dim=c(d0,d0,max(Id))) ### MCMC storage lambda_anmls_mat_tmp <- array(0,dim=c(d0,d0,max(Id))) tp.all.post.mean <- array(0,dim=c(d0,d0,dpreds,max(Id))) tp.exgns.post.mean <- array(0,dim=c(d0,d0,dpreds)) TP_Exgns_Store <- array(0,dim=c(d0,d0,dpreds,floor((N_MCMC-burnin)/N_Thin))) tp.exgns.diffs.store <- array(0,dim=c(p,max(num_pairs),d0,d0,rep(max(dpreds),length(dpreds)-1),floor((N_MCMC-burnin)/N_Thin))) TP_Anmls_Store <- array(0,dim=c(d0,d0,max(Id),floor((N_MCMC-burnin)/N_Thin))) TP_Exgns_Comp_Post_Mean <- array(0,dim=c(d0,d0,dpreds)) TP_Anmls_Comp_Post_Mean <- array(0,dim=c(d0,d0,max(Id))) TP_Anmls_Comp_Store <- array(0,dim=c(d0,d0,max(Id),floor((N_MCMC-burnin)/N_Thin))) tp.exgns.all.itns <- array(0,dim=c(d0,d0,dpreds,N_MCMC)) ### start sampler for(kkk in 1:N_MCMC) { ### updating z (#clusters) M00 <- M[kkk,] if(kkk>1 && fixed_effect) { for(j in 1:p) { for(k in 1:dpreds[j]) { for(l in 1:dpreds[j]) { GG[j,k] <- l zz <- z zz[,j] <- GG[j,Xnew[,j]] logmarginalprobs[j,l] <- trans_logml(zz,Yt,Ytminus1,v,lambdaalpha_exgns*lambda0_mat_expanded_exgns,dpreds) } logmarginalprobs[j,1:dpreds[j]] <- logmarginalprobs[j,1:dpreds[j]]-max(logmarginalprobs[j,1:dpreds[j]]) logprobs <- log(pi[j,1:dpreds[j]])+logmarginalprobs[j,1:dpreds[j]] probs <- exp(logprobs)/sum(exp(logprobs)) GG[j,k] <- sample(1:dpreds[j],1,TRUE,probs) } G[j,] <- GG[j,] z[,j] <- GG[j,Xnew[,j]] M00[j] <- length(unique(z[,j])) } M[kkk+1,] <- M00 } ### updating pi for(j in 1:p) { uzj <- unique(z[,j]) ztab2 <- zeros(1,dpreds[j]) ztab2[1:max(uzj)] <- rep(1,max(uzj)) pi[j,1:dpreds[j]] <- rgamma(dpreds[j],shape=pialpha[j]+ztab2,scale=pialpha[j]+1) pi[j,1:dpreds[j]] <- pi[j,1:dpreds[j]]/sum(pi[j,1:dpreds[j]]) } if (!random_effect || !fixed_effect) { ### updating v (v=0: anmls; v=1: exgns) prob <- zeros(Ntot,2) piv_mat <- matrix(piv,nrow=max(Id)) prob[,1] <- (1-piv_mat[cbind(Id,Ytminus1)])*lambda_anmls[cbind(Yt,Ytminus1,Id)] prob[,2] <- piv_mat[cbind(Id,Ytminus1)]*lambda_exgns[cbind(Yt,Ytminus1,z)] prob <- prob/rowSums(prob) v <- rbinom(Ntot,1,prob[,2]) ### updating piv C <- array(0,dim=c(2,d0,max(Id))) v0 <- cbind(v+1,Ytminus1,Id) v0 <- v0[do.call(order,as.data.frame(v0)),] v00 <- unique(v0) m00starts <- which(!duplicated(v0)) Ts <- c(diff(m00starts),nrow(v0)-m00starts[length(m00starts)]+1) C[as.matrix(v00)] <- C[as.matrix(v00)]+Ts for(j in 1:max(Id)) { Cdata <- C[,,j] piv[j,] <- 1-rbeta(d0,Cdata[1,]+1,Cdata[2,]+1) } } ### updating lambda_exgns C1 <- array(0,dim=c(d0,d0,dpreds)) v0 <- cbind(Yt[v==1],Ytminus1[v==1],z[v==1,]) v0 <- v0[do.call(order,as.data.frame(v0)),] v00 <- unique(v0) m00starts <- which(!duplicated(v0)) Ts <- c(diff(m00starts),nrow(v0)-m00starts[length(m00starts)]+1) C1[as.matrix(v00)] <- C1[as.matrix(v00)]+Ts C1data <- matrix(C1,nrow=d0) sz1 <- size(C1data) lambda_exgns_mat <- zeros(d0,sz1[2])+NaN for(j in 1:sz1[2]) { while(sum(is.na(lambda_exgns_mat[,j]))>0) { lambda_exgns_mat[,j] <- rgamma(d0,shape=C1data[,j]+lambdaalpha_exgns*lambda0_mat_expanded_exgns[,j],scale=1) lambda_exgns_mat[,j] <- lambda_exgns_mat[,j]/sum(lambda_exgns_mat[,j]) } } lambda_exgns_mat[lambda_exgns_mat==0] <- min(min(lambda_exgns_mat[lambda_exgns_mat>0]),0.0001) lambda_exgns <- array(lambda_exgns_mat,dim=c(d0,d0,dpreds)) cov_combs <- unique(Xexgns) for(row in 1:nrow(cov_combs)) { v0 <- expand.grid(1:d0,1:d0) v0 <- as.matrix(cbind(v0,repmat(as.numeric(cov_combs[row,]),nrow(v0),1))) v00 <- cbind(v0,rep(kkk,nrow(v0))) tp.exgns.all.itns[v00] <- lambda_exgns[v0] } ### updating lambda_anmls C2 <- array(0,dim=c(d0,d0,max(Id))) v0 <- cbind(Yt[v==0],Ytminus1[v==0],Id[v==0]) v0 <- v0[do.call(order,as.data.frame(v0)),] v00 <- unique(v0) m00starts <- which(!duplicated(v0)) Ts <- c(diff(m00starts),nrow(v0)-m00starts[length(m00starts)]+1) C2[as.matrix(v00)] <- C2[as.matrix(v00)]+Ts C2data <- matrix(C2,nrow=d0) sz2 <- size(C2data) lambda_anmls_mat <- zeros(d0,sz2[2])+NaN for(j in 1:sz2[2]) { while(sum(is.na(lambda_anmls_mat[,j]))>0) { lambda_anmls_mat[,j] <- rgamma(d0,shape=C2data[,j]+lambdaalpha_anmls*lambda0_mat_expanded_anmls[,j],scale=1) lambda_anmls_mat[,j] <- lambda_anmls_mat[,j]/sum(lambda_anmls_mat[,j]) } } lambda_anmls_mat[lambda_anmls_mat==0] <- min(min(lambda_anmls_mat[lambda_anmls_mat>0]),0.0001) lambda_anmls <- array(lambda_anmls_mat,dim=c(d0,d0,max(Id))) ### updating hyper-parameters if (kkk>N_MCMC/4) { ### updating lambdaalpha_exgns C1 <- array(0,dim=c(d0,d0,dpreds)) v0 <- cbind(Yt[v==1],Ytminus1[v==1],z[v==1,]) v0 <- v0[do.call(order,as.data.frame(v0)),] v00 <- unique(v0) m00starts <- which(!duplicated(v0)) Ts <- c(diff(m00starts),nrow(v0)-m00starts[length(m00starts)]+1) C1[as.matrix(v00)] <- C1[as.matrix(v00)]+Ts C1data <- matrix(C1,nrow=d0) sz1 <- size(C1data) vmat_exgns <- zeros(sz1[1],sz1[2]) for(j in 1:sz1[2]) { jtemp <- j%%d0 jtemp[jtemp==0] <- d0 for(i in 1:sz1[1]) { if(C1data[i,j]>0) { prob <- lambdaalpha_exgns*lambda0[i,jtemp]/((1:C1data[i,j])-1+lambdaalpha_exgns*lambda0[i,jtemp]) prob[prob<0] <- 0 vmat_exgns[i,j] <- vmat_exgns[i,j]+sum(rbinom(length(prob),1,prob)) } } } vmat_exgns_colsums <- colSums(vmat_exgns) vmat_exgns <- array(vmat_exgns,dim=c(d0,d0,prod(dpreds))) vmat_exgns <- apply(vmat_exgns,c(1,2),sum) C1data_colsums <- colSums(C1data) rmat_exgns <- rbeta(sz1[2],lambdaalpha_exgns+1,C1data_colsums) smat_exgns <- rbinom(sz1[2],1,C1data_colsums/(C1data_colsums+lambdaalpha_exgns)) lambdaalpha_exgns <- rgamma(1,shape=1+sum(vmat_exgns_colsums)-sum(smat_exgns),scale=1/(1-sum(log(rmat_exgns)))) ### updating lambdaalpha_anmls C2 <- array(0,dim=c(d0,d0,max(Id))) v0 <- cbind(Yt[v==0],Ytminus1[v==0],Id[v==0]) v0 <- v0[do.call(order,as.data.frame(v0)),] v00 <- unique(v0) m00starts <- which(!duplicated(v0)) Ts <- c(diff(m00starts),nrow(v0)-m00starts[length(m00starts)]+1) C2[as.matrix(v00)] <- C2[as.matrix(v00)]+Ts C2data <- matrix(C2,nrow=d0) sz2 <- size(C2data) vmat_anmls <- zeros(sz2[1],sz2[2]) for(j in 1:sz2[2]) { jtemp <- j%%d0 jtemp[jtemp==0] <- d0 for(i in 1:sz2[1]) { if(C2data[i,j]>0) { prob <- lambdaalpha_anmls*lambda0[i,jtemp]/((1:C2data[i,j])-1+lambdaalpha_anmls*lambda0[i,jtemp]) prob[prob<0] <- 0 vmat_anmls[i,j] <- vmat_anmls[i,j]+sum(rbinom(length(prob),1,prob)) } } } vmat_anmls_colsums <- colSums(vmat_anmls) vmat_anmls <- array(vmat_anmls,dim=c(d0,d0,max(Id))) vmat_anmls <- apply(vmat_anmls,c(1,2),sum) C2data_colsums <- colSums(C2data) rmat_anmls <- rbeta(sz2[2],lambdaalpha_anmls+1,C2data_colsums) smat_anmls <- rbinom(sz2[2],1,C2data_colsums/(C2data_colsums+lambdaalpha_anmls)) lambdaalpha_anmls <- rgamma(1,shape=1+sum(vmat_anmls_colsums)-sum(smat_anmls),scale=1/(1-sum(log(rmat_anmls)))) ### updating lambda0 a <- vmat_exgns+vmat_anmls sz <- size(lambda0) for(j in 1:sz[2]) { lambda0[,j] <- rgamma(d0,shape=a[,j]+lambdaalpha0*lambda00_mat_expanded[,j],scale=1); lambda0[,j] <- lambda0[,j]/sum(lambda0[,j]) } lambda0[lambda0==0] <- min(min(lambda0[lambda0>0]),0.0001) lambda0_mat_expanded_exgns <- repmat(lambda0,1,prod(dpreds)) lambda0_mat_expanded_anmls <- repmat(lambda0,1,max(Id)) } ### print progress of MCMC sampler if(kkk%%100==0) print(paste("Transition Probabilities: Iteration ",kkk)) #ind1 <- which(M00>1) #np <- length(ind1) #cat(sprintf('k=%i, %i important predictors = {',kkk,np)) #for(i in 1:length(ind1)) { # cat(sprintf(' X(%i)(%i)',ind1[i],M00[ind1[i]])) #} #cat(sprintf(' }. mean piv=%f, lambdaalpha_exgns=%f, lambdaalpha_anmls=%f \n',mean(piv),lambdaalpha_exgns,lambdaalpha_anmls)) ### storage after burn-in if(kkk>burnin & kkk%%N_Thin==0) { N_Store <- N_Store+1 pistar <- array(0,dim=c(p,dmax,dmax)) idx <- repmat(((1:dmax)-1)*dmax,p,1)+G for(j in 1:p) { V <- zeros(dmax,dmax) V[idx[j,]] <- 1 pistar[j,1:dmax,1:dmax] <- t(V) } TP_Exgns_Comp <- lambda_exgns TP_Exgns_Comp_Mean <- apply(array(lambda_exgns,dim=c(d0,d0,prod(dpreds))),c(1,2),sum)/prod(dpreds) TP_Exgns_Comp_Mean_Anmls <- array(repmat(TP_Exgns_Comp_Mean,max(Id)),dim=c(d0,d0,max(Id))) all_trans <- cbind(rep(1:d0,each=d0),rep(1:d0,d0)) for(i in 1:nrow(ind_all)) { attr <- ind_all[i,1:(ncol(ind_all)-1)] id <- ind_all[i,ncol(ind_all)] lambda_anmls_mat_tmp[,,id] <- lambda_anmls[,,id] all_index <- as.matrix(cbind(all_trans,repmat(c(attr,id),nrow(all_trans),1))) anmls_mat <- matrix(lambda_anmls[all_index[,c(1,2,ncol(all_index))]],nrow=d0) exgns_mat <- matrix(lambda_exgns[all_index[,c(1:(ncol(all_index)-1))]],nrow=d0) tp.all.post.mean_Temp <- repmat(1-piv[id,],d0,1)*anmls_mat+repmat(piv[id,],d0,1)*exgns_mat tp.all.post.mean[all_index] <- tp.all.post.mean[all_index]+tp.all.post.mean_Temp[all_trans] } TP_Anmls_Comp <- lambda_anmls TP_Anmls_Comp_Mean <- apply(lambda_anmls_mat_tmp,c(1,2),sum)/max(Id) TP_Anmls_Comp_Mean_Exgns <- array(repmat(TP_Anmls_Comp_Mean,1,prod(dpreds)),dim=c(d0,d0,dpreds)) TP_Exgns_kkk <- (lambda0_mat_expanded_exgns+array(TP_Exgns_Comp,dim=c(d0,d0*prod(dpreds))))/2 TP_Exgns_kkk <- array(TP_Exgns_kkk,dim=c(d0,d0,dpreds)) tp.exgns.post.mean <- tp.exgns.post.mean + TP_Exgns_kkk TP_Exgns_Comp_Post_Mean <- TP_Exgns_Comp_Post_Mean + TP_Exgns_Comp TP_Anmls_Comp_Post_Mean <- TP_Anmls_Comp_Post_Mean + TP_Anmls_Comp v0 <- unique(cbind(Yt,Ytminus1,Xnew)) v00 <- cbind(v0,rep(N_Store,nrow(v0))) TP_Exgns_Store[as.matrix(v00)] <- TP_Exgns_kkk[as.matrix(v0)] for(j in 1:max(Id)) { TP_Anmls_Comp_Store[,,j,N_Store] <- (1-piv[j,])*(lambda_anmls_mat_tmp[,,j]-lambda0) } for(pp in 1:p) { num_pair <- num_pairs[pp] pair <- combn(dpreds[pp],2) inds <- 1:p inds <- inds[inds!=pp] for(np in 1:num_pair) { lvl1 <- pair[1,np] lvl2 <- pair[2,np] v0 <- unique(cbind(Ytminus1,Yt,Xexgns[,inds])) v1 <- zeros(nrow(v0),2+p) v1[,1:2] <- as.matrix(v0[,1:2]) if (length(inds)>0) { v1[,inds+2] <- as.matrix(v0[,3:ncol(v0)]) } v1[,pp+2] <- lvl1 v2 <- v1 v2[,pp+2] <- lvl2 v0 <- cbind(rep(pp,nrow(v0)),rep(np,nrow(v0)),v0,rep(N_Store,nrow(v0))) tp.exgns.diffs.store[as.matrix(v0)] <- TP_Exgns_kkk[v1]-TP_Exgns_kkk[v2] } } } } tp.exgns.post.mean <- tp.exgns.post.mean/N_Store tp.exgns.post.std <- apply(TP_Exgns_Store,1:(length(size(TP_Exgns_Store))-1),sd) TP_Exgns_Comp_Post_Mean <- TP_Exgns_Comp_Post_Mean/N_Store TP_Anmls_Comp_Post_Mean <- TP_Anmls_Comp_Post_Mean/N_Store tp.all.post.mean <- tp.all.post.mean/N_Store TP_Anmls_Comp_Post_Std <- apply(TP_Anmls_Comp_Store,c(1,2),sd) results <- list("covs"=Xexgns, "dpreds"=dpreds, "MCMCparams"=list("simsize"=simsize,"burnin"=burnin,"N_Thin"=N_Thin), "tp.exgns.post.mean"=tp.exgns.post.mean, "tp.exgns.post.std"=tp.exgns.post.std, "tp.anmls.post.mean"=TP_Anmls_Comp_Post_Mean, "tp.all.post.mean"=tp.all.post.mean, "tp.anmls.post.std"=TP_Anmls_Comp_Post_Std, "tp.exgns.diffs.store"=tp.exgns.diffs.store, "tp.exgns.all.itns"=tp.exgns.all.itns, "clusters"=M, "type"="Transition Probabilities") return(results) }
/scratch/gouwar.j/cran-all/cranData/BMRMM/R/model_transition.R
# Supporting functions for model_transition.R ############## likelihood function ############## trans_logml <- function(zz,Yt,Ytminus1,v,lambda0_mat_expanded,dpreds) { d0 <- length(unique(Yt)) v0 <- cbind(Yt[v==1],Ytminus1[v==1],zz[v==1,]) v0 <- v0[do.call(order,as.data.frame(v0)),] v00 <- unique(v0) m00starts <- which(!duplicated(v0)) Ts <- c(diff(m00starts),nrow(v0)-m00starts[length(m00starts)]+1) C <- array(0,dim=c(d0,d0,dpreds)) C[as.matrix(v00)] <- C[as.matrix(v00)]+Ts Cdata <- matrix(C,nrow=d0) lambda0_mat_expanded[lambda0_mat_expanded==0] <- 10^-5 loglik <- sum(gammaln(Cdata+lambda0_mat_expanded))-sum(gammaln(colSums(Cdata)+colSums(lambda0_mat_expanded)))-sum(gammaln(lambda0_mat_expanded))+sum(gammaln(colSums(lambda0_mat_expanded))) return(loglik) }
/scratch/gouwar.j/cran-all/cranData/BMRMM/R/model_transition_fn.R
################################################## ################## Plot Results ################## ################################################## ############################################ ############## Summary Method ############## ############################################ #' Summary Method for Objects of Class `BMRMM` #' #' Summarizing an object of class `BMRMM`, including results for transition probabilities and duration times, if applicable. #' #' @param object an object of class `BMRMM`. #' @param delta threshold for the null hypothesis for the local tests of transition probabilities (see Details). Default is 0.02. #' @param digits integer used for number formatting. Default is 2. #' @param ... further arguments for the summary function. #' #' @return An object of class `BMRMMsummary` with the following elements: \tabular{ll}{ #' `trans.global` \tab global test results for transition probabilities (see Details). \cr #' `trans.probs.mean` \tab mean for the posterior transition probabilities. \cr #' `trans.probs.sd` \tab standard deviation for the posterior transition probabilities. \cr #' `trans.local.mean.diff` \tab the absolute difference in transition probabilities for a pair of covariate levels (see Details). \cr #' `trans.local.null.test` \tab probability for the null hypothesis that the difference between two covariate levels is not significant (see Details). \cr #' `dur.global` \tab global test results for duration times (see Details). \cr #' `dur.mix.params` \tab mixture parameters taken from the last MCMC iteration if duration times follow a mixture gamma distribution. \cr #' `dur.mix.probs`\tab mixture probabilities for each covariate taken from the last MCMC iteration if duration times follow a mixture gamma distribution. \cr #' } #' #' @details We give more explanation for the global tests and local tests results. #' \itemize{ #' \item{Global tests (for both transition probabilities and duration times)} #' #' {Global tests are presented as a matrix, where the row denote the number of clusters and the column represents covariates. #' For each row `i` and column `j`, the matrix entry is the percentage of the number of the clusters within the stored MCMC samples #' for this covariate, i.e., an estimation for `Pr(# clusters for covariate j == i)`. We note that the probability #' `Pr(# clusters for covariate j > 1)` would be the probability for the null hypothesis that the covariate `j` is significant.} #' \item{Local tests (for transition probabilities only)} #' #' {Local tests focus on a particular covariate and compare the influence among its levels when the other covariates values are fixed. \cr #' Given a pair of levels of covariate `j`, say `j_1` and `j_2`, and given the levels of other covariates, #' the null hypothesis is that the difference between `j_1` and `j_2` is not significant for transition probabilities. #' It is calculated as the percentage of the samples with absolute difference less than `delta`. #' #' The local tests provide two matrices of size `d0` x `d0` where `d0` is the number of states: \cr #' \enumerate{ #' \item `mean.diff` -- the mean of the absolute difference in each transition type between levels `j_1` and `j_2`; #' \item `null.test` -- the probability of the null hypothesis that `j_1` and `j_2` have the same significance for each transition type. #' }} #' } #' #' @seealso #' [BMRMM::plot.BMRMMsummary] for plotting the summary results. #' #' @examples #' results <- BMRMM(foxp2sm, num.cov = 2, simsize = 50, #' cov.labels = list(c("F", "W"), c("U", "L", "A")), #' duration.distr = list('mixgamma',shape=rep(1,3),rate=rep(1,3))) #' sm <- summary(results) #' sm #' #' @export summary.BMRMM <- function(object, delta=0.02, digits=2, ...) { if (!inherits(object, "BMRMM")) { stop("'object' must be of class 'BMRMM'") } summary_list <- list() if ("results.trans" %in% names(object)) { # transition probabilities posterior mean & sd summary_list$trans.global <- global_test(object$results.trans) cov.combs <- sortrows((unique(object$results.trans$covs))) cov.combs <- as.list(data.frame(t(cov.combs))) trans.probs.res <- get_trans_mat_by_level(object$results.trans,cov.combs,digits) summary_list$trans.probs.mean <- trans.probs.res[[1]] summary_list$trans.probs.sd <- trans.probs.res[[2]] # transition probabilities local test - mean diff & null test trans.local.mean.diff <- vector("list",size(object$results.trans$covs,2)) trans.local.null.test <- vector("list",size(object$results.trans$covs,2)) names(trans.local.mean.diff) <- colnames(object$results.trans$covs) names(trans.local.null.test) <- colnames(object$results.trans$covs) for(pp in 1:length(trans.local.mean.diff)) { inds <- 1:length(object$results.trans$dpreds) inds <- inds[inds != pp] other_cov_levels <- as.matrix(unique(object$results.trans$covs[,inds])) if(size(other_cov_levels)[1]==1) { other_cov_levels <- matrix(other_cov_levels,nrow=size(other_cov_levels)[2]) } all_pairs <- t(combn(object$results.trans$dpreds[pp],2)) trans.local.res <- get_tp_local_test_results_by_level(object$results.trans,pp,delta,other_cov_levels,all_pairs,digits) trans.local.mean.diff[[pp]] <- trans.local.res$mean.diff trans.local.null.test[[pp]] <- trans.local.res$null.test } summary_list$trans.local.mean.diff <- trans.local.mean.diff summary_list$trans.local.null.test <- trans.local.null.test } if ("results.duration" %in% names(object)) { summary_list$dur.global <- global_test(object$results.duration) summary_list$dur.mix.params <- get_mixture_params(object$results.duration,digits) summary_list$dur.mix.probs <- get_mixture_probs_by_cov(object$results.duration,digits) } class(summary_list) <- "BMRMMsummary" return(summary_list) } ############################################################# ############## Plot Function for BMRMM Summary ############## ############################################################# #' Plot Method for Visualizing BMRMM Summary #' #' Visualization of a specified field of a `BMRMMsummary` object. #' #' @param x an object of class `BMRMMsummary`. #' @param type a string indicating the plot(s) to draw. Must be named after a field of `object`. #' @param xlab x-axis label. Default is NULL. #' @param ylab y-axis label. Default is NULL. #' @param main main title. Default is NULL. #' @param col color of the plot. Default is NULL. #' @param ... further arguments for the plot function. #' #' @return None #' #' @examples #' results <- BMRMM(foxp2sm, num.cov = 2, simsize = 50, #' cov.labels = list(c("F", "W"), c("U", "L", "A")), #' duration.distr = list('mixgamma',shape=rep(1,3),rate=rep(1,3))) #' fit.summary <- summary(results) #' plot(fit.summary, 'trans.probs.mean') #' plot(fit.summary, 'dur.mix.probs') #' #' @seealso [BMRMM::summary.BMRMM()] #' #' @export plot.BMRMMsummary <- function(x,type,xlab=NULL,ylab=NULL,main=NULL,col=NULL,...) { if (!inherits(x, "BMRMMsummary")) { stop("'x' must be of class 'BMRMMsummary'") } if (!type %in% names(x)) { stop(paste("'x' does not have field", type), sep=' ') } object <- x if (type == 'trans.global') { if (missing(main)) main <- 'Global Test Results for Transition Probabilities' if (missing(xlab)) xlab <- '' if (missing(ylab)) ylab <- 'Proportions' if (missing(col)) col <- terrain.colors(nrow(object$trans.global)) args.legend <- list(x="topright",legend=rownames(object$trans.global),xpd=TRUE) barplot(object$trans.global,main=main,ylab=ylab,col=col,ylim=c(0,1), legend.text=TRUE,beside=TRUE,xlab=xlab,args.legend=args.legend,...) } else if (type == 'trans.probs.mean') { if (missing(main)) main <- 'Trans. Probs. Posterior Mean' if (missing(xlab)) xlab <- 'y_{t-1}' if (missing(ylab)) ylab <- 'y_{t}' for (ii in 1:length(object$trans.probs.mean)) { cov.name <- paste('Covariates',names(object$trans.probs.mean)[ii],sep=' ') plot.heatmap(object$trans.probs.mean[[ii]],xlab=xlab,ylab=ylab,main=paste(cov.name,main,sep='\n'),...) } } else if (type == 'trans.probs.sd') { if (missing(main)) main <- 'Trans. Probs. Posterior Std' if (missing(xlab)) xlab <- 'y_{t-1}' if (missing(ylab)) ylab <- 'y_{t}' for (ii in 1:length(object$trans.probs.sd)) { cov.name <- paste('Covariates',names(object$trans.probs.sd)[ii],sep=' ') plot.heatmap(object$trans.probs.sd[[ii]],xlab=xlab,ylab=ylab,main=paste(cov.name,main,sep='\n'),...) } } else if (type == 'trans.local.mean.diff') { if (missing(main)) main <- 'Mean Diff. in Trans. Probs.' if (missing(xlab)) xlab <- 'y_{t-1}' if (missing(ylab)) ylab <- 'y_{t}' for(pp in 1:length(object$trans.local.mean.diff)) { for(rr in 1:length(object$trans.local.mean.diff[[pp]])) { fixed.cov.names <- names(object$trans.local.mean.diff[[pp]])[rr] for(kk in 1:length(object$trans.local.mean.diff[[pp]][[rr]])) { comp.cov.names <- names(object$trans.local.mean.diff[[pp]][[rr]])[kk] cov.names <- paste(fixed.cov.names,comp.cov.names,sep='\n') plot.heatmap(object$trans.local.mean.diff[[pp]][[rr]][[kk]],xlab=xlab,ylab=ylab,main=paste(cov.names,main,sep='\n'),...) } } } } else if (type == 'trans.local.null.test') { if (missing(main)) main <- 'Probability for H_0' if (missing(xlab)) xlab <- 'y_{t-1}' if (missing(ylab)) ylab <- 'y_{t}' for(pp in 1:length(object$trans.local.null.test)) { for(rr in 1:length(object$trans.local.null.test[[pp]])) { fixed.cov.names <- names(object$trans.local.null.test[[pp]])[rr] for(kk in 1:length(object$trans.local.null.test[[pp]][[rr]])) { comp.cov.names <- names(object$trans.local.null.test[[pp]][[rr]])[kk] cov.names <- paste(fixed.cov.names,comp.cov.names,sep='\n') plot.heatmap(object$trans.local.null.test[[pp]][[rr]][[kk]],xlab=xlab,ylab=ylab,main=paste(cov.names,main,sep='\n'),...) } } } } else if (type == 'dur.global') { if (missing(main)) main <- 'Global Test Results for Duration Times' if (missing(xlab)) xlab <- '' if (missing(ylab)) ylab <- 'Proportions' if (missing(col)) col <- terrain.colors(nrow(object$dur.global)) args.legend <- list(x="topright",legend=rownames(object$dur.global),xpd=TRUE) barplot(object$dur.global,main=main,ylab=ylab,col=col,ylim=c(0,1), legend.text=TRUE,beside=TRUE,xlab=xlab,args.legend=args.legend,...) } else if (type == 'dur.mix.params') { if (missing(main)) { main <- 'Mixture Parameters' } plot.heatmap(object$dur.mix.params,xlab='',ylab='',main=main,...) } else if (type == 'dur.mix.probs') { if(missing(main)) { main <- 'Mixture Probabilities for' } for(ii in 1:length(object$dur.mix.probs)) { cov.name <- paste("Covariate", names(object$dur.mix.probs)[ii], sep=' ') plot.heatmap(object$dur.mix.probs[[ii]],xlab='',ylab='',main=paste(main,cov.name,sep=' '),...) } } else { stop("'type' must be one of the following: 'trans.global', trans.probs.mean', 'trans.probs.sd', 'trans.local.mean.diff', 'trans.local.null.test', 'dur.global', 'dur.mix.params', 'dur.mix.probs'") } } ########################################################### ############## Histograms for Duration Times ############## ########################################################### #' Histogram of Duration Times #' #' Plots the histogram of duration times in two ways as the users desire: #' \enumerate{ #' \item Histogram of all duration times superimposed the posterior mean mixture gamma distribution; #' \item Histogram of a specified mixture component superimposed the gamma distribution with shape and rate #' parameters taken from the last MCMC iteration. #' } #' #' @param x an object of class `BMRMM`. #' @param comp one of #' \itemize{ #' \item `NULL`, which means the histogram for all duration times is plotted with the posterior mean mixture gamma distribution. Default option. #' \item an integer specifying the mixture component for which the corresponding histogram is plotted with mixture gamma parameters taken from the last MCMC iteration. #' } #' @param xlim a range of x values with sensible defaults. Default is `NULL`, which is to use `c(min(duration), max(duration))`. #' @param breaks an integer giving the number of cells for the histogram. Default is `NULL`, which is to use the Freedman-Diaconis rule, i.e., `(max(duration)-min(duration))*n^(1/3)/2/IQR(duration)`. #' @param main main title. Default is `NULL`, which is to use `"Histogram with Posterior Mean"` when `comp` is `NULL` and `"Component X"` if `comp` is specified. #' @param col color of the histogram bars. Default is `gray`. #' @param xlab x-axis label. Default is `"Duration times"`. #' @param ylab y-axis label. Default is `"Density"`. #' @param ... further arguments for the hist function. #' #' @examples #' results <- BMRMM(foxp2sm, num.cov = 2, simsize = 50, #' duration.distr = list('mixgamma',shape=rep(1,3),rate=rep(1,3))) #' #' # plot the histogram of all duration times superimposed with #' # the posterior mixture gamma distribution #' hist(results, xlim = c(0, 1), breaks = 50) #' #' # plot the histogram for components 1 superimposed with #' # the mixture gamma distribution of the last MCMC iteration #' hist(results, components = 1) #' #' @return An object of class `histogram`. #' #' @export hist.BMRMM <- function(x,comp=NULL,xlim=NULL,breaks=NULL,main=NULL, col='gray',xlab='Duration times',ylab='Density',...) { if (!inherits(x, "BMRMM")) stop("'x' must be of class 'BMRMM'") if (!"results.duration" %in% names(x)) stop("'x' does not contain results for duration times") object <- x$results.duration if (missing(comp)) { dat <- object$duration.times } else if (!is.numeric(comp) || comp < 1 || comp > size(object$shape.samples,2)) { stop(sprintf("'comp' must be an integer within range [%s, %s]",1,size(object$shape.samples,2))) } else { dat <- object$duration.times[object$comp.assignment==comp] if (length(dat)==0) stop(paste("Component ",comp,'is empty')) } if(missing(xlim)) xlim <- c(min(dat),max(dat)) if(missing(breaks)) breaks <- (max(dat)-min(dat))*length(dat)^(1/3)/2/IQR(dat) if(missing(main) || is.null(comp)) main <- 'Histogram with Posterior Mean' if(missing(main) || !is.null(comp)) main <- paste('Component',comp) x_axis <- get_x_axis(xlim) if (missing(comp)) { quan <- quantile_calc(object,object$marginal.prob,object$shape.samples,object$rate.samples,xlim) res <- hist(dat,freq=FALSE,breaks=breaks,xlim=xlim,col=col,main=main,xlab=xlab,ylab=ylab,...) polygon(c(x_axis,rev(x_axis)),c(quan$p95,rev(quan$p5)), # draw credible region col=rgb(190,190,190,130,maxColorValue=255), border=NA) lines(x=x_axis,y=quan$p50,col="red",lwd=2) # superimpose posterior mean } else { res <- hist(dat,freq=FALSE,xlim=xlim,main=main,breaks=breaks,col=col,border=FALSE,xlab=xlab,ylab=ylab,...) lines(x=x_axis,y=dgamma(x_axis,shape=object$shape.samples[nrow(object$shape.samples),comp], rate=object$rate.samples[nrow(object$shape.samples),comp]),col="red",lwd=2) } return(res) } ############################################## ############## Diagnostic Plots ############## ############################################## #' MCMC Diagnostic Plots for Transition Probabilities and Duration Times #' #' Provides the traceplots and autocorrelation plots for (i) transition probabilities and (ii) mixture gamma shape and rate parameters. #' #' @param object an object of class `BMRMM` #' @param cov.combs a list of covariate level combinations. Default is `NULL`, which is all possible combination of covariate levels. #' @param transitions a list of pairs denoting state transitions. Default is `NULL`, which is all possible state transitions. #' @param components a numeric vector denoting the mixture components of interest. Default is `NULL`, which is a list of all mixture components. #' #' @return None #' #' @examples #' #' results <- BMRMM(foxp2sm, num.cov = 2, simsize = 80, #' duration.distr = list('mixgamma',shape=rep(1,3),rate=rep(1,3))) #' diag.BMRMM(results) #' diag.BMRMM(results, cov.combs = list(c(1,1),c(1,2)), #' transitions = list(c(1,1)), components = c(3)) #' #' @export diag.BMRMM <- function(object,cov.combs=NULL,transitions=NULL,components=NULL) { if (!inherits(object, "BMRMM")) { stop("'object' must be of class 'BMRMM'") } if ("results.trans" %in% names(object)) { ind <- match("results.trans", names(object)) result <- object[[ind]] if(missing(cov.combs)) { cov.combs <- sortrows((unique(result$covs))) rownames(cov.combs) <- 1:nrow(cov.combs) cov.combs <- as.list(data.frame(t(cov.combs))) } if(missing(transitions)) { transitions <- expand.grid(1:length(result$state.labels),1:length(result$state.labels)) transitions <- as.list(data.frame(t(transitions))) } for(row in cov.combs) { label <- unlist(lapply(1:length(row), function(x) result$cov.labels[[x]][row[x]])) title <- paste(c("Covariates {",label,'}'),collapse=" ") d0 <- size(result$tp.exgns.all.itns)[1] simsize <- result$MCMCparams$simsize burnin <- result$MCMCparams$burnin thin_ind <- seq(burnin,simsize,result$MCMCparams$N_Thin) for(transition in transitions) { i <- transition[1] j <- transition[2] ind <- cbind(repmat(c(i,j,row),simsize,1),1:simsize) plot(result$tp.exgns.all.itns[ind],type='l',xlab='Iteration number',col='#00A600', ylab=paste(c('Prob(',result$state.labels[j],'->',result$state.labels[i],')'),collapse=''), main=paste('Traceplot for',title),ylim=c(0,1)) abline(v=burnin,col='red',lwd=2) abline(h=mean(result$tp.exgns.all.itns[ind][burnin:simsize]),col='blue',lty=2,lwd=2) acf(result$tp.exgns.all.itns[ind][thin_ind],main=paste('Autocorrelation Plot for',title)) } } } if ("results.duration" %in% names(object)) { ind <- match("results.duration", names(object)) result <- object[[ind]] burnin <- result$MCMCparams$burnin simsize <- result$MCMCparams$simsize af.thin <- seq(burnin+1,simsize,5) if(missing(components)) components <- 1:ncol(result$shape.samples) for(kk in components) { plot(result$shape.samples[,kk],type='l',xlab='',ylab='',col='#00A600',main=paste("Traceplot for Shape, Comp",kk)) abline(v=burnin,col='red',lwd=2) abline(h=mean(result$shape.samples[burnin:simsize,kk]),col='blue',lty=2,lwd=2) plot(result$rate.samples[,kk],type='l',xlab='',ylab='',col='#00A600',main=paste("Traceplot for Rate, Comp",kk)) abline(v=burnin,col='red',lwd=2) abline(h=mean(result$rate.samples[burnin:simsize,kk]),col='blue',lty=2,lwd=2) acf(result$shape.samples[af.thin,kk],main=paste('Autocorrelation after thin. for Shape, Comp',kk)) acf(result$rate.samples[af.thin,kk],main=paste('Autocorrelation after thin. for Rate, Comp',kk)) } } } ############################################# ############## Model Selection ############## ############################################# #' Model Selection Scores for the Number of Components for Duration Times #' #' Provides the LPML (Geisser and Eddy, 1979) and WAIC (Watanabe, 2010) scores of the Bayesian Markov renewal mixture models #' #' The two scores can be used to compare different choices of isi_num_comp, i.e., the number #' of the mixture gamma components. Larger values of LPML and smaller values of WAIC #' indicate better model fits. #' #' @examples #' #' results <- BMRMM(foxp2sm, num.cov = 2, simsize = 50, #' duration.distr = list('mixgamma',shape=rep(1,3),rate=rep(1,3))) #' model.selection.scores(results) #' #' @references #' Geisser, S. and Eddy, W. F. (1979). A predictive approach to model selection. Journal of the American Statistical Association, 74, 153–160. \cr\cr #' Watanabe, S. (2010). Asymptotic equivalence of Bayes cross validation and widely applicable information criterion in singular learning theory. Journal of Machine Learning Research, 11, 3571–3594. #' #' @param object An object of class BMRMM. #' #' @return a list consisting of LPML and WAIC scores for gamma mixture models. #' #' @export model.selection.scores <- function(object) { if (!inherits(object, "BMRMM")) { stop("'object' must be of class 'BMRMM'") } if (!"results.duration" %in% names(object)) { stop("'object' does not contain results for duration times") } ind <- match("results.duration",names(object)) isi <- object[[ind]]$duration.times pis <- object[[ind]]$marginal.prob alphas <- object[[ind]]$shape.samples betas <- object[[ind]]$rate.samples lpml <- 0 waic <- 0 p_waic <- 0 for(k in 1:length(isi)) { expectation <- 0 inv_expectation <- 0 log_expectation <- 0 for(j in 1:nrow(alphas)) { expectation <- expectation + mix_gam(isi[k],pis[j,],alphas[j,],betas[j,]) inv_expectation <- inv_expectation + 1/mix_gam(isi[k],pis[j,],alphas[j,],betas[j,]) log_expectation <- log_expectation + log(mix_gam(isi[k],pis[j,],alphas[j,],betas[j,])) } expectation <- expectation/nrow(alphas) inv_expectation <- inv_expectation/nrow(alphas) log_expectation <- log_expectation/nrow(alphas) lpml <- lpml + log(1/inv_expectation) waic <- waic - 2*log(expectation) p_waic <- p_waic + 2*(log(expectation)-log_expectation) } return(list('LPML'=lpml,'WAIC'=waic+2*p_waic)) }
/scratch/gouwar.j/cran-all/cranData/BMRMM/R/print_and_plot_results.R
# Supporting functions for print_and_plot_results.R # global test global_test <- function(results) { simsize <- results$MCMCparams$simsize burnin <- results$MCMCparams$burnin N_Thin <- results$MCMCparams$N_Thin ind <- seq(burnin+1,simsize,N_Thin) M <- results$clusters cluster_data <- c() label_data <- c() for(pp in 1:ncol(M)) { cluster_data <- c(cluster_data,M[ind,pp]) label_data <- c(label_data,rep(colnames(results$covs)[pp],length(ind))) } counts <- table(cluster_data,label_data)/length(ind) return(counts) } # mixture parameters get_mixture_params <- function(results,digits) { simsize <- results$MCMCparams$simsize m <- cbind(shape.k=results$shape.samples[simsize,],rate.k=results$rate.samples[simsize,]) m <- round(m,digits) rownames(m) <- paste0(rep('Comp ',nrow(m)),1:nrow(m)) return(m) } # mixture probabilities get_mixture_probs_by_cov <- function(results,digits) { mix_probs_res <- list() ind_arr <- list() for (i in 1:length(results$dpreds)) ind_arr <- c(ind_arr,list(1:results$dpreds[i])) ind_mat <- do.call(expand.grid,ind_arr) ind_mat <- cbind(rep(results$MCMCparams$simsize,nrow(ind_mat)),ind_mat) for (pp in 1:ncol(results$covs)) { prob_to_plot <- matrix(0,nrow=size(results$shape.samples,2),ncol=results$dpreds[pp]) for (cc in 1:size(results$shape.samples,2)) { select_ind <- as.matrix(cbind(ind_mat,rep(cc,nrow(ind_mat)))) dat <- array(results$duration.exgns.store[select_ind],dim=results$dpreds) prob_to_plot[cc,] <- apply(dat,c(pp),mean) } colnames(prob_to_plot) <- results$cov.labels[[pp]] prob_to_plot <- round(prob_to_plot, digits) rownames(prob_to_plot) <- paste0(rep('Comp ',nrow(prob_to_plot)),1:nrow(prob_to_plot)) mix_probs_res[[length(mix_probs_res)+1]] <- prob_to_plot } names(mix_probs_res) <- colnames(results$covs) return(mix_probs_res) } # get transition probabilities by covariate levels get_trans_mat_by_level <- function(results,cov_combs,digits) { mat_mean <- list() mat_sd <- list() mat_lab <- list() for(row in cov_combs) { label <- paste(lapply(1:length(row), function(x) results$cov.labels[[x]][row[x]]),collapse=',') label <- paste('(',label,')',sep='') v0 <- expand.grid(1:length(results$state.labels),1:length(results$state.labels)) v0 <- as.matrix(cbind(v0,repmat(row,nrow(v0),1))) trans_mat <- t(round(matrix(results$tp.exgns.post.mean[v0],nrow=length(results$state.labels)),digits)) sd_mat <- t(round(matrix(results$tp.exgns.post.std[v0],nrow=length(results$state.labels)),digits)) colnames(trans_mat) <- results$state.labels rownames(trans_mat) <- results$state.labels colnames(sd_mat) <- results$state.labels rownames(sd_mat) <- results$state.labels mat_mean[[length(mat_mean)+1]] <- trans_mat mat_sd[[length(mat_sd)+1]] <- sd_mat mat_lab[[length(mat_lab)+1]] <- label } names(mat_mean) <- mat_lab names(mat_sd) <- mat_lab return(list(mat_mean,mat_sd)) } # local test: get probabilities for H0 for given delta postprob <- function(x,delta) { return(length(x[x<delta])/length(x)) } # local test: get local test results given covariates levels get_tp_local_test_results_by_level <- function(results,cov,delta,levels,comp_pairs,digits) { tp.exgns.diffs.store_Mean <- apply(results$tp.exgns.diffs.store,1:(length(size(results$tp.exgns.diffs.store))-1),mean) tp.exgns.diffs.store_Prob <- apply(abs(results$tp.exgns.diffs.store),1:(length(size(results$tp.exgns.diffs.store))-1),function(x) postprob(x,delta)) inds <- 1:length(results$dpreds) inds <- inds[inds!=cov] if (ncol(results$clusters)==1) { levels <- matrix(rep(0,2),1) } res_by_levels_mean.diff <- vector("list",nrow(levels)) res_by_levels_null.test <- vector("list",nrow(levels)) for(row in 1:nrow(levels)) { level <- as.numeric(levels[row,]) res_by_comp.pairs_mean.diff <- vector("list",nrow(comp_pairs)) res_by_comp.pairs_null.test <- vector("list",nrow(comp_pairs)) for(np in 1:nrow(comp_pairs)) { covs_levels_1 <- rep(0,length(results$dpreds)) covs_levels_1[inds] <- level covs_levels_1[cov] <- comp_pairs[np,1] covs_levels_2 <- covs_levels_1 covs_levels_2[cov] <- comp_pairs[np,2] covs_levels_1 <- sapply(1:size(covs_levels_1)[2],function(i) results$cov.labels[[i]][covs_levels_1[i]]) covs_levels_2 <- sapply(1:size(covs_levels_2)[2],function(i) results$cov.labels[[i]][covs_levels_2[i]]) v0 <- expand.grid(1:length(results$state.labels),1:length(results$state.labels)) pair_ind <- which(apply(comp_pairs,1,function(row) all.equal(row,comp_pairs[np,])=='TRUE')) if (ncol(results$clusters)==1) { v0 <- as.matrix(cbind(repmat(c(cov,pair_ind),nrow(v0),1),v0)) } else { v0 <- as.matrix(cbind(repmat(c(cov,pair_ind),nrow(v0),1),v0,repmat(level,nrow(v0),1))) } mean_diff_mat <- abs(matrix(tp.exgns.diffs.store_Mean[v0],nrow=size(results$tp.exgns.post.mean)[1],dimnames=list(results$state.labels,results$state.labels))) prob_diff_mat <- matrix(tp.exgns.diffs.store_Prob[v0],nrow=size(results$tp.exgns.post.mean)[1],dimnames=list(results$state.labels,results$state.labels)) mean_diff_mat <- t(round(mean_diff_mat,digits)) prob_diff_mat <- t(round(prob_diff_mat,digits)) res_by_comp.pairs_mean.diff[[np]] <- mean_diff_mat res_by_comp.pairs_null.test[[np]] <- prob_diff_mat comp_pair_name <- sapply(comp_pairs[np,],function(x) results$cov.labels[[cov]][x]) comp_pair_name <- paste('Compare:',paste(comp_pair_name,collapse='&'),sep='') names(res_by_comp.pairs_mean.diff)[[np]] <- comp_pair_name names(res_by_comp.pairs_null.test)[[np]] <- comp_pair_name } res_by_levels_mean.diff[[row]] <- res_by_comp.pairs_mean.diff res_by_levels_null.test[[row]] <- res_by_comp.pairs_null.test level_name <- sapply(1:length(inds),function(x) results$cov.labels[[inds[x]]][levels[row,x]]) level_name <- paste('Fixing:',paste(level_name,collapse=' '),sep='') names(res_by_levels_mean.diff)[[row]] <- level_name names(res_by_levels_null.test)[[row]] <- level_name } return(list('mean.diff'=res_by_levels_mean.diff,'null.test'=res_by_levels_null.test)) } # transition probabilities diagnostic plots by covariates levels get_tp_diagnostic_plots_by_level <- function(results,transitions,cov_combs) { for(row in cov_combs) { label <- unlist(lapply(1:length(row), function(x) results$cov.labels[[x]][row[x]])) title <- paste(c("Covariates {",label,'}'),collapse=" ") d0 <- size(results$tp.exgns.all.itns)[1] simsize <- results$MCMCparams$simsize burnin <- results$MCMCparams$burnin thin_ind <- seq(burnin,simsize,results$MCMCparams$N_Thin) for(transition in transitions) { i <- transition[1] j <- transition[2] ind <- cbind(repmat(c(i,j,row),simsize,1),1:simsize) plot(results$tp.exgns.all.itns[ind],type='l',xlab='Iteration number',col='#00A600', ylab=paste(c('Prob(',results$state.labels[j],'->',results$state.labels[i],')'),collapse=''),main=paste('Traceplot for',title),ylim=c(0,1)) abline(v=burnin,col='red',lwd=2) abline(h=mean(results$tp.exgns.all.itns[ind][burnin:simsize]),col='blue',lty=2,lwd=2) acf(results$tp.exgns.all.itns[ind][thin_ind],main=paste('Autocorrelation Plot for',title)) } } } # mixture gamma distributions mix_gam <- function(x,pi,alpha,beta) { ret <- 0 for(i in 1:length(pi)) { ret <- ret + pi[i]*dgamma(x,shape=alpha[i],rate=beta[i]) } return(ret) } # mixture gamma distribution # get discretized x-axis get_x_axis <- function(x_range) { return(seq(x_range[1],x_range[2],(x_range[2]-x_range[1])/200)) } # ISI: get 5%, 50% & 95% quantile for posterior distribution quantile_calc <- function(results,prob_mat,Alpha,Beta,x_range) { x_axis <- get_x_axis(x_range) ind <- seq(results$MCMCparams$burnin+1,results$MCMCparams$simsize,5) a_s <- Alpha[ind,] pi_s <- prob_mat[ind,] pi_s[pi_s==0] <- 10^-5 b_s <- Beta[ind,] percent5 <- rep(0,length(x_axis)) percent50 <- rep(0,length(x_axis)) percent95 <- rep(0,length(x_axis)) for(i in 1:length(x_axis)) { val <- sapply(1:length(ind),function(x) mix_gam(x_axis[i],pi_s[x,],a_s[x,],b_s[x,])) res <- quantile(val,c(.05,.5,.95)) percent5[i] <- res['5%'] percent50[i] <- res['50%'] percent95[i] <- res['95%'] } return(list('p5'=percent5,'p50'=percent50,'p95'=percent95)) } # heatmap function plot.heatmap <- function(x,xlab=NULL,ylab=NULL,main=NULL,...) { if(!is.matrix(x)) { stop("'x' must be of class 'matrix'") } mat <- x image.plot(1:ncol(mat),1:nrow(mat),t(mat)[,c(nrow(mat):1)],col=terrain.colors(60), axes=FALSE,ylab=ylab,xlab=xlab,main=main,...) grid(nx = ncol(mat), ny = nrow(mat), lty = 'solid', col='black') axis(1, 1:ncol(mat), colnames(mat)) axis(2, 1:nrow(mat), rownames(mat)[nrow(mat):1]) for (x in 1:ncol(mat)) for (y in 1:nrow(mat)) text(x,y,mat[nrow(mat)+1-y,x]) }
/scratch/gouwar.j/cran-all/cranData/BMRMM/R/print_and_plot_results_fn.R
#' @details #' The key function you need is \code{\link{bms}}. #' @author Martin Feldkircher, Paul Hofmarcher, and Stefan Zeugner #' @seealso \code{\link{coef.bma}}, \code{\link{plotModelsize}} and #' \code{\link{density.bma}} for some operations on the resulting 'bma' object, #' as well as #' \code{\link{predict.bma}} or \code{\link{gdensity}}, or #' \code{\link{zlm}} for individual Zellner regression models. #' #' Check \url{http://bms.zeugner.eu} for additional help. #' @references #' \url{http://bms.zeugner.eu}: BMS package homepage with help and tutorials #' #' Feldkircher, M. and S. Zeugner (2015): Bayesian Model Averaging Employing #' Fixed and Flexible Priors: The BMS Package for R, Journal of Statistical Software 68(4). #' #' Feldkircher, M. and S. Zeugner (2009): Benchmark Priors #' Revisited: On Adaptive Shrinkage and the Supermodel Effect in Bayesian Model #' Averaging, IMF Working Paper 09/202. #' #' @keywords internal "_PACKAGE"
/scratch/gouwar.j/cran-all/cranData/BMS/R/BMS-package.R
########################################### # This version: adjusted on 2011-05-05 # ########################################### # it includes all the auxiliary functions that should only be called INSIDE the bms function #START: SUBFUNCTIONS ################ .ols.terms2<-function(positions,yty,k=NULL,N=N,K=K,XtX.big=XtX.big,Xty.big=Xty.big,...){ # function calculates ols terms uhat'uhat ("ymy"), beta, (X'X)^-1,... # it's child function child.ymy and mutate are most used syminv <- function(symmat, ndim=ncol(symmat)) { #this does the same as chol2inv(chol.default(x)), but is stripped-down for speed purposes # Caution: symmat must always have length(symmat)>0!!! if (!is.matrix(symmat)) {symmat=as.matrix(symmat)} return( chol2inv(chol(symmat), size=ndim) ) } if(is.null(k)) k=length(positions) XtXinv.return=numeric(0) # if nullmodel if(sum(k)==0){ Xty=numeric(0);XtXinv=matrix(0,0,0);bhat=numeric(0);ymy=yty;positions=0; } else { XtX<-XtX.big[positions,positions,drop=FALSE] Xty<-Xty.big[positions] #do cholesky split: A=XtX=LL' #get lower triangular matrix from cholesky split XtXinv<-syminv(XtX,ndim=k) bhat<-crossprod(XtXinv,Xty); ymy<-yty-crossprod(Xty,bhat)[[1]] } return(list( full.results = function() { return(list(ymy=ymy, bhat=bhat, diag.inverse=XtXinv[1:k+0:(k-1)*k])) }, child.ymy = function(addix=0,dropix=0,...) { if (!any(as.logical(c(addix,dropix)))) {return(ymy)} if (all(as.logical(c(addix,dropix)))) { #swap jhere={1:k}[positions==dropix]; poshere=positions[-jhere];Xj=XtXinv[,jhere];Xtxi=XtX.big[poshere,addix] bxlessj=crossprod(XtXinv,XtX.big[positions,addix])-Xj*XtX.big[addix,dropix]; bhatx=bxlessj[-jhere]-Xj[-jhere]*bxlessj[jhere]/Xj[jhere] child.ymy = ymy+bhat[jhere]^2/Xj[jhere]-{Xty.big[addix]-crossprod(Xty.big[poshere],bhatx)[[1]]}^2/{XtX.big[addix,addix]-crossprod(bhatx,Xtxi)[[1]]} return(child.ymy) } else { if (addix==0) { #drop jhere={1:k}[positions==dropix] child.ymy=ymy+bhat[jhere]^2/XtXinv[jhere,jhere] return(child.ymy) } else { #add Xtxi=XtX.big[positions,addix] bhatx=crossprod(XtXinv,Xtxi)[,1] child.ymy = ymy - {Xty.big[addix]-crossprod(bhatx,Xty)[[1]]}^2 /{XtX.big[addix,addix]-crossprod(bhatx,Xtxi)[[1]]} return(child.ymy) } } }, mutate= function(addix=0,dropix=0,newpos=numeric(0),newk=0,...) { #return(ols.terms2(newpos,yty,length(k),N,K=K,XtX.big=XtX.big,Xty.big=Xty.big,...,return.inverse=F)) if (newk==0) { XtXinv<<-matrix(0,0,0); Xty<<-numeric(0) } else { # if (newk<7|dropix[[1]]*addix[[1]]!=0|length(c(dropix,addix))>2) { # if (newk<7|addix[[1]]!=0|length(c(dropix,addix))>2) { # Xty<<-Xty.big[newpos] XtXinv<<- syminv(XtX.big[newpos,newpos,drop=FALSE],ndim=newk) } else { if (dropix[1]>0) { jhere=sum(positions<=dropix) Xty<<-Xty[-jhere] Xj=XtXinv[,jhere]; XtXinv<<- {XtXinv-tcrossprod(Xj/Xj[jhere],Xj)}[-jhere,-jhere] } else { jhere=sum(positions<addix)+1 Xtxx=XtX.big[addix,newpos];Xtx=Xtxx[-jhere];Xty<<-Xty.big[newpos] bhatx=crossprod(XtXinv,Xtx)[,1]; bhatxadj=c(bhatx[0:(jhere-1)],-1,bhatx[jhere:k]); if (jhere==newk) bhatxadj=bhatxadj[-(jhere+1:2)] newinv=tcrossprod(bhatxadj,bhatxadj/(Xtxx[jhere]-crossprod(Xtx,bhatx)[[1]])) newinv[-jhere,-jhere]=newinv[-jhere,-jhere]+XtXinv XtXinv<<- newinv; } }} # if (any(diag(XtXinv)<0)) browser() positions<<-newpos; k<<-newk; bhat<<-crossprod(XtXinv,Xty)[,1]; ymy<<-yty-crossprod(Xty,bhat)[[1]] return(list(ymy=ymy, bhat=bhat, diag.inverse=XtXinv[1:k+0:{k-1}*k])) }, return.inverse= function() XtXinv, ymy=ymy, bhat=bhat, diag.inverse=XtXinv[1:k+0:{k-1}*k] )) } ## MODEL PRIOR DEFINITIONS ##################### #the following functions define an mprior.info object # out of the provided parameters # mpparam: corresponds to argument 'mprior.size' in function bms # K: the number of covariates # arguments passed from bms to .mprior.*.init: # mpmode=mprior,mpparam=mprior.size,K=K,X.data=X.data,fixed.pos=fixed.pos # Example: # bms(attitude,mprior=.mprior.uniform.init) # bms(attitude,mprior=.mprior.pip.init(K=ncol(attitude)-1,mpparam=seq(.1,.6,.1))) .mprior.uniform.init = function(K,...) { #defines uniform model prior return(list( mp.mode="uniform", #name of the model prior mp.msize=K/2, #prior expected model size pmp=function(...) return(0), #the actual model prior function mp.Kdist=exp(lchoose(K,0:K)-K*log(2)) #a K+1 vector of prior model probailtiy for each model size - optional, but necessary for some processing functions )) } .mprior.fixedt.init = function(K, mpparam, ...) { #Defines binomial model prior ("fixed theta") #user checks: if (is.na(mpparam[1])) mpparam<-K/2 if((mpparam[[1]]>=K)&(length(mpparam)==1)){ warning("Submitted prior model size is >= than the nr. of regressors\n, used K/2 instead\n\n") mpparam<-K/2 } # actual model prior m=mpparam[[1]] return(list( mp.mode="fixed", mp.msize=m, pmp=function(ki,...) { post.odds1=ki*log(m/K)+{K-ki}*log(1-m/K) return(post.odds1) }, mp.Kdist=stats::dbinom(x=0:K,size=K,prob=m/K,log=FALSE) )) } .mprior.randomt.init = function(K, mpparam, ...) { #model prior definition for beta-binmomial (random theta) model prior #user checks: if (is.na(mpparam[1])) mpparam<-K/2 if((mpparam[[1]]>=K)&(length(mpparam)==1)){ warning("Submitted prior model size is >= than the nr. of regressors\n, used K/2 instead\n\n") mpparam<-K/2 } # necessary initialization variables for speeding up sampling m=mpparam[[1]] vecofpriors=lgamma(1+0:K)+lgamma({K-m}/m+K-0:K) beta.bin=function(a=1,b=(K-m)/m,K=K,w=0:K){lgamma(a+b)-{lgamma(a)+lgamma(b)+lgamma(a+b+K)}+log(choose(K,w))+lgamma(a+w)+lgamma(b+K-w)} #actual mpinfo object return(list( mp.mode="random", mp.msize=m, pmp=function(ki,...) { return(vecofpriors[[ki+1]]) }, mp.Kdist=exp(beta.bin(a=1,b={K-m}/m,K=K,w=0:K)) )) } .getpolycoefs <- function(polyroots) { # helper function for mprior.pip.init #given the roots of a homogenous polynomial, # this function finds the polynomial coefficients (ordered from highest exponent term to constant) if (length(polyroots)==1) return(c(1,polyroots)) restterms=.getpolycoefs(polyroots[-1]) c(restterms,0)+c(0,polyroots[1]*restterms) } .mprior.pip.init = function(K,mpparam,...) { #defines model prior for custom prior inclusion probabilities if (any(is.na(mpparam))) mpparam=rep(.5,K); if (!is.numeric(mpparam)) stop("For prior inclusion probabilites, you need to provide a K vector with elements between 0 and 1 for argument 'mprior.size'.") mpparam=as.vector(mpparam) if (!((length(mpparam)==K)&all(mpparam>0)&all(mpparam<=1))) stop("For prior inclusion probabilites, you need to provide a K vector with elements between 0 and 1 for argument 'mprior.size'.") if (any(mpparam==1L)) warning("Prior Inclsuion Prob. = 1 are impractical. Try using the argument fixed.reg") inclfacts=log(mpparam/(1-mpparam)) return(list( mp.mode="pip", mp.msize=sum(mpparam), pmp=function(mdraw,...) { return(sum(inclfacts[as.logical(mdraw)])) }, mp.Kdist=.getpolycoefs(mpparam/{1-mpparam})*prod(1-mpparam) )) } .mprior.customk.init = function(K, mpparam, ...) { # defines model prior for custom size-based model priors if (any(is.na(mpparam))) mpparam=rep(.5,K); if (!is.numeric(mpparam)) stop("For custom model size priors, you need to provide a K+1 vector with positive elements for argument 'mprior.size'.") mpparam=as.vector(mpparam) if (!((length(mpparam)==(K+1))&all(mpparam>0))) { stop("For custom model size priors, you need to provide a K+1 vector with positive elements for argument 'mprior.size'.") } mpkvec=log(mpparam) return(list( mp.mode="custom", mp.msize=sum(choose(K,0:K)*mpparam*{0:K})/sum(choose(K,0:K)*mpparam), pmp=function(ki,...) { return(mpkvec[[ki+1]]) }, mp.Kdist=choose(K,0:K)*mpparam/sum(choose(K,0:K)*mpparam) )) } .fixedset.mprior = function(mprior.function,fullK,fixed.pos=numeric(0),K=NA,...) { #CAUTION: NO adjustment for mpparam yet! if (length(fixed.pos)==0) return(mprior.function(K=fullK,...)) fixed.pos={1:fullK}[fixed.pos] #to convert from possible binary vector flexpos={1:fullK}[-fixed.pos]; flexk=length(flexpos) mprior=mprior.function(K=flexk,...) fixk=length(fixed.pos) mpl=list( mp.mode=mprior$mp.mode, mp.msize=mprior$mp.msize+fixk, pmp=function(ki,mdraw,...) { return(mprior$pmp(ki=ki-fixk,mdraw=mdraw[flexpos])) }, mp.Kdist = c(numeric(fixk),mprior$mp.Kdist) ) return(mpl) } .choose.mprior <- function(mpmode,mpparam,K,...,fixed.pos=numeric(0)) { #this function fetches an mprior.info object # mpmode: corresponds to argument 'mprior' in function bms # mpmode determines one of 5 generic model prior classes: # uniform, fixed equal inclusion probs, random 'theta' equal inclusion probs, # custom model size priors and custom inclusion probs # but can also be a custom function or list such as the indivdual model prior definition functions # mpparam: corresponds to argument 'mprior.size' in function bms # K: the number of covariates # fixed.pos: the positions of the variables in X the need to be kept in any model origargs=list(mpmode=mpmode,mpparam=mpparam) #read input arguments fixed.pos={1:K}[fixed.pos]; fixed.exist=as.logical(length(fixed.pos)); fixk=length(fixed.pos) if (!( is.character(mpmode)|| is.function(mpmode) || is.list(mpmode))) stop("'mprior' parameter must be character! (or function/list)") #switch list: choose model prior - Caution: mpparam is adjusted for fixed.set variables if (is.function(mpmode) || is.list(mpmode)) { # custom model prior provided as function mpinfo=mpmode; } else if (any(grep("fix",mpmode,ignore.case=TRUE))) { #fixed theta prior mpinfo=.mprior.fixedt.init if (is.numeric(mpparam)) mpparam=mpparam[[1]]-fixk } else if (any(grep("unif",mpmode,ignore.case=TRUE))) { #uniform mpinfo=.mprior.uniform.init } else if (any(grep("custom",mpmode,ignore.case=TRUE))) { #custom model size prior mpinfo=.mprior.customk.init if (fixed.exist && is.numeric(mpparam)) if (length(mpparam)==K+1) mpparam=mpparam[(fixk+1):length(mpparam)] } else if (any(grep("pip",mpmode,ignore.case=TRUE))) { #prior inclusion probabilities mpinfo=.mprior.pip.init if (fixed.exist && is.numeric(mpparam)) if (length(mpparam)==K) mpparam=mpparam[-fixed.pos] } else { #random theta prior mpinfo=.mprior.randomt.init if (is.numeric(mpparam)) mpparam=mpparam[[1]]-fixk } if (is.function(mpinfo)) mpinfo=.fixedset.mprior(mpinfo,fullK=K,fixed.pos=fixed.pos,K=NA,mpparam=mpparam, mpmode=mpmode, ...) if (!all( c("mp.mode","mp.msize", "pmp") %in% names(mpinfo))) stop("The provided custom-built model prior is deficient.") if (!("origargs" %in% names(mpinfo))) mpinfo$origargs=origargs; if (length(fixed.pos)>0) mpinfo$fixed.pos=fixed.pos class(mpinfo) <- c("mprior", class(mpinfo)) return(mpinfo) } ## end: model priors .starter=function(K,start.value,y,N=N,XtX.big=XtX.big,Xty.big=Xty.big,X=X,fixed.pos=numeric(0)){ #"starter" draws random start vector of size start.value, and keeps those regressors with a t-stat>0.2 # in case user submitted a single number as start.value # we randomly draw a model with start.value regressors # finally only regressors with t-stats>0.2 are kept and build the starting model #some input checks if (is.na(start.value[1])) {start.value=min((N-3),K)} if (any(start.value<0)|!(is.numeric(start.value)|is.logical(start.value))) { start.value=min((N-3),K) warning("Argument 'start.value' did not conform to required format. start.value has been changed to default - min(N-3,K)") } if (length(start.value)==0) {start.value=numeric(K)} if (length(start.value)==1) {if (start.value==0) {start.value=numeric(K)}} if (length(start.value)>1 && any(start.value>1)) {sv=numeric(K); sv[start.value]=1; start.value=sv; rm(sv) } if(length(start.value)==1){ if (start.value>min((N-3),K)){ cat("Submitted Start value is too large, used\n min(N-3,K) as starting model size instead\n\n") start.value=min((N-3),K) } # draw randomly sorter=stats::runif(K) start.position=order(sorter,seq(1:K))[1:start.value] # calculate bhats and t-stats XtX.start<-XtX.big[start.position,start.position] XtXinv.start<-chol2inv(chol(XtX.start)) bhat=XtXinv.start%*%Xty.big[start.position] e=y-X[,start.position]%*%bhat sse=crossprod(e) s2=as.numeric(sse/(N-length(start.position))) bcov=s2*XtXinv.start bt=bhat/sqrt(diag(bcov)) # choose only regressors with t-stat>0.2 molddraw=rep(0,K) goodguy=as.numeric(abs(bt)>.2) molddraw[start.position]=goodguy start.position = (1:K)[as.logical(molddraw)] outstart=list(molddraw=molddraw,bhat=bhat,start.position=start.position) } # else we start with the user specified starting model # in this case we do not test whether the t-stat's are greater than 0.2 # but start with exactly the selected model no matter whether it is a # good starting model or not # in case you want to start with the null model if(length(start.value)>1 && sum(start.value)==0){ outstart=list(molddraw=rep(0,K),bhat=rep(0,K),start.position=integer(0)) } if(length(start.value)>1 && sum(start.value)>0){ if(length(start.value)!=K){ # in case user specified a too big model stop("Starting Model contains unequal to K regressors,please respecify") } start.position=which(as.logical(start.value)) XtX.start<-XtX.big[start.position,start.position] XtXinv.start<-chol2inv(chol(XtX.start)) bhat=XtXinv.start%*%Xty.big[start.position] molddraw=rep(0,K); molddraw[start.position]=1 outstart=list(molddraw=molddraw,bhat=bhat,start.position=start.position) } fixed.pos=(1:K)[fixed.pos] if (length(fixed.pos)>0) { outstart$molddraw[fixed.pos]=1 outstart$start.position = (1:K)[as.logical(outstart$molddraw)] } return(outstart) } ########################################################################################################################### #Sample Functions ############################################################################################################################ #First, we have implemented the original FLS Sample Function; here a variable is drawn from the set of # K regressors, then #conditional on whether it is included in the current model it can be discarded or added. .fls.samp=function(molddraw=molddraw,K=K,...,maxk=Inf,oldk=0){ # the original FLS Sample Function; here a variable is drawn from the set of K regressors, then #conditional on whether it is included in the current model it can be discarded or added. indch<-ceiling(stats::runif(1,0,K)) #rounding to the smallest integer part by floor, uniform distr. [0,1] bdropit<-as.logical(molddraw[[indch]]); if (oldk==maxk) if (!bdropit) {indch=(1:K)[molddraw==1][[ceiling(stats::runif(1,0,sum(molddraw)))]]; bdropit=molddraw[[indch]]} if (bdropit){ #dropping addvar<-0;dropvar<-indch; molddraw[[indch]]<-0 } else { #adding addvar<-indch;dropvar<-0; molddraw[[indch]]<-1 } # addvar<-(!bdropit)*indch; dropvar<-bdropit*indch; # molddraw[[indch]]<- !bdropit positionnew <- {1:K}[molddraw==1] return(list(mnewdraw=molddraw,positionnew=positionnew,addi=addvar,dropi=dropvar)) } ############################################################################################################################# #Second, a reversible jump algorithm, where we #have added a move step. See below ############################################################################################################################## #Reversible Jump Algorithm .rev.jump=function(molddraw=molddraw,K=K,...,maxk=Inf,oldk=0){ #Reversible Jump Algorithm: with equal prob decides between swap step or add/drop step (.fls.samp) rev.idx=ceiling(stats::runif(1,0,2)) #rev.idx is a flag that indicates the three possible steps of #the reversible jump algorithm, 1=birth or death and 2=move. # Perform Death, Birth or Move Step # if rev.idx is 1, do the same as in fls sampler (i.e. increase or decrease depending # on variables already included in the model if(rev.idx==1){ birth.death=.fls.samp(molddraw=molddraw,K=K,maxk=maxk,oldk=oldk) mnewdraw=birth.death[["mnewdraw"]] positionnew=birth.death[["positionnew"]] addvar=birth.death[["addi"]]; dropvar=birth.death[["dropi"]] } #move step if(rev.idx==2){ var.in=(1:K)[as.logical(molddraw)] #positions of the variables that are currently in the model var.out=(1:K)[!as.logical(molddraw)] #positions of the variables that are currently out of the model var.in.rand=ceiling(length(var.in)*stats::runif(1,0,1)); addvar=var.out[ceiling(length(var.out)*stats::runif(1,0,1))] dropvar=var.in[var.in.rand] mnewdraw=molddraw; mnewdraw[addvar]=1; mnewdraw[dropvar]=0; positionnew=(1:K)[as.logical(mnewdraw)] dropvar=max(dropvar,0); addvar=max(addvar,0) # in case one of the indexes is integer(0) (borderline case) } return(list(mnewdraw=mnewdraw,positionnew=positionnew,addi=addvar,dropi=dropvar)) } ############################################################################################################################# #Third, there is a 'contiguity enumeration' sampler, that enumerates all possible combinations of models. #In particular it moves between continguous models (without repeating itself). i.e. there is always only an "add" or "drop" opration in the move to the next model. ############################################################################################################################## .iterenum <- function(molddraw=numeric(0),K=length(molddraw),...) { #Contiguity Enumeration Sampler # takes a binary vector (like molddraw) and iterates it such that it performs 'contiguity enumeration', # i.e. enumerating all possible combinations by always changing only one entry in molddraw even.lead1={1:K}[!{cumsum(molddraw)%%2}]; i=even.lead1[length(even.lead1)] # i ist the last entry where the number of 1's up to entry i is even: molddraw[i]=!molddraw[i]; # then change entry i (either T->F or F->T) addi=molddraw[i]*i;dropi={!molddraw[i]}*i; #indch=i return(list(mnewdraw=molddraw,positionnew={1:K}[as.logical(molddraw)],addi=addi,dropi=dropi)) } ##### Enumeration in case K>N-2 ###################### .iterenum.bone = function(molddraw=numeric(0),maxk=Inf) { # an auxiliary function for iterenum.KgtN even.lead1=((1:length(molddraw))[!(cumsum(molddraw)%%2)]); i=even.lead1[length(even.lead1)] # i ist the last entry where the number of 1's up to entry i is even: molddraw[i]=!molddraw[i]; # then change entry i (either T->F or F->T) if (sum(molddraw)>maxk) return(.iterenum.bone(molddraw,maxk)) else return(molddraw) } .iterenum.KgtN <- function(molddraw=numeric(0),maxk=Inf,oldk=0,...) { #iterenum.KgtN is slightly slower than iterenum and itererates only through models with k<maxk; else similar to iterenum mnewdraw=.iterenum.bone(molddraw=molddraw,maxk) addi=(1:length(mnewdraw))[molddraw<mnewdraw]; if (length(addi)==0) addi=0 dropi=(1:length(mnewdraw))[molddraw>mnewdraw]; if (length(dropi)==0) dropi=0 return(list(mnewdraw=mnewdraw,positionnew=(1:length(mnewdraw))[as.logical(mnewdraw)],addi=addi,dropi=dropi)) } ##### ############ #get the enumeration sampler vector for a certain iteration index .enum_fromindex <- function(lindex){ #lindex: an integer index; this function returns a logical vector that corresponds to the enumeration sampler draw for this index (without leading zeros) lindex=lindex[[1]] if (lindex==0) return(FALSE) log2=ceiling(log(lindex+1,2)) return( as.logical((lindex+2^((log2-1):0))%/%(2^(log2:1))%% 2)) } .enum_startend = function(iter=NA,start.value=0,K=1,maxk=K, fixed.pos=numeric(0)) { #does user checks and returns the starting model for enumeration and the number of iterations # in its most basic version, it returns list(numeric(K), 2^K-1) # however, it also adjusts for N-3<K and fixed variables # moreover, it converts to a subspace of the enumeration space if iter and start.value say so fixed.pos={1:K}[fixed.pos] effk=K-length(fixed.pos) flexpos={1:K}; if (length(fixed.pos)>0) flexpos={1:K}[-fixed.pos] start.value2=0 if (length(start.value)==1) { start.value2=suppressWarnings(as.integer(start.value)) if (any(is.na(start.value2))|start.value2[[1]]<0|start.value2[[1]]>=(2^effk-1)) { start.value=0;start.value2=0 } } else { start.value=0 } if (length(start.value)==1) { #if startvalue is an integer index satysfying above conditions then convert it into a 'draw' to start from start.value_cut=.enum_fromindex(start.value2) start.value=rep(1,K) start.value[flexpos]=c(numeric(effk-length(start.value_cut)),start.value_cut) } # do all the other enumeration stuff if (K>maxk) {lastindex=2^effk-1-sum(choose(effk,(maxk+1):effk))} else lastindex=2^effk-1 if (is.na(iter)) {iter=lastindex-start.value2} #default iter is the rest to complete the enumeration from start.value iter=min(iter,2^effk-1-start.value2); # don't do too much! return(list(start.value=start.value, iter=iter)) } .fixedset.sampler = function(sampler.function, fullK, fixed.pos=numeric(0),...) { #converts any model sampler function into one that retains fixed variables defined in fixed.pos if (length(fixed.pos)==0) return(sampler.function) #initialize fixed.pos={1:fullK}[fixed.pos] #to convert from possible binary vector flexpos={1:fullK}[-fixed.pos]; flexk=length(flexpos) outdraw=rep(1,fullK) #now create a 'stoepsler' function that plugs in the draw of flexible varaibles into the entire set outfun = function(molddraw=molddraw,K=flexk,...) { flexdraw=sampler.function(molddraw=molddraw[flexpos],K=flexk,...) outdraw[flexpos]=flexdraw[["mnewdraw"]] addi=flexdraw[["addi"]]; dropi=flexdraw[["dropi"]]; #indch= flexpos[flexdraw[["indch"]]]; #is this really necessary? if (is.numeric(addi)||is.numeric(dropi)) { if (addi>0) addi= flexpos[addi] else addi=0 if (dropi>0) dropi= flexpos[dropi] else dropi=0 } return(list(mnewdraw=outdraw,positionnew={1:fullK}[as.logical(outdraw)],addi=addi,dropi=dropi)) } return(outfun) } ########################################################################################################################### #SAMPLERS WITH INTERACTION TERMS ############################################################################################################################ #initialization .constr.intmat = function(X,K) { #this function identifies the columns of X named with multiple terms (sep'd by "?") as interactions #and constructs a matrix whose rows identify the base terms corresponding to each interaction term intix=grep("#",colnames(X),fixed=TRUE) # indices of columsn with interaction terms mPlus=diag(K) colnames(mPlus)<-colnames(X) for (jj in 1:length(intix)) { cix= intix[jj] mPlus[cix,unlist(strsplit(colnames(mPlus)[cix],"#",fixed=TRUE))]=1 # put a one in row (interaction term) and columns (all base terms) } return(mPlus) } #First, we have implemented the original FLS Sample Function; here a variable is drawn from the set of # K regressors, then #conditional on whether it is included in the current model it can be discarded or added. .fls.samp.int=function(molddraw=molddraw,K=K,mPlus=mPlus,maxk=Inf,oldk=0){ #interactions sampler for .fls.samp indch=ceiling(stats::runif(1,0,1)*K) #rounding to the smallest integer part by floor, uniform distr. [0,1] # have to make sure that we delete all the regressors if (molddraw[indch]==1){ #dropping mnewdraw=as.numeric(molddraw>mPlus[,indch]) dropvar = (1:K)[xor(molddraw,mnewdraw)]; addvar=0; } else{ #adding mnewdraw=as.numeric(molddraw|mPlus[indch,]) addvar = (1:K)[xor(molddraw,mnewdraw)]; dropvar=0; } positionnew=which(mnewdraw==1) if (length(positionnew)>maxk) { return(.fls.samp.int(molddraw=molddraw,K=K,mPlus=mPlus,maxk,oldk)) } else { return(list(mnewdraw=mnewdraw,positionnew=positionnew,addi=addvar,dropi=dropvar)) } } ############################################################################################################################# #Second, we have implemented a reversible jump algorithm, where we #have added a move step. See below ############################################################################################################################## #Reversible Jump Algorithm .rev.jump.int=function(molddraw=molddraw,K=K,mPlus=mPlus,maxk=Inf,oldk=0){ #interactions sampler for .rev.jump rev.idx=floor(stats::runif(1,0,1)*2) #rev.idx is a flag that indicates the three possible steps of #the reversible jump algorithm, 1=birth or death and 2=move. # Perform Death, Birth or Move Step # if rev.idx is 1, do the same as in fls sampler (i.e. increase or decrease depending # on variables already included in the model if((rev.idx)|oldk==0){ birth.death=.fls.samp.int(molddraw=molddraw,K=K,mPlus=mPlus,maxk,oldk) mnewdraw=birth.death$mnewdraw positionnew=birth.death$positionnew addvar=birth.death$addi; dropvar=birth.death$dropi } else { var.in=(1:K)[as.logical(molddraw)] #positions of the variables that are currently in the model var.out=(1:K)[!as.logical(molddraw)] #positions of the variables that are currently out of the model mnewdraw=(molddraw>mPlus[,var.in[ceiling(length(var.in)*stats::runif(1,0,1))]]) mnewdraw=mnewdraw|mPlus[var.out[ceiling(length(var.out)*stats::runif(1,0,1))],] positionnew=(1:K)[mnewdraw] addvar = (1:K)[molddraw<mnewdraw]; dropvar = (1:K)[molddraw>mnewdraw]; if (length(dropvar)==0) dropvar=0; if (length(addvar)==0) addvar=0 } #return(list(mnewdraw=as.numeric(mnewdraw),positionnew=positionnew,addi=addvar,dropi=dropvar,indch=rev.idx)) if (length(positionnew)>maxk) { return(.rev.jump.int(molddraw=molddraw,K=K,mPlus=mPlus,maxk,oldk)) } else { return(list(mnewdraw=as.numeric(mnewdraw),positionnew=positionnew,addi=addvar,dropi=dropvar)) } } .post.calc <- function(gprior.info,add.otherstats,k.vec,null.count,X.data,topmods,b1mo,b2mo,iter,burn,inccount,models.visited,K,N,msize,timed,cumsumweights=NA,mcmc="bd",possign=NA) { #customized function for posterior results from bms postad.k.vec <- function(k.vec, null.count) c(null.count,k.vec) #concatenates the vector of freqencies for 1:K model sizes with freq for null model postad.gprior.info <- function(gprior.info,add.otherstats=numeric(0),cumsumweights=1) { # adjusts the gprior.info object resulting from choose.gprior(): # add.otherstats: vector, cumsumweights: scalar by which otherstats are to be divided (typically nb of draws 'iter') if (gprior.info$return.g.stats) { if (length(add.otherstats)>0) { gprior.info$shrinkage.moments=add.otherstats/cumsumweights } else { gprior.info$shrinkage.moments=1/(1+1/gprior.info$g) } } return(gprior.info) } postad.reg.names <- function(X.data) { # extracts the column names of covariates or constructs ones: X.data is data.frame xdcn=colnames(X.data) if(is.null(xdcn)) {xdcn<- rep("",NCOL(X.data))} if(anyNA(xdcn)) {xdcn[is.na(xdcn)]<- rep("",NCOL(X.data))[is.na(xdcn)]} if(any(trimws(xdcn)=='')) {xdcn[trimws(xdcn)=='']<-paste0("Vbl",0:K)[trimws(xdcn)==''] } reg.names=xdcn[-1] return(reg.names) } gprior.info=postad.gprior.info(gprior.info,add.otherstats,cumsumweights) k.vec = postad.k.vec(k.vec,null.count) #if (is.na(cumsumweights)) cumsumweights=iter cons=.post.constant(X.data,b1mo/cumsumweights) pmp.10=pmp.bma(topmods,oldstyle=TRUE) if (nrow(pmp.10)==1|suppressWarnings(length(grep("error",class(try(cor(pmp.10[,1],pmp.10[,2]),silent=TRUE)))))) { corr.pmp=NA } else { if (var(pmp.10[,2])==0) corr.pmp=NA else corr.pmp=cor(pmp.10[,1],pmp.10[,2]) } if (is.na(possign[[1]])) possign=numeric(K) info.object=list(iter=iter,burn=burn,inccount=inccount,models.visited=models.visited,b1mo=b1mo,b2mo=b2mo, add.otherstats=add.otherstats,cumsumweights=cumsumweights,K=K,N=N,corr.pmp=corr.pmp,msize=msize,timed=timed,k.vec=k.vec,cons=cons,pos.sign=possign) reg.names=postad.reg.names(X.data) return(list(info=info.object,k.vec=k.vec,cons=cons,gprior.info=gprior.info,pmp.10=pmp.10,reg.names=reg.names)) } ################ g prior functions ######################################### # a gprior object is a list, or a function that returns that list # such a gprior object can be passed to bms (cf example below) # # EXAMPLE: a simple constant g prior list is returned by the following function: # # simpleg <- function(g=NA,N=100,K=2,yty=1,...,myg=NULL) { # # bms/zlm calls this function with the following arguments: g, N (nb obs), K (nb of total variables), yty (TSS), null.lik (usually NA) and return.g.stats (same as bms argument g.stats), y and X # # additional arguments may be used by providing bms with the output of this function, rather than the function itself # if (is.null(myg)) myg=10 #stuff to be valid for all evaluations of the sub-functions below # # gprior.info=list( #general information for ppost-processing # gtype = "wappler-g", # a name for the g prior sub-type # is.constant = TRUE, # whether g is a predifined scalar, or changes with models (important for post-processing) # g=myg, #a scalar - only necessary if is.constant==TRUE # return.g.stats = FALSE # whether g-statistics should be collected; TRUE possible in principle, but mor bug-prone # ) # # # the actual gprior-computation routines are in a sub-element of the gprior object - note that myg was defined upfront # gprior.info$lprobcalc = list( # just.loglik = function(ymy, k, ...) { # #for speed reasons, this function is used when evaluationg the liklihood of a candiade model in bms # return(.5*{ (N-1-k)*log(1+myg)-(N-1)*log(myg*ymy + yty) } ) #the (scalar) log-likelihood of the model # }, # lprob.all=function(ymy,k,bhat,diag.inverse,...) { # return(list( # lprob=.5*{ (N-1-k)*log(1+myg) - (N-1)*log(myg*ymy + yty)}, # log-likelihood # b1new = myg/(1+myg)*bhat, # the posterior expected coefficents # b2new ={(yty/myg+ymy) * (myg/(1+myg))^2 /(N-3)}*diag.inverse + (myg/(1+myg)*bhat)^2, #second posteriro moment of the coefficients # otherstats=numeric(0) #addtional stats to be counted when return.g.stats==TRUE # )) # } # ) # # return(gprior.info) # } # bms(attitude,g=simpleg) # zlm(attitude,g=simpleg) # bms(attitude,g=simpleg(myg=20,N=nrow(attitude),K=ncol(attitude)-1,yty=var(attitude[,1])*(nrow(attitude)-1))) .gprior.constg.init <- function(g=NA,return.g.stats=TRUE,N=N,K=K,yty=1,null.lik=NA,...) { gg=NULL if (!(is.character(g)||is.numeric(g))) g="UIP" if (any(grep("BRIC",g,ignore.case=TRUE))|any(grep("FLS",g,ignore.case=TRUE))) { if (N<=(K^2)){gg=(K^2)} else { gg=N } gtype="BRIC" } if (any(grep("RIC",g,ignore.case=TRUE))&& (!any(grep("BRIC",g,ignore.case=TRUE)))) { gg=(K^2) gtype="RIC" } if (any(grep("HQ",g,ignore.case=TRUE))|any(grep("Hannan",g,ignore.case=TRUE))) { gg=(log(N))^3 gtype="Hannan-Quinn" } if(is.numeric(g)){ gg=g gtype="numeric" } if (is.null(gg)) { if (!(any(grep("UIP",g,ignore.case=TRUE))|any(grep("BIC",g,ignore.case=TRUE)))) warning("The provided g prior could not be identified. Therefore the default g prior (UIP) has been selected.") gg=N gtype="UIP" } gprior.info=list(gtype=gtype, is.constant=TRUE, return.g.stats=return.g.stats, shrinkage.moments=gg/(gg+1), g=gg) g=gg if (!is.numeric(null.lik)) { null.lik={1-N}/2*log(yty) } g2=g/{g+1}; l1g=log(1+g) ; g2sq=g2^2; n1=N-1 gprior.info$lprobcalc <- list( #estimates the standard posterior stats under a cfixed g-prior #lprob.constg.init is called only once: it calculates constant terms to reduce redundancy #each lprob..init function has the two subfunctions: # * just.loglik: this just calculates the log likelihood from given terms - as the sampler mostly needs just that # * lprob.all: this calculates the log lik, as well as b1new (E(beta|Y) the expected value (normal-gamma) of coefficients), and b2new (the Expected value of coefficents squared E(beta^2|Y)= Var(beta|Y)+E(beta|Y)^2) #function(ymy, k) { gg=N; l1g=log(1+gg); n1=N-1; g2=g0/(1+g0); .5*{-k*l1g - n1*log(1-g2*(1-ymy/yty))} } -n1 just.loglik=function(ymy,k,...) { return(.5*{{n1-k}*l1g-n1*log(g*ymy + yty)}) }, lprob.all=function(ymy,k,bhat,diag.inverse,...) { b1new = g2*bhat return(list(lprob=.5*{{n1-k}*l1g-n1*log(g*ymy + yty)},b1new = b1new,b2new ={{yty/g+ymy}*g2sq/{N-3}}*diag.inverse+b1new^2,otherstats=numeric(0))) } ) class(gprior.info) <- c("gprior",class(gprior.info)) return(gprior.info) } .gprior.eblocal.init <- function(g=NA,return.g.stats=TRUE,N=N,K=K,yty=1,null.lik=NA,...) { gprior.info=list(gtype="EBL", is.constant=FALSE, return.g.stats=return.g.stats, shrinkage.moments=numeric(1), g=NA) if (!is.numeric(null.lik)) { null.lik=(1-N)/2*log(yty) } ymy.current=-1; k.current=-1; loglik=null.lik; Fstat=numeric(0); g2=0 if (return.g.stats) { otherstats=numeric(1) } else { otherstats=numeric(0) } gprior.info$lprobcalc <- list( # estimates the local Empirical Bayes g prior as given in Liang et al (2008): "Mixtures of g Priors for Bayesian Variable Selection"; JASA #lprob.eblocal.init is called only once: it initializes an object that links just.loglik and lprob.all #each lprob..init function has the two subfunctions: # * just.loglik: this just calculates the log likelihood from given terms - as the sampler mostly needs just that # * lprob.all: this calculates the log lik, as well as b1new (E(beta|Y) the expected value (normal-gamma) of coefficients), and b2new (the Expected value of coefficents squared E(beta^2|Y)= Var(beta|Y)+E(beta|Y)^2) # if at initalization return.g.stats=TRUE, then it additionally returns the estimated value of the shrinkage factor g2 just.loglik=function(ymy,k,...) { ymy.current<<-ymy; k.current<<- k; if (k==0) { return(null.lik) } Fstat<<-(N-k-1)/k*(yty-ymy)/ymy if (Fstat>1) { #g0=1/max(Fstat-1,0) g0<-1/(Fstat-1); g2<<-1/(g0+1) # g0 corresponds to 1/g } else { g2<<-0; return(null.lik) } lFstat=log(Fstat); lg02=-lFstat; #Note that: g2=1-1/F, lg02=log(g0*g2)=log(1/Fstat), loglik=.5* (-k*log(Fstat)-(N-1)*log(ymy+(1/Fstat-1)*yty)-(N-1)*(log(F-1)-log(Fstat)) loglik<<-.5*{k*lg02-{N-1}*{log(ymy + g0*yty) + {log(Fstat-1)-lFstat }}} return(loglik) }, lprob.all=function(ymy,k,bhat,diag.inverse,...) { if (k==0) { return(list(lprob=null.lik, b1new=numeric(0), b2new=numeric(0),otherstats=otherstats)) } if ((ymy!=ymy.current)|(k!=k.current)) { #if just.loglik was already called just before with the same parameters, no need to calculate the F-stat stuff again Fstat<<- {N-k-1}/k*(yty-ymy)/ymy if (Fstat>1) { g0=1/{Fstat-1}; g2<<-1/(g0+1) lFstat=log(Fstat); lg02=-lFstat; loglik<<-.5*{k*lg02-{N-1}*{log(ymy + g0*yty) + {log(Fstat-1)-lFstat }}} } else { g0=0; g2<<-0; loglik<<-null.lik } } if (return.g.stats) { otherstats=g2 } b1new = g2*bhat if (g2>0) { b2new ={{(1/g2-1)*yty+ymy}*{g2^2}/{N-3}}*diag.inverse+b1new^2 } else {b2new = numeric(k)} return(list(lprob=loglik,b1new = b1new,b2new=b2new,otherstats=otherstats)) } ) class(gprior.info) <- c("gprior",class(gprior.info)) return(gprior.info) } .gprior.hyperg.init <- function(g=NA,return.g.stats=TRUE,N=N,K=K,yty=1,null.lik=NA,...) { #user checks if (!is.character(g)) g="hyper" if (any(grep("=",g))) { f21a=suppressWarnings(as.numeric(unlist(strsplit(g,"="))[2])) if (!is.numeric(f21a)|is.na(f21a)) { f21a.char=suppressWarnings(as.character(unlist(strsplit(g,"="))[2])) if (any(grep("bric",f21a.char,ignore.case=TRUE))) { f21a=2+2/max(N,K^2) } else if (any(grep("uip",f21a.char,ignore.case=TRUE))) { f21a=2+2/N } else { warning("You did not supply a proper 'a' parameter for the hyper g prior (like e.g. the format g='hyperg=3.1' or g='hyper=UIP') - thus set to default value 'hyper=UIP' instead.") f21a=2+2/N } } else { if (f21a<=2|f21a>4) { f21a=2+2/N warning("You provided an 'a' parameter for the hyper g prior that is not element of (2,4]. I chose the default value 'hyper=UIP' instead.") } } } else { f21a=2+2/N } gprior.info=list(gtype="hyper", is.constant=FALSE, return.g.stats=return.g.stats, shrinkage.moments=numeric(2),g=NA, hyper.parameter=f21a) #estimates the hyper g prior as given in Liang et al (2008): "Mixtures of g Priors for Bayesian Variable Selection"; JASA #initializing an object that links just.loglik and lprob.all # argument f21a corresponds to the hyper-parameter "a" in Liang et al. (2008): any value in ]2,4], default 3 #each lprob..init function has the two subfunctions: # * just.loglik: this just calculates the log likelihood from given terms - as the sampler mostly needs just that # * lprob.all: this calculates the log lik, as well as b1new (E(beta|Y) the expected value (normal-gamma) of coefficients), and b2new (the Expected value of coefficents squared E(beta^2|Y)= Var(beta|Y)+E(beta|Y)^2) #initalizing global values if (!is.numeric(null.lik)) { null.lik={1-N}/2*log(yty) } gmoments=numeric(2) N12={N-1}/2; la2=log(f21a-2) log.lik = null.lik; ymy.current=-1; k.current=-1; intconstinv=f21a-2; #intconstinv is the inverse of 1/(a-2) times the integration constant, i.e. (k+a-2)/ 2F1( (N-1)/2,1, (k+a)/2, R^2 ) #initalizing the 2F1 hypergeometric function object f21o=.f21_4hyperg(N,K,f21a) gprior.info$lprobcalc <- list( just.loglik=function(ymy,k,...) { if (k==0) {return(null.lik)} ymy.current <<- ymy; k.current <<- k intconstinv <<- {k+f21a-2}/f21o[["calcit"]](1-ymy/yty,k) if (intconstinv<0) {intconstinv <<- k+f21a-2} #this may happen because of numerical inaccuracies: e.g. ymy marginally greater than yty log.lik <<- null.lik + la2 - log(intconstinv) return(log.lik) }, lprob.all=function(ymy,k,bhat,diag.inverse,...) { if (k==0) {return(list(lprob=null.lik,b1new=numeric(0), b2new=numeric(0),otherstats=c(2/f21a,8/f21a/(f21a+2))))} N3=N-3; ka2=k+f21a-2; R2=1-ymy/yty; #collect terms if ((ymy!=ymy.current)|(k!=k.current)) { #if just.loglik was already called just before with the same parameters, no need to calculate the F-stat stuff again intconstinv <<- ka2/f21o[["calcit"]](R2,k) log.lik <<- null.lik + la2 - log(intconstinv) } g2hyper= {intconstinv-ka2+N3*R2}/{R2*{N3-ka2}} # E(g/(1+g)|Y) gbetavar = {{1+2/N3*R2/{1-R2}}*intconstinv+{N3-2}*R2-ka2} *N3*{1-R2}/{N3-ka2}/{N3-ka2-2}/R2 * yty/N3*diag.inverse #Cov(beta|Y) if (return.g.stats) { ka=ka2+2; Eg22= { {{N3-2}*R2-ka}*intconstinv + {N3*R2-ka2}^2-2*{N3*R2^2-ka2} }/R2^2/{N3-ka2}/{N3-ka} gmoments=c(g2hyper,Eg22) } return(list(lprob=log.lik, b1new = g2hyper*bhat ,b2new =gbetavar+g2hyper^2*bhat^2, otherstats=gmoments)) } ) class(gprior.info) <- c("gprior",class(gprior.info)) return(gprior.info) } ### used for gprior.hyperg.init ##### .f21_4hyperg=function(N,K,f21a,ltermbounds=c(200,600,1400,3000)) { # this function calculates the value of a Gaussian hypergeometric function 2F1((N-1)/2,1,(f21a+k)/2,z) # as given in Liang et al (2008): "Mixtures of g Priors for Bayesian Variable Selection"; JASA, formula (18) # first initialize the object by calling myobject = .f21_4hyoperg() : this calcs recycable terms to reduce redundancy # # then evaluate by myobject$calcit(z,k) #Initialization: create.lterms=function(cc) lapply(as.list(ltermbounds),function(x) (a+0:x)/(cc+0:x)) # a subfunction a=(N-1)/2; cv=(f21a+0:K)/2 #create the a and c terms for the (K+1) different model sizes lterms=(lapply(cv,create.lterms)) #create the Pochhammer terms in vectors of different lengths, lterms is a list: each element has a list of which each element contains Pochhammer terms up to the index specified in ltermbounds (see function argument) ltermbounds=c(0,ltermbounds[-length(ltermbounds)]) #this is just used for deciding which vector length to take return(list( calcit=function(z,k) { # this function actually calculates 2F1((N-1)/2,1,(f21a+k)/2,z) in its power series formulation nbterms=sum(ceiling(abs({a-cv[k+1]}/{1-z})*1.5)>=ltermbounds) # l=(a-c)/(1-z) is the index from which on the summands decline # this times 1.5 decides which lvector length (lbounds) to take return(sum(cumprod(z*lterms[[k+1]][[nbterms]]))+1) #calculate it } )) } .f21simple=function(a,c,z) { # this function calculates the value of the Gaussian hypergeometric function 2F1(a,1,c,z), where 0=<z=<1 # this function is a simple wrapper destined for user application f21o=.f21_4hyperg(2*a+1,0,c*2) f21o$calcit(z,0) } .choose.gprior <- function(g,N,K,return.g.stats=FALSE,yty=N,...) { #chooses the g-prior subject to the two parameters given from bms() user input #returns a list with gtype, is.constant, g, as well as prior-specific elements # use benchmark uip criterion as standard setting # g corresponds to argument "g" in function bms, return.g.stats to "g.stats" # N is number of obs, K max number of regressors # # returns a list conforming to the gprior.info in function 'bms' # first check for user-defined g-priors if (is.list(g)) { if (!all(c("gtype","is.constant","return.g.stats", "lprobcalc") %in% names(g))) stop("The provided g-prior list (in argument 'g') does not conform to the standards of a g-prior object.") if (!("g" %in% names(g))) {g$is.constant = FALSE; g$g=NA} if (!("shrinkage.moments" %in% names(g))) {g$shrinkage.moments=ifelse((g$is.constant&&is.numeric(g)), g/(1+g), 0)} if (!all(sapply(g$lprobcalc,is.function))) stop("The slot 'lprobcalc' in the provided g-prior list (in argument 'g') does not conform to the standards of a g-prior object.") return(g) } if (is.function(g)) { return(g(g=g,return.g.stats=return.g.stats,N=N,K=K,yty=yty,...)) } # now guess a standard g-prior from user's arguments if(is.numeric(g)){ return(.gprior.constg.init(g=g,return.g.stats=return.g.stats,N=N,K=K,yty=yty)) } # if user specifies empirical bayes estimation (local) if (any(grep("EBL",g,ignore.case=TRUE))) { return(.gprior.eblocal.init(g=g,return.g.stats=return.g.stats,N=N,K=K,yty=yty)) } # if user specifies hyper-prior (local) if (any(grep("hyper",g,ignore.case=TRUE))) { return(.gprior.hyperg.init(g=g,return.g.stats=return.g.stats,N=N,K=K,yty=yty)) } return(.gprior.constg.init(g=g,return.g.stats=return.g.stats,N=N,K=K,yty=yty)) } # end g-Stuff ########################################## # FUNCTIONS DESIGNED FOR BEING CALLED IN bms() ####################################### # these functions are only subfunction to be called inside bms() # [therefore they necessitate the statement environemnt(SUBFUNCTION) <- environment() inside bms() ] # the are defined outside of bms() for readability and modularity purposes # compatibility with package version 0.2.5 .lprob.constg.init = function(...) { gpo=.gprior.constg.init(...) return(gpo$lprobcalc) } .lprob.eblocal.init = function(...) { gpo=.gprior.eblocal.init(...) return(gpo$lprobcalc) } .lprob.hyperg.init = function(...) { gpo=.gprior.hyperg.init(...) return(gpo$lprobcalc) }
/scratch/gouwar.j/cran-all/cranData/BMS/R/aux_inner.R